Commit 6d23d3ac authored by Oran Agra

Squashed 'deps/jemalloc/' changes from ea6b3e973..54eaed1d8

54eaed1d8 Merge branch 'dev'
304c91982 Update ChangeLog for 5.3.0.
8cb814629 Make the default option of zero realloc match the system allocator.
66c889500 Make test/unit/background_thread_enable more conservative.
a7d73dd4c Update TUNING.md to include the new tcache_max option.
254b01191 Small doc tweak of opt.trust_madvise.
f5e840bbf Minor typo fix in doc.
ceca07d2c Correct the name of stats.mutexes.prof_thds_data in doc.
391bad4b9 Avoid abort() in test/integration/cpp/infallible_new_true.
9a242f16d fix some typos
0e29ad4ef Rename zero_realloc option "strict" to "alloc".
5841b6dbe Update FreeBSD image to 12.3 for cirrus ci.
ed5fc14b2 Use volatile to workaround buffer overflow false positives.
25517b852 Reorder TravisCI jobs to optimize CI time
8a49b62e7 Enable TravisCI for Windows
fdb6c1016 Add FreeBSD to TravisCI
a93931537 Do not disable SEC by default for 64k pages platforms
eaaa368ba Add comments and use meaningful vars in sz_psz2ind.
5bf03f8ce Implement PAGE_FLOOR macro
52631c90f Fix size class calculation for sec
7ae0f15c5 Add a default page size when cross-compile for Apple M1.
eb65d1b07 Fix FreeBSD system jemalloc TSD cleanup
78b58379c Fix possible "nmalloc >= ndalloc" assertion.
ca709c313 Fix failed assertion due to racy memory access
063d134ae Properly detect background thread support on Darwin.
a4e81221c Document 'make uninstall'
20f9802e4 Avoid overflow warnings in test/unit/safety_check.
8c59c44ff Add a dependency checking step at the end of malloc_conf_init.
efc539c04 Initialize prof_leak during prof init.
002f0e939 Disable TravisCI jobs generation for Windows
01a293fc0 Add Windows to TravisCI
b798fabdf Add prof_leak_error option
eafd2ac39 Forbid spaces in prefix and exec_prefix
36a09ba2c Forbid spaces in install suffix
640c3c72e Add support for 'make uninstall'
f15d8f3b4 Echo installed files via verbose 'install' command
eb196815d Avoid calculating size of size class twice & delete sc_data_global.
011449f17 Fix doc build with install-suffix.
8b49eb132 Fix the HELP_STRING of --enable-doc.
ddb170b1d Simplify arena_migrate() to take arena_t* instead of indices.
648b3b9f7 Lower the num_threads in the stress test of test/unit/prof_recent
d66162e03 Fix the extent state checking on the merge error path.
c9946fa7e FreeBSD also needs the OS-X "don't declare system functions as nothrow" fix since it also has jemalloc in the base system
89fe8ee6b Use the isb instruction instead of yield for spin locks on arm
6230cc88b Add background thread sleep retry in test/unit/hpa_background_thread
61978bbe6 Purge all if the last thread migrated away from an arena.
c91e62dd3 #include <features.h> as requested
18510020e Fix symbol conflict with musl libc
f509703af Fix two conversion warnings in tcache.
067c2da07 Fix unnecessary returns in san_(un)guard_pages_two_sided.
d660683d3 Fix test config of lg_san_uaf_align.
eabe88916 Rename full_position to low_bound in cache_bin.h.
dfdd7562f Rename san_enabled() to san_guard_enabled().
01d61a3c6 Fix a conversion warning.
8b34a788b Fix an used-uninitialized warning (false positive).
e491cef9a Add stats for stashed bytes in tcache.
b75822bc6 Implement use-after-free detection using junk and stash.
06aac61c4 Split the core logic of tcache flush into a separate function.
d038160f3 Fix shadowed variable usage.
bd70d8fc0 Add the profiling settings for tests explicitly.
e491df1d2 Fix warnings when using autoheader.
60b9637cc Only invoke malloc_cpu_count_is_deterministic() when necessary.
837b37c4c Fix the time-since computation in HPA.
310af725b Add nstime_ns_since which obtains the duration since the input time.
cafe9a315 Disable percpu arena in case of non deterministic CPU count
bb5052ce9 Fix base_ehooks_get_for_metadata
9015e129b Update visual studio projects
d90655390 San: Create a function for committing and zeroing
800ce49c1 San: Bump alloc frequently reused guarded allocations
f56f5b993 Pass 'frequent_reuse' hint to PAI
2c70e8d35 Rename 'arena_decay' to 'arena_util'
0f6da1257 San: Implement bump alloc
34b00f896 San: Avoid running san tests with prof enabled
62f9c54d2 San: Rename 'guard' to 'san'
d9bbf539f CI: Refactor gen_travis.py
7dcf77809 Mark slab as true on sized dealloc fast path.
af6ee27c0 Enforce abort_conf:true when malloc_conf is not fully recognized.
113e8e68e freebsd 14 build fix proposal.
3b3257a70 Correct opt.prof_leak documentation
cdabe908d Track the initialized state of nstime_t on debug build.
400c59895 Fix uninitialized nstime reading / updating on the stack in hpa.
8b81d3f21 Fix the initialization of last_event in thread event init.
6bdb4f5ab Check prof_active in addition to opt_prof during batch_alloc().
37342a4d3 Add ctl interface for experimental_infallible_new.
6cb585b13 San: Unguard guarded slabs during arena destruction
b6a7a535b Optimize away a branch on the free fastpath.
4d56aaeca Optimize away the tsd_fast() check on free fastpath.
26f5257b8 Remove declaration of an undefined function
215961541 Add new architecture loongarch.
8daac7958 Redefine functions with test hooks only for tests
c9ebff0fd Initialize deferred_work_generated
912324a1a Add debug check outside of the loop in hpa_alloc_batch.
cf9724531 Darwin malloc_size override support proposal.
ab0f1604b Delay the atexit call to prof_log_start().
11b6db744 CPU affinity on BSD platforms support.
83f329402 Small refactors around 7bb05e0.
3c4b717ff Remove unused header base_structs.h.
deb8e62a8 Implement guard pages.
7bb05e04b add experimental.arenas_create_ext mallctl
a9031a097 Allow setting a dump hook
f7d46b811 Allow setting custom backtrace hook
523cfa55c Guard prof related mallctl with opt_prof.
6e848a005 Remove opt_background_thread_hpa_interval_max_ms
8229cc77c Wake up background threads on demand
97da57c13 HPA: Add min_purge_interval_ms option
b8b8027f1 Allow PAI to calculate time until deferred work
26140dd24 Reject --enable-prof-libunwind without --enable-prof
e5062e9fb Makefile.in: make sure doc generated before install
8b24cb8fd Don't assume initialized arena in the default alloc hook.
c01a885e9 HPA: Correctly calculate retained pages
2c625d5cd Fix warnings when compiled with clang
9d02bdc88 Port gen_run_tests.py to python3
5884a076f Rename prof.dump_prefix to prof.prefix
6a0160071 Add Cirrus CI testing matrix
f58064b93 Verify that HPA is used before calling its functions
27f71242b Mutex: Tweak internal spin count.
6f41ba55e Mutex: Make spin count configurable.
dae24589b PH: Insert-below-min fast-path.
40d53e007 ph: Add aux-list counting and pre-merging.
dcb7b83fa Eset: Cache summary information for heap edatas.
252e0942d Eset: Pull per-pszind data into structs.
dc0a4b8b2 Edata: Pull out comparison fields into a summary.
0170dd198 Edata: Fix a couple typos.
08a4cc096 Pairing heap: inline functions instead of macros.
92a1e38f5 edata_cache: Allow unbounded fast caching.
d93eef2f4 HPA: Introduce a redesigned hpa_central_t.
e09eac1d4 Remove hpa_central.
c88fe355e Add unit tests for decay
aaea4fd1e Add more documentation to decay.c
4b633b9a8 Clean up background thread sleep computation
6630c5989 HPA: Hugification hysteresis.
113938b6f HPA: Pull out a hooks type.
1d4a7666d HPA: Do deferred operations on background threads.
583284f2d Add HPA deferral functionality.
ace329d11 HPA batch dalloc: Just do one deferred work check.
47d8a7e6b psset: Purge empty slabs first.
41fd56605 HPA: Purge across retained extents.
347523517 PAI: Fix a typo.
9c42ed2d1 Travis: Don't test "clang" on OS X.
d202218e8 HPA: Fix typos with big performance implications.
de033f56c mpsc_queue: Add module.
4452a4812 Add opt.experimental_infallible_new.
0689448b1 Travis: Unbreak the builds.
4fb93a18e extent_can_acquire_neighbor typo fix
2381efab5 ARC: add Minimum allocation alignment
2c0f4c2ac Fix typo in configure.ac: experimetal -> experimental
36c6bfb96 SEC: Allow arbitrarily many shards, cached sizes.
11beab38b Added --debug-syms-by-id option
08089589f Fix an interaction between the oversize_threshold test and bgthds.
541793821 Red-black tree: add summarize/filter.
b2c08ef2e RB unit tests: don't test reentrantly.
aea91b8c3 Clean up some minor data structure inconsistencies
1f688490e Stats: Fix a printing bug when hpa_dirty_mult = -1
4f7cb3a41 Sized deallocation: fix a typo.
12cd13cd4 Fix thread.name/prof_sys_thread_name interaction
304cdbb13 Fix a prof_recent/prof_sys_thread_name interaction
9b523c6c1 Refactor the locking in extent_recycle().
ce68f326b Avoid the release & re-acquire of the ecache locks around the merge hook.
7dc77527b Delete the mutex_pool module.
03d95cba8 Remove the unnecessary arena_ind_set in base_alloc_edata().
3093d9455 Move the edata mergeability related functions to extent.h.
7c964b035 Add rtree_write_range(): writing the same content to multiple leaf elements.
add636596 Stop checking head state in the merge hook.
49b7d7f0a Passing down the original edata on the expand path.
178493968 Use rtree tracked states to protect edata outside of ecache locks.
9ea235f8f Add witness_assert_positive_depth_to_rank().
4d8c22f9a Store edata->state in rtree leaf and make edata_t 128B aligned.
70d1541c5 Track extent is_head state in rtree leaf.
862219e46 Add quiescence sync before deleting base during arena_destroy.
a137a6825 Remove redundant declaration, pac_retain_grow_limit_get_set was declared twice in pac.h
2ae1ef7db Fix doc large size 54 KiB error
61afb6a40 Fix locking on arena_i_destroy_ctl().
9193ea224 Cirrus: fix build.
391307714 Mark head state during dss alloc.
11127240c Remove redundant enable-debug definition in configure.
22be724af Set is_head in extent_alloc_wrapper w/ retain.
73ca4b8ef HPA: Use dirtiest-first purging.
0f6c420f8 HPA: Make purging/hugifying more principled.
6bddb92ad psset: Rename "bitmap" to "pageslab_bitmap".
154aa5fcc Use the flat bitmap for eset and psset bitmaps.
271a676dc hpdata: early bailout for longest free range.
d21d5b46b Edata: Move sn into its own field.
fb327368d SEC: Expand option configurability.
ce9386370 HPA: Implement batch allocation.
cdae6706a SEC: Use batch fills.
480f3b11c Add a batch allocation interface to the PAI.
bf448d7a5 SEC: Reduce lock hold times.
1944ebbe7 HPA: Implement batch deallocation.
f47b4c2cd PAI/SEC: Add a dalloc_batch function.
4b8870c7d SEC: Fix a comment typo.
cde7097ec Update INSTALL.md to mention 'autoconf'
a11be5033 Implement opt.cache_oblivious.
8c5e5f50a Fix stats for "tcache_max" (was "lg_tcache_max")
041145c27 Report the correct and wrong sizes on sized dealloc bug detection.
f3b2668b3 Report the offending pointer on sized dealloc bug detection.
edbfe6912 Inline malloc fastpath into operator new.
79f81a373 HPA: Make dirty_mult configurable.
32dd15379 HPA: Make dehugification threshold configurable.
4790db15e HPA: make the hugification threshold configurable.
b3df80bc7 Pull HPA options into a containing struct.
bdb7307ff fxp: Add FXP_INIT_PERCENT
caef4c286 FXP: add fxp_mul_frac.
56e85c0e4 HPA: Use a whole-shard purging heuristic.
dc886e560 hpdata: Return the number of pages to be purged.
9fd9c876b psset: keep aggregate stats.
da63f23e6 HPA: Track pending purges/hugifies in the psset.
0ea3d6307 CTL, Stats: report HPA empty slab stats.
bf64557ed Move empty slab tracking to the psset.
99fc0717e psset: Reconceptualize insertion/removal.
061cabb71 HPA stats: report retained instead of inactive.
d3e5ea03c HPA: Track dirty stats.
68a1666e9 hpdata: Rename "dirty" to "touched".
be0d7a53f HPA: Don't track inactive pages.
55e0f60ca psset stats: Simplify handling.
94cd9444c HPA: Some minor reformattings.
b25ee5d88 HPA: Add purge stats.
746ea3de6 HPA stats: Allow some derived stats.
30b9e8162 HPA: Generalize purging.
70692cfb1 hpdata: Add state changing helpers.
9b75808be flat bitmap: Add a bitwise and/or/not.
2ae966222 hpdata: track per-page dirty state.
ff4086aa6 hpdata: count active pages instead of free ones.
3624dd42f hpdata: Add a comment for hpdata_consistent.
20140629b Bin: Move stats closer to the mutex.
c259323ab Use ticker_geom_t for arena tcache decay.
8edfc5b17 Add ticker_geom_t.
396732981 Arena: share bin offsets in a global.
2fcbd1811 Cache bin: Don't reverse flush order.
4c46e1136 Cache an arena's index in the arena.
229994a20 Tcache flush: keep common path state in registers.
31a629c3d Tcache flush: prefetch edata contents.
9f9247a62 Tcache flushing: increase cache miss parallelism.
181ba7fd4 Tcache flush: Add an emap "batch lookup" path.
c007c537f Tcache flush: Unify edata lookup path.
35a855260 Mac OS: Tag mapped pages.
f6699803e Fix duration in prof log
a943172b7 Add runtime detection for MADV_DONTNEED zeroes pages (mostly for qemu)
2e3104ba0 Update config.{sub,guess} to support aarch64-apple-darwin as a target
a011c4c22 cache_bin: Separate out local and remote accesses.
14d689c0f Add prof stats mutex stats
9f71b5779 Output prof stats in stats print
1f1a0231e Split macros for initializing stats headers
4352cbc21 Add alignment tests for prof stats
54f3351f1 Add mallctl for prof stats fetching
40fa4d29d Track per size class internal fragmentation
afa489c3c Record request size in prof info
f9bb8dede Un-force-inline do_rallocx.
a9fa2defd Add JEMALLOC_COLD, and mark some functions cold.
5d8e70ab2 prof_recent: cassert(config_prof) more often.
83cad746a prof_log: cassert(config_prof) in public functions
526180b76 Extent.c: Avoid an rtree NULL-check.
b35ac00d5 Do not bump to large size for page aligned request
8a56d6b63 Add last-N mutex stats
22d62d8cb Handle ending gap properly for HPA stats
6c5a3a24d Omit bin stats rows with no data
ea013d8fa Enforce realloc sizing stability
74bd63b20 Optimize stats print using partial name-to-mib
4557c0a67 Enable ctl on partial mib and partial name
006dd0414 Add partial name-to-mib functionality
f2e1a5be7 Do not fail on partial ctl path for ctl_nametomib()
6ab181d2b Extract node lookup given mib input
3a627b967 No need to record all nodes in ctl_lookup()
91e006c4c Enable ctl_lookup() to start from arbitrary node
063a767ff Define JEMALLOC_HAS_ALLOCA_H for QNX
4e3fe218e Use posix_madvise to purge pages when available
26c1dc5a3 Support AutoConf for posix_madvise and POSIX_MADV_DONTNEED
96a59c3bb Fix recursive malloc during bootstrap on QNX
986cbe488 Disable JEMALLOC_TLS for QNX
1e3b8636f HPA: Remove unused malloc_conf options.
e82771807 Cache mallctl mib for batch allocation stress test
0dfdd31e0 Add tiny batch size to batch allocation stress test
9522ae41d Move n_search outside of assert as reported by static analyzer
a559caf74 hpdata: Strengthen assertions.
f51948d9e psset unit test: fix a bug.
54c94c167 flat bitmap: add scount / ucount functions.
e6c057ad3 fb: implement assign in terms of a visitor.
734e72ce8 bit_util: Guarantee popcount's presence.
d9f7e6c66 hpdata: Add a test.
3ed0b4e8a HPA: Add an nevictions counter.
fffcefed3 malloc_conf: Clarify HPA options.
f7cf23aa4 psset: Relegate alloc/dalloc to test code.
f9299ca57 HPA: Use psset fit/insert/remove.
0971e1e4e hpdata: Use addr/size instead of begin/npages.
5228d869e psset: Use fit/insert/remove as basis functions.
089f8fa44 Move hpdata bitmap logic out of the psset.
ca30b5db2 Introduce hpdata_t.
4a15008cf HPA unit test: skip if unsupported.
43af63fff HPA: Manage whole hugepages at a time.
63677dde6 Pages: Statically detect if pages_huge may succeed
c1b2a7793 psset: Move in stats.
d0a991d47 psset: Add insert/remove functions.
d438296b1 narenas_ratio: Accept fractional values.
ecd39418a Add fxp: A fixed-point math library.
99c2d6c23 Backport jeprof --collapse for flamegraph generation
520b75fa2 utrace support with label based signature.
92e189be8 Add some comments to the batch allocation logic flow
d96e4525a Route batch allocation of small batch size to tcache
ac480136d Split out locality checking in batch allocation tests
be5e49f4f Add a batch mode for cache_bin_alloc()
4a65f3493 Fix a cache bin test
566c4a859 Slight changes to cache bin internal functions
9545c2cd3 Add sample interval to prof last-N dump
cf2549a14 Add a per-arena oversize_threshold.
4ca3d91e9 Rename geom_grow -> exp_grow.
b4c37a6e8 Rename edata_tree_t -> edata_avail_t.
95f0a77fd Detect pthread_getname_np explicitly.
b3c5690b7 Update config.{guess,sub} to 2020-11-07@77632d9
589638182 Use the edata_cache_small_t in the HPA.
03a604711 Edata cache small: rewrite.
c9757d9e3 HPA: Don't disable shards that were never started.
1b3ee7566 Add experimental.thread.activity_callback.
27ef02ca9 Android build fix proposal.
d2d941017 MADV_DO[NOT]DUMP support equivalence on FreeBSD.
180b84315 Appveyor: fix 404 errors.
ef6d51ed4 DragonFlyBSD build support.
bf72188f8 Allow opt.tcache_max to accept small size classes.
ea32060f9 SEC: Implement thread affinity.
d16849c91 psset: Do first-fit based on slab age.
634ec6f50 Edata: add an "age" field.
6599651ae PA: Use an SEC in front of the HPA shard.
ea51e97bb Add SEC module: a small extent cache.
1964b0839 HPA: Add stats for the hpa_shard.
534504d4a HPA: add size-exclusion functionality.
484f04733 HPA: Add central mutex contention stats.
bf025d2ec HPA: Make slab sizes and maxes configurable.
1c7da3331 HPA: Tie components into a PAI implementation.
c8209150f Switch from opt.lg_tcache_max to opt.tcache_max
5ba861715 Add thread name in prof last-N records
4ef5b8b4d Add a logo to doc_internal.
5e41ff9b7 Add a hard limit on tcache max size class.
3de19ba40 Eagerly detect double free and sized dealloc bugs for large sizes.
be9548f2b Tcaches: Fix a subtle race condition.
a9aa6f6d0 Fix the alloc_ctx check in free_fastpath.
b971f7c4d Add "default" option to slab sizes.
21b70cb54 Add hpa_central module
1ed7ec369 Emap: Add emap_assert_not_mapped.
2a6ba121b PRNG test: cleanups.
9e6aa77ab PRNG: Remove atomic functionality.
051304717 PRNG: Allow a range argument of 1.
bdb60a805 Appveyor: don't update msys2 keyring.
025d8c37c Add a script to check for clang-formattedness.
f6bbfc1e9 Add a .clang-format file.
259c5e3e8 psset: Add stats
018b162d6 Add psset: a set of pageslabs.
ed99d300b Flat bitmap: Add longest-range computation.
e03450069 Edata: rename "ranged" bit to "pai".
7ad2f7866 Avoid a -Wundef warning on LG_SLAB_MAXREGS.
40cf71a06 Remove --with-slab-maxregs options from INSTALL.md
36ebb5abe CI support for PPC64LE architecture
1541ffc76 configure: add --with-lg-slab-maxregs configure option.
d243b4ec4 Add PROFILING_INTERNALS.md
09eda2c9b Add unit tests for usize in prof recent records
b549389e4 Correct usize in prof last-N record
202f01d4f Fix szind computation in profiling
866231fc6 Do not repeat reentrancy test in profiling
20f2479ed Do not create size class tables for non-prof builds
8efcdc3f9 Move unbias data to prof_data
5e90fd006 Geom_grow: Don't keep the mutex internal.
c57494879 Geom_grow: Don't take tsdn at init.
ffe552223 Geom_grow: Move in advancing logic.
131b1b533 Rename ecache_grow -> geom_grow.
b399463fb flat_bitmap unit test: Silence a warning.
b0ffa39ca Mallctl stress test: fix a type.
753bbf184 Benchmarks: Also print ns / iter.
7b187360e IO: Support 0-padding for unsigned numbers.
32d467322 Add a mallctl speed stress test.
38867c5c1 Makefile: alphabetize stress/analyze utilities.
ab274a23b Add narenas_ratio.
9e18ae639 Config: safety checks don't imply size checks.
8f9e958e1 Add alignment stress test for rallocx
743021b63 Fix size miscalculation bug in reallocation
eaed1e39b Add sized-delete size-checking functionality.
53084cc5c Safety check: Don't directly abort.
60993697d Prof: Add prof_unbias.
81c2f841e Add a simple utility to detect profiling bias.
e032a1a1d Add a stress test for batch allocation
f6cf5eb38 Add mallctl for batch allocation API
978f830ee Add batch allocation API
c6f59e9bb Add surplus reading API for thread event lookahead
f80546895 Add zero option to arena batch allocation
49e5c2fe7 Add batch allocation from fresh slabs
2bb8060d5 Add empty test and concat for typed list
f28cc2bc8 Extract bin shard selection out of bin locking
ddb8dc4ad FB: Add range iteration support.
ceee82351 Add flat_bitmap.
7fde6ac49 Nbits: Add a couple more interesting sizes.
efeab1f49 bitset test: Pull NBITS_TAB into its own file.
22da83609 bit_util: Add fls_ functions; "find last set".
1ed0288d9 bit_util: Change ffs functions indexing.
786a27b9e CI: Update keyring.
fb347dc61 Verify output space before doing heavy work in mallctl
f5fb4e5a9 Modify mallctl output length when needed
425840204 Corrections for prof_log_start()
e6cb7a1c9 Shorten wait time for peak events
6107857b7 PA->PAC: Move in PAI implementation.
6041aaba9 PA -> PAC: Move in destruction functions.
cbf096b05 Arena: remove redundant bg inactivity check.
471eb5913 PAC: Move in decay rate setting.
6a2774719 PA->PAC: Move in decay functions.
4ee75be3a PA -> PAC: Move in decay_purge enum.
72435b0ab PA->PAC: Make extent.c forget about PA.
dee5d1c42 PA->PAC: Move in extent_sn.
739138234 PA->PAC: Move in stats.
db211eefb PAC: Move in decay.
c81e38999 PAC: Move in ecache_grow.
65803171a PAC: move in emap
7efcb946c PAC: Add an init function.
722652222 PAC: Move in edata_cache accesses.
777b0ba96 Add PAC: Page allocator classic.
1b5f632e0 Introduce PAI: Page allocator interface
3cf19c6e5 atomic: add atomic_load_sub_store
f1f4ec315 Tcache: Tweak nslots_max tuning parameter.
ae541d3fa Edata: Reserve some space for hugepages.
392f645f4 Edata: split up different list linkage uses.
129b72705 Add typed-list module.
00f06c9be enabling mpss on solaris/illumos.
c2e7a0639 No need to intercept prof_dump_header() in tests
f58ebdff7 Generalize prof_cnt_all() for testing
80d18c18c Pass prof dump parameters explicitly in prof_sys
d4259ea53 Simplify signatures for prof dump functions
5d823f3a9 Consolidate struct definitions for prof dump parameters
1f5fe3a3e Pass write callback explicitly in prof_data
4556d3c0c Define structures for prof dump parameters
1c6742e6a Migrate prof dumping to use buffered writer
dad821bb2 Move unwind to prof_sys
d128efcb6 Relocate a few prof utilities to the right modules
4736fb4fc Move file handling logic in prof_data to prof_sys
767a2e179 Move file handling logic in prof to prof_sys
03ae509f3 Create prof_sys module for reading system thread name
adfd9d7b1 Change tsdn to tsd for thread name allocation
841af2b42 Move thread name handling to prof_data module
8118056c0 Expose prof_data testing internals only in prof tests
f43ac8543 Correct prof header macro namings
c8683bee8 Unify printing for prof counts object
5d292b566 Push error handling logic out of core dumping logic
f541871f5 Reduce prof dump buffer size in debug build
354183b10 Define prof dump buffer size centrally
7455813e5 Make dump file writing replaceable in test
21e44c45d Make maps file opening replaceable in test
4bb4037db Extract utility function for opening maps file
f307b2580 Only replace the dump file opening function in test
d8cea8756 Move size inspections to test/analyze
537a4bedb Add a tool to examine random number distributions
d460333ef Improve naming for prof system thread name option
25e43c602 Witness: Make ranks an enum.
092fcac0b Remove unnecessary source files
a795b1932 Remove beginning define in source files
24bbf376c Unify arena flag reading and selection
e128b170a Do not fallback to auto arena when manual arena is requested
95a59d2f7 Unify tcache flag reading and selection
4b0c00848 Unify zero flag reading and setting
2a84f9b8f Unify alignment flag reading and computation
b7858abfc Expose prof testing internal functions
40fa6674a Fix prof timestamp conf reading
7e09a57b3 stress/sizes: Fix an off-by-one issue.
dcfa6fd50 stress/sizes: Add a couple more types.
40672b0b7 Remove duplicate logging in malloc.
4aea74327 High Resolution Timestamps for Profiling
d82a164d0 Add thread.peak.[read|reset] mallctls.
fe7108305 Add peak_t, for tracking allocator net max.
17a64fe91 Add a small program to print data structure sizes.
3e19ebd2e Add lock to protect prof last-N dumping
a835d9cf8 Make prof last-N dumping non-blocking
fc8bc4b5c Increase dump buffer for prof last-N list
264d89d64 Extract restore and async cleanup functions for prof last-N list
857ebd3da Make edata pointer on prof recent record an atomic fence
b8bdea6b2 Fix: prof_recent_alloc_max_ctl_read() does not take tsd
730658f72 Extract alloc/dalloc utility for last-N nodes
035be4486 Separate out dumping for each prof recent record
8da0896b7 Tcache: Make an integer conversion explicit.
cd28e6033 Don't warn on uniform initialization.
6cdac3c57 Tcache: Make flush fractions configurable.
7503b5b33 Stats, CTL: Expose new tcache settings.
ee72bf1cf Tcache: Add tcache gc delay option.
d338dd45d Tcache: Make incremental gc bytes configurable.
ec0b57956 Tcache: Privatize opt_lg_tcache_max default.
10b96f635 Tcache: Remove some unused gc constants.
181093173 Tcache: make slot sizing configurable.
b58dea8d1 Cache bin: expose ncached_max publicly.
634afc412 Tcache: Make size computation configurable.
97b7a9cf7 Add a fill/flush microbenchmark.
33372cbd4 cpu instruction spin wait for arm32/64
27f29e424 LQ_QUANTUM should be 4 on mips64 hardware.
eda9c2858 Edata: zero stack edatas before initializing.
5dead37a9 Allow narenas:default.
dcea2c0f8 Get rid of TSD -> thread event dependency
75dae934a Always initialize TE counters in TSD init
b06dfb9cc Push event handlers to constituent modules
381c97caa Treat postponed prof sample event as new event
abd467493 Extract out per event postponed wait time fetching
f72014d09 Only compute thread event threshold once per trigger
7324c4f85 Break down event init and handler functions
6de77799d Move thread event wait time update to local
733ae918f Extract out per event new wait time fetching
1e2524e15 Do not reset sample wait time when re-initing tdata
855d20f6f Remove outdated comments in thread event
fc052ff72 Migrate counter to use locked int
b543c20a9 Minor update to locked int
f533ab6da Add forking handling for stats
508303077 Add forking handling for prof idump counter
4d970f8bf Add forking handling for counter module
2097e1945 Unify write callback signature
fef9abdcc Cleanup tcache allocation logic
e6cb6919c Consolidate prof inline function headers
d454af90f Remove unused prof_accum field from arena
8be558449 Initialize prof idump counter once rather than once per arena
e10e5059e Make prof_idump_accum() non-inline
039bfd4e3 Do not rollback prof idump counter in arena_prof_promote()
0295aa38a Deduplicate entries in witness error message
f1f8a7549 Let opt.zero propagate to core allocation.
2c09d4349 Add a benchmark of large allocations.
46471ea32 SC: Name the max lookup constant.
79dd0c04e SC: Simplify SC_NPSIZES computation.
fb6cfffd3 Configure: Get rid of LG_QUANTA.
4f8efba82 TSD: Make rtree_ctx a slow-path field.
cd29ebefd Tcache: treat small and large cache bins uniformly
a13fbad37 Tcache: split up fast and slow path data.
7099c6620 Arena: fill in terms of cache_bins.
40e7aed59 TSD: Move in some of the tcache fields.
58a00df23 TSD: Put all fast-path data together.
3589571bf SC: use SC_LG_NGROUP instead of its value.
877af247a QL, QR: Add documentation.
79ae7f921 Rtree: Remove the per-field accessors.
26e9a3103 PA: Simple decay test.
bb6a41852 Emap: Drop szind/slab splitting parameters.
50289750b Extent: Remove szind/slab knowledge.
dc26b3009 Rtree: Clean up compact/non-compact split.
93b99dd14 Extent: Stop passing an edata_cache everywhere.
a4759a191 Ehooks: avoid touching arena_emap_global in tests.
11c47cb13 Extent: Take "bool zero" over "bool *zero".
1a1124462 PA: Take zero as a bool rather than as a bool *.
294b276fc PA: Parameterize emap.  Move emap_global to arena.
f73057727 Eset: Parameterize last globals accesses.
7bb6e2dc0 Eset: take opt_lg_max_active_fit as a parameter.
883ab327c Emap: Move out last edata state touching.
0c96a2f03 Emap: Move out remaining edata modifications.
dfef0df71 Emap: Move edata modification out of emap_remap.
12eb888e5 Edata: Add a ranged bit.
bd4fdf295 Rtree: Pull leaf contents into their own struct.
faec7219b PA: Move in decay initialization.
45671e4a2 PA: Move in retain growth limit setting.
daefde88f PA: Move in mutex stats reading.
07675840a PA: Move in some more internals accesses.
238f3c743 PA: Move in full stats merging.
81c602759 Arena stats: Give it its own "mapped".
506d907e4 PA: Move in basic stats merging.
f29f6090f PA: Add pa_extra.c and put PA forking there.
8164fad40 Stats: Fix edata_cache size merging.
565045ef7 Arena: Make more derived stats non-atomic/locked.
d0c43217b Arena stats: Move retained to PA, use plain ints.
e2cf3fb1a PA: Move in all modifications of mapped.
436789ad9 PA: Make mapped stat atomic.
3c28aa6f1 PA: Move edata_avail stat in, make it non-atomic.
f6bfa3dcc Move extent stats to the PA module.
527dd4cdb PA: Move in nactive counter.
c075fd0bc PA: Minor cleanups and comment fixes.
46a9d7fc0 PA: Move in rest of purging.
2d6eec7b5 PA: Move in decay-all pathway.
65698b7f2 PA: Remove public visibility of some internals.
f012c43be PA: Move in decay_to_limit
103f5feda Move bg thread activity check out of purging core.
3034f4a50 PA: Move in decay_stashed.
aef28b2f8 PA: Move in stash_decayed.
655a09634 Move bg inactivity check out of purge inner loop.
71fc0dc96 PA: Move in remaining page allocation functions.
74958567a PA: have expand take sizes instead of new usize.
5bcc2c2ab PA: Have expand take szind and slab.
0880c2ab9 PA: Have large expands use it.
7be3dea82 PA: Have slab allocations use it.
9f93625c1 PA: Move in arena large allocation functionality.
7624043a4 PA: Add ehook-getting support.
eba35e2e4 Remove extent knowledge of arena.
e77f47a85 Move arena decay getters to PA.
48a2cd6d7 Decay: Add a (mostly stub) test case.
f77cec311 Decay: Take current time as an argument.
bf55e58e6 Rename test/unit/decay -> test/unit/arena_decay.
d1d7e1076 Decay: move in some background_thread accesses.
cdb916ed3 Decay: Add comments for the public API.
8f2193dc8 Decay: Move in arena decay functions.
4d090d23f Decay: Introduce a stub .c file.
7b6288547 Introduce decay module and put decay objects in PA
497836dbc Arena stats: mark edata_avail as derived.
3192d6b77 Extents: Have extent_dalloc_gap take ehooks.
22a0a7b93 Move arena_decay_extent to extent module.
70d12ffa0 PA: Move mapped into pa stats.
6ca918d0c PA: Add a stats comment.
ce8c0d6c0 PA: Move in arena extent_sn counter.
1ada4aef8 PA: Get rid of arena_ind_get calls.
1ad368c8b PA: Move in decay stats.
356aaa7dc Introduce lockedint module.
acd0bf6a2 PA: move in ecache_grow.
32cb7c2f0 PA: Add a stats type.
688fb3eb8 PA: Move in the arena edata_cache.
8433ad84e PA: move in shard initialization.
a24faed56 PA: Move in the ecache_t objects.
585f92505 Move cache index randomization out of extent.
12be9f572 Add a stub PA module -- a page allocator.
c4e9ea8cc Get rid of locks in prof recent test
2deabac07 Get rid of custom iterator for last-N records
a5ddfa7d9 Use ql for prof last-N list
8da6676a0 Don't do reentrant testing in junk tests.
ce17af422 Better structure ql module
4b66297ea Add move constructor to ql module
a62b7ed92 Add emptiness checking to ql module
1dd24ca6d Add rotate functionality to ql module
0dc95a882 Add concat and split functionality to ql module
1ad06aa53 deduplicate insert and delete logic in qr module
c9d56cddf Optimize meld in qr module
0d6d9e858 configure.ac: Put public symbols on one line.
f9aad7a49 Add piping API to buffered writer
09cd79495 Encapsulate buffer allocation failure in buffered writer
a166c2081 Make prof_tctx_t pointer a true prof atomic fence
d936b46d3 Add malloc_conf_2_conf_harder
3b4a03b92 Mac: don't declare system functions as nothrow.
2256ef896 Add option to fetch system thread name on each prof sample
ccdc70a5c Fix: assertion could abort on past failures
b30a5c2f9 Reorganize cpp APIs and suppress unused function warnings
2e5899c12 Stats: Fix tcache_bytes reporting.
a5780598b Remove thread_event_rollback()
ba783b3a0 Remove prof -> thread_event dependency
441d88d1c Rewrite profiling thread event
0dcd57660 Edata cache: atomic fetch-add -> load-store.
99b1291d1 Edata cache: add edata_cache_small_t.
734109d9c Edata cache: add a unit test.
e732344ef Inspect test: Reduce checks when profiling is on.
92485032b Cache bin: improve comments.
d701a085c Fast path: allow low-water mark changes.
397da0386 Cache bin: rewrite to track more state.
fef0b1ffe Cache bin: Remove last internals accesses.
0a2fcfac0 Tcache: Hold cache bin allocation explicitly.
d498a4bb0 Cache bin: Add an emptiness assertion.
6a7aa46ef Cache bin: Add a debug method for init checking.
370c1ea00 Cache bin: Write the unit test in terms of the API
7f5ebd211 Cache bin: set low-water internally.
60113dfe3 Cache bin: Move in initialization code.
44529da85 Cache-bin: Make flush modifications internal
ff6acc6ed Cache bin: simplify names and argument ordering.
e1dcc557d Cache bin: Only take the relevant cache_bin_info_t
1b00d808d cache_bin: Don't let arena see empty position.
d303f3079 cache_bin nflush -> n.
74d36d78e Cache bin: Make ncached_max a query on the info_t.
b66c0973c cache_bin: Don't allow direct internals access.
da68f7329 Move percpu_arena_update.
909c501b0 Cache_bin: Shouldn't know about tcache.
79f1ee2fc Move junking out of arena/tcache code.
b428dceea Config: Warn on void * pointer arithmetic.
22657a5e6 Extents: Silence the "potentially unused" warning.
4a78c6d81 Correct thread event unit test
305b1f6d9 Correction on geometric sampling
6c3491ad3 Tcache: Unify bin flush logic.
9f4fc2738 Ehooks: Fix a build warning.
bc31041ed Cirrus-CI: test on new freebsd releases.
51bd14742 Make use of assert_* in test/unit/thread_event.c
9d2cc3b0f Make use of assert_* in test/unit/prof_recent.c
a88d22ea1 Make use of assert_* in test/unit/inspect.c
0ceb31184 Make use of assert_* in test/unit/buf_writer.c
fa6157938 Add assert_* functionality to tests
21dfa4300 Change assert_* to expect_* in tests
162c2bcf3 Background thread: take base as a parameter.
29436fa05 Break prof and tcache knowledge of b0.
a0c1f4ac5 Rtree: take the base allocator as a parameter.
7013716aa Emap: Take (and propagate) a zeroed parameter.
182192f83 Base: Pull into a single header.
34b7165fd Put szind_t, pszind_t in sz.h.
7e6c8a728 Emap: Standardize naming.
ac50c1e44 Emap: Remove direct access to emap internals.
06e42090f Make jemalloc.c use the emap interface.
f7d9c6c42 Emap: Move in alloc_ctx lookup functionality.
65a54d771 Emap: Move in szind and slab modifications.
9b5d105fc Emap: Move in iealloc.
1d449bd9a Emap: Internal rtree context setting.
08eb1e6c3 Emap: Comments and cleanup
231d1477e Rename emap_split_prepare_t -> emap_prepare_t.
0586a56f3 Emap: Move in merge functionality.
040eac77c Tell edatas their creation arena immediately.
7c7b70206 Emap: Move over metadata splitting logic.
44f5f5360 Emap: Move over deregistration functions.
6513d9d92 Emap: Move over deregistration boundary functions.
9b5ca0b09 Emap: Move in slab interior registration.
d05b61db4 Emap: Move extent boundary registration in.
ca21ce407 Emap: Move in write_acquired from extent.
01f255161 Add emap, for tracking extent locking.
0f686e82a Avoid variable length array with length 0.
68e8ddcaf Add mallctl for dumping last-N profiling records
bc05ecebf Add const qualifier in assert_cmp()
ba0e35411 Rework the bin locking around tcache refill / flush.
7fd22f7b2 Fix Undefined Behavior in hash.h
ca1f08225 Disallow merge across mmap regions to preserve SN / first-fit.
7014f81e1 Add ASSURED_WRITE in mallctl
247688919 Add inspect.c to MSVC filters
9cac3fa8f Encapsulate buffer allocation in buffered writer
bdc08b515 Better naming buffered writer
c6bfe5585 Update the tsd description.
e89652261 Abbreviate thread-event to te.
5e500523a Remove thread_event_boot().
97dd79db6 Implement deallocation events.
536ea6858 NetBSD specific changes: NetBSD overcommits; when mapping pages, use the maximum of the requested alignment and the compiled-in PAGE constant, which might be greater than the current kernel page size, since we compile binaries with the maximum page size supported by the architecture (so that they work with all kernels).
974222c62 Add safety check on sdallocx slow / sampled path.
88d9eca84 Enforce page alignment for sampled allocations.
0f552ed67 Don't purge huge extents when decay is off.
38a48e574 Set reentrancy to 1 for tsd_state_purgatory.
88b0e03a4 Implement opt.stats_interval and the _opts options.
d71a145ec Change prof_accum_t to counter_accum_t for general purpose.
ea351a7b5 Fix syntax errors in doc for thread.idle.
d92f0175c Introduce NEITHER_READ_NOR_WRITE in ctl.
6a622867c Add "thread.idle" mallctl.
f81341a48 Fallback to unbuffered printing if OOM
cd6e90824 Add stress test for last-N profiling mode
84b28c6a1 Properly handle tdata deletion race
d33120856 Get rid of redundant logic in prof
a72ea0db6 Restructure and correct sleep utility for testing
7b67ed0b5 Get rid of lock overlap in prof_recent_alloc_reset
bd3be8e0b Remove commit parameter to ecache functions.
b8df719d5 No tdata creation for backtracing on dying thread
dab81bd31 Rework and fix the assertions on malloc fastpath.
ad3f3fc56 Fetch time after tctx and only for samples
a5d3dd405 Fix an assertion on extent head state with dss.
2b604a301 Record request size in prof recent entries
40a391408 Define constructor for buffered writer argument
6d8e61690 Make buffered writer an independent module
6b6b4709b Unify buffered writer naming
9a60cf54e Last-N profiling mode
7a27a0594 Delete tdata states used for cleanup
e98ddf798 Fix unlikely condition in arena_prof_info_get()
3fa142cf3 Remove _externs from prof internal header names
112dc36dd Handle log_mtx during forking
ea42174d0 Refactor profiling headers
6342da097 Ehooks: Further optimize default merge case.
f2f2084e7 Ehooks: Assert alloc isn't NULL
e210ccc57 Move extent2 -> extent.
2f4fa8041 Rename extents -> ecache.
56cc56b69 Break extent split dependence on arena.
0aa9769fb Break commit functions' arena dependence
48ec5d435 Break extent_coalesce arena dependence
282a38232 Extent: Break [de]activation's arena dependence.
576d7047a Ecache: Should know its arena_ind.
372042a08 Remove merge dependence on the arena.
439219be7 Remove extent_can_coalesce arena dependency.
9cad5639f Ehooks: remove arena_ind parameter.
57fe99d4b Move relevant index into the ehooks_t itself.
c792f3e4a edata_cache: Remember the associated base_t.
ae23e5f42 Unify extent_alloc_wrapper with the other wrappers.
d8b0b66c6 Put extent_state_t into ecache as well as eset.
98eb40e56 Move delay_coalesce from the eset to the ecache.
bb70df8e5 Extent refactor: Introduce ecache module.
070451624 Ehooks: Add head tracking.
09475bf8a extent_may_dalloc -> ehooks_dalloc_will_fail
785918417 Pull out edata_t caching into its own module.
a7862df61 Rename extent_t to edata_t.
865debda2 Rename extent.h -> edata.h.
a738a66b5 Ehooks: Add some debug zero and addr checks.
4b2e5ee8b Ehooks: Add a "zero" ehook.
d0f187ad3 Arena: Loosen arena_may_have_muzzy restrictions.
ebbb97327 Base: Remove some unnecessary reentrancy guards.
403f2d166 Extents: Split out introspection functionality.
92a511d38 Make extent module hermetic.
e08c581cf Extent: Get rid of extent-specific pre/post reentrancy calls.
39fdc690a Ehooks comments and cleanup.
c8dae890c Extent -> Ehooks: Move over default hooks.
2fe510826 Extent -> Ehooks: Move merge hook.
1fff4d2ee Extent -> Ehooks: Move split hook.
a5b42a1a1 Extent -> Ehooks: Move purge_forced hook.
368baa42e Extent -> Ehooks: Move purge_lazy hook.
f83fdf533 Extent: Clean up a comma
d78fe241a Extent -> Ehooks: Move commit and decommit hooks.
5459ec9da Extent -> Ehooks: Move destroy hook.
bac8e2e5a Extent -> Ehooks: Move dalloc hook.
dc8b4e6e1 Extent -> Ehooks: Move alloc hook.
703fbc0ff Introduce unsafe reentrancy guards.
ae0d8e859 Move extent ehook calls into ehooks
ba8b9ecbc Add ehooks module
837119a94 base_structs.h: Remove some mid-line tabs.
9f6eb0958 Extents: Eagerly initialize extent hooks.
4278f8460 Move extent hook getters/setters to arena.c
9226e1f0d fix opt.thp:never still use THP with base_new
d5031ea82 Allow dallocx and sdallocx after tsd destruction.
4afd709d1 Restructure setters for profiling info
1d01e4c77 Initialization utilities for nstime
dd649c948 Optimize away the tsd_fast() check on fastpath.
1decf958d Fix incorrect usage of cassert.
45836d7fd Pass nstime_t pointer for profiling
7d2bac5a3 Refactor destroy code path for prof_tctx
055478cca Threshold is no longer updated before prof_realloc()
7e3671911 Get rid of old indentation style for prof
dfdd46f6c Refactor prof_tctx_t creation
aa1d71fb7 Rename prof_tctx to alloc_tctx in prof_info_t
5e0b09099 No need to pass usize to prof_tctx_set()
1b1e76acf Disable some spuriously-triggering warnings
a70909b13 Test on all supported release of FreeBSD
5c47a3022 Guard C++ aligned APIs
694537177 Change tsdn to tsd for profiling code path
b55419f9b Restructure profiling
8b2c2a596 Support C++17 over-aligned allocation
9a3c73800 Refactor arena_bin_malloc_hard().
9a7ae3c97 Reduce footprint of bin_t.
cb1a1f4ad Remove the unnecessary alloc_ctx on free_fastpath.
716061710 Add branch hints to free_fastpath.
a787d2f5b Prefer getaffinity() to detect number of CPUs.
04cb7d4d6 Bail out early for muzzy decay.
73510dfd1 Revert "Fix bug in prof_realloc"
3b5eecf10 Fix bug in prof_realloc
e4c36a6f3 Emphasize no modification through thread.allocatedp allowed.
c462753cc Use __forceinline for JEMALLOC_ALWAYS_INLINE on msvc
836d7a7e6 Check for large size first in the uncommon case of malloc.
9c59abe42 Fix a typo in Makefile.
da50d8ce8 Refactor and optimize prof sampling initialization.
bc774a351 Rename tsd->offset_state to tsd->prng_state.
19a51abf3 Avoid arena->offset_state when tsd not available for prng.
d01b425e5 Add -Wimplicit-fallthrough checks if supported
a8b578d53 Remove mallctl test for zero_realloc
43f0ce92d Define general purpose tsd_thread_event_init()
97f93fa0f Pull tcache GC events into thread event handler
198f02e79 Pull prof_accumbytes into thread event handler
152c0ef95 Build a general purpose thread event handler
6924f83cb use SYS_openat when available
de81a4ead Add stats counters for number of zero reallocs
9cfa80594 Realloc: Make behavior of realloc(ptr, 0) configurable.
ee961c231 Merge realloc and rallocx pathways.
bd6e28d6a Guard slabcur fetching in extent_util
4786099a3 Increase column width for global malloc/free rate
05681e387 Optimize cache_bin_alloc_easy for malloc fast path
4fe50bc7d Fix amd64 MSVC warning
4fbbc817c Simplify time setting and getting for prof log
4094b7c03 Limit # of iters of test_bitmap_xfu.
66e07f986 Suppress tdata creation in reentrancy
beb7c16e9 Guard prof_active reset by opt_prof
1df9dd351 Fix je_ prefix issue in test
3d84bd57f Arena: Add helper function arena_get_from_extent.
c97d25575 Eset: Remove temporary declaration.
ce5b128f1 Remove the undefined extent_size_quantize declarations.
821dd53a1 Extent -> Eset: Rename arena members.
e144b21e4 Extent -> Eset: Move fork handling.
77bbb35a9 Extent -> Eset: Move extent fit functions.
1210af9a4 Extent -> Eset: Move insertion and removal.
a42861540 Extents -> Eset: Convert some stats getters.
820f070c6 Move page quantization to sz module.
63d1b7a7a Extents -> Eset: move extents_state_get.
b416b96a3 Extents -> Eset: rename/move extents_init.
e6180fe1b Eset: Add a source file.
4e5e43f22 Rename extents_t -> eset_t.
723ccc6c2 Extents: Split out extent struct.
41187bdfb Extents: Break extent-struct/arena interactions
529cfe2ab Arena: rename arena_structs_b.h -> arena_structs.h
e7cf84a8d Rearrange slab data and constants
d1be488cd Add --with-lg-page=16 to CI.
ac5185f73 Fix tcache bin stack alignment.
b7c7df24b Add max_per_bg_thd stats for per background thread mutexes.
4b76c684b Add "prof.dump_prefix" to override filename prefixes for dumps.
242af439b Rename "prof_dump_seq_mtx" to "prof_dump_filename_mtx".
e06658cb2 check GNU make exists in path
22bc75ee3 Workaround the stringop-overflow check false positives.
93d615180 Pass tsd down to prof_backtrace()
671f120e2 Fix prof_backtrace() reentrancy level
785b84e60 Make cache_bin_sz_t unsigned.
23dc7a7fb Fix index type for cache_bin_alloc_easy.
2abb02ecd Fix MSVC 2015 build, as proposed by @christianaguilera-foundry.
719583f14 Fix large.nflushes in the merged stats.
adce29c88 Optimize for prof_active off
49e6fbce7 Always adjust thread_(de)allocated
57b81c078 Pull thread_(de)allocated out of config_stats
9e031c1d1 Bug fix for prof_active switch
0043e68d4 Track low_water == -1 case explicitly.
937ca1db9 Store ncached_max * ptr_size in tcache_bin_info.
7599c82d4 Redesign the cache bin metadata for fast path.
d2dddfb82 Add hint in the bogus version string.
d6b7995c1 Update INSTALL.md about the default doc build.
e2c758436 Simplify / refactor tcache_dalloc_large.
9c5c2a2c8 Unify the signature of tcache_flush small and large.
28ed9b9a5 Buffer stats printing
eb70fef8c Make compact json format as default
a219cfcda Clear tcache prof_accumbytes in tcache_flush_cache
ad3f7dbfa Buffer prof_log_stop
593484661 Fix large bin index accessed through cache bin descriptor.
22746d3c9 Properly dalloc prof nodes with idalloctm.
8c8466fa6 Add compact json option for emitter
7fc6b1b25 Add buffered writer
39343555d Report stats for tdatas_mtx and prof_dump_mtx
87e2400cb Fix tcaches mutex pre- / post-fork handling.
07ce2434b Refactor profiling
56126d0d2 Refactor prof log
56c8ecffc Correct tsd layout graph

git-subtree-dir: deps/jemalloc
git-subtree-split: 54eaed1d8b56b1aa528be3bdd1877e59c56fa90c
parent 220a0f08
@@ -16,15 +16,39 @@
   <ClCompile Include="..\..\..\..\src\base.c">
     <Filter>Source Files</Filter>
   </ClCompile>
+  <ClCompile Include="..\..\..\..\src\bin.c">
+    <Filter>Source Files</Filter>
+  </ClCompile>
   <ClCompile Include="..\..\..\..\src\bitmap.c">
     <Filter>Source Files</Filter>
   </ClCompile>
+  <ClCompile Include="..\..\..\..\src\buf_writer.c">
+    <Filter>Source Files</Filter>
+  </ClCompile>
+  <ClCompile Include="..\..\..\..\src\cache_bin.c">
+    <Filter>Source Files</Filter>
+  </ClCompile>
   <ClCompile Include="..\..\..\..\src\ckh.c">
     <Filter>Source Files</Filter>
   </ClCompile>
+  <ClCompile Include="..\..\..\..\src\counter.c">
+    <Filter>Source Files</Filter>
+  </ClCompile>
   <ClCompile Include="..\..\..\..\src\ctl.c">
     <Filter>Source Files</Filter>
   </ClCompile>
+  <ClCompile Include="..\..\..\..\src\decay.c">
+    <Filter>Source Files</Filter>
+  </ClCompile>
+  <ClCompile Include="..\..\..\..\src\div.c">
+    <Filter>Source Files</Filter>
+  </ClCompile>
+  <ClCompile Include="..\..\..\..\src\emap.c">
+    <Filter>Source Files</Filter>
+  </ClCompile>
+  <ClCompile Include="..\..\..\..\src\exp_grow.c">
+    <Filter>Source Files</Filter>
+  </ClCompile>
   <ClCompile Include="..\..\..\..\src\extent.c">
     <Filter>Source Files</Filter>
   </ClCompile>
@@ -34,45 +58,93 @@
   <ClCompile Include="..\..\..\..\src\extent_mmap.c">
     <Filter>Source Files</Filter>
   </ClCompile>
-  <ClCompile Include="..\..\..\..\src\hash.c">
+  <ClCompile Include="..\..\..\..\src\fxp.c">
     <Filter>Source Files</Filter>
   </ClCompile>
   <ClCompile Include="..\..\..\..\src\hook.c">
     <Filter>Source Files</Filter>
   </ClCompile>
+  <ClCompile Include="..\..\..\..\src\hpa.c">
+    <Filter>Source Files</Filter>
+  </ClCompile>
+  <ClCompile Include="..\..\..\..\src\hpa_hooks.c">
+    <Filter>Source Files</Filter>
+  </ClCompile>
+  <ClCompile Include="..\..\..\..\src\hpdata.c">
+    <Filter>Source Files</Filter>
+  </ClCompile>
+  <ClCompile Include="..\..\..\..\src\inspect.c">
+    <Filter>Source Files</Filter>
+  </ClCompile>
   <ClCompile Include="..\..\..\..\src\jemalloc.c">
     <Filter>Source Files</Filter>
   </ClCompile>
   <ClCompile Include="..\..\..\..\src\large.c">
     <Filter>Source Files</Filter>
   </ClCompile>
+  <ClCompile Include="..\..\..\..\src\log.c">
+    <Filter>Source Files</Filter>
+  </ClCompile>
   <ClCompile Include="..\..\..\..\src\malloc_io.c">
     <Filter>Source Files</Filter>
   </ClCompile>
   <ClCompile Include="..\..\..\..\src\mutex.c">
     <Filter>Source Files</Filter>
   </ClCompile>
-  <ClCompile Include="..\..\..\..\src\mutex_pool.c">
+  <ClCompile Include="..\..\..\..\src\nstime.c">
     <Filter>Source Files</Filter>
   </ClCompile>
-  <ClCompile Include="..\..\..\..\src\nstime.c">
+  <ClCompile Include="..\..\..\..\src\pa.c">
+    <Filter>Source Files</Filter>
+  </ClCompile>
+  <ClCompile Include="..\..\..\..\src\pa_extra.c">
+    <Filter>Source Files</Filter>
+  </ClCompile>
+  <ClCompile Include="..\..\..\..\src\pai.c">
+    <Filter>Source Files</Filter>
+  </ClCompile>
+  <ClCompile Include="..\..\..\..\src\pac.c">
     <Filter>Source Files</Filter>
   </ClCompile>
   <ClCompile Include="..\..\..\..\src\pages.c">
     <Filter>Source Files</Filter>
   </ClCompile>
-  <ClCompile Include="..\..\..\..\src\prng.c">
+  <ClCompile Include="..\..\..\..\src\peak_event.c">
     <Filter>Source Files</Filter>
   </ClCompile>
   <ClCompile Include="..\..\..\..\src\prof.c">
     <Filter>Source Files</Filter>
   </ClCompile>
+  <ClCompile Include="..\..\..\..\src\prof_data.c">
+    <Filter>Source Files</Filter>
+  </ClCompile>
+  <ClCompile Include="..\..\..\..\src\prof_log.c">
+    <Filter>Source Files</Filter>
+  </ClCompile>
+  <ClCompile Include="..\..\..\..\src\prof_recent.c">
+    <Filter>Source Files</Filter>
+  </ClCompile>
+  <ClCompile Include="..\..\..\..\src\prof_stats.c">
+    <Filter>Source Files</Filter>
+  </ClCompile>
+  <ClCompile Include="..\..\..\..\src\prof_sys.c">
+    <Filter>Source Files</Filter>
+  </ClCompile>
+  <ClCompile Include="..\..\..\..\src\psset.c">
+    <Filter>Source Files</Filter>
+  </ClCompile>
   <ClCompile Include="..\..\..\..\src\rtree.c">
     <Filter>Source Files</Filter>
   </ClCompile>
+  <ClCompile Include="..\..\..\..\src\safety_check.c">
+    <Filter>Source Files</Filter>
+  </ClCompile>
   <ClCompile Include="..\..\..\..\src\sc.c">
     <Filter>Source Files</Filter>
   </ClCompile>
+  <ClCompile Include="..\..\..\..\src\sec.c">
+    <Filter>Source Files</Filter>
+  </ClCompile>
   <ClCompile Include="..\..\..\..\src\stats.c">
     <Filter>Source Files</Filter>
   </ClCompile>
@@ -82,6 +154,12 @@
   <ClCompile Include="..\..\..\..\src\tcache.c">
     <Filter>Source Files</Filter>
   </ClCompile>
+  <ClCompile Include="..\..\..\..\src\test_hooks.c">
+    <Filter>Source Files</Filter>
+  </ClCompile>
+  <ClCompile Include="..\..\..\..\src\thread_event.c">
+    <Filter>Source Files</Filter>
+  </ClCompile>
   <ClCompile Include="..\..\..\..\src\ticker.c">
     <Filter>Source Files</Filter>
   </ClCompile>
@@ -91,20 +169,29 @@
   <ClCompile Include="..\..\..\..\src\witness.c">
     <Filter>Source Files</Filter>
   </ClCompile>
-  <ClCompile Include="..\..\..\..\src\log.c">
+  <ClCompile Include="..\..\..\..\src\bin_info.c">
     <Filter>Source Files</Filter>
   </ClCompile>
-  <ClCompile Include="..\..\..\..\src\bin.c">
+  <ClCompile Include="..\..\..\..\src\ecache.c">
     <Filter>Source Files</Filter>
   </ClCompile>
-  <ClCompile Include="..\..\..\..\src\div.c">
+  <ClCompile Include="..\..\..\..\src\edata.c">
     <Filter>Source Files</Filter>
   </ClCompile>
-  <ClCompile Include="..\..\..\..\src\test_hooks.c">
+  <ClCompile Include="..\..\..\..\src\edata_cache.c">
     <Filter>Source Files</Filter>
   </ClCompile>
-  <ClCompile Include="..\..\..\..\src\safety_check.c">
+  <ClCompile Include="..\..\..\..\src\ehooks.c">
+    <Filter>Source Files</Filter>
+  </ClCompile>
+  <ClCompile Include="..\..\..\..\src\eset.c">
+    <Filter>Source Files</Filter>
+  </ClCompile>
+  <ClCompile Include="..\..\..\..\src\san.c">
+    <Filter>Source Files</Filter>
+  </ClCompile>
+  <ClCompile Include="..\..\..\..\src\san_bump.c">
     <Filter>Source Files</Filter>
   </ClCompile>
   </ItemGroup>
 </Project>
\ No newline at end of file
@@ -9,6 +9,7 @@
 #include <thread>
 #include <vector>
 #include <stdio.h>
+#define JEMALLOC_NO_DEMANGLE
 #include <jemalloc/jemalloc.h>
 using std::vector;
#!/bin/bash
# The files that need to be properly formatted. We'll grow this incrementally
# until it includes all the jemalloc source files (as we convert things over),
# and then just replace it with
# find -name '*.c' -o -name '*.h' -o -name '*.cpp
FILES=(
)
if command -v clang-format &> /dev/null; then
    CLANG_FORMAT="clang-format"
elif command -v clang-format-8 &> /dev/null; then
    CLANG_FORMAT="clang-format-8"
else
    echo "Couldn't find clang-format."
fi
if ! $CLANG_FORMAT -version | grep "version 8\." &> /dev/null; then
    echo "clang-format is the wrong version."
    exit 1
fi
for file in ${FILES[@]}; do
    if ! cmp --silent $file <($CLANG_FORMAT $file) &> /dev/null; then
        echo "Error: $file is not clang-formatted"
        exit 1
    fi
done
#!/bin/tcsh
su -m root -c 'pkg install -y git'
#!/bin/tcsh
autoconf
# We don't perfectly track freebsd stdlib.h definitions. This is fine when
# we count as a system header, but breaks otherwise, like during these
# tests.
./configure --with-jemalloc-prefix=ci_ ${COMPILER_FLAGS:+ CC="$CC $COMPILER_FLAGS" CXX="$CXX $COMPILER_FLAGS"} $CONFIGURE_FLAGS
JE_NCPUS=`sysctl -n kern.smp.cpus`
gmake -j${JE_NCPUS}
gmake -j${JE_NCPUS} tests
#!/bin/tcsh
gmake check
-#!/usr/bin/env python
+#!/usr/bin/env python3
 import sys
 from itertools import combinations
@@ -14,14 +14,14 @@ nparallel = cpu_count() * 2
 uname = uname()[0]
-if "BSD" in uname:
+if call("command -v gmake", shell=True) == 0:
     make_cmd = 'gmake'
 else:
     make_cmd = 'make'
 def powerset(items):
     result = []
-    for i in xrange(len(items) + 1):
+    for i in range(len(items) + 1):
         result += combinations(items, i)
     return result
@@ -41,6 +41,7 @@ possible_config_opts = [
     '--enable-prof',
     '--disable-stats',
     '--enable-opt-safety-checks',
+    '--with-lg-page=16',
 ]
 if bits_64:
     possible_config_opts.append('--with-lg-vaddr=56')
@@ -52,19 +53,20 @@ possible_malloc_conf_opts = [
     'background_thread:true',
 ]
-print 'set -e'
-print 'if [ -f Makefile ] ; then %(make_cmd)s relclean ; fi' % {'make_cmd': make_cmd}
-print 'autoconf'
-print 'rm -rf run_tests.out'
-print 'mkdir run_tests.out'
-print 'cd run_tests.out'
+print('set -e')
+print('if [ -f Makefile ] ; then %(make_cmd)s relclean ; fi' % {'make_cmd':
+    make_cmd})
+print('autoconf')
+print('rm -rf run_tests.out')
+print('mkdir run_tests.out')
+print('cd run_tests.out')
 ind = 0
 for cc, cxx in possible_compilers:
     for compiler_opts in powerset(possible_compiler_opts):
         for config_opts in powerset(possible_config_opts):
             for malloc_conf_opts in powerset(possible_malloc_conf_opts):
-                if cc is 'clang' \
+                if cc == 'clang' \
                   and '-m32' in possible_compiler_opts \
                   and '--enable-prof' in config_opts:
                     continue
@@ -79,9 +81,9 @@ for cc, cxx in possible_compilers:
                )
                # We don't want to test large vaddr spaces in 32-bit mode.
                if ('-m32' in compiler_opts and '--with-lg-vaddr=56' in
                    config_opts):
                    continue
                # Per CPU arenas are only supported on Linux.
                linux_supported = ('percpu_arena:percpu' in malloc_conf_opts \
@@ -92,7 +94,7 @@ for cc, cxx in possible_compilers:
                if (uname == 'Linux' and linux_supported) \
                  or (not linux_supported and (uname != 'Darwin' or \
                  not darwin_unsupported)):
-                    print """cat <<EOF > run_test_%(ind)d.sh
+                    print("""cat <<EOF > run_test_%(ind)d.sh
 #!/bin/sh
 set -e
@@ -120,7 +122,9 @@ run_cmd %(make_cmd)s all tests
 run_cmd %(make_cmd)s check
 run_cmd %(make_cmd)s distclean
 EOF
-chmod 755 run_test_%(ind)d.sh""" % {'ind': ind, 'config_line': config_line, 'make_cmd': make_cmd}
+chmod 755 run_test_%(ind)d.sh""" % {'ind': ind, 'config_line': config_line,
+    'make_cmd': make_cmd})
                    ind += 1
-print 'for i in `seq 0 %(last_ind)d` ; do echo run_test_${i}.sh ; done | xargs -P %(nparallel)d -n 1 sh' % {'last_ind': ind-1, 'nparallel': nparallel}
+print('for i in `seq 0 %(last_ind)d` ; do echo run_test_${i}.sh ; done | xargs'
+      ' -P %(nparallel)d -n 1 sh' % {'last_ind': ind-1, 'nparallel': nparallel})
#!/usr/bin/env python #!/usr/bin/env python3
from itertools import combinations from itertools import combinations, chain
from enum import Enum, auto
travis_template = """\
language: generic
dist: precise
matrix: LINUX = 'linux'
OSX = 'osx'
WINDOWS = 'windows'
FREEBSD = 'freebsd'
AMD64 = 'amd64'
ARM64 = 'arm64'
PPC64LE = 'ppc64le'
TRAVIS_TEMPLATE = """\
# This config file is generated by ./scripts/gen_travis.py.
# Do not edit by hand.
# We use 'minimal', because 'generic' makes Windows VMs hang at startup. Also
# the software provided by 'generic' is simply not needed for our tests.
# Differences are explained here:
# https://docs.travis-ci.com/user/languages/minimal-and-generic/
language: minimal
dist: focal
jobs:
include: include:
%s {jobs}
before_install:
- |-
if test -f "./scripts/$TRAVIS_OS_NAME/before_install.sh"; then
source ./scripts/$TRAVIS_OS_NAME/before_install.sh
fi
before_script: before_script:
- autoconf - |-
- scripts/gen_travis.py > travis_script && diff .travis.yml travis_script if test -f "./scripts/$TRAVIS_OS_NAME/before_script.sh"; then
- ./configure ${COMPILER_FLAGS:+ \ source ./scripts/$TRAVIS_OS_NAME/before_script.sh
CC="$CC $COMPILER_FLAGS" \ else
CXX="$CXX $COMPILER_FLAGS" } \ scripts/gen_travis.py > travis_script && diff .travis.yml travis_script
$CONFIGURE_FLAGS autoconf
- make -j3 # If COMPILER_FLAGS are not empty, add them to CC and CXX
- make -j3 tests ./configure ${{COMPILER_FLAGS:+ CC="$CC $COMPILER_FLAGS" \
CXX="$CXX $COMPILER_FLAGS"}} $CONFIGURE_FLAGS
make -j3
make -j3 tests
fi
script: script:
- make check - |-
if test -f "./scripts/$TRAVIS_OS_NAME/script.sh"; then
source ./scripts/$TRAVIS_OS_NAME/script.sh
else
make check
fi
""" """
class Option(object):
class Type:
COMPILER = auto()
COMPILER_FLAG = auto()
CONFIGURE_FLAG = auto()
MALLOC_CONF = auto()
FEATURE = auto()
def __init__(self, type, value):
self.type = type
self.value = value
@staticmethod
def as_compiler(value):
return Option(Option.Type.COMPILER, value)
@staticmethod
def as_compiler_flag(value):
return Option(Option.Type.COMPILER_FLAG, value)
@staticmethod
def as_configure_flag(value):
return Option(Option.Type.CONFIGURE_FLAG, value)
@staticmethod
def as_malloc_conf(value):
return Option(Option.Type.MALLOC_CONF, value)
@staticmethod
def as_feature(value):
return Option(Option.Type.FEATURE, value)
def __eq__(self, obj):
return (isinstance(obj, Option) and obj.type == self.type
and obj.value == self.value)
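A minimal usage sketch of the Option wrapper above (assuming it is in scope, e.g. pasted below the class): each factory tags a raw string with the kind of setting it represents, and __eq__ compares both the tag and the value.

    dbg = Option.as_configure_flag('--enable-debug')
    assert dbg.type == Option.Type.CONFIGURE_FLAG
    assert dbg == Option.as_configure_flag('--enable-debug')
    assert dbg != Option.as_malloc_conf('--enable-debug')   # same string, different kind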
# The 'default' configuration is gcc, on linux, with no compiler or configure # The 'default' configuration is gcc, on linux, with no compiler or configure
# flags. We also test with clang, -m32, --enable-debug, --enable-prof, # flags. We also test with clang, -m32, --enable-debug, --enable-prof,
# --disable-stats, and --with-malloc-conf=tcache:false. To avoid abusing # --disable-stats, and --with-malloc-conf=tcache:false. To avoid abusing
# travis though, we don't test all 2**7 = 128 possible combinations of these; # travis though, we don't test all 2**7 = 128 possible combinations of these;
# instead, we only test combinations of up to 2 'unusual' settings, under the # instead, we only test combinations of up to 2 'unusual' settings, under the
# hope that bugs involving interactions of such settings are rare. # hope that bugs involving interactions of such settings are rare.
# Testing at most MAX_UNUSUAL_OPTIONS settings at once gives
# C(7, 0) + C(7, 1) + C(7, 2) = 29 configurations.
MAX_UNUSUAL_OPTIONS = 2 MAX_UNUSUAL_OPTIONS = 2
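A quick standalone check of the arithmetic in the comment above (illustrative only): with 7 unusual settings and at most 2 enabled per job,

    import math
    print(sum(math.comb(7, k) for k in range(2 + 1)))  # 1 + 7 + 21 = 29 configurations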
os_default = 'linux'
os_unusual = 'osx'
compilers_default = 'CC=gcc CXX=g++' GCC = Option.as_compiler('CC=gcc CXX=g++')
compilers_unusual = 'CC=clang CXX=clang++' CLANG = Option.as_compiler('CC=clang CXX=clang++')
CL = Option.as_compiler('CC=cl.exe CXX=cl.exe')
compiler_flag_unusuals = ['-m32'] compilers_unusual = [CLANG,]
configure_flag_unusuals = [
CROSS_COMPILE_32BIT = Option.as_feature('CROSS_COMPILE_32BIT')
feature_unusuals = [CROSS_COMPILE_32BIT]
configure_flag_unusuals = [Option.as_configure_flag(opt) for opt in (
'--enable-debug', '--enable-debug',
'--enable-prof', '--enable-prof',
'--disable-stats', '--disable-stats',
'--disable-libdl', '--disable-libdl',
'--enable-opt-safety-checks', '--enable-opt-safety-checks',
] '--with-lg-page=16',
)]
malloc_conf_unusuals = [
malloc_conf_unusuals = [Option.as_malloc_conf(opt) for opt in (
'tcache:false', 'tcache:false',
'dss:primary', 'dss:primary',
'percpu_arena:percpu', 'percpu_arena:percpu',
'background_thread:true', 'background_thread:true',
] )]
all_unusuals = (
[os_unusual] + [compilers_unusual] + compiler_flag_unusuals
+ configure_flag_unusuals + malloc_conf_unusuals
)
unusual_combinations_to_test = [] all_unusuals = (compilers_unusual + feature_unusuals
for i in xrange(MAX_UNUSUAL_OPTIONS + 1): + configure_flag_unusuals + malloc_conf_unusuals)
unusual_combinations_to_test += combinations(all_unusuals, i)
gcc_multilib_set = False
# Formats a job from a combination of flags
def format_job(combination):
global gcc_multilib_set
os = os_unusual if os_unusual in combination else os_default def get_extra_cflags(os, compiler):
compilers = compilers_unusual if compilers_unusual in combination else compilers_default if os == FREEBSD:
return []
if os == WINDOWS:
# For non-CL compilers under Windows (for now it's only MinGW-GCC),
# -fcommon needs to be specified to correctly handle multiple
# 'malloc_conf' symbols and such, which are declared weak under Linux.
# Weak symbols don't work with MinGW-GCC.
if compiler != CL.value:
return ['-fcommon']
else:
return []
# We get some spurious errors when -Warray-bounds is enabled.
extra_cflags = ['-Werror', '-Wno-array-bounds']
if compiler == CLANG.value or os == OSX:
extra_cflags += [
'-Wno-unknown-warning-option',
'-Wno-ignored-attributes'
]
if os == OSX:
extra_cflags += [
'-Wno-deprecated-declarations',
]
return extra_cflags
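Informally, and assuming this snippet were appended to gen_travis.py so the constants and get_extra_cflags() above are in scope, the flag selection behaves roughly like this:

    assert get_extra_cflags(FREEBSD, GCC.value) == []
    assert get_extra_cflags(WINDOWS, GCC.value) == ['-fcommon']   # MinGW-GCC needs -fcommon
    assert get_extra_cflags(WINDOWS, CL.value) == []
    assert '-Wno-unknown-warning-option' in get_extra_cflags(LINUX, CLANG.value)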
compiler_flags = [x for x in combination if x in compiler_flag_unusuals]
configure_flags = [x for x in combination if x in configure_flag_unusuals]
malloc_conf = [x for x in combination if x in malloc_conf_unusuals]
# Filter out unsupported configurations on OS X. # Formats a job from a combination of flags
if os == 'osx' and ('dss:primary' in malloc_conf or \ def format_job(os, arch, combination):
'percpu_arena:percpu' in malloc_conf or 'background_thread:true' \ compilers = [x.value for x in combination if x.type == Option.Type.COMPILER]
in malloc_conf): assert(len(compilers) <= 1)
return "" compiler_flags = [x.value for x in combination if x.type == Option.Type.COMPILER_FLAG]
configure_flags = [x.value for x in combination if x.type == Option.Type.CONFIGURE_FLAG]
malloc_conf = [x.value for x in combination if x.type == Option.Type.MALLOC_CONF]
features = [x.value for x in combination if x.type == Option.Type.FEATURE]
if len(malloc_conf) > 0: if len(malloc_conf) > 0:
configure_flags.append('--with-malloc-conf=' + ",".join(malloc_conf)) configure_flags.append('--with-malloc-conf=' + ','.join(malloc_conf))
# Filter out an unsupported configuration - heap profiling on OS X. if not compilers:
if os == 'osx' and '--enable-prof' in configure_flags: compiler = GCC.value
return "" else:
compiler = compilers[0]
# We get some spurious errors when -Warray-bounds is enabled. extra_environment_vars = ''
env_string = ('{} COMPILER_FLAGS="{}" CONFIGURE_FLAGS="{}" ' cross_compile = CROSS_COMPILE_32BIT.value in features
'EXTRA_CFLAGS="-Werror -Wno-array-bounds"').format( if os == LINUX and cross_compile:
compilers, " ".join(compiler_flags), " ".join(configure_flags)) compiler_flags.append('-m32')
job = "" features_str = ' '.join([' {}=yes'.format(feature) for feature in features])
job += ' - os: %s\n' % os
job += ' env: %s\n' % env_string stringify = lambda arr, name: ' {}="{}"'.format(name, ' '.join(arr)) if arr else ''
if '-m32' in combination and os == 'linux': env_string = '{}{}{}{}{}{}'.format(
job += ' addons:' compiler,
if gcc_multilib_set: features_str,
job += ' *gcc_multilib\n' stringify(compiler_flags, 'COMPILER_FLAGS'),
else: stringify(configure_flags, 'CONFIGURE_FLAGS'),
job += ' &gcc_multilib\n' stringify(get_extra_cflags(os, compiler), 'EXTRA_CFLAGS'),
job += ' apt:\n' extra_environment_vars)
job += ' packages:\n'
job += ' - gcc-multilib\n' job = ' - os: {}\n'.format(os)
gcc_multilib_set = True job += ' arch: {}\n'.format(arch)
job += ' env: {}'.format(env_string)
return job return job
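For a sense of the output (a rough sketch, not verbatim), a clang + --enable-debug job on Linux/amd64 would come out of format_job() roughly as:

    print(format_job(LINUX, AMD64, (CLANG, Option.as_configure_flag('--enable-debug'))))
    #  - os: linux
    #    arch: amd64
    #    env: CC=clang CXX=clang++ CONFIGURE_FLAGS="--enable-debug"
    #      EXTRA_CFLAGS="-Werror -Wno-array-bounds -Wno-unknown-warning-option -Wno-ignored-attributes"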
include_rows = ""
for combination in unusual_combinations_to_test:
include_rows += format_job(combination)
# Development build def generate_unusual_combinations(unusuals, max_unusual_opts):
include_rows += '''\ """
Generates different combinations of non-standard compilers, compiler flags,
configure flags and malloc_conf settings.
@param max_unusual_opts: Limit of unusual options per combination.
"""
return chain.from_iterable(
[combinations(unusuals, i) for i in range(max_unusual_opts + 1)])
def included(combination, exclude):
"""
Checks if the combination of options should be included in the Travis
testing matrix.
@param exclude: A list of options to be avoided.
"""
return not any(excluded in combination for excluded in exclude)
def generate_jobs(os, arch, exclude, max_unusual_opts, unusuals=all_unusuals):
jobs = []
for combination in generate_unusual_combinations(unusuals, max_unusual_opts):
if included(combination, exclude):
jobs.append(format_job(os, arch, combination))
return '\n'.join(jobs)
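To make the filtering concrete (again a sketch relying on the definitions above): included() drops any combination that contains an excluded option, and generate_unusual_combinations() enumerates the empty set plus all small subsets.

    combo = (CLANG, Option.as_configure_flag('--enable-debug'))
    assert included(combo, exclude=[])              # kept
    assert not included(combo, exclude=[CLANG])     # dropped: clang is excluded
    assert len(list(generate_unusual_combinations(all_unusuals, 1))) == 1 + len(all_unusuals)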
def generate_linux(arch):
os = LINUX
# Only generate 2 unusual options for AMD64 to reduce matrix size
max_unusual_opts = MAX_UNUSUAL_OPTIONS if arch == AMD64 else 1
exclude = []
if arch == PPC64LE:
# Avoid 32 bit builds and clang on PowerPC
exclude = (CROSS_COMPILE_32BIT, CLANG,)
return generate_jobs(os, arch, exclude, max_unusual_opts)
def generate_macos(arch):
os = OSX
max_unusual_opts = 1
exclude = ([Option.as_malloc_conf(opt) for opt in (
'dss:primary',
'percpu_arena:percpu',
'background_thread:true')] +
[Option.as_configure_flag('--enable-prof')] +
[CLANG,])
return generate_jobs(os, arch, exclude, max_unusual_opts)
def generate_windows(arch):
os = WINDOWS
max_unusual_opts = 3
unusuals = (
Option.as_configure_flag('--enable-debug'),
CL,
CROSS_COMPILE_32BIT,
)
return generate_jobs(os, arch, (), max_unusual_opts, unusuals)
def generate_freebsd(arch):
os = FREEBSD
max_unusual_opts = 4
unusuals = (
Option.as_configure_flag('--enable-debug'),
Option.as_configure_flag('--enable-prof --enable-prof-libunwind'),
Option.as_configure_flag('--with-lg-page=16 --with-malloc-conf=tcache:false'),
CROSS_COMPILE_32BIT,
)
return generate_jobs(os, arch, (), max_unusual_opts, unusuals)
def get_manual_jobs():
return """\
# Development build # Development build
- os: linux - os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --disable-cache-oblivious --enable-stats --enable-log --enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds" env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-debug \
''' --disable-cache-oblivious --enable-stats --enable-log --enable-prof" \
EXTRA_CFLAGS="-Werror -Wno-array-bounds"
# Enable-experimental-smallocx
include_rows += '''\
# --enable-experimental-smallocx: # --enable-experimental-smallocx:
- os: linux - os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --enable-experimental-smallocx --enable-stats --enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds" env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-debug \
''' --enable-experimental-smallocx --enable-stats --enable-prof" \
EXTRA_CFLAGS="-Werror -Wno-array-bounds"
"""
# Valgrind build bots
include_rows += ''' def main():
# Valgrind jobs = '\n'.join((
- os: linux generate_windows(AMD64),
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds" JEMALLOC_TEST_PREFIX="valgrind"
addons: generate_freebsd(AMD64),
apt:
packages: generate_linux(AMD64),
- valgrind generate_linux(PPC64LE),
'''
generate_macos(AMD64),
# To enable valgrind on macosx add:
# get_manual_jobs(),
# - os: osx ))
# env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds" JEMALLOC_TEST_PREFIX="valgrind"
# install: brew install valgrind print(TRAVIS_TEMPLATE.format(jobs=jobs))
#
# It currently fails due to: https://github.com/jemalloc/jemalloc/issues/1274
if __name__ == '__main__':
print travis_template % include_rows main()
#!/bin/bash
set -ev
if [[ "$TRAVIS_OS_NAME" != "linux" ]]; then
echo "Incorrect \$TRAVIS_OS_NAME: expected linux, got $TRAVIS_OS_NAME"
exit 1
fi
if [[ "$CROSS_COMPILE_32BIT" == "yes" ]]; then
sudo apt-get update
sudo apt-get -y install gcc-multilib g++-multilib
fi
#!/bin/bash
set -e
# The purpose of this script is to install build dependencies and set
# $build_env to a function that sets appropriate environment variables,
# to enable the (mingw32|mingw64) environment if we want to compile with gcc, or
# (mingw32|mingw64) + vcvarsall.bat if we want to compile with cl.exe.
if [[ "$TRAVIS_OS_NAME" != "windows" ]]; then
echo "Incorrect \$TRAVIS_OS_NAME: expected windows, got $TRAVIS_OS_NAME"
exit 1
fi
[[ ! -f C:/tools/msys64/msys2_shell.cmd ]] && rm -rf C:/tools/msys64
choco uninstall -y mingw
choco upgrade --no-progress -y msys2
msys_shell_cmd="cmd //C RefreshEnv.cmd && set MSYS=winsymlinks:nativestrict && C:\\tools\\msys64\\msys2_shell.cmd"
msys2() { $msys_shell_cmd -defterm -no-start -msys2 -c "$*"; }
mingw32() { $msys_shell_cmd -defterm -no-start -mingw32 -c "$*"; }
mingw64() { $msys_shell_cmd -defterm -no-start -mingw64 -c "$*"; }
if [[ "$CROSS_COMPILE_32BIT" == "yes" ]]; then
mingw=mingw32
mingw_gcc_package_arch=i686
else
mingw=mingw64
mingw_gcc_package_arch=x86_64
fi
if [[ "$CC" == *"gcc"* ]]; then
$mingw pacman -S --noconfirm --needed \
autotools \
git \
mingw-w64-${mingw_gcc_package_arch}-make \
mingw-w64-${mingw_gcc_package_arch}-gcc \
mingw-w64-${mingw_gcc_package_arch}-binutils
build_env=$mingw
elif [[ "$CC" == *"cl"* ]]; then
$mingw pacman -S --noconfirm --needed \
autotools \
git \
mingw-w64-${mingw_gcc_package_arch}-make \
mingw-w64-${mingw_gcc_package_arch}-binutils
# In order to use the MSVC compiler (cl.exe), we need to correctly set some environment
# variables, namely PATH, INCLUDE, LIB and LIBPATH. The correct values of these
# variables are set by a batch script "vcvarsall.bat". The code below generates
# a batch script that calls "vcvarsall.bat" and prints the environment variables.
#
# Then, those environment variables are transformed from cmd to bash format and put
# into a script $apply_vsenv. If cl.exe needs to be used from bash, one can
# 'source $apply_vsenv' and it will apply the environment variables needed for cl.exe
# to be located and function correctly.
#
# Finally, a function "mingw_with_msvc_vars" is generated which forwards user input
# into a correct mingw (32 or 64) subshell that automatically performs 'source $apply_vsenv',
# making it possible for autotools to discover and use cl.exe.
vcvarsall="vcvarsall.tmp.bat"
echo "@echo off" > $vcvarsall
echo "call \"c:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\\\vcvarsall.bat\" $USE_MSVC" >> $vcvarsall
echo "set" >> $vcvarsall
apply_vsenv="./apply_vsenv.sh"
cmd //C $vcvarsall | grep -E "^PATH=" | sed -n -e 's/\(.*\)=\(.*\)/export \1=$PATH:"\2"/g' \
-e 's/\([a-zA-Z]\):[\\\/]/\/\1\//g' \
-e 's/\\/\//g' \
-e 's/;\//:\//gp' > $apply_vsenv
cmd //C $vcvarsall | grep -E "^(INCLUDE|LIB|LIBPATH)=" | sed -n -e 's/\(.*\)=\(.*\)/export \1="\2"/gp' >> $apply_vsenv
cat $apply_vsenv
mingw_with_msvc_vars() { $msys_shell_cmd -defterm -no-start -$mingw -c "source $apply_vsenv && ""$*"; }
build_env=mingw_with_msvc_vars
rm -f $vcvarsall
else
echo "Unknown C compiler: $CC"
exit 1
fi
echo "Build environment function: $build_env"
#!/bin/bash
set -e
if [[ "$TRAVIS_OS_NAME" != "windows" ]]; then
echo "Incorrect \$TRAVIS_OS_NAME: expected windows, got $TRAVIS_OS_NAME"
exit 1
fi
$build_env autoconf
$build_env ./configure $CONFIGURE_FLAGS
# mingw32-make simply means "make", unrelated to mingw32 vs mingw64.
# Simply disregard the prefix and treat it as "make".
$build_env mingw32-make -j3
# At the moment, it's impossible to build tests in parallel,
# seemingly due to concurrent writes to the '.pdb' file. I don't know why
# that happens, because we explicitly supply '/Fs' to the compiler.
# Until we figure out how to fix it, we should build tests sequentially
# on Windows.
$build_env mingw32-make tests
#!/bin/bash
set -e
if [[ "$TRAVIS_OS_NAME" != "windows" ]]; then
echo "Incorrect \$TRAVIS_OS_NAME: expected windows, got $TRAVIS_OS_NAME"
exit 1
fi
$build_env mingw32-make -k check
#define JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h" #include "jemalloc/internal/assert.h"
#include "jemalloc/internal/div.h" #include "jemalloc/internal/decay.h"
#include "jemalloc/internal/ehooks.h"
#include "jemalloc/internal/extent_dss.h" #include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h" #include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/san.h"
#include "jemalloc/internal/mutex.h" #include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h" #include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/safety_check.h" #include "jemalloc/internal/safety_check.h"
...@@ -35,34 +36,37 @@ ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT; ...@@ -35,34 +36,37 @@ ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT;
static atomic_zd_t dirty_decay_ms_default; static atomic_zd_t dirty_decay_ms_default;
static atomic_zd_t muzzy_decay_ms_default; static atomic_zd_t muzzy_decay_ms_default;
const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = { emap_t arena_emap_global;
#define STEP(step, h, x, y) \ pa_central_t arena_pa_central_global;
h,
SMOOTHSTEP
#undef STEP
};
static div_info_t arena_binind_div_info[SC_NBINS]; div_info_t arena_binind_div_info[SC_NBINS];
size_t opt_oversize_threshold = OVERSIZE_THRESHOLD_DEFAULT; size_t opt_oversize_threshold = OVERSIZE_THRESHOLD_DEFAULT;
size_t oversize_threshold = OVERSIZE_THRESHOLD_DEFAULT; size_t oversize_threshold = OVERSIZE_THRESHOLD_DEFAULT;
uint32_t arena_bin_offsets[SC_NBINS];
static unsigned nbins_total;
static unsigned huge_arena_ind; static unsigned huge_arena_ind;
const arena_config_t arena_config_default = {
/* .extent_hooks = */ (extent_hooks_t *)&ehooks_default_extent_hooks,
/* .metadata_use_hooks = */ true,
};
/******************************************************************************/ /******************************************************************************/
/* /*
* Function prototypes for static functions that are referenced prior to * Function prototypes for static functions that are referenced prior to
* definition. * definition.
*/ */
static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena,
arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit,
size_t npages_decay_max, bool is_background_thread);
static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
bool is_background_thread, bool all); bool is_background_thread, bool all);
static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab,
bin_t *bin);
static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
bin_t *bin); bin_t *bin);
static void
arena_maybe_do_deferred_work(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
size_t npages_new);
/******************************************************************************/ /******************************************************************************/
...@@ -72,19 +76,17 @@ arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, ...@@ -72,19 +76,17 @@ arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
size_t *nactive, size_t *ndirty, size_t *nmuzzy) { size_t *nactive, size_t *ndirty, size_t *nmuzzy) {
*nthreads += arena_nthreads_get(arena, false); *nthreads += arena_nthreads_get(arena, false);
*dss = dss_prec_names[arena_dss_prec_get(arena)]; *dss = dss_prec_names[arena_dss_prec_get(arena)];
*dirty_decay_ms = arena_dirty_decay_ms_get(arena); *dirty_decay_ms = arena_decay_ms_get(arena, extent_state_dirty);
*muzzy_decay_ms = arena_muzzy_decay_ms_get(arena); *muzzy_decay_ms = arena_decay_ms_get(arena, extent_state_muzzy);
*nactive += atomic_load_zu(&arena->nactive, ATOMIC_RELAXED); pa_shard_basic_stats_merge(&arena->pa_shard, nactive, ndirty, nmuzzy);
*ndirty += extents_npages_get(&arena->extents_dirty);
*nmuzzy += extents_npages_get(&arena->extents_muzzy);
} }
void void
arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms, const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats, size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
bin_stats_t *bstats, arena_stats_large_t *lstats, bin_stats_data_t *bstats, arena_stats_large_t *lstats,
arena_stats_extents_t *estats) { pac_estats_t *estats, hpa_shard_stats_t *hpastats, sec_stats_t *secstats) {
cassert(config_stats); cassert(config_stats);
arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms, arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms,
...@@ -93,122 +95,74 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, ...@@ -93,122 +95,74 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
size_t base_allocated, base_resident, base_mapped, metadata_thp; size_t base_allocated, base_resident, base_mapped, metadata_thp;
base_stats_get(tsdn, arena->base, &base_allocated, &base_resident, base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
&base_mapped, &metadata_thp); &base_mapped, &metadata_thp);
size_t pac_mapped_sz = pac_mapped(&arena->pa_shard.pac);
astats->mapped += base_mapped + pac_mapped_sz;
astats->resident += base_resident;
arena_stats_lock(tsdn, &arena->stats); LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
arena_stats_accum_zu(&astats->mapped, base_mapped
+ arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped));
arena_stats_accum_zu(&astats->retained,
extents_npages_get(&arena->extents_retained) << LG_PAGE);
atomic_store_zu(&astats->extent_avail,
atomic_load_zu(&arena->extent_avail_cnt, ATOMIC_RELAXED),
ATOMIC_RELAXED);
arena_stats_accum_u64(&astats->decay_dirty.npurge, astats->base += base_allocated;
arena_stats_read_u64(tsdn, &arena->stats, atomic_load_add_store_zu(&astats->internal, arena_internal_get(arena));
&arena->stats.decay_dirty.npurge)); astats->metadata_thp += metadata_thp;
arena_stats_accum_u64(&astats->decay_dirty.nmadvise,
arena_stats_read_u64(tsdn, &arena->stats,
&arena->stats.decay_dirty.nmadvise));
arena_stats_accum_u64(&astats->decay_dirty.purged,
arena_stats_read_u64(tsdn, &arena->stats,
&arena->stats.decay_dirty.purged));
arena_stats_accum_u64(&astats->decay_muzzy.npurge,
arena_stats_read_u64(tsdn, &arena->stats,
&arena->stats.decay_muzzy.npurge));
arena_stats_accum_u64(&astats->decay_muzzy.nmadvise,
arena_stats_read_u64(tsdn, &arena->stats,
&arena->stats.decay_muzzy.nmadvise));
arena_stats_accum_u64(&astats->decay_muzzy.purged,
arena_stats_read_u64(tsdn, &arena->stats,
&arena->stats.decay_muzzy.purged));
arena_stats_accum_zu(&astats->base, base_allocated);
arena_stats_accum_zu(&astats->internal, arena_internal_get(arena));
arena_stats_accum_zu(&astats->metadata_thp, metadata_thp);
arena_stats_accum_zu(&astats->resident, base_resident +
(((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) +
extents_npages_get(&arena->extents_dirty) +
extents_npages_get(&arena->extents_muzzy)) << LG_PAGE)));
arena_stats_accum_zu(&astats->abandoned_vm, atomic_load_zu(
&arena->stats.abandoned_vm, ATOMIC_RELAXED));
for (szind_t i = 0; i < SC_NSIZES - SC_NBINS; i++) { for (szind_t i = 0; i < SC_NSIZES - SC_NBINS; i++) {
uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats, uint64_t nmalloc = locked_read_u64(tsdn,
LOCKEDINT_MTX(arena->stats.mtx),
&arena->stats.lstats[i].nmalloc); &arena->stats.lstats[i].nmalloc);
arena_stats_accum_u64(&lstats[i].nmalloc, nmalloc); locked_inc_u64_unsynchronized(&lstats[i].nmalloc, nmalloc);
arena_stats_accum_u64(&astats->nmalloc_large, nmalloc); astats->nmalloc_large += nmalloc;
uint64_t ndalloc = arena_stats_read_u64(tsdn, &arena->stats, uint64_t ndalloc = locked_read_u64(tsdn,
LOCKEDINT_MTX(arena->stats.mtx),
&arena->stats.lstats[i].ndalloc); &arena->stats.lstats[i].ndalloc);
arena_stats_accum_u64(&lstats[i].ndalloc, ndalloc); locked_inc_u64_unsynchronized(&lstats[i].ndalloc, ndalloc);
arena_stats_accum_u64(&astats->ndalloc_large, ndalloc); astats->ndalloc_large += ndalloc;
uint64_t nrequests = arena_stats_read_u64(tsdn, &arena->stats, uint64_t nrequests = locked_read_u64(tsdn,
LOCKEDINT_MTX(arena->stats.mtx),
&arena->stats.lstats[i].nrequests); &arena->stats.lstats[i].nrequests);
arena_stats_accum_u64(&lstats[i].nrequests, locked_inc_u64_unsynchronized(&lstats[i].nrequests,
nmalloc + nrequests);
arena_stats_accum_u64(&astats->nrequests_large,
nmalloc + nrequests); nmalloc + nrequests);
astats->nrequests_large += nmalloc + nrequests;
/* nfill == nmalloc for large currently. */ /* nfill == nmalloc for large currently. */
arena_stats_accum_u64(&lstats[i].nfills, nmalloc); locked_inc_u64_unsynchronized(&lstats[i].nfills, nmalloc);
arena_stats_accum_u64(&astats->nfills_large, nmalloc); astats->nfills_large += nmalloc;
uint64_t nflush = arena_stats_read_u64(tsdn, &arena->stats, uint64_t nflush = locked_read_u64(tsdn,
LOCKEDINT_MTX(arena->stats.mtx),
&arena->stats.lstats[i].nflushes); &arena->stats.lstats[i].nflushes);
arena_stats_accum_u64(&lstats[i].nflushes, nflush); locked_inc_u64_unsynchronized(&lstats[i].nflushes, nflush);
arena_stats_accum_u64(&astats->nflushes_large, nflush); astats->nflushes_large += nflush;
assert(nmalloc >= ndalloc); assert(nmalloc >= ndalloc);
assert(nmalloc - ndalloc <= SIZE_T_MAX); assert(nmalloc - ndalloc <= SIZE_T_MAX);
size_t curlextents = (size_t)(nmalloc - ndalloc); size_t curlextents = (size_t)(nmalloc - ndalloc);
lstats[i].curlextents += curlextents; lstats[i].curlextents += curlextents;
arena_stats_accum_zu(&astats->allocated_large, astats->allocated_large +=
curlextents * sz_index2size(SC_NBINS + i)); curlextents * sz_index2size(SC_NBINS + i);
} }
for (pszind_t i = 0; i < SC_NPSIZES; i++) { pa_shard_stats_merge(tsdn, &arena->pa_shard, &astats->pa_shard_stats,
size_t dirty, muzzy, retained, dirty_bytes, muzzy_bytes, estats, hpastats, secstats, &astats->resident);
retained_bytes;
dirty = extents_nextents_get(&arena->extents_dirty, i); LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
muzzy = extents_nextents_get(&arena->extents_muzzy, i);
retained = extents_nextents_get(&arena->extents_retained, i); /* Currently cached bytes and sanitizer-stashed bytes in tcache. */
dirty_bytes = extents_nbytes_get(&arena->extents_dirty, i); astats->tcache_bytes = 0;
muzzy_bytes = extents_nbytes_get(&arena->extents_muzzy, i); astats->tcache_stashed_bytes = 0;
retained_bytes =
extents_nbytes_get(&arena->extents_retained, i);
atomic_store_zu(&estats[i].ndirty, dirty, ATOMIC_RELAXED);
atomic_store_zu(&estats[i].nmuzzy, muzzy, ATOMIC_RELAXED);
atomic_store_zu(&estats[i].nretained, retained, ATOMIC_RELAXED);
atomic_store_zu(&estats[i].dirty_bytes, dirty_bytes,
ATOMIC_RELAXED);
atomic_store_zu(&estats[i].muzzy_bytes, muzzy_bytes,
ATOMIC_RELAXED);
atomic_store_zu(&estats[i].retained_bytes, retained_bytes,
ATOMIC_RELAXED);
}
arena_stats_unlock(tsdn, &arena->stats);
/* tcache_bytes counts currently cached bytes. */
atomic_store_zu(&astats->tcache_bytes, 0, ATOMIC_RELAXED);
malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
cache_bin_array_descriptor_t *descriptor; cache_bin_array_descriptor_t *descriptor;
ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) { ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) {
szind_t i = 0; for (szind_t i = 0; i < nhbins; i++) {
for (; i < SC_NBINS; i++) { cache_bin_t *cache_bin = &descriptor->bins[i];
cache_bin_t *tbin = &descriptor->bins_small[i]; cache_bin_sz_t ncached, nstashed;
arena_stats_accum_zu(&astats->tcache_bytes, cache_bin_nitems_get_remote(cache_bin,
tbin->ncached * sz_index2size(i)); &tcache_bin_info[i], &ncached, &nstashed);
}
for (; i < nhbins; i++) { astats->tcache_bytes += ncached * sz_index2size(i);
cache_bin_t *tbin = &descriptor->bins_large[i]; astats->tcache_stashed_bytes += nstashed *
arena_stats_accum_zu(&astats->tcache_bytes, sz_index2size(i);
tbin->ncached * sz_index2size(i));
} }
} }
malloc_mutex_prof_read(tsdn, malloc_mutex_prof_read(tsdn,
...@@ -224,21 +178,11 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, ...@@ -224,21 +178,11 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
/* Gather per arena mutex profiling data. */ /* Gather per arena mutex profiling data. */
READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large); READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large);
READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx,
arena_prof_mutex_extent_avail)
READ_ARENA_MUTEX_PROF_DATA(extents_dirty.mtx,
arena_prof_mutex_extents_dirty)
READ_ARENA_MUTEX_PROF_DATA(extents_muzzy.mtx,
arena_prof_mutex_extents_muzzy)
READ_ARENA_MUTEX_PROF_DATA(extents_retained.mtx,
arena_prof_mutex_extents_retained)
READ_ARENA_MUTEX_PROF_DATA(decay_dirty.mtx,
arena_prof_mutex_decay_dirty)
READ_ARENA_MUTEX_PROF_DATA(decay_muzzy.mtx,
arena_prof_mutex_decay_muzzy)
READ_ARENA_MUTEX_PROF_DATA(base->mtx, READ_ARENA_MUTEX_PROF_DATA(base->mtx,
arena_prof_mutex_base) arena_prof_mutex_base);
#undef READ_ARENA_MUTEX_PROF_DATA #undef READ_ARENA_MUTEX_PROF_DATA
pa_shard_mtx_stats_read(tsdn, &arena->pa_shard,
astats->mutex_prof_data);
nstime_copy(&astats->uptime, &arena->create_time); nstime_copy(&astats->uptime, &arena->create_time);
nstime_update(&astats->uptime); nstime_update(&astats->uptime);
...@@ -247,55 +191,67 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, ...@@ -247,55 +191,67 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
for (szind_t i = 0; i < SC_NBINS; i++) { for (szind_t i = 0; i < SC_NBINS; i++) {
for (unsigned j = 0; j < bin_infos[i].n_shards; j++) { for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
bin_stats_merge(tsdn, &bstats[i], bin_stats_merge(tsdn, &bstats[i],
&arena->bins[i].bin_shards[j]); arena_get_bin(arena, i, j));
} }
} }
} }
void static void
arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena, arena_background_thread_inactivity_check(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent) { bool is_background_thread) {
if (!background_thread_enabled() || is_background_thread) {
return;
}
background_thread_info_t *info =
arena_background_thread_info_get(arena);
if (background_thread_indefinite_sleep(info)) {
arena_maybe_do_deferred_work(tsdn, arena,
&arena->pa_shard.pac.decay_dirty, 0);
}
}
/*
* React to deferred work generated by a PAI function.
*/
void arena_handle_deferred_work(tsdn_t *tsdn, arena_t *arena) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0); WITNESS_RANK_CORE, 0);
extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_dirty, if (decay_immediately(&arena->pa_shard.pac.decay_dirty)) {
extent);
if (arena_dirty_decay_ms_get(arena) == 0) {
arena_decay_dirty(tsdn, arena, false, true); arena_decay_dirty(tsdn, arena, false, true);
} else {
arena_background_thread_inactivity_check(tsdn, arena, false);
} }
arena_background_thread_inactivity_check(tsdn, arena, false);
} }
static void * static void *
arena_slab_reg_alloc(extent_t *slab, const bin_info_t *bin_info) { arena_slab_reg_alloc(edata_t *slab, const bin_info_t *bin_info) {
void *ret; void *ret;
arena_slab_data_t *slab_data = extent_slab_data_get(slab); slab_data_t *slab_data = edata_slab_data_get(slab);
size_t regind; size_t regind;
assert(extent_nfree_get(slab) > 0); assert(edata_nfree_get(slab) > 0);
assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info)); assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));
regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info); regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info);
ret = (void *)((uintptr_t)extent_addr_get(slab) + ret = (void *)((uintptr_t)edata_addr_get(slab) +
(uintptr_t)(bin_info->reg_size * regind)); (uintptr_t)(bin_info->reg_size * regind));
extent_nfree_dec(slab); edata_nfree_dec(slab);
return ret; return ret;
} }
static void static void
arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info, arena_slab_reg_alloc_batch(edata_t *slab, const bin_info_t *bin_info,
unsigned cnt, void** ptrs) { unsigned cnt, void** ptrs) {
arena_slab_data_t *slab_data = extent_slab_data_get(slab); slab_data_t *slab_data = edata_slab_data_get(slab);
assert(extent_nfree_get(slab) >= cnt); assert(edata_nfree_get(slab) >= cnt);
assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info)); assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));
#if (! defined JEMALLOC_INTERNAL_POPCOUNTL) || (defined BITMAP_USE_TREE) #if (! defined JEMALLOC_INTERNAL_POPCOUNTL) || (defined BITMAP_USE_TREE)
for (unsigned i = 0; i < cnt; i++) { for (unsigned i = 0; i < cnt; i++) {
size_t regind = bitmap_sfu(slab_data->bitmap, size_t regind = bitmap_sfu(slab_data->bitmap,
&bin_info->bitmap_info); &bin_info->bitmap_info);
*(ptrs + i) = (void *)((uintptr_t)extent_addr_get(slab) + *(ptrs + i) = (void *)((uintptr_t)edata_addr_get(slab) +
(uintptr_t)(bin_info->reg_size * regind)); (uintptr_t)(bin_info->reg_size * regind));
} }
#else #else
...@@ -316,7 +272,7 @@ arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info, ...@@ -316,7 +272,7 @@ arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info,
* Load from memory locations only once, outside the * Load from memory locations only once, outside the
* hot loop below. * hot loop below.
*/ */
uintptr_t base = (uintptr_t)extent_addr_get(slab); uintptr_t base = (uintptr_t)edata_addr_get(slab);
uintptr_t regsize = (uintptr_t)bin_info->reg_size; uintptr_t regsize = (uintptr_t)bin_info->reg_size;
while (pop--) { while (pop--) {
size_t bit = cfs_lu(&g); size_t bit = cfs_lu(&g);
...@@ -328,56 +284,7 @@ arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info, ...@@ -328,56 +284,7 @@ arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info,
slab_data->bitmap[group] = g; slab_data->bitmap[group] = g;
} }
#endif #endif
extent_nfree_sub(slab, cnt); edata_nfree_sub(slab, cnt);
}
#ifndef JEMALLOC_JET
static
#endif
size_t
arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
size_t diff, regind;
/* Freeing a pointer outside the slab can cause assertion failure. */
assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab));
assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab));
/* Freeing an interior pointer can cause assertion failure. */
assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) %
(uintptr_t)bin_infos[binind].reg_size == 0);
diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab));
/* Avoid doing division with a variable divisor. */
regind = div_compute(&arena_binind_div_info[binind], diff);
assert(regind < bin_infos[binind].nregs);
return regind;
}
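The "avoid doing division with a variable divisor" trick above is easiest to see in a tiny sketch of the arithmetic (Python purely for illustration; div_init/div_compute here are stand-ins, not jemalloc's div.h API): the region size is fixed per bin, so the division can be precomputed into a multiply-and-shift.

    def div_init(divisor, bits=32):
        # Precompute a "magic" reciprocal; assumes divisor > 0 and dividend < 2**bits.
        return ((1 << bits) // divisor) + 1, bits

    def div_compute(info, dividend):
        magic, bits = info
        return (dividend * magic) >> bits

    reg_size = 48                              # hypothetical bin region size
    info = div_init(reg_size)
    for regind in range(200):
        diff = regind * reg_size               # offset of a region within its slab
        assert div_compute(info, diff) == regind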
static void
arena_slab_reg_dalloc(extent_t *slab, arena_slab_data_t *slab_data, void *ptr) {
szind_t binind = extent_szind_get(slab);
const bin_info_t *bin_info = &bin_infos[binind];
size_t regind = arena_slab_regind(slab, binind, ptr);
assert(extent_nfree_get(slab) < bin_info->nregs);
/* Freeing an unallocated pointer can cause assertion failure. */
assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));
bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
extent_nfree_inc(slab);
}
static void
arena_nactive_add(arena_t *arena, size_t add_pages) {
atomic_fetch_add_zu(&arena->nactive, add_pages, ATOMIC_RELAXED);
}
static void
arena_nactive_sub(arena_t *arena, size_t sub_pages) {
assert(atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) >= sub_pages);
atomic_fetch_sub_zu(&arena->nactive, sub_pages, ATOMIC_RELAXED);
} }
static void static void
...@@ -392,7 +299,7 @@ arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) { ...@@ -392,7 +299,7 @@ arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
index = sz_size2index(usize); index = sz_size2index(usize);
hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0; hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0;
arena_stats_add_u64(tsdn, &arena->stats, locked_inc_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
&arena->stats.lstats[hindex].nmalloc, 1); &arena->stats.lstats[hindex].nmalloc, 1);
} }
...@@ -408,551 +315,118 @@ arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) { ...@@ -408,551 +315,118 @@ arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
index = sz_size2index(usize); index = sz_size2index(usize);
hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0; hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0;
arena_stats_add_u64(tsdn, &arena->stats, locked_inc_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
&arena->stats.lstats[hindex].ndalloc, 1); &arena->stats.lstats[hindex].ndalloc, 1);
} }
static void static void
arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize, arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize,
size_t usize) { size_t usize) {
arena_large_dalloc_stats_update(tsdn, arena, oldusize);
arena_large_malloc_stats_update(tsdn, arena, usize); arena_large_malloc_stats_update(tsdn, arena, usize);
arena_large_dalloc_stats_update(tsdn, arena, oldusize);
} }
static bool edata_t *
arena_may_have_muzzy(arena_t *arena) {
return (pages_can_purge_lazy && (arena_muzzy_decay_ms_get(arena) != 0));
}
extent_t *
arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool *zero) { size_t alignment, bool zero) {
extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; bool deferred_work_generated = false;
szind_t szind = sz_size2index(usize);
size_t esize = usize + sz_large_pad;
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), bool guarded = san_large_extent_decide_guard(tsdn,
WITNESS_RANK_CORE, 0); arena_get_ehooks(arena), esize, alignment);
edata_t *edata = pa_alloc(tsdn, &arena->pa_shard, esize, alignment,
/* slab */ false, szind, zero, guarded, &deferred_work_generated);
assert(deferred_work_generated == false);
szind_t szind = sz_size2index(usize); if (edata != NULL) {
size_t mapped_add;
bool commit = true;
extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks,
&arena->extents_dirty, NULL, usize, sz_large_pad, alignment, false,
szind, zero, &commit);
if (extent == NULL && arena_may_have_muzzy(arena)) {
extent = extents_alloc(tsdn, arena, &extent_hooks,
&arena->extents_muzzy, NULL, usize, sz_large_pad, alignment,
false, szind, zero, &commit);
}
size_t size = usize + sz_large_pad;
if (extent == NULL) {
extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL,
usize, sz_large_pad, alignment, false, szind, zero,
&commit);
if (config_stats) { if (config_stats) {
/* LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
* extent may be NULL on OOM, but in that case arena_large_malloc_stats_update(tsdn, arena, usize);
* mapped_add isn't used below, so there's no need to LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
* conditionally set it to 0 here.
*/
mapped_add = size;
} }
} else if (config_stats) {
mapped_add = 0;
} }
if (extent != NULL) { if (edata != NULL && sz_large_pad != 0) {
if (config_stats) { arena_cache_oblivious_randomize(tsdn, arena, edata, alignment);
arena_stats_lock(tsdn, &arena->stats);
arena_large_malloc_stats_update(tsdn, arena, usize);
if (mapped_add != 0) {
arena_stats_add_zu(tsdn, &arena->stats,
&arena->stats.mapped, mapped_add);
}
arena_stats_unlock(tsdn, &arena->stats);
}
arena_nactive_add(arena, size >> LG_PAGE);
} }
return extent; return edata;
} }
void void
arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, edata_t *edata) {
if (config_stats) { if (config_stats) {
arena_stats_lock(tsdn, &arena->stats); LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
arena_large_dalloc_stats_update(tsdn, arena, arena_large_dalloc_stats_update(tsdn, arena,
extent_usize_get(extent)); edata_usize_get(edata));
arena_stats_unlock(tsdn, &arena->stats); LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
} }
arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
} }
void void
arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent, arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
size_t oldusize) { size_t oldusize) {
size_t usize = extent_usize_get(extent); size_t usize = edata_usize_get(edata);
size_t udiff = oldusize - usize;
if (config_stats) { if (config_stats) {
arena_stats_lock(tsdn, &arena->stats); LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize); arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
arena_stats_unlock(tsdn, &arena->stats); LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
} }
arena_nactive_sub(arena, udiff >> LG_PAGE);
} }
void void
arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent, arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
size_t oldusize) { size_t oldusize) {
size_t usize = extent_usize_get(extent); size_t usize = edata_usize_get(edata);
size_t udiff = usize - oldusize;
if (config_stats) { if (config_stats) {
arena_stats_lock(tsdn, &arena->stats); LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize); arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
arena_stats_unlock(tsdn, &arena->stats); LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
}
arena_nactive_add(arena, udiff >> LG_PAGE);
}
static ssize_t
arena_decay_ms_read(arena_decay_t *decay) {
return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
}
static void
arena_decay_ms_write(arena_decay_t *decay, ssize_t decay_ms) {
atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED);
}
static void
arena_decay_deadline_init(arena_decay_t *decay) {
/*
* Generate a new deadline that is uniformly random within the next
* epoch after the current one.
*/
nstime_copy(&decay->deadline, &decay->epoch);
nstime_add(&decay->deadline, &decay->interval);
if (arena_decay_ms_read(decay) > 0) {
nstime_t jitter;
nstime_init(&jitter, prng_range_u64(&decay->jitter_state,
nstime_ns(&decay->interval)));
nstime_add(&decay->deadline, &jitter);
}
}
static bool
arena_decay_deadline_reached(const arena_decay_t *decay, const nstime_t *time) {
return (nstime_compare(&decay->deadline, time) <= 0);
}
static size_t
arena_decay_backlog_npages_limit(const arena_decay_t *decay) {
uint64_t sum;
size_t npages_limit_backlog;
unsigned i;
/*
* For each element of decay_backlog, multiply by the corresponding
* fixed-point smoothstep decay factor. Sum the products, then divide
* to round down to the nearest whole number of pages.
*/
sum = 0;
for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
sum += decay->backlog[i] * h_steps[i];
}
npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
return npages_limit_backlog;
}
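To see the fixed-point arithmetic concretely, here is a Python sketch with made-up numbers (not jemalloc's actual SMOOTHSTEP table): each backlog slot holds the pages dirtied in one past epoch, each weight is a fraction scaled by 2**SMOOTHSTEP_BFP, and the shifted sum is how many of those pages may still remain unpurged.

    SMOOTHSTEP_BFP = 24
    SMOOTHSTEP_NSTEPS = 8                      # toy value; the real table is larger
    # Toy weights rising toward 1.0 (scaled): newer epochs keep more of their pages.
    h_steps = [int(((i + 1) / SMOOTHSTEP_NSTEPS) * (1 << SMOOTHSTEP_BFP))
               for i in range(SMOOTHSTEP_NSTEPS)]
    backlog = [40, 0, 0, 25, 0, 10, 0, 100]    # pages dirtied per epoch, oldest first
    limit = sum(b * h for b, h in zip(backlog, h_steps)) >> SMOOTHSTEP_BFP
    print(limit)                               # pages allowed to remain unpurged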
static void
arena_decay_backlog_update_last(arena_decay_t *decay, size_t current_npages) {
size_t npages_delta = (current_npages > decay->nunpurged) ?
current_npages - decay->nunpurged : 0;
decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta;
if (config_debug) {
if (current_npages > decay->ceil_npages) {
decay->ceil_npages = current_npages;
}
size_t npages_limit = arena_decay_backlog_npages_limit(decay);
assert(decay->ceil_npages >= npages_limit);
if (decay->ceil_npages > npages_limit) {
decay->ceil_npages = npages_limit;
}
}
}
static void
arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64,
size_t current_npages) {
if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
sizeof(size_t));
} else {
size_t nadvance_z = (size_t)nadvance_u64;
assert((uint64_t)nadvance_z == nadvance_u64);
memmove(decay->backlog, &decay->backlog[nadvance_z],
(SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
if (nadvance_z > 1) {
memset(&decay->backlog[SMOOTHSTEP_NSTEPS -
nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
}
}
arena_decay_backlog_update_last(decay, current_npages);
}
static void
arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
extents_t *extents, size_t current_npages, size_t npages_limit,
bool is_background_thread) {
if (current_npages > npages_limit) {
arena_decay_to_limit(tsdn, arena, decay, extents, false,
npages_limit, current_npages - npages_limit,
is_background_thread);
}
}
static void
arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time,
size_t current_npages) {
assert(arena_decay_deadline_reached(decay, time));
nstime_t delta;
nstime_copy(&delta, time);
nstime_subtract(&delta, &decay->epoch);
uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval);
assert(nadvance_u64 > 0);
/* Add nadvance_u64 decay intervals to epoch. */
nstime_copy(&delta, &decay->interval);
nstime_imultiply(&delta, nadvance_u64);
nstime_add(&decay->epoch, &delta);
/* Set a new deadline. */
arena_decay_deadline_init(decay);
/* Update the backlog. */
arena_decay_backlog_update(decay, nadvance_u64, current_npages);
}
static void
arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
extents_t *extents, const nstime_t *time, bool is_background_thread) {
size_t current_npages = extents_npages_get(extents);
arena_decay_epoch_advance_helper(decay, time, current_npages);
size_t npages_limit = arena_decay_backlog_npages_limit(decay);
/* We may unlock decay->mtx during try_purge(). Finish logging first. */
decay->nunpurged = (npages_limit > current_npages) ? npages_limit :
current_npages;
if (!background_thread_enabled() || is_background_thread) {
arena_decay_try_purge(tsdn, arena, decay, extents,
current_npages, npages_limit, is_background_thread);
}
}
static void
arena_decay_reinit(arena_decay_t *decay, ssize_t decay_ms) {
arena_decay_ms_write(decay, decay_ms);
if (decay_ms > 0) {
nstime_init(&decay->interval, (uint64_t)decay_ms *
KQU(1000000));
nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS);
}
nstime_init(&decay->epoch, 0);
nstime_update(&decay->epoch);
decay->jitter_state = (uint64_t)(uintptr_t)decay;
arena_decay_deadline_init(decay);
decay->nunpurged = 0;
memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
}
static bool
arena_decay_init(arena_decay_t *decay, ssize_t decay_ms,
arena_stats_decay_t *stats) {
if (config_debug) {
for (size_t i = 0; i < sizeof(arena_decay_t); i++) {
assert(((char *)decay)[i] == 0);
}
decay->ceil_npages = 0;
}
if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY,
malloc_mutex_rank_exclusive)) {
return true;
}
decay->purging = false;
arena_decay_reinit(decay, decay_ms);
/* Memory is zeroed, so there is no need to clear stats. */
if (config_stats) {
decay->stats = stats;
}
return false;
}
static bool
arena_decay_ms_valid(ssize_t decay_ms) {
if (decay_ms < -1) {
return false;
}
if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX *
KQU(1000)) {
return true;
} }
return false;
} }
static bool /*
arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, * In situations where we're not forcing a decay (i.e. the user did not
extents_t *extents, bool is_background_thread) { * specifically request it), should we purge ourselves, or wait for the
malloc_mutex_assert_owner(tsdn, &decay->mtx); * background thread to get to it?
*/
/* Purge all or nothing if the option is disabled. */ static pac_purge_eagerness_t
ssize_t decay_ms = arena_decay_ms_read(decay); arena_decide_unforced_purge_eagerness(bool is_background_thread) {
if (decay_ms <= 0) { if (is_background_thread) {
if (decay_ms == 0) { return PAC_PURGE_ALWAYS;
arena_decay_to_limit(tsdn, arena, decay, extents, false, } else if (!is_background_thread && background_thread_enabled()) {
0, extents_npages_get(extents), return PAC_PURGE_NEVER;
is_background_thread);
}
return false;
}
nstime_t time;
nstime_init(&time, 0);
nstime_update(&time);
if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch, &time)
> 0)) {
/*
* Time went backwards. Move the epoch back in time and
* generate a new deadline, with the expectation that time
* typically flows forward for long enough periods of time that
* epochs complete. Unfortunately, this strategy is susceptible
* to clock jitter triggering premature epoch advances, but
* clock jitter estimation and compensation isn't feasible here
* because calls into this code are event-driven.
*/
nstime_copy(&decay->epoch, &time);
arena_decay_deadline_init(decay);
} else { } else {
/* Verify that time does not go backwards. */ return PAC_PURGE_ON_EPOCH_ADVANCE;
assert(nstime_compare(&decay->epoch, &time) <= 0);
} }
/*
* If the deadline has been reached, advance to the current epoch and
* purge to the new limit if necessary. Note that dirty pages created
* during the current epoch are not subject to purge until a future
* epoch; as a result, purging only happens during epoch advances, or when
* triggered by background threads (scheduled events).
*/
bool advance_epoch = arena_decay_deadline_reached(decay, &time);
if (advance_epoch) {
arena_decay_epoch_advance(tsdn, arena, decay, extents, &time,
is_background_thread);
} else if (is_background_thread) {
arena_decay_try_purge(tsdn, arena, decay, extents,
extents_npages_get(extents),
arena_decay_backlog_npages_limit(decay),
is_background_thread);
}
return advance_epoch;
}
static ssize_t
arena_decay_ms_get(arena_decay_t *decay) {
return arena_decay_ms_read(decay);
}
ssize_t
arena_dirty_decay_ms_get(arena_t *arena) {
return arena_decay_ms_get(&arena->decay_dirty);
}
ssize_t
arena_muzzy_decay_ms_get(arena_t *arena) {
return arena_decay_ms_get(&arena->decay_muzzy);
}
static bool
arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
extents_t *extents, ssize_t decay_ms) {
if (!arena_decay_ms_valid(decay_ms)) {
return true;
}
malloc_mutex_lock(tsdn, &decay->mtx);
/*
* Restart decay backlog from scratch, which may cause many dirty pages
* to be immediately purged. It would conceptually be possible to map
* the old backlog onto the new backlog, but there is no justification
* for such complexity since decay_ms changes are intended to be
* infrequent, either between the {-1, 0, >0} states, or a one-time
* arbitrary change during initial arena configuration.
*/
arena_decay_reinit(decay, decay_ms);
arena_maybe_decay(tsdn, arena, decay, extents, false);
malloc_mutex_unlock(tsdn, &decay->mtx);
return false;
} }
bool bool
arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, extent_state_t state,
ssize_t decay_ms) { ssize_t decay_ms) {
return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty, pac_purge_eagerness_t eagerness = arena_decide_unforced_purge_eagerness(
&arena->extents_dirty, decay_ms); /* is_background_thread */ false);
} return pa_decay_ms_set(tsdn, &arena->pa_shard, state, decay_ms,
eagerness);
bool
arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
ssize_t decay_ms) {
return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy,
&arena->extents_muzzy, decay_ms);
}
static size_t
arena_stash_decayed(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_limit,
size_t npages_decay_max, extent_list_t *decay_extents) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
/* Stash extents according to npages_limit. */
size_t nstashed = 0;
extent_t *extent;
while (nstashed < npages_decay_max &&
(extent = extents_evict(tsdn, arena, r_extent_hooks, extents,
npages_limit)) != NULL) {
extent_list_append(decay_extents, extent);
nstashed += extent_size_get(extent) >> LG_PAGE;
}
return nstashed;
} }
static size_t ssize_t
arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, arena_decay_ms_get(arena_t *arena, extent_state_t state) {
extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents, return pa_decay_ms_get(&arena->pa_shard, state);
bool all, extent_list_t *decay_extents, bool is_background_thread) {
size_t nmadvise, nunmapped;
size_t npurged;
if (config_stats) {
nmadvise = 0;
nunmapped = 0;
}
npurged = 0;
ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
for (extent_t *extent = extent_list_first(decay_extents); extent !=
NULL; extent = extent_list_first(decay_extents)) {
if (config_stats) {
nmadvise++;
}
size_t npages = extent_size_get(extent) >> LG_PAGE;
npurged += npages;
extent_list_remove(decay_extents, extent);
switch (extents_state_get(extents)) {
case extent_state_active:
not_reached();
case extent_state_dirty:
if (!all && muzzy_decay_ms != 0 &&
!extent_purge_lazy_wrapper(tsdn, arena,
r_extent_hooks, extent, 0,
extent_size_get(extent))) {
extents_dalloc(tsdn, arena, r_extent_hooks,
&arena->extents_muzzy, extent);
arena_background_thread_inactivity_check(tsdn,
arena, is_background_thread);
break;
}
/* Fall through. */
case extent_state_muzzy:
extent_dalloc_wrapper(tsdn, arena, r_extent_hooks,
extent);
if (config_stats) {
nunmapped += npages;
}
break;
case extent_state_retained:
default:
not_reached();
}
}
if (config_stats) {
arena_stats_lock(tsdn, &arena->stats);
arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->npurge,
1);
arena_stats_add_u64(tsdn, &arena->stats,
&decay->stats->nmadvise, nmadvise);
arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->purged,
npurged);
arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped,
nunmapped << LG_PAGE);
arena_stats_unlock(tsdn, &arena->stats);
}
return npurged;
}
/*
* npages_limit: Decay at most npages_decay_max pages without violating the
* invariant: (extents_npages_get(extents) >= npages_limit). We need an upper
* bound on the number of pages to decay in order to prevent unbounded growth
* (namely in stashed); otherwise an unbounded number of new pages could be added
* to extents during the current decay run, so that the purging thread never finishes.
*/
static void
arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
extents_t *extents, bool all, size_t npages_limit, size_t npages_decay_max,
bool is_background_thread) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 1);
malloc_mutex_assert_owner(tsdn, &decay->mtx);
if (decay->purging) {
return;
}
decay->purging = true;
malloc_mutex_unlock(tsdn, &decay->mtx);
extent_hooks_t *extent_hooks = extent_hooks_get(arena);
extent_list_t decay_extents;
extent_list_init(&decay_extents);
size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents,
npages_limit, npages_decay_max, &decay_extents);
if (npurge != 0) {
size_t npurged = arena_decay_stashed(tsdn, arena,
&extent_hooks, decay, extents, all, &decay_extents,
is_background_thread);
assert(npurged == npurge);
}
malloc_mutex_lock(tsdn, &decay->mtx);
decay->purging = false;
} }
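/*
 * Illustrative sketch (not part of the upstream diff): the page-count
 * clamping described in the comment above arena_decay_to_limit().  The
 * helper name and parameters are hypothetical; only the two bounds (never
 * dip below npages_limit, never purge more than npages_decay_max in one
 * run) mirror the documented invariant.
 */
#include <stddef.h>

static inline size_t
decay_npages_to_purge_sketch(size_t npages_current, size_t npages_limit,
    size_t npages_decay_max) {
	/* Pages above the retention floor are eligible for purging. */
	size_t excess = npages_current > npages_limit ?
	    npages_current - npages_limit : 0;
	/* Cap the work per pass so the purging thread always finishes. */
	return excess < npages_decay_max ? excess : npages_decay_max;
}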
static bool static bool
arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, arena_decay_impl(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
extents_t *extents, bool is_background_thread, bool all) { pac_decay_stats_t *decay_stats, ecache_t *ecache,
bool is_background_thread, bool all) {
if (all) { if (all) {
malloc_mutex_lock(tsdn, &decay->mtx); malloc_mutex_lock(tsdn, &decay->mtx);
arena_decay_to_limit(tsdn, arena, decay, extents, all, 0, pac_decay_all(tsdn, &arena->pa_shard.pac, decay, decay_stats,
extents_npages_get(extents), is_background_thread); ecache, /* fully_decay */ all);
malloc_mutex_unlock(tsdn, &decay->mtx); malloc_mutex_unlock(tsdn, &decay->mtx);
return false; return false;
} }
@@ -960,20 +434,20 @@ arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
/* No need to wait if another thread is in progress. */ /* No need to wait if another thread is in progress. */
return true; return true;
} }
pac_purge_eagerness_t eagerness =
bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents, arena_decide_unforced_purge_eagerness(is_background_thread);
is_background_thread); bool epoch_advanced = pac_maybe_decay_purge(tsdn, &arena->pa_shard.pac,
decay, decay_stats, ecache, eagerness);
size_t npages_new; size_t npages_new;
if (epoch_advanced) { if (epoch_advanced) {
/* Backlog is updated on epoch advance. */ /* Backlog is updated on epoch advance. */
npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1]; npages_new = decay_epoch_npages_delta(decay);
} }
malloc_mutex_unlock(tsdn, &decay->mtx); malloc_mutex_unlock(tsdn, &decay->mtx);
if (have_background_thread && background_thread_enabled() && if (have_background_thread && background_thread_enabled() &&
epoch_advanced && !is_background_thread) { epoch_advanced && !is_background_thread) {
background_thread_interval_check(tsdn, arena, decay, arena_maybe_do_deferred_work(tsdn, arena, decay, npages_new);
npages_new);
} }
return false; return false;
@@ -982,53 +456,143 @@ arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
static bool static bool
arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
bool all) { bool all) {
return arena_decay_impl(tsdn, arena, &arena->decay_dirty, return arena_decay_impl(tsdn, arena, &arena->pa_shard.pac.decay_dirty,
&arena->extents_dirty, is_background_thread, all); &arena->pa_shard.pac.stats->decay_dirty,
&arena->pa_shard.pac.ecache_dirty, is_background_thread, all);
} }
static bool static bool
arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
bool all) { bool all) {
return arena_decay_impl(tsdn, arena, &arena->decay_muzzy, if (pa_shard_dont_decay_muzzy(&arena->pa_shard)) {
&arena->extents_muzzy, is_background_thread, all); return false;
}
return arena_decay_impl(tsdn, arena, &arena->pa_shard.pac.decay_muzzy,
&arena->pa_shard.pac.stats->decay_muzzy,
&arena->pa_shard.pac.ecache_muzzy, is_background_thread, all);
} }
void void
arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) { arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) {
if (all) {
/*
* We should take a purge of "all" to mean "save as much memory
* as possible", including flushing any caches (for situations
* like thread death, or manual purge calls).
*/
sec_flush(tsdn, &arena->pa_shard.hpa_sec);
}
if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) { if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) {
return; return;
} }
arena_decay_muzzy(tsdn, arena, is_background_thread, all); arena_decay_muzzy(tsdn, arena, is_background_thread, all);
} }
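/*
 * Hedged usage sketch (not upstream code): a purge of "all", as described in
 * the comment inside arena_decay(), is normally requested through the
 * documented "arena.<i>.purge" mallctl; passing MALLCTL_ARENAS_ALL purges
 * every arena.  Assumes <jemalloc/jemalloc.h> is available (with Redis's
 * bundled copy the public symbols may carry the je_ prefix).
 */
#include <jemalloc/jemalloc.h>
#include <stdio.h>

static void
purge_all_arenas(void) {
	char cmd[64];
	snprintf(cmd, sizeof(cmd), "arena.%u.purge",
	    (unsigned)MALLCTL_ARENAS_ALL);
	mallctl(cmd, NULL, NULL, NULL, 0);
}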
static bool
arena_should_decay_early(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
background_thread_info_t *info, nstime_t *remaining_sleep,
size_t npages_new) {
malloc_mutex_assert_owner(tsdn, &info->mtx);
if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
return false;
}
if (!decay_gradually(decay)) {
malloc_mutex_unlock(tsdn, &decay->mtx);
return false;
}
nstime_init(remaining_sleep, background_thread_wakeup_time_get(info));
if (nstime_compare(remaining_sleep, &decay->epoch) <= 0) {
malloc_mutex_unlock(tsdn, &decay->mtx);
return false;
}
nstime_subtract(remaining_sleep, &decay->epoch);
if (npages_new > 0) {
uint64_t npurge_new = decay_npages_purge_in(decay,
remaining_sleep, npages_new);
info->npages_to_purge_new += npurge_new;
}
malloc_mutex_unlock(tsdn, &decay->mtx);
return info->npages_to_purge_new >
ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD;
}
/*
* Check whether deferred work needs to be done sooner than planned.
* For decay we may want to wake up earlier because of an influx of dirty
* pages: rather than waiting out the previously estimated sleep time, we
* proactively purge those pages.
* If the background thread is sleeping indefinitely, always wake it up,
* because some deferred work has been generated.
*/
static void static void
arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) { arena_maybe_do_deferred_work(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE); size_t npages_new) {
background_thread_info_t *info = arena_background_thread_info_get(
arena);
if (malloc_mutex_trylock(tsdn, &info->mtx)) {
/*
* The background thread may hold the mutex for a long period of
* time, and we'd like to avoid exposing that variance to
* application threads.  Keep this path non-blocking and leave
* the work to a future epoch.
*/
return;
}
if (!background_thread_is_started(info)) {
goto label_done;
}
nstime_t remaining_sleep;
if (background_thread_indefinite_sleep(info)) {
background_thread_wakeup_early(info, NULL);
} else if (arena_should_decay_early(tsdn, arena, decay, info,
&remaining_sleep, npages_new)) {
info->npages_to_purge_new = 0;
background_thread_wakeup_early(info, &remaining_sleep);
}
label_done:
malloc_mutex_unlock(tsdn, &info->mtx);
}
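/*
 * Illustrative sketch (not upstream code): the early-wakeup policy described
 * above, reduced to its core.  The names (pending_npages, purge_threshold,
 * wakeup) are hypothetical; the real implementation also accounts for the
 * decay curve via decay_npages_purge_in() and the remaining sleep time.
 */
#include <stdbool.h>
#include <stddef.h>

typedef void (*wakeup_fn_t)(void *ctx);

static bool
maybe_wake_purger_sketch(size_t *pending_npages, size_t npages_new,
    size_t purge_threshold, wakeup_fn_t wakeup, void *ctx) {
	*pending_npages += npages_new;
	if (*pending_npages <= purge_threshold) {
		/* Not enough new dirty pages; keep the planned sleep. */
		return false;
	}
	/* Enough work has accumulated; wake the background purger early. */
	*pending_npages = 0;
	wakeup(ctx);
	return true;
}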
extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; /* Called from background threads. */
arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, slab); void
arena_do_deferred_work(tsdn_t *tsdn, arena_t *arena) {
arena_decay(tsdn, arena, true, false);
pa_shard_do_deferred_work(tsdn, &arena->pa_shard);
}
void
arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab) {
bool deferred_work_generated = false;
pa_dalloc(tsdn, &arena->pa_shard, slab, &deferred_work_generated);
if (deferred_work_generated) {
arena_handle_deferred_work(tsdn, arena);
}
} }
static void static void
arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) { arena_bin_slabs_nonfull_insert(bin_t *bin, edata_t *slab) {
assert(extent_nfree_get(slab) > 0); assert(edata_nfree_get(slab) > 0);
extent_heap_insert(&bin->slabs_nonfull, slab); edata_heap_insert(&bin->slabs_nonfull, slab);
if (config_stats) { if (config_stats) {
bin->stats.nonfull_slabs++; bin->stats.nonfull_slabs++;
} }
} }
static void static void
arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) { arena_bin_slabs_nonfull_remove(bin_t *bin, edata_t *slab) {
extent_heap_remove(&bin->slabs_nonfull, slab); edata_heap_remove(&bin->slabs_nonfull, slab);
if (config_stats) { if (config_stats) {
bin->stats.nonfull_slabs--; bin->stats.nonfull_slabs--;
} }
} }
static extent_t * static edata_t *
arena_bin_slabs_nonfull_tryget(bin_t *bin) { arena_bin_slabs_nonfull_tryget(bin_t *bin) {
extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull); edata_t *slab = edata_heap_remove_first(&bin->slabs_nonfull);
if (slab == NULL) { if (slab == NULL) {
return NULL; return NULL;
} }
@@ -1040,30 +604,30 @@ arena_bin_slabs_nonfull_tryget(bin_t *bin) {
} }
static void static void
arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, extent_t *slab) { arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, edata_t *slab) {
assert(extent_nfree_get(slab) == 0); assert(edata_nfree_get(slab) == 0);
/* /*
* Tracking extents is required by arena_reset, which is not allowed * Tracking extents is required by arena_reset, which is not allowed
* for auto arenas. Bypass this step to avoid touching the extent * for auto arenas. Bypass this step to avoid touching the edata
* linkage (often results in cache misses) for auto arenas. * linkage (often results in cache misses) for auto arenas.
*/ */
if (arena_is_auto(arena)) { if (arena_is_auto(arena)) {
return; return;
} }
extent_list_append(&bin->slabs_full, slab); edata_list_active_append(&bin->slabs_full, slab);
} }
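/*
 * Hedged usage sketch (not upstream code): arena_reset(), the reason the
 * full-slab list is tracked above, is only reachable for manually created
 * arenas, via the documented "arenas.create" and "arena.<i>.reset" mallctls.
 * Assumes <jemalloc/jemalloc.h> (symbols may carry the je_ prefix in Redis's
 * bundled build).
 */
#include <jemalloc/jemalloc.h>
#include <stdio.h>

static int
create_and_reset_arena(void) {
	unsigned ind;
	size_t sz = sizeof(ind);
	if (mallctl("arenas.create", &ind, &sz, NULL, 0) != 0) {
		return -1;
	}
	/* ... allocate from the new arena, then discard everything: */
	char cmd[64];
	snprintf(cmd, sizeof(cmd), "arena.%u.reset", ind);
	return mallctl(cmd, NULL, NULL, NULL, 0);
}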
static void static void
arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, extent_t *slab) { arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, edata_t *slab) {
if (arena_is_auto(arena)) { if (arena_is_auto(arena)) {
return; return;
} }
extent_list_remove(&bin->slabs_full, slab); edata_list_active_remove(&bin->slabs_full, slab);
} }
static void static void
arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) { arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) {
extent_t *slab; edata_t *slab;
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
if (bin->slabcur != NULL) { if (bin->slabcur != NULL) {
@@ -1073,13 +637,13 @@ arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) {
arena_slab_dalloc(tsd_tsdn(tsd), arena, slab); arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
} }
while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) != NULL) { while ((slab = edata_heap_remove_first(&bin->slabs_nonfull)) != NULL) {
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
arena_slab_dalloc(tsd_tsdn(tsd), arena, slab); arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
} }
for (slab = extent_list_first(&bin->slabs_full); slab != NULL; for (slab = edata_list_active_first(&bin->slabs_full); slab != NULL;
slab = extent_list_first(&bin->slabs_full)) { slab = edata_list_active_first(&bin->slabs_full)) {
arena_bin_slabs_full_remove(arena, bin, slab); arena_bin_slabs_full_remove(arena, bin, slab);
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
arena_slab_dalloc(tsd_tsdn(tsd), arena, slab); arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
@@ -1111,16 +675,15 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
/* Large allocations. */ /* Large allocations. */
malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx); malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
for (extent_t *extent = extent_list_first(&arena->large); extent != for (edata_t *edata = edata_list_active_first(&arena->large);
NULL; extent = extent_list_first(&arena->large)) { edata != NULL; edata = edata_list_active_first(&arena->large)) {
void *ptr = extent_base_get(extent); void *ptr = edata_base_get(edata);
size_t usize; size_t usize;
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx); malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
alloc_ctx_t alloc_ctx; emap_alloc_ctx_t alloc_ctx;
rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr,
rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, &alloc_ctx);
(uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
assert(alloc_ctx.szind != SC_NSIZES); assert(alloc_ctx.szind != SC_NSIZES);
if (config_stats || (config_prof && opt_prof)) { if (config_stats || (config_prof && opt_prof)) {
@@ -1131,7 +694,7 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
if (config_prof && opt_prof) { if (config_prof && opt_prof) {
prof_free(tsd, ptr, usize, &alloc_ctx); prof_free(tsd, ptr, usize, &alloc_ctx);
} }
large_dalloc(tsd_tsdn(tsd), extent); large_dalloc(tsd_tsdn(tsd), edata);
malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx); malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
} }
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx); malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
@@ -1139,32 +702,95 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
/* Bins. */ /* Bins. */
for (unsigned i = 0; i < SC_NBINS; i++) { for (unsigned i = 0; i < SC_NBINS; i++) {
for (unsigned j = 0; j < bin_infos[i].n_shards; j++) { for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
arena_bin_reset(tsd, arena, arena_bin_reset(tsd, arena, arena_get_bin(arena, i, j));
&arena->bins[i].bin_shards[j]);
} }
} }
pa_shard_reset(tsd_tsdn(tsd), &arena->pa_shard);
}
atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED); static void
arena_prepare_base_deletion_sync_finish(tsd_t *tsd, malloc_mutex_t **mutexes,
unsigned n_mtx) {
for (unsigned i = 0; i < n_mtx; i++) {
malloc_mutex_lock(tsd_tsdn(tsd), mutexes[i]);
malloc_mutex_unlock(tsd_tsdn(tsd), mutexes[i]);
}
} }
#define ARENA_DESTROY_MAX_DELAYED_MTX 32
static void static void
arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) { arena_prepare_base_deletion_sync(tsd_t *tsd, malloc_mutex_t *mtx,
malloc_mutex_t **delayed_mtx, unsigned *n_delayed) {
if (!malloc_mutex_trylock(tsd_tsdn(tsd), mtx)) {
/* No contention. */
malloc_mutex_unlock(tsd_tsdn(tsd), mtx);
return;
}
unsigned n = *n_delayed;
assert(n < ARENA_DESTROY_MAX_DELAYED_MTX);
/* Add another to the batch. */
delayed_mtx[n++] = mtx;
if (n == ARENA_DESTROY_MAX_DELAYED_MTX) {
arena_prepare_base_deletion_sync_finish(tsd, delayed_mtx, n);
n = 0;
}
*n_delayed = n;
}
static void
arena_prepare_base_deletion(tsd_t *tsd, base_t *base_to_destroy) {
/* /*
* Iterate over the retained extents and destroy them. This gives the * In order to coalesce, emap_try_acquire_edata_neighbor will attempt to
* extent allocator underlying the extent hooks an opportunity to unmap * check neighbor edata's state to determine eligibility. This means
* all retained memory without having to keep its own metadata * under certain conditions, the metadata from an arena can be accessed
* structures. In practice, virtual memory for dss-allocated extents is * w/o holding any locks from that arena. In order to guarantee safe
* leaked here, so best practice is to avoid dss for arenas to be * memory access, the metadata and the underlying base allocator needs
* destroyed, or provide custom extent hooks that track retained * to be kept alive, until all pending accesses are done.
* dss-based extents for later reuse. *
* 1) with opt_retain, the arena boundary implies the is_head state
* (tracked in the rtree leaf), and the coalesce flow will stop at the
* head state branch. Therefore no cross arena metadata access
* possible.
*
* 2) w/o opt_retain, the arena id needs to be read from the edata_t,
* meaning read only cross-arena metadata access is possible. The
* coalesce attempt will stop at the arena_id mismatch, and is always
* under one of the ecache locks. To allow safe passthrough of such
* metadata accesses, the loop below will iterate through all manual
* arenas' ecache locks. As all the metadata from this base allocator
* have been unlinked from the rtree, after going through all the
* relevant ecache locks, it's safe to say that a) pending accesses are
* all finished, and b) no new access will be generated.
*/ */
extent_hooks_t *extent_hooks = extent_hooks_get(arena); if (opt_retain) {
extent_t *extent; return;
while ((extent = extents_evict(tsdn, arena, &extent_hooks, }
&arena->extents_retained, 0)) != NULL) { unsigned destroy_ind = base_ind_get(base_to_destroy);
extent_destroy_wrapper(tsdn, arena, &extent_hooks, extent); assert(destroy_ind >= manual_arena_base);
tsdn_t *tsdn = tsd_tsdn(tsd);
malloc_mutex_t *delayed_mtx[ARENA_DESTROY_MAX_DELAYED_MTX];
unsigned n_delayed = 0, total = narenas_total_get();
for (unsigned i = 0; i < total; i++) {
if (i == destroy_ind) {
continue;
}
arena_t *arena = arena_get(tsdn, i, false);
if (arena == NULL) {
continue;
}
pac_t *pac = &arena->pa_shard.pac;
arena_prepare_base_deletion_sync(tsd, &pac->ecache_dirty.mtx,
delayed_mtx, &n_delayed);
arena_prepare_base_deletion_sync(tsd, &pac->ecache_muzzy.mtx,
delayed_mtx, &n_delayed);
arena_prepare_base_deletion_sync(tsd, &pac->ecache_retained.mtx,
delayed_mtx, &n_delayed);
} }
arena_prepare_base_deletion_sync_finish(tsd, delayed_mtx, n_delayed);
} }
#undef ARENA_DESTROY_MAX_DELAYED_MTX
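/*
 * Illustrative sketch (not upstream code): the "quiesce by cycling every
 * lock" idea used above.  Locking and immediately unlocking each mutex
 * guarantees that any reader that entered its critical section before we
 * started has finished; contended locks are batched and drained at the end.
 * The 32-slot batch size mirrors the code above; quiesce_all() itself and
 * the use of pthread mutexes are hypothetical stand-ins.
 */
#include <pthread.h>

#define QUIESCE_MAX_DELAYED 32

static void
quiesce_drain(pthread_mutex_t **batch, unsigned n) {
	for (unsigned i = 0; i < n; i++) {
		pthread_mutex_lock(batch[i]);
		pthread_mutex_unlock(batch[i]);
	}
}

static void
quiesce_all(pthread_mutex_t **mutexes, unsigned n_mutexes) {
	pthread_mutex_t *delayed[QUIESCE_MAX_DELAYED];
	unsigned n_delayed = 0;
	for (unsigned i = 0; i < n_mutexes; i++) {
		if (pthread_mutex_trylock(mutexes[i]) == 0) {
			/* Uncontended: nobody is inside; nothing to wait for. */
			pthread_mutex_unlock(mutexes[i]);
			continue;
		}
		/* Contended: defer the blocking lock/unlock to the end. */
		delayed[n_delayed++] = mutexes[i];
		if (n_delayed == QUIESCE_MAX_DELAYED) {
			quiesce_drain(delayed, n_delayed);
			n_delayed = 0;
		}
	}
	quiesce_drain(delayed, n_delayed);
}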
void void
arena_destroy(tsd_t *tsd, arena_t *arena) { arena_destroy(tsd_t *tsd, arena_t *arena) {
@@ -1175,13 +801,10 @@ arena_destroy(tsd_t *tsd, arena_t *arena) {
/* /*
* No allocations have occurred since arena_reset() was called. * No allocations have occurred since arena_reset() was called.
* Furthermore, the caller (arena_i_destroy_ctl()) purged all cached * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached
* extents, so only retained extents may remain. * extents, so only retained extents may remain and it's safe to call
* pa_shard_destroy_retained.
*/ */
assert(extents_npages_get(&arena->extents_dirty) == 0); pa_shard_destroy(tsd_tsdn(tsd), &arena->pa_shard);
assert(extents_npages_get(&arena->extents_muzzy) == 0);
/* Deallocate retained memory. */
arena_destroy_retained(tsd_tsdn(tsd), arena);
/* /*
* Remove the arena pointer from the arenas array. We rely on the fact * Remove the arena pointer from the arenas array. We rely on the fact
@@ -1197,316 +820,370 @@ arena_destroy(tsd_t *tsd, arena_t *arena) {
/*
* Destroy the base allocator, which manages all metadata ever mapped
* by this arena.  The prepare call below ensures that no pending
* accesses to metadata in this base remain before it is deleted.
*/
arena_prepare_base_deletion(tsd, arena->base);
base_delete(tsd_tsdn(tsd), arena->base); base_delete(tsd_tsdn(tsd), arena->base);
} }
static extent_t * static edata_t *
arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, const bin_info_t *bin_info,
szind_t szind) {
extent_t *slab;
bool zero, commit;
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
zero = false;
commit = true;
slab = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL,
bin_info->slab_size, 0, PAGE, true, szind, &zero, &commit);
if (config_stats && slab != NULL) {
arena_stats_mapped_add(tsdn, &arena->stats,
bin_info->slab_size);
}
return slab;
}
static extent_t *
arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard, arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard,
const bin_info_t *bin_info) { const bin_info_t *bin_info) {
bool deferred_work_generated = false;
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0); WITNESS_RANK_CORE, 0);
extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; bool guarded = san_slab_extent_decide_guard(tsdn,
szind_t szind = sz_size2index(bin_info->reg_size); arena_get_ehooks(arena));
bool zero = false; edata_t *slab = pa_alloc(tsdn, &arena->pa_shard, bin_info->slab_size,
bool commit = true; /* alignment */ PAGE, /* slab */ true, /* szind */ binind,
extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks, /* zero */ false, guarded, &deferred_work_generated);
&arena->extents_dirty, NULL, bin_info->slab_size, 0, PAGE, true,
binind, &zero, &commit); if (deferred_work_generated) {
if (slab == NULL && arena_may_have_muzzy(arena)) { arena_handle_deferred_work(tsdn, arena);
slab = extents_alloc(tsdn, arena, &extent_hooks,
&arena->extents_muzzy, NULL, bin_info->slab_size, 0, PAGE,
true, binind, &zero, &commit);
} }
if (slab == NULL) { if (slab == NULL) {
slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks, return NULL;
bin_info, szind);
if (slab == NULL) {
return NULL;
}
} }
assert(extent_slab_get(slab)); assert(edata_slab_get(slab));
/* Initialize slab internals. */ /* Initialize slab internals. */
arena_slab_data_t *slab_data = extent_slab_data_get(slab); slab_data_t *slab_data = edata_slab_data_get(slab);
extent_nfree_binshard_set(slab, bin_info->nregs, binshard); edata_nfree_binshard_set(slab, bin_info->nregs, binshard);
bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false); bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false);
arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE);
return slab; return slab;
} }
static extent_t * /*
arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin, * Before attempting the _with_fresh_slab approaches below, the _no_fresh_slab
szind_t binind, unsigned binshard) { * variants (i.e. through slabcur and nonfull) must be tried first.
extent_t *slab; */
const bin_info_t *bin_info; static void
arena_bin_refill_slabcur_with_fresh_slab(tsdn_t *tsdn, arena_t *arena,
/* Look for a usable slab. */ bin_t *bin, szind_t binind, edata_t *fresh_slab) {
slab = arena_bin_slabs_nonfull_tryget(bin); malloc_mutex_assert_owner(tsdn, &bin->lock);
if (slab != NULL) { /* Only called after slabcur and nonfull both failed. */
return slab; assert(bin->slabcur == NULL);
} assert(edata_heap_first(&bin->slabs_nonfull) == NULL);
/* No existing slabs have any space available. */ assert(fresh_slab != NULL);
bin_info = &bin_infos[binind]; /* A new slab from arena_slab_alloc() */
assert(edata_nfree_get(fresh_slab) == bin_infos[binind].nregs);
/* Allocate a new slab. */ if (config_stats) {
malloc_mutex_unlock(tsdn, &bin->lock); bin->stats.nslabs++;
/******************************/ bin->stats.curslabs++;
slab = arena_slab_alloc(tsdn, arena, binind, binshard, bin_info);
/********************************/
malloc_mutex_lock(tsdn, &bin->lock);
if (slab != NULL) {
if (config_stats) {
bin->stats.nslabs++;
bin->stats.curslabs++;
}
return slab;
} }
bin->slabcur = fresh_slab;
}
/* /* Refill slabcur and then alloc using the fresh slab */
* arena_slab_alloc() failed, but another thread may have made static void *
* sufficient memory available while this one dropped bin->lock above, arena_bin_malloc_with_fresh_slab(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
* so search one more time. szind_t binind, edata_t *fresh_slab) {
*/ malloc_mutex_assert_owner(tsdn, &bin->lock);
slab = arena_bin_slabs_nonfull_tryget(bin); arena_bin_refill_slabcur_with_fresh_slab(tsdn, arena, bin, binind,
if (slab != NULL) { fresh_slab);
return slab;
}
return NULL; return arena_slab_reg_alloc(bin->slabcur, &bin_infos[binind]);
} }
/* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */ static bool
static void * arena_bin_refill_slabcur_no_fresh_slab(tsdn_t *tsdn, arena_t *arena,
arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin, bin_t *bin) {
szind_t binind, unsigned binshard) { malloc_mutex_assert_owner(tsdn, &bin->lock);
const bin_info_t *bin_info; /* Only called after arena_slab_reg_alloc[_batch] failed. */
extent_t *slab; assert(bin->slabcur == NULL || edata_nfree_get(bin->slabcur) == 0);
bin_info = &bin_infos[binind];
if (!arena_is_auto(arena) && bin->slabcur != NULL) {
arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
bin->slabcur = NULL;
}
slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind, binshard);
if (bin->slabcur != NULL) { if (bin->slabcur != NULL) {
/*
* Another thread updated slabcur while this one ran without the
* bin lock in arena_bin_nonfull_slab_get().
*/
if (extent_nfree_get(bin->slabcur) > 0) {
void *ret = arena_slab_reg_alloc(bin->slabcur,
bin_info);
if (slab != NULL) {
/*
* arena_slab_alloc() may have allocated slab,
* or it may have been pulled from
* slabs_nonfull. Therefore it is unsafe to
* make any assumptions about how slab has
* previously been used, and
* arena_bin_lower_slab() must be called, as if
* a region were just deallocated from the slab.
*/
if (extent_nfree_get(slab) == bin_info->nregs) {
arena_dalloc_bin_slab(tsdn, arena, slab,
bin);
} else {
arena_bin_lower_slab(tsdn, arena, slab,
bin);
}
}
return ret;
}
arena_bin_slabs_full_insert(arena, bin, bin->slabcur); arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
bin->slabcur = NULL;
}
if (slab == NULL) {
return NULL;
} }
bin->slabcur = slab;
assert(extent_nfree_get(bin->slabcur) > 0); /* Look for a usable slab. */
bin->slabcur = arena_bin_slabs_nonfull_tryget(bin);
assert(bin->slabcur == NULL || edata_nfree_get(bin->slabcur) > 0);
return arena_slab_reg_alloc(slab, bin_info); return (bin->slabcur == NULL);
} }
/* Choose a bin shard and return the locked bin. */
bin_t * bin_t *
arena_bin_choose_lock(tsdn_t *tsdn, arena_t *arena, szind_t binind, arena_bin_choose(tsdn_t *tsdn, arena_t *arena, szind_t binind,
unsigned *binshard) { unsigned *binshard_p) {
bin_t *bin; unsigned binshard;
if (tsdn_null(tsdn) || tsd_arena_get(tsdn_tsd(tsdn)) == NULL) { if (tsdn_null(tsdn) || tsd_arena_get(tsdn_tsd(tsdn)) == NULL) {
*binshard = 0; binshard = 0;
} else { } else {
*binshard = tsd_binshardsp_get(tsdn_tsd(tsdn))->binshard[binind]; binshard = tsd_binshardsp_get(tsdn_tsd(tsdn))->binshard[binind];
} }
assert(*binshard < bin_infos[binind].n_shards); assert(binshard < bin_infos[binind].n_shards);
bin = &arena->bins[binind].bin_shards[*binshard]; if (binshard_p != NULL) {
malloc_mutex_lock(tsdn, &bin->lock); *binshard_p = binshard;
}
return bin; return arena_get_bin(arena, binind, binshard);
} }
void void
arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, arena_cache_bin_fill_small(tsdn_t *tsdn, arena_t *arena,
cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) { cache_bin_t *cache_bin, cache_bin_info_t *cache_bin_info, szind_t binind,
unsigned i, nfill, cnt; const unsigned nfill) {
assert(cache_bin_ncached_get_local(cache_bin, cache_bin_info) == 0);
const bin_info_t *bin_info = &bin_infos[binind];
CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nfill);
cache_bin_init_ptr_array_for_fill(cache_bin, cache_bin_info, &ptrs,
nfill);
/*
* Bin-local resources are used first: 1) bin->slabcur, and 2) nonfull
* slabs. After both are exhausted, new slabs will be allocated through
* arena_slab_alloc().
*
* Bin lock is only taken / released right before / after the while(...)
* refill loop, with new slab allocation (which has its own locking)
* kept outside of the loop. This setup facilitates flat combining, at
* the cost of the nested loop (through goto label_refill).
*
* To optimize for cases with contention and limited resources
* (e.g. hugepage-backed or non-overcommit arenas), each fill-iteration
* gets one chance of slab_alloc, and a retry of bin local resources
* after the slab allocation (regardless if slab_alloc failed, because
* the bin lock is dropped during the slab allocation).
*
* In other words, new slab allocation is allowed, as long as there was
* progress since the previous slab_alloc. This is tracked with
* made_progress below, initialized to true to jump start the first
* iteration.
*
* In other words (again), the loop will only terminate early (i.e. stop
* with filled < nfill) after going through the three steps: a) bin
* local exhausted, b) unlock and slab_alloc returns null, c) re-lock
* and bin local fails again.
*/
bool made_progress = true;
edata_t *fresh_slab = NULL;
bool alloc_and_retry = false;
unsigned filled = 0;
unsigned binshard;
bin_t *bin = arena_bin_choose(tsdn, arena, binind, &binshard);
label_refill:
malloc_mutex_lock(tsdn, &bin->lock);
while (filled < nfill) {
/* Try batch-fill from slabcur first. */
edata_t *slabcur = bin->slabcur;
if (slabcur != NULL && edata_nfree_get(slabcur) > 0) {
unsigned tofill = nfill - filled;
unsigned nfree = edata_nfree_get(slabcur);
unsigned cnt = tofill < nfree ? tofill : nfree;
arena_slab_reg_alloc_batch(slabcur, bin_info, cnt,
&ptrs.ptr[filled]);
made_progress = true;
filled += cnt;
continue;
}
/* Next try refilling slabcur from nonfull slabs. */
if (!arena_bin_refill_slabcur_no_fresh_slab(tsdn, arena, bin)) {
assert(bin->slabcur != NULL);
continue;
}
/* Then see if a new slab was reserved already. */
if (fresh_slab != NULL) {
arena_bin_refill_slabcur_with_fresh_slab(tsdn, arena,
bin, binind, fresh_slab);
assert(bin->slabcur != NULL);
fresh_slab = NULL;
continue;
}
/* Try slab_alloc if made progress (or never did slab_alloc). */
if (made_progress) {
assert(bin->slabcur == NULL);
assert(fresh_slab == NULL);
alloc_and_retry = true;
/* Alloc a new slab then come back. */
break;
}
/* OOM. */
assert(fresh_slab == NULL);
assert(!alloc_and_retry);
break;
} /* while (filled < nfill) loop. */
if (config_stats && !alloc_and_retry) {
bin->stats.nmalloc += filled;
bin->stats.nrequests += cache_bin->tstats.nrequests;
bin->stats.curregs += filled;
bin->stats.nfills++;
cache_bin->tstats.nrequests = 0;
}
malloc_mutex_unlock(tsdn, &bin->lock);
assert(tbin->ncached == 0); if (alloc_and_retry) {
assert(fresh_slab == NULL);
assert(filled < nfill);
assert(made_progress);
if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) { fresh_slab = arena_slab_alloc(tsdn, arena, binind, binshard,
prof_idump(tsdn); bin_info);
/* fresh_slab NULL case handled in the for loop. */
alloc_and_retry = false;
made_progress = false;
goto label_refill;
}
assert(filled == nfill || (fresh_slab == NULL && !made_progress));
/* Release if allocated but not used. */
if (fresh_slab != NULL) {
assert(edata_nfree_get(fresh_slab) == bin_info->nregs);
arena_slab_dalloc(tsdn, arena, fresh_slab);
fresh_slab = NULL;
} }
cache_bin_finish_fill(cache_bin, cache_bin_info, &ptrs, filled);
arena_decay_tick(tsdn, arena);
}
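/*
 * Illustrative sketch (not upstream code): the refill-loop structure that the
 * long comment in arena_cache_bin_fill_small() describes, with the allocator
 * details stripped away.  try_local() and alloc_slab() are hypothetical
 * callbacks; the point is that a fresh slab allocation is attempted only
 * while progress is still being made, so the loop cannot spin forever under
 * OOM.
 */
#include <stdbool.h>
#include <stddef.h>

typedef size_t (*try_local_fn_t)(void *bin, size_t want);
typedef bool (*alloc_slab_fn_t)(void *bin);

static size_t
fill_loop_sketch(void *bin, size_t nfill, try_local_fn_t try_local,
    alloc_slab_fn_t alloc_slab) {
	size_t filled = 0;
	bool made_progress = true;	/* Jump-starts the first slab_alloc. */
	while (filled < nfill) {
		size_t got = try_local(bin, nfill - filled);
		if (got > 0) {
			filled += got;
			made_progress = true;
			continue;
		}
		if (!made_progress) {
			/* Bin-local failed again after a failed slab_alloc:
			 * treat as OOM and stop. */
			break;
		}
		/*
		 * One slab allocation per round of progress; the bin-local
		 * path is retried afterwards even if it fails, since the bin
		 * lock is dropped during the allocation in the real code.
		 */
		(void)alloc_slab(bin);
		made_progress = false;
	}
	return filled;
}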
size_t
arena_fill_small_fresh(tsdn_t *tsdn, arena_t *arena, szind_t binind,
void **ptrs, size_t nfill, bool zero) {
assert(binind < SC_NBINS);
const bin_info_t *bin_info = &bin_infos[binind];
const size_t nregs = bin_info->nregs;
assert(nregs > 0);
const size_t usize = bin_info->reg_size;
const bool manual_arena = !arena_is_auto(arena);
unsigned binshard; unsigned binshard;
bin_t *bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard); bin_t *bin = arena_bin_choose(tsdn, arena, binind, &binshard);
for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >> size_t nslab = 0;
tcache->lg_fill_div[binind]); i < nfill; i += cnt) { size_t filled = 0;
extent_t *slab; edata_t *slab = NULL;
if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > edata_list_active_t fulls;
0) { edata_list_active_init(&fulls);
unsigned tofill = nfill - i;
cnt = tofill < extent_nfree_get(slab) ? while (filled < nfill && (slab = arena_slab_alloc(tsdn, arena, binind,
tofill : extent_nfree_get(slab); binshard, bin_info)) != NULL) {
arena_slab_reg_alloc_batch( assert((size_t)edata_nfree_get(slab) == nregs);
slab, &bin_infos[binind], cnt, ++nslab;
tbin->avail - nfill + i); size_t batch = nfill - filled;
} else { if (batch > nregs) {
cnt = 1; batch = nregs;
void *ptr = arena_bin_malloc_hard(tsdn, arena, bin,
binind, binshard);
/*
* OOM. tbin->avail isn't yet filled down to its first
* element, so the successful allocations (if any) must
* be moved just before tbin->avail before bailing out.
*/
if (ptr == NULL) {
if (i > 0) {
memmove(tbin->avail - i,
tbin->avail - nfill,
i * sizeof(void *));
}
break;
}
/* Insert such that low regions get used first. */
*(tbin->avail - nfill + i) = ptr;
} }
if (config_fill && unlikely(opt_junk_alloc)) { assert(batch > 0);
for (unsigned j = 0; j < cnt; j++) { arena_slab_reg_alloc_batch(slab, bin_info, (unsigned)batch,
void* ptr = *(tbin->avail - nfill + i + j); &ptrs[filled]);
arena_alloc_junk_small(ptr, &bin_infos[binind], assert(edata_addr_get(slab) == ptrs[filled]);
true); if (zero) {
memset(ptrs[filled], 0, batch * usize);
}
filled += batch;
if (batch == nregs) {
if (manual_arena) {
edata_list_active_append(&fulls, slab);
} }
slab = NULL;
} }
} }
malloc_mutex_lock(tsdn, &bin->lock);
/*
* Only the last slab can be non-empty, and the last slab is non-empty
* iff slab != NULL.
*/
if (slab != NULL) {
arena_bin_lower_slab(tsdn, arena, slab, bin);
}
if (manual_arena) {
edata_list_active_concat(&bin->slabs_full, &fulls);
}
assert(edata_list_active_empty(&fulls));
if (config_stats) { if (config_stats) {
bin->stats.nmalloc += i; bin->stats.nslabs += nslab;
bin->stats.nrequests += tbin->tstats.nrequests; bin->stats.curslabs += nslab;
bin->stats.curregs += i; bin->stats.nmalloc += filled;
bin->stats.nfills++; bin->stats.nrequests += filled;
tbin->tstats.nrequests = 0; bin->stats.curregs += filled;
} }
malloc_mutex_unlock(tsdn, &bin->lock); malloc_mutex_unlock(tsdn, &bin->lock);
tbin->ncached = i;
arena_decay_tick(tsdn, arena); arena_decay_tick(tsdn, arena);
return filled;
} }
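/*
 * Illustrative sketch (not upstream code): the batching arithmetic used in
 * arena_fill_small_fresh() above.  Given nfill requested regions and slabs
 * holding nregs regions each, this computes how many fresh slabs are needed
 * and how much of the last slab is consumed.  The helper name is
 * hypothetical.
 */
#include <stddef.h>

static void
fresh_fill_batches_sketch(size_t nfill, size_t nregs, size_t *nslab_out,
    size_t *last_batch_out) {
	size_t nslab = (nfill + nregs - 1) / nregs;	/* Ceiling division. */
	size_t rem = nfill % nregs;
	*nslab_out = nslab;
	/* The last slab is only partially consumed unless nregs divides nfill. */
	*last_batch_out = rem == 0 ? nregs : rem;
}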
void /*
arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info, bool zero) { * Without allocating a new slab, try arena_slab_reg_alloc() and re-fill
if (!zero) { * bin->slabcur if necessary.
memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size); */
static void *
arena_bin_malloc_no_fresh_slab(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
szind_t binind) {
malloc_mutex_assert_owner(tsdn, &bin->lock);
if (bin->slabcur == NULL || edata_nfree_get(bin->slabcur) == 0) {
if (arena_bin_refill_slabcur_no_fresh_slab(tsdn, arena, bin)) {
return NULL;
}
} }
}
static void assert(bin->slabcur != NULL && edata_nfree_get(bin->slabcur) > 0);
arena_dalloc_junk_small_impl(void *ptr, const bin_info_t *bin_info) { return arena_slab_reg_alloc(bin->slabcur, &bin_infos[binind]);
memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size);
} }
arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small =
arena_dalloc_junk_small_impl;
static void * static void *
arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) { arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
void *ret;
bin_t *bin;
size_t usize;
extent_t *slab;
assert(binind < SC_NBINS); assert(binind < SC_NBINS);
usize = sz_index2size(binind); const bin_info_t *bin_info = &bin_infos[binind];
size_t usize = sz_index2size(binind);
unsigned binshard; unsigned binshard;
bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard); bin_t *bin = arena_bin_choose(tsdn, arena, binind, &binshard);
if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) {
ret = arena_slab_reg_alloc(slab, &bin_infos[binind]);
} else {
ret = arena_bin_malloc_hard(tsdn, arena, bin, binind, binshard);
}
malloc_mutex_lock(tsdn, &bin->lock);
edata_t *fresh_slab = NULL;
void *ret = arena_bin_malloc_no_fresh_slab(tsdn, arena, bin, binind);
if (ret == NULL) { if (ret == NULL) {
malloc_mutex_unlock(tsdn, &bin->lock); malloc_mutex_unlock(tsdn, &bin->lock);
return NULL; /******************************/
fresh_slab = arena_slab_alloc(tsdn, arena, binind, binshard,
bin_info);
/********************************/
malloc_mutex_lock(tsdn, &bin->lock);
/* Retry since the lock was dropped. */
ret = arena_bin_malloc_no_fresh_slab(tsdn, arena, bin, binind);
if (ret == NULL) {
if (fresh_slab == NULL) {
/* OOM */
malloc_mutex_unlock(tsdn, &bin->lock);
return NULL;
}
ret = arena_bin_malloc_with_fresh_slab(tsdn, arena, bin,
binind, fresh_slab);
fresh_slab = NULL;
}
} }
if (config_stats) { if (config_stats) {
bin->stats.nmalloc++; bin->stats.nmalloc++;
bin->stats.nrequests++; bin->stats.nrequests++;
bin->stats.curregs++; bin->stats.curregs++;
} }
malloc_mutex_unlock(tsdn, &bin->lock); malloc_mutex_unlock(tsdn, &bin->lock);
if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
prof_idump(tsdn);
}
if (!zero) { if (fresh_slab != NULL) {
if (config_fill) { arena_slab_dalloc(tsdn, arena, fresh_slab);
if (unlikely(opt_junk_alloc)) { }
arena_alloc_junk_small(ret, if (zero) {
&bin_infos[binind], false);
} else if (unlikely(opt_zero)) {
memset(ret, 0, usize);
}
}
} else {
if (config_fill && unlikely(opt_junk_alloc)) {
arena_alloc_junk_small(ret, &bin_infos[binind],
true);
}
memset(ret, 0, usize); memset(ret, 0, usize);
} }
arena_decay_tick(tsdn, arena); arena_decay_tick(tsdn, arena);
return ret; return ret;
} }
@@ -1533,10 +1210,17 @@ arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
bool zero, tcache_t *tcache) { bool zero, tcache_t *tcache) {
void *ret; void *ret;
if (usize <= SC_SMALL_MAXCLASS if (usize <= SC_SMALL_MAXCLASS) {
&& (alignment < PAGE
|| (alignment == PAGE && (usize & PAGE_MASK) == 0))) {
/* Small; alignment doesn't require special slab placement. */ /* Small; alignment doesn't require special slab placement. */
/* usize should be a result of sz_sa2u() */
assert((usize & (alignment - 1)) == 0);
/*
* Small usize can't come from an alignment larger than a page.
*/
assert(alignment <= PAGE);
ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize), ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize),
zero, tcache, true); zero, tcache, true);
} else { } else {
@@ -1560,33 +1244,22 @@ arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize) {
safety_check_set_redzone(ptr, usize, SC_LARGE_MINCLASS); safety_check_set_redzone(ptr, usize, SC_LARGE_MINCLASS);
} }
rtree_ctx_t rtree_ctx_fallback; edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true);
arena_t *arena = extent_arena_get(extent);
szind_t szind = sz_size2index(usize); szind_t szind = sz_size2index(usize);
extent_szind_set(extent, szind); edata_szind_set(edata, szind);
rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, emap_remap(tsdn, &arena_emap_global, edata, szind, /* slab */ false);
szind, false);
prof_accum_cancel(tsdn, &arena->prof_accum, usize);
assert(isalloc(tsdn, ptr) == usize); assert(isalloc(tsdn, ptr) == usize);
} }
static size_t static size_t
arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) { arena_prof_demote(tsdn_t *tsdn, edata_t *edata, const void *ptr) {
cassert(config_prof); cassert(config_prof);
assert(ptr != NULL); assert(ptr != NULL);
extent_szind_set(extent, SC_NBINS); edata_szind_set(edata, SC_NBINS);
rtree_ctx_t rtree_ctx_fallback; emap_remap(tsdn, &arena_emap_global, edata, SC_NBINS, /* slab */ false);
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
SC_NBINS, false);
assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS); assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS);
@@ -1599,9 +1272,9 @@ arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
cassert(config_prof); cassert(config_prof);
assert(opt_prof); assert(opt_prof);
extent_t *extent = iealloc(tsdn, ptr); edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
size_t usize = extent_usize_get(extent); size_t usize = edata_usize_get(edata);
size_t bumped_usize = arena_prof_demote(tsdn, extent, ptr); size_t bumped_usize = arena_prof_demote(tsdn, edata, ptr);
if (config_opt_safety_checks && usize < SC_LARGE_MINCLASS) { if (config_opt_safety_checks && usize < SC_LARGE_MINCLASS) {
/* /*
* Currently, we only do redzoning for small sampled * Currently, we only do redzoning for small sampled
@@ -1614,17 +1287,17 @@ arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
sz_size2index(bumped_usize), slow_path); sz_size2index(bumped_usize), slow_path);
} else { } else {
large_dalloc(tsdn, extent); large_dalloc(tsdn, edata);
} }
} }
static void static void
arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) { arena_dissociate_bin_slab(arena_t *arena, edata_t *slab, bin_t *bin) {
/* Dissociate slab from bin. */ /* Dissociate slab from bin. */
if (slab == bin->slabcur) { if (slab == bin->slabcur) {
bin->slabcur = NULL; bin->slabcur = NULL;
} else { } else {
szind_t binind = extent_szind_get(slab); szind_t binind = edata_szind_get(slab);
const bin_info_t *bin_info = &bin_infos[binind]; const bin_info_t *bin_info = &bin_infos[binind];
/* /*
@@ -1641,24 +1314,9 @@ arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) {
} }
static void static void
arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab,
bin_t *bin) {
assert(slab != bin->slabcur);
malloc_mutex_unlock(tsdn, &bin->lock);
/******************************/
arena_slab_dalloc(tsdn, arena, slab);
/****************************/
malloc_mutex_lock(tsdn, &bin->lock);
if (config_stats) {
bin->stats.curslabs--;
}
}
static void
arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
bin_t *bin) { bin_t *bin) {
assert(extent_nfree_get(slab) > 0); assert(edata_nfree_get(slab) > 0);
/* /*
* Make sure that if bin->slabcur is non-NULL, it refers to the * Make sure that if bin->slabcur is non-NULL, it refers to the
@@ -1666,9 +1324,9 @@ arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
* than proactively keeping it pointing at the oldest/lowest non-full * than proactively keeping it pointing at the oldest/lowest non-full
* slab. * slab.
*/ */
if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) { if (bin->slabcur != NULL && edata_snad_comp(bin->slabcur, slab) > 0) {
/* Switch slabcur. */ /* Switch slabcur. */
if (extent_nfree_get(bin->slabcur) > 0) { if (edata_nfree_get(bin->slabcur) > 0) {
arena_bin_slabs_nonfull_insert(bin, bin->slabcur); arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
} else { } else {
arena_bin_slabs_full_insert(arena, bin, bin->slabcur); arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
@@ -1683,56 +1341,54 @@ arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
} }
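/*
 * Illustrative sketch (not upstream code): the "keep slabcur at the
 * oldest/lowest non-full slab" policy mentioned above, expressed as the
 * comparator it relies on.  Ordering by (serial number, address) biases
 * reuse toward older, lower memory, the idea being that newer slabs can
 * empty out and eventually be purged.  Struct and helper names here are
 * hypothetical.
 */
#include <stdint.h>

struct slab_key_sketch {
	uint64_t sn;		/* Allocation serial number (age). */
	uintptr_t addr;		/* Base address of the slab. */
};

static int
slab_snad_comp_sketch(const struct slab_key_sketch *a,
    const struct slab_key_sketch *b) {
	if (a->sn != b->sn) {
		return a->sn < b->sn ? -1 : 1;	/* Older first. */
	}
	if (a->addr != b->addr) {
		return a->addr < b->addr ? -1 : 1;	/* Lower address first. */
	}
	return 0;
}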
static void static void
arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, bin_t *bin, arena_dalloc_bin_slab_prepare(tsdn_t *tsdn, edata_t *slab, bin_t *bin) {
szind_t binind, extent_t *slab, void *ptr, bool junked) { malloc_mutex_assert_owner(tsdn, &bin->lock);
arena_slab_data_t *slab_data = extent_slab_data_get(slab);
const bin_info_t *bin_info = &bin_infos[binind];
if (!junked && config_fill && unlikely(opt_junk_free)) {
arena_dalloc_junk_small(ptr, bin_info);
}
arena_slab_reg_dalloc(slab, slab_data, ptr);
unsigned nfree = extent_nfree_get(slab);
if (nfree == bin_info->nregs) {
arena_dissociate_bin_slab(arena, slab, bin);
arena_dalloc_bin_slab(tsdn, arena, slab, bin);
} else if (nfree == 1 && slab != bin->slabcur) {
arena_bin_slabs_full_remove(arena, bin, slab);
arena_bin_lower_slab(tsdn, arena, slab, bin);
}
assert(slab != bin->slabcur);
if (config_stats) { if (config_stats) {
bin->stats.ndalloc++; bin->stats.curslabs--;
bin->stats.curregs--;
} }
} }
void void
arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin, arena_dalloc_bin_locked_handle_newly_empty(tsdn_t *tsdn, arena_t *arena,
szind_t binind, extent_t *extent, void *ptr) { edata_t *slab, bin_t *bin) {
arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr, arena_dissociate_bin_slab(arena, slab, bin);
true); arena_dalloc_bin_slab_prepare(tsdn, slab, bin);
}
void
arena_dalloc_bin_locked_handle_newly_nonempty(tsdn_t *tsdn, arena_t *arena,
edata_t *slab, bin_t *bin) {
arena_bin_slabs_full_remove(arena, bin, slab);
arena_bin_lower_slab(tsdn, arena, slab, bin);
} }
static void static void
arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) { arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, edata_t *edata, void *ptr) {
szind_t binind = extent_szind_get(extent); szind_t binind = edata_szind_get(edata);
unsigned binshard = extent_binshard_get(extent); unsigned binshard = edata_binshard_get(edata);
bin_t *bin = &arena->bins[binind].bin_shards[binshard]; bin_t *bin = arena_get_bin(arena, binind, binshard);
malloc_mutex_lock(tsdn, &bin->lock); malloc_mutex_lock(tsdn, &bin->lock);
arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr, arena_dalloc_bin_locked_info_t info;
false); arena_dalloc_bin_locked_begin(&info, binind);
bool ret = arena_dalloc_bin_locked_step(tsdn, arena, bin,
&info, binind, edata, ptr);
arena_dalloc_bin_locked_finish(tsdn, arena, bin, &info);
malloc_mutex_unlock(tsdn, &bin->lock); malloc_mutex_unlock(tsdn, &bin->lock);
if (ret) {
arena_slab_dalloc(tsdn, arena, edata);
}
} }
void void
arena_dalloc_small(tsdn_t *tsdn, void *ptr) { arena_dalloc_small(tsdn_t *tsdn, void *ptr) {
extent_t *extent = iealloc(tsdn, ptr); edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
arena_t *arena = extent_arena_get(extent); arena_t *arena = arena_get_from_edata(edata);
arena_dalloc_bin(tsdn, arena, extent, ptr); arena_dalloc_bin(tsdn, arena, edata, ptr);
arena_decay_tick(tsdn, arena); arena_decay_tick(tsdn, arena);
} }
@@ -1743,7 +1399,7 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
/* Calls with non-zero extra had to clamp extra. */ /* Calls with non-zero extra had to clamp extra. */
assert(extra == 0 || size + extra <= SC_LARGE_MAXCLASS); assert(extra == 0 || size + extra <= SC_LARGE_MAXCLASS);
extent_t *extent = iealloc(tsdn, ptr); edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
if (unlikely(size > SC_LARGE_MAXCLASS)) { if (unlikely(size > SC_LARGE_MAXCLASS)) {
ret = true; ret = true;
goto done; goto done;
@@ -1766,18 +1422,19 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
goto done; goto done;
} }
arena_decay_tick(tsdn, extent_arena_get(extent)); arena_t *arena = arena_get_from_edata(edata);
arena_decay_tick(tsdn, arena);
ret = false; ret = false;
} else if (oldsize >= SC_LARGE_MINCLASS } else if (oldsize >= SC_LARGE_MINCLASS
&& usize_max >= SC_LARGE_MINCLASS) { && usize_max >= SC_LARGE_MINCLASS) {
ret = large_ralloc_no_move(tsdn, extent, usize_min, usize_max, ret = large_ralloc_no_move(tsdn, edata, usize_min, usize_max,
zero); zero);
} else { } else {
ret = true; ret = true;
} }
done: done:
assert(extent == iealloc(tsdn, ptr)); assert(edata == emap_edata_lookup(tsdn, &arena_emap_global, ptr));
*newsize = extent_usize_get(extent); *newsize = edata_usize_get(edata);
return ret; return ret;
} }
@@ -1800,7 +1457,7 @@ void *
arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize, arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
size_t size, size_t alignment, bool zero, tcache_t *tcache, size_t size, size_t alignment, bool zero, tcache_t *tcache,
hook_ralloc_args_t *hook_args) { hook_ralloc_args_t *hook_args) {
size_t usize = sz_s2u(size); size_t usize = alignment == 0 ? sz_s2u(size) : sz_sa2u(size, alignment);
if (unlikely(usize == 0 || size > SC_LARGE_MAXCLASS)) { if (unlikely(usize == 0 || size > SC_LARGE_MAXCLASS)) {
return NULL; return NULL;
} }
@@ -1850,6 +1507,29 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
return ret; return ret;
} }
ehooks_t *
arena_get_ehooks(arena_t *arena) {
return base_ehooks_get(arena->base);
}
extent_hooks_t *
arena_set_extent_hooks(tsd_t *tsd, arena_t *arena,
extent_hooks_t *extent_hooks) {
background_thread_info_t *info;
if (have_background_thread) {
info = arena_background_thread_info_get(arena);
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
}
/* Stop using the HPA now that custom extent hooks are installed. */
pa_shard_disable_hpa(tsd_tsdn(tsd), &arena->pa_shard);
extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks);
if (have_background_thread) {
malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
}
return ret;
}
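/*
 * Hedged usage sketch (not upstream code): arena_set_extent_hooks() above is
 * reached through the documented "arena.<i>.extent_hooks" mallctl.  Reading
 * the current hooks is shown here; writing a custom extent_hooks_t through
 * the same mallctl is what disables the HPA for the arena.  Assumes
 * <jemalloc/jemalloc.h> (symbols may carry the je_ prefix in Redis's bundled
 * build).
 */
#include <jemalloc/jemalloc.h>
#include <stdio.h>

static void
show_arena0_extent_hooks(void) {
	extent_hooks_t *hooks;
	size_t sz = sizeof(hooks);
	if (mallctl("arena.0.extent_hooks", &hooks, &sz, NULL, 0) == 0) {
		printf("arena 0 extent hooks at %p\n", (void *)hooks);
	}
}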
dss_prec_t dss_prec_t
arena_dss_prec_get(arena_t *arena) { arena_dss_prec_get(arena_t *arena) {
return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE); return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE);
@@ -1871,7 +1551,7 @@ arena_dirty_decay_ms_default_get(void) {
bool bool
arena_dirty_decay_ms_default_set(ssize_t decay_ms) { arena_dirty_decay_ms_default_set(ssize_t decay_ms) {
if (!arena_decay_ms_valid(decay_ms)) { if (!decay_ms_valid(decay_ms)) {
return true; return true;
} }
atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED); atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED);
@@ -1885,7 +1565,7 @@ arena_muzzy_decay_ms_default_get(void) {
bool bool
arena_muzzy_decay_ms_default_set(ssize_t decay_ms) { arena_muzzy_decay_ms_default_set(ssize_t decay_ms) {
if (!arena_decay_ms_valid(decay_ms)) { if (!decay_ms_valid(decay_ms)) {
return true; return true;
} }
atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED); atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED);
@@ -1896,26 +1576,8 @@ bool
arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit, arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit,
size_t *new_limit) { size_t *new_limit) {
assert(opt_retain); assert(opt_retain);
return pac_retain_grow_limit_get_set(tsd_tsdn(tsd),
pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0); &arena->pa_shard.pac, old_limit, new_limit);
if (new_limit != NULL) {
size_t limit = *new_limit;
/* Grow no more than the new limit. */
if ((new_ind = sz_psz2ind(limit + 1) - 1) >= SC_NPSIZES) {
return true;
}
}
malloc_mutex_lock(tsd_tsdn(tsd), &arena->extent_grow_mtx);
if (old_limit != NULL) {
*old_limit = sz_pind2sz(arena->retain_grow_limit);
}
if (new_limit != NULL) {
arena->retain_grow_limit = new_ind;
}
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->extent_grow_mtx);
return false;
} }
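/*
 * Illustrative sketch (not upstream code): the "grow no more than the new
 * limit" rounding in the removed code above, i.e. pick the largest size
 * class that does not exceed the requested limit.  classes[] and nclasses
 * are hypothetical stand-ins for the page-size class table.
 */
#include <stdbool.h>
#include <stddef.h>

static bool
grow_limit_to_class_sketch(const size_t *classes, size_t nclasses,
    size_t limit, size_t *class_out) {
	bool found = false;
	for (size_t i = 0; i < nclasses; i++) {
		if (classes[i] <= limit) {
			*class_out = classes[i];	/* Largest so far. */
			found = true;
		} else {
			break;	/* classes[] is sorted ascending. */
		}
	}
	return found;	/* false: limit is below the smallest class. */
}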
unsigned unsigned
@@ -1933,13 +1595,8 @@ arena_nthreads_dec(arena_t *arena, bool internal) {
atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED); atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
} }
size_t
arena_extent_sn_next(arena_t *arena) {
return atomic_fetch_add_zu(&arena->extent_sn_next, 1, ATOMIC_RELAXED);
}
arena_t * arena_t *
arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { arena_new(tsdn_t *tsdn, unsigned ind, const arena_config_t *config) {
arena_t *arena; arena_t *arena;
base_t *base; base_t *base;
unsigned i; unsigned i;
@@ -1947,16 +1604,13 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
if (ind == 0) { if (ind == 0) {
base = b0get(); base = b0get();
} else { } else {
base = base_new(tsdn, ind, extent_hooks); base = base_new(tsdn, ind, config->extent_hooks,
config->metadata_use_hooks);
if (base == NULL) { if (base == NULL) {
return NULL; return NULL;
} }
} }
unsigned nbins_total = 0;
for (i = 0; i < SC_NBINS; i++) {
nbins_total += bin_infos[i].n_shards;
}
size_t arena_size = sizeof(arena_t) + sizeof(bin_t) * nbins_total; size_t arena_size = sizeof(arena_t) + sizeof(bin_t) * nbins_total;
arena = (arena_t *)base_alloc(tsdn, base, arena_size, CACHELINE); arena = (arena_t *)base_alloc(tsdn, base, arena_size, CACHELINE);
if (arena == NULL) { if (arena == NULL) {
@@ -1980,110 +1634,56 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
} }
} }
if (config_prof) {
if (prof_accum_init(tsdn, &arena->prof_accum)) {
goto label_error;
}
}
if (config_cache_oblivious) {
/*
* A nondeterministic seed based on the address of arena reduces
* the likelihood of lockstep non-uniform cache index
* utilization among identical concurrent processes, but at the
* cost of test repeatability. For debug builds, instead use a
* deterministic seed.
*/
atomic_store_zu(&arena->offset_state, config_debug ? ind :
(size_t)(uintptr_t)arena, ATOMIC_RELAXED);
}
atomic_store_zu(&arena->extent_sn_next, 0, ATOMIC_RELAXED);
atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(), atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(),
ATOMIC_RELAXED); ATOMIC_RELAXED);
atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED); edata_list_active_init(&arena->large);
extent_list_init(&arena->large);
if (malloc_mutex_init(&arena->large_mtx, "arena_large", if (malloc_mutex_init(&arena->large_mtx, "arena_large",
WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) { WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) {
goto label_error; goto label_error;
} }
/* nstime_t cur_time;
* Delay coalescing for dirty extents despite the disruptive effect on nstime_init_update(&cur_time);
* memory layout for best-fit extent allocation, since cached extents if (pa_shard_init(tsdn, &arena->pa_shard, &arena_pa_central_global,
* are likely to be reused soon after deallocation, and the cost of &arena_emap_global, base, ind, &arena->stats.pa_shard_stats,
* merging/splitting extents is non-trivial. LOCKEDINT_MTX(arena->stats.mtx), &cur_time, oversize_threshold,
*/ arena_dirty_decay_ms_default_get(),
if (extents_init(tsdn, &arena->extents_dirty, extent_state_dirty, arena_muzzy_decay_ms_default_get())) {
true)) {
goto label_error;
}
/*
* Coalesce muzzy extents immediately, because operations on them are in
* the critical path much less often than for dirty extents.
*/
if (extents_init(tsdn, &arena->extents_muzzy, extent_state_muzzy,
false)) {
goto label_error;
}
/*
* Coalesce retained extents immediately, in part because they will
* never be evicted (and therefore there's no opportunity for delayed
* coalescing), but also because operations on retained extents are not
* in the critical path.
*/
if (extents_init(tsdn, &arena->extents_retained, extent_state_retained,
false)) {
goto label_error;
}
if (arena_decay_init(&arena->decay_dirty,
arena_dirty_decay_ms_default_get(), &arena->stats.decay_dirty)) {
goto label_error;
}
if (arena_decay_init(&arena->decay_muzzy,
arena_muzzy_decay_ms_default_get(), &arena->stats.decay_muzzy)) {
goto label_error;
}
arena->extent_grow_next = sz_psz2ind(HUGEPAGE);
arena->retain_grow_limit = sz_psz2ind(SC_LARGE_MAXCLASS);
if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow",
WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
goto label_error;
}
extent_avail_new(&arena->extent_avail);
if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail",
WITNESS_RANK_EXTENT_AVAIL, malloc_mutex_rank_exclusive)) {
goto label_error;
}
/* Initialize bins. */
atomic_store_u(&arena->binshard_next, 0, ATOMIC_RELEASE);
for (i = 0; i < nbins_total; i++) {
bool err = bin_init(&arena->bins[i]);
if (err) {
goto label_error;
}
}
arena->base = base;
/* Set arena before creating background threads. */
arena_set(ind, arena);
nstime_init_update(&arena->create_time);
/*
* We turn on the HPA if set to. There are two exceptions:
* - Custom extent hooks (we should only return memory allocated from
* them in that case).
* - Arena 0 initialization. In this case, we're mid-bootstrapping, and
* so arena_hpa_global is not yet initialized.
*/
if (opt_hpa && ehooks_are_default(base_ehooks_get(base)) && ind != 0) {
hpa_shard_opts_t hpa_shard_opts = opt_hpa_opts;
hpa_shard_opts.deferral_allowed = background_thread_enabled();
if (pa_shard_enable_hpa(tsdn, &arena->pa_shard,
&hpa_shard_opts, &opt_hpa_sec_opts)) {
goto label_error;
}
}
/* We don't support reentrancy for arena 0 bootstrapping. */
if (ind != 0) {
@@ -2129,10 +1729,12 @@ arena_choose_huge(tsd_t *tsd) {
* expected for huge allocations.
*/
if (arena_dirty_decay_ms_default_get() > 0) {
arena_decay_ms_set(tsd_tsdn(tsd), huge_arena,
extent_state_dirty, 0);
}
if (arena_muzzy_decay_ms_default_get() > 0) {
arena_decay_ms_set(tsd_tsdn(tsd), huge_arena,
extent_state_muzzy, 0);
}
}
@@ -2167,8 +1769,8 @@ arena_is_huge(unsigned arena_ind) {
return (arena_ind == huge_arena_ind);
}
bool
arena_boot(sc_data_t *sc_data, base_t *base, bool hpa) {
arena_dirty_decay_ms_default_set(opt_dirty_decay_ms);
arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms);
for (unsigned i = 0; i < SC_NBINS; i++) {
@@ -2176,12 +1778,20 @@ arena_boot(sc_data_t *sc_data) {
div_init(&arena_binind_div_info[i],
(1U << sc->lg_base) + (sc->ndelta << sc->lg_delta));
}
uint32_t cur_offset = (uint32_t)offsetof(arena_t, bins);
for (szind_t i = 0; i < SC_NBINS; i++) {
arena_bin_offsets[i] = cur_offset;
nbins_total += bin_infos[i].n_shards;
cur_offset += (uint32_t)(bin_infos[i].n_shards * sizeof(bin_t));
}
return pa_central_init(&arena_pa_central_global, base, hpa,
&hpa_hooks_default);
}
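A quick illustration of the offset bookkeeping in arena_boot() above; this is an editor's sketch with hypothetical shard counts, not jemalloc source.
/*
 * Illustrative sketch (not jemalloc source): how per-size-class shard counts
 * are flattened into byte offsets into one contiguous bin array.  The shard
 * counts and the stand-in for sizeof(bin_t) are hypothetical.
 */
#include <assert.h>
#include <stdint.h>

int
main(void) {
	const uint32_t n_shards[3] = {2, 2, 1};   /* hypothetical shard counts */
	const uint32_t bin_size = 64;             /* stand-in for sizeof(bin_t) */
	uint32_t offsets[3];
	uint32_t nbins_total = 0;
	uint32_t cur = 0;                         /* offsetof(arena_t, bins) omitted */
	for (int i = 0; i < 3; i++) {
		offsets[i] = cur;                 /* where size class i's shards start */
		nbins_total += n_shards[i];
		cur += n_shards[i] * bin_size;
	}
	assert(offsets[2] == 256 && nbins_total == 5);
	return 0;
}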
void
arena_prefork0(tsdn_t *tsdn, arena_t *arena) {
pa_shard_prefork0(tsdn, &arena->pa_shard);
}
void
@@ -2193,59 +1803,50 @@ arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
void
arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
pa_shard_prefork2(tsdn, &arena->pa_shard);
}
void
arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
pa_shard_prefork3(tsdn, &arena->pa_shard);
}
void
arena_prefork4(tsdn_t *tsdn, arena_t *arena) {
pa_shard_prefork4(tsdn, &arena->pa_shard);
}
void
arena_prefork5(tsdn_t *tsdn, arena_t *arena) {
pa_shard_prefork5(tsdn, &arena->pa_shard);
}
void
arena_prefork6(tsdn_t *tsdn, arena_t *arena) {
base_prefork(tsdn, arena->base);
}
void
arena_prefork7(tsdn_t *tsdn, arena_t *arena) {
malloc_mutex_prefork(tsdn, &arena->large_mtx);
}
void
arena_prefork8(tsdn_t *tsdn, arena_t *arena) {
for (unsigned i = 0; i < nbins_total; i++) {
bin_prefork(tsdn, &arena->bins[i]);
}
}
void
arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
for (unsigned i = 0; i < nbins_total; i++) {
bin_postfork_parent(tsdn, &arena->bins[i]);
}
malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
base_postfork_parent(tsdn, arena->base);
pa_shard_postfork_parent(tsdn, &arena->pa_shard);
if (config_stats) {
malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx);
}
@@ -2253,8 +1854,6 @@ arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
void
arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) {
@@ -2266,32 +1865,26 @@ arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
if (config_stats) {
ql_new(&arena->tcache_ql);
ql_new(&arena->cache_bin_array_descriptor_ql);
tcache_slow_t *tcache_slow = tcache_slow_get(tsdn_tsd(tsdn));
if (tcache_slow != NULL && tcache_slow->arena == arena) {
tcache_t *tcache = tcache_slow->tcache;
ql_elm_new(tcache_slow, link);
ql_tail_insert(&arena->tcache_ql, tcache_slow, link);
cache_bin_array_descriptor_init(
&tcache_slow->cache_bin_array_descriptor,
tcache->bins);
ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
&tcache_slow->cache_bin_array_descriptor, link);
}
}
for (unsigned i = 0; i < nbins_total; i++) {
bin_postfork_child(tsdn, &arena->bins[i]);
}
malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
base_postfork_child(tsdn, arena->base);
pa_shard_postfork_child(tsdn, &arena->pa_shard);
if (config_stats) {
malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx);
}
......
#define JEMALLOC_BACKGROUND_THREAD_C_
#include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/jemalloc_internal_includes.h"
...@@ -54,8 +53,9 @@ pthread_create_wrapper(pthread_t *__restrict thread, const pthread_attr_t *attr, ...@@ -54,8 +53,9 @@ pthread_create_wrapper(pthread_t *__restrict thread, const pthread_attr_t *attr,
bool background_thread_create(tsd_t *tsd, unsigned arena_ind) NOT_REACHED bool background_thread_create(tsd_t *tsd, unsigned arena_ind) NOT_REACHED
bool background_threads_enable(tsd_t *tsd) NOT_REACHED bool background_threads_enable(tsd_t *tsd) NOT_REACHED
bool background_threads_disable(tsd_t *tsd) NOT_REACHED bool background_threads_disable(tsd_t *tsd) NOT_REACHED
void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena, bool background_thread_is_started(background_thread_info_t *info) NOT_REACHED
arena_decay_t *decay, size_t npages_new) NOT_REACHED void background_thread_wakeup_early(background_thread_info_t *info,
nstime_t *remaining_sleep) NOT_REACHED
void background_thread_prefork0(tsdn_t *tsdn) NOT_REACHED void background_thread_prefork0(tsdn_t *tsdn) NOT_REACHED
void background_thread_prefork1(tsdn_t *tsdn) NOT_REACHED void background_thread_prefork1(tsdn_t *tsdn) NOT_REACHED
void background_thread_postfork_parent(tsdn_t *tsdn) NOT_REACHED void background_thread_postfork_parent(tsdn_t *tsdn) NOT_REACHED
...@@ -74,7 +74,7 @@ background_thread_info_init(tsdn_t *tsdn, background_thread_info_t *info) { ...@@ -74,7 +74,7 @@ background_thread_info_init(tsdn_t *tsdn, background_thread_info_t *info) {
info->npages_to_purge_new = 0; info->npages_to_purge_new = 0;
if (config_stats) { if (config_stats) {
info->tot_n_runs = 0; info->tot_n_runs = 0;
nstime_init(&info->tot_sleep_time, 0); nstime_init_zero(&info->tot_sleep_time);
} }
} }
@@ -82,136 +82,40 @@ static inline bool
set_current_thread_affinity(int cpu) {
#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
cpu_set_t cpuset;
#else
# ifndef __NetBSD__
cpuset_t cpuset;
# else
cpuset_t *cpuset;
# endif
#endif
#ifndef __NetBSD__
CPU_ZERO(&cpuset);
CPU_SET(cpu, &cpuset);
#else
cpuset = cpuset_create();
#endif
#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
return (sched_setaffinity(0, sizeof(cpu_set_t), &cpuset) != 0);
#else
# ifndef __NetBSD__
int ret = pthread_setaffinity_np(pthread_self(), sizeof(cpuset_t),
&cpuset);
# else
int ret = pthread_setaffinity_np(pthread_self(), cpuset_size(cpuset),
cpuset);
cpuset_destroy(cpuset);
# endif
return ret != 0;
#endif
}
/* Threshold for determining when to wake up the background thread. */
#define BACKGROUND_THREAD_NPAGES_THRESHOLD UINT64_C(1024)
#define BILLION UINT64_C(1000000000)
/* Minimal sleep interval 100 ms. */
#define BACKGROUND_THREAD_MIN_INTERVAL_NS (BILLION / 10)
static inline size_t
decay_npurge_after_interval(arena_decay_t *decay, size_t interval) {
size_t i;
uint64_t sum = 0;
for (i = 0; i < interval; i++) {
sum += decay->backlog[i] * h_steps[i];
}
for (; i < SMOOTHSTEP_NSTEPS; i++) {
sum += decay->backlog[i] * (h_steps[i] - h_steps[i - interval]);
}
return (size_t)(sum >> SMOOTHSTEP_BFP);
}
static uint64_t
arena_decay_compute_purge_interval_impl(tsdn_t *tsdn, arena_decay_t *decay,
extents_t *extents) {
if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
/* Use minimal interval if decay is contended. */
return BACKGROUND_THREAD_MIN_INTERVAL_NS;
}
uint64_t interval;
ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
if (decay_time <= 0) {
/* Purging is eagerly done or disabled currently. */
interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
goto label_done;
}
uint64_t decay_interval_ns = nstime_ns(&decay->interval);
assert(decay_interval_ns > 0);
size_t npages = extents_npages_get(extents);
if (npages == 0) {
unsigned i;
for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
if (decay->backlog[i] > 0) {
break;
}
}
if (i == SMOOTHSTEP_NSTEPS) {
/* No dirty pages recorded. Sleep indefinitely. */
interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
goto label_done;
}
}
if (npages <= BACKGROUND_THREAD_NPAGES_THRESHOLD) {
/* Use max interval. */
interval = decay_interval_ns * SMOOTHSTEP_NSTEPS;
goto label_done;
}
size_t lb = BACKGROUND_THREAD_MIN_INTERVAL_NS / decay_interval_ns;
size_t ub = SMOOTHSTEP_NSTEPS;
/* Minimal 2 intervals to ensure reaching next epoch deadline. */
lb = (lb < 2) ? 2 : lb;
if ((decay_interval_ns * ub <= BACKGROUND_THREAD_MIN_INTERVAL_NS) ||
(lb + 2 > ub)) {
interval = BACKGROUND_THREAD_MIN_INTERVAL_NS;
goto label_done;
}
assert(lb + 2 <= ub);
size_t npurge_lb, npurge_ub;
npurge_lb = decay_npurge_after_interval(decay, lb);
if (npurge_lb > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
interval = decay_interval_ns * lb;
goto label_done;
}
npurge_ub = decay_npurge_after_interval(decay, ub);
if (npurge_ub < BACKGROUND_THREAD_NPAGES_THRESHOLD) {
interval = decay_interval_ns * ub;
goto label_done;
}
unsigned n_search = 0;
size_t target, npurge;
while ((npurge_lb + BACKGROUND_THREAD_NPAGES_THRESHOLD < npurge_ub)
&& (lb + 2 < ub)) {
target = (lb + ub) / 2;
npurge = decay_npurge_after_interval(decay, target);
if (npurge > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
ub = target;
npurge_ub = npurge;
} else {
lb = target;
npurge_lb = npurge;
}
assert(n_search++ < lg_floor(SMOOTHSTEP_NSTEPS) + 1);
}
interval = decay_interval_ns * (ub + lb) / 2;
label_done:
interval = (interval < BACKGROUND_THREAD_MIN_INTERVAL_NS) ?
BACKGROUND_THREAD_MIN_INTERVAL_NS : interval;
malloc_mutex_unlock(tsdn, &decay->mtx);
return interval;
}
/* Compute purge interval for background threads. */
static uint64_t
arena_decay_compute_purge_interval(tsdn_t *tsdn, arena_t *arena) {
uint64_t i1, i2;
i1 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_dirty,
&arena->extents_dirty);
if (i1 == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
return i1;
}
i2 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_muzzy,
&arena->extents_muzzy);
return i1 < i2 ? i1 : i2;
}
static void
background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,
uint64_t interval) {
@@ -228,7 +132,8 @@ background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,
int ret;
if (interval == BACKGROUND_THREAD_INDEFINITE_SLEEP) {
background_thread_wakeup_time_set(tsdn, info,
BACKGROUND_THREAD_INDEFINITE_SLEEP);
ret = pthread_cond_wait(&info->cond, &info->mtx.lock);
assert(ret == 0);
} else {
@@ -236,8 +141,7 @@ background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,
interval <= BACKGROUND_THREAD_INDEFINITE_SLEEP);
/* We need malloc clock (can be different from tv). */
nstime_t next_wakeup;
nstime_init_update(&next_wakeup);
nstime_iadd(&next_wakeup, interval);
assert(nstime_ns(&next_wakeup) <
BACKGROUND_THREAD_INDEFINITE_SLEEP);
@@ -254,8 +158,6 @@ background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,
assert(!background_thread_indefinite_sleep(info));
ret = pthread_cond_timedwait(&info->cond, &info->mtx.lock, &ts);
assert(ret == ETIMEDOUT || ret == 0);
}
if (config_stats) {
gettimeofday(&tv, NULL);
@@ -283,28 +185,48 @@ background_thread_pause_check(tsdn_t *tsdn, background_thread_info_t *info) {
}
static inline void
background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info,
unsigned ind) {
uint64_t ns_until_deferred = BACKGROUND_THREAD_DEFERRED_MAX;
unsigned narenas = narenas_total_get();
bool slept_indefinitely = background_thread_indefinite_sleep(info);
for (unsigned i = ind; i < narenas; i += max_background_threads) {
arena_t *arena = arena_get(tsdn, i, false);
if (!arena) {
continue;
}
/*
* If thread was woken up from the indefinite sleep, don't
* do the work instantly, but rather check when the deferred
* work that caused this thread to wake up is scheduled for.
*/
if (!slept_indefinitely) {
arena_do_deferred_work(tsdn, arena);
}
if (ns_until_deferred <= BACKGROUND_THREAD_MIN_INTERVAL_NS) {
/* Min interval will be used. */
continue;
}
uint64_t ns_arena_deferred = pa_shard_time_until_deferred_work(
tsdn, &arena->pa_shard);
if (ns_arena_deferred < ns_until_deferred) {
ns_until_deferred = ns_arena_deferred;
}
}
uint64_t sleep_ns;
if (ns_until_deferred == BACKGROUND_THREAD_DEFERRED_MAX) {
sleep_ns = BACKGROUND_THREAD_INDEFINITE_SLEEP;
} else {
sleep_ns =
(ns_until_deferred < BACKGROUND_THREAD_MIN_INTERVAL_NS)
? BACKGROUND_THREAD_MIN_INTERVAL_NS
: ns_until_deferred;
}
background_thread_sleep(tsdn, info, sleep_ns);
}
static bool
@@ -508,7 +430,7 @@ background_thread_entry(void *ind_arg) {
assert(thread_ind < max_background_threads);
#ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
pthread_setname_np(pthread_self(), "jemalloc_bg_thd");
#elif defined(__FreeBSD__) || defined(__DragonFly__)
pthread_set_name_np(pthread_self(), "jemalloc_bg_thd");
#endif
if (opt_percpu_arena != percpu_arena_disabled) {
@@ -608,16 +530,16 @@ background_threads_enable(tsd_t *tsd) {
malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
VARIABLE_ARRAY(bool, marked, max_background_threads);
unsigned nmarked;
for (unsigned i = 0; i < max_background_threads; i++) {
marked[i] = false;
}
nmarked = 0;
/* Thread 0 is required and created at the end. */
marked[0] = true;
/* Mark the threads we need to create for thread 0. */
unsigned narenas = narenas_total_get();
for (unsigned i = 1; i < narenas; i++) {
if (marked[i % max_background_threads] ||
arena_get(tsd_tsdn(tsd), i, false) == NULL) {
continue;
@@ -634,7 +556,18 @@ background_threads_enable(tsd_t *tsd) {
}
}
bool err = background_thread_create_locked(tsd, 0);
if (err) {
return true;
}
for (unsigned i = 0; i < narenas; i++) {
arena_t *arena = arena_get(tsd_tsdn(tsd), i, false);
if (arena != NULL) {
pa_shard_set_deferral_allowed(tsd_tsdn(tsd),
&arena->pa_shard, true);
}
}
return false;
}
bool
@@ -648,92 +581,36 @@ background_threads_disable(tsd_t *tsd) {
return true;
}
assert(n_background_threads == 0);
unsigned narenas = narenas_total_get();
for (unsigned i = 0; i < narenas; i++) {
arena_t *arena = arena_get(tsd_tsdn(tsd), i, false);
if (arena != NULL) {
pa_shard_set_deferral_allowed(tsd_tsdn(tsd),
&arena->pa_shard, false);
}
}
return false;
}
bool
background_thread_is_started(background_thread_info_t *info) {
return info->state == background_thread_started;
}
void
background_thread_wakeup_early(background_thread_info_t *info,
nstime_t *remaining_sleep) {
/*
* This is an optimization to increase batching. At this point
* we know that background thread wakes up soon, so the time to cache
* the just freed memory is bounded and low.
*/
if (remaining_sleep != NULL && nstime_ns(remaining_sleep) <
BACKGROUND_THREAD_MIN_INTERVAL_NS) {
return;
}
pthread_cond_signal(&info->cond);
}
/* Check if we need to signal the background thread early. */
void
background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
arena_decay_t *decay, size_t npages_new) {
background_thread_info_t *info = arena_background_thread_info_get(
arena);
if (malloc_mutex_trylock(tsdn, &info->mtx)) {
/*
* Background thread may hold the mutex for a long period of
* time. We'd like to avoid the variance on application
* threads. So keep this non-blocking, and leave the work to a
* future epoch.
*/
return;
}
if (info->state != background_thread_started) {
goto label_done;
}
if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
goto label_done;
}
ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
if (decay_time <= 0) {
/* Purging is eagerly done or disabled currently. */
goto label_done_unlock2;
}
uint64_t decay_interval_ns = nstime_ns(&decay->interval);
assert(decay_interval_ns > 0);
nstime_t diff;
nstime_init(&diff, background_thread_wakeup_time_get(info));
if (nstime_compare(&diff, &decay->epoch) <= 0) {
goto label_done_unlock2;
}
nstime_subtract(&diff, &decay->epoch);
if (nstime_ns(&diff) < BACKGROUND_THREAD_MIN_INTERVAL_NS) {
goto label_done_unlock2;
}
if (npages_new > 0) {
size_t n_epoch = (size_t)(nstime_ns(&diff) / decay_interval_ns);
/*
* Compute how many new pages we would need to purge by the next
* wakeup, which is used to determine if we should signal the
* background thread.
*/
uint64_t npurge_new;
if (n_epoch >= SMOOTHSTEP_NSTEPS) {
npurge_new = npages_new;
} else {
uint64_t h_steps_max = h_steps[SMOOTHSTEP_NSTEPS - 1];
assert(h_steps_max >=
h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
npurge_new = npages_new * (h_steps_max -
h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
npurge_new >>= SMOOTHSTEP_BFP;
}
info->npages_to_purge_new += npurge_new;
}
bool should_signal;
if (info->npages_to_purge_new > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
should_signal = true;
} else if (unlikely(background_thread_indefinite_sleep(info)) &&
(extents_npages_get(&arena->extents_dirty) > 0 ||
extents_npages_get(&arena->extents_muzzy) > 0 ||
info->npages_to_purge_new > 0)) {
should_signal = true;
} else {
should_signal = false;
}
if (should_signal) {
info->npages_to_purge_new = 0;
pthread_cond_signal(&info->cond);
}
label_done_unlock2:
malloc_mutex_unlock(tsdn, &decay->mtx);
label_done:
malloc_mutex_unlock(tsdn, &info->mtx);
} }
void
@@ -794,9 +671,11 @@ background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {
return true;
}
nstime_init_zero(&stats->run_interval);
memset(&stats->max_counter_per_bg_thd, 0, sizeof(mutex_prof_data_t));
uint64_t num_runs = 0;
stats->num_threads = n_background_threads;
for (unsigned i = 0; i < max_background_threads; i++) {
background_thread_info_t *info = &background_thread_info[i];
if (malloc_mutex_trylock(tsdn, &info->mtx)) {
@@ -809,6 +688,8 @@ background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {
if (info->state != background_thread_stopped) {
num_runs += info->tot_n_runs;
nstime_add(&stats->run_interval, &info->tot_sleep_time);
malloc_mutex_prof_max_update(tsdn,
&stats->max_counter_per_bg_thd, &info->mtx);
}
malloc_mutex_unlock(tsdn, &info->mtx);
}
@@ -892,7 +773,7 @@ background_thread_boot0(void) {
}
bool
background_thread_boot1(tsdn_t *tsdn, base_t *base) {
#ifdef JEMALLOC_BACKGROUND_THREAD
assert(have_background_thread);
assert(narenas_total_get() > 0);
@@ -911,7 +792,7 @@ background_thread_boot1(tsdn_t *tsdn) {
}
background_thread_info = (background_thread_info_t *)base_alloc(tsdn,
base, opt_max_background_threads *
sizeof(background_thread_info_t), CACHELINE);
if (background_thread_info == NULL) {
return true;
......
#define JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/jemalloc_internal_includes.h"
...@@ -7,6 +6,15 @@ ...@@ -7,6 +6,15 @@
#include "jemalloc/internal/mutex.h" #include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/sz.h" #include "jemalloc/internal/sz.h"
/*
* In auto mode, arenas switch to huge pages for the base allocator on the
* second base block. a0 switches to thp on the 5th block (after 20 megabytes
* of metadata), since more metadata (e.g. rtree nodes) come from a0's base.
*/
#define BASE_AUTO_THP_THRESHOLD 2
#define BASE_AUTO_THP_THRESHOLD_A0 5
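A small editor's sketch of the effect of the two thresholds above; the helper name and its exact call site are hypothetical, not jemalloc source.
/*
 * Illustrative sketch (not jemalloc source): a hypothetical helper that makes
 * the decision the comment above describes -- switch a base's metadata blocks
 * to transparent huge pages once enough blocks have been allocated, with a
 * higher threshold for arena 0 (a0).
 */
#include <assert.h>
#include <stdbool.h>

static bool
should_switch_to_thp(bool is_a0, unsigned n_blocks_alloced) {
	unsigned threshold = is_a0 ? 5U /* BASE_AUTO_THP_THRESHOLD_A0 */
	    : 2U /* BASE_AUTO_THP_THRESHOLD */;
	return n_blocks_alloced >= threshold;
}

int
main(void) {
	assert(!should_switch_to_thp(true, 4) && should_switch_to_thp(true, 5));
	assert(should_switch_to_thp(false, 2));
	return 0;
}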
/******************************************************************************/
/* Data. */
@@ -29,7 +37,7 @@ metadata_thp_madvise(void) {
}
static void *
base_map(tsdn_t *tsdn, ehooks_t *ehooks, unsigned ind, size_t size) {
void *addr;
bool zero = true;
bool commit = true;
@@ -37,22 +45,21 @@ base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size)
/* Use huge page sizes and alignment regardless of opt_metadata_thp. */
assert(size == HUGEPAGE_CEILING(size));
size_t alignment = HUGEPAGE;
if (ehooks_are_default(ehooks)) {
addr = extent_alloc_mmap(NULL, size, alignment, &zero, &commit);
if (have_madvise_huge && addr) {
pages_set_thp_state(addr, size);
}
} else {
addr = ehooks_alloc(tsdn, ehooks, NULL, size, alignment, &zero,
&commit);
}
return addr;
}
static void
base_unmap(tsdn_t *tsdn, ehooks_t *ehooks, unsigned ind, void *addr,
size_t size) {
/*
* Cascade through dalloc, decommit, purge_forced, and purge_lazy,
@@ -64,7 +71,7 @@ base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr,
* may in fact want the end state of all associated virtual memory to be
* in some consistent-but-allocated state.
*/
if (ehooks_are_default(ehooks)) {
if (!extent_dalloc_mmap(addr, size)) {
goto label_done;
}
@@ -80,31 +87,19 @@ base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr,
/* Nothing worked. This should never happen. */
not_reached();
} else {
if (!ehooks_dalloc(tsdn, ehooks, addr, size, true)) {
goto label_done;
}
if (!ehooks_decommit(tsdn, ehooks, addr, size, 0, size)) {
goto label_done;
}
if (!ehooks_purge_forced(tsdn, ehooks, addr, size, 0, size)) {
goto label_done;
}
if (!ehooks_purge_lazy(tsdn, ehooks, addr, size, 0, size)) {
goto label_done;
}
/* Nothing worked. That's the application's problem. */
}
label_done:
if (metadata_thp_madvise()) {
@@ -116,14 +111,14 @@ label_done:
}
static void
base_edata_init(size_t *extent_sn_next, edata_t *edata, void *addr,
size_t size) {
size_t sn;
sn = *extent_sn_next;
(*extent_sn_next)++;
edata_binit(edata, addr, size, sn);
}
static size_t
@@ -169,7 +164,7 @@ base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
pages_huge(block, block->size);
if (config_stats) {
base->n_thp += HUGEPAGE_CEILING(block->size -
edata_bsize_get(&block->edata)) >> LG_HUGEPAGE;
}
block = block->next;
assert(block == NULL || (base_ind_get(base) == 0));
@@ -177,34 +172,34 @@ base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
}
static void *
base_extent_bump_alloc_helper(edata_t *edata, size_t *gap_size, size_t size,
size_t alignment) {
void *ret;
assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM));
assert(size == ALIGNMENT_CEILING(size, alignment));
*gap_size = ALIGNMENT_CEILING((uintptr_t)edata_addr_get(edata),
alignment) - (uintptr_t)edata_addr_get(edata);
ret = (void *)((uintptr_t)edata_addr_get(edata) + *gap_size);
assert(edata_bsize_get(edata) >= *gap_size + size);
edata_binit(edata, (void *)((uintptr_t)edata_addr_get(edata) +
*gap_size + size), edata_bsize_get(edata) - *gap_size - size,
edata_sn_get(edata));
return ret;
}
static void
base_extent_bump_alloc_post(base_t *base, edata_t *edata, size_t gap_size,
void *addr, size_t size) {
if (edata_bsize_get(edata) > 0) {
/*
* Compute the index for the largest size class that does not
* exceed extent's size.
*/
szind_t index_floor =
sz_size2index(edata_bsize_get(edata) + 1) - 1;
edata_heap_insert(&base->avail[index_floor], edata);
}
if (config_stats) {
@@ -229,13 +224,13 @@ base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size,
}
static void *
base_extent_bump_alloc(base_t *base, edata_t *edata, size_t size,
size_t alignment) {
void *ret;
size_t gap_size;
ret = base_extent_bump_alloc_helper(edata, &gap_size, size, alignment);
base_extent_bump_alloc_post(base, edata, gap_size, ret, size);
return ret;
}
@@ -245,8 +240,8 @@ base_extent_bump_alloc(base_t *base, extent_t *extent, size_t size,
* On success a pointer to the initialized base_block_t header is returned.
*/
static base_block_t *
base_block_alloc(tsdn_t *tsdn, base_t *base, ehooks_t *ehooks, unsigned ind,
pszind_t *pind_last, size_t *extent_sn_next, size_t size,
size_t alignment) {
alignment = ALIGNMENT_CEILING(alignment, QUANTUM);
size_t usize = ALIGNMENT_CEILING(size, alignment);
@@ -267,7 +262,7 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next));
size_t block_size = (min_block_size > next_block_size) ? min_block_size
: next_block_size;
base_block_t *block = (base_block_t *)base_map(tsdn, ehooks, ind,
block_size);
if (block == NULL) {
return NULL;
@@ -295,7 +290,7 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
block->size = block_size;
block->next = NULL;
assert(block_size >= header_size);
base_edata_init(extent_sn_next, &block->edata,
(void *)((uintptr_t)block + header_size), block_size - header_size);
return block;
}
@@ -304,17 +299,17 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
* Allocate an extent that is at least as large as specified size, with
* specified alignment.
*/
static edata_t *
base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
malloc_mutex_assert_owner(tsdn, &base->mtx);
ehooks_t *ehooks = base_ehooks_get_for_metadata(base);
/*
* Drop mutex during base_block_alloc(), because an extent hook will be
* called.
*/
malloc_mutex_unlock(tsdn, &base->mtx);
base_block_t *block = base_block_alloc(tsdn, base, ehooks,
base_ind_get(base), &base->pind_last, &base->extent_sn_next, size,
alignment);
malloc_mutex_lock(tsdn, &base->mtx);
@@ -338,7 +333,7 @@ base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
assert(base->resident <= base->mapped);
assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
}
return &block->edata;
}
base_t *
@@ -347,10 +342,22 @@ b0get(void) {
}
base_t *
base_new(tsdn_t *tsdn, unsigned ind, const extent_hooks_t *extent_hooks,
bool metadata_use_hooks) {
pszind_t pind_last = 0;
size_t extent_sn_next = 0;
/*
* The base will contain the ehooks eventually, but it itself is
* allocated using them. So we use some stack ehooks to bootstrap its
* memory, and then initialize the ehooks within the base_t.
*/
ehooks_t fake_ehooks;
ehooks_init(&fake_ehooks, metadata_use_hooks ?
(extent_hooks_t *)extent_hooks :
(extent_hooks_t *)&ehooks_default_extent_hooks, ind);
base_block_t *block = base_block_alloc(tsdn, NULL, &fake_ehooks, ind,
&pind_last, &extent_sn_next, sizeof(base_t), QUANTUM);
if (block == NULL) {
return NULL;
@@ -359,13 +366,15 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
size_t gap_size;
size_t base_alignment = CACHELINE;
size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment);
base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->edata,
&gap_size, base_size, base_alignment);
ehooks_init(&base->ehooks, (extent_hooks_t *)extent_hooks, ind);
ehooks_init(&base->ehooks_base, metadata_use_hooks ?
(extent_hooks_t *)extent_hooks :
(extent_hooks_t *)&ehooks_default_extent_hooks, ind);
if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE,
malloc_mutex_rank_exclusive)) {
base_unmap(tsdn, &fake_ehooks, ind, block, block->size);
return NULL;
}
base->pind_last = pind_last;
@@ -373,7 +382,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
base->blocks = block;
base->auto_thp_switched = false;
for (szind_t i = 0; i < SC_NSIZES; i++) {
edata_heap_new(&base->avail[i]);
}
if (config_stats) {
base->allocated = sizeof(base_block_t);
@@ -386,7 +395,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
assert(base->resident <= base->mapped);
assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
}
base_extent_bump_alloc_post(base, &block->edata, gap_size, base,
base_size);
return base;
@@ -394,26 +403,31 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
void
base_delete(tsdn_t *tsdn, base_t *base) {
ehooks_t *ehooks = base_ehooks_get_for_metadata(base);
base_block_t *next = base->blocks;
do {
base_block_t *block = next;
next = block->next;
base_unmap(tsdn, ehooks, base_ind_get(base), block,
block->size);
} while (next != NULL);
}
ehooks_t *
base_ehooks_get(base_t *base) {
return &base->ehooks;
}
ehooks_t *
base_ehooks_get_for_metadata(base_t *base) {
return &base->ehooks_base;
}
extent_hooks_t *
base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) {
extent_hooks_t *old_extent_hooks =
ehooks_get_extent_hooks_ptr(&base->ehooks);
ehooks_init(&base->ehooks, extent_hooks, ehooks_ind_get(&base->ehooks));
return old_extent_hooks;
}
@@ -424,28 +438,28 @@ base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
size_t usize = ALIGNMENT_CEILING(size, alignment);
size_t asize = usize + alignment - QUANTUM;
edata_t *edata = NULL;
malloc_mutex_lock(tsdn, &base->mtx);
for (szind_t i = sz_size2index(asize); i < SC_NSIZES; i++) {
edata = edata_heap_remove_first(&base->avail[i]);
if (edata != NULL) {
/* Use existing space. */
break;
}
}
if (edata == NULL) {
/* Try to allocate more space. */
edata = base_extent_alloc(tsdn, base, usize, alignment);
}
void *ret;
if (edata == NULL) {
ret = NULL;
goto label_return;
}
ret = base_extent_bump_alloc(base, edata, usize, alignment);
if (esn != NULL) {
*esn = (size_t)edata_sn_get(edata);
}
label_return:
malloc_mutex_unlock(tsdn, &base->mtx);
@@ -465,16 +479,16 @@ base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
return base_alloc_impl(tsdn, base, size, alignment, NULL);
}
edata_t *
base_alloc_edata(tsdn_t *tsdn, base_t *base) {
size_t esn;
edata_t *edata = base_alloc_impl(tsdn, base, sizeof(edata_t),
EDATA_ALIGNMENT, &esn);
if (edata == NULL) {
return NULL;
}
edata_esn_set(edata, esn);
return edata;
}
void
@@ -509,6 +523,7 @@ base_postfork_child(tsdn_t *tsdn, base_t *base) {
bool
base_boot(tsdn_t *tsdn) {
b0 = base_new(tsdn, 0, (extent_hooks_t *)&ehooks_default_extent_hooks,
/* metadata_use_hooks */ true);
return (b0 == NULL);
}
@@ -6,26 +6,6 @@
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/witness.h"
bin_info_t bin_infos[SC_NBINS];
static void
bin_infos_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
bin_info_t bin_infos[SC_NBINS]) {
for (unsigned i = 0; i < SC_NBINS; i++) {
bin_info_t *bin_info = &bin_infos[i];
sc_t *sc = &sc_data->sc[i];
bin_info->reg_size = ((size_t)1U << sc->lg_base)
+ ((size_t)sc->ndelta << sc->lg_delta);
bin_info->slab_size = (sc->pgs << LG_PAGE);
bin_info->nregs =
(uint32_t)(bin_info->slab_size / bin_info->reg_size);
bin_info->n_shards = bin_shard_sizes[i];
bitmap_info_t bitmap_info = BITMAP_INFO_INITIALIZER(
bin_info->nregs);
bin_info->bitmap_info = bitmap_info;
}
}
bool
bin_update_shard_size(unsigned bin_shard_sizes[SC_NBINS], size_t start_size,
size_t end_size, size_t nshards) {
@@ -58,12 +38,6 @@ bin_shard_sizes_boot(unsigned bin_shard_sizes[SC_NBINS]) {
}
}
void
bin_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) {
assert(sc_data->initialized);
bin_infos_init(sc_data, bin_shard_sizes, bin_infos);
}
bool
bin_init(bin_t *bin) {
if (malloc_mutex_init(&bin->lock, "bin", WITNESS_RANK_BIN,
@@ -71,8 +45,8 @@ bin_init(bin_t *bin) {
return true;
}
bin->slabcur = NULL;
edata_heap_new(&bin->slabs_nonfull);
edata_list_active_init(&bin->slabs_full);
if (config_stats) {
memset(&bin->stats, 0, sizeof(bin_stats_t));
}
......
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/bin_info.h"
bin_info_t bin_infos[SC_NBINS];
static void
bin_infos_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
bin_info_t infos[SC_NBINS]) {
for (unsigned i = 0; i < SC_NBINS; i++) {
bin_info_t *bin_info = &infos[i];
sc_t *sc = &sc_data->sc[i];
bin_info->reg_size = ((size_t)1U << sc->lg_base)
+ ((size_t)sc->ndelta << sc->lg_delta);
bin_info->slab_size = (sc->pgs << LG_PAGE);
bin_info->nregs =
(uint32_t)(bin_info->slab_size / bin_info->reg_size);
bin_info->n_shards = bin_shard_sizes[i];
bitmap_info_t bitmap_info = BITMAP_INFO_INITIALIZER(
bin_info->nregs);
bin_info->bitmap_info = bitmap_info;
}
}
void
bin_info_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) {
assert(sc_data->initialized);
bin_infos_init(sc_data, bin_shard_sizes, bin_infos);
}
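A worked example of the formulas in bin_infos_init() above; this is an editor's sketch with a hypothetical size class, not jemalloc source.
/*
 * Worked example (not jemalloc source): bin_info_t fields for a hypothetical
 * 80-byte size class (lg_base = 6, ndelta = 1, lg_delta = 4) with 5-page
 * slabs, assuming LG_PAGE = 12 (4 KiB pages).
 */
#include <assert.h>
#include <stdint.h>

int
main(void) {
	size_t reg_size = ((size_t)1 << 6) + ((size_t)1 << 4);   /* 64 + 16 = 80 */
	size_t slab_size = (size_t)5 << 12;                      /* 5 pages = 20480 */
	uint32_t nregs = (uint32_t)(slab_size / reg_size);       /* 256 regions per slab */
	assert(reg_size == 80 && slab_size == 20480 && nregs == 256);
	return 0;
}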
#define JEMALLOC_BITMAP_C_
#include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/jemalloc_internal_includes.h"
......
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/buf_writer.h"
#include "jemalloc/internal/malloc_io.h"
static void *
buf_writer_allocate_internal_buf(tsdn_t *tsdn, size_t buf_len) {
#ifdef JEMALLOC_JET
if (buf_len > SC_LARGE_MAXCLASS) {
return NULL;
}
#else
assert(buf_len <= SC_LARGE_MAXCLASS);
#endif
return iallocztm(tsdn, buf_len, sz_size2index(buf_len), false, NULL,
true, arena_get(tsdn, 0, false), true);
}
static void
buf_writer_free_internal_buf(tsdn_t *tsdn, void *buf) {
if (buf != NULL) {
idalloctm(tsdn, buf, NULL, NULL, true, true);
}
}
static void
buf_writer_assert(buf_writer_t *buf_writer) {
assert(buf_writer != NULL);
assert(buf_writer->write_cb != NULL);
if (buf_writer->buf != NULL) {
assert(buf_writer->buf_size > 0);
} else {
assert(buf_writer->buf_size == 0);
assert(buf_writer->internal_buf);
}
assert(buf_writer->buf_end <= buf_writer->buf_size);
}
bool
buf_writer_init(tsdn_t *tsdn, buf_writer_t *buf_writer, write_cb_t *write_cb,
void *cbopaque, char *buf, size_t buf_len) {
if (write_cb != NULL) {
buf_writer->write_cb = write_cb;
} else {
buf_writer->write_cb = je_malloc_message != NULL ?
je_malloc_message : wrtmessage;
}
buf_writer->cbopaque = cbopaque;
assert(buf_len >= 2);
if (buf != NULL) {
buf_writer->buf = buf;
buf_writer->internal_buf = false;
} else {
buf_writer->buf = buf_writer_allocate_internal_buf(tsdn,
buf_len);
buf_writer->internal_buf = true;
}
if (buf_writer->buf != NULL) {
buf_writer->buf_size = buf_len - 1; /* Allowing for '\0'. */
} else {
buf_writer->buf_size = 0;
}
buf_writer->buf_end = 0;
buf_writer_assert(buf_writer);
return buf_writer->buf == NULL;
}
void
buf_writer_flush(buf_writer_t *buf_writer) {
buf_writer_assert(buf_writer);
if (buf_writer->buf == NULL) {
return;
}
buf_writer->buf[buf_writer->buf_end] = '\0';
buf_writer->write_cb(buf_writer->cbopaque, buf_writer->buf);
buf_writer->buf_end = 0;
buf_writer_assert(buf_writer);
}
void
buf_writer_cb(void *buf_writer_arg, const char *s) {
buf_writer_t *buf_writer = (buf_writer_t *)buf_writer_arg;
buf_writer_assert(buf_writer);
if (buf_writer->buf == NULL) {
buf_writer->write_cb(buf_writer->cbopaque, s);
return;
}
size_t i, slen, n;
for (i = 0, slen = strlen(s); i < slen; i += n) {
if (buf_writer->buf_end == buf_writer->buf_size) {
buf_writer_flush(buf_writer);
}
size_t s_remain = slen - i;
size_t buf_remain = buf_writer->buf_size - buf_writer->buf_end;
n = s_remain < buf_remain ? s_remain : buf_remain;
memcpy(buf_writer->buf + buf_writer->buf_end, s + i, n);
buf_writer->buf_end += n;
buf_writer_assert(buf_writer);
}
assert(i == slen);
}
void
buf_writer_terminate(tsdn_t *tsdn, buf_writer_t *buf_writer) {
buf_writer_assert(buf_writer);
buf_writer_flush(buf_writer);
if (buf_writer->internal_buf) {
buf_writer_free_internal_buf(tsdn, buf_writer->buf);
}
}
void
buf_writer_pipe(buf_writer_t *buf_writer, read_cb_t *read_cb,
void *read_cbopaque) {
/*
* A tiny local buffer in case the buffered writer failed to allocate
* at init.
*/
static char backup_buf[16];
static buf_writer_t backup_buf_writer;
buf_writer_assert(buf_writer);
assert(read_cb != NULL);
if (buf_writer->buf == NULL) {
buf_writer_init(TSDN_NULL, &backup_buf_writer,
buf_writer->write_cb, buf_writer->cbopaque, backup_buf,
sizeof(backup_buf));
buf_writer = &backup_buf_writer;
}
assert(buf_writer->buf != NULL);
ssize_t nread = 0;
do {
buf_writer->buf_end += nread;
buf_writer_assert(buf_writer);
if (buf_writer->buf_end == buf_writer->buf_size) {
buf_writer_flush(buf_writer);
}
nread = read_cb(read_cbopaque,
buf_writer->buf + buf_writer->buf_end,
buf_writer->buf_size - buf_writer->buf_end);
} while (nread > 0);
buf_writer_flush(buf_writer);
}
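A usage sketch for the buffered writer defined above; the callback and wrapper function here are hypothetical, and the calls use only the signatures visible in this file.
/*
 * Editor's usage sketch (not jemalloc source): batching output through the
 * buffered writer above.  Assumes the jemalloc internal headers are in scope;
 * example_write_cb() and example_buf_writer_usage() are hypothetical names.
 */
#include <stdio.h>

static void
example_write_cb(void *cbopaque, const char *s) {
	(void)cbopaque;
	fputs(s, stderr);
}

static void
example_buf_writer_usage(tsdn_t *tsdn) {
	buf_writer_t bw;
	/* Passing buf == NULL asks for an internal buffer of the given length;
	 * on allocation failure the writer degrades to unbuffered writes. */
	buf_writer_init(tsdn, &bw, example_write_cb, NULL, NULL, 4096);
	buf_writer_cb(&bw, "hello ");
	buf_writer_cb(&bw, "world\n");
	/* Flushes any buffered bytes and frees the internal buffer. */
	buf_writer_terminate(tsdn, &bw);
}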
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/cache_bin.h"
#include "jemalloc/internal/safety_check.h"
void
cache_bin_info_init(cache_bin_info_t *info,
cache_bin_sz_t ncached_max) {
assert(ncached_max <= CACHE_BIN_NCACHED_MAX);
size_t stack_size = (size_t)ncached_max * sizeof(void *);
assert(stack_size < ((size_t)1 << (sizeof(cache_bin_sz_t) * 8)));
info->ncached_max = (cache_bin_sz_t)ncached_max;
}
void
cache_bin_info_compute_alloc(cache_bin_info_t *infos, szind_t ninfos,
size_t *size, size_t *alignment) {
/*
* For the total bin stack region (per tcache), reserve 2 more slots so
* that
* 1) the empty position can be safely read on the fast path before
* checking "is_empty"; and
* 2) the cur_ptr can go beyond the empty position by 1 step safely on
* the fast path (i.e. no overflow).
*/
*size = sizeof(void *) * 2;
for (szind_t i = 0; i < ninfos; i++) {
assert(infos[i].ncached_max > 0);
*size += infos[i].ncached_max * sizeof(void *);
}
/*
* Align to at least PAGE, to minimize the # of TLBs needed by the
* smaller sizes; also helps if the larger sizes don't get used at all.
*/
*alignment = PAGE;
}
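A worked example of the size computed by cache_bin_info_compute_alloc() above; the bin counts are hypothetical and the arithmetic assumes 8-byte pointers.
/*
 * Worked example (not jemalloc source): total stack-region size for two
 * hypothetical bins.  One extra slot is reserved before and one after the bin
 * stacks for the preceding/trailing junk pointers written by the
 * pre/postincrement helpers below.
 */
#include <assert.h>
#include <stddef.h>

int
main(void) {
	size_t ncached_max[2] = {20, 8};     /* hypothetical per-bin capacities */
	size_t size = sizeof(void *) * 2;    /* the two reserved guard slots */
	for (int i = 0; i < 2; i++) {
		size += ncached_max[i] * sizeof(void *);
	}
	/* (2 + 20 + 8) slots; 240 bytes with 8-byte pointers, then PAGE-aligned. */
	assert(size == (2 + 20 + 8) * sizeof(void *));
	return 0;
}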
void
cache_bin_preincrement(cache_bin_info_t *infos, szind_t ninfos, void *alloc,
size_t *cur_offset) {
if (config_debug) {
size_t computed_size;
size_t computed_alignment;
/* Pointer should be as aligned as we asked for. */
cache_bin_info_compute_alloc(infos, ninfos, &computed_size,
&computed_alignment);
assert(((uintptr_t)alloc & (computed_alignment - 1)) == 0);
}
*(uintptr_t *)((uintptr_t)alloc + *cur_offset) =
cache_bin_preceding_junk;
*cur_offset += sizeof(void *);
}
void
cache_bin_postincrement(cache_bin_info_t *infos, szind_t ninfos, void *alloc,
size_t *cur_offset) {
*(uintptr_t *)((uintptr_t)alloc + *cur_offset) =
cache_bin_trailing_junk;
*cur_offset += sizeof(void *);
}
void
cache_bin_init(cache_bin_t *bin, cache_bin_info_t *info, void *alloc,
size_t *cur_offset) {
/*
* The full_position points to the lowest available space. Allocations
* will access the slots toward higher addresses (for the benefit of
* adjacent prefetch).
*/
void *stack_cur = (void *)((uintptr_t)alloc + *cur_offset);
void *full_position = stack_cur;
uint16_t bin_stack_size = info->ncached_max * sizeof(void *);
*cur_offset += bin_stack_size;
void *empty_position = (void *)((uintptr_t)alloc + *cur_offset);
/* Init to the empty position. */
bin->stack_head = (void **)empty_position;
bin->low_bits_low_water = (uint16_t)(uintptr_t)bin->stack_head;
bin->low_bits_full = (uint16_t)(uintptr_t)full_position;
bin->low_bits_empty = (uint16_t)(uintptr_t)empty_position;
cache_bin_sz_t free_spots = cache_bin_diff(bin,
bin->low_bits_full, (uint16_t)(uintptr_t)bin->stack_head,
/* racy */ false);
assert(free_spots == bin_stack_size);
assert(cache_bin_ncached_get_local(bin, info) == 0);
assert(cache_bin_empty_position_get(bin) == empty_position);
assert(bin_stack_size > 0 || empty_position == full_position);
}
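A small illustration of the low-16-bit position tracking used by cache_bin_init() above; this is an editor's sketch with made-up values, not jemalloc source.
/*
 * Illustrative sketch (not jemalloc source): the bin stores only the low 16
 * bits of the full/empty/head positions.  Unsigned 16-bit subtraction still
 * yields the correct distance even if the addresses cross a 64 KiB boundary,
 * which appears to be what cache_bin_diff()-style arithmetic relies on.
 */
#include <assert.h>
#include <stdint.h>

int
main(void) {
	uint16_t low_bits_full = 0xfff0;   /* hypothetical low bits of full position */
	uint16_t low_bits_empty =
	    (uint16_t)(low_bits_full + 10 * sizeof(void *));   /* wraps past 0xffff */
	uint16_t dist = (uint16_t)(low_bits_empty - low_bits_full);
	assert(dist == 10 * sizeof(void *));   /* 80 with 8-byte pointers */
	return 0;
}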
bool
cache_bin_still_zero_initialized(cache_bin_t *bin) {
return bin->stack_head == NULL;
}