ruanhaishen / redis · Commits

Commit d4439bd4, authored May 15, 2023 by Oran Agra

    Merge remote-tracking branch 'origin/unstable' into 7.2

Parents: e26a769d, 2ffde15a
Changes: 199. Too many changes to show; to preserve performance only 199 of 199+ files are displayed.
deps/jemalloc/doc_internal/jemalloc.svg (new file, mode 0 → 100644)
[SVG image: the "jemalloc Final Logo" (viewBox 0 0 499 184.27). The file defines a clip path tracing the word "jemalloc" and fills it with a dense series of vertical lines (stroke #262262, width 4, round caps and joins) clipped to that outline.]
deps/jemalloc/include/jemalloc/internal/activity_callback.h (new file, mode 0 → 100644)
#ifndef JEMALLOC_INTERNAL_ACTIVITY_CALLBACK_H
#define JEMALLOC_INTERNAL_ACTIVITY_CALLBACK_H

/*
 * The callback to be executed "periodically", in response to some amount of
 * allocator activity.
 *
 * This callback need not be computing any sort of peak (although that's the
 * intended first use case), but we drive it from the peak counter, so it
 * keeps things tidy to keep it here.
 *
 * The calls to this thunk get driven by the peak_event module.
 */
#define ACTIVITY_CALLBACK_THUNK_INITIALIZER {NULL, NULL}
typedef void (*activity_callback_t)(void *uctx, uint64_t allocated,
    uint64_t deallocated);
typedef struct activity_callback_thunk_s activity_callback_thunk_t;
struct activity_callback_thunk_s {
    activity_callback_t callback;
    void *uctx;
};

#endif /* JEMALLOC_INTERNAL_ACTIVITY_CALLBACK_H */
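To make the thunk's intended use concrete, here is a stand-alone sketch of how a callback with this shape could be installed and driven. The activity_tick() driver and track_peak() consumer are illustrative stand-ins under assumed names; the real wiring lives in jemalloc's peak_event module and is not reproduced here.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Same shapes as the header above, redeclared so the sketch is self-contained. */
typedef void (*activity_callback_t)(void *uctx, uint64_t allocated,
    uint64_t deallocated);
typedef struct {
    activity_callback_t callback;
    void *uctx;
} activity_callback_thunk_t;
#define ACTIVITY_CALLBACK_THUNK_INITIALIZER {NULL, NULL}

/* Hypothetical driver: invoke the callback once per "tick" of allocator activity. */
static void
activity_tick(const activity_callback_thunk_t *thunk, uint64_t allocated,
    uint64_t deallocated) {
    if (thunk->callback != NULL) {
        thunk->callback(thunk->uctx, allocated, deallocated);
    }
}

/* Example consumer: track the largest net allocation delta seen so far in uctx. */
static void
track_peak(void *uctx, uint64_t allocated, uint64_t deallocated) {
    uint64_t *peak = (uint64_t *)uctx;
    uint64_t net = allocated > deallocated ? allocated - deallocated : 0;
    if (net > *peak) {
        *peak = net;
    }
}

int
main(void) {
    activity_callback_thunk_t idle = ACTIVITY_CALLBACK_THUNK_INITIALIZER;
    activity_tick(&idle, 128, 0); /* No callback installed: a no-op. */

    uint64_t peak = 0;
    activity_callback_thunk_t thunk = {track_peak, &peak};
    activity_tick(&thunk, 4096, 1024);
    activity_tick(&thunk, 8192, 2048);
    printf("peak net bytes observed: %llu\n", (unsigned long long)peak);
    return 0;
}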
deps/jemalloc/include/jemalloc/internal/arena_externs.h

@@ -2,59 +2,67 @@
- Adds #include "jemalloc/internal/div.h".
- Adds a deferred-purge threshold:
      /*
       * When the amount of pages to be purged exceeds this amount, deferred purge
       * should happen.
       */
      #define ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD UINT64_C(1024)
- Drops extern const uint64_t h_steps[SMOOTHSTEP_NSTEPS]; and adds new globals:
      extern div_info_t arena_binind_div_info[SC_NBINS];
      extern emap_t arena_emap_global;
      /*
       * arena_bin_offsets[binind] is the offset of the first bin shard for size class
       * binind.
       */
      extern uint32_t arena_bin_offsets[SC_NBINS];
- arena_stats_merge() now takes bin_stats_data_t *bstats, pac_estats_t *estats, hpa_shard_stats_t *hpastats and sec_stats_t *secstats in place of bin_stats_t *bstats and arena_stats_extents_t *estats.
- void arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent) is replaced by void arena_handle_deferred_work(tsdn_t *tsdn, arena_t *arena).
- arena_extent_alloc_large() now returns edata_t * instead of extent_t * and takes bool zero instead of bool *zero; the JEMALLOC_JET-only declaration of size_t arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) is removed.
- arena_extent_dalloc_large_prep(), arena_extent_ralloc_large_shrink() and arena_extent_ralloc_large_expand() take edata_t *edata instead of extent_t *extent.
- arena_dirty_decay_ms_get()/_set() and arena_muzzy_decay_ms_get()/_set() are merged into ssize_t arena_decay_ms_get(arena_t *arena, extent_state_t state) and bool arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, extent_state_t state, ssize_t decay_ms).
- New declarations: uint64_t arena_time_until_deferred(tsdn_t *tsdn, arena_t *arena); and void arena_do_deferred_work(tsdn_t *tsdn, arena_t *arena);.
- void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) becomes void arena_cache_bin_fill_small(tsdn_t *tsdn, arena_t *arena, cache_bin_t *cache_bin, cache_bin_info_t *cache_bin_info, szind_t binind, const unsigned nfill).
- arena_alloc_junk_small(), the arena_dalloc_junk_small_t typedef and the JET_MUTABLE arena_dalloc_junk_small hook are removed.

@@ -63,8 +71,12 @@ void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
- void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin, szind_t binind, extent_t *extent, void *ptr) is replaced by void arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab);, void arena_dalloc_bin_locked_handle_newly_empty(tsdn_t *tsdn, arena_t *arena, edata_t *slab, bin_t *bin); and void arena_dalloc_bin_locked_handle_newly_nonempty(tsdn_t *tsdn, arena_t *arena, edata_t *slab, bin_t *bin);.

@@ -72,6 +84,9 @@ void *arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
- New declarations next to arena_dss_prec_get(): ehooks_t *arena_get_ehooks(arena_t *arena); and extent_hooks_t *arena_set_extent_hooks(tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks);.

@@ -82,14 +97,15 @@ bool arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena,
- size_t arena_extent_sn_next(arena_t *arena); is removed.
- arena_new() now takes const arena_config_t *config instead of extent_hooks_t *extent_hooks.
- bin_t *arena_bin_choose_lock(...) becomes bin_t *arena_bin_choose(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned *binshard).
- New: size_t arena_fill_small_fresh(tsdn_t *tsdn, arena_t *arena, szind_t binind, void **ptrs, size_t nfill, bool zero);.
- void arena_boot(sc_data_t *sc_data) becomes bool arena_boot(sc_data_t *sc_data, base_t *base, bool hpa).

@@ -98,6 +114,7 @@ void arena_prefork4(tsdn_t *tsdn, arena_t *arena);
- Adds void arena_prefork8(tsdn_t *tsdn, arena_t *arena); between arena_prefork7() and arena_postfork_parent().
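For illustration, a minimal sketch of the kind of gate the new ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD suggests: purge work is only handed off once enough pages have accumulated to be worth a wakeup. The pending_purge_npages counter, note_dirty_pages() and schedule_deferred_purge() below are hypothetical names, not jemalloc's implementation.

#include <stdint.h>
#include <stdio.h>

#define ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD UINT64_C(1024)

/* Hypothetical: pages queued for purging in some arena. */
static uint64_t pending_purge_npages;

/* Hypothetical stand-in for waking a background purger. */
static void
schedule_deferred_purge(void) {
    printf("deferring purge of %llu pages\n",
        (unsigned long long)pending_purge_npages);
    pending_purge_npages = 0;
}

/* Defer only once the backlog exceeds the threshold. */
static void
note_dirty_pages(uint64_t npages) {
    pending_purge_npages += npages;
    if (pending_purge_npages > ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD) {
        schedule_deferred_purge();
    }
}

int
main(void) {
    for (int i = 0; i < 5; i++) {
        note_dirty_pages(300); /* Backlog crosses the 1024-page threshold on the 4th call. */
    }
    return 0;
}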
deps/jemalloc/include/jemalloc/internal/arena_inlines_a.h

@@ -3,7 +3,7 @@
- arena_ind_get(const arena_t *arena) now returns arena->ind instead of base_ind_get(arena->base).

@@ -21,37 +21,4 @@ arena_internal_get(arena_t *arena) {
- The inline helpers arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes), which forwarded to prof_accum_add() when profiling was active, and percpu_arena_update(tsd_t *tsd, unsigned cpu), which migrated the thread to the arena matching its CPU and reassociated its tcache via tcache_arena_reassociate(), are removed, leaving only the closing #endif /* JEMALLOC_INTERNAL_ARENA_INLINES_A_H */.
deps/jemalloc/include/jemalloc/internal/arena_inlines_b.h
View file @
d4439bd4
#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_B_H
#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_B_H
#define JEMALLOC_INTERNAL_ARENA_INLINES_B_H
#define JEMALLOC_INTERNAL_ARENA_INLINES_B_H
#include "jemalloc/internal/div.h"
#include "jemalloc/internal/emap.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/safety_check.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/ticker.h"
JEMALLOC_ALWAYS_INLINE
bool
static
inline
arena_t
*
arena_has_default_hooks
(
arena_t
*
arena
)
{
arena_get_from_edata
(
edata_t
*
edata
)
{
return
(
extent_hooks_get
(
arena
)
==
&
extent_hooks_default
);
return
(
arena_t
*
)
atomic_load_p
(
&
arenas
[
edata_arena_ind_get
(
edata
)],
ATOMIC_RELAXED
);
}
}
JEMALLOC_ALWAYS_INLINE
arena_t
*
JEMALLOC_ALWAYS_INLINE
arena_t
*
...
@@ -34,127 +38,109 @@ arena_choose_maybe_huge(tsd_t *tsd, arena_t *arena, size_t size) {
...
@@ -34,127 +38,109 @@ arena_choose_maybe_huge(tsd_t *tsd, arena_t *arena, size_t size) {
return
arena_choose
(
tsd
,
NULL
);
return
arena_choose
(
tsd
,
NULL
);
}
}
JEMALLOC_ALWAYS_INLINE
prof_tctx_t
*
JEMALLOC_ALWAYS_INLINE
void
arena_prof_tctx_get
(
tsdn_t
*
tsdn
,
const
void
*
ptr
,
alloc_ctx_t
*
alloc_ctx
)
{
arena_prof_info_get
(
tsd_t
*
tsd
,
const
void
*
ptr
,
emap_alloc_ctx_t
*
alloc_ctx
,
prof_info_t
*
prof_info
,
bool
reset_recent
)
{
cassert
(
config_prof
);
cassert
(
config_prof
);
assert
(
ptr
!=
NULL
);
assert
(
ptr
!=
NULL
);
assert
(
prof_info
!=
NULL
);
edata_t
*
edata
=
NULL
;
bool
is_slab
;
/* Static check. */
/* Static check. */
if
(
alloc_ctx
==
NULL
)
{
if
(
alloc_ctx
==
NULL
)
{
const
extent_t
*
extent
=
iealloc
(
tsdn
,
ptr
);
edata
=
emap_edata_lookup
(
tsd_tsdn
(
tsd
),
&
arena_emap_global
,
if
(
unlikely
(
!
extent_slab_get
(
extent
)))
{
ptr
);
return
large_prof_tctx_get
(
tsdn
,
extent
);
is_slab
=
edata_slab_get
(
edata
);
}
}
else
if
(
unlikely
(
!
(
is_slab
=
alloc_ctx
->
slab
)))
{
edata
=
emap_edata_lookup
(
tsd_tsdn
(
tsd
),
&
arena_emap_global
,
ptr
);
}
if
(
unlikely
(
!
is_slab
))
{
/* edata must have been initialized at this point. */
assert
(
edata
!=
NULL
);
large_prof_info_get
(
tsd
,
edata
,
prof_info
,
reset_recent
);
}
else
{
}
else
{
if
(
unlikely
(
!
alloc_ctx
->
slab
))
{
prof_info
->
alloc_tctx
=
(
prof_tctx_t
*
)(
uintptr_t
)
1U
;
return
large_prof_tctx_get
(
tsdn
,
iealloc
(
tsdn
,
ptr
));
/*
}
* No need to set other fields in prof_info; they will never be
* accessed if (uintptr_t)alloc_tctx == (uintptr_t)1U.
*/
}
}
return
(
prof_tctx_t
*
)(
uintptr_t
)
1U
;
}
}
JEMALLOC_ALWAYS_INLINE
void
JEMALLOC_ALWAYS_INLINE
void
arena_prof_tctx_set
(
tsd
n
_t
*
tsd
n
,
const
void
*
ptr
,
size_t
usize
,
arena_prof_tctx_
re
set
(
tsd_t
*
tsd
,
const
void
*
ptr
,
alloc_ctx_t
*
alloc_ctx
,
prof_tctx_t
*
tctx
)
{
emap_
alloc_ctx_t
*
alloc_ctx
)
{
cassert
(
config_prof
);
cassert
(
config_prof
);
assert
(
ptr
!=
NULL
);
assert
(
ptr
!=
NULL
);
/* Static check. */
/* Static check. */
if
(
alloc_ctx
==
NULL
)
{
if
(
alloc_ctx
==
NULL
)
{
extent_t
*
extent
=
iealloc
(
tsdn
,
ptr
);
edata_t
*
edata
=
emap_edata_lookup
(
tsd_tsdn
(
tsd
),
if
(
unlikely
(
!
extent_slab_get
(
extent
)))
{
&
arena_emap_global
,
ptr
);
large_prof_tctx_set
(
tsdn
,
extent
,
tctx
);
if
(
unlikely
(
!
edata_slab_get
(
edata
)))
{
large_prof_tctx_reset
(
edata
);
}
}
}
else
{
}
else
{
if
(
unlikely
(
!
alloc_ctx
->
slab
))
{
if
(
unlikely
(
!
alloc_ctx
->
slab
))
{
large_prof_tctx_set
(
tsdn
,
iealloc
(
tsdn
,
ptr
),
tctx
);
edata_t
*
edata
=
emap_edata_lookup
(
tsd_tsdn
(
tsd
),
&
arena_emap_global
,
ptr
);
large_prof_tctx_reset
(
edata
);
}
}
}
}
}
}
static
inline
void
JEMALLOC_ALWAYS_INLINE
void
arena_prof_tctx_reset
(
tsd
n
_t
*
tsd
n
,
const
void
*
ptr
,
prof_tctx_t
*
tctx
)
{
arena_prof_tctx_reset
_sampled
(
tsd_t
*
tsd
,
const
void
*
ptr
)
{
cassert
(
config_prof
);
cassert
(
config_prof
);
assert
(
ptr
!=
NULL
);
assert
(
ptr
!=
NULL
);
extent_t
*
extent
=
iealloc
(
tsdn
,
ptr
);
edata_t
*
edata
=
emap_edata_lookup
(
tsd_tsdn
(
tsd
),
&
arena_emap_global
,
assert
(
!
extent_slab_get
(
extent
));
ptr
);
assert
(
!
edata_slab_get
(
edata
));
large_prof_tctx_reset
(
tsdn
,
extent
);
large_prof_tctx_reset
(
edata
);
}
JEMALLOC_ALWAYS_INLINE
nstime_t
arena_prof_alloc_time_get
(
tsdn_t
*
tsdn
,
const
void
*
ptr
,
alloc_ctx_t
*
alloc_ctx
)
{
cassert
(
config_prof
);
assert
(
ptr
!=
NULL
);
extent_t
*
extent
=
iealloc
(
tsdn
,
ptr
);
/*
* Unlike arena_prof_prof_tctx_{get, set}, we only call this once we're
* sure we have a sampled allocation.
*/
assert
(
!
extent_slab_get
(
extent
));
return
large_prof_alloc_time_get
(
extent
);
}
}
JEMALLOC_ALWAYS_INLINE
void
JEMALLOC_ALWAYS_INLINE
void
arena_prof_
alloc_time
_set
(
tsd
n
_t
*
tsd
n
,
const
void
*
ptr
,
alloc_
ctx_t
*
alloc_
ctx
,
arena_prof_
info
_set
(
tsd_t
*
tsd
,
edata_t
*
edata
,
prof_t
ctx_t
*
t
ctx
,
nstime_t
t
)
{
size_t
size
)
{
cassert
(
config_prof
);
cassert
(
config_prof
);
assert
(
ptr
!=
NULL
);
extent_t
*
extent
=
iealloc
(
tsdn
,
ptr
);
assert
(
!
edata_slab_get
(
edata
));
assert
(
!
extent_slab_get
(
extent
));
large_prof_info_set
(
edata
,
tctx
,
size
);
large_prof_alloc_time_set
(
extent
,
t
);
}
}
JEMALLOC_ALWAYS_INLINE
void
JEMALLOC_ALWAYS_INLINE
void
arena_decay_ticks
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
unsigned
nticks
)
{
arena_decay_ticks
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
unsigned
nticks
)
{
tsd_t
*
tsd
;
ticker_t
*
decay_ticker
;
if
(
unlikely
(
tsdn_null
(
tsdn
)))
{
if
(
unlikely
(
tsdn_null
(
tsdn
)))
{
return
;
return
;
}
}
tsd
=
tsdn_tsd
(
tsdn
);
tsd_t
*
tsd
=
tsdn_tsd
(
tsdn
);
decay_ticker
=
decay_ticker_get
(
tsd
,
arena_ind_get
(
arena
));
/*
if
(
unlikely
(
decay_ticker
==
NULL
))
{
* We use the ticker_geom_t to avoid having per-arena state in the tsd.
return
;
* Instead of having a countdown-until-decay timer running for every
}
* arena in every thread, we flip a coin once per tick, whose
if
(
unlikely
(
ticker_ticks
(
decay_ticker
,
nticks
)))
{
* probability of coming up heads is 1/nticks; this is effectively the
* operation of the ticker_geom_t. Each arena has the same chance of a
* coinflip coming up heads (1/ARENA_DECAY_NTICKS_PER_UPDATE), so we can
* use a single ticker for all of them.
*/
ticker_geom_t
*
decay_ticker
=
tsd_arena_decay_tickerp_get
(
tsd
);
uint64_t
*
prng_state
=
tsd_prng_statep_get
(
tsd
);
if
(
unlikely
(
ticker_geom_ticks
(
decay_ticker
,
prng_state
,
nticks
)))
{
arena_decay
(
tsdn
,
arena
,
false
,
false
);
arena_decay
(
tsdn
,
arena
,
false
,
false
);
}
}
}
}
JEMALLOC_ALWAYS_INLINE
void
JEMALLOC_ALWAYS_INLINE
void
arena_decay_tick
(
tsdn_t
*
tsdn
,
arena_t
*
arena
)
{
arena_decay_tick
(
tsdn_t
*
tsdn
,
arena_t
*
arena
)
{
malloc_mutex_assert_not_owner
(
tsdn
,
&
arena
->
decay_dirty
.
mtx
);
malloc_mutex_assert_not_owner
(
tsdn
,
&
arena
->
decay_muzzy
.
mtx
);
arena_decay_ticks
(
tsdn
,
arena
,
1
);
arena_decay_ticks
(
tsdn
,
arena
,
1
);
}
}
/* Purge a single extent to retained / unmapped directly. */
JEMALLOC_ALWAYS_INLINE
void
arena_decay_extent
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
extent_hooks_t
**
r_extent_hooks
,
extent_t
*
extent
)
{
size_t
extent_size
=
extent_size_get
(
extent
);
extent_dalloc_wrapper
(
tsdn
,
arena
,
r_extent_hooks
,
extent
);
if
(
config_stats
)
{
/* Update stats accordingly. */
arena_stats_lock
(
tsdn
,
&
arena
->
stats
);
arena_stats_add_u64
(
tsdn
,
&
arena
->
stats
,
&
arena
->
decay_dirty
.
stats
->
nmadvise
,
1
);
arena_stats_add_u64
(
tsdn
,
&
arena
->
stats
,
&
arena
->
decay_dirty
.
stats
->
purged
,
extent_size
>>
LG_PAGE
);
arena_stats_sub_zu
(
tsdn
,
&
arena
->
stats
,
&
arena
->
stats
.
mapped
,
extent_size
);
arena_stats_unlock
(
tsdn
,
&
arena
->
stats
);
}
}
JEMALLOC_ALWAYS_INLINE
void
*
JEMALLOC_ALWAYS_INLINE
void
*
arena_malloc
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
size_t
size
,
szind_t
ind
,
bool
zero
,
arena_malloc
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
size_t
size
,
szind_t
ind
,
bool
zero
,
tcache_t
*
tcache
,
bool
slow_path
)
{
tcache_t
*
tcache
,
bool
slow_path
)
{
...
@@ -178,21 +164,19 @@ arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
...
@@ -178,21 +164,19 @@ arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
JEMALLOC_ALWAYS_INLINE
arena_t
*
JEMALLOC_ALWAYS_INLINE
arena_t
*
arena_aalloc
(
tsdn_t
*
tsdn
,
const
void
*
ptr
)
{
arena_aalloc
(
tsdn_t
*
tsdn
,
const
void
*
ptr
)
{
return
extent_arena_get
(
iealloc
(
tsdn
,
ptr
));
edata_t
*
edata
=
emap_edata_lookup
(
tsdn
,
&
arena_emap_global
,
ptr
);
unsigned
arena_ind
=
edata_arena_ind_get
(
edata
);
return
(
arena_t
*
)
atomic_load_p
(
&
arenas
[
arena_ind
],
ATOMIC_RELAXED
);
}
}
JEMALLOC_ALWAYS_INLINE
size_t
JEMALLOC_ALWAYS_INLINE
size_t
arena_salloc
(
tsdn_t
*
tsdn
,
const
void
*
ptr
)
{
arena_salloc
(
tsdn_t
*
tsdn
,
const
void
*
ptr
)
{
assert
(
ptr
!=
NULL
);
assert
(
ptr
!=
NULL
);
emap_alloc_ctx_t
alloc_ctx
;
emap_alloc_ctx_lookup
(
tsdn
,
&
arena_emap_global
,
ptr
,
&
alloc_ctx
);
assert
(
alloc_ctx
.
szind
!=
SC_NSIZES
);
rtree_ctx_t
rtree_ctx_fallback
;
return
sz_index2size
(
alloc_ctx
.
szind
);
rtree_ctx_t
*
rtree_ctx
=
tsdn_rtree_ctx
(
tsdn
,
&
rtree_ctx_fallback
);
szind_t
szind
=
rtree_szind_read
(
tsdn
,
&
extents_rtree
,
rtree_ctx
,
(
uintptr_t
)
ptr
,
true
);
assert
(
szind
!=
SC_NSIZES
);
return
sz_index2size
(
szind
);
}
}
JEMALLOC_ALWAYS_INLINE
size_t
JEMALLOC_ALWAYS_INLINE
size_t
...
@@ -206,26 +190,53 @@ arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
...
@@ -206,26 +190,53 @@ arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
* failure.
* failure.
*/
*/
rtree_ctx_t
rtree_ctx_fallback
;
emap_full_alloc_ctx_t
full_alloc_ctx
;
rtree_ctx_t
*
rtree_ctx
=
tsdn_rtree_ctx
(
tsdn
,
&
rtree_ctx_fallback
);
bool
missing
=
emap_full_alloc_ctx_try_lookup
(
tsdn
,
&
arena_emap_global
,
ptr
,
&
full_alloc_ctx
);
extent_t
*
extent
;
if
(
missing
)
{
szind_t
szind
;
if
(
rtree_extent_szind_read
(
tsdn
,
&
extents_rtree
,
rtree_ctx
,
(
uintptr_t
)
ptr
,
false
,
&
extent
,
&
szind
))
{
return
0
;
return
0
;
}
}
if
(
extent
==
NULL
)
{
if
(
full_alloc_ctx
.
edata
==
NULL
)
{
return
0
;
return
0
;
}
}
assert
(
e
xtent
_state_get
(
extent
)
==
extent_state_active
);
assert
(
e
data
_state_get
(
full_alloc_ctx
.
edata
)
==
extent_state_active
);
/* Only slab members should be looked up via interior pointers. */
/* Only slab members should be looked up via interior pointers. */
assert
(
extent_addr_get
(
extent
)
==
ptr
||
extent_slab_get
(
extent
));
assert
(
edata_addr_get
(
full_alloc_ctx
.
edata
)
==
ptr
||
edata_slab_get
(
full_alloc_ctx
.
edata
));
assert
(
full_alloc_ctx
.
szind
!=
SC_NSIZES
);
return
sz_index2size
(
full_alloc_ctx
.
szind
);
}
assert
(
szind
!=
SC_NSIZES
);
JEMALLOC_ALWAYS_INLINE
bool
large_dalloc_safety_checks
(
edata_t
*
edata
,
void
*
ptr
,
szind_t
szind
)
{
if
(
!
config_opt_safety_checks
)
{
return
false
;
}
/*
* Eagerly detect double free and sized dealloc bugs for large sizes.
* The cost is low enough (as edata will be accessed anyway) to be
* enabled all the time.
*/
if
(
unlikely
(
edata
==
NULL
||
edata_state_get
(
edata
)
!=
extent_state_active
))
{
safety_check_fail
(
"Invalid deallocation detected: "
"pages being freed (%p) not currently active, "
"possibly caused by double free bugs."
,
(
uintptr_t
)
edata_addr_get
(
edata
));
return
true
;
}
size_t
input_size
=
sz_index2size
(
szind
);
if
(
unlikely
(
input_size
!=
edata_usize_get
(
edata
)))
{
safety_check_fail_sized_dealloc
(
/* current_dealloc */
true
,
ptr
,
/* true_size */
edata_usize_get
(
edata
),
input_size
);
return
true
;
}
return
sz_index2size
(
szind
)
;
return
false
;
}
}
static
inline
void
static
inline
void
...
@@ -233,8 +244,13 @@ arena_dalloc_large_no_tcache(tsdn_t *tsdn, void *ptr, szind_t szind) {
...
@@ -233,8 +244,13 @@ arena_dalloc_large_no_tcache(tsdn_t *tsdn, void *ptr, szind_t szind) {
if
(
config_prof
&&
unlikely
(
szind
<
SC_NBINS
))
{
if
(
config_prof
&&
unlikely
(
szind
<
SC_NBINS
))
{
arena_dalloc_promoted
(
tsdn
,
ptr
,
NULL
,
true
);
arena_dalloc_promoted
(
tsdn
,
ptr
,
NULL
,
true
);
}
else
{
}
else
{
extent_t
*
extent
=
iealloc
(
tsdn
,
ptr
);
edata_t
*
edata
=
emap_edata_lookup
(
tsdn
,
&
arena_emap_global
,
large_dalloc
(
tsdn
,
extent
);
ptr
);
if
(
large_dalloc_safety_checks
(
edata
,
ptr
,
szind
))
{
/* See the comment in isfree. */
return
;
}
large_dalloc
(
tsdn
,
edata
);
}
}
}
}
...
@@ -242,27 +258,22 @@ static inline void
...
@@ -242,27 +258,22 @@ static inline void
arena_dalloc_no_tcache
(
tsdn_t
*
tsdn
,
void
*
ptr
)
{
arena_dalloc_no_tcache
(
tsdn_t
*
tsdn
,
void
*
ptr
)
{
assert
(
ptr
!=
NULL
);
assert
(
ptr
!=
NULL
);
rtree_ctx_t
rtree_ctx_fallback
;
emap_alloc_ctx_t
alloc_ctx
;
rtree_ctx_t
*
rtree_ctx
=
tsdn_rtree_ctx
(
tsdn
,
&
rtree_ctx_fallback
);
emap_alloc_ctx_lookup
(
tsdn
,
&
arena_emap_global
,
ptr
,
&
alloc_ctx
);
szind_t
szind
;
bool
slab
;
rtree_szind_slab_read
(
tsdn
,
&
extents_rtree
,
rtree_ctx
,
(
uintptr_t
)
ptr
,
true
,
&
szind
,
&
slab
);
if
(
config_debug
)
{
if
(
config_debug
)
{
e
xtent_t
*
extent
=
rtree_extent_read
(
tsdn
,
&
extents_rtree
,
e
data_t
*
edata
=
emap_edata_lookup
(
tsdn
,
&
arena_emap_global
,
rtree_ctx
,
(
uintptr_t
)
ptr
,
true
);
ptr
);
assert
(
szind
==
e
xtent
_szind_get
(
e
xtent
));
assert
(
alloc_ctx
.
szind
==
e
data
_szind_get
(
e
data
));
assert
(
szind
<
SC_NSIZES
);
assert
(
alloc_ctx
.
szind
<
SC_NSIZES
);
assert
(
slab
==
e
xtent
_slab_get
(
e
xtent
));
assert
(
alloc_ctx
.
slab
==
e
data
_slab_get
(
e
data
));
}
}
if
(
likely
(
slab
))
{
if
(
likely
(
alloc_ctx
.
slab
))
{
/* Small allocation. */
/* Small allocation. */
arena_dalloc_small
(
tsdn
,
ptr
);
arena_dalloc_small
(
tsdn
,
ptr
);
}
else
{
}
else
{
arena_dalloc_large_no_tcache
(
tsdn
,
ptr
,
szind
);
arena_dalloc_large_no_tcache
(
tsdn
,
ptr
,
alloc_ctx
.
szind
);
}
}
}
}
...
@@ -277,14 +288,19 @@ arena_dalloc_large(tsdn_t *tsdn, void *ptr, tcache_t *tcache, szind_t szind,
...
@@ -277,14 +288,19 @@ arena_dalloc_large(tsdn_t *tsdn, void *ptr, tcache_t *tcache, szind_t szind,
slow_path
);
slow_path
);
}
}
}
else
{
}
else
{
extent_t
*
extent
=
iealloc
(
tsdn
,
ptr
);
edata_t
*
edata
=
emap_edata_lookup
(
tsdn
,
&
arena_emap_global
,
large_dalloc
(
tsdn
,
extent
);
ptr
);
if
(
large_dalloc_safety_checks
(
edata
,
ptr
,
szind
))
{
/* See the comment in isfree. */
return
;
}
large_dalloc
(
tsdn
,
edata
);
}
}
}
}
JEMALLOC_ALWAYS_INLINE
void
JEMALLOC_ALWAYS_INLINE
void
arena_dalloc
(
tsdn_t
*
tsdn
,
void
*
ptr
,
tcache_t
*
tcache
,
arena_dalloc
(
tsdn_t
*
tsdn
,
void
*
ptr
,
tcache_t
*
tcache
,
alloc_ctx_t
*
alloc_ctx
,
bool
slow_path
)
{
emap_
alloc_ctx_t
*
caller_
alloc_ctx
,
bool
slow_path
)
{
assert
(
!
tsdn_null
(
tsdn
)
||
tcache
==
NULL
);
assert
(
!
tsdn_null
(
tsdn
)
||
tcache
==
NULL
);
assert
(
ptr
!=
NULL
);
assert
(
ptr
!=
NULL
);
...
@@ -293,34 +309,30 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
...
@@ -293,34 +309,30 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
return
;
return
;
}
}
szind_t
szind
;
emap_alloc_ctx_t
alloc_ctx
;
bool
slab
;
if
(
caller_alloc_ctx
!=
NULL
)
{
rtree_ctx_t
*
rtree_ctx
;
alloc_ctx
=
*
caller_alloc_ctx
;
if
(
alloc_ctx
!=
NULL
)
{
szind
=
alloc_ctx
->
szind
;
slab
=
alloc_ctx
->
slab
;
assert
(
szind
!=
SC_NSIZES
);
}
else
{
}
else
{
rtree_ctx
=
tsd_rtree_ctx
(
tsdn_
tsd
(
tsdn
));
util_assume
(
!
tsdn_
null
(
tsdn
));
rtree_szind_slab_read
(
tsdn
,
&
extents_rtree
,
r
tr
ee_ctx
,
emap_alloc_ctx_lookup
(
tsdn
,
&
arena_emap_global
,
p
tr
,
(
uintptr_t
)
ptr
,
true
,
&
szind
,
&
slab
);
&
alloc_ctx
);
}
}
if
(
config_debug
)
{
if
(
config_debug
)
{
rtree_ctx
=
tsd_rtree_ctx
(
tsdn_tsd
(
tsdn
));
edata_t
*
edata
=
emap_edata_lookup
(
tsdn
,
&
arena_emap_global
,
extent_t
*
extent
=
rtree_extent_read
(
tsdn
,
&
extents_rtree
,
ptr
);
rtree_ctx
,
(
uintptr_t
)
ptr
,
true
);
assert
(
alloc_ctx
.
szind
==
edata_szind_get
(
edata
));
assert
(
szind
==
extent_szind_get
(
extent
));
assert
(
alloc_ctx
.
szind
<
SC_NSIZES
);
assert
(
szind
<
SC_NSIZES
);
assert
(
alloc_ctx
.
slab
==
edata_slab_get
(
edata
));
assert
(
slab
==
extent_slab_get
(
extent
));
}
}
if
(
likely
(
slab
))
{
if
(
likely
(
alloc_ctx
.
slab
))
{
/* Small allocation. */
/* Small allocation. */
tcache_dalloc_small
(
tsdn_tsd
(
tsdn
),
tcache
,
ptr
,
szind
,
tcache_dalloc_small
(
tsdn_tsd
(
tsdn
),
tcache
,
ptr
,
slow_path
);
alloc_ctx
.
szind
,
slow_path
);
}
else
{
}
else
{
arena_dalloc_large
(
tsdn
,
ptr
,
tcache
,
szind
,
slow_path
);
arena_dalloc_large
(
tsdn
,
ptr
,
tcache
,
alloc_ctx
.
szind
,
slow_path
);
}
}
}
}
...
@@ -329,47 +341,43 @@ arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
...
@@ -329,47 +341,43 @@ arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
assert
(
ptr
!=
NULL
);
assert
(
ptr
!=
NULL
);
assert
(
size
<=
SC_LARGE_MAXCLASS
);
assert
(
size
<=
SC_LARGE_MAXCLASS
);
szind_t
szind
;
emap_alloc_ctx_t
alloc_ctx
;
bool
slab
;
if
(
!
config_prof
||
!
opt_prof
)
{
if
(
!
config_prof
||
!
opt_prof
)
{
/*
/*
* There is no risk of being confused by a promoted sampled
* There is no risk of being confused by a promoted sampled
* object, so base szind and slab on the given size.
* object, so base szind and slab on the given size.
*/
*/
szind
=
sz_size2index
(
size
);
alloc_ctx
.
szind
=
sz_size2index
(
size
);
slab
=
(
szind
<
SC_NBINS
);
alloc_ctx
.
slab
=
(
alloc_ctx
.
szind
<
SC_NBINS
);
}
}
if
((
config_prof
&&
opt_prof
)
||
config_debug
)
{
if
((
config_prof
&&
opt_prof
)
||
config_debug
)
{
rtree_ctx_t
rtree_ctx_fallback
;
emap_alloc_ctx_lookup
(
tsdn
,
&
arena_emap_global
,
ptr
,
rtree_ctx_t
*
rtree_ctx
=
tsdn_rtree_ctx
(
tsdn
,
&
alloc_ctx
);
&
rtree_ctx_fallback
);
rtree_szind_slab_read
(
tsdn
,
&
extents_rtree
,
rtree_ctx
,
(
uintptr_t
)
ptr
,
true
,
&
szind
,
&
slab
);
assert
(
szind
==
sz_size2index
(
size
));
assert
(
alloc_ctx
.
szind
==
sz_size2index
(
size
));
assert
((
config_prof
&&
opt_prof
)
||
slab
==
(
szind
<
SC_NBINS
));
assert
((
config_prof
&&
opt_prof
)
||
alloc_ctx
.
slab
==
(
alloc_ctx
.
szind
<
SC_NBINS
));
if
(
config_debug
)
{
if
(
config_debug
)
{
e
xtent_t
*
extent
=
rtree_extent_read
(
tsdn
,
e
data_t
*
edata
=
emap_edata_lookup
(
tsdn
,
&
extents_rtree
,
rtree_ctx
,
(
uintptr_t
)
ptr
,
tr
ue
);
&
arena_emap_global
,
p
tr
);
assert
(
szind
==
e
xtent
_szind_get
(
e
xtent
));
assert
(
alloc_ctx
.
szind
==
e
data
_szind_get
(
e
data
));
assert
(
slab
==
e
xtent
_slab_get
(
e
xtent
));
assert
(
alloc_ctx
.
slab
==
e
data
_slab_get
(
e
data
));
}
}
}
}
if
(
likely
(
slab
))
{
if
(
likely
(
alloc_ctx
.
slab
))
{
/* Small allocation. */
/* Small allocation. */
arena_dalloc_small
(
tsdn
,
ptr
);
arena_dalloc_small
(
tsdn
,
ptr
);
}
else
{
}
else
{
arena_dalloc_large_no_tcache
(
tsdn
,
ptr
,
szind
);
arena_dalloc_large_no_tcache
(
tsdn
,
ptr
,
alloc_ctx
.
szind
);
}
}
}
}
JEMALLOC_ALWAYS_INLINE
void
JEMALLOC_ALWAYS_INLINE
void
arena_sdalloc
(
tsdn_t
*
tsdn
,
void
*
ptr
,
size_t
size
,
tcache_t
*
tcache
,
arena_sdalloc
(
tsdn_t
*
tsdn
,
void
*
ptr
,
size_t
size
,
tcache_t
*
tcache
,
alloc_ctx_t
*
alloc_ctx
,
bool
slow_path
)
{
emap_
alloc_ctx_t
*
caller_
alloc_ctx
,
bool
slow_path
)
{
assert
(
!
tsdn_null
(
tsdn
)
||
tcache
==
NULL
);
assert
(
!
tsdn_null
(
tsdn
)
||
tcache
==
NULL
);
assert
(
ptr
!=
NULL
);
assert
(
ptr
!=
NULL
);
assert
(
size
<=
SC_LARGE_MAXCLASS
);
assert
(
size
<=
SC_LARGE_MAXCLASS
);
...
@@ -379,49 +387,164 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
...
@@ -379,49 +387,164 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
return
;
return
;
}
}
szind_t
szind
;
emap_alloc_ctx_t
alloc_ctx
;
bool
slab
;
alloc_ctx_t
local_ctx
;
if
(
config_prof
&&
opt_prof
)
{
if
(
config_prof
&&
opt_prof
)
{
if
(
alloc_ctx
==
NULL
)
{
if
(
caller_
alloc_ctx
==
NULL
)
{
/* Uncommon case and should be a static check. */
/* Uncommon case and should be a static check. */
rtree_ctx_t
rtree_ctx_fallback
;
emap_alloc_ctx_lookup
(
tsdn
,
&
arena_emap_global
,
ptr
,
rtree_ctx_t
*
rtree_ctx
=
tsdn_rtree_ctx
(
tsdn
,
&
alloc_ctx
);
&
rtree_ctx_fallback
);
assert
(
alloc_ctx
.
szind
==
sz_size2index
(
size
));
rtree_szind_slab_read
(
tsdn
,
&
extents_rtree
,
rtree_ctx
,
}
else
{
(
uintptr_t
)
ptr
,
true
,
&
local_ctx
.
szind
,
alloc_ctx
=
*
caller_alloc_ctx
;
&
local_ctx
.
slab
);
assert
(
local_ctx
.
szind
==
sz_size2index
(
size
));
alloc_ctx
=
&
local_ctx
;
}
}
slab
=
alloc_ctx
->
slab
;
szind
=
alloc_ctx
->
szind
;
}
else
{
}
else
{
/*
/*
* There is no risk of being confused by a promoted sampled
* There is no risk of being confused by a promoted sampled
* object, so base szind and slab on the given size.
* object, so base szind and slab on the given size.
*/
*/
szind
=
sz_size2index
(
size
);
alloc_ctx
.
szind
=
sz_size2index
(
size
);
slab
=
(
szind
<
SC_NBINS
);
alloc_ctx
.
slab
=
(
alloc_ctx
.
szind
<
SC_NBINS
);
}
}
if
(
config_debug
)
{
if
(
config_debug
)
{
rtree_ctx_t
*
rtree_ctx
=
tsd_rtree_ctx
(
tsdn_tsd
(
tsdn
));
edata_t
*
edata
=
emap_edata_lookup
(
tsdn
,
&
arena_emap_global
,
rtree_szind_slab_read
(
tsdn
,
&
extents_rtree
,
rtree_ctx
,
ptr
);
(
uintptr_t
)
ptr
,
true
,
&
szind
,
&
slab
);
assert
(
alloc_ctx
.
szind
==
edata_szind_get
(
edata
));
extent_t
*
extent
=
rtree_extent_read
(
tsdn
,
assert
(
alloc_ctx
.
slab
==
edata_slab_get
(
edata
));
&
extents_rtree
,
rtree_ctx
,
(
uintptr_t
)
ptr
,
true
);
assert
(
szind
==
extent_szind_get
(
extent
));
assert
(
slab
==
extent_slab_get
(
extent
));
}
}
if
(
likely
(
slab
))
{
if
(
likely
(
alloc_ctx
.
slab
))
{
/* Small allocation. */
/* Small allocation. */
tcache_dalloc_small
(
tsdn_tsd
(
tsdn
),
tcache
,
ptr
,
szind
,
tcache_dalloc_small
(
tsdn_tsd
(
tsdn
),
tcache
,
ptr
,
slow_path
);
alloc_ctx
.
szind
,
slow_path
);
}
else
{
}
else
{
arena_dalloc_large
(
tsdn
,
ptr
,
tcache
,
szind
,
slow_path
);
arena_dalloc_large
(
tsdn
,
ptr
,
tcache
,
alloc_ctx
.
szind
,
slow_path
);
}
}
static
inline
void
arena_cache_oblivious_randomize
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
edata_t
*
edata
,
size_t
alignment
)
{
assert
(
edata_base_get
(
edata
)
==
edata_addr_get
(
edata
));
if
(
alignment
<
PAGE
)
{
unsigned
lg_range
=
LG_PAGE
-
lg_floor
(
CACHELINE_CEILING
(
alignment
));
size_t
r
;
if
(
!
tsdn_null
(
tsdn
))
{
tsd_t
*
tsd
=
tsdn_tsd
(
tsdn
);
r
=
(
size_t
)
prng_lg_range_u64
(
tsd_prng_statep_get
(
tsd
),
lg_range
);
}
else
{
uint64_t
stack_value
=
(
uint64_t
)(
uintptr_t
)
&
r
;
r
=
(
size_t
)
prng_lg_range_u64
(
&
stack_value
,
lg_range
);
}
uintptr_t
random_offset
=
((
uintptr_t
)
r
)
<<
(
LG_PAGE
-
lg_range
);
edata
->
e_addr
=
(
void
*
)((
uintptr_t
)
edata
->
e_addr
+
random_offset
);
assert
(
ALIGNMENT_ADDR2BASE
(
edata
->
e_addr
,
alignment
)
==
edata
->
e_addr
);
}
}
/*
* The dalloc bin info contains just the information that the common paths need
* during tcache flushes. By force-inlining these paths, and using local copies
* of data (so that the compiler knows it's constant), we avoid a whole bunch of
* redundant loads and stores by leaving this information in registers.
*/
typedef
struct
arena_dalloc_bin_locked_info_s
arena_dalloc_bin_locked_info_t
;
struct
arena_dalloc_bin_locked_info_s
{
div_info_t
div_info
;
uint32_t
nregs
;
uint64_t
ndalloc
;
};
JEMALLOC_ALWAYS_INLINE
size_t
arena_slab_regind
(
arena_dalloc_bin_locked_info_t
*
info
,
szind_t
binind
,
edata_t
*
slab
,
const
void
*
ptr
)
{
size_t
diff
,
regind
;
/* Freeing a pointer outside the slab can cause assertion failure. */
assert
((
uintptr_t
)
ptr
>=
(
uintptr_t
)
edata_addr_get
(
slab
));
assert
((
uintptr_t
)
ptr
<
(
uintptr_t
)
edata_past_get
(
slab
));
/* Freeing an interior pointer can cause assertion failure. */
assert
(((
uintptr_t
)
ptr
-
(
uintptr_t
)
edata_addr_get
(
slab
))
%
(
uintptr_t
)
bin_infos
[
binind
].
reg_size
==
0
);
diff
=
(
size_t
)((
uintptr_t
)
ptr
-
(
uintptr_t
)
edata_addr_get
(
slab
));
/* Avoid doing division with a variable divisor. */
regind
=
div_compute
(
&
info
->
div_info
,
diff
);
assert
(
regind
<
bin_infos
[
binind
].
nregs
);
return
regind
;
}
JEMALLOC_ALWAYS_INLINE
void
arena_dalloc_bin_locked_begin
(
arena_dalloc_bin_locked_info_t
*
info
,
szind_t
binind
)
{
info
->
div_info
=
arena_binind_div_info
[
binind
];
info
->
nregs
=
bin_infos
[
binind
].
nregs
;
info
->
ndalloc
=
0
;
}
/*
* Does the deallocation work associated with freeing a single pointer (a
* "step") in between a arena_dalloc_bin_locked begin and end call.
*
* Returns true if arena_slab_dalloc must be called on slab. Doesn't do
* stats updates, which happen during finish (this lets running counts get left
* in a register).
*/
JEMALLOC_ALWAYS_INLINE bool
arena_dalloc_bin_locked_step(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
    arena_dalloc_bin_locked_info_t *info, szind_t binind, edata_t *slab,
    void *ptr) {
	const bin_info_t *bin_info = &bin_infos[binind];
	size_t regind = arena_slab_regind(info, binind, slab, ptr);
	slab_data_t *slab_data = edata_slab_data_get(slab);

	assert(edata_nfree_get(slab) < bin_info->nregs);
	/* Freeing an unallocated pointer can cause assertion failure. */
	assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));

	bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
	edata_nfree_inc(slab);

	if (config_stats) {
		info->ndalloc++;
	}

	unsigned nfree = edata_nfree_get(slab);
	if (nfree == bin_info->nregs) {
		arena_dalloc_bin_locked_handle_newly_empty(tsdn, arena, slab,
		    bin);
		return true;
	} else if (nfree == 1 && slab != bin->slabcur) {
		arena_dalloc_bin_locked_handle_newly_nonempty(tsdn, arena,
		    slab, bin);
	}
	return false;
}
JEMALLOC_ALWAYS_INLINE void
arena_dalloc_bin_locked_finish(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
    arena_dalloc_bin_locked_info_t *info) {
	if (config_stats) {
		bin->stats.ndalloc += info->ndalloc;
		assert(bin->stats.curregs >= (size_t)info->ndalloc);
		bin->stats.curregs -= (size_t)info->ndalloc;
	}
}
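The begin/step/finish split is easiest to see from a caller's point of view. The sketch below is illustrative only: ptr_to_slab() is a hypothetical lookup, and real callers typically defer releasing an emptied slab until after the bin lock is dropped rather than calling arena_slab_dalloc() inline.

/*
 * Sketch: drain a batch of pointers that all belong to one bin while holding
 * the bin lock, merging stats exactly once at the end.
 */
static void
flush_batch_sketch(tsdn_t *tsdn, arena_t *arena, bin_t *bin, szind_t binind,
    void **ptrs, size_t nptrs) {
	arena_dalloc_bin_locked_info_t info;
	arena_dalloc_bin_locked_begin(&info, binind);
	for (size_t i = 0; i < nptrs; i++) {
		edata_t *slab = ptr_to_slab(tsdn, ptrs[i]);  /* hypothetical */
		if (arena_dalloc_bin_locked_step(tsdn, arena, bin, &info,
		    binind, slab, ptrs[i])) {
			/* Slab became empty; the caller must release it. */
			arena_slab_dalloc(tsdn, arena, slab);
		}
	}
	/* One stats merge after the loop; counts stayed in registers above. */
	arena_dalloc_bin_locked_finish(tsdn, arena, bin, &info);
}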
static inline bin_t *
arena_get_bin(arena_t *arena, szind_t binind, unsigned binshard) {
	bin_t *shard0 = (bin_t *)((uintptr_t)arena + arena_bin_offsets[binind]);
	return shard0 + binshard;
}

#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_B_H */
deps/jemalloc/include/jemalloc/internal/arena_stats.h
View file @
d4439bd4
...
@@ -2,77 +2,41 @@
 #define JEMALLOC_INTERNAL_ARENA_STATS_H

 #include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/lockedint.h"
 #include "jemalloc/internal/mutex.h"
 #include "jemalloc/internal/mutex_prof.h"
+#include "jemalloc/internal/pa.h"
 #include "jemalloc/internal/sc.h"

 JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
-/*
- * In those architectures that support 64-bit atomics, we use atomic updates for
- * our 64-bit values.  Otherwise, we use a plain uint64_t and synchronize
- * externally.
- */
-#ifdef JEMALLOC_ATOMIC_U64
-typedef atomic_u64_t arena_stats_u64_t;
-#else
-/* Must hold the arena stats mutex while reading atomically. */
-typedef uint64_t arena_stats_u64_t;
-#endif
-
 typedef struct arena_stats_large_s arena_stats_large_t;
 struct arena_stats_large_s {
 	/*
 	 * Total number of allocation/deallocation requests served directly by
 	 * the arena.
 	 */
-	arena_stats_u64_t	nmalloc;
-	arena_stats_u64_t	ndalloc;
+	locked_u64_t		nmalloc;
+	locked_u64_t		ndalloc;

 	/*
 	 * Number of allocation requests that correspond to this size class.
 	 * This includes requests served by tcache, though tcache only
 	 * periodically merges into this counter.
 	 */
-	arena_stats_u64_t	nrequests; /* Partially derived. */
+	locked_u64_t		nrequests; /* Partially derived. */

 	/*
 	 * Number of tcache fills / flushes for large (similarly, periodically
 	 * merged).  Note that there is no large tcache batch-fill currently
 	 * (i.e. only fill 1 at a time); however flush may be batched.
 	 */
-	arena_stats_u64_t	nfills; /* Partially derived. */
-	arena_stats_u64_t	nflushes; /* Partially derived. */
+	locked_u64_t		nfills; /* Partially derived. */
+	locked_u64_t		nflushes; /* Partially derived. */

 	/* Current number of allocations of this size class. */
 	size_t			curlextents; /* Derived. */
 };
-typedef struct arena_stats_decay_s arena_stats_decay_t;
-struct arena_stats_decay_s {
-	/* Total number of purge sweeps. */
-	arena_stats_u64_t	npurge;
-	/* Total number of madvise calls made. */
-	arena_stats_u64_t	nmadvise;
-	/* Total number of pages purged. */
-	arena_stats_u64_t	purged;
-};
-
-typedef struct arena_stats_extents_s arena_stats_extents_t;
-struct arena_stats_extents_s {
-	/*
-	 * Stats for a given index in the range [0, SC_NPSIZES] in an extents_t.
-	 * We track both bytes and # of extents: two extents in the same bucket
-	 * may have different sizes if adjacent size classes differ by more than
-	 * a page, so bytes cannot always be derived from # of extents.
-	 */
-	atomic_zu_t	ndirty;
-	atomic_zu_t	dirty_bytes;
-	atomic_zu_t	nmuzzy;
-	atomic_zu_t	muzzy_bytes;
-	atomic_zu_t	nretained;
-	atomic_zu_t	retained_bytes;
-};
 /*
  * Arena stats.  Note that fields marked "derived" are not directly maintained
  * within the arena code; rather their values are derived during stats merge
...
@@ -80,43 +44,36 @@ struct arena_stats_extents_s {
  */
 typedef struct arena_stats_s arena_stats_t;
 struct arena_stats_s {
-#ifndef JEMALLOC_ATOMIC_U64
-	malloc_mutex_t		mtx;
-#endif
+	LOCKEDINT_MTX_DECLARE(mtx)

-	/* Number of bytes currently mapped, excluding retained memory. */
-	atomic_zu_t		mapped; /* Partially derived. */
-
-	/*
-	 * Number of unused virtual memory bytes currently retained.  Retained
-	 * bytes are technically mapped (though always decommitted or purged),
-	 * but they are excluded from the mapped statistic (above).
-	 */
-	atomic_zu_t		retained; /* Derived. */
-
-	/* Number of extent_t structs allocated by base, but not being used. */
-	atomic_zu_t		extent_avail;
-
-	arena_stats_decay_t	decay_dirty;
-	arena_stats_decay_t	decay_muzzy;
-
-	atomic_zu_t		base; /* Derived. */
+	/*
+	 * resident includes the base stats -- that's why it lives here and not
+	 * in pa_shard_stats_t.
+	 */
+	size_t			base; /* Derived. */
+	size_t			resident; /* Derived. */
+	size_t			metadata_thp; /* Derived. */
+	size_t			mapped; /* Derived. */
+
 	atomic_zu_t		internal;
-	atomic_zu_t		resident; /* Derived. */
-	atomic_zu_t		metadata_thp;

-	atomic_zu_t		allocated_large; /* Derived. */
-	arena_stats_u64_t	nmalloc_large; /* Derived. */
-	arena_stats_u64_t	ndalloc_large; /* Derived. */
-	arena_stats_u64_t	nfills_large; /* Derived. */
-	arena_stats_u64_t	nflushes_large; /* Derived. */
-	arena_stats_u64_t	nrequests_large; /* Derived. */
+	size_t			allocated_large; /* Derived. */
+	uint64_t		nmalloc_large; /* Derived. */
+	uint64_t		ndalloc_large; /* Derived. */
+	uint64_t		nfills_large; /* Derived. */
+	uint64_t		nflushes_large; /* Derived. */
+	uint64_t		nrequests_large; /* Derived. */

-	/* VM space had to be leaked (undocumented).  Normally 0. */
-	atomic_zu_t		abandoned_vm;
+	/*
+	 * The stats logically owned by the pa_shard in the same arena.  This
+	 * lives here only because it's convenient for the purposes of the ctl
+	 * module -- it only knows about the single arena_stats.
+	 */
+	pa_shard_stats_t	pa_shard_stats;

 	/* Number of bytes cached in tcache associated with this arena. */
-	atomic_zu_t		tcache_bytes; /* Derived. */
+	size_t			tcache_bytes; /* Derived. */
+	size_t			tcache_stashed_bytes; /* Derived. */

 	mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes];
...
@@ -134,138 +91,24 @@ arena_stats_init(tsdn_t *tsdn, arena_stats_t *arena_stats) {
 			assert(((char *)arena_stats)[i] == 0);
 		}
 	}
-#ifndef JEMALLOC_ATOMIC_U64
-	if (malloc_mutex_init(&arena_stats->mtx, "arena_stats",
+	if (LOCKEDINT_MTX_INIT(arena_stats->mtx, "arena_stats",
 	    WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) {
 		return true;
 	}
-#endif
 	/* Memory is zeroed, so there is no need to clear stats. */
 	return false;
 }
-static inline void
-arena_stats_lock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
-#ifndef JEMALLOC_ATOMIC_U64
-	malloc_mutex_lock(tsdn, &arena_stats->mtx);
-#endif
-}
-
-static inline void
-arena_stats_unlock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
-#ifndef JEMALLOC_ATOMIC_U64
-	malloc_mutex_unlock(tsdn, &arena_stats->mtx);
-#endif
-}
-
-static inline uint64_t
-arena_stats_read_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
-    arena_stats_u64_t *p) {
-#ifdef JEMALLOC_ATOMIC_U64
-	return atomic_load_u64(p, ATOMIC_RELAXED);
-#else
-	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
-	return *p;
-#endif
-}
-
-static inline void
-arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
-    arena_stats_u64_t *p, uint64_t x) {
-#ifdef JEMALLOC_ATOMIC_U64
-	atomic_fetch_add_u64(p, x, ATOMIC_RELAXED);
-#else
-	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
-	*p += x;
-#endif
-}
-
-static inline void
-arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
-    arena_stats_u64_t *p, uint64_t x) {
-#ifdef JEMALLOC_ATOMIC_U64
-	uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED);
-	assert(r - x <= r);
-#else
-	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
-	*p -= x;
-	assert(*p + x >= *p);
-#endif
-}
-
-/*
- * Non-atomically sets *dst += src.  *dst needs external synchronization.
- * This lets us avoid the cost of a fetch_add when its unnecessary (note that
- * the types here are atomic).
- */
-static inline void
-arena_stats_accum_u64(arena_stats_u64_t *dst, uint64_t src) {
-#ifdef JEMALLOC_ATOMIC_U64
-	uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
-	atomic_store_u64(dst, src + cur_dst, ATOMIC_RELAXED);
-#else
-	*dst += src;
-#endif
-}
-
-static inline size_t
-arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
-    atomic_zu_t *p) {
-#ifdef JEMALLOC_ATOMIC_U64
-	return atomic_load_zu(p, ATOMIC_RELAXED);
-#else
-	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
-	return atomic_load_zu(p, ATOMIC_RELAXED);
-#endif
-}
-
-static inline void
-arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
-    atomic_zu_t *p, size_t x) {
-#ifdef JEMALLOC_ATOMIC_U64
-	atomic_fetch_add_zu(p, x, ATOMIC_RELAXED);
-#else
-	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
-	size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
-	atomic_store_zu(p, cur + x, ATOMIC_RELAXED);
-#endif
-}
-
-static inline void
-arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
-    atomic_zu_t *p, size_t x) {
-#ifdef JEMALLOC_ATOMIC_U64
-	size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED);
-	assert(r - x <= r);
-#else
-	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
-	size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
-	atomic_store_zu(p, cur - x, ATOMIC_RELAXED);
-#endif
-}
-
-/* Like the _u64 variant, needs an externally synchronized *dst. */
-static inline void
-arena_stats_accum_zu(atomic_zu_t *dst, size_t src) {
-	size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
-	atomic_store_zu(dst, src + cur_dst, ATOMIC_RELAXED);
-}
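For context, the removed helpers above were designed to be used in a fixed lock/update/unlock pattern; the lock calls compile to nothing when 64-bit atomics are available. A hedged sketch of that old calling convention (the function itself and its names are illustrative, not code from this commit):

/*
 * Sketch: bump one large-size-class stat under the pre-5.3 API.  lstats
 * points at an arena_stats_large_t element as declared earlier in this file.
 */
static void
record_large_malloc_sketch(tsdn_t *tsdn, arena_stats_t *arena_stats,
    arena_stats_large_t *lstats) {
	arena_stats_lock(tsdn, arena_stats);
	arena_stats_add_u64(tsdn, arena_stats, &lstats->nmalloc, 1);
	arena_stats_unlock(tsdn, arena_stats);
}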
 static inline void
 arena_stats_large_flush_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
     szind_t szind, uint64_t nrequests) {
-	arena_stats_lock(tsdn, arena_stats);
+	LOCKEDINT_MTX_LOCK(tsdn, arena_stats->mtx);
 	arena_stats_large_t *lstats = &arena_stats->lstats[szind - SC_NBINS];
-	arena_stats_add_u64(tsdn, arena_stats, &lstats->nrequests, nrequests);
-	arena_stats_add_u64(tsdn, arena_stats, &lstats->nflushes, 1);
-	arena_stats_unlock(tsdn, arena_stats);
+	locked_inc_u64(tsdn, LOCKEDINT_MTX(arena_stats->mtx),
+	    &lstats->nrequests, nrequests);
+	locked_inc_u64(tsdn, LOCKEDINT_MTX(arena_stats->mtx),
+	    &lstats->nflushes, 1);
+	LOCKEDINT_MTX_UNLOCK(tsdn, arena_stats->mtx);
 }
-static inline void
-arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) {
-	arena_stats_lock(tsdn, arena_stats);
-	arena_stats_add_zu(tsdn, arena_stats, &arena_stats->mapped, size);
-	arena_stats_unlock(tsdn, arena_stats);
-}
#endif /* JEMALLOC_INTERNAL_ARENA_STATS_H */
deps/jemalloc/include/jemalloc/internal/arena_structs_b.h → deps/jemalloc/include/jemalloc/internal/arena_structs.h
View file @
d4439bd4
-#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
-#define JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
+#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_H
+#define JEMALLOC_INTERNAL_ARENA_STRUCTS_H
 #include "jemalloc/internal/arena_stats.h"
 #include "jemalloc/internal/atomic.h"
 #include "jemalloc/internal/bin.h"
 #include "jemalloc/internal/bitmap.h"
+#include "jemalloc/internal/counter.h"
+#include "jemalloc/internal/ecache.h"
+#include "jemalloc/internal/edata_cache.h"
 #include "jemalloc/internal/extent_dss.h"
 #include "jemalloc/internal/jemalloc_internal_types.h"
 #include "jemalloc/internal/mutex.h"
 #include "jemalloc/internal/nstime.h"
+#include "jemalloc/internal/pa.h"
 #include "jemalloc/internal/ql.h"
 #include "jemalloc/internal/sc.h"
-#include "jemalloc/internal/smoothstep.h"
 #include "jemalloc/internal/ticker.h"
-struct arena_decay_s {
-	/* Synchronizes all non-atomic fields. */
-	malloc_mutex_t		mtx;
-	/*
-	 * True if a thread is currently purging the extents associated with
-	 * this decay structure.
-	 */
-	bool			purging;
-	/*
-	 * Approximate time in milliseconds from the creation of a set of unused
-	 * dirty pages until an equivalent set of unused dirty pages is purged
-	 * and/or reused.
-	 */
-	atomic_zd_t		time_ms;
-	/* time / SMOOTHSTEP_NSTEPS. */
-	nstime_t		interval;
-	/*
-	 * Time at which the current decay interval logically started.  We do
-	 * not actually advance to a new epoch until sometime after it starts
-	 * because of scheduling and computation delays, and it is even possible
-	 * to completely skip epochs.  In all cases, during epoch advancement we
-	 * merge all relevant activity into the most recently recorded epoch.
-	 */
-	nstime_t		epoch;
-	/* Deadline randomness generator. */
-	uint64_t		jitter_state;
-	/*
-	 * Deadline for current epoch.  This is the sum of interval and per
-	 * epoch jitter which is a uniform random variable in [0..interval).
-	 * Epochs always advance by precise multiples of interval, but we
-	 * randomize the deadline to reduce the likelihood of arenas purging in
-	 * lockstep.
-	 */
-	nstime_t		deadline;
-	/*
-	 * Number of unpurged pages at beginning of current epoch.  During epoch
-	 * advancement we use the delta between arena->decay_*.nunpurged and
-	 * extents_npages_get(&arena->extents_*) to determine how many dirty
-	 * pages, if any, were generated.
-	 */
-	size_t			nunpurged;
-	/*
-	 * Trailing log of how many unused dirty pages were generated during
-	 * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
-	 * element is the most recent epoch.  Corresponding epoch times are
-	 * relative to epoch.
-	 */
-	size_t			backlog[SMOOTHSTEP_NSTEPS];
-	/*
-	 * Pointer to associated stats.  These stats are embedded directly in
-	 * the arena's stats due to how stats structures are shared between the
-	 * arena and ctl code.
-	 *
-	 * Synchronization: Same as associated arena's stats field. */
-	arena_stats_decay_t	*stats;
-	/* Peak number of pages in associated extents.  Used for debug only. */
-	uint64_t		ceil_npages;
-};
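The backlog[] field above is a sliding window over the last SMOOTHSTEP_NSTEPS epochs. A hedged sketch of the bookkeeping the comment implies, independent of jemalloc's actual decay code and with SMOOTHSTEP_NSTEPS fixed to 200 only for illustration:

#include <stddef.h>
#include <string.h>

#define SMOOTHSTEP_NSTEPS 200   /* value assumed for illustration */

/*
 * Sketch: advance the backlog ring by `nadvance` epochs.  The oldest entries
 * fall off the front, vacated slots are zeroed, and the dirty pages generated
 * since the last epoch are credited to the newest (last) slot.
 */
static void
backlog_advance_sketch(size_t backlog[SMOOTHSTEP_NSTEPS], size_t nadvance,
    size_t new_dirty_pages) {
	if (nadvance >= SMOOTHSTEP_NSTEPS) {
		memset(backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
	} else {
		memmove(backlog, backlog + nadvance,
		    (SMOOTHSTEP_NSTEPS - nadvance) * sizeof(size_t));
		memset(backlog + SMOOTHSTEP_NSTEPS - nadvance, 0,
		    nadvance * sizeof(size_t));
	}
	backlog[SMOOTHSTEP_NSTEPS - 1] = new_dirty_pages;
}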
 struct arena_s {
 	/*
 	 * Number of threads currently assigned to this arena.  Each thread has
...
@@ -110,28 +53,10 @@ struct arena_s {
 	 *
 	 * Synchronization: tcache_ql_mtx.
 	 */
-	ql_head(tcache_t)			tcache_ql;
+	ql_head(tcache_slow_t)			tcache_ql;
 	ql_head(cache_bin_array_descriptor_t)	cache_bin_array_descriptor_ql;
 	malloc_mutex_t				tcache_ql_mtx;

-	/* Synchronization: internal. */
-	prof_accum_t		prof_accum;
-
-	/*
-	 * PRNG state for cache index randomization of large allocation base
-	 * pointers.
-	 *
-	 * Synchronization: atomic.
-	 */
-	atomic_zu_t		offset_state;
-
-	/*
-	 * Extent serial number generator state.
-	 *
-	 * Synchronization: atomic.
-	 */
-	atomic_zu_t		extent_sn_next;
-
 	/*
 	 * Represents a dss_prec_t, but atomically.
 	 *
...
@@ -139,74 +64,23 @@ struct arena_s {
 	 */
 	atomic_u_t		dss_prec;

-	/*
-	 * Number of pages in active extents.
-	 *
-	 * Synchronization: atomic.
-	 */
-	atomic_zu_t		nactive;
-
 	/*
 	 * Extant large allocations.
 	 *
 	 * Synchronization: large_mtx.
 	 */
-	extent_list_t		large;
+	edata_list_active_t	large;
 	/* Synchronizes all large allocation/update/deallocation. */
 	malloc_mutex_t		large_mtx;

-	/*
-	 * Collections of extents that were previously allocated.  These are
-	 * used when allocating extents, in an attempt to re-use address space.
-	 *
-	 * Synchronization: internal.
-	 */
-	extents_t		extents_dirty;
-	extents_t		extents_muzzy;
-	extents_t		extents_retained;
-
-	/*
-	 * Decay-based purging state, responsible for scheduling extent state
-	 * transitions.
-	 *
-	 * Synchronization: internal.
-	 */
-	arena_decay_t		decay_dirty; /* dirty --> muzzy */
-	arena_decay_t		decay_muzzy; /* muzzy --> retained */
-
-	/*
-	 * Next extent size class in a growing series to use when satisfying a
-	 * request via the extent hooks (only if opt_retain).  This limits the
-	 * number of disjoint virtual memory ranges so that extent merging can
-	 * be effective even if multiple arenas' extent allocation requests are
-	 * highly interleaved.
-	 *
-	 * retain_grow_limit is the max allowed size ind to expand (unless the
-	 * required size is greater).  Default is no limit, and controlled
-	 * through mallctl only.
-	 *
-	 * Synchronization: extent_grow_mtx
-	 */
-	pszind_t		extent_grow_next;
-	pszind_t		retain_grow_limit;
-	malloc_mutex_t		extent_grow_mtx;
-
-	/*
-	 * Available extent structures that were allocated via
-	 * base_alloc_extent().
-	 *
-	 * Synchronization: extent_avail_mtx.
-	 */
-	extent_tree_t		extent_avail;
-	atomic_zu_t		extent_avail_cnt;
-	malloc_mutex_t		extent_avail_mtx;
-
-	/*
-	 * bins is used to store heaps of free regions.
-	 *
-	 * Synchronization: internal.
-	 */
-	bins_t			bins[SC_NBINS];
+	/* The page-level allocator shard this arena uses. */
+	pa_shard_t		pa_shard;
+
+	/*
+	 * A cached copy of base->ind.  This can get accessed on hot paths;
+	 * looking it up in base requires an extra pointer hop / cache miss.
+	 */
+	unsigned		ind;

 	/*
 	 * Base allocator, from which arena metadata are allocated.
...
@@ -216,17 +90,12 @@ struct arena_s {
 	base_t			*base;
 	/* Used to determine uptime.  Read-only after initialization. */
 	nstime_t		create_time;
-};

-/* Used in conjunction with tsd for fast arena-related context lookup. */
-struct arena_tdata_s {
-	ticker_t		decay_ticker;
-};
+	/*
+	 * The arena is allocated alongside its bins; really this is a
+	 * dynamically sized array determined by the binshard settings.
+	 */
+	bin_t			bins[0];
+};

-/* Used to pass rtree lookup context down the path. */
-struct alloc_ctx_s {
-	szind_t szind;
-	bool slab;
-};

-#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H */
+#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_H */
deps/jemalloc/include/jemalloc/internal/arena_structs_a.h
deleted
100644 → 0
View file @
e26a769d
#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H
#define JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H

#include "jemalloc/internal/bitmap.h"

struct arena_slab_data_s {
	/* Per region allocated/deallocated bitmap. */
	bitmap_t	bitmap[BITMAP_GROUPS_MAX];
};

#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H */
deps/jemalloc/include/jemalloc/internal/arena_types.h
View file @
d4439bd4
...
@@ -3,21 +3,14 @@

 #include "jemalloc/internal/sc.h"

-/* Maximum number of regions in one slab. */
-#define LG_SLAB_MAXREGS		(LG_PAGE - SC_LG_TINY_MIN)
-#define SLAB_MAXREGS		(1U << LG_SLAB_MAXREGS)
-
 /* Default decay times in milliseconds. */
 #define DIRTY_DECAY_MS_DEFAULT	ZD(10 * 1000)
 #define MUZZY_DECAY_MS_DEFAULT	(0)
 /* Number of event ticks between time checks. */
-#define DECAY_NTICKS_PER_UPDATE	1000
+#define ARENA_DECAY_NTICKS_PER_UPDATE	1000

-typedef struct arena_slab_data_s arena_slab_data_t;
 typedef struct arena_decay_s arena_decay_t;
 typedef struct arena_s arena_t;
-typedef struct arena_tdata_s arena_tdata_t;
-typedef struct alloc_ctx_s alloc_ctx_t;

 typedef enum {
 	percpu_arena_mode_names_base = 0, /* Used for options processing. */
...
@@ -48,4 +41,18 @@ typedef enum {
  */
 #define OVERSIZE_THRESHOLD_DEFAULT	(8 << 20)

+struct arena_config_s {
+	/* extent hooks to be used for the arena */
+	extent_hooks_t *extent_hooks;
+	/*
+	 * Use extent hooks for metadata (base) allocations when true.
+	 */
+	bool metadata_use_hooks;
+};
+
+typedef struct arena_config_s arena_config_t;
+
+extern const arena_config_t arena_config_default;
+
 #endif /* JEMALLOC_INTERNAL_ARENA_TYPES_H */
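The new arena_config_t groups the per-arena knobs that used to be passed around separately. A hedged sketch of how a caller that creates arenas programmatically might fill it in; my_extent_hooks stands in for a complete, user-supplied extent_hooks_t and is not part of this commit:

/* Sketch only: start from the declared default and override selectively. */
static arena_config_t
make_arena_config_sketch(extent_hooks_t *my_extent_hooks) {
	arena_config_t config = arena_config_default;
	config.extent_hooks = my_extent_hooks;
	/* Keep base (metadata) allocations on jemalloc's default mappings. */
	config.metadata_use_hooks = false;
	return config;
}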
deps/jemalloc/include/jemalloc/internal/atomic.h
View file @
d4439bd4
...
@@ -51,6 +51,27 @@

 #define ATOMIC_ACQ_REL atomic_memory_order_acq_rel
 #define ATOMIC_SEQ_CST atomic_memory_order_seq_cst
/*
* Another convenience -- simple atomic helper functions.
*/
#define JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(type, short_type, \
lg_size) \
JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, lg_size) \
ATOMIC_INLINE void \
atomic_load_add_store_##short_type(atomic_##short_type##_t *a, \
type inc) { \
type oldval = atomic_load_##short_type(a, ATOMIC_RELAXED); \
type newval = oldval + inc; \
atomic_store_##short_type(a, newval, ATOMIC_RELAXED); \
} \
ATOMIC_INLINE void \
atomic_load_sub_store_##short_type(atomic_##short_type##_t *a, \
type inc) { \
type oldval = atomic_load_##short_type(a, ATOMIC_RELAXED); \
type newval = oldval - inc; \
atomic_store_##short_type(a, newval, ATOMIC_RELAXED); \
}
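The expanded helpers generated above perform a relaxed load, a plain add or subtract, and a relaxed store, which is only safe when some external lock already serializes writers. A hedged usage sketch (stats_mtx and n_slabs are illustrative names, not jemalloc fields):

/* Sketch: bump a counter of atomic type while its mutex is already held. */
static atomic_zu_t n_slabs;

static void
count_new_slab_locked(void) {
	/* Caller holds stats_mtx, so no fetch_add is needed. */
	atomic_load_add_store_zu(&n_slabs, 1);
}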
 /*
  * Not all platforms have 64-bit atomics.  If we do, this #define exposes that
  * fact.
  */
...
@@ -67,18 +88,18 @@ JEMALLOC_GENERATE_ATOMICS(void *, p, LG_SIZEOF_PTR)
 JEMALLOC_GENERATE_ATOMICS(bool, b, 0)

-JEMALLOC_GENERATE_INT_ATOMICS(unsigned, u, LG_SIZEOF_INT)
+JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(unsigned, u, LG_SIZEOF_INT)

-JEMALLOC_GENERATE_INT_ATOMICS(size_t, zu, LG_SIZEOF_PTR)
+JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(size_t, zu, LG_SIZEOF_PTR)

-JEMALLOC_GENERATE_INT_ATOMICS(ssize_t, zd, LG_SIZEOF_PTR)
+JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(ssize_t, zd, LG_SIZEOF_PTR)

-JEMALLOC_GENERATE_INT_ATOMICS(uint8_t, u8, 0)
+JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(uint8_t, u8, 0)

-JEMALLOC_GENERATE_INT_ATOMICS(uint32_t, u32, 2)
+JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(uint32_t, u32, 2)

 #ifdef JEMALLOC_ATOMIC_U64
-JEMALLOC_GENERATE_INT_ATOMICS(uint64_t, u64, 3)
+JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(uint64_t, u64, 3)
 #endif

 #undef ATOMIC_INLINE
...
deps/jemalloc/include/jemalloc/internal/background_thread_externs.h
View file @
d4439bd4
...
@@ -12,8 +12,9 @@ extern background_thread_info_t *background_thread_info;
 bool background_thread_create(tsd_t *tsd, unsigned arena_ind);
 bool background_threads_enable(tsd_t *tsd);
 bool background_threads_disable(tsd_t *tsd);
-void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
-    arena_decay_t *decay, size_t npages_new);
+bool background_thread_is_started(background_thread_info_t *info);
+void background_thread_wakeup_early(background_thread_info_t *info,
+    nstime_t *remaining_sleep);
 void background_thread_prefork0(tsdn_t *tsdn);
 void background_thread_prefork1(tsdn_t *tsdn);
 void background_thread_postfork_parent(tsdn_t *tsdn);
...
@@ -27,6 +28,6 @@ extern int pthread_create_wrapper(pthread_t *__restrict, const pthread_attr_t *,
     void *(*)(void *), void *__restrict);
 #endif
 bool background_thread_boot0(void);
-bool background_thread_boot1(tsdn_t *tsdn);
+bool background_thread_boot1(tsdn_t *tsdn, base_t *base);

 #endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H */
deps/jemalloc/include/jemalloc/internal/background_thread_inlines.h
View file @
d4439bd4
...
@@ -45,18 +45,4 @@ background_thread_indefinite_sleep(background_thread_info_t *info) {
 	return atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE);
 }

-JEMALLOC_ALWAYS_INLINE void
-arena_background_thread_inactivity_check(tsdn_t *tsdn, arena_t *arena,
-    bool is_background_thread) {
-	if (!background_thread_enabled() || is_background_thread) {
-		return;
-	}
-	background_thread_info_t *info =
-	    arena_background_thread_info_get(arena);
-	if (background_thread_indefinite_sleep(info)) {
-		background_thread_interval_check(tsdn, arena,
-		    &arena->decay_dirty, 0);
-	}
-}
-
 #endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H */
deps/jemalloc/include/jemalloc/internal/background_thread_structs.h
View file @
d4439bd4
...
@@ -11,6 +11,17 @@
 #define MAX_BACKGROUND_THREAD_LIMIT MALLOCX_ARENA_LIMIT
 #define DEFAULT_NUM_BACKGROUND_THREAD 4

+/*
+ * These exist only as a transitional state.  Eventually, deferral should be
+ * part of the PAI, and each implementation can indicate wait times with more
+ * specificity.
+ */
+#define BACKGROUND_THREAD_HPA_INTERVAL_MAX_UNINITIALIZED (-2)
+#define BACKGROUND_THREAD_HPA_INTERVAL_MAX_DEFAULT_WHEN_ENABLED 5000
+
+#define BACKGROUND_THREAD_DEFERRED_MIN UINT64_C(0)
+#define BACKGROUND_THREAD_DEFERRED_MAX UINT64_MAX
+
 typedef enum {
 	background_thread_stopped,
 	background_thread_started,
...
@@ -48,6 +59,7 @@ struct background_thread_stats_s {
 	size_t num_threads;
 	uint64_t num_runs;
 	nstime_t run_interval;
+	mutex_prof_data_t max_counter_per_bg_thd;
 };
 typedef struct background_thread_stats_s background_thread_stats_t;
...
deps/jemalloc/include/jemalloc/internal/base.h
0 → 100644
View file @
d4439bd4
#ifndef JEMALLOC_INTERNAL_BASE_H
#define JEMALLOC_INTERNAL_BASE_H

#include "jemalloc/internal/edata.h"
#include "jemalloc/internal/ehooks.h"
#include "jemalloc/internal/mutex.h"

enum metadata_thp_mode_e {
	metadata_thp_disabled   = 0,
	/*
	 * Lazily enable hugepage for metadata. To avoid high RSS caused by THP
	 * + low usage arena (i.e. THP becomes a significant percentage), the
	 * "auto" option only starts using THP after a base allocator used up
	 * the first THP region.  Starting from the second hugepage (in a single
	 * arena), "auto" behaves the same as "always", i.e. madvise hugepage
	 * right away.
	 */
	metadata_thp_auto       = 1,
	metadata_thp_always     = 2,
	metadata_thp_mode_limit = 3
};
typedef enum metadata_thp_mode_e metadata_thp_mode_t;

#define METADATA_THP_DEFAULT metadata_thp_disabled
extern metadata_thp_mode_t opt_metadata_thp;
extern const char *metadata_thp_mode_names[];

/* Embedded at the beginning of every block of base-managed virtual memory. */
typedef struct base_block_s base_block_t;
struct base_block_s {
	/* Total size of block's virtual memory mapping. */
	size_t size;

	/* Next block in list of base's blocks. */
	base_block_t *next;

	/* Tracks unused trailing space. */
	edata_t edata;
};

typedef struct base_s base_t;
struct base_s {
	/*
	 * User-configurable extent hook functions.
	 */
	ehooks_t ehooks;

	/*
	 * User-configurable extent hook functions for metadata allocations.
	 */
	ehooks_t ehooks_base;

	/* Protects base_alloc() and base_stats_get() operations. */
	malloc_mutex_t mtx;

	/* Using THP when true (metadata_thp auto mode). */
	bool auto_thp_switched;
	/*
	 * Most recent size class in the series of increasingly large base
	 * extents.  Logarithmic spacing between subsequent allocations ensures
	 * that the total number of distinct mappings remains small.
	 */
	pszind_t pind_last;

	/* Serial number generation state. */
	size_t extent_sn_next;

	/* Chain of all blocks associated with base. */
	base_block_t *blocks;

	/* Heap of extents that track unused trailing space within blocks. */
	edata_heap_t avail[SC_NSIZES];

	/* Stats, only maintained if config_stats. */
	size_t allocated;
	size_t resident;
	size_t mapped;
	/* Number of THP regions touched. */
	size_t n_thp;
};

static inline unsigned
base_ind_get(const base_t *base) {
	return ehooks_ind_get(&base->ehooks);
}

static inline bool
metadata_thp_enabled(void) {
	return (opt_metadata_thp != metadata_thp_disabled);
}

base_t *b0get(void);
base_t *base_new(tsdn_t *tsdn, unsigned ind,
    const extent_hooks_t *extent_hooks, bool metadata_use_hooks);
void base_delete(tsdn_t *tsdn, base_t *base);
ehooks_t *base_ehooks_get(base_t *base);
ehooks_t *base_ehooks_get_for_metadata(base_t *base);
extent_hooks_t *base_extent_hooks_set(base_t *base,
    extent_hooks_t *extent_hooks);
void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment);
edata_t *base_alloc_edata(tsdn_t *tsdn, base_t *base);
void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated,
    size_t *resident, size_t *mapped, size_t *n_thp);
void base_prefork(tsdn_t *tsdn, base_t *base);
void base_postfork_parent(tsdn_t *tsdn, base_t *base);
void base_postfork_child(tsdn_t *tsdn, base_t *base);
bool base_boot(tsdn_t *tsdn);

#endif /* JEMALLOC_INTERNAL_BASE_H */
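A hedged sketch of the lifecycle the declarations above imply. The arena index, sizes, and `hooks` argument are placeholders (jemalloc internally passes its own defaults), and error handling is abbreviated; this is not code from the commit.

/* Sketch: create a base, carve metadata out of it, read stats, tear down. */
static void
base_lifecycle_sketch(tsdn_t *tsdn, const extent_hooks_t *hooks) {
	base_t *base = base_new(tsdn, /* ind */ 42, hooks,
	    /* metadata_use_hooks */ true);
	if (base == NULL) {
		return;
	}

	/*
	 * Metadata from base_alloc() is never freed individually; the whole
	 * base is torn down at once with base_delete().
	 */
	void *meta = base_alloc(tsdn, base, 256, CACHELINE);

	size_t allocated, resident, mapped, n_thp;
	base_stats_get(tsdn, base, &allocated, &resident, &mapped, &n_thp);

	(void)meta;
	base_delete(tsdn, base);
}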
deps/jemalloc/include/jemalloc/internal/base_externs.h
deleted
100644 → 0
View file @
e26a769d
#ifndef JEMALLOC_INTERNAL_BASE_EXTERNS_H
#define JEMALLOC_INTERNAL_BASE_EXTERNS_H

extern metadata_thp_mode_t opt_metadata_thp;
extern const char *metadata_thp_mode_names[];

base_t *b0get(void);
base_t *base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
void base_delete(tsdn_t *tsdn, base_t *base);
extent_hooks_t *base_extent_hooks_get(base_t *base);
extent_hooks_t *base_extent_hooks_set(base_t *base,
    extent_hooks_t *extent_hooks);
void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment);
extent_t *base_alloc_extent(tsdn_t *tsdn, base_t *base);
void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated,
    size_t *resident, size_t *mapped, size_t *n_thp);
void base_prefork(tsdn_t *tsdn, base_t *base);
void base_postfork_parent(tsdn_t *tsdn, base_t *base);
void base_postfork_child(tsdn_t *tsdn, base_t *base);
bool base_boot(tsdn_t *tsdn);

#endif /* JEMALLOC_INTERNAL_BASE_EXTERNS_H */
deps/jemalloc/include/jemalloc/internal/base_inlines.h
deleted
100644 → 0
View file @
e26a769d
#ifndef JEMALLOC_INTERNAL_BASE_INLINES_H
#define JEMALLOC_INTERNAL_BASE_INLINES_H

static inline unsigned
base_ind_get(const base_t *base) {
	return base->ind;
}

static inline bool
metadata_thp_enabled(void) {
	return (opt_metadata_thp != metadata_thp_disabled);
}

#endif /* JEMALLOC_INTERNAL_BASE_INLINES_H */
deps/jemalloc/include/jemalloc/internal/base_structs.h
deleted
100644 → 0
View file @
e26a769d
#ifndef JEMALLOC_INTERNAL_BASE_STRUCTS_H
#define JEMALLOC_INTERNAL_BASE_STRUCTS_H

#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/sc.h"

/* Embedded at the beginning of every block of base-managed virtual memory. */
struct base_block_s {
	/* Total size of block's virtual memory mapping. */
	size_t size;

	/* Next block in list of base's blocks. */
	base_block_t *next;

	/* Tracks unused trailing space. */
	extent_t extent;
};

struct base_s {
	/* Associated arena's index within the arenas array. */
	unsigned ind;

	/*
	 * User-configurable extent hook functions.  Points to an
	 * extent_hooks_t.
	 */
	atomic_p_t extent_hooks;

	/* Protects base_alloc() and base_stats_get() operations. */
	malloc_mutex_t mtx;

	/* Using THP when true (metadata_thp auto mode). */
	bool auto_thp_switched;
	/*
	 * Most recent size class in the series of increasingly large base
	 * extents.  Logarithmic spacing between subsequent allocations ensures
	 * that the total number of distinct mappings remains small.
	 */
	pszind_t pind_last;

	/* Serial number generation state. */
	size_t extent_sn_next;

	/* Chain of all blocks associated with base. */
	base_block_t *blocks;

	/* Heap of extents that track unused trailing space within blocks. */
	extent_heap_t avail[SC_NSIZES];

	/* Stats, only maintained if config_stats. */
	size_t allocated;
	size_t resident;
	size_t mapped;
	/* Number of THP regions touched. */
	size_t n_thp;
};

#endif /* JEMALLOC_INTERNAL_BASE_STRUCTS_H */
deps/jemalloc/include/jemalloc/internal/base_types.h
deleted
100644 → 0
View file @
e26a769d
#ifndef JEMALLOC_INTERNAL_BASE_TYPES_H
#define JEMALLOC_INTERNAL_BASE_TYPES_H

typedef struct base_block_s base_block_t;
typedef struct base_s base_t;

#define METADATA_THP_DEFAULT metadata_thp_disabled

/*
 * In auto mode, arenas switch to huge pages for the base allocator on the
 * second base block.  a0 switches to thp on the 5th block (after 20 megabytes
 * of metadata), since more metadata (e.g. rtree nodes) come from a0's base.
 */
#define BASE_AUTO_THP_THRESHOLD    2
#define BASE_AUTO_THP_THRESHOLD_A0 5

typedef enum {
	metadata_thp_disabled   = 0,
	/*
	 * Lazily enable hugepage for metadata. To avoid high RSS caused by THP
	 * + low usage arena (i.e. THP becomes a significant percentage), the
	 * "auto" option only starts using THP after a base allocator used up
	 * the first THP region.  Starting from the second hugepage (in a single
	 * arena), "auto" behaves the same as "always", i.e. madvise hugepage
	 * right away.
	 */
	metadata_thp_auto       = 1,
	metadata_thp_always     = 2,
	metadata_thp_mode_limit = 3
} metadata_thp_mode_t;

#endif /* JEMALLOC_INTERNAL_BASE_TYPES_H */
deps/jemalloc/include/jemalloc/internal/bin.h
View file @
d4439bd4
...
@@ -3,8 +3,7 @@

 #include "jemalloc/internal/bin_stats.h"
 #include "jemalloc/internal/bin_types.h"
-#include "jemalloc/internal/extent_types.h"
-#include "jemalloc/internal/extent_structs.h"
+#include "jemalloc/internal/edata.h"
 #include "jemalloc/internal/mutex.h"
 #include "jemalloc/internal/sc.h"
...
@@ -12,74 +11,34 @@
  * A bin contains a set of extents that are currently being used for slab
  * allocations.
  */

-/*
- * Read-only information associated with each element of arena_t's bins array
- * is stored separately, partly to reduce memory usage (only one copy, rather
- * than one per arena), but mainly to avoid false cacheline sharing.
- *
- * Each slab has the following layout:
- *
- *   /--------------------\
- *   | region 0           |
- *   |--------------------|
- *   | region 1           |
- *   |--------------------|
- *   | ...                |
- *   | ...                |
- *   | ...                |
- *   |--------------------|
- *   | region nregs-1     |
- *   \--------------------/
- */
-typedef struct bin_info_s bin_info_t;
-struct bin_info_s {
-	/* Size of regions in a slab for this bin's size class. */
-	size_t			reg_size;
-
-	/* Total size of a slab for this bin's size class. */
-	size_t			slab_size;
-
-	/* Total number of regions in a slab for this bin's size class. */
-	uint32_t		nregs;
-
-	/* Number of sharded bins in each arena for this size class. */
-	uint32_t		n_shards;
-
-	/*
-	 * Metadata used to manipulate bitmaps for slabs associated with this
-	 * bin.
-	 */
-	bitmap_info_t		bitmap_info;
-};
-
-extern bin_info_t bin_infos[SC_NBINS];
-
 typedef struct bin_s bin_t;
 struct bin_s {
 	/* All operations on bin_t fields require lock ownership. */
 	malloc_mutex_t		lock;

+	/*
+	 * Bin statistics.  These get touched every time the lock is acquired,
+	 * so put them close by in the hopes of getting some cache locality.
+	 */
+	bin_stats_t		stats;
+
 	/*
 	 * Current slab being used to service allocations of this bin's size
 	 * class.  slabcur is independent of slabs_{nonfull,full}; whenever
 	 * slabcur is reassigned, the previous slab must be deallocated or
 	 * inserted into slabs_{nonfull,full}.
 	 */
-	extent_t		*slabcur;
+	edata_t			*slabcur;

 	/*
 	 * Heap of non-full slabs.  This heap is used to assure that new
 	 * allocations come from the non-full slab that is oldest/lowest in
 	 * memory.
 	 */
-	extent_heap_t		slabs_nonfull;
+	edata_heap_t		slabs_nonfull;

 	/* List used to track full slabs. */
-	extent_list_t		slabs_full;
-
-	/* Bin statistics. */
-	bin_stats_t		stats;
+	edata_list_active_t	slabs_full;
 };

 /* A set of sharded bins of the same size class. */
...
@@ -92,7 +51,6 @@ struct bins_s {
 void bin_shard_sizes_boot(unsigned bin_shards[SC_NBINS]);
 bool bin_update_shard_size(unsigned bin_shards[SC_NBINS], size_t start_size,
     size_t end_size, size_t nshards);
-void bin_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]);

 /* Initializes a bin to empty.  Returns true on error. */
 bool bin_init(bin_t *bin);
...
@@ -104,19 +62,20 @@ void bin_postfork_child(tsdn_t *tsdn, bin_t *bin);
 /* Stats. */
 static inline void
-bin_stats_merge(tsdn_t *tsdn, bin_stats_t *dst_bin_stats, bin_t *bin) {
+bin_stats_merge(tsdn_t *tsdn, bin_stats_data_t *dst_bin_stats, bin_t *bin) {
 	malloc_mutex_lock(tsdn, &bin->lock);
 	malloc_mutex_prof_accum(tsdn, &dst_bin_stats->mutex_data, &bin->lock);
-	dst_bin_stats->nmalloc += bin->stats.nmalloc;
-	dst_bin_stats->ndalloc += bin->stats.ndalloc;
-	dst_bin_stats->nrequests += bin->stats.nrequests;
-	dst_bin_stats->curregs += bin->stats.curregs;
-	dst_bin_stats->nfills += bin->stats.nfills;
-	dst_bin_stats->nflushes += bin->stats.nflushes;
-	dst_bin_stats->nslabs += bin->stats.nslabs;
-	dst_bin_stats->reslabs += bin->stats.reslabs;
-	dst_bin_stats->curslabs += bin->stats.curslabs;
-	dst_bin_stats->nonfull_slabs += bin->stats.nonfull_slabs;
+	bin_stats_t *stats = &dst_bin_stats->stats_data;
+	stats->nmalloc += bin->stats.nmalloc;
+	stats->ndalloc += bin->stats.ndalloc;
+	stats->nrequests += bin->stats.nrequests;
+	stats->curregs += bin->stats.curregs;
+	stats->nfills += bin->stats.nfills;
+	stats->nflushes += bin->stats.nflushes;
+	stats->nslabs += bin->stats.nslabs;
+	stats->reslabs += bin->stats.reslabs;
+	stats->curslabs += bin->stats.curslabs;
+	stats->nonfull_slabs += bin->stats.nonfull_slabs;
 	malloc_mutex_unlock(tsdn, &bin->lock);
 }
...
deps/jemalloc/include/jemalloc/internal/bin_info.h
0 → 100644
View file @
d4439bd4
#ifndef JEMALLOC_INTERNAL_BIN_INFO_H
#define JEMALLOC_INTERNAL_BIN_INFO_H

#include "jemalloc/internal/bitmap.h"

/*
 * Read-only information associated with each element of arena_t's bins array
 * is stored separately, partly to reduce memory usage (only one copy, rather
 * than one per arena), but mainly to avoid false cacheline sharing.
 *
 * Each slab has the following layout:
 *
 *   /--------------------\
 *   | region 0           |
 *   |--------------------|
 *   | region 1           |
 *   |--------------------|
 *   | ...                |
 *   | ...                |
 *   | ...                |
 *   |--------------------|
 *   | region nregs-1     |
 *   \--------------------/
 */
typedef struct bin_info_s bin_info_t;
struct bin_info_s {
	/* Size of regions in a slab for this bin's size class. */
	size_t			reg_size;

	/* Total size of a slab for this bin's size class. */
	size_t			slab_size;

	/* Total number of regions in a slab for this bin's size class. */
	uint32_t		nregs;

	/* Number of sharded bins in each arena for this size class. */
	uint32_t		n_shards;

	/*
	 * Metadata used to manipulate bitmaps for slabs associated with this
	 * bin.
	 */
	bitmap_info_t		bitmap_info;
};

extern bin_info_t bin_infos[SC_NBINS];

void bin_info_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]);

#endif /* JEMALLOC_INTERNAL_BIN_INFO_H */
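The three sizing fields are tied together by nregs * reg_size == slab_size. A small worked example, with the 48-byte / 3-page numbers chosen purely for illustration (assuming 4 KiB pages), not taken from the actual bin tables:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

int
main(void) {
	size_t   reg_size  = 48;
	size_t   slab_size = 3 * 4096;   /* smallest page multiple of 48 */
	uint32_t nregs     = (uint32_t)(slab_size / reg_size);   /* 256 */

	assert(nregs * reg_size == slab_size);   /* no wasted tail space */
	return 0;
}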