ruanhaishen / redis · Commits

Commit 4d5911b4
Authored Oct 10, 2021 by Yoav Steinberg
Merge commit '220a0f08' as 'deps/jemalloc'
Parents: 4a884343, 220a0f08
Changes: 163 files
deps/jemalloc/include/jemalloc/internal/seq.h
0 → 100644
#ifndef JEMALLOC_INTERNAL_SEQ_H
#define JEMALLOC_INTERNAL_SEQ_H
#include "jemalloc/internal/atomic.h"
/*
* A simple seqlock implementation.
*/
#define seq_define(type, short_type) \
typedef struct { \
atomic_zu_t seq; \
atomic_zu_t data[ \
(sizeof(type) + sizeof(size_t) - 1) / sizeof(size_t)]; \
} seq_##short_type##_t; \
\
/* \
* No internal synchronization -- the caller must ensure that there's \
* only a single writer at a time. \
 */ \
static inline void \
seq_store_##short_type(seq_##short_type##_t *dst, type *src) { \
size_t buf[sizeof(dst->data) / sizeof(size_t)]; \
buf[sizeof(buf) / sizeof(size_t) - 1] = 0; \
memcpy(buf, src, sizeof(type)); \
size_t old_seq = atomic_load_zu(&dst->seq, ATOMIC_RELAXED); \
atomic_store_zu(&dst->seq, old_seq + 1, ATOMIC_RELAXED); \
atomic_fence(ATOMIC_RELEASE); \
for (size_t i = 0; i < sizeof(buf) / sizeof(size_t); i++) { \
atomic_store_zu(&dst->data[i], buf[i], ATOMIC_RELAXED); \
} \
atomic_store_zu(&dst->seq, old_seq + 2, ATOMIC_RELEASE); \
} \
\
/* Returns whether or not the read was consistent. */ \
static inline bool \
seq_try_load_##short_type(type *dst, seq_##short_type##_t *src) { \
size_t buf[sizeof(src->data) / sizeof(size_t)]; \
size_t seq1 = atomic_load_zu(&src->seq, ATOMIC_ACQUIRE); \
if (seq1 % 2 != 0) { \
return false; \
} \
for (size_t i = 0; i < sizeof(buf) / sizeof(size_t); i++) { \
buf[i] = atomic_load_zu(&src->data[i], ATOMIC_RELAXED); \
} \
atomic_fence(ATOMIC_ACQUIRE); \
size_t seq2 = atomic_load_zu(&src->seq, ATOMIC_RELAXED); \
if (seq1 != seq2) { \
return false; \
} \
memcpy(dst, buf, sizeof(type)); \
return true; \
}
#endif /* JEMALLOC_INTERNAL_SEQ_H */
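Editorial note, not part of the commit: the seqlock macro above is instantiated once per protected type and then used with a single writer and any number of readers. A minimal sketch, assuming the jemalloc internal headers are on the include path and using a hypothetical counters_t:

typedef struct { uint64_t hits; uint64_t misses; } counters_t;
seq_define(counters_t, counters)  /* emits seq_counters_t, seq_store_counters, seq_try_load_counters */

static seq_counters_t shared;     /* written by exactly one thread */

static void publish(uint64_t h, uint64_t m) {
    counters_t tmp = {h, m};
    seq_store_counters(&shared, &tmp);  /* seq: even -> odd, copy data, odd -> even */
}

static bool snapshot(counters_t *out) {
    return seq_try_load_counters(out, &shared);  /* false if a write raced; caller retries */
}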
deps/jemalloc/include/jemalloc/internal/smoothstep.h
0 → 100644
#ifndef JEMALLOC_INTERNAL_SMOOTHSTEP_H
#define JEMALLOC_INTERNAL_SMOOTHSTEP_H
/*
* This file was generated by the following command:
* sh smoothstep.sh smoother 200 24 3 15
*/
/******************************************************************************/
/*
* This header defines a precomputed table based on the smoothstep family of
* sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0
* to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so
* that floating point math can be avoided.
 *
 *   smoothstep(x)    = -2x^3  +  3x^2
 *
 *   smootherstep(x)  =  6x^5  - 15x^4 + 10x^3
 *
 *   smootheststep(x) = -20x^7 + 70x^6 - 84x^5 + 35x^4
 */
#define SMOOTHSTEP_VARIANT "smoother"
#define SMOOTHSTEP_NSTEPS 200
#define SMOOTHSTEP_BFP 24
#define SMOOTHSTEP \
 /* STEP(step, h, x, y) */ \
STEP( 1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \
STEP( 2, UINT64_C(0x00000000000000a5), 0.010, 0.000009850600000) \
STEP( 3, UINT64_C(0x0000000000000229), 0.015, 0.000032995181250) \
STEP( 4, UINT64_C(0x0000000000000516), 0.020, 0.000077619200000) \
STEP( 5, UINT64_C(0x00000000000009dc), 0.025, 0.000150449218750) \
STEP( 6, UINT64_C(0x00000000000010e8), 0.030, 0.000257995800000) \
STEP( 7, UINT64_C(0x0000000000001aa4), 0.035, 0.000406555756250) \
STEP( 8, UINT64_C(0x0000000000002777), 0.040, 0.000602214400000) \
STEP( 9, UINT64_C(0x00000000000037c2), 0.045, 0.000850847793750) \
STEP( 10, UINT64_C(0x0000000000004be6), 0.050, 0.001158125000000) \
STEP( 11, UINT64_C(0x000000000000643c), 0.055, 0.001529510331250) \
STEP( 12, UINT64_C(0x000000000000811f), 0.060, 0.001970265600000) \
STEP( 13, UINT64_C(0x000000000000a2e2), 0.065, 0.002485452368750) \
STEP( 14, UINT64_C(0x000000000000c9d8), 0.070, 0.003079934200000) \
STEP( 15, UINT64_C(0x000000000000f64f), 0.075, 0.003758378906250) \
STEP( 16, UINT64_C(0x0000000000012891), 0.080, 0.004525260800000) \
STEP( 17, UINT64_C(0x00000000000160e7), 0.085, 0.005384862943750) \
STEP( 18, UINT64_C(0x0000000000019f95), 0.090, 0.006341279400000) \
STEP( 19, UINT64_C(0x000000000001e4dc), 0.095, 0.007398417481250) \
STEP( 20, UINT64_C(0x00000000000230fc), 0.100, 0.008560000000000) \
STEP( 21, UINT64_C(0x0000000000028430), 0.105, 0.009829567518750) \
STEP( 22, UINT64_C(0x000000000002deb0), 0.110, 0.011210480600000) \
STEP( 23, UINT64_C(0x00000000000340b1), 0.115, 0.012705922056250) \
STEP( 24, UINT64_C(0x000000000003aa67), 0.120, 0.014318899200000) \
STEP( 25, UINT64_C(0x0000000000041c00), 0.125, 0.016052246093750) \
STEP( 26, UINT64_C(0x00000000000495a8), 0.130, 0.017908625800000) \
STEP( 27, UINT64_C(0x000000000005178b), 0.135, 0.019890532631250) \
STEP( 28, UINT64_C(0x000000000005a1cf), 0.140, 0.022000294400000) \
STEP( 29, UINT64_C(0x0000000000063498), 0.145, 0.024240074668750) \
STEP( 30, UINT64_C(0x000000000006d009), 0.150, 0.026611875000000) \
STEP( 31, UINT64_C(0x000000000007743f), 0.155, 0.029117537206250) \
STEP( 32, UINT64_C(0x0000000000082157), 0.160, 0.031758745600000) \
STEP( 33, UINT64_C(0x000000000008d76b), 0.165, 0.034537029243750) \
STEP( 34, UINT64_C(0x0000000000099691), 0.170, 0.037453764200000) \
STEP( 35, UINT64_C(0x00000000000a5edf), 0.175, 0.040510175781250) \
STEP( 36, UINT64_C(0x00000000000b3067), 0.180, 0.043707340800000) \
STEP( 37, UINT64_C(0x00000000000c0b38), 0.185, 0.047046189818750) \
STEP( 38, UINT64_C(0x00000000000cef5e), 0.190, 0.050527509400000) \
STEP( 39, UINT64_C(0x00000000000ddce6), 0.195, 0.054151944356250) \
STEP( 40, UINT64_C(0x00000000000ed3d8), 0.200, 0.057920000000000) \
STEP( 41, UINT64_C(0x00000000000fd439), 0.205, 0.061832044393750) \
STEP( 42, UINT64_C(0x000000000010de0e), 0.210, 0.065888310600000) \
STEP( 43, UINT64_C(0x000000000011f158), 0.215, 0.070088898931250) \
STEP( 44, UINT64_C(0x0000000000130e17), 0.220, 0.074433779200000) \
STEP( 45, UINT64_C(0x0000000000143448), 0.225, 0.078922792968750) \
STEP( 46, UINT64_C(0x00000000001563e7), 0.230, 0.083555655800000) \
STEP( 47, UINT64_C(0x0000000000169cec), 0.235, 0.088331959506250) \
STEP( 48, UINT64_C(0x000000000017df4f), 0.240, 0.093251174400000) \
STEP( 49, UINT64_C(0x0000000000192b04), 0.245, 0.098312651543750) \
STEP( 50, UINT64_C(0x00000000001a8000), 0.250, 0.103515625000000) \
STEP( 51, UINT64_C(0x00000000001bde32), 0.255, 0.108859214081250) \
STEP( 52, UINT64_C(0x00000000001d458b), 0.260, 0.114342425600000) \
STEP( 53, UINT64_C(0x00000000001eb5f8), 0.265, 0.119964156118750) \
STEP( 54, UINT64_C(0x0000000000202f65), 0.270, 0.125723194200000) \
STEP( 55, UINT64_C(0x000000000021b1bb), 0.275, 0.131618222656250) \
STEP( 56, UINT64_C(0x0000000000233ce3), 0.280, 0.137647820800000) \
STEP( 57, UINT64_C(0x000000000024d0c3), 0.285, 0.143810466693750) \
STEP( 58, UINT64_C(0x0000000000266d40), 0.290, 0.150104539400000) \
STEP( 59, UINT64_C(0x000000000028123d), 0.295, 0.156528321231250) \
STEP( 60, UINT64_C(0x000000000029bf9c), 0.300, 0.163080000000000) \
STEP( 61, UINT64_C(0x00000000002b753d), 0.305, 0.169757671268750) \
STEP( 62, UINT64_C(0x00000000002d32fe), 0.310, 0.176559340600000) \
STEP( 63, UINT64_C(0x00000000002ef8bc), 0.315, 0.183482925806250) \
STEP( 64, UINT64_C(0x000000000030c654), 0.320, 0.190526259200000) \
STEP( 65, UINT64_C(0x0000000000329b9f), 0.325, 0.197687089843750) \
STEP( 66, UINT64_C(0x0000000000347875), 0.330, 0.204963085800000) \
STEP( 67, UINT64_C(0x0000000000365cb0), 0.335, 0.212351836381250) \
STEP( 68, UINT64_C(0x0000000000384825), 0.340, 0.219850854400000) \
STEP( 69, UINT64_C(0x00000000003a3aa8), 0.345, 0.227457578418750) \
STEP( 70, UINT64_C(0x00000000003c340f), 0.350, 0.235169375000000) \
STEP( 71, UINT64_C(0x00000000003e342b), 0.355, 0.242983540956250) \
STEP( 72, UINT64_C(0x0000000000403ace), 0.360, 0.250897305600000) \
STEP( 73, UINT64_C(0x00000000004247c8), 0.365, 0.258907832993750) \
STEP( 74, UINT64_C(0x0000000000445ae9), 0.370, 0.267012224200000) \
STEP( 75, UINT64_C(0x0000000000467400), 0.375, 0.275207519531250) \
STEP( 76, UINT64_C(0x00000000004892d8), 0.380, 0.283490700800000) \
STEP( 77, UINT64_C(0x00000000004ab740), 0.385, 0.291858693568750) \
STEP( 78, UINT64_C(0x00000000004ce102), 0.390, 0.300308369400000) \
STEP( 79, UINT64_C(0x00000000004f0fe9), 0.395, 0.308836548106250) \
STEP( 80, UINT64_C(0x00000000005143bf), 0.400, 0.317440000000000) \
STEP( 81, UINT64_C(0x0000000000537c4d), 0.405, 0.326115448143750) \
STEP( 82, UINT64_C(0x000000000055b95b), 0.410, 0.334859570600000) \
STEP( 83, UINT64_C(0x000000000057fab1), 0.415, 0.343669002681250) \
STEP( 84, UINT64_C(0x00000000005a4015), 0.420, 0.352540339200000) \
STEP( 85, UINT64_C(0x00000000005c894e), 0.425, 0.361470136718750) \
STEP( 86, UINT64_C(0x00000000005ed622), 0.430, 0.370454915800000) \
STEP( 87, UINT64_C(0x0000000000612655), 0.435, 0.379491163256250) \
STEP( 88, UINT64_C(0x00000000006379ac), 0.440, 0.388575334400000) \
STEP( 89, UINT64_C(0x000000000065cfeb), 0.445, 0.397703855293750) \
STEP( 90, UINT64_C(0x00000000006828d6), 0.450, 0.406873125000000) \
STEP( 91, UINT64_C(0x00000000006a842f), 0.455, 0.416079517831250) \
STEP( 92, UINT64_C(0x00000000006ce1bb), 0.460, 0.425319385600000) \
STEP( 93, UINT64_C(0x00000000006f413a), 0.465, 0.434589059868750) \
STEP( 94, UINT64_C(0x000000000071a270), 0.470, 0.443884854200000) \
STEP( 95, UINT64_C(0x000000000074051d), 0.475, 0.453203066406250) \
STEP( 96, UINT64_C(0x0000000000766905), 0.480, 0.462539980800000) \
STEP( 97, UINT64_C(0x000000000078cde7), 0.485, 0.471891870443750) \
STEP( 98, UINT64_C(0x00000000007b3387), 0.490, 0.481254999400000) \
STEP( 99, UINT64_C(0x00000000007d99a4), 0.495, 0.490625624981250) \
STEP( 100, UINT64_C(0x0000000000800000), 0.500, 0.500000000000000) \
STEP( 101, UINT64_C(0x000000000082665b), 0.505, 0.509374375018750) \
STEP( 102, UINT64_C(0x000000000084cc78), 0.510, 0.518745000600000) \
STEP( 103, UINT64_C(0x0000000000873218), 0.515, 0.528108129556250) \
STEP( 104, UINT64_C(0x00000000008996fa), 0.520, 0.537460019200000) \
STEP( 105, UINT64_C(0x00000000008bfae2), 0.525, 0.546796933593750) \
STEP( 106, UINT64_C(0x00000000008e5d8f), 0.530, 0.556115145800000) \
STEP( 107, UINT64_C(0x000000000090bec5), 0.535, 0.565410940131250) \
STEP( 108, UINT64_C(0x0000000000931e44), 0.540, 0.574680614400000) \
STEP( 109, UINT64_C(0x0000000000957bd0), 0.545, 0.583920482168750) \
STEP( 110, UINT64_C(0x000000000097d729), 0.550, 0.593126875000000) \
STEP( 111, UINT64_C(0x00000000009a3014), 0.555, 0.602296144706250) \
STEP( 112, UINT64_C(0x00000000009c8653), 0.560, 0.611424665600000) \
STEP( 113, UINT64_C(0x00000000009ed9aa), 0.565, 0.620508836743750) \
STEP( 114, UINT64_C(0x0000000000a129dd), 0.570, 0.629545084200000) \
STEP( 115, UINT64_C(0x0000000000a376b1), 0.575, 0.638529863281250) \
STEP( 116, UINT64_C(0x0000000000a5bfea), 0.580, 0.647459660800000) \
STEP( 117, UINT64_C(0x0000000000a8054e), 0.585, 0.656330997318750) \
STEP( 118, UINT64_C(0x0000000000aa46a4), 0.590, 0.665140429400000) \
STEP( 119, UINT64_C(0x0000000000ac83b2), 0.595, 0.673884551856250) \
STEP( 120, UINT64_C(0x0000000000aebc40), 0.600, 0.682560000000000) \
STEP( 121, UINT64_C(0x0000000000b0f016), 0.605, 0.691163451893750) \
STEP( 122, UINT64_C(0x0000000000b31efd), 0.610, 0.699691630600000) \
STEP( 123, UINT64_C(0x0000000000b548bf), 0.615, 0.708141306431250) \
STEP( 124, UINT64_C(0x0000000000b76d27), 0.620, 0.716509299200000) \
STEP( 125, UINT64_C(0x0000000000b98c00), 0.625, 0.724792480468750) \
STEP( 126, UINT64_C(0x0000000000bba516), 0.630, 0.732987775800000) \
STEP( 127, UINT64_C(0x0000000000bdb837), 0.635, 0.741092167006250) \
STEP( 128, UINT64_C(0x0000000000bfc531), 0.640, 0.749102694400000) \
STEP( 129, UINT64_C(0x0000000000c1cbd4), 0.645, 0.757016459043750) \
STEP( 130, UINT64_C(0x0000000000c3cbf0), 0.650, 0.764830625000000) \
STEP( 131, UINT64_C(0x0000000000c5c557), 0.655, 0.772542421581250) \
STEP( 132, UINT64_C(0x0000000000c7b7da), 0.660, 0.780149145600000) \
STEP( 133, UINT64_C(0x0000000000c9a34f), 0.665, 0.787648163618750) \
STEP( 134, UINT64_C(0x0000000000cb878a), 0.670, 0.795036914200000) \
STEP( 135, UINT64_C(0x0000000000cd6460), 0.675, 0.802312910156250) \
STEP( 136, UINT64_C(0x0000000000cf39ab), 0.680, 0.809473740800000) \
STEP( 137, UINT64_C(0x0000000000d10743), 0.685, 0.816517074193750) \
STEP( 138, UINT64_C(0x0000000000d2cd01), 0.690, 0.823440659400000) \
STEP( 139, UINT64_C(0x0000000000d48ac2), 0.695, 0.830242328731250) \
STEP( 140, UINT64_C(0x0000000000d64063), 0.700, 0.836920000000000) \
STEP( 141, UINT64_C(0x0000000000d7edc2), 0.705, 0.843471678768750) \
STEP( 142, UINT64_C(0x0000000000d992bf), 0.710, 0.849895460600000) \
STEP( 143, UINT64_C(0x0000000000db2f3c), 0.715, 0.856189533306250) \
STEP( 144, UINT64_C(0x0000000000dcc31c), 0.720, 0.862352179200000) \
STEP( 145, UINT64_C(0x0000000000de4e44), 0.725, 0.868381777343750) \
STEP( 146, UINT64_C(0x0000000000dfd09a), 0.730, 0.874276805800000) \
STEP( 147, UINT64_C(0x0000000000e14a07), 0.735, 0.880035843881250) \
STEP( 148, UINT64_C(0x0000000000e2ba74), 0.740, 0.885657574400000) \
STEP( 149, UINT64_C(0x0000000000e421cd), 0.745, 0.891140785918750) \
STEP( 150, UINT64_C(0x0000000000e58000), 0.750, 0.896484375000000) \
STEP( 151, UINT64_C(0x0000000000e6d4fb), 0.755, 0.901687348456250) \
STEP( 152, UINT64_C(0x0000000000e820b0), 0.760, 0.906748825600000) \
STEP( 153, UINT64_C(0x0000000000e96313), 0.765, 0.911668040493750) \
STEP( 154, UINT64_C(0x0000000000ea9c18), 0.770, 0.916444344200000) \
STEP( 155, UINT64_C(0x0000000000ebcbb7), 0.775, 0.921077207031250) \
STEP( 156, UINT64_C(0x0000000000ecf1e8), 0.780, 0.925566220800000) \
STEP( 157, UINT64_C(0x0000000000ee0ea7), 0.785, 0.929911101068750) \
STEP( 158, UINT64_C(0x0000000000ef21f1), 0.790, 0.934111689400000) \
STEP( 159, UINT64_C(0x0000000000f02bc6), 0.795, 0.938167955606250) \
STEP( 160, UINT64_C(0x0000000000f12c27), 0.800, 0.942080000000000) \
STEP( 161, UINT64_C(0x0000000000f22319), 0.805, 0.945848055643750) \
STEP( 162, UINT64_C(0x0000000000f310a1), 0.810, 0.949472490600000) \
STEP( 163, UINT64_C(0x0000000000f3f4c7), 0.815, 0.952953810181250) \
STEP( 164, UINT64_C(0x0000000000f4cf98), 0.820, 0.956292659200000) \
STEP( 165, UINT64_C(0x0000000000f5a120), 0.825, 0.959489824218750) \
STEP( 166, UINT64_C(0x0000000000f6696e), 0.830, 0.962546235800000) \
STEP( 167, UINT64_C(0x0000000000f72894), 0.835, 0.965462970756250) \
STEP( 168, UINT64_C(0x0000000000f7dea8), 0.840, 0.968241254400000) \
STEP( 169, UINT64_C(0x0000000000f88bc0), 0.845, 0.970882462793750) \
STEP( 170, UINT64_C(0x0000000000f92ff6), 0.850, 0.973388125000000) \
STEP( 171, UINT64_C(0x0000000000f9cb67), 0.855, 0.975759925331250) \
STEP( 172, UINT64_C(0x0000000000fa5e30), 0.860, 0.977999705600000) \
STEP( 173, UINT64_C(0x0000000000fae874), 0.865, 0.980109467368750) \
STEP( 174, UINT64_C(0x0000000000fb6a57), 0.870, 0.982091374200000) \
STEP( 175, UINT64_C(0x0000000000fbe400), 0.875, 0.983947753906250) \
STEP( 176, UINT64_C(0x0000000000fc5598), 0.880, 0.985681100800000) \
STEP( 177, UINT64_C(0x0000000000fcbf4e), 0.885, 0.987294077943750) \
STEP( 178, UINT64_C(0x0000000000fd214f), 0.890, 0.988789519400000) \
STEP( 179, UINT64_C(0x0000000000fd7bcf), 0.895, 0.990170432481250) \
STEP( 180, UINT64_C(0x0000000000fdcf03), 0.900, 0.991440000000000) \
STEP( 181, UINT64_C(0x0000000000fe1b23), 0.905, 0.992601582518750) \
STEP( 182, UINT64_C(0x0000000000fe606a), 0.910, 0.993658720600000) \
STEP( 183, UINT64_C(0x0000000000fe9f18), 0.915, 0.994615137056250) \
STEP( 184, UINT64_C(0x0000000000fed76e), 0.920, 0.995474739200000) \
STEP( 185, UINT64_C(0x0000000000ff09b0), 0.925, 0.996241621093750) \
STEP( 186, UINT64_C(0x0000000000ff3627), 0.930, 0.996920065800000) \
STEP( 187, UINT64_C(0x0000000000ff5d1d), 0.935, 0.997514547631250) \
STEP( 188, UINT64_C(0x0000000000ff7ee0), 0.940, 0.998029734400000) \
STEP( 189, UINT64_C(0x0000000000ff9bc3), 0.945, 0.998470489668750) \
STEP( 190, UINT64_C(0x0000000000ffb419), 0.950, 0.998841875000000) \
STEP( 191, UINT64_C(0x0000000000ffc83d), 0.955, 0.999149152206250) \
STEP( 192, UINT64_C(0x0000000000ffd888), 0.960, 0.999397785600000) \
STEP( 193, UINT64_C(0x0000000000ffe55b), 0.965, 0.999593444243750) \
STEP( 194, UINT64_C(0x0000000000ffef17), 0.970, 0.999742004200000) \
STEP( 195, UINT64_C(0x0000000000fff623), 0.975, 0.999849550781250) \
STEP( 196, UINT64_C(0x0000000000fffae9), 0.980, 0.999922380800000) \
STEP( 197, UINT64_C(0x0000000000fffdd6), 0.985, 0.999967004818750) \
STEP( 198, UINT64_C(0x0000000000ffff5a), 0.990, 0.999990149400000) \
STEP( 199, UINT64_C(0x0000000000ffffeb), 0.995, 0.999998759356250) \
STEP( 200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000) \
#endif /* JEMALLOC_INTERNAL_SMOOTHSTEP_H */
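Editorial note, not part of the commit: the table is consumed by expanding the STEP X-macro, and each h entry is y scaled by 2^SMOOTHSTEP_BFP. A sketch of recovering the floating point values, assuming only the macros defined above:

static const uint64_t smoothstep_tab[SMOOTHSTEP_NSTEPS] = {
#define STEP(step, h, x, y) h,
    SMOOTHSTEP
#undef STEP
};

/* h = y * 2^SMOOTHSTEP_BFP, so dividing by 2^24 recovers y in [0, 1]. */
static double smoothstep_y(size_t i) {
    return (double)smoothstep_tab[i] / (double)(1ULL << SMOOTHSTEP_BFP);
}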
deps/jemalloc/include/jemalloc/internal/smoothstep.sh
0 → 100755
#!/bin/sh
#
# Generate a discrete lookup table for a sigmoid function in the smoothstep
# family (https://en.wikipedia.org/wiki/Smoothstep), where the lookup table
# entries correspond to x in [1/nsteps, 2/nsteps, ..., nsteps/nsteps]. Encode
# the entries using a binary fixed point representation.
#
# Usage: smoothstep.sh <variant> <nsteps> <bfp> <xprec> <yprec>
#
# <variant> is in {smooth, smoother, smoothest}.
# <nsteps> must be greater than zero.
# <bfp> must be in [0..62]; reasonable values are roughly [10..30].
# <xprec> is x decimal precision.
# <yprec> is y decimal precision.
#set -x
cmd="sh smoothstep.sh $*"
variant=$1
nsteps=$2
bfp=$3
xprec=$4
yprec=$5

case "${variant}" in
  smooth)
    ;;
  smoother)
    ;;
  smoothest)
    ;;
  *)
    echo "Unsupported variant"
    exit 1
    ;;
esac

smooth() {
  step=$1
  y=`echo ${yprec} k ${step} ${nsteps} / sx _2 lx 3 ^ '*' 3 lx 2 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
  h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}'`
}

smoother() {
  step=$1
  y=`echo ${yprec} k ${step} ${nsteps} / sx 6 lx 5 ^ '*' _15 lx 4 ^ '*' + 10 lx 3 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
  h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}'`
}

smoothest() {
  step=$1
  y=`echo ${yprec} k ${step} ${nsteps} / sx _20 lx 7 ^ '*' 70 lx 6 ^ '*' + _84 lx 5 ^ '*' + 35 lx 4 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
  h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}'`
}

cat <<EOF
#ifndef JEMALLOC_INTERNAL_SMOOTHSTEP_H
#define JEMALLOC_INTERNAL_SMOOTHSTEP_H

/*
 * This file was generated by the following command:
 *   $cmd
 */
/******************************************************************************/

/*
 * This header defines a precomputed table based on the smoothstep family of
 * sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0
 * to 1 in 0 <= x <= 1.  The table is stored as integer fixed point values so
 * that floating point math can be avoided.
 *
 *   smoothstep(x)    = -2x^3  +  3x^2
 *
 *   smootherstep(x)  =  6x^5  - 15x^4 + 10x^3
 *
 *   smootheststep(x) = -20x^7 + 70x^6 - 84x^5 + 35x^4
 */

#define SMOOTHSTEP_VARIANT  "${variant}"
#define SMOOTHSTEP_NSTEPS   ${nsteps}
#define SMOOTHSTEP_BFP      ${bfp}
#define SMOOTHSTEP                                                      \\
 /* STEP(step, h, x, y) */                                              \\
EOF

s=1
while [ $s -le $nsteps ] ; do
  $variant ${s}
  x=`echo ${xprec} k ${s} ${nsteps} / p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`

  printf '    STEP(%4d, UINT64_C(0x%016x), %s, %s) \\\n' ${s} ${h} ${x} ${y}

  s=$((s+1))
done
echo

cat <<EOF
#endif /* JEMALLOC_INTERNAL_SMOOTHSTEP_H */
EOF
deps/jemalloc/include/jemalloc/internal/spin.h
0 → 100644
#ifndef JEMALLOC_INTERNAL_SPIN_H
#define JEMALLOC_INTERNAL_SPIN_H
#define SPIN_INITIALIZER {0U}
typedef struct {
    unsigned iteration;
} spin_t;

static inline void
spin_cpu_spinwait() {
#if HAVE_CPU_SPINWAIT
    CPU_SPINWAIT;
#else
    volatile int x = 0;
    x = x;
#endif
}

static inline void
spin_adaptive(spin_t *spin) {
    volatile uint32_t i;

    if (spin->iteration < 5) {
        for (i = 0; i < (1U << spin->iteration); i++) {
            spin_cpu_spinwait();
        }
        spin->iteration++;
    } else {
#ifdef _WIN32
        SwitchToThread();
#else
        sched_yield();
#endif
    }
}

#undef SPIN_INLINE

#endif /* JEMALLOC_INTERNAL_SPIN_H */
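Editorial note, not part of the commit: spin_adaptive() is intended to be called from a retry loop; it spins 1, 2, 4, 8, then 16 times and afterwards yields the CPU. A sketch with a hypothetical flag to wait on:

static void wait_for_flag(volatile int *flag) {
    spin_t spinner = SPIN_INITIALIZER;
    while (!*flag) {              /* stand-in for any try-then-retry operation */
        spin_adaptive(&spinner);  /* exponential back-off, then sched_yield()/SwitchToThread() */
    }
}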
deps/jemalloc/include/jemalloc/internal/stats.h
0 → 100644
#ifndef JEMALLOC_INTERNAL_STATS_H
#define JEMALLOC_INTERNAL_STATS_H
/* OPTION(opt, var_name, default, set_value_to) */
#define STATS_PRINT_OPTIONS \
OPTION('J', json, false, true) \
OPTION('g', general, true, false) \
OPTION('m', merged, config_stats, false) \
OPTION('d', destroyed, config_stats, false) \
OPTION('a', unmerged, config_stats, false) \
OPTION('b', bins, true, false) \
OPTION('l', large, true, false) \
OPTION('x', mutex, true, false) \
OPTION('e', extents, true, false)
enum {
#define OPTION(o, v, d, s) stats_print_option_num_##v,
    STATS_PRINT_OPTIONS
#undef OPTION
    stats_print_tot_num_options
};

/* Options for stats_print. */
extern bool opt_stats_print;
extern char opt_stats_print_opts[stats_print_tot_num_options + 1];

/* Implements je_malloc_stats_print. */
void stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts);

#endif /* JEMALLOC_INTERNAL_STATS_H */
deps/jemalloc/include/jemalloc/internal/sz.h
0 → 100644
#ifndef JEMALLOC_INTERNAL_SIZE_H
#define JEMALLOC_INTERNAL_SIZE_H
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/util.h"
/*
* sz module: Size computations.
*
* Some abbreviations used here:
* p: Page
* ind: Index
* s, sz: Size
* u: Usable size
* a: Aligned
*
* These are not always used completely consistently, but should be enough to
* interpret function names. E.g. sz_psz2ind converts page size to page size
* index; sz_sa2u converts a (size, alignment) allocation request to the usable
* size that would result from such an allocation.
*/
/*
* sz_pind2sz_tab encodes the same information as could be computed by
* sz_pind2sz_compute().
*/
extern size_t sz_pind2sz_tab[SC_NPSIZES + 1];
/*
 * sz_index2size_tab encodes the same information as could be computed (at
 * unacceptable cost in some code paths) by sz_index2size_compute().
 */
extern size_t sz_index2size_tab[SC_NSIZES];
/*
 * sz_size2index_tab is a compact lookup table that rounds request sizes up to
 * size classes.  In order to reduce cache footprint, the table is compressed,
 * and all accesses are via sz_size2index().
 */
extern uint8_t sz_size2index_tab[];

static const size_t sz_large_pad =
#ifdef JEMALLOC_CACHE_OBLIVIOUS
    PAGE
#else
    0
#endif
    ;

extern void sz_boot(const sc_data_t *sc_data);

JEMALLOC_ALWAYS_INLINE pszind_t
sz_psz2ind(size_t psz) {
    if (unlikely(psz > SC_LARGE_MAXCLASS)) {
        return SC_NPSIZES;
    }
    pszind_t x = lg_floor((psz << 1) - 1);
    pszind_t shift = (x < SC_LG_NGROUP + LG_PAGE) ?
        0 : x - (SC_LG_NGROUP + LG_PAGE);
    pszind_t grp = shift << SC_LG_NGROUP;

    pszind_t lg_delta = (x < SC_LG_NGROUP + LG_PAGE + 1) ?
        LG_PAGE : x - SC_LG_NGROUP - 1;

    size_t delta_inverse_mask = ZU(-1) << lg_delta;
    pszind_t mod = ((((psz - 1) & delta_inverse_mask) >> lg_delta)) &
        ((ZU(1) << SC_LG_NGROUP) - 1);

    pszind_t ind = grp + mod;
    return ind;
}

static inline size_t
sz_pind2sz_compute(pszind_t pind) {
    if (unlikely(pind == SC_NPSIZES)) {
        return SC_LARGE_MAXCLASS + PAGE;
    }
    size_t grp = pind >> SC_LG_NGROUP;
    size_t mod = pind & ((ZU(1) << SC_LG_NGROUP) - 1);

    size_t grp_size_mask = ~((!!grp) - 1);
    size_t grp_size = ((ZU(1) << (LG_PAGE + (SC_LG_NGROUP - 1))) << grp)
        & grp_size_mask;

    size_t shift = (grp == 0) ? 1 : grp;
    size_t lg_delta = shift + (LG_PAGE - 1);
    size_t mod_size = (mod + 1) << lg_delta;

    size_t sz = grp_size + mod_size;
    return sz;
}

static inline size_t
sz_pind2sz_lookup(pszind_t pind) {
    size_t ret = (size_t)sz_pind2sz_tab[pind];
    assert(ret == sz_pind2sz_compute(pind));
    return ret;
}

static inline size_t
sz_pind2sz(pszind_t pind) {
    assert(pind < SC_NPSIZES + 1);
    return sz_pind2sz_lookup(pind);
}

static inline size_t
sz_psz2u(size_t psz) {
    if (unlikely(psz > SC_LARGE_MAXCLASS)) {
        return SC_LARGE_MAXCLASS + PAGE;
    }
    size_t x = lg_floor((psz << 1) - 1);
    size_t lg_delta = (x < SC_LG_NGROUP + LG_PAGE + 1) ?
        LG_PAGE : x - SC_LG_NGROUP - 1;
    size_t delta = ZU(1) << lg_delta;
    size_t delta_mask = delta - 1;
    size_t usize = (psz + delta_mask) & ~delta_mask;
    return usize;
}

static inline szind_t
sz_size2index_compute(size_t size) {
    if (unlikely(size > SC_LARGE_MAXCLASS)) {
        return SC_NSIZES;
    }

    if (size == 0) {
        return 0;
    }
#if (SC_NTINY != 0)
    if (size <= (ZU(1) << SC_LG_TINY_MAXCLASS)) {
        szind_t lg_tmin = SC_LG_TINY_MAXCLASS - SC_NTINY + 1;
        szind_t lg_ceil = lg_floor(pow2_ceil_zu(size));
        return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
    }
#endif
    {
        szind_t x = lg_floor((size << 1) - 1);
        szind_t shift = (x < SC_LG_NGROUP + LG_QUANTUM) ? 0 :
            x - (SC_LG_NGROUP + LG_QUANTUM);
        szind_t grp = shift << SC_LG_NGROUP;

        szind_t lg_delta = (x < SC_LG_NGROUP + LG_QUANTUM + 1) ?
            LG_QUANTUM : x - SC_LG_NGROUP - 1;

        size_t delta_inverse_mask = ZU(-1) << lg_delta;
        szind_t mod = ((((size - 1) & delta_inverse_mask) >> lg_delta)) &
            ((ZU(1) << SC_LG_NGROUP) - 1);

        szind_t index = SC_NTINY + grp + mod;
        return index;
    }
}

JEMALLOC_ALWAYS_INLINE szind_t
sz_size2index_lookup(size_t size) {
    assert(size <= SC_LOOKUP_MAXCLASS);
    szind_t ret = (sz_size2index_tab[(size + (ZU(1) << SC_LG_TINY_MIN) - 1)
        >> SC_LG_TINY_MIN]);
    assert(ret == sz_size2index_compute(size));
    return ret;
}

JEMALLOC_ALWAYS_INLINE szind_t
sz_size2index(size_t size) {
    if (likely(size <= SC_LOOKUP_MAXCLASS)) {
        return sz_size2index_lookup(size);
    }
    return sz_size2index_compute(size);
}

static inline size_t
sz_index2size_compute(szind_t index) {
#if (SC_NTINY > 0)
    if (index < SC_NTINY) {
        return (ZU(1) << (SC_LG_TINY_MAXCLASS - SC_NTINY + 1 + index));
    }
#endif
    {
        size_t reduced_index = index - SC_NTINY;
        size_t grp = reduced_index >> SC_LG_NGROUP;
        size_t mod = reduced_index & ((ZU(1) << SC_LG_NGROUP) - 1);

        size_t grp_size_mask = ~((!!grp) - 1);
        size_t grp_size = ((ZU(1) << (LG_QUANTUM +
            (SC_LG_NGROUP - 1))) << grp) & grp_size_mask;

        size_t shift = (grp == 0) ? 1 : grp;
        size_t lg_delta = shift + (LG_QUANTUM - 1);
        size_t mod_size = (mod + 1) << lg_delta;

        size_t usize = grp_size + mod_size;
        return usize;
    }
}

JEMALLOC_ALWAYS_INLINE size_t
sz_index2size_lookup(szind_t index) {
    size_t ret = (size_t)sz_index2size_tab[index];
    assert(ret == sz_index2size_compute(index));
    return ret;
}

JEMALLOC_ALWAYS_INLINE size_t
sz_index2size(szind_t index) {
    assert(index < SC_NSIZES);
    return sz_index2size_lookup(index);
}

JEMALLOC_ALWAYS_INLINE size_t
sz_s2u_compute(size_t size) {
    if (unlikely(size > SC_LARGE_MAXCLASS)) {
        return 0;
    }

    if (size == 0) {
        size++;
    }
#if (SC_NTINY > 0)
    if (size <= (ZU(1) << SC_LG_TINY_MAXCLASS)) {
        size_t lg_tmin = SC_LG_TINY_MAXCLASS - SC_NTINY + 1;
        size_t lg_ceil = lg_floor(pow2_ceil_zu(size));
        return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
            (ZU(1) << lg_ceil));
    }
#endif
    {
        size_t x = lg_floor((size << 1) - 1);
        size_t lg_delta = (x < SC_LG_NGROUP + LG_QUANTUM + 1) ?
            LG_QUANTUM : x - SC_LG_NGROUP - 1;
        size_t delta = ZU(1) << lg_delta;
        size_t delta_mask = delta - 1;
        size_t usize = (size + delta_mask) & ~delta_mask;
        return usize;
    }
}

JEMALLOC_ALWAYS_INLINE size_t
sz_s2u_lookup(size_t size) {
    size_t ret = sz_index2size_lookup(sz_size2index_lookup(size));

    assert(ret == sz_s2u_compute(size));
    return ret;
}

/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_ALWAYS_INLINE size_t
sz_s2u(size_t size) {
    if (likely(size <= SC_LOOKUP_MAXCLASS)) {
        return sz_s2u_lookup(size);
    }
    return sz_s2u_compute(size);
}

/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_ALWAYS_INLINE size_t
sz_sa2u(size_t size, size_t alignment) {
    size_t usize;

    assert(alignment != 0 && ((alignment - 1) & alignment) == 0);

    /* Try for a small size class. */
    if (size <= SC_SMALL_MAXCLASS && alignment < PAGE) {
        /*
         * Round size up to the nearest multiple of alignment.
         *
         * This done, we can take advantage of the fact that for each
         * small size class, every object is aligned at the smallest
         * power of two that is non-zero in the base two representation
         * of the size.  For example:
         *
         *   Size |   Base 2 | Minimum alignment
         *   -----+----------+------------------
         *     96 |  1100000 |                32
         *    144 | 10100000 |                32
         *    192 | 11000000 |                64
         */
        usize = sz_s2u(ALIGNMENT_CEILING(size, alignment));
        if (usize < SC_LARGE_MINCLASS) {
            return usize;
        }
    }

    /* Large size class.  Beware of overflow. */
    if (unlikely(alignment > SC_LARGE_MAXCLASS)) {
        return 0;
    }

    /* Make sure result is a large size class. */
    if (size <= SC_LARGE_MINCLASS) {
        usize = SC_LARGE_MINCLASS;
    } else {
        usize = sz_s2u(size);
        if (usize < size) {
            /* size_t overflow. */
            return 0;
        }
    }

    /*
     * Calculate the multi-page mapping that large_palloc() would need in
     * order to guarantee the alignment.
     */
    if (usize + sz_large_pad + PAGE_CEILING(alignment) - PAGE < usize) {
        /* size_t overflow. */
        return 0;
    }
    return usize;
}

#endif /* JEMALLOC_INTERNAL_SIZE_H */
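Editorial note, not part of the commit: to make the abbreviations concrete, a request size is mapped to a size-class index and back to its usable size. The numbers in the comments are illustrative and depend on the configured size classes (here the defaults):

static void size_class_demo(size_t request) {
    szind_t ind = sz_size2index(request);  /* e.g. 100 -> index of the 112-byte class */
    size_t usize = sz_index2size(ind);     /* usable size actually backing the allocation */
    assert(usize == sz_s2u(request));      /* sz_s2u composes the two steps */
    assert(usize >= request);
}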
deps/jemalloc/include/jemalloc/internal/tcache_externs.h
0 → 100644
#ifndef JEMALLOC_INTERNAL_TCACHE_EXTERNS_H
#define JEMALLOC_INTERNAL_TCACHE_EXTERNS_H
extern bool opt_tcache;
extern ssize_t opt_lg_tcache_max;
extern cache_bin_info_t *tcache_bin_info;

/*
 * Number of tcache bins.  There are SC_NBINS small-object bins, plus 0 or more
 * large-object bins.
 */
extern unsigned nhbins;

/* Maximum cached size class. */
extern size_t tcache_maxclass;

/*
 * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and
 * usable via the MALLOCX_TCACHE() flag.  The automatic per thread tcaches are
 * completely disjoint from this data structure.  tcaches starts off as a sparse
 * array, so it has no physical memory footprint until individual pages are
 * touched.  This allows the entire array to be allocated the first time an
 * explicit tcache is created without a disproportionate impact on memory usage.
 */
extern tcaches_t *tcaches;

size_t tcache_salloc(tsdn_t *tsdn, const void *ptr);
void tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
    cache_bin_t *tbin, szind_t binind, bool *tcache_success);
void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
    szind_t binind, unsigned rem);
void tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind,
    unsigned rem, tcache_t *tcache);
void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
tcache_t *tcache_create_explicit(tsd_t *tsd);
void tcache_cleanup(tsd_t *tsd);
void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
bool tcaches_create(tsd_t *tsd, unsigned *r_ind);
void tcaches_flush(tsd_t *tsd, unsigned ind);
void tcaches_destroy(tsd_t *tsd, unsigned ind);
bool tcache_boot(tsdn_t *tsdn);
void tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
void tcache_prefork(tsdn_t *tsdn);
void tcache_postfork_parent(tsdn_t *tsdn);
void tcache_postfork_child(tsdn_t *tsdn);
void tcache_flush(tsd_t *tsd);
bool tsd_tcache_data_init(tsd_t *tsd);
bool tsd_tcache_enabled_data_init(tsd_t *tsd);

#endif /* JEMALLOC_INTERNAL_TCACHE_EXTERNS_H */
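Editorial note, not part of the commit: explicit tcaches are driven through jemalloc's public mallctl/mallocx interface rather than these internal symbols. A sketch of the lifecycle the comment above describes:

#include <jemalloc/jemalloc.h>

static void explicit_tcache_demo(void) {
    unsigned tc;
    size_t sz = sizeof(tc);
    if (mallctl("tcache.create", &tc, &sz, NULL, 0) == 0) {
        void *p = mallocx(64, MALLOCX_TCACHE(tc));  /* allocate through the explicit tcache */
        dallocx(p, MALLOCX_TCACHE(tc));
        mallctl("tcache.destroy", NULL, NULL, &tc, sizeof(tc));
    }
}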
deps/jemalloc/include/jemalloc/internal/tcache_inlines.h
0 → 100644
#ifndef JEMALLOC_INTERNAL_TCACHE_INLINES_H
#define JEMALLOC_INTERNAL_TCACHE_INLINES_H
#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/util.h"
static inline bool
tcache_enabled_get(tsd_t *tsd) {
    return tsd_tcache_enabled_get(tsd);
}

static inline void
tcache_enabled_set(tsd_t *tsd, bool enabled) {
    bool was_enabled = tsd_tcache_enabled_get(tsd);

    if (!was_enabled && enabled) {
        tsd_tcache_data_init(tsd);
    } else if (was_enabled && !enabled) {
        tcache_cleanup(tsd);
    }
    /* Commit the state last.  Above calls check current state. */
    tsd_tcache_enabled_set(tsd, enabled);
    tsd_slow_update(tsd);
}

JEMALLOC_ALWAYS_INLINE void
tcache_event(tsd_t *tsd, tcache_t *tcache) {
    if (TCACHE_GC_INCR == 0) {
        return;
    }

    if (unlikely(ticker_tick(&tcache->gc_ticker))) {
        tcache_event_hard(tsd, tcache);
    }
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
    size_t size, szind_t binind, bool zero, bool slow_path) {
    void *ret;
    cache_bin_t *bin;
    bool tcache_success;
    size_t usize JEMALLOC_CC_SILENCE_INIT(0);

    assert(binind < SC_NBINS);
    bin = tcache_small_bin_get(tcache, binind);
    ret = cache_bin_alloc_easy(bin, &tcache_success);
    assert(tcache_success == (ret != NULL));
    if (unlikely(!tcache_success)) {
        bool tcache_hard_success;
        arena = arena_choose(tsd, arena);
        if (unlikely(arena == NULL)) {
            return NULL;
        }

        ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
            bin, binind, &tcache_hard_success);
        if (tcache_hard_success == false) {
            return NULL;
        }
    }

    assert(ret);
    /*
     * Only compute usize if required.  The checks in the following if
     * statement are all static.
     */
    if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
        usize = sz_index2size(binind);
        assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
    }

    if (likely(!zero)) {
        if (slow_path && config_fill) {
            if (unlikely(opt_junk_alloc)) {
                arena_alloc_junk_small(ret, &bin_infos[binind],
                    false);
            } else if (unlikely(opt_zero)) {
                memset(ret, 0, usize);
            }
        }
    } else {
        if (slow_path && config_fill && unlikely(opt_junk_alloc)) {
            arena_alloc_junk_small(ret, &bin_infos[binind], true);
        }
        memset(ret, 0, usize);
    }

    if (config_stats) {
        bin->tstats.nrequests++;
    }
    if (config_prof) {
        tcache->prof_accumbytes += usize;
    }
    tcache_event(tsd, tcache);
    return ret;
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    szind_t binind, bool zero, bool slow_path) {
    void *ret;
    cache_bin_t *bin;
    bool tcache_success;

    assert(binind >= SC_NBINS && binind < nhbins);
    bin = tcache_large_bin_get(tcache, binind);
    ret = cache_bin_alloc_easy(bin, &tcache_success);
    assert(tcache_success == (ret != NULL));
    if (unlikely(!tcache_success)) {
        /*
         * Only allocate one large object at a time, because it's quite
         * expensive to create one and not use it.
         */
        arena = arena_choose(tsd, arena);
        if (unlikely(arena == NULL)) {
            return NULL;
        }

        ret = large_malloc(tsd_tsdn(tsd), arena, sz_s2u(size), zero);
        if (ret == NULL) {
            return NULL;
        }
    } else {
        size_t usize JEMALLOC_CC_SILENCE_INIT(0);

        /* Only compute usize on demand */
        if (config_prof || (slow_path && config_fill) ||
            unlikely(zero)) {
            usize = sz_index2size(binind);
            assert(usize <= tcache_maxclass);
        }

        if (likely(!zero)) {
            if (slow_path && config_fill) {
                if (unlikely(opt_junk_alloc)) {
                    memset(ret, JEMALLOC_ALLOC_JUNK, usize);
                } else if (unlikely(opt_zero)) {
                    memset(ret, 0, usize);
                }
            }
        } else {
            memset(ret, 0, usize);
        }

        if (config_stats) {
            bin->tstats.nrequests++;
        }
        if (config_prof) {
            tcache->prof_accumbytes += usize;
        }
    }

    tcache_event(tsd, tcache);
    return ret;
}

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
    bool slow_path) {
    cache_bin_t *bin;
    cache_bin_info_t *bin_info;

    assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SC_SMALL_MAXCLASS);

    if (slow_path && config_fill && unlikely(opt_junk_free)) {
        arena_dalloc_junk_small(ptr, &bin_infos[binind]);
    }

    bin = tcache_small_bin_get(tcache, binind);
    bin_info = &tcache_bin_info[binind];
    if (unlikely(!cache_bin_dalloc_easy(bin, bin_info, ptr))) {
        tcache_bin_flush_small(tsd, tcache, bin, binind,
            (bin_info->ncached_max >> 1));
        bool ret = cache_bin_dalloc_easy(bin, bin_info, ptr);
        assert(ret);
    }

    tcache_event(tsd, tcache);
}

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
    bool slow_path) {
    cache_bin_t *bin;
    cache_bin_info_t *bin_info;

    assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SC_SMALL_MAXCLASS);
    assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);

    if (slow_path && config_fill && unlikely(opt_junk_free)) {
        large_dalloc_junk(ptr, sz_index2size(binind));
    }

    bin = tcache_large_bin_get(tcache, binind);
    bin_info = &tcache_bin_info[binind];
    if (unlikely(bin->ncached == bin_info->ncached_max)) {
        tcache_bin_flush_large(tsd, bin, binind,
            (bin_info->ncached_max >> 1), tcache);
    }
    assert(bin->ncached < bin_info->ncached_max);
    bin->ncached++;
    *(bin->avail - bin->ncached) = ptr;

    tcache_event(tsd, tcache);
}

JEMALLOC_ALWAYS_INLINE tcache_t *
tcaches_get(tsd_t *tsd, unsigned ind) {
    tcaches_t *elm = &tcaches[ind];
    if (unlikely(elm->tcache == NULL)) {
        malloc_printf("<jemalloc>: invalid tcache id (%u).\n", ind);
        abort();
    } else if (unlikely(elm->tcache == TCACHES_ELM_NEED_REINIT)) {
        elm->tcache = tcache_create_explicit(tsd);
    }
    return elm->tcache;
}

#endif /* JEMALLOC_INTERNAL_TCACHE_INLINES_H */
deps/jemalloc/include/jemalloc/internal/tcache_structs.h
0 → 100644
#ifndef JEMALLOC_INTERNAL_TCACHE_STRUCTS_H
#define JEMALLOC_INTERNAL_TCACHE_STRUCTS_H
#include "jemalloc/internal/cache_bin.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/tsd_types.h"
/* Various uses of this struct need it to be a named type. */
typedef ql_elm(tsd_t) tsd_link_t;

struct tcache_s {
    /*
     * To minimize our cache-footprint, we put the frequently accessed data
     * together at the start of this struct.
     */

    /* Cleared after arena_prof_accum(). */
    uint64_t prof_accumbytes;
    /* Drives incremental GC. */
    ticker_t gc_ticker;
    /*
     * The pointer stacks associated with bins follow as a contiguous array.
     * During tcache initialization, the avail pointer in each element of
     * tbins is initialized to point to the proper offset within this array.
     */
    cache_bin_t bins_small[SC_NBINS];

    /*
     * This data is less hot; we can be a little less careful with our
     * footprint here.
     */
    /* Lets us track all the tcaches in an arena. */
    ql_elm(tcache_t) link;

    /* Logically scoped to tsd, but put here for cache layout reasons. */
    ql_elm(tsd_t) tsd_link;
    bool in_hook;

    /*
     * The descriptor lets the arena find our cache bins without seeing the
     * tcache definition.  This enables arenas to aggregate stats across
     * tcaches without having a tcache dependency.
     */
    cache_bin_array_descriptor_t cache_bin_array_descriptor;

    /* The arena this tcache is associated with. */
    arena_t *arena;
    /* Next bin to GC. */
    szind_t next_gc_bin;
    /* For small bins, fill (ncached_max >> lg_fill_div). */
    uint8_t lg_fill_div[SC_NBINS];
    /*
     * We put the cache bins for large size classes at the end of the
     * struct, since some of them might not get used.  This might end up
     * letting us avoid touching an extra page if we don't have to.
     */
    cache_bin_t bins_large[SC_NSIZES - SC_NBINS];
};

/* Linkage for list of available (previously used) explicit tcache IDs. */
struct tcaches_s {
    union {
        tcache_t *tcache;
        tcaches_t *next;
    };
};

#endif /* JEMALLOC_INTERNAL_TCACHE_STRUCTS_H */
deps/jemalloc/include/jemalloc/internal/tcache_types.h
0 → 100644
#ifndef JEMALLOC_INTERNAL_TCACHE_TYPES_H
#define JEMALLOC_INTERNAL_TCACHE_TYPES_H
#include "jemalloc/internal/sc.h"
typedef struct tcache_s tcache_t;
typedef struct tcaches_s tcaches_t;
/*
* tcache pointers close to NULL are used to encode state information that is
* used for two purposes: preventing thread caching on a per thread basis and
* cleaning up during thread shutdown.
*/
#define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1)
#define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2)
#define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3)
#define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY
/*
* Absolute minimum number of cache slots for each small bin.
*/
#define TCACHE_NSLOTS_SMALL_MIN 20
/*
* Absolute maximum number of cache slots for each small bin in the thread
* cache. This is an additional constraint beyond that imposed as: twice the
* number of regions per slab for this size class.
*
* This constant must be an even number.
*/
#define TCACHE_NSLOTS_SMALL_MAX 200
/* Number of cache slots for large size classes. */
#define TCACHE_NSLOTS_LARGE 20
/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
#define LG_TCACHE_MAXCLASS_DEFAULT 15
/*
* TCACHE_GC_SWEEP is the approximate number of allocation events between
* full GC sweeps. Integer rounding may cause the actual number to be
* slightly higher, since GC is performed incrementally.
*/
#define TCACHE_GC_SWEEP 8192
/* Number of tcache allocation/deallocation events between incremental GCs. */
#define TCACHE_GC_INCR \
((TCACHE_GC_SWEEP / SC_NBINS) + ((TCACHE_GC_SWEEP / SC_NBINS == 0) ? 0 : 1))
/* Used in TSD static initializer only. Real init in tcache_data_init(). */
#define TCACHE_ZERO_INITIALIZER {0}
/* Used in TSD static initializer only. Will be initialized to opt_tcache. */
#define TCACHE_ENABLED_ZERO_INITIALIZER false
/* Used for explicit tcache only. Means flushed but not destroyed. */
#define TCACHES_ELM_NEED_REINIT ((tcache_t *)(uintptr_t)1)
#endif /* JEMALLOC_INTERNAL_TCACHE_TYPES_H */
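Editorial note, not part of the commit: to make the rounding concrete (the numbers are illustrative, since SC_NBINS depends on the build configuration), with SC_NBINS = 36 the TCACHE_GC_INCR macro expands to 8192 / 36 + 1 = 228 events per incremental GC, so a full sweep over all 36 bins takes 36 * 228 = 8208 events, slightly more than TCACHE_GC_SWEEP. This is exactly the rounding effect the comment above describes.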
deps/jemalloc/include/jemalloc/internal/test_hooks.h
0 → 100644
#ifndef JEMALLOC_INTERNAL_TEST_HOOKS_H
#define JEMALLOC_INTERNAL_TEST_HOOKS_H
extern JEMALLOC_EXPORT void (*test_hooks_arena_new_hook)();
extern JEMALLOC_EXPORT void (*test_hooks_libc_hook)();
#define JEMALLOC_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn)
#define open JEMALLOC_HOOK(open, test_hooks_libc_hook)
#define read JEMALLOC_HOOK(read, test_hooks_libc_hook)
#define write JEMALLOC_HOOK(write, test_hooks_libc_hook)
#define readlink JEMALLOC_HOOK(readlink, test_hooks_libc_hook)
#define close JEMALLOC_HOOK(close, test_hooks_libc_hook)
#define creat JEMALLOC_HOOK(creat, test_hooks_libc_hook)
#define secure_getenv JEMALLOC_HOOK(secure_getenv, test_hooks_libc_hook)
/* Note that this is undef'd and re-define'd in src/prof.c. */
#define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, test_hooks_libc_hook)
#endif /* JEMALLOC_INTERNAL_TEST_HOOKS_H */
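Editorial note, not part of the commit: JEMALLOC_HOOK evaluates the hook (if one is installed) and then yields the wrapped identifier, so with the #defines above a call site such as

    int fd = open("/tmp/conf", O_RDONLY);

expands to roughly

    int fd = ((void)(test_hooks_libc_hook != NULL && (test_hooks_libc_hook(), 0)),
        open)("/tmp/conf", O_RDONLY);

i.e. the test hook fires first and the real open() is then called; the inner open is not expanded again because the macro is not recursive.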
deps/jemalloc/include/jemalloc/internal/ticker.h
0 → 100644
#ifndef JEMALLOC_INTERNAL_TICKER_H
#define JEMALLOC_INTERNAL_TICKER_H
#include "jemalloc/internal/util.h"
/**
* A ticker makes it easy to count-down events until some limit. You
* ticker_init the ticker to trigger every nticks events. You then notify it
* that an event has occurred with calls to ticker_tick (or that nticks events
* have occurred with a call to ticker_ticks), which will return true (and reset
* the counter) if the countdown hit zero.
*/
typedef struct {
    int32_t tick;
    int32_t nticks;
} ticker_t;

static inline void
ticker_init(ticker_t *ticker, int32_t nticks) {
    ticker->tick = nticks;
    ticker->nticks = nticks;
}

static inline void
ticker_copy(ticker_t *ticker, const ticker_t *other) {
    *ticker = *other;
}

static inline int32_t
ticker_read(const ticker_t *ticker) {
    return ticker->tick;
}

/*
 * Not intended to be a public API.  Unfortunately, on x86, neither gcc nor
 * clang seems smart enough to turn
 *     ticker->tick -= nticks;
 *     if (unlikely(ticker->tick < 0)) {
 *         fixup ticker
 *         return true;
 *     }
 *     return false;
 * into
 *     subq %nticks_reg, (%ticker_reg)
 *     js fixup ticker
 *
 * unless we force "fixup ticker" out of line.  In that case, gcc gets it right,
 * but clang now does worse than before.  So, on x86 with gcc, we force it out
 * of line, but otherwise let the inlining occur.  Ordinarily this wouldn't be
 * worth the hassle, but this is on the fast path of both malloc and free (via
 * tcache_event).
 */
#if defined(__GNUC__) && !defined(__clang__) \
    && (defined(__x86_64__) || defined(__i386__))
JEMALLOC_NOINLINE
#endif
static bool
ticker_fixup(ticker_t *ticker) {
    ticker->tick = ticker->nticks;
    return true;
}

static inline bool
ticker_ticks(ticker_t *ticker, int32_t nticks) {
    ticker->tick -= nticks;
    if (unlikely(ticker->tick < 0)) {
        return ticker_fixup(ticker);
    }
    return false;
}

static inline bool
ticker_tick(ticker_t *ticker) {
    return ticker_ticks(ticker, 1);
}

/*
 * Try to tick.  If ticker would fire, return true, but rely on
 * slowpath to reset ticker.
 */
static inline bool
ticker_trytick(ticker_t *ticker) {
    --ticker->tick;
    if (unlikely(ticker->tick < 0)) {
        return true;
    }
    return false;
}

#endif /* JEMALLOC_INTERNAL_TICKER_H */
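Editorial note, not part of the commit: a usage sketch of the API described in the comment above, firing periodic slow-path work every 64 events (the period is arbitrary here):

static ticker_t ev_ticker;

static void events_init(void) {
    ticker_init(&ev_ticker, 64);    /* trigger every 64 events */
}

static void on_event(void) {
    if (ticker_tick(&ev_ticker)) {  /* true once per 64 calls, then the counter resets */
        /* periodic (slow-path) work goes here */
    }
}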
deps/jemalloc/include/jemalloc/internal/tsd.h
0 → 100644
#ifndef JEMALLOC_INTERNAL_TSD_H
#define JEMALLOC_INTERNAL_TSD_H
#include "jemalloc/internal/arena_types.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/bin_types.h"
#include "jemalloc/internal/jemalloc_internal_externs.h"
#include "jemalloc/internal/prof_types.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/rtree_tsd.h"
#include "jemalloc/internal/tcache_types.h"
#include "jemalloc/internal/tcache_structs.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/witness.h"
/*
* Thread-Specific-Data layout
* --- data accessed on tcache fast path: state, rtree_ctx, stats, prof ---
* s: state
* e: tcache_enabled
* m: thread_allocated (config_stats)
* f: thread_deallocated (config_stats)
* p: prof_tdata (config_prof)
* c: rtree_ctx (rtree cache accessed on deallocation)
* t: tcache
* --- data not accessed on tcache fast path: arena-related fields ---
* d: arenas_tdata_bypass
* r: reentrancy_level
* x: narenas_tdata
* i: iarena
* a: arena
* o: arenas_tdata
* Loading TSD data is on the critical path of basically all malloc operations.
* In particular, tcache and rtree_ctx rely on hot CPU cache to be effective.
* Use a compact layout to reduce cache footprint.
* +--- 64-bit and 64B cacheline; 1B each letter; First byte on the left. ---+
* |---------------------------- 1st cacheline ----------------------------|
* | sedrxxxx mmmmmmmm ffffffff pppppppp [c * 32 ........ ........ .......] |
* |---------------------------- 2nd cacheline ----------------------------|
* | [c * 64 ........ ........ ........ ........ ........ ........ .......] |
* |---------------------------- 3nd cacheline ----------------------------|
* | [c * 32 ........ ........ .......] iiiiiiii aaaaaaaa oooooooo [t...... |
* +-------------------------------------------------------------------------+
* Note: the entire tcache is embedded into TSD and spans multiple cachelines.
*
* The last 3 members (i, a and o) before tcache isn't really needed on tcache
* fast path. However we have a number of unused tcache bins and witnesses
* (never touched unless config_debug) at the end of tcache, so we place them
* there to avoid breaking the cachelines and possibly paging in an extra page.
*/
#ifdef JEMALLOC_JET
typedef void (*test_callback_t)(int *);
# define MALLOC_TSD_TEST_DATA_INIT 0x72b65c10
# define MALLOC_TEST_TSD \
O(test_data, int, int) \
O(test_callback, test_callback_t, int)
# define MALLOC_TEST_TSD_INITIALIZER , MALLOC_TSD_TEST_DATA_INIT, NULL
#else
# define MALLOC_TEST_TSD
# define MALLOC_TEST_TSD_INITIALIZER
#endif
/* O(name, type, nullable type */
#define MALLOC_TSD \
O(tcache_enabled, bool, bool) \
O(arenas_tdata_bypass, bool, bool) \
O(reentrancy_level, int8_t, int8_t) \
O(narenas_tdata, uint32_t, uint32_t) \
O(offset_state, uint64_t, uint64_t) \
O(thread_allocated, uint64_t, uint64_t) \
O(thread_deallocated, uint64_t, uint64_t) \
O(bytes_until_sample, int64_t, int64_t) \
O(prof_tdata, prof_tdata_t *, prof_tdata_t *) \
O(rtree_ctx, rtree_ctx_t, rtree_ctx_t) \
O(iarena, arena_t *, arena_t *) \
O(arena, arena_t *, arena_t *) \
O(arenas_tdata, arena_tdata_t *, arena_tdata_t *)\
O(binshards, tsd_binshards_t, tsd_binshards_t)\
O(tcache, tcache_t, tcache_t) \
O(witness_tsd, witness_tsd_t, witness_tsdn_t) \
MALLOC_TEST_TSD
#define TSD_INITIALIZER { \
ATOMIC_INIT(tsd_state_uninitialized), \
TCACHE_ENABLED_ZERO_INITIALIZER, \
false, \
0, \
0, \
0, \
0, \
0, \
0, \
NULL, \
RTREE_CTX_ZERO_INITIALIZER, \
NULL, \
NULL, \
NULL, \
TSD_BINSHARDS_ZERO_INITIALIZER, \
TCACHE_ZERO_INITIALIZER, \
WITNESS_TSD_INITIALIZER \
MALLOC_TEST_TSD_INITIALIZER \
}
void *malloc_tsd_malloc(size_t size);
void malloc_tsd_dalloc(void *wrapper);
void malloc_tsd_cleanup_register(bool (*f)(void));
tsd_t *malloc_tsd_boot0(void);
void malloc_tsd_boot1(void);
void tsd_cleanup(void *arg);
tsd_t *tsd_fetch_slow(tsd_t *tsd, bool internal);
void tsd_state_set(tsd_t *tsd, uint8_t new_state);
void tsd_slow_update(tsd_t *tsd);
void tsd_prefork(tsd_t *tsd);
void tsd_postfork_parent(tsd_t *tsd);
void tsd_postfork_child(tsd_t *tsd);

/*
 * Call ..._inc when your module wants to take all threads down the slow paths,
 * and ..._dec when it no longer needs to.
 */
void tsd_global_slow_inc(tsdn_t *tsdn);
void tsd_global_slow_dec(tsdn_t *tsdn);
bool tsd_global_slow();

enum {
    /* Common case --> jnz. */
    tsd_state_nominal = 0,
    /* Initialized but on slow path. */
    tsd_state_nominal_slow = 1,
    /*
     * Some thread has changed global state in such a way that all nominal
     * threads need to recompute their fast / slow status the next time they
     * get a chance.
     *
     * Any thread can change another thread's status *to* recompute, but
     * threads are the only ones who can change their status *from*
     * recompute.
     */
    tsd_state_nominal_recompute = 2,
    /*
     * The above nominal states should be lower values.  We use
     * tsd_nominal_max to separate nominal states from threads in the
     * process of being born / dying.
     */
    tsd_state_nominal_max = 2,
    /*
     * A thread might free() during its death as its only allocator action;
     * in such scenarios, we need tsd, but set up in such a way that no
     * cleanup is necessary.
     */
    tsd_state_minimal_initialized = 3,
    /* States during which we know we're in thread death. */
    tsd_state_purgatory = 4,
    tsd_state_reincarnated = 5,
    /*
     * What it says on the tin; tsd that hasn't been initialized.  Note
     * that even when the tsd struct lives in TLS, we need to keep track
     * of stuff like whether or not our pthread destructors have been
     * scheduled, so this really truly is different than the nominal state.
     */
    tsd_state_uninitialized = 6
};
/*
* Some TSD accesses can only be done in a nominal state. To enforce this, we
* wrap TSD member access in a function that asserts on TSD state, and mangle
* field names to prevent touching them accidentally.
*/
#define TSD_MANGLE(n) cant_access_tsd_items_directly_use_a_getter_or_setter_##n
#ifdef JEMALLOC_U8_ATOMICS
# define tsd_state_t atomic_u8_t
# define tsd_atomic_load atomic_load_u8
# define tsd_atomic_store atomic_store_u8
# define tsd_atomic_exchange atomic_exchange_u8
#else
# define tsd_state_t atomic_u32_t
# define tsd_atomic_load atomic_load_u32
# define tsd_atomic_store atomic_store_u32
# define tsd_atomic_exchange atomic_exchange_u32
#endif
/* The actual tsd. */
struct tsd_s {
    /*
     * The contents should be treated as totally opaque outside the tsd
     * module.  Access any thread-local state through the getters and
     * setters below.
     */

    /*
     * We manually limit the state to just a single byte.  Unless the 8-bit
     * atomics are unavailable (which is rare).
     */
    tsd_state_t state;
#define O(n, t, nt) \
    t TSD_MANGLE(n);
MALLOC_TSD
#undef O
};

JEMALLOC_ALWAYS_INLINE uint8_t
tsd_state_get(tsd_t *tsd) {
    /*
     * This should be atomic.  Unfortunately, compilers right now can't tell
     * that this can be done as a memory comparison, and forces a load into
     * a register that hurts fast-path performance.
     */
    /* return atomic_load_u8(&tsd->state, ATOMIC_RELAXED); */
    return *(uint8_t *)&tsd->state;
}

/*
 * Wrapper around tsd_t that makes it possible to avoid implicit conversion
 * between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be
 * explicitly converted to tsd_t, which is non-nullable.
 */
struct tsdn_s {
    tsd_t tsd;
};
#define TSDN_NULL ((tsdn_t *)0)

JEMALLOC_ALWAYS_INLINE tsdn_t *
tsd_tsdn(tsd_t *tsd) {
    return (tsdn_t *)tsd;
}

JEMALLOC_ALWAYS_INLINE bool
tsdn_null(const tsdn_t *tsdn) {
    return tsdn == NULL;
}

JEMALLOC_ALWAYS_INLINE tsd_t *
tsdn_tsd(tsdn_t *tsdn) {
    assert(!tsdn_null(tsdn));

    return &tsdn->tsd;
}
/*
* We put the platform-specific data declarations and inlines into their own
* header files to avoid cluttering this file. They define tsd_boot0,
* tsd_boot1, tsd_boot, tsd_booted_get, tsd_get_allocates, tsd_get, and tsd_set.
*/
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#include "jemalloc/internal/tsd_malloc_thread_cleanup.h"
#elif (defined(JEMALLOC_TLS))
#include "jemalloc/internal/tsd_tls.h"
#elif (defined(_WIN32))
#include "jemalloc/internal/tsd_win.h"
#else
#include "jemalloc/internal/tsd_generic.h"
#endif
/*
* tsd_foop_get_unsafe(tsd) returns a pointer to the thread-local instance of
* foo. This omits some safety checks, and so can be used during tsd
* initialization and cleanup.
*/
#define O(n, t, nt) \
JEMALLOC_ALWAYS_INLINE t * \
tsd_##n##p_get_unsafe(tsd_t *tsd) { \
return &tsd->TSD_MANGLE(n); \
}
MALLOC_TSD
#undef O
/* tsd_foop_get(tsd) returns a pointer to the thread-local instance of foo. */
#define O(n, t, nt) \
JEMALLOC_ALWAYS_INLINE t * \
tsd_##n##p_get(tsd_t *tsd) { \
/* \
* Because the state might change asynchronously if it's \
* nominal, we need to make sure that we only read it once. \
 */ \
uint8_t state = tsd_state_get(tsd); \
assert(state == tsd_state_nominal || \
state == tsd_state_nominal_slow || \
state == tsd_state_nominal_recompute || \
state == tsd_state_reincarnated || \
state == tsd_state_minimal_initialized); \
return tsd_##n##p_get_unsafe(tsd); \
}
MALLOC_TSD
#undef O
/*
* tsdn_foop_get(tsdn) returns either the thread-local instance of foo (if tsdn
* isn't NULL), or NULL (if tsdn is NULL), cast to the nullable pointer type.
*/
#define O(n, t, nt) \
JEMALLOC_ALWAYS_INLINE nt * \
tsdn_##n##p_get(tsdn_t *tsdn) { \
if (tsdn_null(tsdn)) { \
return NULL; \
} \
tsd_t *tsd = tsdn_tsd(tsdn); \
return (nt *)tsd_##n##p_get(tsd); \
}
MALLOC_TSD
#undef O
/* tsd_foo_get(tsd) returns the value of the thread-local instance of foo. */
#define O(n, t, nt) \
JEMALLOC_ALWAYS_INLINE t \
tsd_##n##_get(tsd_t *tsd) { \
return *tsd_##n##p_get(tsd); \
}
MALLOC_TSD
#undef O
/* tsd_foo_set(tsd, val) updates the thread-local instance of foo to be val. */
#define O(n, t, nt) \
JEMALLOC_ALWAYS_INLINE void \
tsd_##n##_set(tsd_t *tsd, t val) { \
assert(tsd_state_get(tsd) != tsd_state_reincarnated && \
tsd_state_get(tsd) != tsd_state_minimal_initialized); \
*tsd_##n##p_get(tsd) = val; \
}
MALLOC_TSD
#undef O
JEMALLOC_ALWAYS_INLINE
void
tsd_assert_fast
(
tsd_t
*
tsd
)
{
/*
* Note that our fastness assertion does *not* include global slowness
* counters; it's not in general possible to ensure that they won't
* change asynchronously from underneath us.
*/
assert
(
!
malloc_slow
&&
tsd_tcache_enabled_get
(
tsd
)
&&
tsd_reentrancy_level_get
(
tsd
)
==
0
);
}
JEMALLOC_ALWAYS_INLINE
bool
tsd_fast
(
tsd_t
*
tsd
)
{
bool
fast
=
(
tsd_state_get
(
tsd
)
==
tsd_state_nominal
);
if
(
fast
)
{
tsd_assert_fast
(
tsd
);
}
return
fast
;
}
JEMALLOC_ALWAYS_INLINE
tsd_t
*
tsd_fetch_impl
(
bool
init
,
bool
minimal
)
{
tsd_t
*
tsd
=
tsd_get
(
init
);
if
(
!
init
&&
tsd_get_allocates
()
&&
tsd
==
NULL
)
{
return
NULL
;
}
assert
(
tsd
!=
NULL
);
if
(
unlikely
(
tsd_state_get
(
tsd
)
!=
tsd_state_nominal
))
{
return
tsd_fetch_slow
(
tsd
,
minimal
);
}
assert
(
tsd_fast
(
tsd
));
tsd_assert_fast
(
tsd
);
return
tsd
;
}
/* Get a minimal TSD that requires no cleanup. See comments in free(). */
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_fetch_min(void) {
	return tsd_fetch_impl(true, true);
}

/* For internal background threads use only. */
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_internal_fetch(void) {
	tsd_t *tsd = tsd_fetch_min();
	/* Use reincarnated state to prevent full initialization. */
	tsd_state_set(tsd, tsd_state_reincarnated);

	return tsd;
}

JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_fetch(void) {
	return tsd_fetch_impl(true, false);
}

static inline bool
tsd_nominal(tsd_t *tsd) {
	return (tsd_state_get(tsd) <= tsd_state_nominal_max);
}

JEMALLOC_ALWAYS_INLINE tsdn_t *
tsdn_fetch(void) {
	if (!tsd_booted_get()) {
		return NULL;
	}

	return tsd_tsdn(tsd_fetch_impl(false, false));
}
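/*
 * Usage sketch (illustrative, not part of the header): code that may run
 * before TSD is booted fetches a nullable tsdn_t and checks it, while code on
 * a fully initialized thread can call tsd_fetch() directly:
 *
 *   tsdn_t *tsdn = tsdn_fetch();
 *   if (!tsdn_null(tsdn)) {
 *       tsd_t *tsd = tsdn_tsd(tsdn);
 *       ...
 *   }
 */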
JEMALLOC_ALWAYS_INLINE rtree_ctx_t *
tsd_rtree_ctx(tsd_t *tsd) {
	return tsd_rtree_ctxp_get(tsd);
}

JEMALLOC_ALWAYS_INLINE rtree_ctx_t *
tsdn_rtree_ctx(tsdn_t *tsdn, rtree_ctx_t *fallback) {
	/*
	 * If tsd cannot be accessed, initialize the fallback rtree_ctx and
	 * return a pointer to it.
	 */
	if (unlikely(tsdn_null(tsdn))) {
		rtree_ctx_data_init(fallback);
		return fallback;
	}
	return tsd_rtree_ctx(tsdn_tsd(tsdn));
}

#endif /* JEMALLOC_INTERNAL_TSD_H */
deps/jemalloc/include/jemalloc/internal/tsd_generic.h
0 → 100644
View file @
4d5911b4
#ifdef JEMALLOC_INTERNAL_TSD_GENERIC_H
#error This file should be included only once, by tsd.h.
#endif
#define JEMALLOC_INTERNAL_TSD_GENERIC_H
typedef struct tsd_init_block_s tsd_init_block_t;
struct tsd_init_block_s {
	ql_elm(tsd_init_block_t) link;
	pthread_t thread;
	void *data;
};

/* Defined in tsd.c, to allow the mutex headers to have tsd dependencies. */
typedef struct tsd_init_head_s tsd_init_head_t;

typedef struct {
	bool initialized;
	tsd_t val;
} tsd_wrapper_t;

void *tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block);
void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block);

extern pthread_key_t tsd_tsd;
extern tsd_init_head_t tsd_init_head;
extern tsd_wrapper_t tsd_boot_wrapper;
extern bool tsd_booted;
/* Initialization/cleanup. */
JEMALLOC_ALWAYS_INLINE void
tsd_cleanup_wrapper(void *arg) {
	tsd_wrapper_t *wrapper = (tsd_wrapper_t *)arg;

	if (wrapper->initialized) {
		wrapper->initialized = false;
		tsd_cleanup(&wrapper->val);
		if (wrapper->initialized) {
			/* Trigger another cleanup round. */
			if (pthread_setspecific(tsd_tsd, (void *)wrapper) != 0)
			    {
				malloc_write("<jemalloc>: Error setting TSD\n");
				if (opt_abort) {
					abort();
				}
			}
			return;
		}
	}
	malloc_tsd_dalloc(wrapper);
}

JEMALLOC_ALWAYS_INLINE void
tsd_wrapper_set(tsd_wrapper_t *wrapper) {
	if (pthread_setspecific(tsd_tsd, (void *)wrapper) != 0) {
		malloc_write("<jemalloc>: Error setting TSD\n");
		abort();
	}
}
JEMALLOC_ALWAYS_INLINE tsd_wrapper_t *
tsd_wrapper_get(bool init) {
	tsd_wrapper_t *wrapper = (tsd_wrapper_t *)pthread_getspecific(tsd_tsd);

	if (init && unlikely(wrapper == NULL)) {
		tsd_init_block_t block;
		wrapper = (tsd_wrapper_t *)
		    tsd_init_check_recursion(&tsd_init_head, &block);
		if (wrapper) {
			return wrapper;
		}
		wrapper = (tsd_wrapper_t *)
		    malloc_tsd_malloc(sizeof(tsd_wrapper_t));
		block.data = (void *)wrapper;
		if (wrapper == NULL) {
			malloc_write("<jemalloc>: Error allocating TSD\n");
			abort();
		} else {
			wrapper->initialized = false;
			JEMALLOC_DIAGNOSTIC_PUSH
			JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
			tsd_t initializer = TSD_INITIALIZER;
			JEMALLOC_DIAGNOSTIC_POP
			wrapper->val = initializer;
		}
		tsd_wrapper_set(wrapper);
		tsd_init_finish(&tsd_init_head, &block);
	}
	return wrapper;
}
JEMALLOC_ALWAYS_INLINE bool
tsd_boot0(void) {
	if (pthread_key_create(&tsd_tsd, tsd_cleanup_wrapper) != 0) {
		return true;
	}
	tsd_wrapper_set(&tsd_boot_wrapper);
	tsd_booted = true;
	return false;
}

JEMALLOC_ALWAYS_INLINE void
tsd_boot1(void) {
	tsd_wrapper_t *wrapper;
	wrapper = (tsd_wrapper_t *)malloc_tsd_malloc(sizeof(tsd_wrapper_t));
	if (wrapper == NULL) {
		malloc_write("<jemalloc>: Error allocating TSD\n");
		abort();
	}
	tsd_boot_wrapper.initialized = false;
	tsd_cleanup(&tsd_boot_wrapper.val);
	wrapper->initialized = false;
	JEMALLOC_DIAGNOSTIC_PUSH
	JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
	tsd_t initializer = TSD_INITIALIZER;
	JEMALLOC_DIAGNOSTIC_POP
	wrapper->val = initializer;
	tsd_wrapper_set(wrapper);
}

JEMALLOC_ALWAYS_INLINE bool
tsd_boot(void) {
	if (tsd_boot0()) {
		return true;
	}
	tsd_boot1();
	return false;
}

JEMALLOC_ALWAYS_INLINE bool
tsd_booted_get(void) {
	return tsd_booted;
}

JEMALLOC_ALWAYS_INLINE bool
tsd_get_allocates(void) {
	return true;
}
/* Get/set. */
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_get(bool init) {
	tsd_wrapper_t *wrapper;

	assert(tsd_booted);
	wrapper = tsd_wrapper_get(init);
	if (tsd_get_allocates() && !init && wrapper == NULL) {
		return NULL;
	}
	return &wrapper->val;
}

JEMALLOC_ALWAYS_INLINE void
tsd_set(tsd_t *val) {
	tsd_wrapper_t *wrapper;

	assert(tsd_booted);
	wrapper = tsd_wrapper_get(true);
	if (likely(&wrapper->val != val)) {
		wrapper->val = *(val);
	}
	wrapper->initialized = true;
}
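/*
 * Boot-order sketch (a summary of the functions above, not additional API):
 * tsd_boot0() creates the pthread key and temporarily installs the statically
 * allocated tsd_boot_wrapper so TSD lookups work before the allocator can
 * allocate; tsd_boot1() then replaces it with a malloc_tsd_malloc()ed wrapper
 * once allocation is possible. tsd_boot() simply runs both steps in order.
 */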
deps/jemalloc/include/jemalloc/internal/tsd_malloc_thread_cleanup.h
0 → 100644
View file @
4d5911b4
#ifdef JEMALLOC_INTERNAL_TSD_MALLOC_THREAD_CLEANUP_H
#error This file should be included only once, by tsd.h.
#endif
#define JEMALLOC_INTERNAL_TSD_MALLOC_THREAD_CLEANUP_H
#define JEMALLOC_TSD_TYPE_ATTR(type) __thread type JEMALLOC_TLS_MODEL
extern JEMALLOC_TSD_TYPE_ATTR(tsd_t) tsd_tls;
extern JEMALLOC_TSD_TYPE_ATTR(bool) tsd_initialized;
extern bool tsd_booted;
/* Initialization/cleanup. */
JEMALLOC_ALWAYS_INLINE bool
tsd_cleanup_wrapper(void) {
	if (tsd_initialized) {
		tsd_initialized = false;
		tsd_cleanup(&tsd_tls);
	}
	return tsd_initialized;
}

JEMALLOC_ALWAYS_INLINE bool
tsd_boot0(void) {
	malloc_tsd_cleanup_register(&tsd_cleanup_wrapper);
	tsd_booted = true;
	return false;
}

JEMALLOC_ALWAYS_INLINE void
tsd_boot1(void) {
	/* Do nothing. */
}

JEMALLOC_ALWAYS_INLINE bool
tsd_boot(void) {
	return tsd_boot0();
}

JEMALLOC_ALWAYS_INLINE bool
tsd_booted_get(void) {
	return tsd_booted;
}

JEMALLOC_ALWAYS_INLINE bool
tsd_get_allocates(void) {
	return false;
}
/* Get/set. */
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_get(bool init) {
	return &tsd_tls;
}

JEMALLOC_ALWAYS_INLINE void
tsd_set(tsd_t *val) {
	assert(tsd_booted);
	if (likely(&tsd_tls != val)) {
		tsd_tls = (*val);
	}
	tsd_initialized = true;
}
deps/jemalloc/include/jemalloc/internal/tsd_tls.h
0 → 100644
View file @
4d5911b4
#ifdef JEMALLOC_INTERNAL_TSD_TLS_H
#error This file should be included only once, by tsd.h.
#endif
#define JEMALLOC_INTERNAL_TSD_TLS_H
#define JEMALLOC_TSD_TYPE_ATTR(type) __thread type JEMALLOC_TLS_MODEL
extern JEMALLOC_TSD_TYPE_ATTR(tsd_t) tsd_tls;
extern pthread_key_t tsd_tsd;
extern bool tsd_booted;
/* Initialization/cleanup. */
JEMALLOC_ALWAYS_INLINE bool
tsd_boot0(void) {
	if (pthread_key_create(&tsd_tsd, &tsd_cleanup) != 0) {
		return true;
	}
	tsd_booted = true;
	return false;
}

JEMALLOC_ALWAYS_INLINE void
tsd_boot1(void) {
	/* Do nothing. */
}

JEMALLOC_ALWAYS_INLINE bool
tsd_boot(void) {
	return tsd_boot0();
}

JEMALLOC_ALWAYS_INLINE bool
tsd_booted_get(void) {
	return tsd_booted;
}

JEMALLOC_ALWAYS_INLINE bool
tsd_get_allocates(void) {
	return false;
}
/* Get/set. */
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_get(bool init) {
	return &tsd_tls;
}

JEMALLOC_ALWAYS_INLINE void
tsd_set(tsd_t *val) {
	assert(tsd_booted);
	if (likely(&tsd_tls != val)) {
		tsd_tls = (*val);
	}
	if (pthread_setspecific(tsd_tsd, (void *)(&tsd_tls)) != 0) {
		malloc_write("<jemalloc>: Error setting tsd.\n");
		if (opt_abort) {
			abort();
		}
	}
}
deps/jemalloc/include/jemalloc/internal/tsd_types.h
0 → 100644
View file @
4d5911b4
#ifndef JEMALLOC_INTERNAL_TSD_TYPES_H
#define JEMALLOC_INTERNAL_TSD_TYPES_H
#define MALLOC_TSD_CLEANUPS_MAX 2
typedef struct tsd_s tsd_t;
typedef struct tsdn_s tsdn_t;
typedef bool (*malloc_tsd_cleanup_t)(void);

#endif /* JEMALLOC_INTERNAL_TSD_TYPES_H */
deps/jemalloc/include/jemalloc/internal/tsd_win.h
0 → 100644
View file @
4d5911b4
#ifdef JEMALLOC_INTERNAL_TSD_WIN_H
#error This file should be included only once, by tsd.h.
#endif
#define JEMALLOC_INTERNAL_TSD_WIN_H
typedef struct {
	bool initialized;
	tsd_t val;
} tsd_wrapper_t;

extern DWORD tsd_tsd;
extern tsd_wrapper_t tsd_boot_wrapper;
extern bool tsd_booted;
/* Initialization/cleanup. */
JEMALLOC_ALWAYS_INLINE bool
tsd_cleanup_wrapper(void) {
	DWORD error = GetLastError();
	tsd_wrapper_t *wrapper = (tsd_wrapper_t *)TlsGetValue(tsd_tsd);
	SetLastError(error);

	if (wrapper == NULL) {
		return false;
	}

	if (wrapper->initialized) {
		wrapper->initialized = false;
		tsd_cleanup(&wrapper->val);
		if (wrapper->initialized) {
			/* Trigger another cleanup round. */
			return true;
		}
	}
	malloc_tsd_dalloc(wrapper);
	return false;
}

JEMALLOC_ALWAYS_INLINE void
tsd_wrapper_set(tsd_wrapper_t *wrapper) {
	if (!TlsSetValue(tsd_tsd, (void *)wrapper)) {
		malloc_write("<jemalloc>: Error setting TSD\n");
		abort();
	}
}
JEMALLOC_ALWAYS_INLINE tsd_wrapper_t *
tsd_wrapper_get(bool init) {
	DWORD error = GetLastError();
	tsd_wrapper_t *wrapper = (tsd_wrapper_t *)TlsGetValue(tsd_tsd);
	SetLastError(error);

	if (init && unlikely(wrapper == NULL)) {
		wrapper = (tsd_wrapper_t *)
		    malloc_tsd_malloc(sizeof(tsd_wrapper_t));
		if (wrapper == NULL) {
			malloc_write("<jemalloc>: Error allocating TSD\n");
			abort();
		} else {
			wrapper->initialized = false;
			/* MSVC is finicky about aggregate initialization. */
			tsd_t tsd_initializer = TSD_INITIALIZER;
			wrapper->val = tsd_initializer;
		}
		tsd_wrapper_set(wrapper);
	}
	return wrapper;
}
JEMALLOC_ALWAYS_INLINE bool
tsd_boot0(void) {
	tsd_tsd = TlsAlloc();
	if (tsd_tsd == TLS_OUT_OF_INDEXES) {
		return true;
	}
	malloc_tsd_cleanup_register(&tsd_cleanup_wrapper);
	tsd_wrapper_set(&tsd_boot_wrapper);
	tsd_booted = true;
	return false;
}

JEMALLOC_ALWAYS_INLINE void
tsd_boot1(void) {
	tsd_wrapper_t *wrapper;
	wrapper = (tsd_wrapper_t *)malloc_tsd_malloc(sizeof(tsd_wrapper_t));
	if (wrapper == NULL) {
		malloc_write("<jemalloc>: Error allocating TSD\n");
		abort();
	}
	tsd_boot_wrapper.initialized = false;
	tsd_cleanup(&tsd_boot_wrapper.val);
	wrapper->initialized = false;
	tsd_t initializer = TSD_INITIALIZER;
	wrapper->val = initializer;
	tsd_wrapper_set(wrapper);
}

JEMALLOC_ALWAYS_INLINE bool
tsd_boot(void) {
	if (tsd_boot0()) {
		return true;
	}
	tsd_boot1();
	return false;
}

JEMALLOC_ALWAYS_INLINE bool
tsd_booted_get(void) {
	return tsd_booted;
}

JEMALLOC_ALWAYS_INLINE bool
tsd_get_allocates(void) {
	return true;
}
/* Get/set. */
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_get(bool init) {
	tsd_wrapper_t *wrapper;

	assert(tsd_booted);
	wrapper = tsd_wrapper_get(init);
	if (tsd_get_allocates() && !init && wrapper == NULL) {
		return NULL;
	}
	return &wrapper->val;
}

JEMALLOC_ALWAYS_INLINE void
tsd_set(tsd_t *val) {
	tsd_wrapper_t *wrapper;

	assert(tsd_booted);
	wrapper = tsd_wrapper_get(true);
	if (likely(&wrapper->val != val)) {
		wrapper->val = *(val);
	}
	wrapper->initialized = true;
}
deps/jemalloc/include/jemalloc/internal/util.h
0 → 100644
View file @
4d5911b4
#ifndef JEMALLOC_INTERNAL_UTIL_H
#define JEMALLOC_INTERNAL_UTIL_H
#define UTIL_INLINE static inline
/* Junk fill patterns. */
#ifndef JEMALLOC_ALLOC_JUNK
# define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5)
#endif
#ifndef JEMALLOC_FREE_JUNK
# define JEMALLOC_FREE_JUNK ((uint8_t)0x5a)
#endif
/*
* Wrap a cpp argument that contains commas such that it isn't broken up into
* multiple arguments.
*/
#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__
/* cpp macro definition stringification. */
#define STRINGIFY_HELPER(x) #x
#define STRINGIFY(x) STRINGIFY_HELPER(x)
/*
* Silence compiler warnings due to uninitialized values. This is used
* wherever the compiler fails to recognize that the variable is never used
* uninitialized.
*/
#define JEMALLOC_CC_SILENCE_INIT(v) = v
#ifdef __GNUC__
# define likely(x) __builtin_expect(!!(x), 1)
# define unlikely(x) __builtin_expect(!!(x), 0)
#else
# define likely(x) !!(x)
# define unlikely(x) !!(x)
#endif
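/*
 * Illustrative use of the branch hints above (a sketch, not code from this
 * header): error paths are typically wrapped in unlikely() so the compiler
 * favors the fall-through path, e.g.
 *
 *   if (unlikely(ptr == NULL)) {
 *       return NULL;
 *   }
 */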
#if !defined(JEMALLOC_INTERNAL_UNREACHABLE)
# error JEMALLOC_INTERNAL_UNREACHABLE should have been defined by configure
#endif
#define unreachable() JEMALLOC_INTERNAL_UNREACHABLE()
/* Set error code. */
UTIL_INLINE void
set_errno(int errnum) {
#ifdef _WIN32
	SetLastError(errnum);
#else
	errno = errnum;
#endif
}

/* Get last error code. */
UTIL_INLINE int
get_errno(void) {
#ifdef _WIN32
	return GetLastError();
#else
	return errno;
#endif
}

#undef UTIL_INLINE

#endif /* JEMALLOC_INTERNAL_UTIL_H */
deps/jemalloc/include/jemalloc/internal/witness.h
0 → 100644
View file @
4d5911b4
#ifndef JEMALLOC_INTERNAL_WITNESS_H
#define JEMALLOC_INTERNAL_WITNESS_H
#include "jemalloc/internal/ql.h"
/******************************************************************************/
/* LOCK RANKS */
/******************************************************************************/
/*
* Witnesses with rank WITNESS_RANK_OMIT are completely ignored by the witness
* machinery.
*/
#define WITNESS_RANK_OMIT 0U
#define WITNESS_RANK_MIN 1U
#define WITNESS_RANK_INIT 1U
#define WITNESS_RANK_CTL 1U
#define WITNESS_RANK_TCACHES 2U
#define WITNESS_RANK_ARENAS 3U
#define WITNESS_RANK_BACKGROUND_THREAD_GLOBAL 4U
#define WITNESS_RANK_PROF_DUMP 5U
#define WITNESS_RANK_PROF_BT2GCTX 6U
#define WITNESS_RANK_PROF_TDATAS 7U
#define WITNESS_RANK_PROF_TDATA 8U
#define WITNESS_RANK_PROF_LOG 9U
#define WITNESS_RANK_PROF_GCTX 10U
#define WITNESS_RANK_BACKGROUND_THREAD 11U
/*
* Used as an argument to witness_assert_depth_to_rank() in order to validate
* depth excluding non-core locks with lower ranks. Since the rank argument to
* witness_assert_depth_to_rank() is inclusive rather than exclusive, this
* definition can have the same value as the minimally ranked core lock.
*/
#define WITNESS_RANK_CORE 12U
#define WITNESS_RANK_DECAY 12U
#define WITNESS_RANK_TCACHE_QL 13U
#define WITNESS_RANK_EXTENT_GROW 14U
#define WITNESS_RANK_EXTENTS 15U
#define WITNESS_RANK_EXTENT_AVAIL 16U
#define WITNESS_RANK_EXTENT_POOL 17U
#define WITNESS_RANK_RTREE 18U
#define WITNESS_RANK_BASE 19U
#define WITNESS_RANK_ARENA_LARGE 20U
#define WITNESS_RANK_HOOK 21U
#define WITNESS_RANK_LEAF 0xffffffffU
#define WITNESS_RANK_BIN WITNESS_RANK_LEAF
#define WITNESS_RANK_ARENA_STATS WITNESS_RANK_LEAF
#define WITNESS_RANK_DSS WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_ACCUM WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_DUMP_SEQ WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT WITNESS_RANK_LEAF
/******************************************************************************/
/* PER-WITNESS DATA */
/******************************************************************************/
#if defined(JEMALLOC_DEBUG)
# define WITNESS_INITIALIZER(name, rank) {name, rank, NULL, NULL, {NULL, NULL}}
#else
# define WITNESS_INITIALIZER(name, rank)
#endif
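/*
 * Sketch (only meaningful when JEMALLOC_DEBUG is defined, since the non-debug
 * initializer expands to nothing): a witness embedded in a larger static
 * initializer can be seeded as
 *
 *   WITNESS_INITIALIZER("example_lock", WITNESS_RANK_LEAF)
 *
 * while witnesses created at runtime go through witness_init() instead.
 */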
typedef struct witness_s witness_t;
typedef unsigned witness_rank_t;
typedef ql_head(witness_t) witness_list_t;
typedef int witness_comp_t (const witness_t *, void *, const witness_t *,
    void *);
struct witness_s {
	/* Name, used for printing lock order reversal messages. */
	const char *name;

	/*
	 * Witness rank, where 0 is lowest and UINT_MAX is highest.  Witnesses
	 * must be acquired in order of increasing rank.
	 */
	witness_rank_t rank;

	/*
	 * If two witnesses are of equal rank and they have the same comp
	 * function pointer, it is called as a last attempt to differentiate
	 * between witnesses of equal rank.
	 */
	witness_comp_t *comp;

	/* Opaque data, passed to comp(). */
	void *opaque;

	/* Linkage for thread's currently owned locks. */
	ql_elm(witness_t) link;
};
/******************************************************************************/
/* PER-THREAD DATA */
/******************************************************************************/
typedef struct witness_tsd_s witness_tsd_t;
struct witness_tsd_s {
	witness_list_t witnesses;
	bool forking;
};
#define WITNESS_TSD_INITIALIZER { ql_head_initializer(witnesses), false }
#define WITNESS_TSDN_NULL ((witness_tsdn_t *)0)
/******************************************************************************/
/* (PER-THREAD) NULLABILITY HELPERS */
/******************************************************************************/
typedef struct witness_tsdn_s witness_tsdn_t;
struct witness_tsdn_s {
	witness_tsd_t witness_tsd;
};
JEMALLOC_ALWAYS_INLINE witness_tsdn_t *
witness_tsd_tsdn(witness_tsd_t *witness_tsd) {
	return (witness_tsdn_t *)witness_tsd;
}

JEMALLOC_ALWAYS_INLINE bool
witness_tsdn_null(witness_tsdn_t *witness_tsdn) {
	return witness_tsdn == NULL;
}

JEMALLOC_ALWAYS_INLINE witness_tsd_t *
witness_tsdn_tsd(witness_tsdn_t *witness_tsdn) {
	assert(!witness_tsdn_null(witness_tsdn));
	return &witness_tsdn->witness_tsd;
}
/******************************************************************************/
/* API */
/******************************************************************************/
void witness_init(witness_t *witness, const char *name, witness_rank_t rank,
    witness_comp_t *comp, void *opaque);

typedef void (witness_lock_error_t)(const witness_list_t *, const witness_t *);
extern witness_lock_error_t *JET_MUTABLE witness_lock_error;

typedef void (witness_owner_error_t)(const witness_t *);
extern witness_owner_error_t *JET_MUTABLE witness_owner_error;

typedef void (witness_not_owner_error_t)(const witness_t *);
extern witness_not_owner_error_t *JET_MUTABLE witness_not_owner_error;

typedef void (witness_depth_error_t)(const witness_list_t *,
    witness_rank_t rank_inclusive, unsigned depth);
extern witness_depth_error_t *JET_MUTABLE witness_depth_error;

void witnesses_cleanup(witness_tsd_t *witness_tsd);
void witness_prefork(witness_tsd_t *witness_tsd);
void witness_postfork_parent(witness_tsd_t *witness_tsd);
void witness_postfork_child(witness_tsd_t *witness_tsd);
/* Helper, not intended for direct use. */
static inline bool
witness_owner(witness_tsd_t *witness_tsd, const witness_t *witness) {
	witness_list_t *witnesses;
	witness_t *w;

	cassert(config_debug);

	witnesses = &witness_tsd->witnesses;
	ql_foreach(w, witnesses, link) {
		if (w == witness) {
			return true;
		}
	}

	return false;
}
static inline void
witness_assert_owner(witness_tsdn_t *witness_tsdn, const witness_t *witness) {
	witness_tsd_t *witness_tsd;

	if (!config_debug) {
		return;
	}

	if (witness_tsdn_null(witness_tsdn)) {
		return;
	}
	witness_tsd = witness_tsdn_tsd(witness_tsdn);
	if (witness->rank == WITNESS_RANK_OMIT) {
		return;
	}

	if (witness_owner(witness_tsd, witness)) {
		return;
	}
	witness_owner_error(witness);
}

static inline void
witness_assert_not_owner(witness_tsdn_t *witness_tsdn,
    const witness_t *witness) {
	witness_tsd_t *witness_tsd;
	witness_list_t *witnesses;
	witness_t *w;

	if (!config_debug) {
		return;
	}

	if (witness_tsdn_null(witness_tsdn)) {
		return;
	}
	witness_tsd = witness_tsdn_tsd(witness_tsdn);
	if (witness->rank == WITNESS_RANK_OMIT) {
		return;
	}

	witnesses = &witness_tsd->witnesses;
	ql_foreach(w, witnesses, link) {
		if (w == witness) {
			witness_not_owner_error(witness);
		}
	}
}
static inline void
witness_assert_depth_to_rank(witness_tsdn_t *witness_tsdn,
    witness_rank_t rank_inclusive, unsigned depth) {
	witness_tsd_t *witness_tsd;
	unsigned d;
	witness_list_t *witnesses;
	witness_t *w;

	if (!config_debug) {
		return;
	}

	if (witness_tsdn_null(witness_tsdn)) {
		return;
	}
	witness_tsd = witness_tsdn_tsd(witness_tsdn);

	d = 0;
	witnesses = &witness_tsd->witnesses;
	w = ql_last(witnesses, link);
	if (w != NULL) {
		ql_reverse_foreach(w, witnesses, link) {
			if (w->rank < rank_inclusive) {
				break;
			}
			d++;
		}
	}
	if (d != depth) {
		witness_depth_error(witnesses, rank_inclusive, depth);
	}
}

static inline void
witness_assert_depth(witness_tsdn_t *witness_tsdn, unsigned depth) {
	witness_assert_depth_to_rank(witness_tsdn, WITNESS_RANK_MIN, depth);
}

static inline void
witness_assert_lockless(witness_tsdn_t *witness_tsdn) {
	witness_assert_depth(witness_tsdn, 0);
}
static inline void
witness_lock(witness_tsdn_t *witness_tsdn, witness_t *witness) {
	witness_tsd_t *witness_tsd;
	witness_list_t *witnesses;
	witness_t *w;

	if (!config_debug) {
		return;
	}

	if (witness_tsdn_null(witness_tsdn)) {
		return;
	}
	witness_tsd = witness_tsdn_tsd(witness_tsdn);
	if (witness->rank == WITNESS_RANK_OMIT) {
		return;
	}

	witness_assert_not_owner(witness_tsdn, witness);

	witnesses = &witness_tsd->witnesses;
	w = ql_last(witnesses, link);
	if (w == NULL) {
		/* No other locks; do nothing. */
	} else if (witness_tsd->forking && w->rank <= witness->rank) {
		/* Forking, and relaxed ranking satisfied. */
	} else if (w->rank > witness->rank) {
		/* Not forking, rank order reversal. */
		witness_lock_error(witnesses, witness);
	} else if (w->rank == witness->rank && (w->comp == NULL || w->comp !=
	    witness->comp || w->comp(w, w->opaque, witness, witness->opaque) >
	    0)) {
		/*
		 * Missing/incompatible comparison function, or comparison
		 * function indicates rank order reversal.
		 */
		witness_lock_error(witnesses, witness);
	}

	ql_elm_new(witness, link);
	ql_tail_insert(witnesses, witness, link);
}
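/*
 * Usage sketch (illustrative only): a lock wrapper pairs the witness calls
 * with the underlying lock so that rank ordering is verified in debug builds,
 * e.g.
 *
 *   witness_assert_not_owner(witness_tsdn, &lock->witness);
 *   acquire(lock);
 *   witness_lock(witness_tsdn, &lock->witness);
 *   ...
 *   witness_unlock(witness_tsdn, &lock->witness);
 *   release(lock);
 *
 * Here "lock", "acquire", and "release" are hypothetical placeholders.
 */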
static inline void
witness_unlock(witness_tsdn_t *witness_tsdn, witness_t *witness) {
	witness_tsd_t *witness_tsd;
	witness_list_t *witnesses;

	if (!config_debug) {
		return;
	}

	if (witness_tsdn_null(witness_tsdn)) {
		return;
	}
	witness_tsd = witness_tsdn_tsd(witness_tsdn);
	if (witness->rank == WITNESS_RANK_OMIT) {
		return;
	}

	/*
	 * Check whether owner before removal, rather than relying on
	 * witness_assert_owner() to abort, so that unit tests can test this
	 * function's failure mode without causing undefined behavior.
	 */
	if (witness_owner(witness_tsd, witness)) {
		witnesses = &witness_tsd->witnesses;
		ql_remove(witnesses, witness, link);
	} else {
		witness_assert_owner(witness_tsdn, witness);
	}
}

#endif /* JEMALLOC_INTERNAL_WITNESS_H */