ruanhaishen / redis · Commits · e3b8492e

Commit e3b8492e, authored Apr 22, 2017 by antirez

Revert "Jemalloc updated to 4.4.0."

This reverts commit 36c1acc2.

parent 238cebdd
Changes: 150
deps/jemalloc/include/jemalloc/internal/assert.h (deleted, 100644 → 0)
/*
* Define a custom assert() in order to reduce the chances of deadlock during
* assertion failure.
*/
#ifndef assert
#define assert(e) do { \
if (unlikely(config_debug && !(e))) { \
malloc_printf( \
"<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
__FILE__, __LINE__, #e); \
abort(); \
} \
} while (0)
#endif
#ifndef not_reached
#define not_reached() do { \
if (config_debug) { \
malloc_printf( \
"<jemalloc>: %s:%d: Unreachable code reached\n", \
__FILE__, __LINE__); \
abort(); \
} \
unreachable(); \
} while (0)
#endif
#ifndef not_implemented
#define not_implemented() do { \
if (config_debug) { \
malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
__FILE__, __LINE__); \
abort(); \
} \
} while (0)
#endif
#ifndef assert_not_implemented
#define assert_not_implemented(e) do { \
if (unlikely(config_debug && !(e))) \
not_implemented(); \
} while (0)
#endif
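For context, the deleted header defines an assert() that reports and aborts without calling back into the allocator being debugged, which reduces the chance of deadlocking inside a failing malloc path. Below is a minimal stand-alone sketch of the same idea; the macro name and the plain write(2)+abort() reporting are illustrative choices, not jemalloc's own code.

#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Hypothetical stand-alone variant of the same idea: report the failure with
 * write(2) and abort(), never calling printf()/malloc() paths that could
 * re-enter the allocator under test. */
#define safe_assert(e) do {                                             \
        if (!(e)) {                                                     \
                const char *msg = "assertion failed: " #e "\n";         \
                ssize_t r = write(STDERR_FILENO, msg, strlen(msg));     \
                (void)r;                                                \
                abort();                                                \
        }                                                               \
} while (0)

int main(void) {
        safe_assert(1 + 1 == 2);        /* passes; program continues */
        return 0;
}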
deps/jemalloc/include/jemalloc/internal/atomic.h
...
...
@@ -28,8 +28,8 @@
  * callers.
  *
  * <t> atomic_read_<t>(<t> *p) { return (*p); }
- * <t> atomic_add_<t>(<t> *p, <t> x) { return (*p += x); }
- * <t> atomic_sub_<t>(<t> *p, <t> x) { return (*p -= x); }
+ * <t> atomic_add_<t>(<t> *p, <t> x) { return (*p + x); }
+ * <t> atomic_sub_<t>(<t> *p, <t> x) { return (*p - x); }
  * bool atomic_cas_<t>(<t> *p, <t> c, <t> s)
  * {
  *   if (*p != c)
...
...
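The removed 4.4.0 comment documents atomic_add/atomic_sub as returning the updated value (*p += x). A small stand-alone illustration of that behaviour using the GCC/Clang __atomic builtins (not jemalloc's own atomic layer):

#include <stdio.h>

int main(void) {
        unsigned long v = 10;
        /* __atomic_add_fetch/__atomic_sub_fetch return the new value, i.e.
         * the "*p += x" semantics the comment describes. */
        unsigned long after_add = __atomic_add_fetch(&v, 3, __ATOMIC_SEQ_CST);
        unsigned long after_sub = __atomic_sub_fetch(&v, 5, __ATOMIC_SEQ_CST);
        printf("%lu %lu\n", after_add, after_sub);      /* prints 13 8 */
        return 0;
}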
deps/jemalloc/include/jemalloc/internal/base.h
...
...
@@ -9,13 +9,12 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-void	*base_alloc(tsdn_t *tsdn, size_t size);
-void	base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident,
-    size_t *mapped);
+void	*base_alloc(size_t size);
+void	base_stats_get(size_t *allocated, size_t *resident, size_t *mapped);
 bool	base_boot(void);
-void	base_prefork(tsdn_t *tsdn);
-void	base_postfork_parent(tsdn_t *tsdn);
-void	base_postfork_child(tsdn_t *tsdn);
+void	base_prefork(void);
+void	base_postfork_parent(void);
+void	base_postfork_child(void);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
...
...
deps/jemalloc/include/jemalloc/internal/bitmap.h
...
...
@@ -15,15 +15,6 @@ typedef unsigned long bitmap_t;
 #define	BITMAP_GROUP_NBITS		(ZU(1) << LG_BITMAP_GROUP_NBITS)
 #define	BITMAP_GROUP_NBITS_MASK	(BITMAP_GROUP_NBITS-1)
 
-/*
- * Do some analysis on how big the bitmap is before we use a tree.  For a brute
- * force linear search, if we would have to call ffs_lu() more than 2^3 times,
- * use a tree instead.
- */
-#if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3
-#  define USE_TREE
-#endif
-
 /* Number of groups required to store a given number of bits. */
 #define	BITMAP_BITS2GROUPS(nbits) \
     ((nbits + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS)
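The removed heuristic decides between a linear scan and the multi-level tree: with 64-bit groups (LG_BITMAP_GROUP_NBITS == 6 on LP64, an assumption here), a linear first-free search touches BITMAP_BITS2GROUPS(nbits) words, and the tree is only worth it once that could exceed 2^3 = 8 words, i.e. more than 512 bits. A small worked sketch of that arithmetic:

#include <stdio.h>

/* Illustrative only: assumes 64-bit bitmap groups (LG_BITMAP_GROUP_NBITS == 6). */
#define LG_GROUP_NBITS 6
#define BITS2GROUPS(nbits) (((nbits) + (1u << LG_GROUP_NBITS) - 1) >> LG_GROUP_NBITS)

int main(void) {
        unsigned lg_maxbits = 9;        /* 2^9 = 512 bits */
        /* The reverted heuristic: use a tree once a linear scan could touch
         * more than 2^3 = 8 groups. */
        int use_tree = (lg_maxbits - LG_GROUP_NBITS) > 3;
        printf("%u bits -> %u groups, use_tree=%d\n",
            1u << lg_maxbits, BITS2GROUPS(1u << lg_maxbits), use_tree);
        return 0;
}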
...
...
@@ -57,8 +48,6 @@ typedef unsigned long bitmap_t;
 /*
  * Maximum number of groups required to support LG_BITMAP_MAXBITS.
  */
-#ifdef USE_TREE
-
 #if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS
 #  define BITMAP_GROUPS_MAX	BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS)
 #elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2
...
...
@@ -76,12 +65,6 @@ typedef unsigned long bitmap_t;
     (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP)				\
     + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP)
 
-#else /* USE_TREE */
-
-#define	BITMAP_GROUPS_MAX	BITMAP_BITS2GROUPS(BITMAP_MAXBITS)
-
-#endif /* USE_TREE */
-
 #endif /* JEMALLOC_H_TYPES */
 /******************************************************************************/
 #ifdef JEMALLOC_H_STRUCTS
...
...
@@ -95,7 +78,6 @@ struct bitmap_info_s {
 	/* Logical number of bits in bitmap (stored at bottom level). */
 	size_t nbits;
 
-#ifdef USE_TREE
 	/* Number of levels necessary for nbits. */
 	unsigned nlevels;
 
...
...
@@ -104,10 +86,6 @@ struct bitmap_info_s {
 	 * bottom to top (e.g. the bottom level is stored in levels[0]).
 	 */
 	bitmap_level_t levels[BITMAP_MAX_LEVELS+1];
-#else /* USE_TREE */
-	/* Number of groups necessary for nbits. */
-	size_t ngroups;
-#endif /* USE_TREE */
 };
 
 #endif /* JEMALLOC_H_STRUCTS */
...
...
@@ -115,8 +93,9 @@ struct bitmap_info_s {
 #ifdef JEMALLOC_H_EXTERNS
 
 void	bitmap_info_init(bitmap_info_t *binfo, size_t nbits);
+size_t	bitmap_info_ngroups(const bitmap_info_t *binfo);
+size_t	bitmap_size(size_t nbits);
 void	bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo);
-size_t	bitmap_size(const bitmap_info_t *binfo);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
...
...
@@ -134,20 +113,10 @@ void	bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
 JEMALLOC_INLINE bool
 bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo)
 {
-#ifdef USE_TREE
-	size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
+	unsigned rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
 	bitmap_t rg = bitmap[rgoff];
 	/* The bitmap is full iff the root group is 0. */
 	return (rg == 0);
-#else
-	size_t i;
-
-	for (i = 0; i < binfo->ngroups; i++) {
-		if (bitmap[i] != 0)
-			return (false);
-	}
-	return (true);
-#endif
 }
 
 JEMALLOC_INLINE bool
...
...
@@ -159,7 +128,7 @@ bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
 	assert(bit < binfo->nbits);
 	goff = bit >> LG_BITMAP_GROUP_NBITS;
 	g = bitmap[goff];
-	return (!(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))));
+	return (!(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))));
 }
 
 JEMALLOC_INLINE void
...
...
@@ -174,11 +143,10 @@ bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
 	goff = bit >> LG_BITMAP_GROUP_NBITS;
 	gp = &bitmap[goff];
 	g = *gp;
-	assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
-	g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
+	assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)));
+	g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
 	*gp = g;
 	assert(bitmap_get(bitmap, binfo, bit));
-#ifdef USE_TREE
 	/* Propagate group state transitions up the tree. */
 	if (g == 0) {
 		unsigned i;
...
...
@@ -187,14 +155,13 @@ bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
 			goff = bit >> LG_BITMAP_GROUP_NBITS;
 			gp = &bitmap[binfo->levels[i].group_offset + goff];
 			g = *gp;
-			assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
-			g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
+			assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)));
+			g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
 			*gp = g;
 			if (g != 0)
 				break;
 		}
 	}
-#endif
 }
 
 /* sfu: set first unset. */
...
...
@@ -207,24 +174,15 @@ bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
 
 	assert(!bitmap_full(bitmap, binfo));
 
-#ifdef USE_TREE
 	i = binfo->nlevels - 1;
 	g = bitmap[binfo->levels[i].group_offset];
-	bit = ffs_lu(g) - 1;
+	bit = jemalloc_ffsl(g) - 1;
 	while (i > 0) {
 		i--;
 		g = bitmap[binfo->levels[i].group_offset + bit];
-		bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1);
+		bit = (bit << LG_BITMAP_GROUP_NBITS) + (jemalloc_ffsl(g) - 1);
 	}
-#else
-	i = 0;
-	g = bitmap[0];
-	while ((bit = ffs_lu(g)) == 0) {
-		i++;
-		g = bitmap[i];
-	}
-	bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
-#endif
+
 	bitmap_set(bitmap, binfo, bit);
 	return (bit);
 }
...
...
@@ -235,7 +193,7 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
 	size_t goff;
 	bitmap_t *gp;
 	bitmap_t g;
-	UNUSED bool propagate;
+	bool propagate;
 
 	assert(bit < binfo->nbits);
 	assert(bitmap_get(bitmap, binfo, bit));
...
...
@@ -243,11 +201,10 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
 	gp = &bitmap[goff];
 	g = *gp;
 	propagate = (g == 0);
-	assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
-	g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
+	assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
+	g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
 	*gp = g;
 	assert(!bitmap_get(bitmap, binfo, bit));
-#ifdef USE_TREE
 	/* Propagate group state transitions up the tree. */
 	if (propagate) {
 		unsigned i;
...
...
@@ -257,15 +214,14 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
 			gp = &bitmap[binfo->levels[i].group_offset + goff];
 			g = *gp;
 			propagate = (g == 0);
-			assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)))
+			assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)))
 			    == 0);
-			g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
+			g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
 			*gp = g;
 			if (!propagate)
 				break;
 		}
 	}
-#endif /* USE_TREE */
 }
 
 #endif
...
...
deps/jemalloc/include/jemalloc/internal/chunk.h
...
...
@@ -48,30 +48,32 @@ extern size_t chunk_npages;
 
 extern const chunk_hooks_t	chunk_hooks_default;
 
-chunk_hooks_t	chunk_hooks_get(tsdn_t *tsdn, arena_t *arena);
-chunk_hooks_t	chunk_hooks_set(tsdn_t *tsdn, arena_t *arena,
+chunk_hooks_t	chunk_hooks_get(arena_t *arena);
+chunk_hooks_t	chunk_hooks_set(arena_t *arena,
     const chunk_hooks_t *chunk_hooks);
 
-bool	chunk_register(tsdn_t *tsdn, const void *chunk,
-    const extent_node_t *node);
+bool	chunk_register(const void *chunk, const extent_node_t *node);
 void	chunk_deregister(const void *chunk, const extent_node_t *node);
 void	*chunk_alloc_base(size_t size);
-void	*chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena,
-    chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
-    size_t *sn, bool *zero, bool *commit, bool dalloc_node);
-void	*chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
-    chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
-    size_t *sn, bool *zero, bool *commit);
-void	chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena,
-    chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn,
-    bool committed);
-void	chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
-    chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn,
-    bool zeroed, bool committed);
-bool	chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena,
-    chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t offset,
+void	*chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *new_addr, size_t size, size_t alignment, bool *zero,
+    bool dalloc_node);
+void	*chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit);
+void	chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *chunk, size_t size, bool committed);
+void	chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *chunk, size_t size, bool zeroed, bool committed);
+void	chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *chunk, size_t size, bool committed);
+bool	chunk_purge_arena(arena_t *arena, void *chunk, size_t offset,
+    size_t length);
+bool	chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *chunk, size_t size, size_t offset,
     size_t length);
 bool	chunk_boot(void);
+void	chunk_prefork(void);
+void	chunk_postfork_parent(void);
+void	chunk_postfork_child(void);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
...
...
deps/jemalloc/include/jemalloc/internal/chunk_dss.h
...
...
@@ -23,11 +23,13 @@ extern const char *dss_prec_names[];
 
 dss_prec_t	chunk_dss_prec_get(void);
 bool	chunk_dss_prec_set(dss_prec_t dss_prec);
-void	*chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,
-    size_t size, size_t alignment, bool *zero, bool *commit);
+void	*chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size,
+    size_t alignment, bool *zero, bool *commit);
 bool	chunk_in_dss(void *chunk);
-bool	chunk_dss_mergeable(void *chunk_a, void *chunk_b);
-void	chunk_dss_boot(void);
+bool	chunk_dss_boot(void);
+void	chunk_dss_prefork(void);
+void	chunk_dss_postfork_parent(void);
+void	chunk_dss_postfork_child(void);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
...
...
deps/jemalloc/include/jemalloc/internal/chunk_mmap.h
...
...
@@ -9,8 +9,8 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-void	*chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment,
-    bool *zero, bool *commit);
+void	*chunk_alloc_mmap(size_t size, size_t alignment, bool *zero,
+    bool *commit);
 bool	chunk_dalloc_mmap(void *chunk, size_t size);
 
 #endif /* JEMALLOC_H_EXTERNS */
...
...
deps/jemalloc/include/jemalloc/internal/ckh.h
...
...
@@ -40,7 +40,9 @@ struct ckh_s {
 #endif
 
 	/* Used for pseudo-random number generation. */
-	uint64_t	prng_state;
+#define	CKH_A		1103515241
+#define	CKH_C		12347
+	uint32_t	prng_state;
 
 	/* Total number of items. */
 	size_t	count;
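The restored CKH_A/CKH_C pair are linear-congruential constants for the cuckoo hash's internal PRNG. A sketch of how such constants are typically consumed (state = state * A + C, then take the high bits); the exact macro jemalloc uses lives in prng.h and may differ in detail:

#include <stdint.h>
#include <stdio.h>

int main(void) {
        uint32_t state = 42;
        const uint32_t A = 1103515241, C = 12347;
        for (int i = 0; i < 3; i++) {
                state = state * A + C;         /* LCG update */
                /* Use the upper bits: the low bits of an LCG are the least random. */
                printf("draw %d: %u\n", i, state >> 16);
        }
        return 0;
}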
...
...
@@ -72,7 +74,7 @@ bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
 bool	ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data);
 bool	ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
     void **data);
-bool	ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);
+bool	ckh_search(ckh_t *ckh, const void *seachkey, void **key, void **data);
 void	ckh_string_hash(const void *key, size_t r_hash[2]);
 bool	ckh_string_keycomp(const void *k1, const void *k2);
 void	ckh_pointer_hash(const void *key, size_t r_hash[2]);
...
...
deps/jemalloc/include/jemalloc/internal/ctl.h
...
...
@@ -21,14 +21,13 @@ struct ctl_named_node_s {
 	/* If (nchildren == 0), this is a terminal node. */
 	unsigned		nchildren;
 	const	ctl_node_t	*children;
-	int			(*ctl)(tsd_t *, const size_t *, size_t, void *,
-	    size_t *, void *, size_t);
+	int			(*ctl)(const size_t *, size_t, void *, size_t *,
+	    void *, size_t);
 };
 
 struct ctl_indexed_node_s {
 	struct ctl_node_s	node;
-	const ctl_named_node_t	*(*index)(tsdn_t *, const size_t *, size_t,
-	    size_t);
+	const ctl_named_node_t	*(*index)(const size_t *, size_t, size_t);
 };
 
 struct ctl_arena_stats_s {
...
...
@@ -36,12 +35,8 @@ struct ctl_arena_stats_s {
 	unsigned		nthreads;
 	const char		*dss;
 	ssize_t			lg_dirty_mult;
-	ssize_t			decay_time;
 	size_t			pactive;
 	size_t			pdirty;
-
-	/* The remainder are only populated if config_stats is true. */
-
 	arena_stats_t		astats;
 
 	/* Aggregate stats for small size classes, based on bin stats. */
...
...
@@ -61,7 +56,6 @@ struct ctl_stats_s {
 	size_t			metadata;
 	size_t			resident;
 	size_t			mapped;
-	size_t			retained;
 	unsigned		narenas;
 	ctl_arena_stats_t	*arenas;	/* (narenas + 1) elements. */
 };
...
...
@@ -70,17 +64,16 @@ struct ctl_stats_s {
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-int	ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
-    void *newp, size_t newlen);
-int	ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp,
-    size_t *miblenp);
+int	ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
+    size_t newlen);
+int	ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp);
 
-int	ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
-    size_t *oldlenp, void *newp, size_t newlen);
+int	ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+    void *newp, size_t newlen);
 bool	ctl_boot(void);
-void	ctl_prefork(tsdn_t *tsdn);
-void	ctl_postfork_parent(tsdn_t *tsdn);
-void	ctl_postfork_child(tsdn_t *tsdn);
+void	ctl_prefork(void);
+void	ctl_postfork_parent(void);
+void	ctl_postfork_child(void);
 
 #define	xmallctl(name, oldp, oldlenp, newp, newlen) do {		\
 	if (je_mallctl(name, oldp, oldlenp, newp, newlen)		\
...
...
deps/jemalloc/include/jemalloc/internal/extent.h
...
...
@@ -18,20 +18,6 @@ struct extent_node_s {
 	/* Total region size. */
 	size_t			en_size;
 
-	/*
-	 * Serial number (potentially non-unique).
-	 *
-	 * In principle serial numbers can wrap around on 32-bit systems if
-	 * JEMALLOC_MUNMAP is defined, but as long as comparison functions fall
-	 * back on address comparison for equal serial numbers, stable (if
-	 * imperfect) ordering is maintained.
-	 *
-	 * Serial numbers may not be unique even in the absence of wrap-around,
-	 * e.g. when splitting an extent and assigning the same serial number to
-	 * both resulting adjacent extents.
-	 */
-	size_t			en_sn;
-
 	/*
 	 * The zeroed flag is used by chunk recycling code to track whether
 	 * memory is zero-filled.
...
...
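The removed comment explains why the size/serial/address ("szsnad") ordering stays stable even when serial numbers repeat: ties on the serial number fall back to the extent's address. An illustrative comparator in that spirit; the struct and field names here are simplified stand-ins, not jemalloc's own types:

#include <stddef.h>
#include <stdint.h>

struct node {
        size_t size;
        size_t sn;      /* serial number, possibly non-unique */
        void *addr;
};

/* Order by size, then serial number, then address, so equal serial numbers
 * still yield a stable total order. */
static int node_cmp(const struct node *a, const struct node *b) {
        if (a->size != b->size)
                return (a->size < b->size) ? -1 : 1;
        if (a->sn != b->sn)
                return (a->sn < b->sn) ? -1 : 1;
        if ((uintptr_t)a->addr != (uintptr_t)b->addr)
                return ((uintptr_t)a->addr < (uintptr_t)b->addr) ? -1 : 1;
        return 0;
}

int main(void) {
        struct node a = {4096, 1, (void *)0x1000};
        struct node b = {4096, 1, (void *)0x2000};
        return node_cmp(&a, &b) < 0 ? 0 : 1;    /* equal size/sn: address decides */
}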
@@ -59,10 +45,10 @@ struct extent_node_s {
 	qr(extent_node_t)	cc_link;
 
 	union {
-		/* Linkage for the size/sn/address-ordered tree. */
-		rb_node(extent_node_t)	szsnad_link;
+		/* Linkage for the size/address-ordered tree. */
+		rb_node(extent_node_t)	szad_link;
 
-		/* Linkage for arena's achunks, huge, and node_cache lists. */
+		/* Linkage for arena's huge and node_cache lists. */
 		ql_elm(extent_node_t)	ql_link;
 	};
...
...
@@ -75,7 +61,7 @@ typedef rb_tree(extent_node_t) extent_tree_t;
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-rb_proto(, extent_tree_szsnad_, extent_tree_t, extent_node_t)
+rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t)
 
 rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
...
...
@@ -87,7 +73,6 @@ rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
 arena_t	*extent_node_arena_get(const extent_node_t *node);
 void	*extent_node_addr_get(const extent_node_t *node);
 size_t	extent_node_size_get(const extent_node_t *node);
-size_t	extent_node_sn_get(const extent_node_t *node);
 bool	extent_node_zeroed_get(const extent_node_t *node);
 bool	extent_node_committed_get(const extent_node_t *node);
 bool	extent_node_achunk_get(const extent_node_t *node);
...
...
@@ -95,13 +80,12 @@ prof_tctx_t	*extent_node_prof_tctx_get(const extent_node_t *node);
 void	extent_node_arena_set(extent_node_t *node, arena_t *arena);
 void	extent_node_addr_set(extent_node_t *node, void *addr);
 void	extent_node_size_set(extent_node_t *node, size_t size);
-void	extent_node_sn_set(extent_node_t *node, size_t sn);
 void	extent_node_zeroed_set(extent_node_t *node, bool zeroed);
 void	extent_node_committed_set(extent_node_t *node, bool committed);
 void	extent_node_achunk_set(extent_node_t *node, bool achunk);
 void	extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx);
 void	extent_node_init(extent_node_t *node, arena_t *arena, void *addr,
-    size_t size, size_t sn, bool zeroed, bool committed);
+    size_t size, bool zeroed, bool committed);
 void	extent_node_dirty_linkage_init(extent_node_t *node);
 void	extent_node_dirty_insert(extent_node_t *node,
     arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty);
...
...
@@ -130,13 +114,6 @@ extent_node_size_get(const extent_node_t *node)
 	return (node->en_size);
 }
 
-JEMALLOC_INLINE size_t
-extent_node_sn_get(const extent_node_t *node)
-{
-
-	return (node->en_sn);
-}
-
 JEMALLOC_INLINE bool
 extent_node_zeroed_get(const extent_node_t *node)
 {
...
...
@@ -187,13 +164,6 @@ extent_node_size_set(extent_node_t *node, size_t size)
 	node->en_size = size;
 }
 
-JEMALLOC_INLINE void
-extent_node_sn_set(extent_node_t *node, size_t sn)
-{
-
-	node->en_sn = sn;
-}
-
 JEMALLOC_INLINE void
 extent_node_zeroed_set(extent_node_t *node, bool zeroed)
 {
...
...
@@ -224,13 +194,12 @@ extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx)
 JEMALLOC_INLINE void
 extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size,
-    size_t sn, bool zeroed, bool committed)
+    bool zeroed, bool committed)
 {
 
 	extent_node_arena_set(node, arena);
 	extent_node_addr_set(node, addr);
 	extent_node_size_set(node, size);
-	extent_node_sn_set(node, sn);
 	extent_node_zeroed_set(node, zeroed);
 	extent_node_committed_set(node, committed);
 	extent_node_achunk_set(node, false);
...
...
deps/jemalloc/include/jemalloc/internal/hash.h
 /*
  * The following hash function is based on MurmurHash3, placed into the public
- * domain by Austin Appleby.  See https://github.com/aappleby/smhasher for
+ * domain by Austin Appleby.  See http://code.google.com/p/smhasher/ for
  * details.
  */
 /******************************************************************************/
...
...
@@ -49,14 +49,6 @@ JEMALLOC_INLINE uint32_t
 hash_get_block_32(const uint32_t *p, int i)
 {
 
-	/* Handle unaligned read. */
-	if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) {
-		uint32_t ret;
-
-		memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t));
-		return (ret);
-	}
-
 	return (p[i]);
 }
...
...
@@ -64,14 +56,6 @@ JEMALLOC_INLINE uint64_t
 hash_get_block_64(const uint64_t *p, int i)
 {
 
-	/* Handle unaligned read. */
-	if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) {
-		uint64_t ret;
-
-		memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t));
-		return (ret);
-	}
-
 	return (p[i]);
 }
...
...
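The 4.4.0 code that this revert removes handles unaligned hash input by copying the bytes out with memcpy() instead of dereferencing a possibly misaligned pointer. A minimal stand-alone illustration of that pattern (not jemalloc code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t load32(const void *p) {
        uint32_t v;
        memcpy(&v, p, sizeof(v));       /* safe for any alignment */
        return v;
}

int main(void) {
        unsigned char buf[8] = {1, 0, 0, 0, 2, 0, 0, 0};
        /* buf + 1 is typically not 4-byte aligned; load32() is still well defined. */
        printf("%u %u\n", load32(buf), load32(buf + 1));
        return 0;
}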
@@ -337,18 +321,13 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,
 JEMALLOC_INLINE void
 hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2])
 {
 
-	assert(len <= INT_MAX); /* Unfortunate implementation limitation. */
-
 #if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
-	hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash);
+	hash_x64_128(key, len, seed, (uint64_t *)r_hash);
 #else
-	{
-		uint64_t hashes[2];
-		hash_x86_128(key, (int)len, seed, hashes);
-		r_hash[0] = (size_t)hashes[0];
-		r_hash[1] = (size_t)hashes[1];
-	}
+	uint64_t hashes[2];
+	hash_x86_128(key, len, seed, hashes);
+	r_hash[0] = (size_t)hashes[0];
+	r_hash[1] = (size_t)hashes[1];
 #endif
 }
 #endif
...
...
deps/jemalloc/include/jemalloc/internal/huge.h
...
...
@@ -9,23 +9,24 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-void	*huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
-void	*huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
-    size_t alignment, bool zero);
-bool	huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize,
-    size_t usize_min, size_t usize_max, bool zero);
+void	*huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
+    tcache_t *tcache);
+void	*huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
+    bool zero, tcache_t *tcache);
+bool	huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min,
+    size_t usize_max, bool zero);
 void	*huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
     size_t usize, size_t alignment, bool zero, tcache_t *tcache);
 #ifdef JEMALLOC_JET
 typedef void (huge_dalloc_junk_t)(void *, size_t);
 extern huge_dalloc_junk_t *huge_dalloc_junk;
 #endif
-void	huge_dalloc(tsdn_t *tsdn, void *ptr);
+void	huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
 arena_t	*huge_aalloc(const void *ptr);
-size_t	huge_salloc(tsdn_t *tsdn, const void *ptr);
-prof_tctx_t	*huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr);
-void	huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx);
-void	huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr);
+size_t	huge_salloc(const void *ptr);
+prof_tctx_t	*huge_prof_tctx_get(const void *ptr);
+void	huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
+void	huge_prof_tctx_reset(const void *ptr);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
...
...
deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in
...
...
@@ -49,7 +49,6 @@ static const bool config_lazy_lock =
false
#endif
;
static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF;
static const bool config_prof =
#ifdef JEMALLOC_PROF
true
...
...
@@ -161,10 +160,7 @@ static const bool config_cache_oblivious =
 #include <malloc/malloc.h>
 #endif
 
-#include "jemalloc/internal/ph.h"
-#ifndef __PGI
 #define	RB_COMPACT
-#endif
 #include "jemalloc/internal/rb.h"
 #include "jemalloc/internal/qr.h"
 #include "jemalloc/internal/ql.h"
...
...
@@ -187,9 +183,6 @@ static const bool config_cache_oblivious =
 #include "jemalloc/internal/jemalloc_internal_macros.h"
 
-/* Page size index type. */
-typedef unsigned pszind_t;
-
 /* Size class index type. */
 typedef unsigned szind_t;
...
...
@@ -239,7 +232,7 @@ typedef unsigned szind_t;
 #  ifdef __alpha__
 #    define LG_QUANTUM		4
 #  endif
-#  if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__))
+#  if (defined(__sparc64__) || defined(__sparcv9))
 #    define LG_QUANTUM		4
 #  endif
 #  if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
...
...
@@ -263,9 +256,6 @@ typedef unsigned szind_t;
 #  ifdef __powerpc__
 #    define LG_QUANTUM		4
 #  endif
-#  ifdef __riscv__
-#    define LG_QUANTUM		4
-#  endif
 #  ifdef __s390__
 #    define LG_QUANTUM		4
 #  endif
...
...
@@ -327,17 +317,13 @@ typedef unsigned szind_t;
 #define	PAGE		((size_t)(1U << LG_PAGE))
 #define	PAGE_MASK	((size_t)(PAGE - 1))
 
-/* Return the page base address for the page containing address a. */
-#define	PAGE_ADDR2BASE(a)						\
-	((void *)((uintptr_t)(a) & ~PAGE_MASK))
-
 /* Return the smallest pagesize multiple that is >= s. */
 #define	PAGE_CEILING(s)							\
 	(((s) + PAGE_MASK) & ~PAGE_MASK)
 
 /* Return the nearest aligned address at or below a. */
 #define	ALIGNMENT_ADDR2BASE(a, alignment)				\
-	((void *)((uintptr_t)(a) & ((~(alignment)) + 1)))
+	((void *)((uintptr_t)(a) & (-(alignment))))
 
 /* Return the offset between a and the nearest aligned address at or below a. */
 #define	ALIGNMENT_ADDR2OFFSET(a, alignment)				\
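The two ALIGNMENT_ADDR2BASE bodies in this hunk are equivalent: for a power-of-two alignment, (~alignment) + 1 is exactly -alignment in two's complement, so both masks clear the same low bits. A short demonstration (values chosen arbitrarily):

#include <stdint.h>
#include <stdio.h>

int main(void) {
        uintptr_t a = 0x1234567;
        uintptr_t alignment = 64;       /* must be a power of two */
        uintptr_t base_neg = a & (-alignment);
        uintptr_t base_not = a & ((~alignment) + 1);
        /* Both print 0x1234540: the nearest 64-byte-aligned address at or below a. */
        printf("0x%lx 0x%lx\n", (unsigned long)base_neg, (unsigned long)base_not);
        return 0;
}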
...
...
@@ -345,7 +331,7 @@ typedef unsigned szind_t;
 
 /* Return the smallest alignment multiple that is >= s. */
 #define	ALIGNMENT_CEILING(s, alignment)					\
-	(((s) + (alignment - 1)) & ((~(alignment)) + 1))
+	(((s) + (alignment - 1)) & (-(alignment)))
 
 /* Declare a variable-length array. */
 #if __STDC_VERSION__ < 199901L
...
...
@@ -365,19 +351,14 @@ typedef unsigned szind_t;
 #  define VARIABLE_ARRAY(type, name, count) type name[(count)]
 #endif
 
-#include "jemalloc/internal/nstime.h"
 #include "jemalloc/internal/valgrind.h"
 #include "jemalloc/internal/util.h"
 #include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/spin.h"
 #include "jemalloc/internal/prng.h"
-#include "jemalloc/internal/ticker.h"
 #include "jemalloc/internal/ckh.h"
 #include "jemalloc/internal/size_classes.h"
-#include "jemalloc/internal/smoothstep.h"
 #include "jemalloc/internal/stats.h"
 #include "jemalloc/internal/ctl.h"
-#include "jemalloc/internal/witness.h"
 #include "jemalloc/internal/mutex.h"
 #include "jemalloc/internal/tsd.h"
 #include "jemalloc/internal/mb.h"
...
...
@@ -398,19 +379,14 @@ typedef unsigned szind_t;
 /******************************************************************************/
 #define	JEMALLOC_H_STRUCTS
 
-#include "jemalloc/internal/nstime.h"
 #include "jemalloc/internal/valgrind.h"
 #include "jemalloc/internal/util.h"
 #include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/spin.h"
 #include "jemalloc/internal/prng.h"
-#include "jemalloc/internal/ticker.h"
 #include "jemalloc/internal/ckh.h"
 #include "jemalloc/internal/size_classes.h"
-#include "jemalloc/internal/smoothstep.h"
 #include "jemalloc/internal/stats.h"
 #include "jemalloc/internal/ctl.h"
-#include "jemalloc/internal/witness.h"
 #include "jemalloc/internal/mutex.h"
 #include "jemalloc/internal/mb.h"
 #include "jemalloc/internal/bitmap.h"
...
...
@@ -446,27 +422,13 @@ extern bool	opt_redzone;
 extern bool	opt_utrace;
 extern bool	opt_xmalloc;
 extern bool	opt_zero;
-extern unsigned	opt_narenas;
+extern size_t	opt_narenas;
 
 extern bool	in_valgrind;
 
 /* Number of CPUs. */
-extern unsigned	ncpus;
-
-/* Number of arenas used for automatic multiplexing of threads and arenas. */
-extern unsigned	narenas_auto;
-
-/*
- * Arenas that are used to service external requests.  Not all elements of the
- * arenas array are necessarily used; arenas are created lazily as needed.
- */
-extern arena_t	**arenas;
-
-/*
- * pind2sz_tab encodes the same information as could be computed by
- * pind2sz_compute().
- */
-extern size_t const	pind2sz_tab[NPSIZES];
+extern unsigned	ncpus;
 
 /*
  * index2size_tab encodes the same information as could be computed (at
  * unacceptable cost in some code paths) by index2size_compute().
...
...
@@ -485,35 +447,31 @@ void	a0dalloc(void *ptr);
 void	*bootstrap_malloc(size_t size);
 void	*bootstrap_calloc(size_t num, size_t size);
 void	bootstrap_free(void *ptr);
+arena_t	*arenas_extend(unsigned ind);
+arena_t	*arena_init(unsigned ind);
 unsigned	narenas_total_get(void);
-arena_t	*arena_init(tsdn_t *tsdn, unsigned ind);
-arena_tdata_t	*arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
-arena_t	*arena_choose_hard(tsd_t *tsd, bool internal);
+arena_t	*arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing);
+arena_t	*arena_choose_hard(tsd_t *tsd);
 void	arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
+unsigned	arena_nbound(unsigned ind);
 void	thread_allocated_cleanup(tsd_t *tsd);
 void	thread_deallocated_cleanup(tsd_t *tsd);
-void	iarena_cleanup(tsd_t *tsd);
 void	arena_cleanup(tsd_t *tsd);
-void	arenas_tdata_cleanup(tsd_t *tsd);
-void	narenas_tdata_cleanup(tsd_t *tsd);
-void	arenas_tdata_bypass_cleanup(tsd_t *tsd);
+void	arenas_cache_cleanup(tsd_t *tsd);
+void	narenas_cache_cleanup(tsd_t *tsd);
+void	arenas_cache_bypass_cleanup(tsd_t *tsd);
 void	jemalloc_prefork(void);
 void	jemalloc_postfork_parent(void);
 void	jemalloc_postfork_child(void);
 
-#include "jemalloc/internal/nstime.h"
 #include "jemalloc/internal/valgrind.h"
 #include "jemalloc/internal/util.h"
 #include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/spin.h"
 #include "jemalloc/internal/prng.h"
-#include "jemalloc/internal/ticker.h"
 #include "jemalloc/internal/ckh.h"
 #include "jemalloc/internal/size_classes.h"
-#include "jemalloc/internal/smoothstep.h"
 #include "jemalloc/internal/stats.h"
 #include "jemalloc/internal/ctl.h"
-#include "jemalloc/internal/witness.h"
 #include "jemalloc/internal/mutex.h"
 #include "jemalloc/internal/mb.h"
 #include "jemalloc/internal/bitmap.h"
...
...
@@ -534,21 +492,16 @@ void	jemalloc_postfork_child(void);
 /******************************************************************************/
 #define	JEMALLOC_H_INLINES
 
-#include "jemalloc/internal/nstime.h"
 #include "jemalloc/internal/valgrind.h"
 #include "jemalloc/internal/util.h"
 #include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/spin.h"
 #include "jemalloc/internal/prng.h"
-#include "jemalloc/internal/ticker.h"
 #include "jemalloc/internal/ckh.h"
 #include "jemalloc/internal/size_classes.h"
-#include "jemalloc/internal/smoothstep.h"
 #include "jemalloc/internal/stats.h"
 #include "jemalloc/internal/ctl.h"
-#include "jemalloc/internal/witness.h"
+#include "jemalloc/internal/tsd.h"
 #include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/tsd.h"
 #include "jemalloc/internal/mb.h"
 #include "jemalloc/internal/extent.h"
 #include "jemalloc/internal/base.h"
...
...
@@ -558,11 +511,6 @@ void	jemalloc_postfork_child(void);
 #include "jemalloc/internal/huge.h"
 
 #ifndef JEMALLOC_ENABLE_INLINE
-pszind_t	psz2ind(size_t psz);
-size_t	pind2sz_compute(pszind_t pind);
-size_t	pind2sz_lookup(pszind_t pind);
-size_t	pind2sz(pszind_t pind);
-size_t	psz2u(size_t psz);
 szind_t	size2index_compute(size_t size);
 szind_t	size2index_lookup(size_t size);
 szind_t	size2index(size_t size);
...
...
@@ -573,121 +521,39 @@ size_t	s2u_compute(size_t size);
 size_t	s2u_lookup(size_t size);
 size_t	s2u(size_t size);
 size_t	sa2u(size_t size, size_t alignment);
-arena_t	*arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal);
 arena_t	*arena_choose(tsd_t *tsd, arena_t *arena);
-arena_t	*arena_ichoose(tsd_t *tsd, arena_t *arena);
-arena_tdata_t	*arena_tdata_get(tsd_t *tsd, unsigned ind,
+arena_t	*arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing,
     bool refresh_if_missing);
-arena_t	*arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing);
-ticker_t	*decay_ticker_get(tsd_t *tsd, unsigned ind);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
-JEMALLOC_INLINE pszind_t
-psz2ind(size_t psz)
-{
-
-	if (unlikely(psz > HUGE_MAXCLASS))
-		return (NPSIZES);
-	{
-		pszind_t x = lg_floor((psz<<1)-1);
-		pszind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_PAGE) ? 0 : x -
-		    (LG_SIZE_CLASS_GROUP + LG_PAGE);
-		pszind_t grp = shift << LG_SIZE_CLASS_GROUP;
-		pszind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
-		    LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;
-		size_t delta_inverse_mask = ZI(-1) << lg_delta;
-		pszind_t mod = ((((psz-1) & delta_inverse_mask) >> lg_delta)) &
-		    ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
-		pszind_t ind = grp + mod;
-		return (ind);
-	}
-}
-
-JEMALLOC_INLINE size_t
-pind2sz_compute(pszind_t pind)
-{
-
-	{
-		size_t grp = pind >> LG_SIZE_CLASS_GROUP;
-		size_t mod = pind & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
-		size_t grp_size_mask = ~((!!grp)-1);
-		size_t grp_size = ((ZU(1) << (LG_PAGE +
-		    (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;
-		size_t shift = (grp == 0) ? 1 : grp;
-		size_t lg_delta = shift + (LG_PAGE-1);
-		size_t mod_size = (mod+1) << lg_delta;
-		size_t sz = grp_size + mod_size;
-		return (sz);
-	}
-}
-
-JEMALLOC_INLINE size_t
-pind2sz_lookup(pszind_t pind)
-{
-	size_t ret = (size_t)pind2sz_tab[pind];
-	assert(ret == pind2sz_compute(pind));
-	return (ret);
-}
-
-JEMALLOC_INLINE size_t
-pind2sz(pszind_t pind)
-{
-
-	assert(pind < NPSIZES);
-	return (pind2sz_lookup(pind));
-}
-
-JEMALLOC_INLINE size_t
-psz2u(size_t psz)
-{
-
-	if (unlikely(psz > HUGE_MAXCLASS))
-		return (0);
-	{
-		size_t x = lg_floor((psz<<1)-1);
-		size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
-		    LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;
-		size_t delta = ZU(1) << lg_delta;
-		size_t delta_mask = delta - 1;
-		size_t usize = (psz + delta_mask) & ~delta_mask;
-		return (usize);
-	}
-}
-
 JEMALLOC_INLINE szind_t
 size2index_compute(size_t size)
 {
 
-	if (unlikely(size > HUGE_MAXCLASS))
-		return (NSIZES);
 #if (NTBINS != 0)
 	if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
-		szind_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
-		szind_t lg_ceil = lg_floor(pow2_ceil_zu(size));
+		size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
+		size_t lg_ceil = lg_floor(pow2_ceil(size));
 		return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
 	}
 #endif
 	{
-		szind_t x = lg_floor((size<<1)-1);
-		szind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
+		size_t x = unlikely(ZI(size) < 0) ? ((size<<1) ?
+		    (ZU(1)<<(LG_SIZEOF_PTR+3)) : ((ZU(1)<<(LG_SIZEOF_PTR+3))-1))
+		    : lg_floor((size<<1)-1);
+		size_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
 		    x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM);
-		szind_t grp = shift << LG_SIZE_CLASS_GROUP;
+		size_t grp = shift << LG_SIZE_CLASS_GROUP;
-		szind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
+		size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
 		    ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
 		size_t delta_inverse_mask = ZI(-1) << lg_delta;
-		szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
+		size_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
 		    ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
-		szind_t index = NTBINS + grp + mod;
+		size_t index = NTBINS + grp + mod;
 		return (index);
 	}
 }
...
...
@@ -698,7 +564,8 @@ size2index_lookup(size_t size)
 	assert(size <= LOOKUP_MAXCLASS);
 	{
-		szind_t ret = (size2index_tab[(size-1) >> LG_TINY_MIN]);
+		size_t ret = ((size_t)(size2index_tab[(size-1) >>
+		    LG_TINY_MIN]));
 		assert(ret == size2index_compute(size));
 		return (ret);
 	}
...
...
@@ -761,18 +628,18 @@ JEMALLOC_ALWAYS_INLINE size_t
 s2u_compute(size_t size)
 {
 
-	if (unlikely(size > HUGE_MAXCLASS))
-		return (0);
 #if (NTBINS > 0)
 	if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
 		size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
-		size_t lg_ceil = lg_floor(pow2_ceil_zu(size));
+		size_t lg_ceil = lg_floor(pow2_ceil(size));
 		return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
 		    (ZU(1) << lg_ceil));
 	}
 #endif
 	{
-		size_t x = lg_floor((size<<1)-1);
+		size_t x = unlikely(ZI(size) < 0) ? ((size<<1) ?
+		    (ZU(1)<<(LG_SIZEOF_PTR+3)) : ((ZU(1)<<(LG_SIZEOF_PTR+3))-1))
+		    : lg_floor((size<<1)-1);
 		size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
 		    ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
 		size_t delta = ZU(1) << lg_delta;
...
...
@@ -856,16 +723,17 @@ sa2u(size_t size, size_t alignment)
 		return (usize);
 	}
 
-	/* Huge size class.  Beware of overflow. */
-
-	if (unlikely(alignment > HUGE_MAXCLASS))
-		return (0);
+	/* Huge size class.  Beware of size_t overflow. */
 
 	/*
 	 * We can't achieve subchunk alignment, so round up alignment to the
 	 * minimum that can actually be supported.
 	 */
 	alignment = CHUNK_CEILING(alignment);
+	if (alignment == 0) {
+		/* size_t overflow. */
+		return (0);
+	}
 
 	/* Make sure result is a huge size class. */
 	if (size <= chunksize)
...
...
@@ -891,84 +759,45 @@ sa2u(size_t size, size_t alignment)
 
 /* Choose an arena based on a per-thread value. */
 JEMALLOC_INLINE arena_t *
-arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal)
+arena_choose(tsd_t *tsd, arena_t *arena)
 {
 	arena_t *ret;
 
 	if (arena != NULL)
 		return (arena);
 
-	ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
-	if (unlikely(ret == NULL))
-		ret = arena_choose_hard(tsd, internal);
+	if (unlikely((ret = tsd_arena_get(tsd)) == NULL))
+		ret = arena_choose_hard(tsd);
 
 	return (ret);
 }
 
 JEMALLOC_INLINE arena_t *
-arena_choose(tsd_t *tsd, arena_t *arena)
+arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing,
+    bool refresh_if_missing)
 {
+	arena_t *arena;
+	arena_t **arenas_cache = tsd_arenas_cache_get(tsd);
 
-	return (arena_choose_impl(tsd, arena, false));
-}
-
-JEMALLOC_INLINE arena_t *
-arena_ichoose(tsd_t *tsd, arena_t *arena)
-{
+	/* init_if_missing requires refresh_if_missing. */
+	assert(!init_if_missing || refresh_if_missing);
 
-	return (arena_choose_impl(tsd, arena, true));
-}
-
-JEMALLOC_INLINE arena_tdata_t *
-arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing)
-{
-	arena_tdata_t *tdata;
-	arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
-
-	if (unlikely(arenas_tdata == NULL)) {
-		/* arenas_tdata hasn't been initialized yet. */
-		return (arena_tdata_get_hard(tsd, ind));
+	if (unlikely(arenas_cache == NULL)) {
+		/* arenas_cache hasn't been initialized yet. */
+		return (arena_get_hard(tsd, ind, init_if_missing));
 	}
-	if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) {
+	if (unlikely(ind >= tsd_narenas_cache_get(tsd))) {
 		/*
-		 * ind is invalid, cache is old (too small), or tdata to be
+		 * ind is invalid, cache is old (too small), or arena to be
 		 * initialized.
 		 */
-		return (refresh_if_missing ? arena_tdata_get_hard(tsd, ind) :
-		    NULL);
-	}
-
-	tdata = &arenas_tdata[ind];
-	if (likely(tdata != NULL) || !refresh_if_missing)
-		return (tdata);
-	return (arena_tdata_get_hard(tsd, ind));
-}
-
-JEMALLOC_INLINE arena_t *
-arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing)
-{
-	arena_t *ret;
-
-	assert(ind <= MALLOCX_ARENA_MAX);
-
-	ret = arenas[ind];
-	if (unlikely(ret == NULL)) {
-		ret = atomic_read_p((void *)&arenas[ind]);
-		if (init_if_missing && unlikely(ret == NULL))
-			ret = arena_init(tsdn, ind);
+		return (refresh_if_missing ? arena_get_hard(tsd, ind,
+		    init_if_missing) : NULL);
 	}
-	return (ret);
-}
-
-JEMALLOC_INLINE ticker_t *
-decay_ticker_get(tsd_t *tsd, unsigned ind)
-{
-	arena_tdata_t *tdata;
-
-	tdata = arena_tdata_get(tsd, ind, true);
-	if (unlikely(tdata == NULL))
-		return (NULL);
-	return (&tdata->decay_ticker);
+	arena = arenas_cache[ind];
+	if (likely(arena != NULL) || !refresh_if_missing)
+		return (arena);
+	return (arena_get_hard(tsd, ind, init_if_missing));
 }
 #endif
...
...
@@ -989,27 +818,27 @@ decay_ticker_get(tsd_t *tsd, unsigned ind)
 #ifndef JEMALLOC_ENABLE_INLINE
 arena_t	*iaalloc(const void *ptr);
-size_t	isalloc(tsdn_t *tsdn, const void *ptr, bool demote);
-void	*iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero,
-    tcache_t *tcache, bool is_metadata, arena_t *arena, bool slow_path);
-void	*ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero,
-    bool slow_path);
-void	*ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
+size_t	isalloc(const void *ptr, bool demote);
+void	*iallocztm(tsd_t *tsd, size_t size, bool zero, tcache_t *tcache,
+    bool is_metadata, arena_t *arena);
+void	*imalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena);
+void	*imalloc(tsd_t *tsd, size_t size);
+void	*icalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena);
+void	*icalloc(tsd_t *tsd, size_t size);
+void	*ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
     tcache_t *tcache, bool is_metadata, arena_t *arena);
-void	*ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
+void	*ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
     tcache_t *tcache, arena_t *arena);
 void	*ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero);
-size_t	ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote);
+size_t	ivsalloc(const void *ptr, bool demote);
 size_t	u2rz(size_t usize);
-size_t	p2rz(tsdn_t *tsdn, const void *ptr);
-void	idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata,
-    bool slow_path);
+size_t	p2rz(const void *ptr);
+void	idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata);
+void	idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache);
 void	idalloc(tsd_t *tsd, void *ptr);
-void	iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path);
-void	isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
-    bool slow_path);
-void	isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache,
-    bool slow_path);
+void	iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
+void	isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
+void	isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
 void	*iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
     size_t extra, size_t alignment, bool zero, tcache_t *tcache,
     arena_t *arena);
...
...
@@ -1017,8 +846,8 @@ void *iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t alignment, bool zero, tcache_t *tcache, arena_t *arena);
void *iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t alignment, bool zero);
bool ixalloc(
tsdn_t *tsdn,
void *ptr, size_t oldsize, size_t size,
size_t
extra, size_t
alignment, bool zero);
bool ixalloc(void *ptr, size_t oldsize, size_t size,
size_t extra,
size_t alignment, bool zero);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
...
...
@@ -1033,85 +862,100 @@ iaalloc(const void *ptr)
 
 /*
  * Typical usage:
- *   tsdn_t *tsdn = [...]
  *   void *ptr = [...]
- *   size_t sz = isalloc(tsdn, ptr, config_prof);
+ *   size_t sz = isalloc(ptr, config_prof);
  */
 JEMALLOC_ALWAYS_INLINE size_t
-isalloc(tsdn_t *tsdn, const void *ptr, bool demote)
+isalloc(const void *ptr, bool demote)
 {
 
 	assert(ptr != NULL);
 	/* Demotion only makes sense if config_prof is true. */
 	assert(config_prof || !demote);
 
-	return (arena_salloc(tsdn, ptr, demote));
+	return (arena_salloc(ptr, demote));
 }
 
 JEMALLOC_ALWAYS_INLINE void *
-iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
-    bool is_metadata, arena_t *arena, bool slow_path)
+iallocztm(tsd_t *tsd, size_t size, bool zero, tcache_t *tcache,
+    bool is_metadata, arena_t *arena)
 {
 	void *ret;
 
 	assert(size != 0);
 	assert(!is_metadata || tcache == NULL);
 	assert(!is_metadata || arena == NULL || arena->ind < narenas_auto);
 
-	ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
+	ret = arena_malloc(tsd, arena, size, zero, tcache);
 	if (config_stats && is_metadata && likely(ret != NULL)) {
-		arena_metadata_allocated_add(iaalloc(ret), isalloc(tsdn, ret,
+		arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
 		    config_prof));
 	}
 	return (ret);
 }
 
 JEMALLOC_ALWAYS_INLINE void *
-ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path)
+imalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena)
 {
 
-	return (iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd, true),
-	    false, NULL, slow_path));
+	return (iallocztm(tsd, size, false, tcache, false, arena));
 }
 
 JEMALLOC_ALWAYS_INLINE void *
-ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
+imalloc(tsd_t *tsd, size_t size)
+{
+
+	return (iallocztm(tsd, size, false, tcache_get(tsd, true), false, NULL));
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+icalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena)
+{
+
+	return (iallocztm(tsd, size, true, tcache, false, arena));
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+icalloc(tsd_t *tsd, size_t size)
+{
+
+	return (iallocztm(tsd, size, true, tcache_get(tsd, true), false, NULL));
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
     tcache_t *tcache, bool is_metadata, arena_t *arena)
 {
 	void *ret;
 
 	assert(usize != 0);
 	assert(usize == sa2u(usize, alignment));
 	assert(!is_metadata || tcache == NULL);
 	assert(!is_metadata || arena == NULL || arena->ind < narenas_auto);
 
-	ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache);
+	ret = arena_palloc(tsd, arena, usize, alignment, zero, tcache);
 	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
 	if (config_stats && is_metadata && likely(ret != NULL)) {
-		arena_metadata_allocated_add(iaalloc(ret), isalloc(tsdn, ret,
+		arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
 		    config_prof));
 	}
 	return (ret);
 }
 
 JEMALLOC_ALWAYS_INLINE void *
-ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
+ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
     tcache_t *tcache, arena_t *arena)
 {
 
-	return (ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena));
+	return (ipallocztm(tsd, usize, alignment, zero, tcache, false, arena));
}
 
 JEMALLOC_ALWAYS_INLINE void *
 ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
 {
 
-	return (ipallocztm(tsd_tsdn(tsd), usize, alignment, zero,
-	    tcache_get(tsd, true), false, NULL));
+	return (ipallocztm(tsd, usize, alignment, zero, tcache_get(tsd,
+	    NULL), false, NULL));
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote)
+ivsalloc(const void *ptr, bool demote)
 {
 	extent_node_t *node;
...
...
@@ -1123,7 +967,7 @@ ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote)
 	assert(extent_node_addr_get(node) == ptr ||
 	    extent_node_achunk_get(node));
 
-	return (isalloc(tsdn, ptr, demote));
+	return (isalloc(ptr, demote));
 }
 
 JEMALLOC_INLINE size_t
...
...
@@ -1141,62 +985,65 @@ u2rz(size_t usize)
 }
 
 JEMALLOC_INLINE size_t
-p2rz(tsdn_t *tsdn, const void *ptr)
+p2rz(const void *ptr)
 {
-	size_t usize = isalloc(tsdn, ptr, false);
+	size_t usize = isalloc(ptr, false);
 
 	return (u2rz(usize));
 }
 
 JEMALLOC_ALWAYS_INLINE void
-idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata,
-    bool slow_path)
+idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata)
 {
 
 	assert(ptr != NULL);
 	assert(!is_metadata || tcache == NULL);
 	assert(!is_metadata || iaalloc(ptr)->ind < narenas_auto);
 	if (config_stats && is_metadata) {
-		arena_metadata_allocated_sub(iaalloc(ptr), isalloc(tsdn, ptr,
+		arena_metadata_allocated_sub(iaalloc(ptr), isalloc(ptr,
 		    config_prof));
 	}
 
-	arena_dalloc(tsdn, ptr, tcache, slow_path);
+	arena_dalloc(tsd, ptr, tcache);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache)
+{
+
+	idalloctm(tsd, ptr, tcache, false);
 }
 
 JEMALLOC_ALWAYS_INLINE void
 idalloc(tsd_t *tsd, void *ptr)
 {
 
-	idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd, false), false, true);
+	idalloctm(tsd, ptr, tcache_get(tsd, false), false);
 }
 
 JEMALLOC_ALWAYS_INLINE void
-iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
+iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
 {
 
-	if (slow_path && config_fill && unlikely(opt_quarantine))
+	if (config_fill && unlikely(opt_quarantine))
 		quarantine(tsd, ptr);
 	else
-		idalloctm(tsd_tsdn(tsd), ptr, tcache, false, slow_path);
+		idalloctm(tsd, ptr, tcache, false);
 }
 
 JEMALLOC_ALWAYS_INLINE void
-isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
-    bool slow_path)
+isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
 {
 
-	arena_sdalloc(tsdn, ptr, size, tcache, slow_path);
+	arena_sdalloc(tsd, ptr, size, tcache);
 }
 
 JEMALLOC_ALWAYS_INLINE void
-isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache, bool slow_path)
+isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
 {
 
-	if (slow_path && config_fill && unlikely(opt_quarantine))
+	if (config_fill && unlikely(opt_quarantine))
 		quarantine(tsd, ptr);
 	else
-		isdalloct(tsd_tsdn(tsd), ptr, size, tcache, slow_path);
+		isdalloct(tsd, ptr, size, tcache);
 }
 
 JEMALLOC_ALWAYS_INLINE void *
...
...
@@ -1207,18 +1054,17 @@ iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
 	size_t usize, copysize;
 
 	usize = sa2u(size + extra, alignment);
-	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
+	if (usize == 0)
 		return (NULL);
-	p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache, arena);
+	p = ipalloct(tsd, usize, alignment, zero, tcache, arena);
 	if (p == NULL) {
 		if (extra == 0)
 			return (NULL);
 		/* Try again, without extra this time. */
 		usize = sa2u(size, alignment);
-		if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
+		if (usize == 0)
 			return (NULL);
-		p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache,
-		    arena);
+		p = ipalloct(tsd, usize, alignment, zero, tcache, arena);
 		if (p == NULL)
 			return (NULL);
 	}
...
...
@@ -1228,7 +1074,7 @@ iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
 	 */
 	copysize = (size < oldsize) ? size : oldsize;
 	memcpy(p, ptr, copysize);
-	isqalloc(tsd, ptr, oldsize, tcache, true);
+	isqalloc(tsd, ptr, oldsize, tcache);
 	return (p);
 }
...
...
@@ -1264,8 +1110,8 @@ iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
 }
 
 JEMALLOC_ALWAYS_INLINE bool
-ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
-    size_t alignment, bool zero)
+ixalloc(void *ptr, size_t oldsize, size_t size, size_t extra,
+    size_t alignment, bool zero)
 {
 
 	assert(ptr != NULL);
...
...
@@ -1277,7 +1123,7 @@ ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
 		return (true);
 	}
 
-	return (arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero));
+	return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
 }
 #endif
...
...
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h
...
...
@@ -17,18 +17,7 @@
 #  include <sys/uio.h>
 # endif
 # include <pthread.h>
-# ifdef JEMALLOC_OS_UNFAIR_LOCK
-#  include <os/lock.h>
-# endif
-# ifdef JEMALLOC_GLIBC_MALLOC_HOOK
-#  include <sched.h>
-# endif
 # include <errno.h>
-# include <sys/time.h>
-# include <time.h>
-# ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
-#  include <mach/mach_time.h>
-# endif
 #endif
 #include <sys/types.h>
...
...
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in
...
...
@@ -56,9 +56,9 @@
 #undef JEMALLOC_HAVE_BUILTIN_CLZ
 
 /*
- * Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
+ * Defined if madvise(2) is available.
  */
-#undef JEMALLOC_OS_UNFAIR_LOCK
+#undef JEMALLOC_HAVE_MADVISE
 
 /*
  * Defined if OSSpin*() functions are available, as provided by Darwin, and
...
...
@@ -66,9 +66,6 @@
*/
#undef JEMALLOC_OSSPIN
/* Defined if syscall(2) is usable. */
#undef JEMALLOC_USE_SYSCALL
/*
* Defined if secure_getenv(3) is available.
*/
...
...
@@ -79,24 +76,6 @@
  */
 #undef JEMALLOC_HAVE_ISSETUGID
 
-/* Defined if pthread_atfork(3) is available. */
-#undef JEMALLOC_HAVE_PTHREAD_ATFORK
-
-/*
- * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
- */
-#undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
-
-/*
- * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
- */
-#undef JEMALLOC_HAVE_CLOCK_MONOTONIC
-
-/*
- * Defined if mach_absolute_time() is available.
- */
-#undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
-
 /*
  * Defined if _malloc_thread_cleanup() exists.  At least in the case of
  * FreeBSD, pthread_key_create() allocates, which if used during malloc
...
...
@@ -210,16 +189,9 @@
 #undef JEMALLOC_TLS
 
 /*
- * Used to mark unreachable code to quiet "end of non-void" compiler warnings.
- * Don't use this directly; instead use unreachable() from util.h
+ * ffs()/ffsl() functions to use for bitmapping.  Don't use these directly;
+ * instead, use jemalloc_ffs() or jemalloc_ffsl() from util.h.
  */
-#undef JEMALLOC_INTERNAL_UNREACHABLE
-
-/*
- * ffs*() functions to use for bitmapping.  Don't use these directly; instead,
- * use ffs_*() from util.h.
- */
-#undef JEMALLOC_INTERNAL_FFSLL
 #undef JEMALLOC_INTERNAL_FFSL
 #undef JEMALLOC_INTERNAL_FFS
...
...
@@ -241,35 +213,18 @@
 #undef JEMALLOC_ZONE
 #undef JEMALLOC_ZONE_VERSION
 
-/*
- * Methods for determining whether the OS overcommits.
- * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
- *                                         /proc/sys/vm.overcommit_memory file.
- * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
- */
-#undef JEMALLOC_SYSCTL_VM_OVERCOMMIT
-#undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
-
-/* Defined if madvise(2) is available. */
-#undef JEMALLOC_HAVE_MADVISE
-
 /*
  * Methods for purging unused pages differ between operating systems.
  *
- *   madvise(..., MADV_FREE) : This marks pages as being unused, such that they
- *                             will be discarded rather than swapped out.
- *   madvise(..., MADV_DONTNEED) : This immediately discards pages, such that
- *                                 new pages will be demand-zeroed if the
- *                                 address region is later touched.
+ *   madvise(..., MADV_DONTNEED) : On Linux, this immediately discards pages,
+ *                                 such that new pages will be demand-zeroed if
+ *                                 the address region is later touched.
+ *   madvise(..., MADV_FREE) : On FreeBSD and Darwin, this marks pages as being
+ *                             unused, such that they will be discarded rather
+ *                             than swapped out.
  */
-#undef JEMALLOC_PURGE_MADVISE_FREE
 #undef JEMALLOC_PURGE_MADVISE_DONTNEED
-
-/*
- * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
- * arguments to madvise(2).
- */
-#undef JEMALLOC_THP
+#undef JEMALLOC_PURGE_MADVISE_FREE
 
 /* Define if operating system has alloca.h header. */
 #undef JEMALLOC_HAS_ALLOCA_H
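The comment above describes the purge primitive jemalloc selects at configure time. A rough stand-alone illustration of that primitive (not jemalloc code): map an anonymous region, then tell the kernel its pages are no longer needed. MADV_DONTNEED is the choice the comment describes for Linux; MADV_FREE may not be defined on every platform.

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void) {
        size_t len = 4 * (size_t)sysconf(_SC_PAGESIZE);
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return 1;
        ((char *)p)[0] = 1;                       /* touch a page */
        if (madvise(p, len, MADV_DONTNEED) != 0)  /* "purge": discard the contents */
                perror("madvise");
        munmap(p, len);
        return 0;
}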
...
...
@@ -286,9 +241,6 @@
 /* sizeof(long) == 2^LG_SIZEOF_LONG. */
 #undef LG_SIZEOF_LONG
 
-/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
-#undef LG_SIZEOF_LONG_LONG
-
 /* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
 #undef LG_SIZEOF_INTMAX_T
...
...
@@ -307,7 +259,4 @@
  */
 #undef JEMALLOC_EXPORT
 
-/* config.malloc_conf options string. */
-#undef JEMALLOC_CONFIG_MALLOC_CONF
-
 #endif /* JEMALLOC_INTERNAL_DEFS_H_ */
deps/jemalloc/include/jemalloc/internal/mb.h
...
...
@@ -42,7 +42,7 @@ mb_write(void)
 	    : /* Inputs. */
 	    : "memory" /* Clobbers. */
 	    );
-#  else
+#else
 	/*
 	 * This is hopefully enough to keep the compiler from reordering
 	 * instructions around this one.
...
...
@@ -52,7 +52,7 @@ mb_write(void)
 	    : /* Inputs. */
 	    : "memory" /* Clobbers. */
 	    );
-#  endif
+#endif
 }
 #elif (defined(__amd64__) || defined(__x86_64__))
 JEMALLOC_INLINE void
...
...
@@ -104,9 +104,9 @@ mb_write(void)
 {
 	malloc_mutex_t mtx;
 
-	malloc_mutex_init(&mtx, "mb", WITNESS_RANK_OMIT);
-	malloc_mutex_lock(TSDN_NULL, &mtx);
-	malloc_mutex_unlock(TSDN_NULL, &mtx);
+	malloc_mutex_init(&mtx);
+	malloc_mutex_lock(&mtx);
+	malloc_mutex_unlock(&mtx);
 }
 #endif
 #endif
...
...
deps/jemalloc/include/jemalloc/internal/mutex.h
...
...
@@ -5,25 +5,18 @@ typedef struct malloc_mutex_s malloc_mutex_t;
 
 #ifdef _WIN32
 #  define MALLOC_MUTEX_INITIALIZER
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
-#  define MALLOC_MUTEX_INITIALIZER					\
-     {OS_UNFAIR_LOCK_INIT, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
 #elif (defined(JEMALLOC_OSSPIN))
-#  define MALLOC_MUTEX_INITIALIZER {0, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
+#  define MALLOC_MUTEX_INITIALIZER {0}
 #elif (defined(JEMALLOC_MUTEX_INIT_CB))
-#  define MALLOC_MUTEX_INITIALIZER					\
-     {PTHREAD_MUTEX_INITIALIZER, NULL, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
+#  define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL}
 #else
 #  if (defined(JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP) &&		\
      defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP))
 #    define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP
-#    define MALLOC_MUTEX_INITIALIZER					\
-       {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP,				\
-        WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
+#    define MALLOC_MUTEX_INITIALIZER {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP}
 #  else
 #    define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
-#    define MALLOC_MUTEX_INITIALIZER					\
-       {PTHREAD_MUTEX_INITIALIZER, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
+#    define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER}
 #  endif
 #endif
...
...
@@ -38,8 +31,6 @@ struct malloc_mutex_s {
 #  else
 	CRITICAL_SECTION	lock;
 #  endif
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
-	os_unfair_lock		lock;
 #elif (defined(JEMALLOC_OSSPIN))
 	OSSpinLock		lock;
 #elif (defined(JEMALLOC_MUTEX_INIT_CB))
...
...
@@ -48,7 +39,6 @@ struct malloc_mutex_s {
#else
	pthread_mutex_t lock;
#endif
	witness_t witness;
};
#endif /* JEMALLOC_H_STRUCTS */
...
...
@@ -62,62 +52,52 @@ extern bool isthreaded;
# define isthreaded true
#endif
bool	malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
    witness_rank_t rank);
void	malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
void	malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
void	malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
bool	malloc_mutex_boot(void);
bool	malloc_mutex_init(malloc_mutex_t *mutex);
void	malloc_mutex_prefork(malloc_mutex_t *mutex);
void	malloc_mutex_postfork_parent(malloc_mutex_t *mutex);
void	malloc_mutex_postfork_child(malloc_mutex_t *mutex);
bool	mutex_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
void	malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex);
void	malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex);
void	malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
void	malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
void	malloc_mutex_lock(malloc_mutex_t *mutex);
void	malloc_mutex_unlock(malloc_mutex_t *mutex);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
JEMALLOC_INLINE void
malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex)
malloc_mutex_lock(malloc_mutex_t *mutex)
{
	if (isthreaded) {
		witness_assert_not_owner(tsdn, &mutex->witness);
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
		AcquireSRWLockExclusive(&mutex->lock);
# else
		EnterCriticalSection(&mutex->lock);
# endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
		os_unfair_lock_lock(&mutex->lock);
#elif (defined(JEMALLOC_OSSPIN))
		OSSpinLockLock(&mutex->lock);
#else
		pthread_mutex_lock(&mutex->lock);
#endif
		witness_lock(tsdn, &mutex->witness);
	}
}
JEMALLOC_INLINE void
malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex)
malloc_mutex_unlock(malloc_mutex_t *mutex)
{
	if (isthreaded) {
		witness_unlock(tsdn, &mutex->witness);
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
		ReleaseSRWLockExclusive(&mutex->lock);
# else
		LeaveCriticalSection(&mutex->lock);
# endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
		os_unfair_lock_unlock(&mutex->lock);
#elif (defined(JEMALLOC_OSSPIN))
		OSSpinLockUnlock(&mutex->lock);
#else
...
...
@@ -125,22 +105,6 @@ malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex)
#endif
	}
}
JEMALLOC_INLINE void
malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
	if (isthreaded)
		witness_assert_owner(tsdn, &mutex->witness);
}
JEMALLOC_INLINE void
malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
	if (isthreaded)
		witness_assert_not_owner(tsdn, &mutex->witness);
}
#endif
#endif /* JEMALLOC_H_INLINES */
...
...
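To make the signature change above concrete, a minimal usage sketch under the restored (post-revert) API follows; the 4.4.0 API additionally threads a tsdn_t * through lock/unlock and a name plus witness rank through init. example_lock and example_counter_bump are invented names, and jemalloc's internal headers are assumed.

/* Illustrative usage of the restored mutex API. */
static malloc_mutex_t	example_lock;

static bool
example_counter_init(void)
{
	/* Restored form: no name or witness rank argument. */
	return (malloc_mutex_init(&example_lock));
}

static void
example_counter_bump(uint64_t *counter)
{
	malloc_mutex_lock(&example_lock);	/* 4.4.0: malloc_mutex_lock(tsdn, &example_lock) */
	(*counter)++;
	malloc_mutex_unlock(&example_lock);	/* 4.4.0: malloc_mutex_unlock(tsdn, &example_lock) */
}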
deps/jemalloc/include/jemalloc/internal/nstime.h
deleted
100644 → 0
View file @
238cebdd
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct nstime_s nstime_t;

/* Maximum supported number of seconds (~584 years). */
#define NSTIME_SEC_MAX KQU(18446744072)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

struct nstime_s {
	uint64_t ns;
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

void	nstime_init(nstime_t *time, uint64_t ns);
void	nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec);
uint64_t	nstime_ns(const nstime_t *time);
uint64_t	nstime_sec(const nstime_t *time);
uint64_t	nstime_nsec(const nstime_t *time);
void	nstime_copy(nstime_t *time, const nstime_t *source);
int	nstime_compare(const nstime_t *a, const nstime_t *b);
void	nstime_add(nstime_t *time, const nstime_t *addend);
void	nstime_subtract(nstime_t *time, const nstime_t *subtrahend);
void	nstime_imultiply(nstime_t *time, uint64_t multiplier);
void	nstime_idivide(nstime_t *time, uint64_t divisor);
uint64_t	nstime_divide(const nstime_t *time, const nstime_t *divisor);
#ifdef JEMALLOC_JET
typedef bool (nstime_monotonic_t)(void);
extern nstime_monotonic_t *nstime_monotonic;
typedef bool (nstime_update_t)(nstime_t *);
extern nstime_update_t *nstime_update;
#else
bool	nstime_monotonic(void);
bool	nstime_update(nstime_t *time);
#endif

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
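Since this header is being deleted outright, the prototypes above are the whole interface. A short usage sketch for measuring an elapsed interval with it (illustrative only; example_elapsed_ns is an invented name, and the jemalloc internal headers are assumed):

/* Measure an elapsed interval with the nstime API. */
static uint64_t
example_elapsed_ns(void)
{
	nstime_t begin, end;

	nstime_init(&begin, 0);
	nstime_update(&begin);		/* read the clock into 'begin' */

	/* ... work being timed ... */

	nstime_copy(&end, &begin);
	nstime_update(&end);		/* advance 'end' to the current time */
	nstime_subtract(&end, &begin);	/* end -= begin */
	return (nstime_ns(&end));
}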
deps/jemalloc/include/jemalloc/internal/pages.h
View file @
e3b8492e
...
...
@@ -9,16 +9,13 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void	*pages_map(void *addr, size_t size, bool *commit);
void	*pages_map(void *addr, size_t size);
void	pages_unmap(void *addr, size_t size);
void	*pages_trim(void *addr, size_t alloc_size, size_t leadsize,
    size_t size, bool *commit);
    size_t size);
bool	pages_commit(void *addr, size_t size);
bool	pages_decommit(void *addr, size_t size);
bool	pages_purge(void *addr, size_t size);
bool	pages_huge(void *addr, size_t size);
bool	pages_nohuge(void *addr, size_t size);
void	pages_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
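These wrappers sit directly above mmap(2)/madvise(2). A condensed sketch of how a caller might reserve and later release a run using the restored (no commit-tracking) shapes follows; this is illustrative only, and the example_* names are invented.

/* Reserve a page run, later discard its contents and return it to the OS. */
static void *
example_reserve_run(size_t size)
{
	/* Restored form; the 4.4.0 form adds a bool *commit out-parameter. */
	return (pages_map(NULL, size));
}

static void
example_release_run(void *run, size_t size)
{
	(void)pages_purge(run, size);	/* contents are disposable */
	pages_unmap(run, size);		/* unmap the region entirely */
}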
...
...
deps/jemalloc/include/jemalloc/internal/ph.h
deleted
100644 → 0
View file @
238cebdd
/*
* A Pairing Heap implementation.
*
* "The Pairing Heap: A New Form of Self-Adjusting Heap"
* https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf
*
 * With an auxiliary twopass list, described in a follow-on paper.
*
* "Pairing Heaps: Experiments and Analysis"
* http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf
*
*******************************************************************************
*/
#ifndef PH_H_
#define PH_H_
/* Node structure. */
#define phn(a_type) \
struct { \
a_type *phn_prev; \
a_type *phn_next; \
a_type *phn_lchild; \
}
/* Root structure. */
#define ph(a_type) \
struct { \
a_type *ph_root; \
}
/* Internal utility macros. */
#define phn_lchild_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_lchild)
#define phn_lchild_set(a_type, a_field, a_phn, a_lchild) do { \
a_phn->a_field.phn_lchild = a_lchild; \
} while (0)
#define phn_next_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_next)
#define phn_prev_set(a_type, a_field, a_phn, a_prev) do { \
a_phn->a_field.phn_prev = a_prev; \
} while (0)
#define phn_prev_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_prev)
#define phn_next_set(a_type, a_field, a_phn, a_next) do { \
a_phn->a_field.phn_next = a_next; \
} while (0)
#define phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do { \
a_type *phn0child; \
\
assert(a_phn0 != NULL); \
assert(a_phn1 != NULL); \
assert(a_cmp(a_phn0, a_phn1) <= 0); \
\
phn_prev_set(a_type, a_field, a_phn1, a_phn0); \
phn0child = phn_lchild_get(a_type, a_field, a_phn0); \
phn_next_set(a_type, a_field, a_phn1, phn0child); \
if (phn0child != NULL) \
phn_prev_set(a_type, a_field, phn0child, a_phn1); \
phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \
} while (0)
#define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \
if (a_phn0 == NULL) \
r_phn = a_phn1; \
else if (a_phn1 == NULL) \
r_phn = a_phn0; \
else if (a_cmp(a_phn0, a_phn1) < 0) { \
phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, \
a_cmp); \
r_phn = a_phn0; \
} else { \
phn_merge_ordered(a_type, a_field, a_phn1, a_phn0, \
a_cmp); \
r_phn = a_phn1; \
} \
} while (0)
#define ph_merge_siblings(a_type, a_field, a_phn, a_cmp, r_phn) do { \
a_type *head = NULL; \
a_type *tail = NULL; \
a_type *phn0 = a_phn; \
a_type *phn1 = phn_next_get(a_type, a_field, phn0); \
\
/* \
* Multipass merge, wherein the first two elements of a FIFO \
* are repeatedly merged, and each result is appended to the \
* singly linked FIFO, until the FIFO contains only a single \
* element. We start with a sibling list but no reference to \
* its tail, so we do a single pass over the sibling list to \
* populate the FIFO. \
	 */ \
if (phn1 != NULL) { \
a_type *phnrest = phn_next_get(a_type, a_field, phn1); \
if (phnrest != NULL) \
phn_prev_set(a_type, a_field, phnrest, NULL); \
phn_prev_set(a_type, a_field, phn0, NULL); \
phn_next_set(a_type, a_field, phn0, NULL); \
phn_prev_set(a_type, a_field, phn1, NULL); \
phn_next_set(a_type, a_field, phn1, NULL); \
phn_merge(a_type, a_field, phn0, phn1, a_cmp, phn0); \
head = tail = phn0; \
phn0 = phnrest; \
while (phn0 != NULL) { \
phn1 = phn_next_get(a_type, a_field, phn0); \
if (phn1 != NULL) { \
phnrest = phn_next_get(a_type, a_field, \
phn1); \
if (phnrest != NULL) { \
phn_prev_set(a_type, a_field, \
phnrest, NULL); \
} \
phn_prev_set(a_type, a_field, phn0, \
NULL); \
phn_next_set(a_type, a_field, phn0, \
NULL); \
phn_prev_set(a_type, a_field, phn1, \
NULL); \
phn_next_set(a_type, a_field, phn1, \
NULL); \
phn_merge(a_type, a_field, phn0, phn1, \
a_cmp, phn0); \
phn_next_set(a_type, a_field, tail, \
phn0); \
tail = phn0; \
phn0 = phnrest; \
} else { \
phn_next_set(a_type, a_field, tail, \
phn0); \
tail = phn0; \
phn0 = NULL; \
} \
} \
phn0 = head; \
phn1 = phn_next_get(a_type, a_field, phn0); \
if (phn1 != NULL) { \
while (true) { \
head = phn_next_get(a_type, a_field, \
phn1); \
assert(phn_prev_get(a_type, a_field, \
phn0) == NULL); \
phn_next_set(a_type, a_field, phn0, \
NULL); \
assert(phn_prev_get(a_type, a_field, \
phn1) == NULL); \
phn_next_set(a_type, a_field, phn1, \
NULL); \
phn_merge(a_type, a_field, phn0, phn1, \
a_cmp, phn0); \
if (head == NULL) \
break; \
phn_next_set(a_type, a_field, tail, \
phn0); \
tail = phn0; \
phn0 = head; \
phn1 = phn_next_get(a_type, a_field, \
phn0); \
} \
} \
} \
r_phn = phn0; \
} while (0)
#define ph_merge_aux(a_type, a_field, a_ph, a_cmp) do { \
a_type *phn = phn_next_get(a_type, a_field, a_ph->ph_root); \
if (phn != NULL) { \
phn_prev_set(a_type, a_field, a_ph->ph_root, NULL); \
phn_next_set(a_type, a_field, a_ph->ph_root, NULL); \
phn_prev_set(a_type, a_field, phn, NULL); \
ph_merge_siblings(a_type, a_field, phn, a_cmp, phn); \
assert(phn_next_get(a_type, a_field, phn) == NULL); \
phn_merge(a_type, a_field, a_ph->ph_root, phn, a_cmp, \
a_ph->ph_root); \
} \
} while (0)
#define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \
a_type *lchild = phn_lchild_get(a_type, a_field, a_phn); \
if (lchild == NULL) \
r_phn = NULL; \
else { \
ph_merge_siblings(a_type, a_field, lchild, a_cmp, \
r_phn); \
} \
} while (0)
/*
* The ph_proto() macro generates function prototypes that correspond to the
* functions generated by an equivalently parameterized call to ph_gen().
*/
#define ph_proto(a_attr, a_prefix, a_ph_type, a_type) \
a_attr void a_prefix##new(a_ph_type *ph); \
a_attr bool a_prefix##empty(a_ph_type *ph); \
a_attr a_type *a_prefix##first(a_ph_type *ph); \
a_attr void a_prefix##insert(a_ph_type *ph, a_type *phn); \
a_attr a_type *a_prefix##remove_first(a_ph_type *ph); \
a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn);
/*
* The ph_gen() macro generates a type-specific pairing heap implementation,
* based on the above cpp macros.
*/
#define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \
a_attr void \
a_prefix##new(a_ph_type *ph) \
{ \
\
memset(ph, 0, sizeof(ph(a_type))); \
} \
a_attr bool \
a_prefix##empty(a_ph_type *ph) \
{ \
\
return (ph->ph_root == NULL); \
} \
a_attr a_type * \
a_prefix##first(a_ph_type *ph) \
{ \
\
if (ph->ph_root == NULL) \
return (NULL); \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
return (ph->ph_root); \
} \
a_attr void \
a_prefix##insert(a_ph_type *ph, a_type *phn) \
{ \
\
memset(&phn->a_field, 0, sizeof(phn(a_type))); \
\
/* \
* Treat the root as an aux list during insertion, and lazily \
* merge during a_prefix##remove_first(). For elements that \
* are inserted, then removed via a_prefix##remove() before the \
* aux list is ever processed, this makes insert/remove \
* constant-time, whereas eager merging would make insert \
* O(log n). \
	 */ \
if (ph->ph_root == NULL) \
ph->ph_root = phn; \
else { \
phn_next_set(a_type, a_field, phn, phn_next_get(a_type, \
a_field, ph->ph_root)); \
if (phn_next_get(a_type, a_field, ph->ph_root) != \
NULL) { \
phn_prev_set(a_type, a_field, \
phn_next_get(a_type, a_field, ph->ph_root), \
phn); \
} \
phn_prev_set(a_type, a_field, phn, ph->ph_root); \
phn_next_set(a_type, a_field, ph->ph_root, phn); \
} \
} \
a_attr a_type * \
a_prefix##remove_first(a_ph_type *ph) \
{ \
a_type *ret; \
\
if (ph->ph_root == NULL) \
return (NULL); \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
\
ret = ph->ph_root; \
\
ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \
ph->ph_root); \
\
return (ret); \
} \
a_attr void \
a_prefix##remove(a_ph_type *ph, a_type *phn) \
{ \
a_type *replace, *parent; \
\
/* \
* We can delete from aux list without merging it, but we need \
* to merge if we are dealing with the root node. \
	 */ \
if (ph->ph_root == phn) { \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
if (ph->ph_root == phn) { \
ph_merge_children(a_type, a_field, ph->ph_root, \
a_cmp, ph->ph_root); \
return; \
} \
} \
\
	/* Get parent (if phn is leftmost child) before mutating. */ \
if ((parent = phn_prev_get(a_type, a_field, phn)) != NULL) { \
if (phn_lchild_get(a_type, a_field, parent) != phn) \
parent = NULL; \
} \
	/* Find a possible replacement node, and link to parent. */ \
ph_merge_children(a_type, a_field, phn, a_cmp, replace); \
	/* Set next/prev for sibling linked list. */ \
if (replace != NULL) { \
if (parent != NULL) { \
phn_prev_set(a_type, a_field, replace, parent); \
phn_lchild_set(a_type, a_field, parent, \
replace); \
} else { \
phn_prev_set(a_type, a_field, replace, \
phn_prev_get(a_type, a_field, phn)); \
if (phn_prev_get(a_type, a_field, phn) != \
NULL) { \
phn_next_set(a_type, a_field, \
phn_prev_get(a_type, a_field, phn), \
replace); \
} \
} \
phn_next_set(a_type, a_field, replace, \
phn_next_get(a_type, a_field, phn)); \
if (phn_next_get(a_type, a_field, phn) != NULL) { \
phn_prev_set(a_type, a_field, \
phn_next_get(a_type, a_field, phn), \
replace); \
} \
} else { \
if (parent != NULL) { \
a_type *next = phn_next_get(a_type, a_field, \
phn); \
phn_lchild_set(a_type, a_field, parent, next); \
if (next != NULL) { \
phn_prev_set(a_type, a_field, next, \
parent); \
} \
} else { \
assert(phn_prev_get(a_type, a_field, phn) != \
NULL); \
phn_next_set(a_type, a_field, \
phn_prev_get(a_type, a_field, phn), \
phn_next_get(a_type, a_field, phn)); \
} \
if (phn_next_get(a_type, a_field, phn) != NULL) { \
phn_prev_set(a_type, a_field, \
phn_next_get(a_type, a_field, phn), \
phn_prev_get(a_type, a_field, phn)); \
} \
} \
}
#endif /* PH_H_ */
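As a usage sketch (not part of jemalloc), instantiating these macros for a hypothetical node_t ordered by an integer key looks like the following; ph_gen() then emits the node_heap_*() functions that ph_proto() would declare.

#include <stdint.h>
#include <stdbool.h>
#include <string.h>	/* memset(), used by the generated new()/insert() */
#include <assert.h>	/* assert(), used by the generated merge helpers */

/* Hypothetical element type with intrusive pairing-heap linkage. */
typedef struct node_s node_t;
struct node_s {
	uint64_t	key;
	phn(node_t)	link;
};
typedef ph(node_t) node_heap_t;

/* Three-way comparator, the shape the a_cmp parameter expects. */
static int
node_cmp(const node_t *a, const node_t *b)
{
	return ((a->key > b->key) - (a->key < b->key));
}

/* Emits node_heap_new(), node_heap_empty(), node_heap_first(),
 * node_heap_insert(), node_heap_remove_first() and node_heap_remove(). */
ph_gen(static, node_heap_, node_heap_t, node_t, link, node_cmp)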