Commit a78e148b authored May 09, 2011 by antirez
jemalloc source added
parent 07486df6
Changes: 83
deps/jemalloc/include/jemalloc/internal/atomic.h
0 → 100644
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif
/* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif
/* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#define atomic_read_uint64(p) atomic_add_uint64(p, 0)
#define atomic_read_uint32(p) atomic_add_uint32(p, 0)
#if (LG_SIZEOF_PTR == 3)
# define atomic_read_z(p) \
(size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)0)
# define atomic_add_z(p, x) \
(size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x)
# define atomic_sub_z(p, x) \
(size_t)atomic_sub_uint64((uint64_t *)p, (uint64_t)x)
#elif (LG_SIZEOF_PTR == 2)
# define atomic_read_z(p) \
(size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)0)
# define atomic_add_z(p, x) \
(size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x)
# define atomic_sub_z(p, x) \
(size_t)atomic_sub_uint32((uint32_t *)p, (uint32_t)x)
#endif
#endif
/* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
uint64_t atomic_add_uint64(uint64_t *p, uint64_t x);
uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x);
uint32_t atomic_add_uint32(uint32_t *p, uint32_t x);
uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_))
/******************************************************************************/
/* 64-bit operations. */
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
    return (__sync_add_and_fetch(p, x));
}

JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
    return (__sync_sub_and_fetch(p, x));
}
#elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
    return (OSAtomicAdd64((int64_t)x, (int64_t *)p));
}

JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
    return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p));
}
#elif (defined(__amd64_) || defined(__x86_64__))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
    asm volatile (
        "lock; xaddq %0, %1;"
        : "+r" (x), "=m" (*p) /* Outputs. */
        : "m" (*p) /* Inputs. */
        );
    return (x);
}

JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
    x = (uint64_t)(-(int64_t)x);
    asm volatile (
        "lock; xaddq %0, %1;"
        : "+r" (x), "=m" (*p) /* Outputs. */
        : "m" (*p) /* Inputs. */
        );
    return (x);
}
#else
# if (LG_SIZEOF_PTR == 3)
# error "Missing implementation for 64-bit atomic operations"
# endif
#endif
/******************************************************************************/
/* 32-bit operations. */
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
    return (__sync_add_and_fetch(p, x));
}

JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
    return (__sync_sub_and_fetch(p, x));
}
#elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
    return (OSAtomicAdd32((int32_t)x, (int32_t *)p));
}

JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
    return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p));
}
#elif (defined(__i386__) || defined(__amd64_) || defined(__x86_64__))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
    asm volatile (
        "lock; xaddl %0, %1;"
        : "+r" (x), "=m" (*p) /* Outputs. */
        : "m" (*p) /* Inputs. */
        );
    return (x);
}

JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
    x = (uint32_t)(-(int32_t)x);
    asm volatile (
        "lock; xaddl %0, %1;"
        : "+r" (x), "=m" (*p) /* Outputs. */
        : "m" (*p) /* Inputs. */
        );
    return (x);
}
#else
# error "Missing implementation for 32-bit atomic operations"
#endif
#endif
#endif
/* JEMALLOC_H_INLINES */
/******************************************************************************/
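The atomic_read_z/atomic_add_z/atomic_sub_z wrappers above pick the 32- or 64-bit primitive that matches the platform's pointer width, so a size_t counter can be updated without a mutex. A minimal sketch of how such a counter might be maintained, assuming it is compiled inside the jemalloc tree so that atomic.h is in scope (the counter and function names are hypothetical, not part of the commit):

#include "jemalloc/internal/jemalloc_internal.h"

static size_t npages_mapped;   /* hypothetical size_t statistics counter */

static void
pages_note_map(size_t npages)
{
    /* atomic_add_z() returns the post-add value; only the side effect is needed here. */
    atomic_add_z(&npages_mapped, npages);
}

static void
pages_note_unmap(size_t npages)
{
    atomic_sub_z(&npages_mapped, npages);
}

static size_t
pages_snapshot(void)
{
    /* atomic_read_z() is an add of zero, which yields the current value. */
    return (atomic_read_z(&npages_mapped));
}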
deps/jemalloc/include/jemalloc/internal/base.h
0 → 100644
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif
/* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif
/* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
extern malloc_mutex_t base_mtx;

void *base_alloc(size_t size);
extent_node_t *base_node_alloc(void);
void base_node_dealloc(extent_node_t *node);
bool base_boot(void);
#endif
/* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif
/* JEMALLOC_H_INLINES */
/******************************************************************************/
deps/jemalloc/include/jemalloc/internal/bitmap.h
0 → 100644
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
#define LG_BITMAP_MAXBITS LG_RUN_MAXREGS
typedef struct bitmap_level_s bitmap_level_t;
typedef struct bitmap_info_s bitmap_info_t;
typedef unsigned long bitmap_t;
#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG
/* Number of bits per group. */
#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3)
#define BITMAP_GROUP_NBITS (ZU(1) << LG_BITMAP_GROUP_NBITS)
#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1)
/* Maximum number of levels possible. */
#define BITMAP_MAX_LEVELS \
(LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \
+ !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP)
#endif
/* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct bitmap_level_s {
    /* Offset of this level's groups within the array of groups. */
    size_t group_offset;
};

struct bitmap_info_s {
    /* Logical number of bits in bitmap (stored at bottom level). */
    size_t nbits;

    /* Number of levels necessary for nbits. */
    unsigned nlevels;

    /*
     * Only the first (nlevels+1) elements are used, and levels are ordered
     * bottom to top (e.g. the bottom level is stored in levels[0]).
     */
    bitmap_level_t levels[BITMAP_MAX_LEVELS+1];
};
#endif
/* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void bitmap_info_init(bitmap_info_t *binfo, size_t nbits);
size_t bitmap_info_ngroups(const bitmap_info_t *binfo);
size_t bitmap_size(size_t nbits);
void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo);
#endif
/* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo);
bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo);
void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BITMAP_C_))
JEMALLOC_INLINE bool
bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo)
{
    unsigned rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
    bitmap_t rg = bitmap[rgoff];

    /* The bitmap is full iff the root group is 0. */
    return (rg == 0);
}

JEMALLOC_INLINE bool
bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
{
    size_t goff;
    bitmap_t g;

    assert(bit < binfo->nbits);
    goff = bit >> LG_BITMAP_GROUP_NBITS;
    g = bitmap[goff];
    return (!(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))));
}

JEMALLOC_INLINE void
bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
{
    size_t goff;
    bitmap_t *gp;
    bitmap_t g;

    assert(bit < binfo->nbits);
    assert(bitmap_get(bitmap, binfo, bit) == false);
    goff = bit >> LG_BITMAP_GROUP_NBITS;
    gp = &bitmap[goff];
    g = *gp;
    assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)));
    g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
    *gp = g;
    assert(bitmap_get(bitmap, binfo, bit));
    /* Propagate group state transitions up the tree. */
    if (g == 0) {
        unsigned i;
        for (i = 1; i < binfo->nlevels; i++) {
            bit = goff;
            goff = bit >> LG_BITMAP_GROUP_NBITS;
            gp = &bitmap[binfo->levels[i].group_offset + goff];
            g = *gp;
            assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)));
            g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
            *gp = g;
            if (g != 0)
                break;
        }
    }
}
/* sfu: set first unset. */
JEMALLOC_INLINE size_t
bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
{
    size_t bit;
    bitmap_t g;
    unsigned i;

    assert(bitmap_full(bitmap, binfo) == false);

    i = binfo->nlevels - 1;
    g = bitmap[binfo->levels[i].group_offset];
    bit = ffsl(g) - 1;
    while (i > 0) {
        i--;
        g = bitmap[binfo->levels[i].group_offset + bit];
        bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffsl(g) - 1);
    }

    bitmap_set(bitmap, binfo, bit);
    return (bit);
}
JEMALLOC_INLINE void
bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
{
    size_t goff;
    bitmap_t *gp;
    bitmap_t g;
    bool propagate;

    assert(bit < binfo->nbits);
    assert(bitmap_get(bitmap, binfo, bit));
    goff = bit >> LG_BITMAP_GROUP_NBITS;
    gp = &bitmap[goff];
    g = *gp;
    propagate = (g == 0);
    assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
    g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
    *gp = g;
    assert(bitmap_get(bitmap, binfo, bit) == false);
    /* Propagate group state transitions up the tree. */
    if (propagate) {
        unsigned i;
        for (i = 1; i < binfo->nlevels; i++) {
            bit = goff;
            goff = bit >> LG_BITMAP_GROUP_NBITS;
            gp = &bitmap[binfo->levels[i].group_offset + goff];
            g = *gp;
            propagate = (g == 0);
            assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
            g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
            *gp = g;
            if (propagate == false)
                break;
        }
    }
}
#endif
#endif
/* JEMALLOC_H_INLINES */
/******************************************************************************/
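The bitmap above is hierarchical: each level summarizes the groups of the level below it, which is what lets bitmap_sfu() ("set first unset") find a free position with a few ffsl() probes instead of a linear scan. A rough usage sketch, assuming it is compiled inside the jemalloc tree; the variable names and the use of base_alloc() for the group array are illustrative, not taken from the source:

#include "jemalloc/internal/jemalloc_internal.h"

static void
bitmap_usage_sketch(void)
{
    bitmap_info_t binfo;
    bitmap_t *bits;
    size_t nbits = 256;   /* must not exceed 2^LG_BITMAP_MAXBITS */
    size_t slot;

    bitmap_info_init(&binfo, nbits);
    /* bitmap_size() reports how much memory the group array needs. */
    bits = (bitmap_t *)base_alloc(bitmap_size(nbits));
    if (bits == NULL)
        return;
    bitmap_init(bits, &binfo);

    /* Find the first free position and mark it in one step. */
    slot = bitmap_sfu(bits, &binfo);
    assert(bitmap_get(bits, &binfo, slot));

    /* Release it again. */
    bitmap_unset(bits, &binfo, slot);
    assert(bitmap_full(bits, &binfo) == false);
}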
deps/jemalloc/include/jemalloc/internal/chunk.h
0 → 100644
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/*
* Size and alignment of memory chunks that are allocated by the OS's virtual
* memory system.
*/
#define LG_CHUNK_DEFAULT 22
/* Return the chunk address for allocation address a. */
#define CHUNK_ADDR2BASE(a) \
((void *)((uintptr_t)(a) & ~chunksize_mask))
/* Return the chunk offset of address a. */
#define CHUNK_ADDR2OFFSET(a) \
((size_t)((uintptr_t)(a) & chunksize_mask))
/* Return the smallest chunk multiple that is >= s. */
#define CHUNK_CEILING(s) \
(((s) + chunksize_mask) & ~chunksize_mask)
#endif
/* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif
/* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
extern size_t opt_lg_chunk;
#ifdef JEMALLOC_SWAP
extern bool opt_overcommit;
#endif

#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
/* Protects stats_chunks; currently not used for any other purpose. */
extern malloc_mutex_t chunks_mtx;

/* Chunk statistics. */
extern chunk_stats_t stats_chunks;
#endif

#ifdef JEMALLOC_IVSALLOC
extern rtree_t *chunks_rtree;
#endif

extern size_t chunksize;
extern size_t chunksize_mask;   /* (chunksize - 1). */
extern size_t chunk_npages;
extern size_t map_bias;         /* Number of arena chunk header pages. */
extern size_t arena_maxclass;   /* Max size class for arenas. */

void *chunk_alloc(size_t size, bool base, bool *zero);
void chunk_dealloc(void *chunk, size_t size);
bool chunk_boot(void);
#endif
/* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif
/* JEMALLOC_H_INLINES */
/******************************************************************************/
#include "jemalloc/internal/chunk_swap.h"
#include "jemalloc/internal/chunk_dss.h"
#include "jemalloc/internal/chunk_mmap.h"
deps/jemalloc/include/jemalloc/internal/chunk_dss.h
0 → 100644
#ifdef JEMALLOC_DSS
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif
/* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif
/* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
/*
* Protects sbrk() calls. This avoids malloc races among threads, though it
* does not protect against races with threads that call sbrk() directly.
*/
extern malloc_mutex_t dss_mtx;

void *chunk_alloc_dss(size_t size, bool *zero);
bool chunk_in_dss(void *chunk);
bool chunk_dealloc_dss(void *chunk, size_t size);
bool chunk_dss_boot(void);
#endif
/* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif
/* JEMALLOC_H_INLINES */
/******************************************************************************/
#endif
/* JEMALLOC_DSS */
deps/jemalloc/include/jemalloc/internal/chunk_mmap.h
0 → 100644
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif
/* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif
/* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void *chunk_alloc_mmap(size_t size);
void *chunk_alloc_mmap_noreserve(size_t size);
void chunk_dealloc_mmap(void *chunk, size_t size);
bool chunk_mmap_boot(void);
#endif
/* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif
/* JEMALLOC_H_INLINES */
/******************************************************************************/
deps/jemalloc/include/jemalloc/internal/chunk_swap.h
0 → 100644
#ifdef JEMALLOC_SWAP
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif
/* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif
/* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
extern malloc_mutex_t swap_mtx;
extern bool swap_enabled;
extern bool swap_prezeroed;
extern size_t swap_nfds;
extern int *swap_fds;
#ifdef JEMALLOC_STATS
extern size_t swap_avail;
#endif

void *chunk_alloc_swap(size_t size, bool *zero);
bool chunk_in_swap(void *chunk);
bool chunk_dealloc_swap(void *chunk, size_t size);
bool chunk_swap_enable(const int *fds, unsigned nfds, bool prezeroed);
bool chunk_swap_boot(void);
#endif
/* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif
/* JEMALLOC_H_INLINES */
/******************************************************************************/
#endif
/* JEMALLOC_SWAP */
deps/jemalloc/include/jemalloc/internal/ckh.h
0 → 100644
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct ckh_s ckh_t;
typedef struct ckhc_s ckhc_t;

/* Typedefs to allow easy function pointer passing. */
typedef void ckh_hash_t (const void *, unsigned, size_t *, size_t *);
typedef bool ckh_keycomp_t (const void *, const void *);
/* Maintain counters used to get an idea of performance. */
/* #define CKH_COUNT */
/* Print counter values in ckh_delete() (requires CKH_COUNT). */
/* #define CKH_VERBOSE */
/*
* There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit
* one bucket per L1 cache line.
*/
#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)
#endif
/* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
/* Hash table cell. */
struct ckhc_s {
    const void *key;
    const void *data;
};

struct ckh_s {
#ifdef JEMALLOC_DEBUG
#define CKH_MAGIC 0x3af2489d
    uint32_t magic;
#endif

#ifdef CKH_COUNT
    /* Counters used to get an idea of performance. */
    uint64_t ngrows;
    uint64_t nshrinks;
    uint64_t nshrinkfails;
    uint64_t ninserts;
    uint64_t nrelocs;
#endif

    /* Used for pseudo-random number generation. */
#define CKH_A 1103515241
#define CKH_C 12347
    uint32_t prn_state;

    /* Total number of items. */
    size_t count;

    /*
     * Minimum and current number of hash table buckets. There are
     * 2^LG_CKH_BUCKET_CELLS cells per bucket.
     */
    unsigned lg_minbuckets;
    unsigned lg_curbuckets;

    /* Hash and comparison functions. */
    ckh_hash_t *hash;
    ckh_keycomp_t *keycomp;

    /* Hash table with 2^lg_curbuckets buckets. */
    ckhc_t *tab;
};
#endif
/* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
bool ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
    ckh_keycomp_t *keycomp);
void ckh_delete(ckh_t *ckh);
size_t ckh_count(ckh_t *ckh);
bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
bool ckh_insert(ckh_t *ckh, const void *key, const void *data);
bool ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data);
bool ckh_search(ckh_t *ckh, const void *seachkey, void **key, void **data);
void ckh_string_hash(const void *key, unsigned minbits, size_t *hash1,
    size_t *hash2);
bool ckh_string_keycomp(const void *k1, const void *k2);
void ckh_pointer_hash(const void *key, unsigned minbits, size_t *hash1,
    size_t *hash2);
bool ckh_pointer_keycomp(const void *k1, const void *k2);
#endif
/* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif
/* JEMALLOC_H_INLINES */
/******************************************************************************/
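ckh_t is a cuckoo hash table parameterized by the hash and key-comparison function pointers typedef'd above; the *_pointer_* helpers hash the pointer value itself, and the *_string_* helpers hash NUL-terminated strings. A hedged sketch of the expected call sequence (the keys are hypothetical, and the bool returns are assumed to follow the jemalloc convention of true meaning failure):

#include "jemalloc/internal/jemalloc_internal.h"

static void
ckh_usage_sketch(void *some_key, void *some_data)
{
    ckh_t ckh;
    void *k, *v;

    /* 16 is an arbitrary initial-item hint; PROF_CKH_MINITEMS in prof.h plays this role for the profiler. */
    if (ckh_new(&ckh, 16, ckh_pointer_hash, ckh_pointer_keycomp))
        return;

    ckh_insert(&ckh, some_key, some_data);

    if (ckh_search(&ckh, some_key, &k, &v) == false) {
        /* Found: k is the stored key, v the stored data. */
        assert(k == some_key && v == some_data);
    }

    ckh_delete(&ckh);
}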
deps/jemalloc/include/jemalloc/internal/ctl.h
0 → 100644
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct ctl_node_s ctl_node_t;
typedef struct ctl_arena_stats_s ctl_arena_stats_t;
typedef struct ctl_stats_s ctl_stats_t;
#endif
/* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct ctl_node_s {
    bool named;
    union {
        struct {
            const char *name;
            /* If (nchildren == 0), this is a terminal node. */
            unsigned nchildren;
            const ctl_node_t *children;
        } named;
        struct {
            const ctl_node_t *(*index)(const size_t *, size_t, size_t);
        } indexed;
    } u;
    int (*ctl)(const size_t *, size_t, void *, size_t *, void *, size_t);
};

struct ctl_arena_stats_s {
    bool initialized;
    unsigned nthreads;
    size_t pactive;
    size_t pdirty;
#ifdef JEMALLOC_STATS
    arena_stats_t astats;

    /* Aggregate stats for small size classes, based on bin stats. */
    size_t allocated_small;
    uint64_t nmalloc_small;
    uint64_t ndalloc_small;
    uint64_t nrequests_small;

    malloc_bin_stats_t *bstats;     /* nbins elements. */
    malloc_large_stats_t *lstats;   /* nlclasses elements. */
#endif
};

struct ctl_stats_s {
#ifdef JEMALLOC_STATS
    size_t allocated;
    size_t active;
    size_t mapped;
    struct {
        size_t current;   /* stats_chunks.curchunks */
        uint64_t total;   /* stats_chunks.nchunks */
        size_t high;      /* stats_chunks.highchunks */
    } chunks;
    struct {
        size_t allocated; /* huge_allocated */
        uint64_t nmalloc; /* huge_nmalloc */
        uint64_t ndalloc; /* huge_ndalloc */
    } huge;
#endif
    ctl_arena_stats_t *arenas;   /* (narenas + 1) elements. */
#ifdef JEMALLOC_SWAP
    size_t swap_avail;
#endif
};
#endif
/* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
int ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen);
int ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp);
int ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen);
bool ctl_boot(void);
#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
if (JEMALLOC_P(mallctl)(name, oldp, oldlenp, newp, newlen) \
!= 0) { \
malloc_write("<jemalloc>: Failure in xmallctl(\""); \
malloc_write(name); \
malloc_write("\", ...)\n"); \
abort(); \
} \
} while (0)
#define xmallctlnametomib(name, mibp, miblenp) do { \
if (JEMALLOC_P(mallctlnametomib)(name, mibp, miblenp) != 0) { \
malloc_write( \
"<jemalloc>: Failure in xmallctlnametomib(\""); \
malloc_write(name); \
malloc_write("\", ...)\n"); \
abort(); \
} \
} while (0)
#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \
if (JEMALLOC_P(mallctlbymib)(mib, miblen, oldp, oldlenp, newp, \
newlen) != 0) { \
malloc_write( \
"<jemalloc>: Failure in xmallctlbymib()\n"); \
abort(); \
} \
} while (0)
#endif
/* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif
/* JEMALLOC_H_INLINES */
/******************************************************************************/
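The xmallctl* wrappers above abort via malloc_write() when a control operation fails, so internal callers can skip error handling. A hedged sketch of how they might be used; the "stats.allocated" and "arenas.narenas" names come from jemalloc's mallctl namespace and require the corresponding features to be compiled in, so treat them as illustrative:

#include "jemalloc/internal/jemalloc_internal.h"

static void
ctl_usage_sketch(void)
{
    size_t allocated, sz = sizeof(size_t);
    unsigned narenas;
    size_t usz = sizeof(unsigned);
    size_t mib[2];
    size_t miblen = sizeof(mib) / sizeof(size_t);

    /* One-shot read by name; aborts if the name is unknown. */
    xmallctl("stats.allocated", &allocated, &sz, NULL, 0);

    /* Translate the name once, then query repeatedly by MIB. */
    xmallctlnametomib("arenas.narenas", mib, &miblen);
    xmallctlbymib(mib, miblen, &narenas, &usz, NULL, 0);
}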
deps/jemalloc/include/jemalloc/internal/extent.h
0 → 100644
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct extent_node_s extent_node_t;
#endif
/* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
/* Tree of extents. */
struct extent_node_s {
#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
    /* Linkage for the size/address-ordered tree. */
    rb_node(extent_node_t) link_szad;
#endif

    /* Linkage for the address-ordered tree. */
    rb_node(extent_node_t) link_ad;

#ifdef JEMALLOC_PROF
    /* Profile counters, used for huge objects. */
    prof_ctx_t *prof_ctx;
#endif

    /* Pointer to the extent that this tree node is responsible for. */
    void *addr;

    /* Total region size. */
    size_t size;
};
typedef rb_tree(extent_node_t) extent_tree_t;
#endif
/* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t)
#endif

rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
#endif
/* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif
/* JEMALLOC_H_INLINES */
/******************************************************************************/
deps/jemalloc/include/jemalloc/internal/hash.h
0 → 100644
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif
/* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif
/* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif
/* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
uint64_t hash(const void *key, size_t len, uint64_t seed);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_))
/*
* The following hash function is based on MurmurHash64A(), placed into the
* public domain by Austin Appleby. See http://murmurhash.googlepages.com/ for
* details.
*/
JEMALLOC_INLINE uint64_t
hash(const void *key, size_t len, uint64_t seed)
{
    const uint64_t m = 0xc6a4a7935bd1e995;
    const int r = 47;
    uint64_t h = seed ^ (len * m);
    const uint64_t *data = (const uint64_t *)key;
    const uint64_t *end = data + (len / 8);
    const unsigned char *data2;

    assert(((uintptr_t)key & 0x7) == 0);

    while (data != end) {
        uint64_t k = *data++;

        k *= m;
        k ^= k >> r;
        k *= m;

        h ^= k;
        h *= m;
    }

    data2 = (const unsigned char *)data;
    switch (len & 7) {
        case 7: h ^= ((uint64_t)(data2[6])) << 48;
        case 6: h ^= ((uint64_t)(data2[5])) << 40;
        case 5: h ^= ((uint64_t)(data2[4])) << 32;
        case 4: h ^= ((uint64_t)(data2[3])) << 24;
        case 3: h ^= ((uint64_t)(data2[2])) << 16;
        case 2: h ^= ((uint64_t)(data2[1])) << 8;
        case 1: h ^= ((uint64_t)(data2[0]));
            h *= m;
    }

    h ^= h >> r;
    h *= m;
    h ^= h >> r;

    return (h);
}
#endif
#endif
/* JEMALLOC_H_INLINES */
/******************************************************************************/
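Note the assert: this MurmurHash64A variant reads the key eight bytes at a time and requires the key pointer to be 8-byte aligned. A small sketch of calling it from inside the jemalloc tree; the buffer, seed, and wrapper name are hypothetical:

#include "jemalloc/internal/jemalloc_internal.h"

static uint64_t
hash_usage_sketch(void)
{
    /* The union guarantees the 8-byte alignment that hash() asserts. */
    union {
        uint64_t align;
        char buf[16];
    } key;

    memset(key.buf, 0, sizeof(key.buf));
    memcpy(key.buf, "example-key", strlen("example-key"));

    /* 0x12345678 is an arbitrary example seed. */
    return (hash(key.buf, strlen("example-key"), 0x12345678));
}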
deps/jemalloc/include/jemalloc/internal/huge.h
0 → 100644
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif
/* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif
/* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#ifdef JEMALLOC_STATS
/* Huge allocation statistics. */
extern uint64_t huge_nmalloc;
extern uint64_t huge_ndalloc;
extern size_t huge_allocated;
#endif

/* Protects chunk-related data structures. */
extern malloc_mutex_t huge_mtx;

void *huge_malloc(size_t size, bool zero);
void *huge_palloc(size_t size, size_t alignment, bool zero);
void *huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
    size_t extra);
void *huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero);
void huge_dalloc(void *ptr, bool unmap);
size_t huge_salloc(const void *ptr);
#ifdef JEMALLOC_PROF
prof_ctx_t *huge_prof_ctx_get(const void *ptr);
void huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
#endif
bool huge_boot(void);
#endif
/* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif
/* JEMALLOC_H_INLINES */
/******************************************************************************/
deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in
0 → 100644
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <errno.h>
#include <limits.h>
#ifndef SIZE_T_MAX
# define SIZE_T_MAX SIZE_MAX
#endif
#include <pthread.h>
#include <sched.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
# define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
#endif
#include <inttypes.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#include <unistd.h>
#include <fcntl.h>
#include <pthread.h>
#include <math.h>
#define JEMALLOC_MANGLE
#include "../jemalloc@install_suffix@.h"
#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif
#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif
#ifdef JEMALLOC_LAZY_LOCK
#include <dlfcn.h>
#endif
#define RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"
extern void (*JEMALLOC_P(malloc_message))(void *wcbopaque, const char *s);
/*
* Define a custom assert() in order to reduce the chances of deadlock during
* assertion failure.
*/
#ifndef assert
# ifdef JEMALLOC_DEBUG
# define assert(e) do { \
if (!(e)) { \
char line_buf[UMAX2S_BUFSIZE]; \
malloc_write("<jemalloc>: "); \
malloc_write(__FILE__); \
malloc_write(":"); \
malloc_write(u2s(__LINE__, 10, line_buf)); \
malloc_write(": Failed assertion: "); \
malloc_write("\""); \
malloc_write(#e); \
malloc_write("\"\n"); \
abort(); \
} \
} while (0)
# else
# define assert(e)
# endif
#endif
#ifdef JEMALLOC_DEBUG
# define dassert(e) assert(e)
#else
# define dassert(e)
#endif
/*
* jemalloc can conceptually be broken into components (arena, tcache, etc.),
* but there are circular dependencies that cannot be broken without
* substantial performance degradation. In order to reduce the effect on
* visual code flow, read the header files in multiple passes, with one of the
* following cpp variables defined during each pass:
*
* JEMALLOC_H_TYPES : Preprocessor-defined constants and pseudo-opaque data
* types.
* JEMALLOC_H_STRUCTS : Data structures.
* JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
* JEMALLOC_H_INLINES : Inline functions.
*/
/******************************************************************************/
#define JEMALLOC_H_TYPES
#define ALLOCM_LG_ALIGN_MASK ((int)0x3f)
#define ZU(z) ((size_t)z)
#ifndef __DECONST
# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif
#ifdef JEMALLOC_DEBUG
/* Disable inlining to make debugging easier. */
# define JEMALLOC_INLINE
# define inline
#else
# define JEMALLOC_ENABLE_INLINE
# define JEMALLOC_INLINE static inline
#endif
/* Size of stack-allocated buffer passed to buferror(). */
#define BUFERROR_BUF 64
/* Minimum alignment of allocations is 2^LG_QUANTUM bytes. */
#ifdef __i386__
# define LG_QUANTUM 4
#endif
#ifdef __ia64__
# define LG_QUANTUM 4
#endif
#ifdef __alpha__
# define LG_QUANTUM 4
#endif
#ifdef __sparc64__
# define LG_QUANTUM 4
#endif
#if (defined(__amd64__) || defined(__x86_64__))
# define LG_QUANTUM 4
#endif
#ifdef __arm__
# define LG_QUANTUM 3
#endif
#ifdef __mips__
# define LG_QUANTUM 3
#endif
#ifdef __powerpc__
# define LG_QUANTUM 4
#endif
#ifdef __s390x__
# define LG_QUANTUM 4
#endif
#define QUANTUM ((size_t)(1U << LG_QUANTUM))
#define QUANTUM_MASK (QUANTUM - 1)
/* Return the smallest quantum multiple that is >= a. */
#define QUANTUM_CEILING(a) \
(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
#define LONG ((size_t)(1U << LG_SIZEOF_LONG))
#define LONG_MASK (LONG - 1)
/* Return the smallest long multiple that is >= a. */
#define LONG_CEILING(a) \
(((a) + LONG_MASK) & ~LONG_MASK)
#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
#define PTR_MASK (SIZEOF_PTR - 1)
/* Return the smallest (void *) multiple that is >= a. */
#define PTR_CEILING(a) \
(((a) + PTR_MASK) & ~PTR_MASK)
/*
* Maximum size of L1 cache line. This is used to avoid cache line aliasing.
* In addition, this controls the spacing of cacheline-spaced size classes.
*/
#define LG_CACHELINE 6
#define CACHELINE ((size_t)(1U << LG_CACHELINE))
#define CACHELINE_MASK (CACHELINE - 1)
/* Return the smallest cacheline multiple that is >= s. */
#define CACHELINE_CEILING(s) \
(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
/*
* Page size. STATIC_PAGE_SHIFT is determined by the configure script. If
* DYNAMIC_PAGE_SHIFT is enabled, only use the STATIC_PAGE_* macros where
* compile-time values are required for the purposes of defining data
* structures.
*/
#define STATIC_PAGE_SIZE ((size_t)(1U << STATIC_PAGE_SHIFT))
#define STATIC_PAGE_MASK ((size_t)(STATIC_PAGE_SIZE - 1))
#ifdef PAGE_SHIFT
# undef PAGE_SHIFT
#endif
#ifdef PAGE_SIZE
# undef PAGE_SIZE
#endif
#ifdef PAGE_MASK
# undef PAGE_MASK
#endif
#ifdef DYNAMIC_PAGE_SHIFT
# define PAGE_SHIFT lg_pagesize
# define PAGE_SIZE pagesize
# define PAGE_MASK pagesize_mask
#else
# define PAGE_SHIFT STATIC_PAGE_SHIFT
# define PAGE_SIZE STATIC_PAGE_SIZE
# define PAGE_MASK STATIC_PAGE_MASK
#endif
/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s) \
(((s) + PAGE_MASK) & ~PAGE_MASK)
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prn.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/prof.h"
#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prn.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/prof.h"
#ifdef JEMALLOC_STATS
typedef struct {
uint64_t allocated;
uint64_t deallocated;
} thread_allocated_t;
#endif
#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS
extern bool opt_abort;
#ifdef JEMALLOC_FILL
extern bool opt_junk;
#endif
#ifdef JEMALLOC_SYSV
extern bool opt_sysv;
#endif
#ifdef JEMALLOC_XMALLOC
extern bool opt_xmalloc;
#endif
#ifdef JEMALLOC_FILL
extern bool opt_zero;
#endif
extern size_t opt_narenas;
#ifdef DYNAMIC_PAGE_SHIFT
extern size_t pagesize;
extern size_t pagesize_mask;
extern size_t lg_pagesize;
#endif
/* Number of CPUs. */
extern unsigned ncpus;
extern malloc_mutex_t arenas_lock; /* Protects arenas initialization. */
extern pthread_key_t arenas_tsd;
#ifndef NO_TLS
/*
* Map of pthread_self() --> arenas[???], used for selecting an arena to use
* for allocations.
*/
extern __thread arena_t *arenas_tls JEMALLOC_ATTR(tls_model("initial-exec"));
# define ARENA_GET() arenas_tls
# define ARENA_SET(v) do { \
arenas_tls = (v); \
pthread_setspecific(arenas_tsd, (void *)(v)); \
} while (0)
#else
# define ARENA_GET() ((arena_t *)pthread_getspecific(arenas_tsd))
# define ARENA_SET(v) do { \
pthread_setspecific(arenas_tsd, (void *)(v)); \
} while (0)
#endif
/*
* Arenas that are used to service external requests. Not all elements of the
* arenas array are necessarily used; arenas are created lazily as needed.
*/
extern arena_t **arenas;
extern unsigned narenas;
#ifdef JEMALLOC_STATS
# ifndef NO_TLS
extern __thread thread_allocated_t thread_allocated_tls;
# define ALLOCATED_GET() (thread_allocated_tls.allocated)
# define ALLOCATEDP_GET() (&thread_allocated_tls.allocated)
# define DEALLOCATED_GET() (thread_allocated_tls.deallocated)
# define DEALLOCATEDP_GET() (&thread_allocated_tls.deallocated)
# define ALLOCATED_ADD(a, d) do { \
thread_allocated_tls.allocated += a; \
thread_allocated_tls.deallocated += d; \
} while (0)
# else
extern pthread_key_t thread_allocated_tsd;
thread_allocated_t *thread_allocated_get_hard(void);
# define ALLOCATED_GET() (thread_allocated_get()->allocated)
# define ALLOCATEDP_GET() (&thread_allocated_get()->allocated)
# define DEALLOCATED_GET() (thread_allocated_get()->deallocated)
# define DEALLOCATEDP_GET() (&thread_allocated_get()->deallocated)
# define ALLOCATED_ADD(a, d) do { \
thread_allocated_t *thread_allocated = thread_allocated_get(); \
thread_allocated->allocated += (a); \
thread_allocated->deallocated += (d); \
} while (0)
# endif
#endif
arena_t *arenas_extend(unsigned ind);
arena_t *choose_arena_hard(void);
int buferror(int errnum, char *buf, size_t buflen);
void jemalloc_prefork(void);
void jemalloc_postfork(void);
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prn.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/prof.h"
#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prn.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#ifndef JEMALLOC_ENABLE_INLINE
size_t pow2_ceil(size_t x);
size_t s2u(size_t size);
size_t sa2u(size_t size, size_t alignment, size_t *run_size_p);
void malloc_write(const char *s);
arena_t *choose_arena(void);
# if (defined(JEMALLOC_STATS) && defined(NO_TLS))
thread_allocated_t *thread_allocated_get(void);
# endif
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/* Compute the smallest power of 2 that is >= x. */
JEMALLOC_INLINE size_t
pow2_ceil(size_t x)
{
x--;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
#if (LG_SIZEOF_PTR == 3)
x |= x >> 32;
#endif
x++;
return (x);
}
/*
* Compute usable size that would result from allocating an object with the
* specified size.
*/
JEMALLOC_INLINE size_t
s2u(size_t size)
{
if (size <= small_maxclass)
return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
if (size <= arena_maxclass)
return (PAGE_CEILING(size));
return (CHUNK_CEILING(size));
}
/*
* Compute usable size that would result from allocating an object with the
* specified size and alignment.
*/
JEMALLOC_INLINE size_t
sa2u(size_t size, size_t alignment, size_t *run_size_p)
{
size_t usize;
/*
* Round size up to the nearest multiple of alignment.
*
* This done, we can take advantage of the fact that for each small
* size class, every object is aligned at the smallest power of two
* that is non-zero in the base two representation of the size. For
* example:
*
* Size | Base 2 | Minimum alignment
* -----+----------+------------------
* 96 | 1100000 | 32
* 144 | 10100000 | 32
* 192 | 11000000 | 64
*
* Depending on runtime settings, it is possible that arena_malloc()
* will further round up to a power of two, but that never causes
* correctness issues.
*/
usize = (size + (alignment - 1)) & (-alignment);
/*
* (usize < size) protects against the combination of maximal
* alignment and size greater than maximal alignment.
*/
if (usize < size) {
/* size_t overflow. */
return (0);
}
if (usize <= arena_maxclass && alignment <= PAGE_SIZE) {
if (usize <= small_maxclass)
return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
return (PAGE_CEILING(usize));
} else {
size_t run_size;
/*
* We can't achieve subpage alignment, so round up alignment
* permanently; it makes later calculations simpler.
*/
alignment = PAGE_CEILING(alignment);
usize = PAGE_CEILING(size);
/*
* (usize < size) protects against very large sizes within
* PAGE_SIZE of SIZE_T_MAX.
*
* (usize + alignment < usize) protects against the
* combination of maximal alignment and usize large enough
* to cause overflow. This is similar to the first overflow
* check above, but it needs to be repeated due to the new
* usize value, which may now be *equal* to maximal
* alignment, whereas before we only detected overflow if the
* original size was *greater* than maximal alignment.
*/
if (usize < size || usize + alignment < usize) {
/* size_t overflow. */
return (0);
}
/*
* Calculate the size of the over-size run that arena_palloc()
* would need to allocate in order to guarantee the alignment.
*/
if (usize >= alignment)
run_size = usize + alignment - PAGE_SIZE;
else {
/*
* It is possible that (alignment << 1) will cause
* overflow, but it doesn't matter because we also
* subtract PAGE_SIZE, which in the case of overflow
* leaves us with a very large run_size. That causes
* the first conditional below to fail, which means
* that the bogus run_size value never gets used for
* anything important.
*/
run_size = (alignment << 1) - PAGE_SIZE;
}
if (run_size_p != NULL)
*run_size_p = run_size;
if (run_size <= arena_maxclass)
return (PAGE_CEILING(usize));
return (CHUNK_CEILING(usize));
}
}
/*
* Wrapper around malloc_message() that avoids the need for
* JEMALLOC_P(malloc_message)(...) throughout the code.
*/
JEMALLOC_INLINE void
malloc_write(const char *s)
{
JEMALLOC_P(malloc_message)(NULL, s);
}
/*
* Choose an arena based on a per-thread value (fast-path code, calls slow-path
* code if necessary).
*/
JEMALLOC_INLINE arena_t *
choose_arena(void)
{
arena_t *ret;
ret = ARENA_GET();
if (ret == NULL) {
ret = choose_arena_hard();
assert(ret != NULL);
}
return (ret);
}
#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
JEMALLOC_INLINE thread_allocated_t *
thread_allocated_get(void)
{
thread_allocated_t *thread_allocated = (thread_allocated_t *)
pthread_getspecific(thread_allocated_tsd);
if (thread_allocated == NULL)
return (thread_allocated_get_hard());
return (thread_allocated);
}
#endif
#endif
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#ifndef JEMALLOC_ENABLE_INLINE
void *imalloc(size_t size);
void *icalloc(size_t size);
void *ipalloc(size_t usize, size_t alignment, bool zero);
size_t isalloc(const void *ptr);
# ifdef JEMALLOC_IVSALLOC
size_t ivsalloc(const void *ptr);
# endif
void idalloc(void *ptr);
void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
bool zero, bool no_move);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE void *
imalloc(size_t size)
{
assert(size != 0);
if (size <= arena_maxclass)
return (arena_malloc(size, false));
else
return (huge_malloc(size, false));
}
JEMALLOC_INLINE void *
icalloc(size_t size)
{
if (size <= arena_maxclass)
return (arena_malloc(size, true));
else
return (huge_malloc(size, true));
}
JEMALLOC_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{
void *ret;
assert(usize != 0);
assert(usize == sa2u(usize, alignment, NULL));
if (usize <= arena_maxclass && alignment <= PAGE_SIZE)
ret = arena_malloc(usize, zero);
else {
size_t run_size = 0;
/*
* Ideally we would only ever call sa2u() once per aligned
* allocation request, and the caller of this function has
* already done so once. However, it's rather burdensome to
* require every caller to pass in run_size, especially given
* that it's only relevant to large allocations. Therefore,
* just call it again here in order to get run_size.
*/
sa2u(usize, alignment, &run_size);
if (run_size <= arena_maxclass) {
ret = arena_palloc(choose_arena(), usize, run_size,
alignment, zero);
} else if (alignment <= chunksize)
ret = huge_malloc(usize, zero);
else
ret = huge_palloc(usize, alignment, zero);
}
assert(((uintptr_t)ret & (alignment - 1)) == 0);
return (ret);
}
JEMALLOC_INLINE size_t
isalloc(const void *ptr)
{
size_t ret;
arena_chunk_t *chunk;
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk != ptr) {
/* Region. */
dassert(chunk->arena->magic == ARENA_MAGIC);
#ifdef JEMALLOC_PROF
ret = arena_salloc_demote(ptr);
#else
ret = arena_salloc(ptr);
#endif
} else
ret = huge_salloc(ptr);
return (ret);
}
#ifdef JEMALLOC_IVSALLOC
JEMALLOC_INLINE size_t
ivsalloc(const void *ptr)
{
/* Return 0 if ptr is not within a chunk managed by jemalloc. */
if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
return (0);
return (isalloc(ptr));
}
#endif
JEMALLOC_INLINE void
idalloc(void *ptr)
{
arena_chunk_t *chunk;
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk != ptr)
arena_dalloc(chunk->arena, chunk, ptr);
else
huge_dalloc(ptr, true);
}
JEMALLOC_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
bool no_move)
{
void *ret;
size_t oldsize;
assert(ptr != NULL);
assert(size != 0);
oldsize = isalloc(ptr);
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
!= 0) {
size_t usize, copysize;
/*
* Existing object alignment is inadequate; allocate new space
* and copy.
*/
if (no_move)
return (NULL);
usize = sa2u(size + extra, alignment, NULL);
if (usize == 0)
return (NULL);
ret = ipalloc(usize, alignment, zero);
if (ret == NULL) {
if (extra == 0)
return (NULL);
/* Try again, without extra this time. */
usize = sa2u(size, alignment, NULL);
if (usize == 0)
return (NULL);
ret = ipalloc(usize, alignment, zero);
if (ret == NULL)
return (NULL);
}
/*
* Copy at most size bytes (not size+extra), since the caller
* has no expectation that the extra bytes will be reliably
* preserved.
*/
copysize = (size < oldsize) ? size : oldsize;
memcpy(ret, ptr, copysize);
idalloc(ptr);
return (ret);
}
if (no_move) {
if (size <= arena_maxclass) {
return (arena_ralloc_no_move(ptr, oldsize, size,
extra, zero));
} else {
return (huge_ralloc_no_move(ptr, oldsize, size,
extra));
}
} else {
if (size + extra <= arena_maxclass) {
return (arena_ralloc(ptr, oldsize, size, extra,
alignment, zero));
} else {
return (huge_ralloc(ptr, oldsize, size, extra,
alignment, zero));
}
}
}
#endif
#include "jemalloc/internal/prof.h"
#undef JEMALLOC_H_INLINES
/******************************************************************************/
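pow2_ceil() above uses the classic fill-the-low-bits trick: propagate the highest set bit of x-1 into every lower position, then add one. A standalone restatement with a few spot checks; the 64-bit guard is adapted for a self-contained build, whereas the header's version keys off LG_SIZEOF_PTR:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Standalone restatement of pow2_ceil(), for illustration only. */
static size_t
pow2_ceil_demo(size_t x)
{
    x--;
    x |= x >> 1;
    x |= x >> 2;
    x |= x >> 4;
    x |= x >> 8;
    x |= x >> 16;
#if (SIZE_MAX > UINT32_MAX)
    x |= x >> 32;
#endif
    x++;
    return (x);
}

int
main(void)
{
    assert(pow2_ceil_demo(1) == 1);
    assert(pow2_ceil_demo(17) == 32);
    assert(pow2_ceil_demo(4096) == 4096);
    return (0);
}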
deps/jemalloc/include/jemalloc/internal/mb.h
0 → 100644
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif
/* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif
/* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif
/* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
void mb_write(void);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MB_C_))
#ifdef __i386__
/*
* According to the Intel Architecture Software Developer's Manual, current
* processors execute instructions in order from the perspective of other
* processors in a multiprocessor system, but 1) Intel reserves the right to
* change that, and 2) the compiler's optimizer could re-order instructions if
* there weren't some form of barrier. Therefore, even if running on an
* architecture that does not need memory barriers (everything through at least
* i686), an "optimizer barrier" is necessary.
*/
JEMALLOC_INLINE void
mb_write(void)
{
# if 0
    /* This is a true memory barrier. */
    asm volatile ("pusha;"
        "xor %%eax,%%eax;"
        "cpuid;"
        "popa;"
        : /* Outputs. */
        : /* Inputs. */
        : "memory" /* Clobbers. */
        );
#else
    /*
     * This is hopefully enough to keep the compiler from reordering
     * instructions around this one.
     */
    asm volatile ("nop;"
        : /* Outputs. */
        : /* Inputs. */
        : "memory" /* Clobbers. */
        );
#endif
}
#elif (defined(__amd64_) || defined(__x86_64__))
JEMALLOC_INLINE void
mb_write(void)
{
    asm volatile ("sfence"
        : /* Outputs. */
        : /* Inputs. */
        : "memory" /* Clobbers. */
        );
}
#elif defined(__powerpc__)
JEMALLOC_INLINE void
mb_write(void)
{
    asm volatile ("eieio"
        : /* Outputs. */
        : /* Inputs. */
        : "memory" /* Clobbers. */
        );
}
#elif defined(__sparc64__)
JEMALLOC_INLINE void
mb_write(void)
{
    asm volatile ("membar #StoreStore"
        : /* Outputs. */
        : /* Inputs. */
        : "memory" /* Clobbers. */
        );
}
#else
/*
* This is much slower than a simple memory barrier, but the semantics of mutex
* unlock make this work.
*/
JEMALLOC_INLINE void
mb_write(void)
{
    malloc_mutex_t mtx;

    malloc_mutex_init(&mtx);
    malloc_mutex_lock(&mtx);
    malloc_mutex_unlock(&mtx);
}
#endif
#endif
#endif
/* JEMALLOC_H_INLINES */
/******************************************************************************/
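mb_write() is a store (write) barrier. A hedged sketch of the single-writer publish pattern such a barrier supports; the payload and flag names are hypothetical, and reader-side ordering is omitted:

#include "jemalloc/internal/jemalloc_internal.h"

static int payload;                      /* hypothetical data written before publishing */
static volatile int payload_ready = 0;   /* hypothetical flag that readers poll */

static void
publish_payload(int value)
{
    payload = value;
    mb_write();          /* Make the payload store visible... */
    payload_ready = 1;   /* ...before the flag that announces it. */
}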
deps/jemalloc/include/jemalloc/internal/mutex.h
0 → 100644
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#ifdef JEMALLOC_OSSPIN
typedef OSSpinLock malloc_mutex_t;
#else
typedef pthread_mutex_t malloc_mutex_t;
#endif
#ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
# define MALLOC_MUTEX_INITIALIZER PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
#else
# define MALLOC_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
#endif
#endif
/* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif
/* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#ifdef JEMALLOC_LAZY_LOCK
extern bool isthreaded;
#else
# define isthreaded true
#endif

bool malloc_mutex_init(malloc_mutex_t *mutex);
void malloc_mutex_destroy(malloc_mutex_t *mutex);
#endif
/* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
void malloc_mutex_lock(malloc_mutex_t *mutex);
bool malloc_mutex_trylock(malloc_mutex_t *mutex);
void malloc_mutex_unlock(malloc_mutex_t *mutex);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
JEMALLOC_INLINE void
malloc_mutex_lock(malloc_mutex_t *mutex)
{
    if (isthreaded) {
#ifdef JEMALLOC_OSSPIN
        OSSpinLockLock(mutex);
#else
        pthread_mutex_lock(mutex);
#endif
    }
}

JEMALLOC_INLINE bool
malloc_mutex_trylock(malloc_mutex_t *mutex)
{
    if (isthreaded) {
#ifdef JEMALLOC_OSSPIN
        return (OSSpinLockTry(mutex) == false);
#else
        return (pthread_mutex_trylock(mutex) != 0);
#endif
    } else
        return (false);
}

JEMALLOC_INLINE void
malloc_mutex_unlock(malloc_mutex_t *mutex)
{
    if (isthreaded) {
#ifdef JEMALLOC_OSSPIN
        OSSpinLockUnlock(mutex);
#else
        pthread_mutex_unlock(mutex);
#endif
    }
}
#endif
#endif
/* JEMALLOC_H_INLINES */
/******************************************************************************/
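malloc_mutex_t compiles down to either an OSSpinLock or a pthread mutex, and when JEMALLOC_LAZY_LOCK is in effect the lock/unlock wrappers are no-ops until isthreaded becomes true. A minimal sketch of guarding shared state with them; the lock and counter names are hypothetical, and malloc_mutex_init() is assumed to follow the convention of returning true on failure:

#include "jemalloc/internal/jemalloc_internal.h"

static malloc_mutex_t stats_lock;
static uint64_t nrequests;   /* hypothetical shared counter */

static bool
stats_boot(void)
{
    /* true indicates failure, matching the *_boot() functions above. */
    return (malloc_mutex_init(&stats_lock));
}

static void
stats_bump(void)
{
    malloc_mutex_lock(&stats_lock);
    nrequests++;
    malloc_mutex_unlock(&stats_lock);
}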
deps/jemalloc/include/jemalloc/internal/prn.h
0 → 100644
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/*
* Simple linear congruential pseudo-random number generator:
*
* prn(y) = (a*x + c) % m
*
* where the following constants ensure maximal period:
*
* a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4.
* c == Odd number (relatively prime to 2^n).
* m == 2^32
*
* See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints.
*
* This choice of m has the disadvantage that the quality of the bits is
* proportional to bit position. For example, the lowest bit has a cycle of 2,
* the next has a cycle of 4, etc. For this reason, we prefer to use the upper
* bits.
*
* Macro parameters:
* uint32_t r : Result.
* unsigned lg_range : (0..32], number of least significant bits to return.
* uint32_t state : Seed value.
* const uint32_t a, c : See above discussion.
*/
#define prn32(r, lg_range, state, a, c) do { \
assert(lg_range > 0); \
assert(lg_range <= 32); \
\
r = (state * (a)) + (c); \
state = r; \
r >>= (32 - lg_range); \
} while (false)
/* Same as prn32(), but 64 bits of pseudo-randomness, using uint64_t. */
#define prn64(r, lg_range, state, a, c) do { \
assert(lg_range > 0); \
assert(lg_range <= 64); \
\
r = (state * (a)) + (c); \
state = r; \
r >>= (64 - lg_range); \
} while (false)
#endif
/* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif
/* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif
/* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif
/* JEMALLOC_H_INLINES */
/******************************************************************************/
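A hedged sketch of prn32() in use. The (a, c) pair reuses CKH_A/CKH_C from ckh.h above, which satisfy the stated constraints (a is odd with a-1 divisible by 4, c is odd); the state variable and helper are hypothetical:

#include "jemalloc/internal/jemalloc_internal.h"

static uint32_t prn_demo_state = 42;   /* arbitrary seed */

static uint32_t
prn_demo_next(void)
{
    uint32_t r;

    /* Take the top 16 bits, since the upper bits are the higher-quality ones. */
    prn32(r, 16, prn_demo_state, CKH_A, CKH_C);
    return (r);
}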
deps/jemalloc/include/jemalloc/internal/prof.h
0 → 100644
#ifdef JEMALLOC_PROF
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct prof_bt_s prof_bt_t;
typedef struct prof_cnt_s prof_cnt_t;
typedef struct prof_thr_cnt_s prof_thr_cnt_t;
typedef struct prof_ctx_s prof_ctx_t;
typedef struct prof_tdata_s prof_tdata_t;
/* Option defaults. */
#define PROF_PREFIX_DEFAULT "jeprof"
#define LG_PROF_BT_MAX_DEFAULT 7
#define LG_PROF_SAMPLE_DEFAULT 0
#define LG_PROF_INTERVAL_DEFAULT -1
#define LG_PROF_TCMAX_DEFAULT -1
/*
* Hard limit on stack backtrace depth. Note that the version of
* prof_backtrace() that is based on __builtin_return_address() necessarily has
* a hard-coded number of backtrace frame handlers.
*/
#if (defined(JEMALLOC_PROF_LIBGCC) || defined(JEMALLOC_PROF_LIBUNWIND))
# define LG_PROF_BT_MAX ((ZU(1) << (LG_SIZEOF_PTR+3)) - 1)
#else
# define LG_PROF_BT_MAX 7
/* >= LG_PROF_BT_MAX_DEFAULT */
#endif
#define PROF_BT_MAX (1U << LG_PROF_BT_MAX)
/* Initial hash table size. */
#define PROF_CKH_MINITEMS 64
/* Size of memory buffer to use when writing dump files. */
#define PROF_DUMP_BUF_SIZE 65536
#endif
/* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct prof_bt_s {
    /* Backtrace, stored as len program counters. */
    void **vec;
    unsigned len;
};
#ifdef JEMALLOC_PROF_LIBGCC
/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
typedef struct {
    prof_bt_t *bt;
    unsigned nignore;
    unsigned max;
} prof_unwind_data_t;
#endif
struct prof_cnt_s {
    /*
     * Profiling counters. An allocation/deallocation pair can operate on
     * different prof_thr_cnt_t objects that are linked into the same
     * prof_ctx_t cnts_ql, so it is possible for the cur* counters to go
     * negative. In principle it is possible for the *bytes counters to
     * overflow/underflow, but a general solution would require something
     * like 128-bit counters; this implementation doesn't bother to solve
     * that problem.
     */
    int64_t curobjs;
    int64_t curbytes;
    uint64_t accumobjs;
    uint64_t accumbytes;
};
struct prof_thr_cnt_s {
    /* Linkage into prof_ctx_t's cnts_ql. */
    ql_elm(prof_thr_cnt_t) cnts_link;

    /* Linkage into thread's LRU. */
    ql_elm(prof_thr_cnt_t) lru_link;

    /*
     * Associated context. If a thread frees an object that it did not
     * allocate, it is possible that the context is not cached in the
     * thread's hash table, in which case it must be able to look up the
     * context, insert a new prof_thr_cnt_t into the thread's hash table,
     * and link it into the prof_ctx_t's cnts_ql.
     */
    prof_ctx_t *ctx;

    /*
     * Threads use memory barriers to update the counters. Since there is
     * only ever one writer, the only challenge is for the reader to get a
     * consistent read of the counters.
     *
     * The writer uses this series of operations:
     *
     * 1) Increment epoch to an odd number.
     * 2) Update counters.
     * 3) Increment epoch to an even number.
     *
     * The reader must assure 1) that the epoch is even while it reads the
     * counters, and 2) that the epoch doesn't change between the time it
     * starts and finishes reading the counters.
     */
    unsigned epoch;

    /* Profiling counters. */
    prof_cnt_t cnts;
};
struct prof_ctx_s {
    /* Associated backtrace. */
    prof_bt_t *bt;

    /* Protects cnt_merged and cnts_ql. */
    malloc_mutex_t lock;

    /* Temporary storage for summation during dump. */
    prof_cnt_t cnt_summed;

    /* When threads exit, they merge their stats into cnt_merged. */
    prof_cnt_t cnt_merged;

    /*
     * List of profile counters, one for each thread that has allocated in
     * this context.
     */
    ql_head(prof_thr_cnt_t) cnts_ql;
};
struct
prof_tdata_s
{
/*
* Hash of (prof_bt_t *)-->(prof_thr_cnt_t *). Each thread keeps a
* cache of backtraces, with associated thread-specific prof_thr_cnt_t
* objects. Other threads may read the prof_thr_cnt_t contents, but no
* others will ever write them.
*
* Upon thread exit, the thread must merge all the prof_thr_cnt_t
* counter data into the associated prof_ctx_t objects, and unlink/free
* the prof_thr_cnt_t objects.
*/
ckh_t
bt2cnt
;
/* LRU for contents of bt2cnt. */
ql_head
(
prof_thr_cnt_t
)
lru_ql
;
/* Backtrace vector, used for calls to prof_backtrace(). */
void
**
vec
;
/* Sampling state. */
uint64_t
prn_state
;
uint64_t
threshold
;
uint64_t
accum
;
};
#endif
/* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
extern bool opt_prof;
/*
 * Even if opt_prof is true, sampling can be temporarily disabled by setting
 * opt_prof_active to false.  No locking is used when updating opt_prof_active,
 * so there are no guarantees regarding how long it will take for all threads
 * to notice state changes.
 */
extern bool opt_prof_active;
extern size_t opt_lg_prof_bt_max;     /* Maximum backtrace depth. */
extern size_t opt_lg_prof_sample;     /* Mean bytes between samples. */
extern ssize_t opt_lg_prof_interval;  /* lg(prof_interval). */
extern bool opt_prof_gdump;           /* High-water memory dumping. */
extern bool opt_prof_leak;            /* Dump leak summary at exit. */
extern bool opt_prof_accum;           /* Report cumulative bytes. */
extern ssize_t opt_lg_prof_tcmax;     /* lg(max per-thread backtrace cache). */
extern char opt_prof_prefix[PATH_MAX + 1];

/*
 * Profile dump interval, measured in bytes allocated.  Each arena triggers a
 * profile dump when it reaches this threshold.  The effect is that the
 * interval between profile dumps averages prof_interval, though the actual
 * interval between dumps will tend to be sporadic, and the interval will be a
 * maximum of approximately (prof_interval * narenas).
 */
extern uint64_t prof_interval;

/*
 * If true, promote small sampled objects to large objects, since small run
 * headers do not have embedded profile context pointers.
 */
extern bool prof_promote;

/* (1U << opt_lg_prof_bt_max). */
extern unsigned prof_bt_max;

/* Thread-specific backtrace cache, used to reduce bt2ctx contention. */
#ifndef NO_TLS
extern __thread prof_tdata_t *prof_tdata_tls
    JEMALLOC_ATTR(tls_model("initial-exec"));
#  define PROF_TCACHE_GET() prof_tdata_tls
#  define PROF_TCACHE_SET(v) do {                              \
    prof_tdata_tls = (v);                                      \
    pthread_setspecific(prof_tdata_tsd, (void *)(v));          \
} while (0)
#else
#  define PROF_TCACHE_GET()                                    \
    ((prof_tdata_t *)pthread_getspecific(prof_tdata_tsd))
#  define PROF_TCACHE_SET(v) do {                              \
    pthread_setspecific(prof_tdata_tsd, (void *)(v));          \
} while (0)
#endif
/*
 * Same contents as b2cnt_tls, but initialized such that the TSD destructor is
 * called when a thread exits, so that prof_tdata_tls contents can be merged,
 * unlinked, and deallocated.
 */
extern pthread_key_t prof_tdata_tsd;
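The comment above explains why a pthread TSD key is kept alongside the __thread variable: only the TSD destructor runs at thread exit. A rough sketch of the registration pattern (not part of this header; the cleanup body is elided and the helper names are illustrative):

static void
prof_tdata_dtor(void *arg)
{
    prof_tdata_t *prof_tdata = (prof_tdata_t *)arg;

    /* Merge counters into the prof_ctx_t objects, unlink, and free. */
    /* ... */
    (void)prof_tdata;
}

static bool
prof_tdata_tsd_boot(void)
{

    /*
     * At thread exit, pthreads passes the value last stored via
     * PROF_TCACHE_SET() to the destructor.
     */
    return (pthread_key_create(&prof_tdata_tsd, prof_tdata_dtor) != 0);
}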
void bt_init(prof_bt_t *bt, void **vec);
void prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max);
prof_thr_cnt_t *prof_lookup(prof_bt_t *bt);
void prof_idump(void);
bool prof_mdump(const char *filename);
void prof_gdump(void);
prof_tdata_t *prof_tdata_init(void);
void prof_boot0(void);
void prof_boot1(void);
bool prof_boot2(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
void prof_sample_threshold_update(prof_tdata_t *prof_tdata);
prof_thr_cnt_t *prof_alloc_prep(size_t size);
prof_ctx_t *prof_ctx_get(const void *ptr);
void prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
bool prof_sample_accum_update(size_t size);
void prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt);
void prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
    size_t old_size, prof_ctx_t *old_ctx);
void prof_free(const void *ptr, size_t size);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_))
JEMALLOC_INLINE void
prof_sample_threshold_update(prof_tdata_t *prof_tdata)
{
    uint64_t r;
    double u;

    /*
     * Compute sample threshold as a geometrically distributed random
     * variable with mean (2^opt_lg_prof_sample).
     *
     *                         __        __
     *                         |  log(u)  |                     1
     * prof_tdata->threshold = | -------- |, where p = -------------------
     *                         | log(1-p) |             opt_lg_prof_sample
     *                                                 2
     *
     * For more information on the math, see:
     *
     *   Non-Uniform Random Variate Generation
     *   Luc Devroye
     *   Springer-Verlag, New York, 1986
     *   pp 500
     *   (http://cg.scs.carleton.ca/~luc/rnbookindex.html)
     */
    prn64(r, 53, prof_tdata->prn_state,
        (uint64_t)6364136223846793005LLU, (uint64_t)1442695040888963407LLU);
    u = (double)r * (1.0 / 9007199254740992.0L);
    prof_tdata->threshold = (uint64_t)(log(u) /
        log(1.0 - (1.0 / (double)((uint64_t)1U << opt_lg_prof_sample))))
        + (uint64_t)1U;
}
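As a standalone illustration of the inverse-CDF computation above (not jemalloc code; drand48() stands in for prn64()), averaging many of these draws should come out close to 2^lg_sample bytes between samples:

#include <math.h>
#include <stdint.h>
#include <stdlib.h>

static uint64_t
sample_threshold(unsigned lg_sample)
{
    double u = drand48();   /* Uniform variate in [0, 1). */
    double p = 1.0 / (double)((uint64_t)1U << lg_sample);

    /* Geometric draw with mean roughly 2^lg_sample; the +1 keeps it >= 1. */
    return ((uint64_t)(log(u) / log(1.0 - p)) + 1);
}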
JEMALLOC_INLINE prof_thr_cnt_t *
prof_alloc_prep(size_t size)
{
#ifdef JEMALLOC_ENABLE_INLINE
    /* This function does not have its own stack frame, because it is inlined. */
#  define NIGNORE 1
#else
#  define NIGNORE 2
#endif
    prof_thr_cnt_t *ret;
    prof_tdata_t *prof_tdata;
    prof_bt_t bt;

    assert(size == s2u(size));

    prof_tdata = PROF_TCACHE_GET();
    if (prof_tdata == NULL) {
        prof_tdata = prof_tdata_init();
        if (prof_tdata == NULL)
            return (NULL);
    }

    if (opt_prof_active == false) {
        /* Sampling is currently inactive, so avoid sampling. */
        ret = (prof_thr_cnt_t *)(uintptr_t)1U;
    } else if (opt_lg_prof_sample == 0) {
        /*
         * Don't bother with sampling logic, since sampling interval is
         * 1.
         */
        bt_init(&bt, prof_tdata->vec);
        prof_backtrace(&bt, NIGNORE, prof_bt_max);
        ret = prof_lookup(&bt);
    } else {
        if (prof_tdata->threshold == 0) {
            /*
             * Initialize.  Seed the prng differently for each
             * thread.
             */
            prof_tdata->prn_state = (uint64_t)(uintptr_t)&size;
            prof_sample_threshold_update(prof_tdata);
        }

        /*
         * Determine whether to capture a backtrace based on whether
         * size is enough for prof_accum to reach
         * prof_tdata->threshold.  However, delay updating these
         * variables until prof_{m,re}alloc(), because we don't know
         * for sure that the allocation will succeed.
         *
         * Use subtraction rather than addition to avoid potential
         * integer overflow.
         */
        if (size >= prof_tdata->threshold - prof_tdata->accum) {
            bt_init(&bt, prof_tdata->vec);
            prof_backtrace(&bt, NIGNORE, prof_bt_max);
            ret = prof_lookup(&bt);
        } else
            ret = (prof_thr_cnt_t *)(uintptr_t)1U;
    }

    return (ret);
#undef NIGNORE
}
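The "use subtraction rather than addition" comment above is worth spelling out: because accum is always kept below threshold, threshold - accum cannot underflow, whereas accum + size can wrap around for a pathologically large request and silently miss a sample. A self-contained sketch (not jemalloc code):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    /* accum < threshold is an invariant, so threshold - accum never underflows. */
    uint64_t accum = 100, threshold = 4096;
    uint64_t size = UINT64_MAX;     /* Pathologically large request. */

    /* Safe form: compare size against the remaining distance to the threshold. */
    int trigger_safe = (size >= threshold - accum);
    /* Unsafe form: accum + size wraps to 99 and the sample would be skipped. */
    int trigger_unsafe = (accum + size >= threshold);

    printf("safe=%d unsafe=%d\n", trigger_safe, trigger_unsafe);
    return (0);
}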
JEMALLOC_INLINE prof_ctx_t *
prof_ctx_get(const void *ptr)
{
    prof_ctx_t *ret;
    arena_chunk_t *chunk;

    assert(ptr != NULL);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (chunk != ptr) {
        /* Region. */
        dassert(chunk->arena->magic == ARENA_MAGIC);

        ret = arena_prof_ctx_get(ptr);
    } else
        ret = huge_prof_ctx_get(ptr);

    return (ret);
}

JEMALLOC_INLINE void
prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
    arena_chunk_t *chunk;

    assert(ptr != NULL);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (chunk != ptr) {
        /* Region. */
        dassert(chunk->arena->magic == ARENA_MAGIC);

        arena_prof_ctx_set(ptr, ctx);
    } else
        huge_prof_ctx_set(ptr, ctx);
}
JEMALLOC_INLINE bool
prof_sample_accum_update(size_t size)
{
    prof_tdata_t *prof_tdata;

    /* Sampling logic is unnecessary if the interval is 1. */
    assert(opt_lg_prof_sample != 0);

    prof_tdata = PROF_TCACHE_GET();
    assert(prof_tdata != NULL);

    /* Take care to avoid integer overflow. */
    if (size >= prof_tdata->threshold - prof_tdata->accum) {
        prof_tdata->accum -= (prof_tdata->threshold - size);
        /* Compute new sample threshold. */
        prof_sample_threshold_update(prof_tdata);
        while (prof_tdata->accum >= prof_tdata->threshold) {
            prof_tdata->accum -= prof_tdata->threshold;
            prof_sample_threshold_update(prof_tdata);
        }
        return (false);
    } else {
        prof_tdata->accum += size;
        return (true);
    }
}
JEMALLOC_INLINE void
prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt)
{

    assert(ptr != NULL);
    assert(size == isalloc(ptr));

    if (opt_lg_prof_sample != 0) {
        if (prof_sample_accum_update(size)) {
            /*
             * Don't sample.  For malloc()-like allocation, it is
             * always possible to tell in advance how large an
             * object's usable size will be, so there should never
             * be a difference between the size passed to
             * prof_alloc_prep() and prof_malloc().
             */
            assert((uintptr_t)cnt == (uintptr_t)1U);
        }
    }

    if ((uintptr_t)cnt > (uintptr_t)1U) {
        prof_ctx_set(ptr, cnt->ctx);

        cnt->epoch++;
        /*********/
        mb_write();
        /*********/
        cnt->cnts.curobjs++;
        cnt->cnts.curbytes += size;
        if (opt_prof_accum) {
            cnt->cnts.accumobjs++;
            cnt->cnts.accumbytes += size;
        }
        /*********/
        mb_write();
        /*********/
        cnt->epoch++;
        /*********/
        mb_write();
        /*********/
    } else
        prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U);
}
JEMALLOC_INLINE void
prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
    size_t old_size, prof_ctx_t *old_ctx)
{
    prof_thr_cnt_t *told_cnt;

    assert(ptr != NULL || (uintptr_t)cnt <= (uintptr_t)1U);

    if (ptr != NULL) {
        assert(size == isalloc(ptr));
        if (opt_lg_prof_sample != 0) {
            if (prof_sample_accum_update(size)) {
                /*
                 * Don't sample.  The size passed to
                 * prof_alloc_prep() was larger than what
                 * actually got allocated, so a backtrace was
                 * captured for this allocation, even though
                 * its actual size was insufficient to cross
                 * the sample threshold.
                 */
                cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
            }
        }
    }

    if ((uintptr_t)old_ctx > (uintptr_t)1U) {
        told_cnt = prof_lookup(old_ctx->bt);
        if (told_cnt == NULL) {
            /*
             * It's too late to propagate OOM for this realloc(),
             * so operate directly on old_cnt->ctx->cnt_merged.
             */
            malloc_mutex_lock(&old_ctx->lock);
            old_ctx->cnt_merged.curobjs--;
            old_ctx->cnt_merged.curbytes -= old_size;
            malloc_mutex_unlock(&old_ctx->lock);
            told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
        }
    } else
        told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U;

    if ((uintptr_t)told_cnt > (uintptr_t)1U)
        told_cnt->epoch++;
    if ((uintptr_t)cnt > (uintptr_t)1U) {
        prof_ctx_set(ptr, cnt->ctx);
        cnt->epoch++;
    } else
        prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U);
    /*********/
    mb_write();
    /*********/
    if ((uintptr_t)told_cnt > (uintptr_t)1U) {
        told_cnt->cnts.curobjs--;
        told_cnt->cnts.curbytes -= old_size;
    }
    if ((uintptr_t)cnt > (uintptr_t)1U) {
        cnt->cnts.curobjs++;
        cnt->cnts.curbytes += size;
        if (opt_prof_accum) {
            cnt->cnts.accumobjs++;
            cnt->cnts.accumbytes += size;
        }
    }
    /*********/
    mb_write();
    /*********/
    if ((uintptr_t)told_cnt > (uintptr_t)1U)
        told_cnt->epoch++;
    if ((uintptr_t)cnt > (uintptr_t)1U)
        cnt->epoch++;
    /*********/
    mb_write(); /* Not strictly necessary. */
}
JEMALLOC_INLINE void
prof_free(const void *ptr, size_t size)
{
    prof_ctx_t *ctx = prof_ctx_get(ptr);

    if ((uintptr_t)ctx > (uintptr_t)1) {
        assert(size == isalloc(ptr));
        prof_thr_cnt_t *tcnt = prof_lookup(ctx->bt);

        if (tcnt != NULL) {
            tcnt->epoch++;
            /*********/
            mb_write();
            /*********/
            tcnt->cnts.curobjs--;
            tcnt->cnts.curbytes -= size;
            /*********/
            mb_write();
            /*********/
            tcnt->epoch++;
            /*********/
            mb_write();
            /*********/
        } else {
            /*
             * OOM during free() cannot be propagated, so operate
             * directly on cnt->ctx->cnt_merged.
             */
            malloc_mutex_lock(&ctx->lock);
            ctx->cnt_merged.curobjs--;
            ctx->cnt_merged.curbytes -= size;
            malloc_mutex_unlock(&ctx->lock);
        }
    }
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
#endif /* JEMALLOC_PROF */
deps/jemalloc/include/jemalloc/internal/ql.h
0 → 100644
View file @
a78e148b
/*
* List definitions.
*/
#define ql_head(a_type) \
struct { \
a_type *qlh_first; \
}
#define ql_head_initializer(a_head) {NULL}
#define ql_elm(a_type) qr(a_type)
/* List functions. */
#define ql_new(a_head) do { \
(a_head)->qlh_first = NULL; \
} while (0)
#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
#define ql_first(a_head) ((a_head)->qlh_first)
#define ql_last(a_head, a_field) \
((ql_first(a_head) != NULL) \
? qr_prev(ql_first(a_head), a_field) : NULL)
#define ql_next(a_head, a_elm, a_field) \
((ql_last(a_head, a_field) != (a_elm)) \
? qr_next((a_elm), a_field) : NULL)
#define ql_prev(a_head, a_elm, a_field) \
((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \
: NULL)
#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
qr_before_insert((a_qlelm), (a_elm), a_field); \
if (ql_first(a_head) == (a_qlelm)) { \
ql_first(a_head) = (a_elm); \
} \
} while (0)
#define ql_after_insert(a_qlelm, a_elm, a_field) \
qr_after_insert((a_qlelm), (a_elm), a_field)
#define ql_head_insert(a_head, a_elm, a_field) do { \
if (ql_first(a_head) != NULL) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = (a_elm); \
} while (0)
#define ql_tail_insert(a_head, a_elm, a_field) do { \
if (ql_first(a_head) != NULL) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = qr_next((a_elm), a_field); \
} while (0)
#define ql_remove(a_head, a_elm, a_field) do { \
if (ql_first(a_head) == (a_elm)) { \
ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
} \
if (ql_first(a_head) != (a_elm)) { \
qr_remove((a_elm), a_field); \
} else { \
ql_first(a_head) = NULL; \
} \
} while (0)
#define ql_head_remove(a_head, a_type, a_field) do { \
a_type *t = ql_first(a_head); \
ql_remove((a_head), t, a_field); \
} while (0)
#define ql_tail_remove(a_head, a_type, a_field) do { \
a_type *t = ql_last(a_head, a_field); \
ql_remove((a_head), t, a_field); \
} while (0)
#define ql_foreach(a_var, a_head, a_field) \
qr_foreach((a_var), ql_first(a_head), a_field)
#define ql_reverse_foreach(a_var, a_head, a_field) \
qr_reverse_foreach((a_var), ql_first(a_head), a_field)
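A brief usage sketch for the list macros above (not part of the sources; assumes qr.h is included as well): a type embeds its linkage with ql_elm(), the head type comes from ql_head(), and ql_foreach() walks the list in insertion order.

/* Usage sketch for ql.h. */
typedef struct widget_s widget_t;
struct widget_s {
    int id;
    ql_elm(widget_t) link;          /* Embedded list linkage. */
};

typedef ql_head(widget_t) widget_list_t;

static void
widget_list_demo(widget_t *a, widget_t *b)
{
    widget_list_t list;
    widget_t *w;

    ql_new(&list);
    ql_elm_new(a, link);
    ql_elm_new(b, link);
    ql_tail_insert(&list, a, link);
    ql_tail_insert(&list, b, link);

    ql_foreach(w, &list, link) {
        /* Visits a then b, i.e. insertion order. */
    }

    ql_remove(&list, a, link);
}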
deps/jemalloc/include/jemalloc/internal/qr.h
0 → 100644
View file @
a78e148b
/* Ring definitions. */
#define qr(a_type) \
struct { \
a_type *qre_next; \
a_type *qre_prev; \
}
/* Ring functions. */
#define qr_new(a_qr, a_field) do { \
(a_qr)->a_field.qre_next = (a_qr); \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)
#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)
#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)
#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
(a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \
(a_qr)->a_field.qre_next = (a_qrelm); \
(a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \
(a_qrelm)->a_field.qre_prev = (a_qr); \
} while (0)
#define qr_after_insert(a_qrelm, a_qr, a_field) \
do \
{ \
(a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \
(a_qr)->a_field.qre_prev = (a_qrelm); \
(a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \
(a_qrelm)->a_field.qre_next = (a_qr); \
} while (0)
#define qr_meld(a_qr_a, a_qr_b, a_field) do { \
void *t; \
(a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
(a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
t = (a_qr_a)->a_field.qre_prev; \
(a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \
(a_qr_b)->a_field.qre_prev = t; \
} while (0)
/* qr_meld() and qr_split() are functionally equivalent, so there's no need to
* have two copies of the code. */
#define qr_split(a_qr_a, a_qr_b, a_field) \
qr_meld((a_qr_a), (a_qr_b), a_field)
#define qr_remove(a_qr, a_field) do { \
(a_qr)->a_field.qre_prev->a_field.qre_next \
= (a_qr)->a_field.qre_next; \
(a_qr)->a_field.qre_next->a_field.qre_prev \
= (a_qr)->a_field.qre_prev; \
(a_qr)->a_field.qre_next = (a_qr); \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)
#define qr_foreach(var, a_qr, a_field) \
for ((var) = (a_qr); \
(var) != NULL; \
(var) = (((var)->a_field.qre_next != (a_qr)) \
? (var)->a_field.qre_next : NULL))
#define qr_reverse_foreach(var, a_qr, a_field) \
for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \
(var) != NULL; \
(var) = (((var) != (a_qr)) \
? (var)->a_field.qre_prev : NULL))
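The ring macros above are the primitive layer under ql.h: after qr_new() a node is a one-element circular list, inserts splice in O(1), and iteration stops when the ring wraps back to the starting node. A minimal sketch (not part of the sources):

/* Usage sketch for qr.h. */
typedef struct node_s node_t;
struct node_s {
    int val;
    qr(node_t) link;                /* Embedded ring linkage. */
};

static void
ring_demo(node_t *a, node_t *b, node_t *c)
{
    node_t *n;

    qr_new(a, link);                /* a is a one-element ring. */
    qr_new(b, link);
    qr_new(c, link);

    qr_after_insert(a, b, link);    /* Ring: a -> b -> a. */
    qr_before_insert(a, c, link);   /* Ring: a -> b -> c -> a. */

    qr_foreach(n, a, link) {
        /* Visits a, b, c, then stops when the ring wraps. */
    }

    qr_remove(b, link);             /* b becomes a one-element ring again. */
}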
deps/jemalloc/include/jemalloc/internal/rb.h
0 → 100644
View file @
a78e148b
/*-
*******************************************************************************
*
* cpp macro implementation of left-leaning 2-3 red-black trees. Parent
* pointers are not used, and color bits are stored in the least significant
* bit of right-child pointers (if RB_COMPACT is defined), thus making node
* linkage as compact as is possible for red-black trees.
*
* Usage:
*
* #include <stdint.h>
* #include <stdbool.h>
* #define NDEBUG // (Optional, see assert(3).)
* #include <assert.h>
* #define RB_COMPACT // (Optional, embed color bits in right-child pointers.)
* #include <rb.h>
* ...
*
*******************************************************************************
*/
#ifndef RB_H_
#define RB_H_
#if 0
__FBSDID("$FreeBSD: head/lib/libc/stdlib/rb.h 204493 2010-02-28 22:57:13Z jasone $");
#endif
#ifdef RB_COMPACT
/* Node structure. */
#define rb_node(a_type) \
struct { \
a_type *rbn_left; \
a_type *rbn_right_red; \
}
#else
#define rb_node(a_type) \
struct { \
a_type *rbn_left; \
a_type *rbn_right; \
bool rbn_red; \
}
#endif
/* Root structure. */
#define rb_tree(a_type) \
struct { \
a_type *rbt_root; \
a_type rbt_nil; \
}
/* Left accessors. */
#define rbtn_left_get(a_type, a_field, a_node) \
((a_node)->a_field.rbn_left)
#define rbtn_left_set(a_type, a_field, a_node, a_left) do { \
(a_node)->a_field.rbn_left = a_left; \
} while (0)
#ifdef RB_COMPACT
/* Right accessors. */
#define rbtn_right_get(a_type, a_field, a_node) \
((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \
& ((ssize_t)-2)))
#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \
(a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \
| (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \
} while (0)
/* Color accessors. */
#define rbtn_red_get(a_type, a_field, a_node) \
((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \
& ((size_t)1)))
#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \
(a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \
(a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \
| ((ssize_t)a_red)); \
} while (0)
#define rbtn_red_set(a_type, a_field, a_node) do { \
(a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \
(a_node)->a_field.rbn_right_red) | ((size_t)1)); \
} while (0)
#define rbtn_black_set(a_type, a_field, a_node) do { \
(a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \
(a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \
} while (0)
#else
/* Right accessors. */
#define rbtn_right_get(a_type, a_field, a_node) \
((a_node)->a_field.rbn_right)
#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \
(a_node)->a_field.rbn_right = a_right; \
} while (0)
/* Color accessors. */
#define rbtn_red_get(a_type, a_field, a_node) \
((a_node)->a_field.rbn_red)
#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \
(a_node)->a_field.rbn_red = (a_red); \
} while (0)
#define rbtn_red_set(a_type, a_field, a_node) do { \
(a_node)->a_field.rbn_red = true; \
} while (0)
#define rbtn_black_set(a_type, a_field, a_node) do { \
(a_node)->a_field.rbn_red = false; \
} while (0)
#endif
/* Node initializer. */
#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
rbtn_left_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \
rbtn_right_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \
rbtn_red_set(a_type, a_field, (a_node)); \
} while (0)
/* Tree initializer. */
#define rb_new(a_type, a_field, a_rbt) do { \
(a_rbt)->rbt_root = &(a_rbt)->rbt_nil; \
rbt_node_new(a_type, a_field, a_rbt, &(a_rbt)->rbt_nil); \
rbtn_black_set(a_type, a_field, &(a_rbt)->rbt_nil); \
} while (0)
/* Internal utility macros. */
#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \
(r_node) = (a_root); \
if ((r_node) != &(a_rbt)->rbt_nil) { \
for (; \
rbtn_left_get(a_type, a_field, (r_node)) != &(a_rbt)->rbt_nil;\
(r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \
} \
} \
} while (0)
#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \
(r_node) = (a_root); \
if ((r_node) != &(a_rbt)->rbt_nil) { \
for (; rbtn_right_get(a_type, a_field, (r_node)) != \
&(a_rbt)->rbt_nil; (r_node) = rbtn_right_get(a_type, a_field, \
(r_node))) { \
} \
} \
} while (0)
#define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \
(r_node) = rbtn_right_get(a_type, a_field, (a_node)); \
rbtn_right_set(a_type, a_field, (a_node), \
rbtn_left_get(a_type, a_field, (r_node))); \
rbtn_left_set(a_type, a_field, (r_node), (a_node)); \
} while (0)
#define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \
(r_node) = rbtn_left_get(a_type, a_field, (a_node)); \
rbtn_left_set(a_type, a_field, (a_node), \
rbtn_right_get(a_type, a_field, (r_node))); \
rbtn_right_set(a_type, a_field, (r_node), (a_node)); \
} while (0)
/*
* The rb_proto() macro generates function prototypes that correspond to the
* functions generated by an equivalently parameterized call to rb_gen().
*/
#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \
a_attr void \
a_prefix##new(a_rbt_type *rbtree); \
a_attr a_type * \
a_prefix##first(a_rbt_type *rbtree); \
a_attr a_type * \
a_prefix##last(a_rbt_type *rbtree); \
a_attr a_type * \
a_prefix##next(a_rbt_type *rbtree, a_type *node); \
a_attr a_type * \
a_prefix##prev(a_rbt_type *rbtree, a_type *node); \
a_attr a_type * \
a_prefix##search(a_rbt_type *rbtree, a_type *key); \
a_attr a_type * \
a_prefix##nsearch(a_rbt_type *rbtree, a_type *key); \
a_attr a_type * \
a_prefix##psearch(a_rbt_type *rbtree, a_type *key); \
a_attr void \
a_prefix##insert(a_rbt_type *rbtree, a_type *node); \
a_attr void \
a_prefix##remove(a_rbt_type *rbtree, a_type *node); \
a_attr a_type * \
a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \
a_rbt_type *, a_type *, void *), void *arg); \
a_attr a_type * \
a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg);
/*
* The rb_gen() macro generates a type-specific red-black tree implementation,
* based on the above cpp macros.
*
* Arguments:
*
* a_attr : Function attribute for generated functions (ex: static).
* a_prefix : Prefix for generated functions (ex: ex_).
* a_rb_type : Type for red-black tree data structure (ex: ex_t).
* a_type : Type for red-black tree node data structure (ex: ex_node_t).
* a_field : Name of red-black tree node linkage (ex: ex_link).
* a_cmp : Node comparison function name, with the following prototype:
* int (a_cmp *)(a_type *a_node, a_type *a_other);
* ^^^^^^
* or a_key
* Interpretation of comparision function return values:
* -1 : a_node < a_other
* 0 : a_node == a_other
* 1 : a_node > a_other
* In all cases, the a_node or a_key macro argument is the first
* argument to the comparison function, which makes it possible
* to write comparison functions that treat the first argument
* specially.
*
* Assuming the following setup:
*
* typedef struct ex_node_s ex_node_t;
* struct ex_node_s {
* rb_node(ex_node_t) ex_link;
* };
* typedef rb_tree(ex_node_t) ex_t;
* rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp)
*
* The following API is generated:
*
* static void
* ex_new(ex_t *extree);
* Description: Initialize a red-black tree structure.
* Args:
* extree: Pointer to an uninitialized red-black tree object.
*
* static ex_node_t *
* ex_first(ex_t *extree);
* static ex_node_t *
* ex_last(ex_t *extree);
* Description: Get the first/last node in extree.
* Args:
* extree: Pointer to an initialized red-black tree object.
* Ret: First/last node in extree, or NULL if extree is empty.
*
* static ex_node_t *
* ex_next(ex_t *extree, ex_node_t *node);
* static ex_node_t *
* ex_prev(ex_t *extree, ex_node_t *node);
* Description: Get node's successor/predecessor.
* Args:
* extree: Pointer to an initialized red-black tree object.
* node : A node in extree.
* Ret: node's successor/predecessor in extree, or NULL if node is
* last/first.
*
* static ex_node_t *
* ex_search(ex_t *extree, ex_node_t *key);
* Description: Search for node that matches key.
* Args:
* extree: Pointer to an initialized red-black tree object.
* key : Search key.
* Ret: Node in extree that matches key, or NULL if no match.
*
* static ex_node_t *
* ex_nsearch(ex_t *extree, ex_node_t *key);
* static ex_node_t *
* ex_psearch(ex_t *extree, ex_node_t *key);
* Description: Search for node that matches key. If no match is found,
* return what would be key's successor/predecessor, were
* key in extree.
* Args:
* extree: Pointer to an initialized red-black tree object.
* key : Search key.
* Ret: Node in extree that matches key, or if no match, hypothetical
* node's successor/predecessor (NULL if no successor/predecessor).
*
* static void
* ex_insert(ex_t *extree, ex_node_t *node);
* Description: Insert node into extree.
* Args:
* extree: Pointer to an initialized red-black tree object.
* node : Node to be inserted into extree.
*
* static void
* ex_remove(ex_t *extree, ex_node_t *node);
* Description: Remove node from extree.
* Args:
* extree: Pointer to an initialized red-black tree object.
* node : Node in extree to be removed.
*
* static ex_node_t *
* ex_iter(ex_t *extree, ex_node_t *start, ex_node_t *(*cb)(ex_t *,
* ex_node_t *, void *), void *arg);
* static ex_node_t *
* ex_reverse_iter(ex_t *extree, ex_node_t *start, ex_node *(*cb)(ex_t *,
* ex_node_t *, void *), void *arg);
* Description: Iterate forward/backward over extree, starting at node.
* If extree is modified, iteration must be immediately
* terminated by the callback function that causes the
* modification.
* Args:
* extree: Pointer to an initialized red-black tree object.
* start : Node at which to start iteration, or NULL to start at
* first/last node.
* cb : Callback function, which is called for each node during
* iteration. Under normal circumstances the callback function
* should return NULL, which causes iteration to continue. If a
* callback function returns non-NULL, iteration is immediately
* terminated and the non-NULL return value is returned by the
* iterator. This is useful for re-starting iteration after
* modifying extree.
* arg : Opaque pointer passed to cb().
* Ret: NULL if iteration completed, or the non-NULL callback return value
* that caused termination of the iteration.
*/
#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \
a_attr void \
a_prefix##new(a_rbt_type *rbtree) { \
rb_new(a_type, a_field, rbtree); \
} \
a_attr a_type * \
a_prefix##first(a_rbt_type *rbtree) { \
a_type *ret; \
rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
if (ret == &rbtree->rbt_nil) { \
ret = NULL; \
} \
return (ret); \
} \
a_attr a_type * \
a_prefix##last(a_rbt_type *rbtree) { \
a_type *ret; \
rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
if (ret == &rbtree->rbt_nil) { \
ret = NULL; \
} \
return (ret); \
} \
a_attr a_type * \
a_prefix##next(a_rbt_type *rbtree, a_type *node) { \
a_type *ret; \
if (rbtn_right_get(a_type, a_field, node) != &rbtree->rbt_nil) { \
rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \
a_field, node), ret); \
} else { \
a_type *tnode = rbtree->rbt_root; \
assert(tnode != &rbtree->rbt_nil); \
ret = &rbtree->rbt_nil; \
while (true) { \
int cmp = (a_cmp)(node, tnode); \
if (cmp < 0) { \
ret = tnode; \
tnode = rbtn_left_get(a_type, a_field, tnode); \
} else if (cmp > 0) { \
tnode = rbtn_right_get(a_type, a_field, tnode); \
} else { \
break; \
} \
assert(tnode != &rbtree->rbt_nil); \
} \
} \
if (ret == &rbtree->rbt_nil) { \
ret = (NULL); \
} \
return (ret); \
} \
a_attr a_type * \
a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \
a_type *ret; \
if (rbtn_left_get(a_type, a_field, node) != &rbtree->rbt_nil) { \
rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \
a_field, node), ret); \
} else { \
a_type *tnode = rbtree->rbt_root; \
assert(tnode != &rbtree->rbt_nil); \
ret = &rbtree->rbt_nil; \
while (true) { \
int cmp = (a_cmp)(node, tnode); \
if (cmp < 0) { \
tnode = rbtn_left_get(a_type, a_field, tnode); \
} else if (cmp > 0) { \
ret = tnode; \
tnode = rbtn_right_get(a_type, a_field, tnode); \
} else { \
break; \
} \
assert(tnode != &rbtree->rbt_nil); \
} \
} \
if (ret == &rbtree->rbt_nil) { \
ret = (NULL); \
} \
return (ret); \
} \
a_attr a_type * \
a_prefix##search(a_rbt_type *rbtree, a_type *key) { \
a_type *ret; \
int cmp; \
ret = rbtree->rbt_root; \
while (ret != &rbtree->rbt_nil \
&& (cmp = (a_cmp)(key, ret)) != 0) { \
if (cmp < 0) { \
ret = rbtn_left_get(a_type, a_field, ret); \
} else { \
ret = rbtn_right_get(a_type, a_field, ret); \
} \
} \
if (ret == &rbtree->rbt_nil) { \
ret = (NULL); \
} \
return (ret); \
} \
a_attr a_type * \
a_prefix##nsearch(a_rbt_type *rbtree, a_type *key) { \
a_type *ret; \
a_type *tnode = rbtree->rbt_root; \
ret = &rbtree->rbt_nil; \
while (tnode != &rbtree->rbt_nil) { \
int cmp = (a_cmp)(key, tnode); \
if (cmp < 0) { \
ret = tnode; \
tnode = rbtn_left_get(a_type, a_field, tnode); \
} else if (cmp > 0) { \
tnode = rbtn_right_get(a_type, a_field, tnode); \
} else { \
ret = tnode; \
break; \
} \
} \
if (ret == &rbtree->rbt_nil) { \
ret = (NULL); \
} \
return (ret); \
} \
a_attr a_type * \
a_prefix##psearch(a_rbt_type *rbtree, a_type *key) { \
a_type *ret; \
a_type *tnode = rbtree->rbt_root; \
ret = &rbtree->rbt_nil; \
while (tnode != &rbtree->rbt_nil) { \
int cmp = (a_cmp)(key, tnode); \
if (cmp < 0) { \
tnode = rbtn_left_get(a_type, a_field, tnode); \
} else if (cmp > 0) { \
ret = tnode; \
tnode = rbtn_right_get(a_type, a_field, tnode); \
} else { \
ret = tnode; \
break; \
} \
} \
if (ret == &rbtree->rbt_nil) { \
ret = (NULL); \
} \
return (ret); \
} \
a_attr void \
a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
struct { \
a_type *node; \
int cmp; \
} path[sizeof(void *) << 4], *pathp; \
rbt_node_new(a_type, a_field, rbtree, node); \
/* Wind. */ \
path->node = rbtree->rbt_root; \
for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \
int cmp = pathp->cmp = a_cmp(node, pathp->node); \
assert(cmp != 0); \
if (cmp < 0) { \
pathp[1].node = rbtn_left_get(a_type, a_field, \
pathp->node); \
} else { \
pathp[1].node = rbtn_right_get(a_type, a_field, \
pathp->node); \
} \
} \
pathp->node = node; \
/* Unwind. */ \
for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \
a_type *cnode = pathp->node; \
if (pathp->cmp < 0) { \
a_type *left = pathp[1].node; \
rbtn_left_set(a_type, a_field, cnode, left); \
if (rbtn_red_get(a_type, a_field, left)) { \
a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
if (rbtn_red_get(a_type, a_field, leftleft)) { \
/* Fix up 4-node. */ \
a_type *tnode; \
rbtn_black_set(a_type, a_field, leftleft); \
rbtn_rotate_right(a_type, a_field, cnode, tnode); \
cnode = tnode; \
} \
} else { \
return; \
} \
} else { \
a_type *right = pathp[1].node; \
rbtn_right_set(a_type, a_field, cnode, right); \
if (rbtn_red_get(a_type, a_field, right)) { \
a_type *left = rbtn_left_get(a_type, a_field, cnode); \
if (rbtn_red_get(a_type, a_field, left)) { \
/* Split 4-node. */ \
rbtn_black_set(a_type, a_field, left); \
rbtn_black_set(a_type, a_field, right); \
rbtn_red_set(a_type, a_field, cnode); \
} else { \
/* Lean left. */ \
a_type *tnode; \
bool tred = rbtn_red_get(a_type, a_field, cnode); \
rbtn_rotate_left(a_type, a_field, cnode, tnode); \
rbtn_color_set(a_type, a_field, tnode, tred); \
rbtn_red_set(a_type, a_field, cnode); \
cnode = tnode; \
} \
} else { \
return; \
} \
} \
pathp->node = cnode; \
} \
/* Set root, and make it black. */ \
rbtree->rbt_root = path->node; \
rbtn_black_set(a_type, a_field, rbtree->rbt_root); \
} \
a_attr void \
a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
struct { \
a_type *node; \
int cmp; \
} *pathp, *nodep, path[sizeof(void *) << 4]; \
/* Wind. */ \
nodep = NULL; /* Silence compiler warning. */ \
path->node = rbtree->rbt_root; \
for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \
int cmp = pathp->cmp = a_cmp(node, pathp->node); \
if (cmp < 0) { \
pathp[1].node = rbtn_left_get(a_type, a_field, \
pathp->node); \
} else { \
pathp[1].node = rbtn_right_get(a_type, a_field, \
pathp->node); \
if (cmp == 0) { \
/* Find node's successor, in preparation for swap. */ \
pathp->cmp = 1; \
nodep = pathp; \
for (pathp++; pathp->node != &rbtree->rbt_nil; \
pathp++) { \
pathp->cmp = -1; \
pathp[1].node = rbtn_left_get(a_type, a_field, \
pathp->node); \
} \
break; \
} \
} \
} \
assert(nodep->node == node); \
pathp--; \
if (pathp->node != node) { \
/* Swap node with its successor. */ \
bool tred = rbtn_red_get(a_type, a_field, pathp->node); \
rbtn_color_set(a_type, a_field, pathp->node, \
rbtn_red_get(a_type, a_field, node)); \
rbtn_left_set(a_type, a_field, pathp->node, \
rbtn_left_get(a_type, a_field, node)); \
/* If node's successor is its right child, the following code */ \
/* will do the wrong thing for the right child pointer. */ \
/* However, it doesn't matter, because the pointer will be */ \
/* properly set when the successor is pruned. */ \
rbtn_right_set(a_type, a_field, pathp->node, \
rbtn_right_get(a_type, a_field, node)); \
rbtn_color_set(a_type, a_field, node, tred); \
/* The pruned leaf node's child pointers are never accessed */ \
/* again, so don't bother setting them to nil. */ \
nodep->node = pathp->node; \
pathp->node = node; \
if (nodep == path) { \
rbtree->rbt_root = nodep->node; \
} else { \
if (nodep[-1].cmp < 0) { \
rbtn_left_set(a_type, a_field, nodep[-1].node, \
nodep->node); \
} else { \
rbtn_right_set(a_type, a_field, nodep[-1].node, \
nodep->node); \
} \
} \
} else { \
a_type *left = rbtn_left_get(a_type, a_field, node); \
if (left != &rbtree->rbt_nil) { \
/* node has no successor, but it has a left child. */ \
/* Splice node out, without losing the left child. */ \
assert(rbtn_red_get(a_type, a_field, node) == false); \
assert(rbtn_red_get(a_type, a_field, left)); \
rbtn_black_set(a_type, a_field, left); \
if (pathp == path) { \
rbtree->rbt_root = left; \
} else { \
if (pathp[-1].cmp < 0) { \
rbtn_left_set(a_type, a_field, pathp[-1].node, \
left); \
} else { \
rbtn_right_set(a_type, a_field, pathp[-1].node, \
left); \
} \
} \
return; \
} else if (pathp == path) { \
/* The tree only contained one node. */ \
rbtree->rbt_root = &rbtree->rbt_nil; \
return; \
} \
} \
if (rbtn_red_get(a_type, a_field, pathp->node)) { \
/* Prune red node, which requires no fixup. */ \
assert(pathp[-1].cmp < 0); \
rbtn_left_set(a_type, a_field, pathp[-1].node, \
&rbtree->rbt_nil); \
return; \
} \
/* The node to be pruned is black, so unwind until balance is */ \
/* restored. */ \
pathp->node = &rbtree->rbt_nil; \
for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \
assert(pathp->cmp != 0); \
if (pathp->cmp < 0) { \
rbtn_left_set(a_type, a_field, pathp->node, \
pathp[1].node); \
assert(rbtn_red_get(a_type, a_field, pathp[1].node) \
== false); \
if (rbtn_red_get(a_type, a_field, pathp->node)) { \
a_type *right = rbtn_right_get(a_type, a_field, \
pathp->node); \
a_type *rightleft = rbtn_left_get(a_type, a_field, \
right); \
a_type *tnode; \
if (rbtn_red_get(a_type, a_field, rightleft)) { \
/* In the following diagrams, ||, //, and \\ */ \
/* indicate the path to the removed node. */ \
/* */ \
/* || */ \
/* pathp(r) */ \
/* // \ */ \
/* (b) (b) */ \
/* / */ \
/* (r) */ \
/* */ \
rbtn_black_set(a_type, a_field, pathp->node); \
rbtn_rotate_right(a_type, a_field, right, tnode); \
rbtn_right_set(a_type, a_field, pathp->node, tnode);\
rbtn_rotate_left(a_type, a_field, pathp->node, \
tnode); \
} else { \
/* || */ \
/* pathp(r) */ \
/* // \ */ \
/* (b) (b) */ \
/* / */ \
/* (b) */ \
/* */ \
rbtn_rotate_left(a_type, a_field, pathp->node, \
tnode); \
} \
/* Balance restored, but rotation modified subtree */ \
/* root. */ \
assert((uintptr_t)pathp > (uintptr_t)path); \
if (pathp[-1].cmp < 0) { \
rbtn_left_set(a_type, a_field, pathp[-1].node, \
tnode); \
} else { \
rbtn_right_set(a_type, a_field, pathp[-1].node, \
tnode); \
} \
return; \
} else { \
a_type *right = rbtn_right_get(a_type, a_field, \
pathp->node); \
a_type *rightleft = rbtn_left_get(a_type, a_field, \
right); \
if (rbtn_red_get(a_type, a_field, rightleft)) { \
/* || */ \
/* pathp(b) */ \
/* // \ */ \
/* (b) (b) */ \
/* / */ \
/* (r) */ \
a_type *tnode; \
rbtn_black_set(a_type, a_field, rightleft); \
rbtn_rotate_right(a_type, a_field, right, tnode); \
rbtn_right_set(a_type, a_field, pathp->node, tnode);\
rbtn_rotate_left(a_type, a_field, pathp->node, \
tnode); \
/* Balance restored, but rotation modified */ \
/* subtree root, which may actually be the tree */ \
/* root. */ \
if (pathp == path) { \
/* Set root. */ \
rbtree->rbt_root = tnode; \
} else { \
if (pathp[-1].cmp < 0) { \
rbtn_left_set(a_type, a_field, \
pathp[-1].node, tnode); \
} else { \
rbtn_right_set(a_type, a_field, \
pathp[-1].node, tnode); \
} \
} \
return; \
} else { \
/* || */ \
/* pathp(b) */ \
/* // \ */ \
/* (b) (b) */ \
/* / */ \
/* (b) */ \
a_type *tnode; \
rbtn_red_set(a_type, a_field, pathp->node); \
rbtn_rotate_left(a_type, a_field, pathp->node, \
tnode); \
pathp->node = tnode; \
} \
} \
} else { \
a_type *left; \
rbtn_right_set(a_type, a_field, pathp->node, \
pathp[1].node); \
left = rbtn_left_get(a_type, a_field, pathp->node); \
if (rbtn_red_get(a_type, a_field, left)) { \
a_type *tnode; \
a_type *leftright = rbtn_right_get(a_type, a_field, \
left); \
a_type *leftrightleft = rbtn_left_get(a_type, a_field, \
leftright); \
if (rbtn_red_get(a_type, a_field, leftrightleft)) { \
/* || */ \
/* pathp(b) */ \
/* / \\ */ \
/* (r) (b) */ \
/* \ */ \
/* (b) */ \
/* / */ \
/* (r) */ \
a_type *unode; \
rbtn_black_set(a_type, a_field, leftrightleft); \
rbtn_rotate_right(a_type, a_field, pathp->node, \
unode); \
rbtn_rotate_right(a_type, a_field, pathp->node, \
tnode); \
rbtn_right_set(a_type, a_field, unode, tnode); \
rbtn_rotate_left(a_type, a_field, unode, tnode); \
} else { \
/* || */ \
/* pathp(b) */ \
/* / \\ */ \
/* (r) (b) */ \
/* \ */ \
/* (b) */ \
/* / */ \
/* (b) */ \
assert(leftright != &rbtree->rbt_nil); \
rbtn_red_set(a_type, a_field, leftright); \
rbtn_rotate_right(a_type, a_field, pathp->node, \
tnode); \
rbtn_black_set(a_type, a_field, tnode); \
} \
/* Balance restored, but rotation modified subtree */ \
/* root, which may actually be the tree root. */ \
if (pathp == path) { \
/* Set root. */ \
rbtree->rbt_root = tnode; \
} else { \
if (pathp[-1].cmp < 0) { \
rbtn_left_set(a_type, a_field, pathp[-1].node, \
tnode); \
} else { \
rbtn_right_set(a_type, a_field, pathp[-1].node, \
tnode); \
} \
} \
return; \
} else if (rbtn_red_get(a_type, a_field, pathp->node)) { \
a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
if (rbtn_red_get(a_type, a_field, leftleft)) { \
/* || */ \
/* pathp(r) */ \
/* / \\ */ \
/* (b) (b) */ \
/* / */ \
/* (r) */ \
a_type *tnode; \
rbtn_black_set(a_type, a_field, pathp->node); \
rbtn_red_set(a_type, a_field, left); \
rbtn_black_set(a_type, a_field, leftleft); \
rbtn_rotate_right(a_type, a_field, pathp->node, \
tnode); \
/* Balance restored, but rotation modified */ \
/* subtree root. */ \
assert((uintptr_t)pathp > (uintptr_t)path); \
if (pathp[-1].cmp < 0) { \
rbtn_left_set(a_type, a_field, pathp[-1].node, \
tnode); \
} else { \
rbtn_right_set(a_type, a_field, pathp[-1].node, \
tnode); \
} \
return; \
} else { \
/* || */ \
/* pathp(r) */ \
/* / \\ */ \
/* (b) (b) */ \
/* / */ \
/* (b) */ \
rbtn_red_set(a_type, a_field, left); \
rbtn_black_set(a_type, a_field, pathp->node); \
/* Balance restored. */ \
return; \
} \
} else { \
a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
if (rbtn_red_get(a_type, a_field, leftleft)) { \
/* || */ \
/* pathp(b) */ \
/* / \\ */ \
/* (b) (b) */ \
/* / */ \
/* (r) */ \
a_type *tnode; \
rbtn_black_set(a_type, a_field, leftleft); \
rbtn_rotate_right(a_type, a_field, pathp->node, \
tnode); \
/* Balance restored, but rotation modified */ \
/* subtree root, which may actually be the tree */ \
/* root. */ \
if (pathp == path) { \
/* Set root. */ \
rbtree->rbt_root = tnode; \
} else { \
if (pathp[-1].cmp < 0) { \
rbtn_left_set(a_type, a_field, \
pathp[-1].node, tnode); \
} else { \
rbtn_right_set(a_type, a_field, \
pathp[-1].node, tnode); \
} \
} \
return; \
} else { \
/* || */ \
/* pathp(b) */ \
/* / \\ */ \
/* (b) (b) */ \
/* / */ \
/* (b) */ \
rbtn_red_set(a_type, a_field, left); \
} \
} \
} \
} \
/* Set root. */ \
rbtree->rbt_root = path->node; \
assert(rbtn_red_get(a_type, a_field, rbtree->rbt_root) == false); \
} \
a_attr a_type * \
a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
if (node == &rbtree->rbt_nil) { \
return (&rbtree->rbt_nil); \
} else { \
a_type *ret; \
if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \
a_field, node), cb, arg)) != &rbtree->rbt_nil \
|| (ret = cb(rbtree, node, arg)) != NULL) { \
return (ret); \
} \
return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
a_field, node), cb, arg)); \
} \
} \
a_attr a_type * \
a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
int cmp = a_cmp(start, node); \
if (cmp < 0) { \
a_type *ret; \
if ((ret = a_prefix##iter_start(rbtree, start, \
rbtn_left_get(a_type, a_field, node), cb, arg)) != \
&rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \
return (ret); \
} \
return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
a_field, node), cb, arg)); \
} else if (cmp > 0) { \
return (a_prefix##iter_start(rbtree, start, \
rbtn_right_get(a_type, a_field, node), cb, arg)); \
} else { \
a_type *ret; \
if ((ret = cb(rbtree, node, arg)) != NULL) { \
return (ret); \
} \
return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
a_field, node), cb, arg)); \
} \
} \
a_attr a_type * \
a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \
a_rbt_type *, a_type *, void *), void *arg) { \
a_type *ret; \
if (start != NULL) { \
ret = a_prefix##iter_start(rbtree, start, rbtree->rbt_root, \
cb, arg); \
} else { \
ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\
} \
if (ret == &rbtree->rbt_nil) { \
ret = NULL; \
} \
return (ret); \
} \
a_attr a_type * \
a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
if (node == &rbtree->rbt_nil) { \
return (&rbtree->rbt_nil); \
} else { \
a_type *ret; \
if ((ret = a_prefix##reverse_iter_recurse(rbtree, \
rbtn_right_get(a_type, a_field, node), cb, arg)) != \
&rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \
return (ret); \
} \
return (a_prefix##reverse_iter_recurse(rbtree, \
rbtn_left_get(a_type, a_field, node), cb, arg)); \
} \
} \
a_attr a_type * \
a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \
a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \
void *arg) { \
int cmp = a_cmp(start, node); \
if (cmp > 0) { \
a_type *ret; \
if ((ret = a_prefix##reverse_iter_start(rbtree, start, \
rbtn_right_get(a_type, a_field, node), cb, arg)) != \
&rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \
return (ret); \
} \
return (a_prefix##reverse_iter_recurse(rbtree, \
rbtn_left_get(a_type, a_field, node), cb, arg)); \
} else if (cmp < 0) { \
return (a_prefix##reverse_iter_start(rbtree, start, \
rbtn_left_get(a_type, a_field, node), cb, arg)); \
} else { \
a_type *ret; \
if ((ret = cb(rbtree, node, arg)) != NULL) { \
return (ret); \
} \
return (a_prefix##reverse_iter_recurse(rbtree, \
rbtn_left_get(a_type, a_field, node), cb, arg)); \
} \
} \
a_attr a_type * \
a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
a_type *ret; \
if (start != NULL) { \
ret = a_prefix##reverse_iter_start(rbtree, start, \
rbtree->rbt_root, cb, arg); \
} else { \
ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \
cb, arg); \
} \
if (ret == &rbtree->rbt_nil) { \
ret = NULL; \
} \
return (ret); \
}
#endif
/* RB_H_ */
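Tying the rb_gen() documentation together, a compact usage sketch along the lines of the ex_ example in the comment above (not part of the sources; the include path is illustrative):

/* Usage sketch for rb.h. */
#include <stdint.h>
#include <stdbool.h>
#include <assert.h>
#define RB_COMPACT
#include "rb.h"

typedef struct ex_node_s ex_node_t;
struct ex_node_s {
    int key;
    rb_node(ex_node_t) ex_link;     /* Embedded tree linkage. */
};

static int
ex_cmp(ex_node_t *a, ex_node_t *b)
{

    return ((a->key > b->key) - (a->key < b->key));
}

typedef rb_tree(ex_node_t) ex_t;
rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp)

static void
ex_demo(void)
{
    ex_t tree;
    ex_node_t n1 = {.key = 1}, n2 = {.key = 2}, lookup = {.key = 2};

    ex_new(&tree);
    ex_insert(&tree, &n1);
    ex_insert(&tree, &n2);
    assert(ex_search(&tree, &lookup) == &n2);   /* Lookup by key. */
    ex_remove(&tree, &n1);
}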