ruanhaishen / redis · Commit fb1f4f4e (Unverified)

Authored Oct 25, 2019 by Wander Hillen; committed by GitHub on Oct 25, 2019.

    Merge branch 'unstable' into minor-typos

Parents: dda8cc18, 6e98214f
Changes: 203 (too many changes to show; to preserve performance only 203 of 203+ files are displayed)
deps/jemalloc/src/chunk_dss.c (deleted, 100644 → 0)
#define JEMALLOC_CHUNK_DSS_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */

const char *dss_prec_names[] = {
    "disabled",
    "primary",
    "secondary",
    "N/A"
};

/* Current dss precedence default, used when creating new arenas. */
static dss_prec_t dss_prec_default = DSS_PREC_DEFAULT;

/*
 * Protects sbrk() calls.  This avoids malloc races among threads, though it
 * does not protect against races with threads that call sbrk() directly.
 */
static malloc_mutex_t dss_mtx;

/* Base address of the DSS. */
static void *dss_base;
/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
static void *dss_prev;
/* Current upper limit on DSS addresses. */
static void *dss_max;

/******************************************************************************/

static void *
chunk_dss_sbrk(intptr_t increment)
{
#ifdef JEMALLOC_DSS
    return (sbrk(increment));
#else
    not_implemented();
    return (NULL);
#endif
}

dss_prec_t
chunk_dss_prec_get(void)
{
    dss_prec_t ret;

    if (!have_dss)
        return (dss_prec_disabled);
    malloc_mutex_lock(&dss_mtx);
    ret = dss_prec_default;
    malloc_mutex_unlock(&dss_mtx);
    return (ret);
}

bool
chunk_dss_prec_set(dss_prec_t dss_prec)
{
    if (!have_dss)
        return (dss_prec != dss_prec_disabled);
    malloc_mutex_lock(&dss_mtx);
    dss_prec_default = dss_prec;
    malloc_mutex_unlock(&dss_mtx);
    return (false);
}

void *
chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
    bool *zero, bool *commit)
{
    cassert(have_dss);
    assert(size > 0 && (size & chunksize_mask) == 0);
    assert(alignment > 0 && (alignment & chunksize_mask) == 0);

    /*
     * sbrk() uses a signed increment argument, so take care not to
     * interpret a huge allocation request as a negative increment.
     */
    if ((intptr_t)size < 0)
        return (NULL);

    malloc_mutex_lock(&dss_mtx);
    if (dss_prev != (void *)-1) {
        /*
         * The loop is necessary to recover from races with other
         * threads that are using the DSS for something other than
         * malloc.
         */
        do {
            void *ret, *cpad, *dss_next;
            size_t gap_size, cpad_size;
            intptr_t incr;

            /* Avoid an unnecessary system call. */
            if (new_addr != NULL && dss_max != new_addr)
                break;

            /* Get the current end of the DSS. */
            dss_max = chunk_dss_sbrk(0);

            /* Make sure the earlier condition still holds. */
            if (new_addr != NULL && dss_max != new_addr)
                break;

            /*
             * Calculate how much padding is necessary to
             * chunk-align the end of the DSS.
             */
            gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) &
                chunksize_mask;
            /*
             * Compute how much chunk-aligned pad space (if any) is
             * necessary to satisfy alignment.  This space can be
             * recycled for later use.
             */
            cpad = (void *)((uintptr_t)dss_max + gap_size);
            ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max,
                alignment);
            cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
            dss_next = (void *)((uintptr_t)ret + size);
            if ((uintptr_t)ret < (uintptr_t)dss_max ||
                (uintptr_t)dss_next < (uintptr_t)dss_max) {
                /* Wrap-around. */
                malloc_mutex_unlock(&dss_mtx);
                return (NULL);
            }
            incr = gap_size + cpad_size + size;
            dss_prev = chunk_dss_sbrk(incr);
            if (dss_prev == dss_max) {
                /* Success. */
                dss_max = dss_next;
                malloc_mutex_unlock(&dss_mtx);
                if (cpad_size != 0) {
                    chunk_hooks_t chunk_hooks =
                        CHUNK_HOOKS_INITIALIZER;
                    chunk_dalloc_wrapper(arena, &chunk_hooks,
                        cpad, cpad_size, true);
                }
                if (*zero) {
                    JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
                        ret, size);
                    memset(ret, 0, size);
                }
                if (!*commit)
                    *commit = pages_decommit(ret, size);
                return (ret);
            }
        } while (dss_prev != (void *)-1);
    }
    malloc_mutex_unlock(&dss_mtx);

    return (NULL);
}

bool
chunk_in_dss(void *chunk)
{
    bool ret;

    cassert(have_dss);

    malloc_mutex_lock(&dss_mtx);
    if ((uintptr_t)chunk >= (uintptr_t)dss_base
        && (uintptr_t)chunk < (uintptr_t)dss_max)
        ret = true;
    else
        ret = false;
    malloc_mutex_unlock(&dss_mtx);

    return (ret);
}

bool
chunk_dss_boot(void)
{
    cassert(have_dss);

    if (malloc_mutex_init(&dss_mtx))
        return (true);
    dss_base = chunk_dss_sbrk(0);
    dss_prev = dss_base;
    dss_max = dss_base;

    return (false);
}

void
chunk_dss_prefork(void)
{
    if (have_dss)
        malloc_mutex_prefork(&dss_mtx);
}

void
chunk_dss_postfork_parent(void)
{
    if (have_dss)
        malloc_mutex_postfork_parent(&dss_mtx);
}

void
chunk_dss_postfork_child(void)
{
    if (have_dss)
        malloc_mutex_postfork_child(&dss_mtx);
}

/******************************************************************************/
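For reference, a minimal standalone sketch of the break-alignment arithmetic that chunk_alloc_dss() performs before growing the data segment: chunk-align the current break, reserve a recyclable pad up to the requested alignment, and grow the break by gap + pad + request in one increment. The 2 MiB chunk size, the fake break address and the align_up() helper below are illustrative assumptions, not values taken from the file above.

#include <stdint.h>
#include <stdio.h>

#define CHUNK ((uintptr_t)(2 * 1024 * 1024))   /* assumed 2 MiB chunk size */

/* Round p up to a power-of-two alignment. */
static uintptr_t align_up(uintptr_t p, uintptr_t align) {
    return (p + align - 1) & ~(align - 1);
}

int main(void) {
    uintptr_t dss_max   = 0x40123456;          /* pretend current break */
    uintptr_t alignment = 4 * CHUNK;           /* stricter than chunk alignment */
    uintptr_t size      = 4 * CHUNK;           /* chunk-aligned request */

    /* Padding needed to chunk-align the end of the DSS. */
    uintptr_t gap_size  = (CHUNK - (dss_max & (CHUNK - 1))) & (CHUNK - 1);
    /* Chunk-aligned pad space that could be recycled for later use. */
    uintptr_t cpad      = dss_max + gap_size;
    /* Address that would actually be returned to the caller. */
    uintptr_t ret       = align_up(dss_max, alignment);
    uintptr_t cpad_size = ret - cpad;
    /* Total amount the break must grow: gap + recyclable pad + request. */
    uintptr_t incr      = gap_size + cpad_size + size;

    printf("gap=%#lx cpad_size=%#lx ret=%#lx incr=%#lx\n",
        (unsigned long)gap_size, (unsigned long)cpad_size,
        (unsigned long)ret, (unsigned long)incr);
    return 0;
}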
deps/jemalloc/src/chunk_mmap.c (deleted, 100644 → 0)
#define JEMALLOC_CHUNK_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

static void *
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
{
    void *ret;
    size_t alloc_size;

    alloc_size = size + alignment - PAGE;
    /* Beware size_t wrap-around. */
    if (alloc_size < size)
        return (NULL);
    do {
        void *pages;
        size_t leadsize;
        pages = pages_map(NULL, alloc_size);
        if (pages == NULL)
            return (NULL);
        leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
            (uintptr_t)pages;
        ret = pages_trim(pages, alloc_size, leadsize, size);
    } while (ret == NULL);

    assert(ret != NULL);
    *zero = true;
    if (!*commit)
        *commit = pages_decommit(ret, size);
    return (ret);
}

void *
chunk_alloc_mmap(size_t size, size_t alignment, bool *zero, bool *commit)
{
    void *ret;
    size_t offset;

    /*
     * Ideally, there would be a way to specify alignment to mmap() (like
     * NetBSD has), but in the absence of such a feature, we have to work
     * hard to efficiently create aligned mappings.  The reliable, but
     * slow method is to create a mapping that is over-sized, then trim the
     * excess.  However, that always results in one or two calls to
     * pages_unmap().
     *
     * Optimistically try mapping precisely the right amount before falling
     * back to the slow method, with the expectation that the optimistic
     * approach works most of the time.
     */

    assert(alignment != 0);
    assert((alignment & chunksize_mask) == 0);

    ret = pages_map(NULL, size);
    if (ret == NULL)
        return (NULL);
    offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
    if (offset != 0) {
        pages_unmap(ret, size);
        return (chunk_alloc_mmap_slow(size, alignment, zero, commit));
    }

    assert(ret != NULL);
    *zero = true;
    if (!*commit)
        *commit = pages_decommit(ret, size);
    return (ret);
}

bool
chunk_dalloc_mmap(void *chunk, size_t size)
{
    if (config_munmap)
        pages_unmap(chunk, size);

    return (!config_munmap);
}
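For reference, a minimal sketch of the strategy chunk_alloc_mmap() uses: ask for an exactly-sized anonymous mapping first, and only fall back to over-allocating and trimming the excess when the kernel returns a misaligned address. aligned_mmap() is a hypothetical helper name; size and alignment are assumed to be page multiples with alignment a power of two, and error handling is reduced to NULL returns.

#define _DEFAULT_SOURCE   /* for MAP_ANONYMOUS with glibc */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

static void *aligned_mmap(size_t size, size_t alignment) {
    /* Optimistic path: exactly-sized mapping, hope it is already aligned. */
    void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        return NULL;
    if (((uintptr_t)p & (alignment - 1)) == 0)
        return p;

    /* Slow path: over-allocate, then unmap the leading and trailing excess. */
    munmap(p, size);
    size_t alloc_size = size + alignment;
    p = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        return NULL;
    uintptr_t base = (uintptr_t)p;
    uintptr_t ret  = (base + alignment - 1) & ~((uintptr_t)alignment - 1);
    size_t lead    = ret - base;
    size_t trail   = alloc_size - lead - size;
    if (lead != 0)
        munmap(p, lead);
    if (trail != 0)
        munmap((void *)(ret + size), trail);
    return (void *)ret;
}

int main(void) {
    /* Request 2 MiB aligned to 2 MiB. */
    void *p = aligned_mmap((size_t)1 << 21, (size_t)1 << 21);
    assert(p != NULL && ((uintptr_t)p & (((uintptr_t)1 << 21) - 1)) == 0);
    munmap(p, (size_t)1 << 21);
    return 0;
}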
deps/jemalloc/src/ckh.c
@@ -34,8 +34,18 @@
  * respectively.
  *
  ******************************************************************************/
-#define JEMALLOC_CKH_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#define JEMALLOC_CKH_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/ckh.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/hash.h"
+#include "jemalloc/internal/malloc_io.h"
+#include "jemalloc/internal/prng.h"
+#include "jemalloc/internal/util.h"
 
 /******************************************************************************/
 /* Function prototypes for non-inline static functions. */
@@ -49,27 +59,26 @@ static void ckh_shrink(tsd_t *tsd, ckh_t *ckh);
  * Search bucket for key and return the cell number if found; SIZE_T_MAX
  * otherwise.
  */
-JEMALLOC_INLINE_C size_t
-ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key)
-{
+static size_t
+ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) {
     ckhc_t *cell;
     unsigned i;
 
     for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
         cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
-        if (cell->key != NULL && ckh->keycomp(key, cell->key))
-            return ((bucket << LG_CKH_BUCKET_CELLS) + i);
+        if (cell->key != NULL && ckh->keycomp(key, cell->key)) {
+            return (bucket << LG_CKH_BUCKET_CELLS) + i;
+        }
     }
 
-    return (SIZE_T_MAX);
+    return SIZE_T_MAX;
 }
 
 /*
  * Search table for key and return cell number if found; SIZE_T_MAX otherwise.
  */
-JEMALLOC_INLINE_C size_t
-ckh_isearch(ckh_t *ckh, const void *key)
-{
+static size_t
+ckh_isearch(ckh_t *ckh, const void *key) {
     size_t hashes[2], bucket, cell;
 
     assert(ckh != NULL);
@@ -79,19 +88,19 @@ ckh_isearch(ckh_t *ckh, const void *key)
     /* Search primary bucket. */
     bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
     cell = ckh_bucket_search(ckh, bucket, key);
-    if (cell != SIZE_T_MAX)
-        return (cell);
+    if (cell != SIZE_T_MAX) {
+        return cell;
+    }
 
     /* Search secondary bucket. */
     bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
     cell = ckh_bucket_search(ckh, bucket, key);
-    return (cell);
+    return cell;
 }
 
-JEMALLOC_INLINE_C bool
+static bool
 ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
-    const void *data)
-{
+    const void *data) {
     ckhc_t *cell;
     unsigned offset, i;
@@ -99,7 +108,8 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
      * Cycle through the cells in the bucket, starting at a random position.
      * The randomness avoids worst-case search overhead as buckets fill up.
      */
-    prng32(offset, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
+    offset = (unsigned)prng_lg_range_u64(&ckh->prng_state,
+        LG_CKH_BUCKET_CELLS);
     for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
         cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
             ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
@@ -107,11 +117,11 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
             cell->key = key;
             cell->data = data;
             ckh->count++;
-            return (false);
+            return false;
         }
     }
 
-    return (true);
+    return true;
 }
 
 /*
@@ -120,10 +130,9 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
  * eviction/relocation procedure until either success or detection of an
  * eviction/relocation bucket cycle.
  */
-JEMALLOC_INLINE_C bool
+static bool
 ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
-    void const **argdata)
-{
+    void const **argdata) {
     const void *key, *data, *tkey, *tdata;
     ckhc_t *cell;
     size_t hashes[2], bucket, tbucket;
@@ -141,7 +150,8 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
          * were an item for which both hashes indicated the same
          * bucket.
          */
-        prng32(i, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
+        i = (unsigned)prng_lg_range_u64(&ckh->prng_state,
+            LG_CKH_BUCKET_CELLS);
         cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
         assert(cell->key != NULL);
@@ -181,18 +191,18 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
         if (tbucket == argbucket) {
             *argkey = key;
             *argdata = data;
-            return (true);
+            return true;
         }
 
         bucket = tbucket;
-        if (!ckh_try_bucket_insert(ckh, bucket, key, data))
-            return (false);
+        if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
+            return false;
+        }
     }
 }
 
-JEMALLOC_INLINE_C bool
-ckh_try_insert(ckh_t *ckh, void const **argkey, void const **argdata)
-{
+static bool
+ckh_try_insert(ckh_t *ckh, void const **argkey, void const **argdata) {
     size_t hashes[2], bucket;
     const void *key = *argkey;
     const void *data = *argdata;
@@ -201,27 +211,28 @@ ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
     /* Try to insert in primary bucket. */
     bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
-    if (!ckh_try_bucket_insert(ckh, bucket, key, data))
-        return (false);
+    if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
+        return false;
+    }
 
     /* Try to insert in secondary bucket. */
     bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
-    if (!ckh_try_bucket_insert(ckh, bucket, key, data))
-        return (false);
+    if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
+        return false;
+    }
 
     /*
      * Try to find a place for this item via iterative eviction/relocation.
     */
-    return (ckh_evict_reloc_insert(ckh, bucket, argkey, argdata));
+    return ckh_evict_reloc_insert(ckh, bucket, argkey, argdata);
 }
 
 /*
  * Try to rebuild the hash table from scratch by inserting all items from the
  * old table into the new.
  */
-JEMALLOC_INLINE_C bool
-ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
-{
+static bool
+ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) {
     size_t count, i, nins;
     const void *key, *data;
@@ -233,22 +244,20 @@ ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
             data = aTab[i].data;
             if (ckh_try_insert(ckh, &key, &data)) {
                 ckh->count = count;
-                return (true);
+                return true;
             }
             nins++;
         }
     }
 
-    return (false);
+    return false;
 }
 
 static bool
-ckh_grow(tsd_t *tsd, ckh_t *ckh)
-{
+ckh_grow(tsd_t *tsd, ckh_t *ckh) {
     bool ret;
     ckhc_t *tab, *ttab;
-    size_t lg_curcells;
-    unsigned lg_prevbuckets;
+    unsigned lg_prevbuckets, lg_curcells;
 
 #ifdef CKH_COUNT
     ckh->ngrows++;
@@ -265,13 +274,13 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
         size_t usize;
 
         lg_curcells++;
-        usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
-        if (usize == 0) {
+        usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
+        if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
             ret = true;
             goto label_return;
         }
-        tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true,
-            NULL);
+        tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true,
+            NULL, true, arena_ichoose(tsd, NULL));
         if (tab == NULL) {
             ret = true;
             goto label_return;
@@ -283,27 +292,26 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
         ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
         if (!ckh_rebuild(ckh, tab)) {
-            idalloctm(tsd, tab, tcache_get(tsd, false), true);
+            idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true);
             break;
         }
 
         /* Rebuilding failed, so back out partially rebuilt table. */
-        idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true);
+        idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
         ckh->tab = tab;
         ckh->lg_curbuckets = lg_prevbuckets;
     }
 
     ret = false;
 label_return:
-    return (ret);
+    return ret;
 }
 
 static void
-ckh_shrink(tsd_t *tsd, ckh_t *ckh)
-{
+ckh_shrink(tsd_t *tsd, ckh_t *ckh) {
     ckhc_t *tab, *ttab;
-    size_t lg_curcells, usize;
-    unsigned lg_prevbuckets;
+    size_t usize;
+    unsigned lg_prevbuckets, lg_curcells;
 
     /*
      * It is possible (though unlikely, given well behaved hashes) that the
@@ -311,11 +319,12 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
      */
     lg_prevbuckets = ckh->lg_curbuckets;
     lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
-    usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
-    if (usize == 0)
+    usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
+    if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
         return;
-    tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true,
-        NULL);
+    }
+    tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL,
+        true, arena_ichoose(tsd, NULL));
     if (tab == NULL) {
         /*
          * An OOM error isn't worth propagating, since it doesn't
@@ -330,7 +339,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
     ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
     if (!ckh_rebuild(ckh, tab)) {
-        idalloctm(tsd, tab, tcache_get(tsd, false), true);
+        idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true);
 #ifdef CKH_COUNT
         ckh->nshrinks++;
 #endif
@@ -338,7 +347,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
     }
 
     /* Rebuilding failed, so back out partially rebuilt table. */
-    idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true);
+    idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
     ckh->tab = tab;
     ckh->lg_curbuckets = lg_prevbuckets;
 #ifdef CKH_COUNT
@@ -348,8 +357,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
 
 bool
 ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
-    ckh_keycomp_t *keycomp)
-{
+    ckh_keycomp_t *keycomp) {
     bool ret;
     size_t mincells, usize;
     unsigned lg_mincells;
@@ -379,20 +387,21 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
     mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2;
     for (lg_mincells = LG_CKH_BUCKET_CELLS;
         (ZU(1) << lg_mincells) < mincells;
-        lg_mincells++)
-        ; /* Do nothing. */
+        lg_mincells++) {
+        /* Do nothing. */
+    }
     ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
     ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
     ckh->hash = hash;
     ckh->keycomp = keycomp;
 
-    usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
-    if (usize == 0) {
+    usize = sz_sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
+    if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
         ret = true;
         goto label_return;
     }
-    ckh->tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true,
-        NULL);
+    ckh->tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true,
+        NULL, true, arena_ichoose(tsd, NULL));
     if (ckh->tab == NULL) {
         ret = true;
         goto label_return;
@@ -400,13 +409,11 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
     ret = false;
 label_return:
-    return (ret);
+    return ret;
 }
 
 void
-ckh_delete(tsd_t *tsd, ckh_t *ckh)
-{
+ckh_delete(tsd_t *tsd, ckh_t *ckh) {
     assert(ckh != NULL);
 
 #ifdef CKH_VERBOSE
@@ -421,43 +428,42 @@ ckh_delete(tsd_t *tsd, ckh_t *ckh)
         (unsigned long long)ckh->nrelocs);
 #endif
 
-    idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true);
-    if (config_debug)
-        memset(ckh, 0x5a, sizeof(ckh_t));
+    idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
+    if (config_debug) {
+        memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t));
+    }
 }
 
 size_t
-ckh_count(ckh_t *ckh)
-{
+ckh_count(ckh_t *ckh) {
     assert(ckh != NULL);
 
-    return (ckh->count);
+    return ckh->count;
 }
 
 bool
-ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data)
-{
+ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) {
     size_t i, ncells;
 
     for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets +
         LG_CKH_BUCKET_CELLS)); i < ncells; i++) {
         if (ckh->tab[i].key != NULL) {
-            if (key != NULL)
+            if (key != NULL) {
                 *key = (void *)ckh->tab[i].key;
-            if (data != NULL)
+            }
+            if (data != NULL) {
                 *data = (void *)ckh->tab[i].data;
+            }
             *tabind = i + 1;
-            return (false);
+            return false;
         }
     }
 
-    return (true);
+    return true;
 }
 
 bool
-ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data)
-{
+ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data) {
     bool ret;
 
     assert(ckh != NULL);
@@ -476,23 +482,24 @@ ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data)
     ret = false;
 label_return:
-    return (ret);
+    return ret;
 }
 
 bool
 ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
-    void **data)
-{
+    void **data) {
     size_t cell;
 
     assert(ckh != NULL);
 
     cell = ckh_isearch(ckh, searchkey);
     if (cell != SIZE_T_MAX) {
-        if (key != NULL)
+        if (key != NULL) {
             *key = (void *)ckh->tab[cell].key;
-        if (data != NULL)
+        }
+        if (data != NULL) {
             *data = (void *)ckh->tab[cell].data;
+        }
         ckh->tab[cell].key = NULL;
         ckh->tab[cell].data = NULL; /* Not necessary. */
@@ -505,51 +512,47 @@ ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
             ckh_shrink(tsd, ckh);
         }
 
-        return (false);
+        return false;
     }
 
-    return (true);
+    return true;
 }
 
 bool
-ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
-{
+ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) {
     size_t cell;
 
     assert(ckh != NULL);
 
     cell = ckh_isearch(ckh, searchkey);
     if (cell != SIZE_T_MAX) {
-        if (key != NULL)
+        if (key != NULL) {
             *key = (void *)ckh->tab[cell].key;
-        if (data != NULL)
+        }
+        if (data != NULL) {
             *data = (void *)ckh->tab[cell].data;
-        return (false);
+        }
+        return false;
    }
 
-    return (true);
+    return true;
 }
 
 void
-ckh_string_hash(const void *key, size_t r_hash[2])
-{
+ckh_string_hash(const void *key, size_t r_hash[2]) {
     hash(key, strlen((const char *)key), 0x94122f33U, r_hash);
 }
 
 bool
-ckh_string_keycomp(const void *k1, const void *k2)
-{
-    assert(k1 != NULL);
-    assert(k2 != NULL);
+ckh_string_keycomp(const void *k1, const void *k2) {
+    assert(k1 != NULL);
+    assert(k2 != NULL);
 
-    return (strcmp((char *)k1, (char *)k2) ? false : true);
+    return !strcmp((char *)k1, (char *)k2);
}
 
 void
-ckh_pointer_hash(const void *key, size_t r_hash[2])
-{
+ckh_pointer_hash(const void *key, size_t r_hash[2]) {
     union {
         const void *v;
         size_t i;
@@ -561,8 +564,6 @@ ckh_pointer_hash(const void *key, size_t r_hash[2])
 }
 
 bool
-ckh_pointer_keycomp(const void *k1, const void *k2)
-{
-    return ((k1 == k2) ? true : false);
+ckh_pointer_keycomp(const void *k1, const void *k2) {
+    return (k1 == k2);
 }
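For reference, a toy standalone sketch of the two-bucket cuckoo lookup that ckh_bucket_search()/ckh_isearch() implement: each key has two candidate buckets derived from two hashes, each bucket holds a small fixed number of cells, and a miss in both buckets means the key is absent (insertion, not shown, evicts and relocates entries when both buckets are full, starting at a random cell to avoid worst-case probing). The table size, hash functions and types below are illustrative assumptions, not jemalloc's.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define LG_BUCKET_CELLS 2                 /* 4 cells per bucket, as in ckh.c */
#define LG_BUCKETS      4                 /* 16 buckets (toy size) */
#define NCELLS ((size_t)1 << (LG_BUCKETS + LG_BUCKET_CELLS))

typedef struct { const char *key; int val; } cell_t;
static cell_t tab[NCELLS];

/* Two independent toy string hashes standing in for the real hash pair. */
static size_t hash1(const char *k) { size_t h = 14695981039346656037u;
    for (; *k; k++) h = (h ^ (unsigned char)*k) * 1099511628211u; return h; }
static size_t hash2(const char *k) { size_t h = 5381;
    for (; *k; k++) h = h * 33 + (unsigned char)*k; return h; }

/* Scan one bucket's cells; SIZE_MAX means "not in this bucket". */
static size_t bucket_search(size_t bucket, const char *key) {
    for (size_t i = 0; i < ((size_t)1 << LG_BUCKET_CELLS); i++) {
        size_t cell = (bucket << LG_BUCKET_CELLS) + i;
        if (tab[cell].key != NULL && strcmp(tab[cell].key, key) == 0)
            return cell;
    }
    return SIZE_MAX;
}

/* Check the primary bucket, then the secondary one. */
static size_t isearch(const char *key) {
    size_t mask = ((size_t)1 << LG_BUCKETS) - 1;
    size_t cell = bucket_search(hash1(key) & mask, key);
    if (cell != SIZE_MAX)
        return cell;
    return bucket_search(hash2(key) & mask, key);
}

int main(void) {
    /* Place "redis" directly in the first cell of its primary bucket. */
    size_t mask = ((size_t)1 << LG_BUCKETS) - 1;
    size_t cell = (hash1("redis") & mask) << LG_BUCKET_CELLS;
    tab[cell].key = "redis"; tab[cell].val = 42;
    printf("found at cell %zu\n", isearch("redis"));
    printf("missing -> %zu (SIZE_MAX)\n", isearch("absent"));
    return 0;
}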