ruanhaishen / redis

Commit 3c968ff0
Authored May 17, 2018 by antirez

Merge branch 'unstable' of github.com:/antirez/redis into unstable

Parents: 3c43e984, 13779c11
Changes: 4 files
deps/jemalloc/src/jemalloc.c
@@ -2614,8 +2614,8 @@ je_get_defrag_hint(void* ptr, int *bin_util, int *run_util) {
     if (chunk != (arena_chunk_t *)CHUNK_ADDR2BASE(bin->runcur)) {
         arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
         size_t availregs = bin_info->nregs * bin->stats.curruns;
-        *bin_util = (bin->stats.curregs << 16) / availregs;
-        *run_util = ((bin_info->nregs - run->nfree) << 16) / bin_info->nregs;
+        *bin_util = ((long long)bin->stats.curregs << 16) / availregs;
+        *run_util = ((long long)(bin_info->nregs - run->nfree) << 16) / bin_info->nregs;
         defrag = 1;
     }
     malloc_mutex_unlock(&bin->lock);
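The only change here widens the fixed-point arithmetic: both utilization values are 16.16 fixed-point ratios, and on a 32-bit build (where size_t is 32 bits) the left shift by 16 can wrap once the register count exceeds 65535, which appears to be what the cast to long long guards against. A minimal standalone sketch of the failure mode, with made-up counts rather than jemalloc's actual data structures:

    #include <stdio.h>
    #include <stdint.h>

    int main(void) {
        /* uint32_t stands in for a 32-bit size_t; the counts are invented. */
        uint32_t curregs = 100000;   /* regions in use in the bin */
        uint32_t availregs = 200000; /* total region capacity */

        /* Old expression: the shift happens in 32 bits and wraps. */
        uint32_t bad = (curregs << 16) / availregs;

        /* Fixed expression: widen to long long before shifting. */
        long long good = ((long long)curregs << 16) / availregs;

        /* 50% utilization should be 32768 in 16.16 fixed point. */
        printf("wrapped: %u, widened: %lld\n", bad, good);
        return 0;
    }

This prints "wrapped: 11293, widened: 32768": the unwidened shift silently loses the high bits and the defrag hint would report a nonsensical utilization.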
src/server.c
@@ -1019,7 +1019,7 @@ int serverCron(struct aeEventLoop *eventLoop, long long id, void *clientData) {
     if (zmalloc_used_memory() > server.stat_peak_memory)
         server.stat_peak_memory = zmalloc_used_memory();

-    run_with_period(10) {
+    run_with_period(100) {
         /* Sample the RSS and other metrics here since this is a relatively slow call.
          * We must sample the zmalloc_used at the same time we take the rss, otherwise
          * the frag ratio calculate may be off (ratio of two samples at different times) */
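The RSS and fragmentation metrics are now sampled at most every 100 ms instead of every 10 ms, which is why the test changes further down bump their waits from 20 ms to 120 ms. run_with_period is Redis's cron rate-limiter; a simplified, self-contained approximation of its gating (not the exact Redis macro, and hz here is just the common default of 10):

    #include <stdio.h>

    static int hz = 10;            /* cron ticks per second, like server.hz */
    static long long cronloops = 0;

    /* Run the following block only once per 'ms' milliseconds. */
    #define run_with_period(ms) \
        if ((ms) <= 1000/hz || cronloops % ((ms)/(1000/hz)) == 0)

    int main(void) {
        for (cronloops = 0; cronloops < 10; cronloops++) {
            run_with_period(500) {
                /* With hz=10 each tick is 100ms, so this fires at ticks 0 and 5. */
                printf("sampling at tick %lld\n", cronloops);
            }
        }
        return 0;
    }

With hz at its default of 10 a cron tick is already 100 ms, so a 10 ms period ran the block on every tick anyway; the 100 ms period makes the intended rate explicit and, presumably, keeps the sampling cost bounded when hz is raised.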
src/zmalloc.c
@@ -301,10 +301,13 @@ size_t zmalloc_get_rss(void) {
 int zmalloc_get_allocator_info(size_t *allocated,
                                size_t *active,
                                size_t *resident) {
-    size_t epoch = 1, sz = sizeof(size_t);
+    uint64_t epoch = 1;
+    size_t sz;
     *allocated = *resident = *active = 0;
     /* Update the statistics cached by mallctl. */
+    sz = sizeof(epoch);
     je_mallctl("epoch", &epoch, &sz, &epoch, sz);
+    sz = sizeof(size_t);
     /* Unlike RSS, this does not include RSS from shared libraries and other non
      * heap mappings. */
     je_mallctl("stats.resident", resident, &sz, NULL, 0);
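This fix matters because jemalloc's mallctl interface is strict about operand sizes: the "epoch" control reads and writes a uint64_t, while the stats.* values are size_t, so a single sz value cannot serve both on platforms where the widths differ. A minimal sketch of the corrected pattern, assuming a program linked against jemalloc with the je_ prefix Redis uses for its vendored copy:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Prototype as in jemalloc's public API (je_-prefixed build). */
    int je_mallctl(const char *name, void *oldp, size_t *oldlenp,
                   void *newp, size_t newlen);

    int main(void) {
        uint64_t epoch = 1;
        size_t sz, allocated, active, resident;

        /* Bump the epoch so mallctl refreshes its cached statistics. */
        sz = sizeof(epoch);
        je_mallctl("epoch", &epoch, &sz, &epoch, sz);

        /* Each stat is a size_t; set sz to the matching width per read. */
        sz = sizeof(size_t);
        je_mallctl("stats.allocated", &allocated, &sz, NULL, 0);
        je_mallctl("stats.active", &active, &sz, NULL, 0);
        je_mallctl("stats.resident", &resident, &sz, NULL, 0);

        printf("allocated=%zu active=%zu resident=%zu\n",
               allocated, active, resident);
        return 0;
    }

Refreshing the epoch before reading is required because mallctl serves stats from a cache that is only updated when the epoch is written.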
tests/unit/memefficiency.tcl
@@ -48,7 +48,8 @@ start_server {tags {"defrag"}} {
         r config set maxmemory-policy allkeys-lru
         r debug populate 700000 asdf 150
         r debug populate 170000 asdf 300
-        after 20 ;# serverCron only updates the info once in 10ms
+        r ping ;# trigger eviction following the previous population
+        after 120 ;# serverCron only updates the info once in 100ms
         set frag [s allocator_frag_ratio]
         if {$::verbose} {
             puts "frag $frag"

@@ -68,11 +69,12 @@ start_server {tags {"defrag"}} {
             [s active_defrag_running] eq 0
         } else {
             puts [r info memory]
             puts [r memory malloc-stats]
             fail "defrag didn't stop."
         }
         # test the the fragmentation is lower
-        after 20 ;# serverCron only updates the info once in 10ms
+        after 120 ;# serverCron only updates the info once in 100ms
         set frag [s allocator_frag_ratio]
         if {$::verbose} {
             puts "frag $frag"

@@ -140,7 +142,7 @@ start_server {tags {"defrag"}} {
         assert {[r dbsize] == 250008}

         # start defrag
-        after 20 ;# serverCron only updates the info once in 10ms
+        after 120 ;# serverCron only updates the info once in 100ms
         set frag [s allocator_frag_ratio]
         if {$::verbose} {
             puts "frag $frag"

@@ -167,7 +169,7 @@ start_server {tags {"defrag"}} {
         }

         # test the the fragmentation is lower
-        after 20 ;# serverCron only updates the info once in 10ms
+        after 120 ;# serverCron only updates the info once in 100ms
         set frag [s allocator_frag_ratio]
         set max_latency 0
         foreach event [r latency latest] {