ruanhaishen / redis
Commit 9268493e authored Jul 23, 2019 by antirez
Client side caching: implement full slot limit function.
parent 47ce1ceb

Changes 3
src/server.c

@@ -2403,6 +2403,9 @@ void initServerConfig(void) {
     /* Latency monitor */
     server.latency_monitor_threshold = CONFIG_DEFAULT_LATENCY_MONITOR_THRESHOLD;
 
+    /* Tracking. */
+    server.tracking_table_max_fill = CONFIG_DEFAULT_TRACKING_MAX_FILL;
+
     /* Debugging */
     server.assert_failed = "<no assertion failed>";
     server.assert_file = "<no file>";
src/server.h

@@ -171,6 +171,7 @@ typedef long long mstime_t; /* millisecond time type. */
 #define CONFIG_DEFAULT_DEFRAG_CYCLE_MAX 75 /* 75% CPU max (at upper threshold) */
 #define CONFIG_DEFAULT_DEFRAG_MAX_SCAN_FIELDS 1000 /* keys with more than 1000 fields will be processed separately */
 #define CONFIG_DEFAULT_PROTO_MAX_BULK_LEN (512ll*1024*1024) /* Bulk request max size */
+#define CONFIG_DEFAULT_TRACKING_MAX_FILL 10 /* 10% tracking table max fill. */
 
 #define ACTIVE_EXPIRE_CYCLE_LOOKUPS_PER_LOOP 20 /* Loopkups per loop. */
 #define ACTIVE_EXPIRE_CYCLE_FAST_DURATION 1000 /* Microseconds */
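For a rough sense of scale, a sketch of what this default means, assuming TRACKING_TABLE_SIZE is the (1<<24) value that tracking.c defines elsewhere (it is not shown in this diff):

/* Illustration only: the (1<<24) table size is an assumption taken from
 * src/tracking.c, not from this diff. */
unsigned int max_slots = ((1<<24)/100) * 10;  /* 167772 * 10 = 1677720 slots, ~10% */

Under that assumption, the default of 10 lets roughly 1.6 million of the ~16.7 million caching slots be used before eviction starts.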
@@ -1316,6 +1317,7 @@ struct redisServer {
     list *ready_keys;        /* List of readyList structures for BLPOP & co */
     /* Client side caching. */
     unsigned int tracking_clients;  /* # of clients with tracking enabled.*/
+    int tracking_table_max_fill;    /* Max fill percentage. */
     /* Sort parameters - qsort_r() is only available under BSD so we
      * have to take this state global, in order to pass it to sortCompare() */
     int sort_desc;
src/tracking.c

@@ -251,4 +251,40 @@ void trackingInvalidateKeysOnFlush(int dbid) {
  * random caching slots, and send invalidation messages to clients like if
  * the key was modified. */
 void trackingLimitUsedSlots(void) {
+    static unsigned int timeout_counter = 0;
+
+    if (server.tracking_table_max_fill == 0) return; /* No limits set. */
+    unsigned int max_slots =
+        (TRACKING_TABLE_SIZE/100) * server.tracking_table_max_fill;
+    if (TrackingTableUsedSlots <= max_slots) {
+        timeout_counter = 0;
+        return; /* Limit not reached. */
+    }
+
+    /* We have to invalidate a few slots to reach the limit again. The effort
+     * we do here is proportional to the number of times we entered this
+     * function and found that we are still over the limit. */
+    int effort = 100 * (timeout_counter+1);
+
+    /* Let's start at a random position, and perform linear probing, in order
+     * to improve cache locality. However once we are able to find an used
+     * slot, jump again randomly, in order to avoid creating big holes in the
+     * table (that will make this funciton use more resourced later). */
+    while(effort > 0) {
+        unsigned int idx = rand() % TRACKING_TABLE_SIZE;
+        do {
+            effort--;
+            idx = (idx+1) % TRACKING_TABLE_SIZE;
+            if (TrackingTable[idx] != NULL) {
+                trackingInvalidateSlot(idx);
+                if (TrackingTableUsedSlots <= max_slots) {
+                    timeout_counter = 0;
+                    return; /* Return ASAP: we are again under the limit. */
+                } else {
+                    break; /* Jump to next random position. */
+                }
+            }
+        } while(effort > 0);
+    }
+    timeout_counter++;
 }
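The core of the change is the eviction loop above: probe linearly from a random slot for cache locality, invalidate the first used slot found, then jump to a new random position so contiguous holes are not carved into the table, all bounded by an effort budget that grows each time the function finds the table still over the limit. Below is a minimal standalone sketch of that same pattern on a toy table; it is not Redis code, and every name in it (TOY_TABLE_SIZE, toy_table, toy_used, toy_evict, toy_limit) is a hypothetical stand-in.

/* Minimal standalone sketch of the eviction pattern used above: start at a
 * random slot, probe linearly until a used slot is found, evict it, then
 * jump to a new random position, all bounded by an effort budget. NOT Redis
 * code; all names are hypothetical stand-ins for illustration only. */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define TOY_TABLE_SIZE 1024

static void *toy_table[TOY_TABLE_SIZE]; /* NULL means the slot is free. */
static unsigned int toy_used = 0;       /* Number of non-NULL slots. */

static void toy_evict(unsigned int idx) {
    toy_table[idx] = NULL;
    toy_used--;
}

/* Evict used slots until 'toy_used' drops to 'max_slots' or the probing
 * budget 'effort' is exhausted. Returns the number of slots evicted. */
static unsigned int toy_limit(unsigned int max_slots, int effort) {
    unsigned int evicted = 0;
    while (effort > 0 && toy_used > max_slots) {
        /* Random start, then linear probing for cache locality. */
        unsigned int idx = rand() % TOY_TABLE_SIZE;
        do {
            effort--;
            idx = (idx+1) % TOY_TABLE_SIZE;
            if (toy_table[idx] != NULL) {
                toy_evict(idx);
                evicted++;
                break; /* Jump to a new random position. */
            }
        } while (effort > 0);
    }
    return evicted;
}

int main(void) {
    srand((unsigned int)time(NULL));
    /* Fill half of the table with dummy entries. */
    for (unsigned int j = 0; j < TOY_TABLE_SIZE/2; j++) {
        toy_table[j] = &toy_table[j];
        toy_used++;
    }
    unsigned int max_slots = (TOY_TABLE_SIZE/100) * 10; /* 10% fill limit. */
    unsigned int evicted = toy_limit(max_slots, 4096);
    printf("used=%u max_slots=%u evicted=%u\n", toy_used, max_slots, evicted);
    return 0;
}

Compared with trackingLimitUsedSlots(), the sketch folds the "return as soon as we are back under the limit" check into the outer loop condition and reports how many slots it evicted, but the random start, linear probing, and random re-jump after each eviction follow the same pattern.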