Commit b46239e5
authored Jul 06, 2016 by antirez
Expire and LRU related code moved into different files.
parent 0610683d
Changes: 5

src/Makefile
@@ -128,7 +128,7 @@ endif
 REDIS_SERVER_NAME=redis-server
 REDIS_SENTINEL_NAME=redis-sentinel
-REDIS_SERVER_OBJ=adlist.o quicklist.o ae.o anet.o dict.o server.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endianconv.o slowlog.o scripting.o bio.o rio.o rand.o memtest.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o redis-check-rdb.o geo.o lazyfree.o module.o
+REDIS_SERVER_OBJ=adlist.o quicklist.o ae.o anet.o dict.o server.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endianconv.o slowlog.o scripting.o bio.o rio.o rand.o memtest.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o redis-check-rdb.o geo.o lazyfree.o module.o evict.o expire.o
 REDIS_GEOHASH_OBJ=../deps/geohash-int/geohash.o ../deps/geohash-int/geohash_helper.o
 REDIS_CLI_NAME=redis-cli
 REDIS_CLI_OBJ=anet.o adlist.o redis-cli.o zmalloc.o release.o anet.o ae.o crc64.o

src/db.c
@@ -1010,134 +1010,6 @@ int expireIfNeeded(redisDb *db, robj *key) {
                                          dbSyncDelete(db,key);
 }
 
-/*-----------------------------------------------------------------------------
- * Expires Commands
- *----------------------------------------------------------------------------*/
-
-/* This is the generic command implementation for EXPIRE, PEXPIRE, EXPIREAT
- * and PEXPIREAT. Because the command's second argument may be relative or absolute
- * the "basetime" argument is used to signal what the base time is (either 0
- * for *AT variants of the command, or the current time for relative expires).
- *
- * unit is either UNIT_SECONDS or UNIT_MILLISECONDS, and is only used for
- * the argv[2] parameter. The basetime is always specified in milliseconds. */
-void expireGenericCommand(client *c, long long basetime, int unit) {
-    robj *key = c->argv[1], *param = c->argv[2];
-    long long when; /* unix time in milliseconds when the key will expire. */
-
-    if (getLongLongFromObjectOrReply(c, param, &when, NULL) != C_OK)
-        return;
-
-    if (unit == UNIT_SECONDS) when *= 1000;
-    when += basetime;
-
-    /* No key, return zero. */
-    if (lookupKeyWrite(c->db,key) == NULL) {
-        addReply(c,shared.czero);
-        return;
-    }
-
-    /* EXPIRE with negative TTL, or EXPIREAT with a timestamp into the past
-     * should never be executed as a DEL when loading the AOF or in the context
-     * of a slave instance.
-     *
-     * Instead we take the other branch of the IF statement setting an expire
-     * (possibly in the past) and wait for an explicit DEL from the master. */
-    if (when <= mstime() && !server.loading && !server.masterhost) {
-        robj *aux;
-
-        int deleted = server.lazyfree_lazy_expire ? dbAsyncDelete(c->db,key) :
-                                                    dbSyncDelete(c->db,key);
-        serverAssertWithInfo(c,key,deleted);
-        server.dirty++;
-
-        /* Replicate/AOF this as an explicit DEL or UNLINK. */
-        aux = server.lazyfree_lazy_expire ? shared.unlink : shared.del;
-        rewriteClientCommandVector(c,2,aux,key);
-        signalModifiedKey(c->db,key);
-        notifyKeyspaceEvent(NOTIFY_GENERIC,"del",key,c->db->id);
-        addReply(c, shared.cone);
-        return;
-    } else {
-        setExpire(c->db,key,when);
-        addReply(c,shared.cone);
-        signalModifiedKey(c->db,key);
-        notifyKeyspaceEvent(NOTIFY_GENERIC,"expire",key,c->db->id);
-        server.dirty++;
-        return;
-    }
-}
-
-void expireCommand(client *c) {
-    expireGenericCommand(c,mstime(),UNIT_SECONDS);
-}
-
-void expireatCommand(client *c) {
-    expireGenericCommand(c,0,UNIT_SECONDS);
-}
-
-void pexpireCommand(client *c) {
-    expireGenericCommand(c,mstime(),UNIT_MILLISECONDS);
-}
-
-void pexpireatCommand(client *c) {
-    expireGenericCommand(c,0,UNIT_MILLISECONDS);
-}
-
-void ttlGenericCommand(client *c, int output_ms) {
-    long long expire, ttl = -1;
-
-    /* If the key does not exist at all, return -2 */
-    if (lookupKeyReadWithFlags(c->db,c->argv[1],LOOKUP_NOTOUCH) == NULL) {
-        addReplyLongLong(c,-2);
-        return;
-    }
-    /* The key exists. Return -1 if it has no expire, or the actual
-     * TTL value otherwise. */
-    expire = getExpire(c->db,c->argv[1]);
-    if (expire != -1) {
-        ttl = expire-mstime();
-        if (ttl < 0) ttl = 0;
-    }
-    if (ttl == -1) {
-        addReplyLongLong(c,-1);
-    } else {
-        addReplyLongLong(c,output_ms ? ttl : ((ttl+500)/1000));
-    }
-}
-
-void ttlCommand(client *c) {
-    ttlGenericCommand(c, 0);
-}
-
-void pttlCommand(client *c) {
-    ttlGenericCommand(c, 1);
-}
-
-void persistCommand(client *c) {
-    dictEntry *de;
-
-    de = dictFind(c->db->dict,c->argv[1]->ptr);
-    if (de == NULL) {
-        addReply(c,shared.czero);
-    } else {
-        if (removeExpire(c->db,c->argv[1])) {
-            addReply(c,shared.cone);
-            server.dirty++;
-        } else {
-            addReply(c,shared.czero);
-        }
-    }
-}
-
-/* TOUCH key1 [key2 key3 ... keyN] */
-void touchCommand(client *c) {
-    int touched = 0;
-    for (int j = 1; j < c->argc; j++)
-        if (lookupKeyRead(c->db,c->argv[j]) != NULL) touched++;
-    addReplyLongLong(c,touched);
-}
-
 /* -----------------------------------------------------------------------------
  * API to get key arguments from commands
  * ---------------------------------------------------------------------------*/
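
A note on the unit handling in expireGenericCommand() and ttlGenericCommand() above: a relative EXPIRE passes the current time as basetime while an *AT variant passes 0, seconds are scaled to milliseconds before the base is added, and TTL replies round milliseconds to the nearest second. The following standalone sketch is not part of the commit; the helper names are made up, and the UNIT_* values are restated here only so the example compiles:

#include <stdio.h>

#define UNIT_SECONDS 0
#define UNIT_MILLISECONDS 1

/* Mirrors the conversion in expireGenericCommand(): a relative expire
 * passes the current time as basetime, an *AT variant passes 0, and a
 * seconds value is scaled to milliseconds before the base is added. */
static long long absolute_expire_ms(long long value, long long basetime_ms, int unit) {
    long long when = value;
    if (unit == UNIT_SECONDS) when *= 1000;
    return when + basetime_ms;
}

/* Mirrors the reply path in ttlGenericCommand(): PTTL returns raw
 * milliseconds, TTL rounds to the nearest second. */
static long long reply_ttl(long long ttl_ms, int output_ms) {
    return output_ms ? ttl_ms : (ttl_ms + 500) / 1000;
}

int main(void) {
    long long now_ms = 1467763200000LL; /* a pretend mstime() value */

    /* EXPIRE key 10: relative seconds -> now + 10000 ms. */
    printf("%lld\n", absolute_expire_ms(10, now_ms, UNIT_SECONDS));
    /* PEXPIREAT key 1467763210000: absolute milliseconds, base 0. */
    printf("%lld\n", absolute_expire_ms(1467763210000LL, 0, UNIT_MILLISECONDS));
    /* TTL rounding: 1499 ms -> 1 s, 1500 ms -> 2 s. */
    printf("%lld %lld\n", reply_ttl(1499, 0), reply_ttl(1500, 0));
    return 0;
}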

src/object.c
@@ -682,18 +682,6 @@ char *strEncoding(int encoding) {
     }
 }
 
-/* Given an object returns the min number of milliseconds the object was never
- * requested, using an approximated LRU algorithm. */
-unsigned long long estimateObjectIdleTime(robj *o) {
-    unsigned long long lruclock = LRU_CLOCK();
-    if (lruclock >= o->lru) {
-        return (lruclock - o->lru) * LRU_CLOCK_RESOLUTION;
-    } else {
-        return (lruclock + (LRU_CLOCK_MAX - o->lru)) * LRU_CLOCK_RESOLUTION;
-    }
-}
-
 /* This is a helper function for the OBJECT command. We need to lookup keys
  * without any modification of LRU or other parameters. */
 robj *objectCommandLookup(client *c, robj *key) {
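
The two branches of estimateObjectIdleTime() above exist because the per-object lru stamp and the global LRU clock are small counters that wrap around at LRU_CLOCK_MAX. Below is a minimal standalone sketch of the same arithmetic, assuming the 24-bit clock and 1000 ms resolution of this era of Redis; the constants are restated here only for illustration:

#include <stdio.h>

/* Illustrative values; the real defines live in server.h. */
#define LRU_BITS 24
#define LRU_CLOCK_MAX ((1 << LRU_BITS) - 1)
#define LRU_CLOCK_RESOLUTION 1000

/* Same two-branch idle-time computation as estimateObjectIdleTime():
 * if the global clock already wrapped past the object's stamp, count
 * the distance "around the corner" instead of a negative delta. */
static unsigned long long idle_ms(unsigned long long lruclock, unsigned long long obj_lru) {
    if (lruclock >= obj_lru)
        return (lruclock - obj_lru) * LRU_CLOCK_RESOLUTION;
    else
        return (lruclock + (LRU_CLOCK_MAX - obj_lru)) * LRU_CLOCK_RESOLUTION;
}

int main(void) {
    printf("%llu\n", idle_ms(100, 40));              /* 60 ticks -> 60000 ms */
    printf("%llu\n", idle_ms(5, LRU_CLOCK_MAX - 2)); /* wrapped clock -> 7000 ms */
    return 0;
}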

src/server.c
@@ -738,186 +738,6 @@ void updateDictResizePolicy(void) {
 /* ======================= Cron: called every 100 ms ======================== */
 
-/* Helper function for the activeExpireCycle() function.
- * This function will try to expire the key that is stored in the hash table
- * entry 'de' of the 'expires' hash table of a Redis database.
- *
- * If the key is found to be expired, it is removed from the database and
- * 1 is returned. Otherwise no operation is performed and 0 is returned.
- *
- * When a key is expired, server.stat_expiredkeys is incremented.
- *
- * The parameter 'now' is the current time in milliseconds as is passed
- * to the function to avoid too many gettimeofday() syscalls. */
-int activeExpireCycleTryExpire(redisDb *db, dictEntry *de, long long now) {
-    long long t = dictGetSignedIntegerVal(de);
-    if (now > t) {
-        sds key = dictGetKey(de);
-        robj *keyobj = createStringObject(key,sdslen(key));
-
-        propagateExpire(db,keyobj,server.lazyfree_lazy_expire);
-        if (server.lazyfree_lazy_expire)
-            dbAsyncDelete(db,keyobj);
-        else
-            dbSyncDelete(db,keyobj);
-        notifyKeyspaceEvent(NOTIFY_EXPIRED,"expired",keyobj,db->id);
-        decrRefCount(keyobj);
-        server.stat_expiredkeys++;
-        return 1;
-    } else {
-        return 0;
-    }
-}
-
-/* Try to expire a few timed out keys. The algorithm used is adaptive and
- * will use few CPU cycles if there are few expiring keys, otherwise
- * it will get more aggressive to avoid that too much memory is used by
- * keys that can be removed from the keyspace.
- *
- * No more than CRON_DBS_PER_CALL databases are tested at every
- * iteration.
- *
- * This kind of call is used when Redis detects that timelimit_exit is
- * true, so there is more work to do, and we do it more incrementally from
- * the beforeSleep() function of the event loop.
- *
- * Expire cycle type:
- *
- * If type is ACTIVE_EXPIRE_CYCLE_FAST the function will try to run a
- * "fast" expire cycle that takes no longer than EXPIRE_FAST_CYCLE_DURATION
- * microseconds, and is not repeated again before the same amount of time.
- *
- * If type is ACTIVE_EXPIRE_CYCLE_SLOW, that normal expire cycle is
- * executed, where the time limit is a percentage of the REDIS_HZ period
- * as specified by the REDIS_EXPIRELOOKUPS_TIME_PERC define. */
-
-void activeExpireCycle(int type) {
-    /* This function has some global state in order to continue the work
-     * incrementally across calls. */
-    static unsigned int current_db = 0; /* Last DB tested. */
-    static int timelimit_exit = 0;      /* Time limit hit in previous call? */
-    static long long last_fast_cycle = 0; /* When last fast cycle ran. */
-
-    int j, iteration = 0;
-    int dbs_per_call = CRON_DBS_PER_CALL;
-    long long start = ustime(), timelimit;
-
-    if (type == ACTIVE_EXPIRE_CYCLE_FAST) {
-        /* Don't start a fast cycle if the previous cycle did not exit
-         * for the time limit. Also don't repeat a fast cycle for the same
-         * period as the fast cycle total duration itself. */
-        if (!timelimit_exit) return;
-        if (start < last_fast_cycle + ACTIVE_EXPIRE_CYCLE_FAST_DURATION*2) return;
-        last_fast_cycle = start;
-    }
-
-    /* We usually should test CRON_DBS_PER_CALL per iteration, with
-     * two exceptions:
-     *
-     * 1) Don't test more DBs than we have.
-     * 2) If last time we hit the time limit, we want to scan all DBs
-     * in this iteration, as there is work to do in some DB and we don't want
-     * expired keys to use memory for too much time. */
-    if (dbs_per_call > server.dbnum || timelimit_exit)
-        dbs_per_call = server.dbnum;
-
-    /* We can use at max ACTIVE_EXPIRE_CYCLE_SLOW_TIME_PERC percentage of CPU time
-     * per iteration. Since this function gets called with a frequency of
-     * server.hz times per second, the following is the max amount of
-     * microseconds we can spend in this function. */
-    timelimit = 1000000*ACTIVE_EXPIRE_CYCLE_SLOW_TIME_PERC/server.hz/100;
-    timelimit_exit = 0;
-    if (timelimit <= 0) timelimit = 1;
-
-    if (type == ACTIVE_EXPIRE_CYCLE_FAST)
-        timelimit = ACTIVE_EXPIRE_CYCLE_FAST_DURATION; /* in microseconds. */
-
-    for (j = 0; j < dbs_per_call; j++) {
-        int expired;
-        redisDb *db = server.db+(current_db % server.dbnum);
-
-        /* Increment the DB now so we are sure if we run out of time
-         * in the current DB we'll restart from the next. This allows us to
-         * distribute the time evenly across DBs. */
-        current_db++;
-
-        /* Continue to expire if at the end of the cycle more than 25%
-         * of the keys were expired. */
-        do {
-            unsigned long num, slots;
-            long long now, ttl_sum;
-            int ttl_samples;
-
-            /* If there is nothing to expire try next DB ASAP. */
-            if ((num = dictSize(db->expires)) == 0) {
-                db->avg_ttl = 0;
-                break;
-            }
-            slots = dictSlots(db->expires);
-            now = mstime();
-
-            /* When there are less than 1% filled slots getting random
-             * keys is expensive, so stop here waiting for better times...
-             * The dictionary will be resized asap. */
-            if (num && slots > DICT_HT_INITIAL_SIZE &&
-                (num*100/slots < 1)) break;
-
-            /* The main collection cycle. Sample random keys among keys
-             * with an expire set, checking for expired ones. */
-            expired = 0;
-            ttl_sum = 0;
-            ttl_samples = 0;
-
-            if (num > ACTIVE_EXPIRE_CYCLE_LOOKUPS_PER_LOOP)
-                num = ACTIVE_EXPIRE_CYCLE_LOOKUPS_PER_LOOP;
-
-            while (num--) {
-                dictEntry *de;
-                long long ttl;
-
-                if ((de = dictGetRandomKey(db->expires)) == NULL) break;
-                ttl = dictGetSignedIntegerVal(de)-now;
-                if (activeExpireCycleTryExpire(db,de,now)) expired++;
-                if (ttl > 0) {
-                    /* We want the average TTL of keys yet not expired. */
-                    ttl_sum += ttl;
-                    ttl_samples++;
-                }
-            }
-
-            /* Update the average TTL stats for this database. */
-            if (ttl_samples) {
-                long long avg_ttl = ttl_sum/ttl_samples;
-
-                /* Do a simple running average with a few samples.
-                 * We just use the current estimate with a weight of 2%
-                 * and the previous estimate with a weight of 98%. */
-                if (db->avg_ttl == 0) db->avg_ttl = avg_ttl;
-                db->avg_ttl = (db->avg_ttl/50)*49 + (avg_ttl/50);
-            }
-
-            /* We can't block forever here even if there are many keys to
-             * expire. So after a given amount of milliseconds return to the
-             * caller waiting for the other active expire cycle. */
-            iteration++;
-            if ((iteration & 0xf) == 0) { /* check once every 16 iterations. */
-                long long elapsed = ustime()-start;
-
-                latencyAddSampleIfNeeded("expire-cycle",elapsed/1000);
-                if (elapsed > timelimit) timelimit_exit = 1;
-            }
-            if (timelimit_exit) return;
-            /* We don't repeat the cycle if there are less than 25% of keys
-             * found expired in the current DB. */
-        } while (expired > ACTIVE_EXPIRE_CYCLE_LOOKUPS_PER_LOOP/4);
-    }
-}
-
-unsigned int getLRUClock(void) {
-    return (mstime()/LRU_CLOCK_RESOLUTION) & LRU_CLOCK_MAX;
-}
-
 /* Add a sample to the operations per second array of samples. */
 void trackInstantaneousMetric(int metric, long long current_reading) {
     long long t = mstime() - server.inst_metric[metric].last_sample_time;
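
Two computations in activeExpireCycle() above are worth working out with concrete numbers: the per-call time budget derived from server.hz, and the avg_ttl running average. The sketch below assumes the defaults commonly cited for this era (hz = 10, ACTIVE_EXPIRE_CYCLE_SLOW_TIME_PERC = 25); treat the figures as an illustration rather than a statement about any particular build:

#include <stdio.h>

int main(void) {
    /* Assumed defaults: server.hz = 10, 25% of CPU time per cycle. */
    long long hz = 10;
    long long slow_time_perc = 25;

    /* Same expression as in activeExpireCycle(): the budget, in
     * microseconds, for one slow cycle. 1000000*25/10/100 = 25000 us,
     * i.e. 25 ms out of every 100 ms cron period. */
    long long timelimit = 1000000 * slow_time_perc / hz / 100;
    printf("timelimit = %lld us\n", timelimit);

    /* The avg_ttl running average: (avg/50)*49 + (sample/50) keeps
     * roughly 98% of the old estimate and 2% of the new sample. */
    long long avg_ttl = 60000;  /* previous estimate, in ms */
    long long sample  = 10000;  /* average TTL sampled this round, in ms */
    avg_ttl = (avg_ttl / 50) * 49 + (sample / 50);
    printf("avg_ttl = %lld ms\n", avg_ttl); /* 1200*49 + 200 = 59000 */
    return 0;
}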

@@ -3339,318 +3159,6 @@ void monitorCommand(client *c) {
     addReply(c,shared.ok);
 }
 
-/* ============================ Maxmemory directive ======================== */
-
-/* freeMemoryIfNeeded() gets called when 'maxmemory' is set on the config
- * file to limit the max memory used by the server, before processing a
- * command.
- *
- * The goal of the function is to free enough memory to keep Redis under the
- * configured memory limit.
- *
- * The function starts calculating how many bytes should be freed to keep
- * Redis under the limit, and enters a loop selecting the best keys to
- * evict according to the configured policy.
- *
- * If all the bytes needed to return back under the limit were freed the
- * function returns C_OK, otherwise C_ERR is returned, and the caller
- * should block the execution of commands that will result in more memory
- * used by the server.
- *
- * ------------------------------------------------------------------------
- *
- * LRU approximation algorithm
- *
- * Redis uses an approximation of the LRU algorithm that runs in constant
- * memory. Every time there is a key to expire, we sample N keys (with
- * N very small, usually around 5) to populate a pool of best keys to
- * evict of M keys (the pool size is defined by MAXMEMORY_EVICTION_POOL_SIZE).
- *
- * The N keys sampled are added in the pool of good keys to expire (the one
- * with an old access time) if they are better than one of the current keys
- * in the pool.
- *
- * After the pool is populated, the best key we have in the pool is expired.
- * However note that we don't remove keys from the pool when they are deleted
- * so the pool may contain keys that no longer exist.
- *
- * When we try to evict a key, and all the entries in the pool don't exist
- * we populate it again. This time we'll be sure that the pool has at least
- * one key that can be evicted, if there is at least one key that can be
- * evicted in the whole database. */
-
-/* Create a new eviction pool. */
-struct evictionPoolEntry *evictionPoolAlloc(void) {
-    struct evictionPoolEntry *ep;
-    int j;
-
-    ep = zmalloc(sizeof(*ep)*MAXMEMORY_EVICTION_POOL_SIZE);
-    for (j = 0; j < MAXMEMORY_EVICTION_POOL_SIZE; j++) {
-        ep[j].idle = 0;
-        ep[j].key = NULL;
-    }
-    return ep;
-}
-
-/* This is a helper function for freeMemoryIfNeeded(), it is used in order
- * to populate the evictionPool with a few entries every time we want to
- * expire a key. Keys with idle time smaller than one of the current
- * keys are added. Keys are always added if there are free entries.
- *
- * We insert keys on place in ascending order, so keys with the smaller
- * idle time are on the left, and keys with the higher idle time on the
- * right. */
-
-#define EVICTION_SAMPLES_ARRAY_SIZE 16
-void evictionPoolPopulate(dict *sampledict, dict *keydict, struct evictionPoolEntry *pool) {
-    int j, k, count;
-    dictEntry *_samples[EVICTION_SAMPLES_ARRAY_SIZE];
-    dictEntry **samples;
-
-    /* Try to use a static buffer: this function is a big hit...
-     * Note: it was actually measured that this helps. */
-    if (server.maxmemory_samples <= EVICTION_SAMPLES_ARRAY_SIZE) {
-        samples = _samples;
-    } else {
-        samples = zmalloc(sizeof(samples[0])*server.maxmemory_samples);
-    }
-
-    count = dictGetSomeKeys(sampledict,samples,server.maxmemory_samples);
-    for (j = 0; j < count; j++) {
-        unsigned long long idle;
-        sds key;
-        robj *o;
-        dictEntry *de;
-
-        de = samples[j];
-        key = dictGetKey(de);
-        /* If the dictionary we are sampling from is not the main
-         * dictionary (but the expires one) we need to lookup the key
-         * again in the key dictionary to obtain the value object. */
-        if (sampledict != keydict) de = dictFind(keydict, key);
-        o = dictGetVal(de);
-        idle = estimateObjectIdleTime(o);
-
-        /* Insert the element inside the pool.
-         * First, find the first empty bucket or the first populated
-         * bucket that has an idle time smaller than our idle time. */
-        k = 0;
-        while (k < MAXMEMORY_EVICTION_POOL_SIZE &&
-               pool[k].key &&
-               pool[k].idle < idle) k++;
-        if (k == 0 && pool[MAXMEMORY_EVICTION_POOL_SIZE-1].key != NULL) {
-            /* Can't insert if the element is < the worst element we have
-             * and there are no empty buckets. */
-            continue;
-        } else if (k < MAXMEMORY_EVICTION_POOL_SIZE && pool[k].key == NULL) {
-            /* Inserting into empty position. No setup needed before insert. */
-        } else {
-            /* Inserting in the middle. Now k points to the first element
-             * greater than the element to insert. */
-            if (pool[MAXMEMORY_EVICTION_POOL_SIZE-1].key == NULL) {
-                /* Free space on the right? Insert at k shifting
-                 * all the elements from k to end to the right. */
-                memmove(pool+k+1,pool+k,
-                    sizeof(pool[0])*(MAXMEMORY_EVICTION_POOL_SIZE-k-1));
-            } else {
-                /* No free space on right? Insert at k-1 */
-                k--;
-                /* Shift all elements on the left of k (included) to the
-                 * left, so we discard the element with smaller idle time. */
-                sdsfree(pool[0].key);
-                memmove(pool,pool+1,sizeof(pool[0])*k);
-            }
-        }
-        pool[k].key = sdsdup(key);
-        pool[k].idle = idle;
-    }
-    if (samples != _samples) zfree(samples);
-}
-
-int freeMemoryIfNeeded(void) {
-    size_t mem_reported, mem_used, mem_tofree, mem_freed;
-    int slaves = listLength(server.slaves);
-    mstime_t latency, eviction_latency;
-    long long delta;
-
-    /* Check if we are over the memory usage limit. If we are not, no need
-     * to subtract the slaves output buffers. We can just return ASAP. */
-    mem_reported = zmalloc_used_memory();
-    if (mem_reported <= server.maxmemory) return C_OK;
-
-    /* Remove the size of slaves output buffers and AOF buffer from the
-     * count of used memory. */
-    mem_used = mem_reported;
-    if (slaves) {
-        listIter li;
-        listNode *ln;
-
-        listRewind(server.slaves,&li);
-        while((ln = listNext(&li))) {
-            client *slave = listNodeValue(ln);
-            unsigned long obuf_bytes = getClientOutputBufferMemoryUsage(slave);
-            if (obuf_bytes > mem_used)
-                mem_used = 0;
-            else
-                mem_used -= obuf_bytes;
-        }
-    }
-    if (server.aof_state != AOF_OFF) {
-        mem_used -= sdslen(server.aof_buf);
-        mem_used -= aofRewriteBufferSize();
-    }
-
-    /* Check if we are still over the memory limit. */
-    if (mem_used <= server.maxmemory) return C_OK;
-
-    /* Compute how much memory we need to free. */
-    mem_tofree = mem_used - server.maxmemory;
-    mem_freed = 0;
-    if (server.maxmemory_policy == MAXMEMORY_NO_EVICTION)
-        goto cant_free; /* We need to free memory, but policy forbids. */
-
-    latencyStartMonitor(latency);
-    while (mem_freed < mem_tofree) {
-        int j, k, keys_freed = 0;
-
-        for (j = 0; j < server.dbnum; j++) {
-            long bestval = 0; /* just to prevent warning */
-            sds bestkey = NULL;
-            dictEntry *de;
-            redisDb *db = server.db+j;
-            dict *dict;
-
-            if (server.maxmemory_policy == MAXMEMORY_ALLKEYS_LRU ||
-                server.maxmemory_policy == MAXMEMORY_ALLKEYS_RANDOM)
-            {
-                dict = server.db[j].dict;
-            } else {
-                dict = server.db[j].expires;
-            }
-            if (dictSize(dict) == 0) continue;
-
-            /* volatile-random and allkeys-random policy */
-            if (server.maxmemory_policy == MAXMEMORY_ALLKEYS_RANDOM ||
-                server.maxmemory_policy == MAXMEMORY_VOLATILE_RANDOM)
-            {
-                de = dictGetRandomKey(dict);
-                bestkey = dictGetKey(de);
-            }
-
-            /* volatile-lru and allkeys-lru policy */
-            else if (server.maxmemory_policy == MAXMEMORY_ALLKEYS_LRU ||
-                server.maxmemory_policy == MAXMEMORY_VOLATILE_LRU)
-            {
-                struct evictionPoolEntry *pool = db->eviction_pool;
-
-                while(bestkey == NULL) {
-                    evictionPoolPopulate(dict, db->dict, db->eviction_pool);
-                    /* Go backward from best to worst element to evict. */
-                    for (k = MAXMEMORY_EVICTION_POOL_SIZE-1; k >= 0; k--) {
-                        if (pool[k].key == NULL) continue;
-                        de = dictFind(dict,pool[k].key);
-
-                        /* Remove the entry from the pool. */
-                        sdsfree(pool[k].key);
-                        /* Shift all elements on its right to left. */
-                        memmove(pool+k,pool+k+1,
-                            sizeof(pool[0])*(MAXMEMORY_EVICTION_POOL_SIZE-k-1));
-                        /* Clear the element on the right which is empty
-                         * since we shifted one position to the left. */
-                        pool[MAXMEMORY_EVICTION_POOL_SIZE-1].key = NULL;
-                        pool[MAXMEMORY_EVICTION_POOL_SIZE-1].idle = 0;
-
-                        /* If the key exists, it is our pick. Otherwise it is
-                         * a ghost and we need to try the next element. */
-                        if (de) {
-                            bestkey = dictGetKey(de);
-                            break;
-                        } else {
-                            /* Ghost... */
-                            continue;
-                        }
-                    }
-                }
-            }
-
-            /* volatile-ttl */
-            else if (server.maxmemory_policy == MAXMEMORY_VOLATILE_TTL) {
-                for (k = 0; k < server.maxmemory_samples; k++) {
-                    sds thiskey;
-                    long thisval;
-
-                    de = dictGetRandomKey(dict);
-                    thiskey = dictGetKey(de);
-                    thisval = (long) dictGetVal(de);
-
-                    /* Expiring sooner (smaller expire unix timestamp) makes a
-                     * better candidate for deletion */
-                    if (bestkey == NULL || thisval < bestval) {
-                        bestkey = thiskey;
-                        bestval = thisval;
-                    }
-                }
-            }
-
-            /* Finally remove the selected key. */
-            if (bestkey) {
-                robj *keyobj = createStringObject(bestkey,sdslen(bestkey));
-                propagateExpire(db,keyobj,server.lazyfree_lazy_eviction);
-                /* We compute the amount of memory freed by db*Delete() alone.
-                 * It is possible that actually the memory needed to propagate
-                 * the DEL in AOF and replication link is greater than the one
-                 * we are freeing removing the key, but we can't account for
-                 * that otherwise we would never exit the loop.
-                 *
-                 * AOF and Output buffer memory will be freed eventually so
-                 * we only care about memory used by the key space. */
-                delta = (long long) zmalloc_used_memory();
-                latencyStartMonitor(eviction_latency);
-                if (server.lazyfree_lazy_eviction)
-                    dbAsyncDelete(db,keyobj);
-                else
-                    dbSyncDelete(db,keyobj);
-                latencyEndMonitor(eviction_latency);
-                latencyAddSampleIfNeeded("eviction-del",eviction_latency);
-                latencyRemoveNestedEvent(latency,eviction_latency);
-                delta -= (long long) zmalloc_used_memory();
-                mem_freed += delta;
-                server.stat_evictedkeys++;
-                notifyKeyspaceEvent(NOTIFY_EVICTED, "evicted", keyobj, db->id);
-                decrRefCount(keyobj);
-                keys_freed++;
-
-                /* When the memory to free starts to be big enough, we may
-                 * start spending so much time here that it is impossible to
-                 * deliver data to the slaves fast enough, so we force the
-                 * transmission here inside the loop. */
-                if (slaves) flushSlavesOutputBuffers();
-            }
-        }
-        if (!keys_freed) {
-            latencyEndMonitor(latency);
-            latencyAddSampleIfNeeded("eviction-cycle",latency);
-            goto cant_free; /* nothing to free... */
-        }
-    }
-    latencyEndMonitor(latency);
-    latencyAddSampleIfNeeded("eviction-cycle",latency);
-    return C_OK;
-
-cant_free:
-    /* We are here if we are not able to reclaim memory. There is only one
-     * last thing we can try: check if the lazyfree thread has jobs in queue
-     * and wait... */
-    while(bioPendingJobsOfType(BIO_LAZY_FREE)) {
-        if (((mem_reported - zmalloc_used_memory()) + mem_freed) >= mem_tofree)
-            break;
-        usleep(1000);
-    }
-    return C_ERR;
-}
-
 /* =================================== Main! ================================ */
 
 #ifdef __linux__
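
The subtle part of evictionPoolPopulate() above is the insertion step: the pool stays sorted by ascending idle time, a sampled key that is older than an existing entry pushes the pool's weakest entry out when the pool is full, and keys younger than everything in a full pool are dropped. Here is a self-contained sketch of that invariant, using plain C strings instead of sds and a hypothetical pool_insert() helper; it is a simplification for illustration, not the code that was moved out in this commit:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define POOL_SIZE 4 /* stand-in for MAXMEMORY_EVICTION_POOL_SIZE */

struct entry { char *key; unsigned long long idle; };

/* Keep the pool sorted by ascending idle time; pool[POOL_SIZE-1] is the
 * best eviction candidate. Mirrors the memmove-based insertion above. */
static void pool_insert(struct entry *pool, const char *key, unsigned long long idle) {
    int k = 0;
    while (k < POOL_SIZE && pool[k].key && pool[k].idle < idle) k++;

    if (k == 0 && pool[POOL_SIZE-1].key != NULL) {
        return; /* worse than everything in a full pool: drop it */
    } else if (k < POOL_SIZE && pool[k].key == NULL) {
        /* empty slot: insert in place */
    } else if (pool[POOL_SIZE-1].key == NULL) {
        /* free space on the right: shift the tail right, insert at k */
        memmove(pool+k+1, pool+k, sizeof(pool[0])*(POOL_SIZE-k-1));
    } else {
        /* pool full: discard the smallest-idle entry on the left */
        k--;
        free(pool[0].key);
        memmove(pool, pool+1, sizeof(pool[0])*k);
    }
    pool[k].key = strdup(key);
    pool[k].idle = idle;
}

int main(void) {
    struct entry pool[POOL_SIZE] = {{0}};
    const char *keys[] = {"a", "b", "c", "d", "e"};
    unsigned long long idles[] = {50, 10, 70, 30, 90};

    for (int i = 0; i < 5; i++) pool_insert(pool, keys[i], idles[i]);
    for (int k = 0; k < POOL_SIZE; k++)
        if (pool[k].key) printf("%s:%llu ", pool[k].key, pool[k].idle);
    printf("\n"); /* prints d:30 a:50 c:70 e:90 -- "b" was pushed out */
    return 0;
}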

src/server.h
@@ -1613,6 +1613,9 @@ void replyToBlockedClientTimedOut(client *c);
 int getTimeoutFromObjectOrReply(client *c, robj *object, mstime_t *timeout, int unit);
 void disconnectAllBlockedClients(void);
 
+/* expire.c -- Handling of expired keys */
+void activeExpireCycle(int type);
+
 /* Git SHA1 */
 char *redisGitSHA1(void);
 char *redisGitDirty(void);
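
The declaration added to server.h above is the hook the rest of the server uses to drive the expire cycle. Per the comment on activeExpireCycle() in the server.c hunk, the FAST variant is meant to be fired from the event loop's beforeSleep() hook when the previous slow cycle ran out of time, and the SLOW variant from the periodic cron. A tiny sketch with a stub in place of the real function; the macro values and the stub are illustrative, not taken from server.h:

#include <stdio.h>

/* Illustrative values; the real defines live in server.h. */
#define ACTIVE_EXPIRE_CYCLE_SLOW 0
#define ACTIVE_EXPIRE_CYCLE_FAST 1

/* Stub standing in for the function declared in server.h above. */
static void activeExpireCycle(int type) {
    printf("activeExpireCycle(%s)\n",
           type == ACTIVE_EXPIRE_CYCLE_FAST ? "FAST" : "SLOW");
}

int main(void) {
    /* Event-loop hook: a short, time-boxed pass when there is known
     * leftover work from the last slow cycle. */
    activeExpireCycle(ACTIVE_EXPIRE_CYCLE_FAST);

    /* Periodic cron, server.hz times per second: the budgeted slow pass. */
    activeExpireCycle(ACTIVE_EXPIRE_CYCLE_SLOW);
    return 0;
}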