ruanhaishen / redis · Commit 2e5b3f08
Authored Jul 28, 2024 by YaacovHazan
Parents: 0637b4ea 94b9072e · Changes: 56 files

    Merge remote-tracking branch 'upstream/unstable' into HEAD
src/functions.c

@@ -24,6 +24,7 @@ static size_t engine_cache_memory = 0;
 static void engineFunctionDispose(dict *d, void *obj);
 static void engineStatsDispose(dict *d, void *obj);
 static void engineLibraryDispose(dict *d, void *obj);
+static void engineDispose(dict *d, void *obj);
 static int functionsVerifyName(sds name);

 typedef struct functionsLibEngineStats {
@@ -50,7 +51,7 @@ dictType engineDictType = {
     NULL,                    /* val dup */
     dictSdsKeyCaseCompare,   /* key compare */
     dictSdsDestructor,       /* key destructor */
-    NULL,                    /* val destructor */
+    engineDispose,           /* val destructor */
     NULL                     /* allow to expand */
 };
@@ -148,6 +149,16 @@ static void engineLibraryDispose(dict *d, void *obj) {
     engineLibraryFree(obj);
 }

+static void engineDispose(dict *d, void *obj) {
+    UNUSED(d);
+    engineInfo *ei = obj;
+    freeClient(ei->c);
+    sdsfree(ei->name);
+    ei->engine->free_ctx(ei->engine->engine_ctx);
+    zfree(ei->engine);
+    zfree(ei);
+}
+
 /* Clear all the functions from the given library ctx */
 void functionsLibCtxClear(functionsLibCtx *lib_ctx) {
     dictEmpty(lib_ctx->functions, NULL);
@@ -166,11 +177,13 @@ void functionsLibCtxClear(functionsLibCtx *lib_ctx) {
 void functionsLibCtxClearCurrent(int async) {
     if (async) {
         functionsLibCtx *old_l_ctx = curr_functions_lib_ctx;
         curr_functions_lib_ctx = functionsLibCtxCreate();
-        freeFunctionsAsync(old_l_ctx);
+        dict *old_engines = engines;
+        freeFunctionsAsync(old_l_ctx, old_engines);
     } else {
         functionsLibCtxClear(curr_functions_lib_ctx);
         functionsLibCtxFree(curr_functions_lib_ctx);
+        dictRelease(engines);
     }
     functionsInit();
 }

 /* Free the given functions ctx */
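The engineDispose() hunk above leans on the dict's registered value destructor: once engineDispose is installed as engineDictType's val destructor, a single dictRelease(engines) tears down every engine together with its name, client, and engine context. The standalone sketch below illustrates that ownership pattern with deliberately hypothetical names (toyTable, toyEngine, toyEngineDispose); it is plain C, not the Redis dict API.

/* Sketch of the val-destructor pattern: when a table owns its values,
 * releasing the table releases each value through a registered callback,
 * so callers never free engine objects by hand. */
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

typedef struct toyEngine {
    char *name;
    void *engine_ctx;
} toyEngine;

typedef void (*valDestructor)(void *val);

typedef struct toyTable {
    void **vals;
    size_t len;
    valDestructor val_dtor;   /* analogous to engineDictType's val destructor */
} toyTable;

static void toyEngineDispose(void *val) {   /* analogous to engineDispose() */
    toyEngine *e = val;
    free(e->name);
    free(e->engine_ctx);
    free(e);
}

static void toyTableRelease(toyTable *t) {  /* analogous to dictRelease() */
    for (size_t i = 0; i < t->len; i++)
        if (t->val_dtor) t->val_dtor(t->vals[i]);
    free(t->vals);
}

int main(void) {
    toyTable t = { .vals = malloc(sizeof(void *)), .len = 1, .val_dtor = toyEngineDispose };
    toyEngine *e = malloc(sizeof(*e));
    e->name = strdup("LUA");
    e->engine_ctx = malloc(16);
    t.vals[0] = e;
    toyTableRelease(&t);   /* frees the engine through the destructor */
    printf("released\n");
    return 0;
}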
src/functions.h

@@ -67,6 +67,9 @@ typedef struct engine {
     /* free the given function */
     void (*free_function)(void *engine_ctx, void *compiled_function);

+    /* Free the engine context. */
+    void (*free_ctx)(void *engine_ctx);
+
 } engine;

 /* Hold information about an engine.
@@ -116,5 +119,6 @@ int functionLibCreateFunction(sds name, void *function, functionLibInfo *li, sds
 int luaEngineInitEngine(void);
 int functionsInit(void);
+void functionsFree(functionsLibCtx *lib_ctx, dict *engs);

 #endif /* __FUNCTIONS_H_ */
src/lazyfree.c

@@ -72,8 +72,11 @@ void lazyFreeLuaScripts(void *args[]) {
 /* Release the functions ctx. */
 void lazyFreeFunctionsCtx(void *args[]) {
     functionsLibCtx *functions_lib_ctx = args[0];
+    dict *engs = args[1];
     size_t len = functionsLibCtxFunctionsLen(functions_lib_ctx);
     functionsLibCtxFree(functions_lib_ctx);
+    len += dictSize(engs);
+    dictRelease(engs);
     atomicDecr(lazyfree_objects, len);
     atomicIncr(lazyfreed_objects, len);
 }
@@ -247,12 +250,13 @@ void freeLuaScriptsAsync(dict *lua_scripts, list *lua_scripts_lru_list, lua_Stat
 }

 /* Free functions ctx, if the functions ctx contains enough functions, free it in async way. */
-void freeFunctionsAsync(functionsLibCtx *functions_lib_ctx) {
+void freeFunctionsAsync(functionsLibCtx *functions_lib_ctx, dict *engs) {
     if (functionsLibCtxFunctionsLen(functions_lib_ctx) > LAZYFREE_THRESHOLD) {
-        atomicIncr(lazyfree_objects, functionsLibCtxFunctionsLen(functions_lib_ctx));
-        bioCreateLazyFreeJob(lazyFreeFunctionsCtx, 1, functions_lib_ctx);
+        atomicIncr(lazyfree_objects, functionsLibCtxFunctionsLen(functions_lib_ctx) + dictSize(engs));
+        bioCreateLazyFreeJob(lazyFreeFunctionsCtx, 2, functions_lib_ctx, engs);
     } else {
         functionsLibCtxFree(functions_lib_ctx);
+        dictRelease(engs);
     }
 }
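The freeFunctionsAsync() change follows the usual lazy-free pattern: estimate how much work the free would be, then either do it inline or hand it to a background job while tracking the pending total, which now also counts the engines dict. Below is a minimal sketch of that size-threshold decision; free_now(), free_in_background(), the plain counter, and the threshold value are illustrative stand-ins, not the Redis bio/atomicvar API.

/* Small payloads are freed in place; large ones are queued for background
 * free and accounted in a pending-objects counter. */
#include <stdio.h>
#include <stddef.h>

#define LAZYFREE_THRESHOLD 64          /* same constant name as lazyfree.c uses */

static size_t lazyfree_objects = 0;    /* stand-in for the atomic counter */

static void free_now(size_t items) {
    printf("sync free of %zu items\n", items);
}

static void free_in_background(size_t items) {
    lazyfree_objects += items;         /* atomicIncr() in the real code */
    printf("queued %zu items for background free\n", items);
}

static void free_functions_and_engines(size_t num_functions, size_t num_engines) {
    if (num_functions > LAZYFREE_THRESHOLD)
        free_in_background(num_functions + num_engines);  /* count both, as the diff does */
    else
        free_now(num_functions + num_engines);
}

int main(void) {
    free_functions_and_engines(8, 2);     /* small: freed synchronously */
    free_functions_and_engines(1000, 2);  /* large: offloaded */
    return 0;
}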
src/module.c

@@ -262,6 +262,10 @@ typedef struct RedisModuleBlockedClient {
     monotime background_timer;    /* Timer tracking the start of background work */
     uint64_t background_duration; /* Current command background time duration.
                                      Used for measuring latency of blocking cmds */
+    int blocked_on_keys_explicit_unblock; /* Set to 1 only in the case of an explicit RM_Unblock on
+                                           * a client that is blocked on keys. In this case we will
+                                           * call the timeout call back from within
+                                           * moduleHandleBlockedClients which runs from the main thread */
 } RedisModuleBlockedClient;

 /* This is a list of Module Auth Contexts. Each time a Module registers a callback, a new ctx is
@@ -6515,7 +6519,8 @@ RedisModuleCallReply *RM_Call(RedisModuleCtx *ctx, const char *cmdname, const ch
         /* Duplicate relevant flags in the module client. */
         c->flags &= ~(CLIENT_READONLY|CLIENT_ASKING);
         c->flags |= ctx->client->flags & (CLIENT_READONLY|CLIENT_ASKING);
-        if (getNodeByQuery(c,c->cmd,c->argv,c->argc,NULL,&error_code) !=
+        const uint64_t cmd_flags = getCommandFlags(c);
+        if (getNodeByQuery(c,c->cmd,c->argv,c->argc,NULL,cmd_flags,&error_code) !=
             getMyClusterNode())
         {
             sds msg = NULL;
@@ -7781,7 +7786,7 @@ RedisModuleBlockedClient *moduleBlockClient(RedisModuleCtx *ctx, RedisModuleCmdF
     int islua = scriptIsRunning();
     int ismulti = server.in_exec;

-    c->bstate.module_blocked_handle = zmalloc(sizeof(RedisModuleBlockedClient));
+    c->bstate.module_blocked_handle = zcalloc(sizeof(RedisModuleBlockedClient));
     RedisModuleBlockedClient *bc = c->bstate.module_blocked_handle;
     ctx->module->blocked_clients++;
@@ -8260,7 +8265,7 @@ int RM_UnblockClient(RedisModuleBlockedClient *bc, void *privdata) {
          * argument, but better to be safe than sorry. */
         if (bc->timeout_callback == NULL) return REDISMODULE_ERR;
         if (bc->unblocked) return REDISMODULE_OK;
-        if (bc->client) moduleBlockedClientTimedOut(bc->client, 1);
+        if (bc->client) bc->blocked_on_keys_explicit_unblock = 1;
     }
     moduleUnblockClientByHandle(bc,privdata);
     return REDISMODULE_OK;
@@ -8338,6 +8343,10 @@ void moduleHandleBlockedClients(void) {
             reply_us = elapsedUs(replyTimer);
             moduleFreeContext(&ctx);
         }
+        if (c && bc->blocked_on_keys_explicit_unblock) {
+            serverAssert(bc->blocked_on_keys);
+            moduleBlockedClientTimedOut(c);
+        }
         /* Hold onto the blocked client if module auth is in progress. The reply callback is invoked
          * when the client is reprocessed. */
         if (c && clientHasModuleAuthInProgress(c)) {
@@ -8358,11 +8367,12 @@ void moduleHandleBlockedClients(void) {
         /* Update stats now that we've finished the blocking operation.
          * This needs to be out of the reply callback above given that a
          * module might not define any callback and still do blocking ops.
+         *
+         * If the module is blocked on keys updateStatsOnUnblock will be
+         * called from moduleUnblockClientOnKey
          */
-        if (c && !clientHasModuleAuthInProgress(c)) {
-            int had_errors = c->deferred_reply_errors ? !!listLength(c->deferred_reply_errors) :
-                (server.stat_total_error_replies != prev_error_replies);
-            updateStatsOnUnblock(c, bc->background_duration, reply_us, had_errors);
+        if (c && !clientHasModuleAuthInProgress(c) && !bc->blocked_on_keys) {
+            updateStatsOnUnblock(c, bc->background_duration, reply_us, server.stat_total_error_replies != prev_error_replies);
         }

         if (c != NULL) {
@@ -8417,31 +8427,19 @@ int moduleBlockedClientMayTimeout(client *c) {
  * does not need to do any cleanup. Eventually the module will call the
  * API to unblock the client and the memory will be released.
  *
- * If this function is called from a module, we handle the timeout callback
- * and the update of the unblock status in a thread-safe manner to avoid race
- * conditions with the main thread.
- * If this function is called from the main thread, we must handle the unblocking
+ * This function should only be called from the main thread, we must handle the unblocking
  * of the client synchronously. This ensures that we can reply to the client before
  * resetClient() is called. */
-void moduleBlockedClientTimedOut(client *c, int from_module) {
+void moduleBlockedClientTimedOut(client *c) {
     RedisModuleBlockedClient *bc = c->bstate.module_blocked_handle;

     /* Protect against re-processing: don't serve clients that are already
      * in the unblocking list for any reason (including RM_UnblockClient()
      * explicit call). See #6798. */
     if (bc->unblocked) return;

     RedisModuleCtx ctx;
-    int flags = REDISMODULE_CTX_BLOCKED_TIMEOUT;
-    if (from_module) flags |= REDISMODULE_CTX_THREAD_SAFE;
-    moduleCreateContext(&ctx, bc->module, flags);
+    moduleCreateContext(&ctx, bc->module, REDISMODULE_CTX_BLOCKED_TIMEOUT);
     ctx.client = bc->client;
     ctx.blocked_client = bc;
     ctx.blocked_privdata = bc->privdata;
-    long long prev_error_replies;
-    if (!from_module)
-        prev_error_replies = server.stat_total_error_replies;
+    long long prev_error_replies = server.stat_total_error_replies;

     if (bc->timeout_callback) {
         /* In theory, the user should always pass the timeout handler as an
@@ -8451,7 +8449,6 @@ void moduleBlockedClientTimedOut(client *c, int from_module) {
     moduleFreeContext(&ctx);
-    if (!from_module)
     updateStatsOnUnblock(c, bc->background_duration, 0, server.stat_total_error_replies != prev_error_replies);

     /* For timeout events, we do not want to call the disconnect callback,
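One detail worth noting in the moduleBlockClient() hunk is the switch from zmalloc() to zcalloc(): because RedisModuleBlockedClient just gained a new flag, a zeroing allocation guarantees blocked_on_keys_explicit_unblock starts at 0 without having to touch every initialization site. The sketch below shows the same idea with plain calloc() and a hypothetical struct, not the real module types.

#include <stdlib.h>
#include <stdio.h>

typedef struct sketch_blocked_client {
    int unblocked;
    int blocked_on_keys;
    int blocked_on_keys_explicit_unblock;  /* the new flag from the diff */
} sketch_blocked_client;

int main(void) {
    /* calloc-style allocation: every field, including newly added ones, is zero */
    sketch_blocked_client *bc = calloc(1, sizeof(*bc));
    if (!bc) return 1;
    printf("explicit unblock flag: %d\n", bc->blocked_on_keys_explicit_unblock);
    bc->blocked_on_keys_explicit_unblock = 1;  /* set only on an explicit unblock */
    free(bc);
    return 0;
}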
src/networking.c

@@ -2,8 +2,13 @@
  * Copyright (c) 2009-Present, Redis Ltd.
  * All rights reserved.
  *
+ * Copyright (c) 2024-present, Valkey contributors.
+ * All rights reserved.
+ *
  * Licensed under your choice of the Redis Source Available License 2.0
  * (RSALv2) or the Server Side Public License v1 (SSPLv1).
+ *
+ * Portions of this file are available under BSD3 terms; see REDISCONTRIBUTIONS for more information.
  */

 #include "server.h"
@@ -924,7 +929,7 @@ void addReplyHumanLongDouble(client *c, long double d) {
 /* Add a long long as integer reply or bulk len / multi bulk count.
  * Basically this is used to output <prefix><long long><crlf>. */
-void addReplyLongLongWithPrefix(client *c, long long ll, char prefix) {
+static void _addReplyLongLongWithPrefix(client *c, long long ll, char prefix) {
     char buf[128];
     int len;
@@ -934,38 +939,41 @@ void addReplyLongLongWithPrefix(client *c, long long ll, char prefix) {
     const int opt_hdr = ll < OBJ_SHARED_BULKHDR_LEN && ll >= 0;
     const size_t hdr_len = OBJ_SHARED_HDR_STRLEN(ll);
     if (prefix == '*' && opt_hdr) {
-        addReplyProto(c, shared.mbulkhdr[ll]->ptr, hdr_len);
+        _addReplyToBufferOrList(c, shared.mbulkhdr[ll]->ptr, hdr_len);
         return;
     } else if (prefix == '$' && opt_hdr) {
-        addReplyProto(c, shared.bulkhdr[ll]->ptr, hdr_len);
+        _addReplyToBufferOrList(c, shared.bulkhdr[ll]->ptr, hdr_len);
        return;
     } else if (prefix == '%' && opt_hdr) {
-        addReplyProto(c, shared.maphdr[ll]->ptr, hdr_len);
+        _addReplyToBufferOrList(c, shared.maphdr[ll]->ptr, hdr_len);
         return;
     } else if (prefix == '~' && opt_hdr) {
-        addReplyProto(c, shared.sethdr[ll]->ptr, hdr_len);
+        _addReplyToBufferOrList(c, shared.sethdr[ll]->ptr, hdr_len);
         return;
     }

     buf[0] = prefix;
     len = ll2string(buf+1, sizeof(buf)-1, ll);
     buf[len+1] = '\r';
     buf[len+2] = '\n';
-    addReplyProto(c, buf, len+3);
+    _addReplyToBufferOrList(c, buf, len+3);
 }

 void addReplyLongLong(client *c, long long ll) {
     if (ll == 0)
         addReply(c, shared.czero);
     else if (ll == 1)
         addReply(c, shared.cone);
-    else
-        addReplyLongLongWithPrefix(c, ll, ':');
+    else {
+        if (prepareClientToWrite(c) != C_OK) return;
+        _addReplyLongLongWithPrefix(c, ll, ':');
+    }
 }

 void addReplyAggregateLen(client *c, long length, int prefix) {
     serverAssert(length >= 0);
-    addReplyLongLongWithPrefix(c, length, prefix);
+    if (prepareClientToWrite(c) != C_OK) return;
+    _addReplyLongLongWithPrefix(c, length, prefix);
 }

 void addReplyArrayLen(client *c, long length) {
@@ -1025,8 +1033,8 @@ void addReplyNullArray(client *c) {
 /* Create the length prefix of a bulk reply, example: $2234 */
 void addReplyBulkLen(client *c, robj *obj) {
     size_t len = stringObjectLen(obj);
-    addReplyLongLongWithPrefix(c, len, '$');
+    if (prepareClientToWrite(c) != C_OK) return;
+    _addReplyLongLongWithPrefix(c, len, '$');
 }

 /* Add a Redis Object as a bulk reply */
@@ -1038,16 +1046,22 @@ void addReplyBulk(client *c, robj *obj) {
 /* Add a C buffer as bulk reply */
 void addReplyBulkCBuffer(client *c, const void *p, size_t len) {
-    addReplyLongLongWithPrefix(c, len, '$');
-    addReplyProto(c, p, len);
-    addReplyProto(c, "\r\n", 2);
+    if (prepareClientToWrite(c) != C_OK) return;
+    _addReplyLongLongWithPrefix(c, len, '$');
+    _addReplyToBufferOrList(c, p, len);
+    _addReplyToBufferOrList(c, "\r\n", 2);
 }

 /* Add sds to reply (takes ownership of sds and frees it) */
 void addReplyBulkSds(client *c, sds s) {
-    addReplyLongLongWithPrefix(c, sdslen(s), '$');
-    addReplySds(c, s);
-    addReplyProto(c, "\r\n", 2);
+    if (prepareClientToWrite(c) != C_OK) {
+        sdsfree(s);
+        return;
+    }
+    _addReplyLongLongWithPrefix(c, sdslen(s), '$');
+    _addReplyToBufferOrList(c, s, sdslen(s));
+    sdsfree(s);
+    _addReplyToBufferOrList(c, "\r\n", 2);
 }

 /* Set sds to a deferred reply (for symmetry with addReplyBulkSds it also frees the sds) */
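The networking.c hunks all follow one refactor: each public addReply* helper calls prepareClientToWrite() exactly once and then delegates to internal, underscore-prefixed helpers that only append bytes, instead of every piece of the reply re-checking the client through addReplyProto(). The sketch below mimics that split with toy types and names (sketch_client, append_bytes, add_reply_bulk_cbuffer); it illustrates the call pattern only, not the Redis reply machinery.

#include <stdio.h>
#include <string.h>

typedef struct sketch_client { char buf[256]; size_t used; int writable; } sketch_client;

/* "may I write to this client?" check, done once per public helper */
static int prepare_client_to_write(sketch_client *c) { return c->writable ? 0 : -1; }

/* internal: append only, no checks (mirrors _addReplyToBufferOrList) */
static void append_bytes(sketch_client *c, const void *p, size_t len) {
    if (c->used + len <= sizeof(c->buf)) {
        memcpy(c->buf + c->used, p, len);
        c->used += len;
    }
}

/* public: check once, then build the whole "$<len>\r\n<payload>\r\n" reply */
static void add_reply_bulk_cbuffer(sketch_client *c, const void *p, size_t len) {
    if (prepare_client_to_write(c) != 0) return;
    char hdr[32];
    int n = snprintf(hdr, sizeof(hdr), "$%zu\r\n", len);
    append_bytes(c, hdr, (size_t)n);
    append_bytes(c, p, len);
    append_bytes(c, "\r\n", 2);
}

int main(void) {
    sketch_client c = { .writable = 1 };
    add_reply_bulk_cbuffer(&c, "hello", 5);
    fwrite(c.buf, 1, c.used, stdout);   /* prints "$5\r\nhello\r\n" */
    return 0;
}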
src/rdb.c

@@ -699,7 +699,7 @@ int rdbSaveObjectType(rio *rdb, robj *o) {
         else if (o->encoding == OBJ_ENCODING_LISTPACK_EX)
             return rdbSaveType(rdb, RDB_TYPE_HASH_LISTPACK_EX);
         else if (o->encoding == OBJ_ENCODING_HT) {
-            if (hashTypeGetMinExpire(o, 0) == EB_EXPIRE_TIME_INVALID)
+            if (hashTypeGetMinExpire(o, /*accurate*/ 1) == EB_EXPIRE_TIME_INVALID)
                 return rdbSaveType(rdb, RDB_TYPE_HASH);
             else
                 return rdbSaveType(rdb, RDB_TYPE_HASH_METADATA);
@@ -947,12 +947,22 @@ ssize_t rdbSaveObject(rio *rdb, robj *o, robj *key, int dbid) {
         if ((o->encoding == OBJ_ENCODING_LISTPACK) ||
             (o->encoding == OBJ_ENCODING_LISTPACK_EX))
         {
+            /* Save min/next HFE expiration time if needed */
+            if (o->encoding == OBJ_ENCODING_LISTPACK_EX) {
+                uint64_t minExpire = hashTypeGetMinExpire(o, 0);
+                /* if invalid time then save 0 */
+                if (minExpire == EB_EXPIRE_TIME_INVALID) minExpire = 0;
+                if (rdbSaveMillisecondTime(rdb, minExpire) == -1) return -1;
+            }
             unsigned char *lp_ptr = hashTypeListpackGetLp(o);
             size_t l = lpBytes(lp_ptr);

             if ((n = rdbSaveRawString(rdb, lp_ptr, l)) == -1) return -1;
             nwritten += n;
         } else if (o->encoding == OBJ_ENCODING_HT) {
+            int hashWithMeta = 0; /* RDB_TYPE_HASH_METADATA */
             dictIterator *di = dictGetIterator(o->ptr);
             dictEntry *de;

             /* Determine the hash layout to use based on the presence of at least
@@ -960,7 +970,17 @@ ssize_t rdbSaveObject(rio *rdb, robj *o, robj *key, int dbid) {
              * RDB_TYPE_HASH_METADATA layout, including tuples of [ttl][field][value].
              * Otherwise, use the standard RDB_TYPE_HASH layout containing only
              * the tuples [field][value]. */
-            int with_ttl = (hashTypeGetMinExpire(o, 0) != EB_EXPIRE_TIME_INVALID);
+            uint64_t minExpire = hashTypeGetMinExpire(o, 1);
+
+            /* if RDB_TYPE_HASH_METADATA (Can have TTLs on fields) */
+            if (minExpire != EB_EXPIRE_TIME_INVALID) {
+                hashWithMeta = 1;
+                /* Save next field expire time of hash */
+                if (rdbSaveMillisecondTime(rdb, minExpire) == -1) {
+                    dictReleaseIterator(di);
+                    return -1;
+                }
+            }

             /* save number of fields in hash */
             if ((n = rdbSaveLen(rdb, dictSize((dict *)o->ptr))) == -1) {
@@ -975,10 +995,14 @@ ssize_t rdbSaveObject(rio *rdb, robj *o, robj *key, int dbid) {
                 sds value = dictGetVal(de);

                 /* save the TTL */
-                if (with_ttl) {
-                    uint64_t ttl = hfieldGetExpireTime(field);
-                    /* 0 is used to indicate no TTL is set for this field */
-                    if (ttl == EB_EXPIRE_TIME_INVALID) ttl = 0;
+                if (hashWithMeta) {
+                    uint64_t ttl, expiryTime = hfieldGetExpireTime(field);
+
+                    /* Saved TTL value:
+                     * - 0: Indicates no TTL. This is common case so we keep it small.
+                     * - Otherwise: TTL is relative to minExpire (with +1 to avoid 0 that already taken)
+                     */
+                    ttl = (expiryTime == EB_EXPIRE_TIME_INVALID) ? 0 : expiryTime - minExpire + 1;
                     if ((n = rdbSaveLen(rdb, ttl)) == -1) {
                         dictReleaseIterator(di);
                         return -1;
@@ -2238,12 +2262,29 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error)
         /* All pairs should be read by now */
         serverAssert(len == 0);
-    } else if (rdbtype == RDB_TYPE_HASH_METADATA) {
+    } else if (rdbtype == RDB_TYPE_HASH_METADATA ||
+               rdbtype == RDB_TYPE_HASH_METADATA_PRE_GA)
+    {
         sds value;
         hfield field;
-        uint64_t expireAt;
+        uint64_t ttl, expireAt, minExpire = EB_EXPIRE_TIME_INVALID;
         dict *dupSearchDict = NULL;

+        /* If hash with TTLs, load next/min expiration time
+         *
+         * - This value is serialized for future use-case of streaming the object
+         *   directly to FLASH (while keeping in mem its next expiration time).
+         * - It is also being used to keep only relative TTL for fields in RDB file.
+         */
+        if (rdbtype == RDB_TYPE_HASH_METADATA) {
+            minExpire = rdbLoadMillisecondTime(rdb, RDB_VERSION);
+            if (rioGetReadError(rdb)) {
+                rdbReportCorruptRDB("Hash failed loading minExpire");
+                return NULL;
+            }
+            if (minExpire > EB_EXPIRE_TIME_INVALID) {
+                rdbReportCorruptRDB("Hash read invalid minExpire value");
+            }
+        }
+
         len = rdbLoadLen(rdb, NULL);
         if (len == RDB_LENERR) return NULL;
         if (len == 0) goto emptykey;
@@ -2269,14 +2310,27 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error)
             len--;

             /* read the TTL */
-            if (rdbLoadLenByRef(rdb, NULL, &expireAt) == -1) {
+            if (rdbLoadLenByRef(rdb, NULL, &ttl) == -1) {
                 serverLog(LL_WARNING, "failed reading hash TTL");
                 decrRefCount(o);
                 if (dupSearchDict != NULL) dictRelease(dupSearchDict);
                 return NULL;
             }
+
+            if (rdbtype == RDB_TYPE_HASH_METADATA) {
+                /* Loaded TTL value:
+                 * - 0: Indicates no TTL. This is common case so we keep it small.
+                 * - Otherwise: TTL is relative to minExpire (with +1 to avoid 0 that already taken)
+                 */
+                expireAt = (ttl != 0) ? (ttl + minExpire - 1) : 0;
+            } else { /* RDB_TYPE_HASH_METADATA_PRE_GA */
+                expireAt = ttl; /* Value is absolute */
+            }
+
             if (expireAt > EB_EXPIRE_TIME_MAX) {
                 rdbReportCorruptRDB("invalid expireAt time: %llu", (unsigned long long)expireAt);
                 decrRefCount(o);
                 return NULL;
             }
@@ -2456,9 +2510,23 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error)
                rdbtype == RDB_TYPE_ZSET_LISTPACK ||
                rdbtype == RDB_TYPE_HASH_ZIPLIST ||
                rdbtype == RDB_TYPE_HASH_LISTPACK ||
+               rdbtype == RDB_TYPE_HASH_LISTPACK_EX_PRE_GA ||
                rdbtype == RDB_TYPE_HASH_LISTPACK_EX)
     {
         size_t encoded_len;
+
+        /* If Hash TTLs, Load next/min expiration time before the `encoded` */
+        if (rdbtype == RDB_TYPE_HASH_LISTPACK_EX) {
+            uint64_t minExpire = rdbLoadMillisecondTime(rdb, RDB_VERSION);
+            /* This value was serialized for future use-case of streaming the object
+             * directly to FLASH (while keeping in mem its next expiration time) */
+            UNUSED(minExpire);
+            if (rioGetReadError(rdb)) {
+                rdbReportCorruptRDB("Hash listpackex integrity check failed.");
+                return NULL;
+            }
+        }
+
         unsigned char *encoded = rdbGenericLoadStringObject(rdb, RDB_LOAD_PLAIN, &encoded_len);
         if (encoded == NULL) return NULL;
@@ -2665,11 +2733,13 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error)
                 break;
             }
             case RDB_TYPE_HASH_LISTPACK:
+            case RDB_TYPE_HASH_LISTPACK_EX_PRE_GA:
             case RDB_TYPE_HASH_LISTPACK_EX:
                 /* listpack-encoded hash with TTL requires its own struct
                  * pointed to by o->ptr */
                 o->type = OBJ_HASH;
-                if (rdbtype == RDB_TYPE_HASH_LISTPACK_EX) {
+                if ((rdbtype == RDB_TYPE_HASH_LISTPACK_EX) ||
+                    (rdbtype == RDB_TYPE_HASH_LISTPACK_EX_PRE_GA)) {
                     listpackEx *lpt = listpackExCreate();
                     lpt->lp = encoded;
                     lpt->key = key;
@@ -3733,7 +3803,8 @@ static void backgroundSaveDoneHandlerSocket(int exitcode, int bysignal) {
 void backgroundSaveDoneHandler(int exitcode, int bysignal) {
     int type = server.rdb_child_type;
     time_t save_end = time(NULL);
+    if (server.bgsave_aborted) bysignal = SIGUSR1;

     switch (server.rdb_child_type) {
     case RDB_CHILD_TYPE_DISK:
         backgroundSaveDoneHandlerDisk(exitcode, bysignal, save_end);
@@ -3749,6 +3820,7 @@ void backgroundSaveDoneHandler(int exitcode, int bysignal) {
     server.rdb_child_type = RDB_CHILD_TYPE_NONE;
     server.rdb_save_time_last = save_end - server.rdb_save_time_start;
     server.rdb_save_time_start = -1;
+    server.bgsave_aborted = 0;
     /* Possibly there are slaves waiting for a BGSAVE in order to be served
      * (the first stage of SYNC is a bulk transfer of dump.rdb) */
     updateSlavesWaitingBgsave((!bysignal && exitcode == 0) ? C_OK : C_ERR, type);
@@ -3765,6 +3837,12 @@ void killRDBChild(void) {
      * This includes:
      * - resetChildState
      * - rdbRemoveTempFile */
+
+    /* However, there's a chance the child already exited (or about to exit), and will
+     * not receive the signal, in that case it could result in success and the done
+     * handler will override some server metrics (e.g. the dirty counter) which it
+     * shouldn't (e.g. in case of FLUSHALL), or the synchronously created RDB file. */
+    server.bgsave_aborted = 1;
 }

 /* Spawn an RDB child that writes the RDB to the sockets of the slaves
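The save and load hunks above agree on one encoding rule for hash-field TTLs: the hash's minimum expiry is written once up front, each field's TTL is then stored relative to it with +1 so that 0 can keep meaning "no TTL", and loading reverses the transformation (PRE_GA payloads keep absolute times). The small self-contained check below replays that arithmetic; EB_EXPIRE_TIME_INVALID is modeled as UINT64_MAX and the timestamps are made up for illustration.

#include <stdint.h>
#include <assert.h>

#define SKETCH_EXPIRE_INVALID UINT64_MAX

/* save side: relative TTL, 0 reserved for "no TTL" */
static uint64_t encode_ttl(uint64_t expiryTime, uint64_t minExpire) {
    return (expiryTime == SKETCH_EXPIRE_INVALID) ? 0 : expiryTime - minExpire + 1;
}

/* load side: reverse the transformation */
static uint64_t decode_ttl(uint64_t ttl, uint64_t minExpire) {
    return (ttl != 0) ? (ttl + minExpire - 1) : 0;
}

int main(void) {
    uint64_t minExpire = 1722160000000;     /* hash-wide minimum expiry (ms) */
    uint64_t field_expiry = 1722160005000;  /* a field expiring 5s later */

    uint64_t stored = encode_ttl(field_expiry, minExpire);
    assert(stored == 5001);                              /* small number in the RDB file */
    assert(decode_ttl(stored, minExpire) == field_expiry);

    assert(encode_ttl(SKETCH_EXPIRE_INVALID, minExpire) == 0);  /* no TTL stays 0 */
    assert(decode_ttl(0, minExpire) == 0);
    return 0;
}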
src/rdb.h

@@ -73,12 +73,14 @@
 #define RDB_TYPE_STREAM_LISTPACKS_2 19
 #define RDB_TYPE_SET_LISTPACK 20
 #define RDB_TYPE_STREAM_LISTPACKS_3 21
-#define RDB_TYPE_HASH_METADATA 22
-#define RDB_TYPE_HASH_LISTPACK_EX 23
+#define RDB_TYPE_HASH_METADATA_PRE_GA 22    /* Hash with HFEs. Doesn't attach min TTL at start (7.4 RC) */
+#define RDB_TYPE_HASH_LISTPACK_EX_PRE_GA 23 /* Hash LP with HFEs. Doesn't attach min TTL at start (7.4 RC) */
+#define RDB_TYPE_HASH_METADATA 24           /* Hash with HFEs. Attach min TTL at start */
+#define RDB_TYPE_HASH_LISTPACK_EX 25        /* Hash LP with HFEs. Attach min TTL at start */
 /* NOTE: WHEN ADDING NEW RDB TYPE, UPDATE rdbIsObjectType(), and rdb_type_string[] */

 /* Test if a type is an object type. */
-#define rdbIsObjectType(t) (((t) >= 0 && (t) <= 7) || ((t) >= 9 && (t) <= 23))
+#define rdbIsObjectType(t) (((t) >= 0 && (t) <= 7) || ((t) >= 9 && (t) <= 25))

 /* Special RDB opcodes (saved/loaded with rdbSaveType/rdbLoadType). */
 #define RDB_OPCODE_SLOT_INFO 244 /* Individual slot info, such as slot id and size (cluster mode only). */
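Since rdbIsObjectType() is a plain range check, widening the upper bound to 25 is what makes the two new GA type ids loadable. A quick standalone sanity check, with the macro copied verbatim from the hunk above:

#include <assert.h>

#define rdbIsObjectType(t) (((t) >= 0 && (t) <= 7) || ((t) >= 9 && (t) <= 25))

int main(void) {
    assert(rdbIsObjectType(22));   /* RDB_TYPE_HASH_METADATA_PRE_GA */
    assert(rdbIsObjectType(24));   /* RDB_TYPE_HASH_METADATA */
    assert(rdbIsObjectType(25));   /* RDB_TYPE_HASH_LISTPACK_EX */
    assert(!rdbIsObjectType(8));   /* gap in the range, as before */
    assert(!rdbIsObjectType(26));  /* not yet a valid object type */
    return 0;
}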
src/redis-check-rdb.c

@@ -26,6 +26,7 @@ struct {
     unsigned long keys;            /* Number of keys processed. */
     unsigned long expires;         /* Number of keys with an expire. */
     unsigned long already_expired; /* Number of keys already expired. */
+    unsigned long subexpires;      /* Number of keys with subexpires */
     int doing;                     /* The state while reading the RDB. */
     int error_set;                 /* True if error is populated. */
     char error[1024];
@@ -80,6 +81,8 @@ char *rdb_type_string[] = {
     "stream-v2",
     "set-listpack",
     "stream-v3",
+    "hash-hashtable-md-pre-release",
+    "hash-listpack-md-pre-release",
     "hash-hashtable-md",
     "hash-listpack-md",
 };
@@ -89,6 +92,7 @@ void rdbShowGenericInfo(void) {
     printf("[info] %lu keys read\n", rdbstate.keys);
     printf("[info] %lu expires\n", rdbstate.expires);
     printf("[info] %lu already expired\n", rdbstate.already_expired);
+    printf("[info] %lu subexpires\n", rdbstate.subexpires);
 }

 /* Called on RDB errors. Provides details about the RDB and the offset
@@ -339,6 +343,10 @@ int redis_check_rdb(char *rdbfilename, FILE *fp) {
             if (expiretime != -1 && expiretime < now) rdbstate.already_expired++;
             if (expiretime != -1) rdbstate.expires++;
+            /* If hash with HFEs then with expiration on fields then need to count it */
+            if ((val->type == OBJ_HASH) &&
+                (hashTypeGetMinExpire(val, 1) != EB_EXPIRE_TIME_INVALID))
+                rdbstate.subexpires++;
             rdbstate.key = NULL;
             decrRefCount(key);
             decrRefCount(val);
src/replication.c

@@ -5,6 +5,8 @@
  *
  * Licensed under your choice of the Redis Source Available License 2.0
  * (RSALv2) or the Server Side Public License v1 (SSPLv1).
+ *
+ * Portions of this file are available under BSD3 terms; see REDISCONTRIBUTIONS for more information.
  */
@@ -506,6 +508,10 @@ void replicationFeedSlaves(list *slaves, int dictid, robj **argv, int argc) {
 void showLatestBacklog(void) {
     if (server.repl_backlog == NULL) return;
     if (listLength(server.repl_buffer_blocks) == 0) return;
+    if (server.hide_user_data_from_log) {
+        serverLog(LL_NOTICE, "hide-user-data-from-log is on, skip logging backlog content to avoid spilling PII.");
+        return;
+    }

     size_t dumplen = 256;
     if (server.repl_backlog->histlen < (long long) dumplen)
@@ -535,16 +541,6 @@ void showLatestBacklog(void) {
  * to our sub-slaves. */
 #include <ctype.h>
 void replicationFeedStreamFromMasterStream(char *buf, size_t buflen) {
-    /* Debugging: this is handy to see the stream sent from master
-     * to slaves. Disabled with if(0). */
-    if (0) {
-        printf("%zu:", buflen);
-        for (size_t j = 0; j < buflen; j++) {
-            printf("%c", isprint(buf[j]) ? buf[j] : '.');
-        }
-        printf("\n");
-    }
-
     /* There must be replication backlog if having attached slaves. */
     if (listLength(server.slaves)) serverAssert(server.repl_backlog != NULL);
     if (server.repl_backlog) {
@@ -1572,10 +1568,13 @@ void rdbPipeReadHandler(struct aeEventLoop *eventLoop, int fd, void *clientData,
             if (stillAlive == 0) {
                 serverLog(LL_WARNING, "Diskless rdb transfer, last replica dropped, killing fork child.");
+                /* Avoid deleting events after killRDBChild as it may trigger new bgsaves for other replicas. */
+                aeDeleteFileEvent(server.el, server.rdb_pipe_read, AE_READABLE);
                 killRDBChild();
+                break;
             }
             /* Remove the pipe read handler if at least one write handler was set. */
-            if (server.rdb_pipe_numconns_writing || stillAlive == 0) {
+            else if (server.rdb_pipe_numconns_writing) {
                 aeDeleteFileEvent(server.el, server.rdb_pipe_read, AE_READABLE);
                 break;
             }
@@ -1599,6 +1598,9 @@ void updateSlavesWaitingBgsave(int bgsaveerr, int type) {
     while((ln = listNext(&li))) {
         client *slave = ln->value;

+        /* We can get here via freeClient()->killRDBChild()->checkChildrenDone(). skip disconnected slaves. */
+        if (!slave->conn) continue;
+
         if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_END) {
             struct redis_stat buf;
src/script.c

@@ -470,8 +470,9 @@ static int scriptVerifyClusterState(scriptRunCtx *run_ctx, client *c, client *or
         /* Duplicate relevant flags in the script client. */
         c->flags &= ~(CLIENT_READONLY|CLIENT_ASKING);
         c->flags |= original_c->flags & (CLIENT_READONLY|CLIENT_ASKING);
+        const uint64_t cmd_flags = getCommandFlags(c);
         int hashslot = -1;
-        if (getNodeByQuery(c, c->cmd, c->argv, c->argc, &hashslot, &error_code) != getMyClusterNode()) {
+        if (getNodeByQuery(c, c->cmd, c->argv, c->argc, &hashslot, cmd_flags, &error_code) != getMyClusterNode()) {
             if (error_code == CLUSTER_REDIR_DOWN_RO_STATE) {
                 *err = sdsnew("Script attempted to execute a write command while the "
src/script.h

@@ -74,7 +74,6 @@ extern scriptFlag scripts_flags_def[];
 void luaEnvInit(void);
 lua_State *createLuaState(void);
 dict *getLuaScripts(void);
-uint64_t scriptFlagsToCmdFlags(uint64_t cmd_flags, uint64_t script_flags);
 int scriptPrepareForRun(scriptRunCtx *r_ctx, client *engine_client, client *caller,
                         const char *funcname, uint64_t script_flags, int ro);
 void scriptResetRun(scriptRunCtx *r_ctx);
src/script_lua.c

@@ -1650,23 +1650,6 @@ void luaCallFunction(scriptRunCtx* run_ctx, lua_State *lua, robj** keys, size_t
         err = lua_pcall(lua, 2, 1, -4);
     }

-    /* Call the Lua garbage collector from time to time to avoid a
-     * full cycle performed by Lua, which adds too latency.
-     *
-     * The call is performed every LUA_GC_CYCLE_PERIOD executed commands
-     * (and for LUA_GC_CYCLE_PERIOD collection steps) because calling it
-     * for every command uses too much CPU. */
-    #define LUA_GC_CYCLE_PERIOD 50
-    {
-        static long gc_count = 0;
-
-        gc_count++;
-        if (gc_count == LUA_GC_CYCLE_PERIOD) {
-            lua_gc(lua, LUA_GCSTEP, LUA_GC_CYCLE_PERIOD);
-            gc_count = 0;
-        }
-    }
-
     if (err) {
         /* Error object is a table of the following format:
          * {err='<error msg>', source='<source file>', line=<line>}
@@ -1709,3 +1692,21 @@
 unsigned long luaMemory(lua_State *lua) {
     return lua_gc(lua, LUA_GCCOUNT, 0) * 1024LL;
 }
+
+/* Call the Lua garbage collector from time to time to avoid a
+ * full cycle performed by Lua, which adds too latency.
+ *
+ * The call is performed every LUA_GC_CYCLE_PERIOD executed commands
+ * (and for LUA_GC_CYCLE_PERIOD collection steps) because calling it
+ * for every command uses too much CPU.
+ *
+ * Each script VM / State (Eval and Functions) maintains its own unique `gc_count`
+ * to control GC independently. */
+#define LUA_GC_CYCLE_PERIOD 50
+void luaGC(lua_State *lua, int *gc_count) {
+    (*gc_count)++;
+    if (*gc_count >= LUA_GC_CYCLE_PERIOD) {
+        lua_gc(lua, LUA_GCSTEP, LUA_GC_CYCLE_PERIOD);
+        *gc_count = 0;
+    }
+}
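Extracting luaGC() keeps the periodic GC-step logic but moves the counter out of a function-local static and into the caller, so the EVAL VM and the Functions VM each drive their own cycle. The sketch below reproduces that counter behaviour with a stubbed GC step so it builds without the Lua headers; sketch_lua_State and gc_step() are stand-ins for the real lua_State and lua_gc() call.

#include <stdio.h>

#define LUA_GC_CYCLE_PERIOD 50

typedef struct sketch_lua_State sketch_lua_State;   /* opaque stand-in */

static void gc_step(sketch_lua_State *lua) {
    (void)lua;
    puts("gc step");   /* lua_gc(lua, LUA_GCSTEP, LUA_GC_CYCLE_PERIOD) in the real code */
}

static void sketch_luaGC(sketch_lua_State *lua, int *gc_count) {
    (*gc_count)++;
    if (*gc_count >= LUA_GC_CYCLE_PERIOD) {
        gc_step(lua);
        *gc_count = 0;
    }
}

int main(void) {
    int eval_gc_count = 0, functions_gc_count = 0;   /* one counter per VM */
    for (int i = 0; i < 120; i++) sketch_luaGC(NULL, &eval_gc_count);       /* steps twice */
    for (int i = 0; i < 10; i++)  sketch_luaGC(NULL, &functions_gc_count);  /* never steps */
    return 0;
}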
src/script_lua.h

@@ -61,6 +61,6 @@ void luaCallFunction(scriptRunCtx* r_ctx, lua_State *lua, robj** keys, size_t nk
 void luaExtractErrorInformation(lua_State *lua, errorInfo *err_info);
 void luaErrorInformationDiscard(errorInfo *err_info);
 unsigned long luaMemory(lua_State *lua);
+void luaGC(lua_State *lua, int *gc_count);

 #endif /* __SCRIPT_LUA_H_ */
src/server.c

@@ -2524,7 +2524,7 @@ void resetServerStats(void) {
     server.stat_numcommands = 0;
     server.stat_numconnections = 0;
     server.stat_expiredkeys = 0;
-    server.stat_expired_hash_fields = 0;
+    server.stat_expired_subkeys = 0;
     server.stat_expired_stale_perc = 0;
     server.stat_expired_time_cap_reached_count = 0;
     server.stat_expire_cycle_time_used = 0;
@@ -3949,7 +3949,7 @@ int processCommand(client *c) {
         }
     }

-    uint64_t cmd_flags = getCommandFlags(c);
+    const uint64_t cmd_flags = getCommandFlags(c);

     int is_read_command = (cmd_flags & CMD_READONLY) ||
                           (c->cmd->proc == execCommand && (c->mstate.cmd_flags & CMD_READONLY));
@@ -4004,7 +4004,7 @@ int processCommand(client *c) {
     {
         int error_code;
         clusterNode *n = getNodeByQuery(c, c->cmd, c->argv, c->argc,
-                                        &c->slot, &error_code);
+                                        &c->slot, cmd_flags, &error_code);
         if (n == NULL || !clusterNodeIsMyself(n)) {
             if (c->cmd->proc == execCommand) {
                 discardTransaction(c);
@@ -5877,7 +5877,7 @@ sds genRedisInfoString(dict *section_dict, int all_sections, int everything) {
             "sync_full:%lld\r\n", server.stat_sync_full,
             "sync_partial_ok:%lld\r\n", server.stat_sync_partial_ok,
             "sync_partial_err:%lld\r\n", server.stat_sync_partial_err,
-            "expired_hash_fields:%lld\r\n", server.stat_expired_hash_fields,
+            "expired_subkeys:%lld\r\n", server.stat_expired_subkeys,
             "expired_keys:%lld\r\n", server.stat_expiredkeys,
             "expired_stale_perc:%.2f\r\n", server.stat_expired_stale_perc * 100,
             "expired_time_cap_reached_count:%lld\r\n", server.stat_expired_time_cap_reached_count,
@@ -6124,7 +6124,7 @@ sds genRedisInfoString(dict *section_dict, int all_sections, int everything) {
         if (keys || vkeys) {
             info = sdscatprintf(info,
-                "db%d:keys=%lld,expires=%lld,avg_ttl=%lld,hashes_with_expiry_fields=%lld\r\n",
+                "db%d:keys=%lld,expires=%lld,avg_ttl=%lld,subexpiry=%lld\r\n",
                 j, keys, vkeys, server.db[j].avg_ttl, hexpires);
         }
     }
src/server.h

@@ -2,8 +2,13 @@
  * Copyright (c) 2009-Present, Redis Ltd.
  * All rights reserved.
  *
+ * Copyright (c) 2024-present, Valkey contributors.
+ * All rights reserved.
+ *
  * Licensed under your choice of the Redis Source Available License 2.0
  * (RSALv2) or the Server Side Public License v1 (SSPLv1).
+ *
+ * Portions of this file are available under BSD3 terms; see REDISCONTRIBUTIONS for more information.
  */

 #ifndef __REDIS_H
@@ -1651,7 +1656,7 @@ struct redisServer {
     long long stat_numcommands;     /* Number of processed commands */
     long long stat_numconnections;  /* Number of connections received */
     long long stat_expiredkeys;     /* Number of expired keys */
-    long long stat_expired_hash_fields; /* Number of expired hash-fields */
+    long long stat_expired_subkeys; /* Number of expired subkeys (Currently only hash-fields) */
     double stat_expired_stale_perc; /* Percentage of keys probably expired */
     long long stat_expired_time_cap_reached_count; /* Early expire cycle stops.*/
     long long stat_expire_cycle_time_used; /* Cumulative microseconds used. */
@@ -1731,6 +1736,7 @@ struct redisServer {
     /* Configuration */
     int verbosity;                  /* Loglevel in redis.conf */
+    int hide_user_data_from_log;    /* In the event of an assertion failure, hide command arguments from the operator */
     int maxidletime;                /* Client timeout in seconds */
     int tcpkeepalive;               /* Set SO_KEEPALIVE if non-zero. */
     int active_expire_enabled;      /* Can be disabled for testing purposes. */
@@ -1807,6 +1813,7 @@ struct redisServer {
     long long dirty_before_bgsave;  /* Used to restore dirty on failed BGSAVE */
     long long rdb_last_load_keys_expired;  /* number of expired keys when loading RDB */
     long long rdb_last_load_keys_loaded;   /* number of loaded keys when loading RDB */
+    int bgsave_aborted;             /* Set when killing a child, to treat it as aborted even if it succeeds. */
     struct saveparam *saveparams;   /* Save points array for RDB */
     int saveparamslen;              /* Number of saving points */
     char *rdb_filename;             /* Name of RDB file */
@@ -2518,7 +2525,7 @@ void moduleCallCommandUnblockedHandler(client *c);
 int isModuleClientUnblocked(client *c);
 void unblockClientFromModule(client *c);
 void moduleHandleBlockedClients(void);
-void moduleBlockedClientTimedOut(client *c, int from_module);
+void moduleBlockedClientTimedOut(client *c);
 void modulePipeReadable(aeEventLoop *el, int fd, void *privdata, int mask);
 size_t moduleCount(void);
 void moduleAcquireGIL(void);
@@ -2617,8 +2624,7 @@ void addReplyErrorArity(client *c);
 void addReplyErrorExpireTime(client *c);
 void addReplyStatus(client *c, const char *status);
 void addReplyDouble(client *c, double d);
-void addReplyLongLongWithPrefix(client *c, long long ll, char prefix);
 void addReplyBigNum(client *c, const char *num, size_t len);
 void addReplyHumanLongDouble(client *c, long double d);
 void addReplyLongLong(client *c, long long ll);
 void addReplyArrayLen(client *c, long length);
@@ -3467,7 +3473,7 @@ int ldbPendingChildren(void);
 void luaLdbLineHook(lua_State *lua, lua_Debug *ar);
 void freeLuaScriptsSync(dict *lua_scripts, list *lua_scripts_lru_list, lua_State *lua);
 void freeLuaScriptsAsync(dict *lua_scripts, list *lua_scripts_lru_list, lua_State *lua);
-void freeFunctionsAsync(functionsLibCtx *lib_ctx);
+void freeFunctionsAsync(functionsLibCtx *functions_lib_ctx, dict *engines);
 int ldbIsEnabled(void);
 void ldbLog(sds entry);
 void ldbLogRedisReply(char *reply);
src/t_hash.c

@@ -414,6 +414,7 @@ void listpackExExpire(redisDb *db, robj *o, ExpireInfo *info) {
             break;

         propagateHashFieldDeletion(db, ((listpackEx *)o->ptr)->key, (char *)((fref) ? fref : intbuf), flen);
+        server.stat_expired_subkeys++;
         ptr = lpNext(lpt->lp, ptr);
@@ -545,6 +546,7 @@ SetExRes hashTypeSetExpiryListpack(HashTypeSetEx *ex, sds field,
     if (unlikely(checkAlreadyExpired(expireAt))) {
         propagateHashFieldDeletion(ex->db, ex->key->ptr, field, sdslen(field));
         hashTypeDelete(ex->hashObj, field, 1);
+        server.stat_expired_subkeys++;
         ex->fieldDeleted++;
         return HSETEX_DELETED;
     }
@@ -758,6 +760,7 @@ GetFieldRes hashTypeGetValue(redisDb *db, robj *o, sds field, unsigned char **vs
         /* delete the field and propagate the deletion */
         serverAssert(hashTypeDelete(o, field, 1) == 1);
         propagateHashFieldDeletion(db, key, field, sdslen(field));
+        server.stat_expired_subkeys++;
         /* If the field is the last one in the hash, then the hash will be deleted */
         res = GETF_EXPIRED;
@@ -1042,6 +1045,7 @@ SetExRes hashTypeSetExpiryHT(HashTypeSetEx *exInfo, sds field, uint64_t expireAt
         /* replicas should not initiate deletion of fields */
         propagateHashFieldDeletion(exInfo->db, exInfo->key->ptr, field, sdslen(field));
         hashTypeDelete(exInfo->hashObj, field, 1);
+        server.stat_expired_subkeys++;
         exInfo->fieldDeleted++;
         return HSETEX_DELETED;
     }
@@ -1167,11 +1171,11 @@ void hashTypeSetExDone(HashTypeSetEx *ex) {
     if (ex->fieldDeleted && hashTypeLength(ex->hashObj, 0) == 0) {
         dbDelete(ex->db, ex->key);
         signalModifiedKey(ex->c, ex->db, ex->key);
-        notifyKeyspaceEvent(NOTIFY_HASH, "hexpired", ex->key, ex->db->id);
+        notifyKeyspaceEvent(NOTIFY_HASH, "hdel", ex->key, ex->db->id);
         notifyKeyspaceEvent(NOTIFY_GENERIC, "del", ex->key, ex->db->id);
     } else {
         signalModifiedKey(ex->c, ex->db, ex->key);
-        notifyKeyspaceEvent(NOTIFY_HASH, ex->fieldDeleted ? "hexpired" : "hexpire",
+        notifyKeyspaceEvent(NOTIFY_HASH, ex->fieldDeleted ? "hdel" : "hexpire",
                             ex->key, ex->db->id);

         /* If minimum HFE of the hash is smaller than expiration time of the
@@ -1833,7 +1837,6 @@ static uint64_t hashTypeExpire(robj *o, ExpireCtx *expireCtx, int updateGlobalHF
             .itemsExpired = 0};

         listpackExExpire(db, o, &info);
-        server.stat_expired_hash_fields += info.itemsExpired;
         keystr = ((listpackEx *)o->ptr)->key;
     } else {
         serverAssert(o->encoding == OBJ_ENCODING_HT);
@@ -1914,8 +1917,9 @@ static int hashTypeExpireIfNeeded(redisDb *db, robj *o) {
 /* Return the next/minimum expiry time of the hash-field.
  * accurate=1 - Return the exact time by looking into the object DS.
- * accurate=0 - Return the minimum expiration time maintained in expireMeta which
- *              might not be accurate due to optimization reasons.
+ * accurate=0 - Return the minimum expiration time maintained in expireMeta
+ *              (Verify it is not trash before using it) which might not be
+ *              accurate due to optimization reasons.
  *
  * If not found, return EB_EXPIRE_TIME_INVALID
  */
@@ -2887,7 +2891,7 @@ static ExpireAction onFieldExpire(eItem item, void *ctx) {
     dictExpireMetadata *dictExpireMeta = (dictExpireMetadata *)dictMetadata(d);
     propagateHashFieldDeletion(expCtx->db, dictExpireMeta->key, hf, hfieldlen(hf));
     serverAssert(hashTypeDelete(expCtx->hashObj, hf, 0) == 1);
-    server.stat_expired_hash_fields++;
+    server.stat_expired_subkeys++;
     return ACT_REMOVE_EXP_ITEM;
 }
tests/helpers/fake_redis_node.tcl

@@ -53,6 +53,8 @@ proc accept {sock host port} {
     close $sock
 }

-socket -server accept $port
+set sockfd [socket -server accept -myaddr 127.0.0.1 $port]
 after 5000 set done timeout
 vwait done
+close $sockfd
tests/integration/failover.tcl

@@ -33,6 +33,12 @@ start_server {overrides {save {}}} {
     $node_2 replicaof $node_0_host $node_0_port
     wait_for_sync $node_1
     wait_for_sync $node_2

+    # wait for both replicas to be online from the perspective of the master
+    wait_for_condition 50 100 {
+        [string match "*slave0:*,state=online*slave1:*,state=online*" [$node_0 info replication]]
+    } else {
+        fail "replica didn't online in time"
+    }
 }

 test {failover command fails with invalid host} {
tests/integration/logging.tcl

@@ -123,4 +123,32 @@ if {$backtrace_supported} {
         }
     }
 }

+# Tests that when `hide-user-data-from-log` is enabled, user information from logs is hidden
+if {$backtrace_supported} {
+    if {!$::valgrind} {
+        set server_path [tmpdir server5.log]
+        start_server [list overrides [list dir $server_path crash-memcheck-enabled no]] {
+            test "Crash report generated on DEBUG SEGFAULT with user data hidden when 'hide-user-data-from-log' is enabled" {
+                r config set hide-user-data-from-log yes
+                catch {r debug segfault}
+                check_log_backtrace_for_debug "*crashed by signal*"
+                check_log_backtrace_for_debug "*argv*0*: *debug*"
+                check_log_backtrace_for_debug "*argv*1*: *redacted*"
+                check_log_backtrace_for_debug "*hide-user-data-from-log is on, skip logging stack content to avoid spilling PII*"
+            }
+        }
+    }
+
+    set server_path [tmpdir server6.log]
+    start_server [list overrides [list dir $server_path use-exit-on-panic yes crash-memcheck-enabled no]] {
+        test "Generate stacktrace on assertion with user data hidden when 'hide-user-data-from-log' is enabled" {
+            r config set hide-user-data-from-log yes
+            catch {r debug assert}
+            check_log_backtrace_for_debug "*ASSERTION FAILED*"
+            check_log_backtrace_for_debug "*argv*0* = *debug*"
+            check_log_backtrace_for_debug "*argv*1* = *redacted*"
+        }
+    }
+}
tests/integration/rdb.tcl

@@ -442,15 +442,21 @@ start_server [list overrides [list "dir" $server_path]] {
         restart_server 0 true false
         wait_done_loading r
-        assert_equal [lsort [r hgetall key]] "1 2 3 a b c"
+        # Never be sure when active-expire kicks in into action
+        wait_for_condition 100 10 {
+            [lsort [r hgetall key]] == "1 2 3 a b c"
+        } else {
+            fail "hgetall of key is not as expected"
+        }
         assert_equal [r hpexpiretime key FIELDS 3 a b c] {2524600800000 65755674080852 -1}
         assert_equal [s rdb_last_load_keys_loaded] 1

-        # wait until expired_hash_fields equals 2
+        # wait until expired_subkeys equals 2
         wait_for_condition 10 100 {
-            [s expired_hash_fields] == 2
+            [s expired_subkeys] == 2
         } else {
-            fail "Value of expired_hash_fields is not as expected"
+            fail "Value of expired_subkeys is not as expected"
         }
     }
 }
@@ -562,9 +568,9 @@ foreach {type lp_entries} {listpack 512 dict 0} {
             # wait at most 2 secs to make sure 'c' and 'd' will active-expire
             wait_for_condition 20 100 {
-                [s expired_hash_fields] == 2
+                [s expired_subkeys] == 2
             } else {
-                fail "expired hash fields is [s expired_hash_fields] != 2"
+                fail "expired hash fields is [s expired_subkeys] != 2"
             }
             assert_equal [s rdb_last_load_keys_loaded] 1
@@ -597,7 +603,7 @@ foreach {type lp_entries} {listpack 512 dict 0} {
             after 500
             assert_equal [s rdb_last_load_keys_loaded] 1
-            assert_equal [s expired_hash_fields] 0
+            assert_equal [s expired_subkeys] 0
             # hgetall will lazy expire fields, so it's only called after the stat asserts
             assert_equal [lsort [r hgetall key]] "1 2 5 6 a b e f"