ruanhaishen / redis · Commits

Commit 6a10146f
Authored Mar 21, 2023 by Vitaly Arbuzov

Refactor rdb.c to use db iterator to iterate over keys

parent 41fab4ec

Changes 4
src/rdb.c
@@ -1293,7 +1293,6 @@ werr:
 }
 
 ssize_t rdbSaveDb(rio *rdb, int dbid, int rdbflags, long *key_counter) {
-    dictIterator *di = NULL;
     dictEntry *de;
     ssize_t written = 0;
     ssize_t res;
@@ -1320,61 +1319,52 @@ ssize_t rdbSaveDb(rio *rdb, int dbid, int rdbflags, long *key_counter) {
     if ((res = rdbSaveLen(rdb, expires_size)) < 0) goto werr;
     written += res;
 
-    dict *d;
     dbIterator dbit;
     dbIteratorInit(&dbit, db);
-    while ((d = dbIteratorNextDict(&dbit))) {
-        if (!dictSize(d)) continue;
+    int last_slot = -1;
+    /* Iterate this DB writing every entry */
+    while ((de = dbIteratorNext(&dbit)) != NULL) {
         /* Save slot info. */
-        if (server.cluster_enabled) {
+        if (server.cluster_enabled && dbit.cur_slot != last_slot) {
             serverAssert(dbit.cur_slot >= 0 && dbit.cur_slot < CLUSTER_SLOTS);
             if ((res = rdbSaveType(rdb, RDB_OPCODE_SLOT_INFO)) < 0) goto werr;
             written += res;
             if ((res = rdbSaveLen(rdb, dbit.cur_slot)) < 0) goto werr;
             written += res;
-            if ((res = rdbSaveLen(rdb, dictSize(d))) < 0) goto werr;
+            if ((res = rdbSaveLen(rdb, dictSize(db->dict[dbit.cur_slot]))) < 0) goto werr;
             written += res;
+            last_slot = dbit.cur_slot;
         }
-        di = dictGetSafeIterator(d);
-        /* Iterate this DB writing every entry */
-        while ((de = dictNext(di)) != NULL) {
-            sds keystr = dictGetKey(de);
-            robj key, *o = dictGetVal(de);
-            long long expire;
-            size_t rdb_bytes_before_key = rdb->processed_bytes;
-
-            initStaticStringObject(key, keystr);
-            expire = getExpire(db, &key);
-            if ((res = rdbSaveKeyValuePair(rdb, &key, o, expire, dbid)) < 0) goto werr;
-            written += res;
-
-            /* In fork child process, we can try to release memory back to the
-             * OS and possibly avoid or decrease COW. We give the dismiss
-             * mechanism a hint about an estimated size of the object we stored. */
-            size_t dump_size = rdb->processed_bytes - rdb_bytes_before_key;
-            if (server.in_fork_child) dismissObject(o, dump_size);
-
-            /* Update child info every 1 second (approximately).
-             * in order to avoid calling mstime() on each iteration, we will
-             * check the diff every 1024 keys */
-            if (((*key_counter)++ & 1023) == 0) {
-                long long now = mstime();
-                if (now - info_updated_time >= 1000) {
-                    sendChildInfo(CHILD_INFO_TYPE_CURRENT_INFO, *key_counter, pname);
-                    info_updated_time = now;
-                }
-            }
-        }
-        dictReleaseIterator(di);
-        di = NULL;
+        sds keystr = dictGetKey(de);
+        robj key, *o = dictGetVal(de);
+        long long expire;
+        size_t rdb_bytes_before_key = rdb->processed_bytes;
+
+        initStaticStringObject(key, keystr);
+        expire = getExpire(db, &key);
+        if ((res = rdbSaveKeyValuePair(rdb, &key, o, expire, dbid)) < 0) goto werr;
+        written += res;
+
+        /* In fork child process, we can try to release memory back to the
+         * OS and possibly avoid or decrease COW. We give the dismiss
+         * mechanism a hint about an estimated size of the object we stored. */
+        size_t dump_size = rdb->processed_bytes - rdb_bytes_before_key;
+        if (server.in_fork_child) dismissObject(o, dump_size);
+
+        /* Update child info every 1 second (approximately).
+         * in order to avoid calling mstime() on each iteration, we will
+         * check the diff every 1024 keys */
+        if (((*key_counter)++ & 1023) == 0) {
+            long long now = mstime();
+            if (now - info_updated_time >= 1000) {
+                sendChildInfo(CHILD_INFO_TYPE_CURRENT_INFO, *key_counter, pname);
+                info_updated_time = now;
+            }
+        }
     }
     return written;
 
 werr:
-    if (di) dictReleaseIterator(di);
     return -1;
 }
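Note on the change above: rdbSaveDb previously walked each slot's dict via dbIteratorNextDict() and a nested dictIterator, whereas after the refactor it pulls entries straight from dbIteratorNext() and writes a slot header whenever dbit.cur_slot differs from the last slot it emitted. A minimal, self-contained sketch of that slot-boundary pattern (toy types and data, not the Redis API) might look like this:

#include <stdio.h>

/* Toy stand-in for the db iterator: a flat stream of (slot, key) pairs,
 * already grouped by slot, as the iterator would yield them. */
typedef struct { int slot; const char *key; } toy_entry;

int main(void) {
    toy_entry entries[] = {
        {0, "a"}, {0, "b"}, {5, "c"}, {9, "d"}, {9, "e"},
    };
    int n = sizeof(entries) / sizeof(entries[0]);
    int last_slot = -1; /* same sentinel the patch uses */

    for (int i = 0; i < n; i++) {
        /* Emit a per-slot header only when the slot changes, mirroring the
         * RDB_OPCODE_SLOT_INFO block guarded by cur_slot != last_slot. */
        if (entries[i].slot != last_slot) {
            printf("slot-info: slot=%d\n", entries[i].slot);
            last_slot = entries[i].slot;
        }
        printf("  key=%s\n", entries[i].key);
    }
    return 0;
}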
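The retained comments also describe a throttling trick: mstime() is consulted only once per 1024 keys, and child info is sent only when at least a second has passed since the last report. A generic sketch of that amortized clock-check pattern, assuming a hypothetical now_ms() helper in place of Redis' mstime():

#include <stdio.h>
#include <time.h>

/* Hypothetical stand-in for mstime(): wall-clock time in milliseconds. */
static long long now_ms(void) {
    struct timespec ts;
    clock_gettime(CLOCK_REALTIME, &ts);
    return (long long)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

int main(void) {
    long long last_report = now_ms();
    long counter = 0;

    for (long i = 0; i < 10 * 1000 * 1000; i++) {
        /* ... per-item work would happen here ... */

        /* Check the clock only once every 1024 iterations: (counter++ & 1023)
         * is zero once per 1024 increments, so the comparatively expensive
         * time lookup is amortized across many items. */
        if ((counter++ & 1023) == 0) {
            long long now = now_ms();
            if (now - last_report >= 1000) {
                printf("processed %ld items\n", counter);
                last_report = now;
            }
        }
    }
    printf("done after %ld iterations\n", counter);
    return 0;
}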
src/script.c
@@ -451,7 +451,7 @@ static int scriptVerifyClusterState(scriptRunCtx *run_ctx, client *c, client *or
             original_c->slot = hashslot;
         } else if (original_c->slot != hashslot) {
             *err = sdsnew("Script attempted to access keys that do not hash to "
                           "the same slot");
             return C_ERR;
         }
     }
src/server.c
@@ -398,7 +398,7 @@ int dictExpandAllowed(size_t moreMem, double usedRatio) {
 void dictRehashingStarted(dict *d) {
     if (!server.cluster_enabled || !server.activerehashing) return;
     /* Safety check against queue overflow. */
-    if (listLength(server.db[0].rehashing) > INCREMENTAL_REHASHING_MAX_QUEUE_SIZE) return;
+    if (listLength(server.db[0].rehashing) > CLUSTER_SLOTS) return;
     listAddNodeTail(server.db[0].rehashing, d);
 }
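Worth noting: the server.h hunk further down removes INCREMENTAL_REHASHING_MAX_QUEUE_SIZE, which was defined as (1024*16) = 16384, and CLUSTER_SLOTS in Redis is also 16384, so switching the guard to CLUSTER_SLOTS keeps the cap on the db[0].rehashing list numerically identical while dropping the now-redundant define.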
@@ -4149,6 +4149,7 @@ int processCommand(client *c) {
         blockPostponeClient(c);
         return C_OK;
     }
 
     /* Exec the command */
     if (c->flags & CLIENT_MULTI &&
         c->cmd->proc != execCommand &&
@@ -4167,6 +4168,7 @@ int processCommand(client *c) {
         if (listLength(server.ready_keys))
             handleClientsBlockedOnKeys();
     }
     return C_OK;
 }
src/server.h
@@ -138,7 +138,6 @@ typedef struct redisObject robj;
 #define CONFIG_BINDADDR_MAX 16
 #define CONFIG_MIN_RESERVED_FDS 32
 #define CONFIG_DEFAULT_PROC_TITLE_TEMPLATE "{title} {listen-addr} {server-mode}"
-#define INCREMENTAL_REHASHING_MAX_QUEUE_SIZE (1024*16)
 #define INCREMENTAL_REHASHING_THRESHOLD_MS 1
 
 /* Bucket sizes for client eviction pools. Each bucket stores clients with