ruanhaishen / redis / Commits / cbb2ac07

Unverified commit cbb2ac07, authored Jul 03, 2018 by chendianqiang, committed via GitHub Jul 03, 2018.

    Merge branch 'unstable' into pending-querybuf

Parents: 7de1ada0 2edcafb3
Changes: 61
src/server.h

@@ -142,6 +142,7 @@ typedef long long mstime_t; /* millisecond time type. */
 #define CONFIG_DEFAULT_AOF_USE_RDB_PREAMBLE 1
 #define CONFIG_DEFAULT_ACTIVE_REHASHING 1
 #define CONFIG_DEFAULT_AOF_REWRITE_INCREMENTAL_FSYNC 1
+#define CONFIG_DEFAULT_RDB_SAVE_INCREMENTAL_FSYNC 1
 #define CONFIG_DEFAULT_MIN_SLAVES_TO_WRITE 0
 #define CONFIG_DEFAULT_MIN_SLAVES_MAX_LAG 10
 #define NET_IP_STR_LEN 46 /* INET6_ADDRSTRLEN is 46, but we need to be sure */

@@ -183,7 +184,8 @@ typedef long long mstime_t; /* millisecond time type. */
 #define PROTO_INLINE_MAX_SIZE (1024*64) /* Max size of inline reads */
 #define PROTO_MBULK_BIG_ARG (1024*32)
 #define LONG_STR_SIZE 21 /* Bytes needed for long -> str + '\0' */
-#define AOF_AUTOSYNC_BYTES (1024*1024*32) /* fdatasync every 32MB */
+#define REDIS_AUTOSYNC_BYTES (1024*1024*32) /* fdatasync every 32MB */
 #define LIMIT_PENDING_QUERYBUF (4*1024*1024) /* 4mb */
 /* When configuring the server eventloop, we setup it so that the total number

@@ -340,7 +342,7 @@ typedef long long mstime_t; /* millisecond time type. */
 /* Anti-warning macro... */
 #define UNUSED(V) ((void) V)
-#define ZSKIPLIST_MAXLEVEL 32 /* Should be enough for 2^32 elements */
+#define ZSKIPLIST_MAXLEVEL 64 /* Should be enough for 2^64 elements */
 #define ZSKIPLIST_P 0.25      /* Skiplist P = 1/4 */
 /* Append only defines */
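
A note on the ZSKIPLIST_MAXLEVEL change above: the level cap bounds how many elements a skiplist can index while keeping the expected O(log N) behaviour, and the bump from 32 to 64 goes together with the move to 64-bit lengths and spans elsewhere in this commit. The sketch below shows the kind of random-level generator such a cap feeds into; it assumes the same ZSKIPLIST_P = 0.25 promotion probability, and the function name and use of rand() are illustrative rather than the exact server code.

    #include <stdio.h>
    #include <stdlib.h>

    #define ZSKIPLIST_MAXLEVEL 64   /* the new cap from the hunk above */
    #define ZSKIPLIST_P 0.25        /* promotion probability, 1/4 */

    /* A node reaches level L with probability ZSKIPLIST_P^(L-1), so with
     * P = 1/4 even huge sorted sets only use a few dozen levels; the cap
     * just has to be generous enough for 64-bit element counts. */
    static int randomLevel(void) {
        int level = 1;
        while ((rand() & 0xFFFF) < (int)(ZSKIPLIST_P * 0xFFFF) &&
               level < ZSKIPLIST_MAXLEVEL)
            level++;
        return level;
    }

    int main(void) {
        int counts[ZSKIPLIST_MAXLEVEL + 1] = {0};
        for (int i = 0; i < 1000000; i++) counts[randomLevel()]++;
        for (int l = 1; l <= 8; l++)
            printf("level %d: %d nodes\n", l, counts[l]);
        return 0;
    }
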
@@ -349,12 +351,14 @@ typedef long long mstime_t; /* millisecond time type. */
 #define AOF_FSYNC_EVERYSEC 2
 #define CONFIG_DEFAULT_AOF_FSYNC AOF_FSYNC_EVERYSEC
-/* Zip structure related defaults */
+/* Zipped structures related defaults */
 #define OBJ_HASH_MAX_ZIPLIST_ENTRIES 512
 #define OBJ_HASH_MAX_ZIPLIST_VALUE 64
 #define OBJ_SET_MAX_INTSET_ENTRIES 512
 #define OBJ_ZSET_MAX_ZIPLIST_ENTRIES 128
 #define OBJ_ZSET_MAX_ZIPLIST_VALUE 64
+#define OBJ_STREAM_NODE_MAX_BYTES 4096
+#define OBJ_STREAM_NODE_MAX_ENTRIES 100
 /* List defaults */
 #define OBJ_LIST_MAX_ZIPLIST_SIZE -2

@@ -781,7 +785,7 @@ typedef struct zskiplistNode {
     struct zskiplistNode *backward;
     struct zskiplistLevel {
         struct zskiplistNode *forward;
-        unsigned int span;
+        unsigned long span;
     } level[];
 } zskiplistNode;

@@ -880,13 +884,13 @@ typedef struct rdbSaveInfo {
 #define RDB_SAVE_INFO_INIT {-1,0,"000000000000000000000000000000",-1}
-typedef struct malloc_stats {
+struct malloc_stats {
     size_t zmalloc_used;
     size_t process_rss;
     size_t allocator_allocated;
     size_t allocator_active;
     size_t allocator_resident;
-} malloc_stats;
+};
 /*-----------------------------------------------------------------------------
  * Global server state

@@ -950,6 +954,7 @@ struct redisServer {
     list *clients_pending_write; /* There is to write or install handler. */
     list *slaves, *monitors;    /* List of slaves and MONITORs */
     client *current_client;     /* Current client, only used on crash report */
+    rax *clients_index;         /* Active clients dictionary by client ID. */
     int clients_paused;         /* True if clients are currently paused */
     mstime_t clients_pause_end_time; /* Time when we undo clients_paused */
     char neterr[ANET_ERR_LEN];  /* Error buffer for anet.c */

@@ -993,7 +998,7 @@ struct redisServer {
     long long slowlog_entry_id;        /* SLOWLOG current entry ID */
     long long slowlog_log_slower_than; /* SLOWLOG time limit (to get logged) */
     unsigned long slowlog_max_len;     /* SLOWLOG max number of items logged */
-    malloc_stats cron_malloc_stats;        /* sampled in serverCron(). */
+    struct malloc_stats cron_malloc_stats; /* sampled in serverCron(). */
     long long stat_net_input_bytes;  /* Bytes read from network. */
     long long stat_net_output_bytes; /* Bytes written to network. */
     size_t stat_rdb_cow_bytes;       /* Copy on write bytes during RDB saving. */

@@ -1045,7 +1050,8 @@ struct redisServer {
     time_t aof_rewrite_time_start;    /* Current AOF rewrite start time. */
     int aof_lastbgrewrite_status;     /* C_OK or C_ERR */
     unsigned long aof_delayed_fsync;  /* delayed AOF fsync() counter */
-    int aof_rewrite_incremental_fsync;/* fsync incrementally while rewriting? */
+    int aof_rewrite_incremental_fsync;/* fsync incrementally while aof rewriting? */
+    int rdb_save_incremental_fsync;   /* fsync incrementally while rdb saving? */
     int aof_last_write_status;        /* C_OK or C_ERR */
     int aof_last_write_errno;         /* Valid if aof_last_write_status is ERR */
     int aof_load_truncated;           /* Don't stop on unexpected AOF EOF. */

@@ -1178,6 +1184,8 @@ struct redisServer {
     size_t zset_max_ziplist_entries;
     size_t zset_max_ziplist_value;
     size_t hll_sparse_max_bytes;
+    size_t stream_node_max_bytes;
+    int64_t stream_node_max_entries;
     /* List parameters */
     int list_max_ziplist_size;
     int list_compress_depth;

@@ -1407,6 +1415,7 @@ void addReplyHumanLongDouble(client *c, long double d);
 void addReplyLongLong(client *c, long long ll);
 void addReplyMultiBulkLen(client *c, long length);
 void addReplyHelp(client *c, const char **help);
+void addReplySubcommandSyntaxError(client *c);
 void copyClientOutputBuffer(client *dst, client *src);
 size_t sdsZmallocSize(sds s);
 size_t getStringObjectSdsUsedMemory(robj *o);

@@ -1415,7 +1424,7 @@ void getClientsMaxBuffers(unsigned long *longest_output_list,
                           unsigned long *biggest_input_buffer);
 char *getClientPeerId(client *client);
 sds catClientInfoString(sds s, client *client);
-sds getAllClientsInfoString(void);
+sds getAllClientsInfoString(int type);
 void rewriteClientCommandVector(client *c, int argc, ...);
 void rewriteClientCommandArgument(client *c, int i, robj *newval);
 void replaceClientCommandVector(client *c, int argc, robj **argv);

@@ -1496,6 +1505,7 @@ robj *tryObjectEncoding(robj *o);
 robj *getDecodedObject(robj *o);
 size_t stringObjectLen(robj *o);
 robj *createStringObjectFromLongLong(long long value);
+robj *createStringObjectFromLongLongForValue(long long value);
 robj *createStringObjectFromLongDouble(long double value, int humanfriendly);
 robj *createQuicklistObject(void);
 robj *createZiplistObject(void);

@@ -1625,7 +1635,7 @@ void zzlNext(unsigned char *zl, unsigned char **eptr, unsigned char **sptr);
 void zzlPrev(unsigned char *zl, unsigned char **eptr, unsigned char **sptr);
 unsigned char *zzlFirstInRange(unsigned char *zl, zrangespec *range);
 unsigned char *zzlLastInRange(unsigned char *zl, zrangespec *range);
-unsigned int zsetLength(const robj *zobj);
+unsigned long zsetLength(const robj *zobj);
 void zsetConvert(robj *zobj, int encoding);
 void zsetConvertToZiplistIfNeeded(robj *zobj, size_t maxelelen);
 int zsetScore(robj *zobj, sds member, double *score);

@@ -1766,6 +1776,8 @@ robj *lookupKeyWriteOrReply(client *c, robj *key, robj *reply);
 robj *lookupKeyReadWithFlags(redisDb *db, robj *key, int flags);
 robj *objectCommandLookup(client *c, robj *key);
 robj *objectCommandLookupOrReply(client *c, robj *key, robj *reply);
+void objectSetLRUOrLFU(robj *val, long long lfu_freq, long long lru_idle,
+                       long long lru_clock);
 #define LOOKUP_NONE 0
 #define LOOKUP_NOTOUCH (1<<0)
 void dbAdd(redisDb *db, robj *key, robj *val);
src/siphash.c

@@ -142,12 +142,12 @@ uint64_t siphash(const uint8_t *in, const size_t inlen, const uint8_t *k) {
     }

     switch (left) {
-    case 7: b |= ((uint64_t)in[6]) << 48;
-    case 6: b |= ((uint64_t)in[5]) << 40;
-    case 5: b |= ((uint64_t)in[4]) << 32;
-    case 4: b |= ((uint64_t)in[3]) << 24;
-    case 3: b |= ((uint64_t)in[2]) << 16;
-    case 2: b |= ((uint64_t)in[1]) << 8;
+    case 7: b |= ((uint64_t)in[6]) << 48; /* fall-thru */
+    case 6: b |= ((uint64_t)in[5]) << 40; /* fall-thru */
+    case 5: b |= ((uint64_t)in[4]) << 32; /* fall-thru */
+    case 4: b |= ((uint64_t)in[3]) << 24; /* fall-thru */
+    case 3: b |= ((uint64_t)in[2]) << 16; /* fall-thru */
+    case 2: b |= ((uint64_t)in[1]) << 8;  /* fall-thru */
     case 1: b |= ((uint64_t)in[0]); break;
     case 0: break;
     }

@@ -202,12 +202,12 @@ uint64_t siphash_nocase(const uint8_t *in, const size_t inlen, const uint8_t *k)
     }

     switch (left) {
-    case 7: b |= ((uint64_t)siptlw(in[6])) << 48;
-    case 6: b |= ((uint64_t)siptlw(in[5])) << 40;
-    case 5: b |= ((uint64_t)siptlw(in[4])) << 32;
-    case 4: b |= ((uint64_t)siptlw(in[3])) << 24;
-    case 3: b |= ((uint64_t)siptlw(in[2])) << 16;
-    case 2: b |= ((uint64_t)siptlw(in[1])) << 8;
+    case 7: b |= ((uint64_t)siptlw(in[6])) << 48; /* fall-thru */
+    case 6: b |= ((uint64_t)siptlw(in[5])) << 40; /* fall-thru */
+    case 5: b |= ((uint64_t)siptlw(in[4])) << 32; /* fall-thru */
+    case 4: b |= ((uint64_t)siptlw(in[3])) << 24; /* fall-thru */
+    case 3: b |= ((uint64_t)siptlw(in[2])) << 16; /* fall-thru */
+    case 2: b |= ((uint64_t)siptlw(in[1])) << 8;  /* fall-thru */
     case 1: b |= ((uint64_t)siptlw(in[0])); break;
     case 0: break;
     }
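
The only change in both siphash functions above is the /* fall-thru */ comment at the end of each cascading case. GCC 7's -Wimplicit-fallthrough (enabled by -Wextra) warns whenever control flows from one case label into the next, and a comment matching its fall-through patterns marks the cascade as intentional without changing the generated code. A self-contained example of the same idiom, unrelated to the Redis sources:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Load the trailing (len % 8) bytes of a buffer into a little-endian
     * 64-bit word. Each case deliberately falls through to the next one,
     * exactly like the siphash tail handling above; the comments keep
     * -Wimplicit-fallthrough quiet without changing behavior. */
    static uint64_t load_tail(const uint8_t *p, size_t left) {
        uint64_t b = 0;
        switch (left) {
        case 7: b |= ((uint64_t)p[6]) << 48; /* fall-thru */
        case 6: b |= ((uint64_t)p[5]) << 40; /* fall-thru */
        case 5: b |= ((uint64_t)p[4]) << 32; /* fall-thru */
        case 4: b |= ((uint64_t)p[3]) << 24; /* fall-thru */
        case 3: b |= ((uint64_t)p[2]) << 16; /* fall-thru */
        case 2: b |= ((uint64_t)p[1]) << 8;  /* fall-thru */
        case 1: b |= ((uint64_t)p[0]); break;
        case 0: break;
        }
        return b;
    }

    int main(void) {
        const uint8_t buf[] = {1, 2, 3};
        printf("0x%llx\n", (unsigned long long)load_tail(buf, sizeof(buf)));
        return 0; /* prints 0x30201 */
    }
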
src/slowlog.c

@@ -142,11 +142,11 @@ void slowlogReset(void) {
 void slowlogCommand(client *c) {
     if (c->argc == 2 && !strcasecmp(c->argv[1]->ptr,"help")) {
         const char *help[] = {
-"get [count] -- Return top entries from the slowlog (default: 10)."
+"GET [count] -- Return top entries from the slowlog (default: 10)."
 "    Entries are made of:",
 "    id, timestamp, time in microseconds, arguments array, client IP and port, client name",
-"len -- Return the length of the slowlog.",
-"reset -- Reset the slowlog.",
+"LEN -- Return the length of the slowlog.",
+"RESET -- Reset the slowlog.",
 NULL
         };
         addReplyHelp(c, help);

@@ -187,6 +187,6 @@ NULL
         }
         setDeferredMultiBulkLength(c,totentries,sent);
     } else {
-        addReplyErrorFormat(c, "Unknown subcommand or wrong number of arguments for '%s'. Try SLOWLOG HELP", (char*)c->argv[1]->ptr);
+        addReplySubcommandSyntaxError(c);
     }
 }
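
Several hunks in this merge (SLOWLOG above, XGROUP and XINFO in t_stream.c below) stop formatting their own "unknown subcommand" error and instead call the new addReplySubcommandSyntaxError() helper declared in the server.h hunk. The helper's body is not part of the diff shown on this page, so the following is only a guess at what such a helper plausibly looks like, built from reply and sds functions that already exist in the code base; the exact message wording is an assumption.

    /* Hypothetical sketch of the shared "bad subcommand" reply; not the
     * actual implementation from this commit. */
    void addReplySubcommandSyntaxError(client *c) {
        sds cmd = sdsnew((char *)c->argv[0]->ptr);
        sdstoupper(cmd);
        addReplyErrorFormat(c,
            "Unknown subcommand or wrong number of arguments for '%s'. Try %s HELP.",
            (char *)c->argv[1]->ptr, cmd);
        sdsfree(cmd);
    }
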
src/sort.c

@@ -447,7 +447,7 @@ void sortCommand(client *c) {
     serverAssertWithInfo(c,sortval,j == vectorlen);
     /* Now it's time to load the right scores in the sorting vector */
-    if (dontsort == 0) {
+    if (!dontsort) {
         for (j = 0; j < vectorlen; j++) {
             robj *byval;
             if (sortby) {

@@ -487,9 +487,7 @@ void sortCommand(client *c) {
                 decrRefCount(byval);
             }
         }
-    }
-    if (dontsort == 0) {
         server.sort_desc = desc;
         server.sort_alpha = alpha;
         server.sort_bypattern = sortby ? 1 : 0;
src/t_stream.c

@@ -41,6 +41,7 @@
 #define STREAM_ITEM_FLAG_SAMEFIELDS (1<<1) /* Same fields as master entry. */
 void streamFreeCG(streamCG *cg);
+void streamFreeNACK(streamNACK *na);
 size_t streamReplyWithRangeFromConsumerPEL(client *c, stream *s, streamID *start, streamID *end, size_t count, streamConsumer *consumer);
 /* -----------------------------------------------------------------------

@@ -171,7 +172,7 @@ int streamCompareID(streamID *a, streamID *b) {
  * if the ID was generated by the function. However the function may return
  * C_ERR if an ID was given via 'use_id', but adding it failed since the
  * current top ID is greater or equal. */
-int streamAppendItem(stream *s, robj **argv, int numfields, streamID *added_id, streamID *use_id) {
+int streamAppendItem(stream *s, robj **argv, int64_t numfields, streamID *added_id, streamID *use_id) {
     /* If an ID was given, check that it's greater than the last entry ID
      * or return an error. */
     if (use_id && streamCompareID(use_id,&s->last_id) <= 0) return C_ERR;

@@ -221,7 +222,7 @@ int streamAppendItem(stream *s, robj **argv, int numfields, streamID *added_id,
      * +-------+---------+------------+---------+--/--+---------+---------+-+
      *
      * count and deleted just represent respectively the total number of
-     * entires inside the listpack that are valid, and marked as deleted
+     * entries inside the listpack that are valid, and marked as deleted
      * (delted flag in the entry flags set). So the total number of items
      * actually inside the listpack (both deleted and not) is count+deleted.
      *

@@ -234,10 +235,24 @@ int streamAppendItem(stream *s, robj **argv, int numfields, streamID *added_id,
      *
      * The "0" entry at the end is the same as the 'lp-count' entry in the
      * regular stream entries (see below), and marks the fact that there are
-     * no more entires, when we scan the stream from right to left. */
+     * no more entries, when we scan the stream from right to left. */
+    /* First of all, check if we can append to the current macro node or
+     * if we need to switch to the next one. 'lp' will be set to NULL if
+     * the current node is full. */
+    if (lp != NULL) {
+        if (server.stream_node_max_bytes &&
+            lp_bytes > server.stream_node_max_bytes)
+        {
+            lp = NULL;
+        } else if (server.stream_node_max_entries) {
+            int64_t count = lpGetInteger(lpFirst(lp));
+            if (count > server.stream_node_max_entries) lp = NULL;
+        }
+    }
     int flags = STREAM_ITEM_FLAG_NONE;
-    if (lp == NULL || lp_bytes > STREAM_BYTES_PER_LISTPACK) {
+    if (lp == NULL || lp_bytes > server.stream_node_max_bytes) {
         master_id = id;
         streamEncodeID(rax_key,&id);
         /* Create the listpack having the master entry ID and fields. */

@@ -245,7 +260,7 @@ int streamAppendItem(stream *s, robj **argv, int numfields, streamID *added_id,
         lp = lpAppendInteger(lp,1); /* One item, the one we are adding. */
         lp = lpAppendInteger(lp,0); /* Zero deleted so far. */
         lp = lpAppendInteger(lp,numfields);
-        for (int i = 0; i < numfields; i++) {
+        for (int64_t i = 0; i < numfields; i++) {
             sds field = argv[i*2]->ptr;
             lp = lpAppend(lp,(unsigned char*)field,sdslen(field));
         }

@@ -270,10 +285,10 @@ int streamAppendItem(stream *s, robj **argv, int numfields, streamID *added_id,
         /* Check if the entry we are adding, have the same fields
          * as the master entry. */
-        int master_fields_count = lpGetInteger(lp_ele);
+        int64_t master_fields_count = lpGetInteger(lp_ele);
         lp_ele = lpNext(lp,lp_ele);
         if (numfields == master_fields_count) {
-            int i;
+            int64_t i;
             for (i = 0; i < master_fields_count; i++) {
                 sds field = argv[i*2]->ptr;
                 int64_t e_len;

@@ -317,14 +332,14 @@ int streamAppendItem(stream *s, robj **argv, int numfields, streamID *added_id,
     lp = lpAppendInteger(lp,id.seq - master_id.seq);
     if (!(flags & STREAM_ITEM_FLAG_SAMEFIELDS))
         lp = lpAppendInteger(lp,numfields);
-    for (int i = 0; i < numfields; i++) {
+    for (int64_t i = 0; i < numfields; i++) {
         sds field = argv[i*2]->ptr, value = argv[i*2+1]->ptr;
         if (!(flags & STREAM_ITEM_FLAG_SAMEFIELDS))
             lp = lpAppend(lp,(unsigned char*)field,sdslen(field));
         lp = lpAppend(lp,(unsigned char*)value,sdslen(value));
     }
     /* Compute and store the lp-count field. */
-    int lp_count = numfields;
+    int64_t lp_count = numfields;
     lp_count += 3; /* Add the 3 fixed fields flags + ms-diff + seq-diff. */
     if (!(flags & STREAM_ITEM_FLAG_SAMEFIELDS)) {
         /* If the item is not compressed, it also has the fields other than

@@ -564,7 +579,7 @@ int streamIteratorGetID(streamIterator *si, streamID *id, int64_t *numfields) {
             /* If we are going backward, read the number of elements this
              * entry is composed of, and jump backward N times to seek
              * its start. */
-            int lp_count = lpGetInteger(si->lp_ele);
+            int64_t lp_count = lpGetInteger(si->lp_ele);
             if (lp_count == 0) { /* We reached the master entry. */
                 si->lp = NULL;
                 si->lp_ele = NULL;

@@ -627,12 +642,17 @@ int streamIteratorGetID(streamIterator *si, streamID *id, int64_t *numfields) {
              * forward, or seek the previous entry if we are going
              * backward. */
             if (!si->rev) {
-                int to_discard = (flags & STREAM_ITEM_FLAG_SAMEFIELDS) ?
+                int64_t to_discard = (flags & STREAM_ITEM_FLAG_SAMEFIELDS) ?
                                   *numfields : *numfields*2;
                 for (int64_t i = 0; i < to_discard; i++)
                     si->lp_ele = lpNext(si->lp,si->lp_ele);
             } else {
-                int prev_times = 4; /* flag + id ms/seq diff + numfields. */
+                int64_t prev_times = 4; /* flag + id ms + id seq + one more to
+                                           go back to the previous entry "count"
+                                           field. */
+                /* If the entry was not flagged SAMEFIELD we also read the
+                 * number of fields, so go back one more. */
+                if (!(flags & STREAM_ITEM_FLAG_SAMEFIELDS)) prev_times++;
                 while (prev_times--) si->lp_ele = lpPrev(si->lp,si->lp_ele);
             }
         }

@@ -690,6 +710,9 @@ void streamIteratorRemoveEntry(streamIterator *si, streamID *current) {
         aux = lpGetInteger(p);
         lp = lpReplaceInteger(lp,&p,aux+1);
+        /* Update the number of entries counter. */
+        si->stream->length--;
         /* Re-seek the iterator to fix the now messed up state. */
         streamID start, end;
         if (si->rev) {

@@ -814,7 +837,7 @@ void streamPropagateXCLAIM(client *c, robj *key, robj *group, robj *id, streamNA
  * Note that this function is recursive in certian cases. When it's called
  * with a non NULL group and consumer argument, it may call
  * streamReplyWithRangeFromConsumerPEL() in order to get entries from the
- * consumer pending entires list. However such a function will then call
+ * consumer pending entries list. However such a function will then call
  * streamReplyWithRange() in order to emit single entries (found in the
  * PEL by ID) to the client. This is the use case for the STREAM_RWR_RAWENTRIES
  * flag.

@@ -867,18 +890,41 @@ size_t streamReplyWithRange(client *c, stream *s, streamID *start, streamID *end
         /* If a group is passed, we need to create an entry in the
          * PEL (pending entries list) of this group *and* this consumer.
-         * Note that we are sure about the fact the message is not already
-         * associated with some other consumer, because if we reached this
-         * loop the IDs the user is requesting are greater than any message
-         * delivered for this group. */
+         *
+         * Note that we cannot be sure about the fact the message is not
+         * already owned by another consumer, because the admin is able
+         * to change the consumer group last delivered ID using the
+         * XGROUP SETID command. So if we find that there is already
+         * a NACK for the entry, we need to associate it to the new
+         * consumer. */
         if (group && !(flags & STREAM_RWR_NOACK)) {
             unsigned char buf[sizeof(streamID)];
             streamEncodeID(buf,&id);
+            /* Try to add a new NACK. Most of the time this will work and
+             * will not require extra lookups. We'll fix the problem later
+             * if we find that there is already a entry for this ID. */
             streamNACK *nack = streamCreateNACK(consumer);
             int retval = 0;
-            retval += raxInsert(group->pel,buf,sizeof(buf),nack,NULL);
-            retval += raxInsert(consumer->pel,buf,sizeof(buf),nack,NULL);
-            serverAssert(retval == 2); /* Make sure entry was inserted. */
+            retval += raxTryInsert(group->pel,buf,sizeof(buf),nack,NULL);
+            retval += raxTryInsert(consumer->pel,buf,sizeof(buf),nack,NULL);
+            /* Now we can check if the entry was already busy, and
+             * in that case reassign the entry to the new consumer. */
+            if (retval == 0) {
+                streamFreeNACK(nack);
+                nack = raxFind(group->pel,buf,sizeof(buf));
+                serverAssert(nack != raxNotFound);
+                raxRemove(nack->consumer->pel,buf,sizeof(buf),NULL);
+                /* Update the consumer and idle time. */
+                nack->consumer = consumer;
+                nack->delivery_time = mstime();
+                nack->delivery_count++;
+                /* Add the entry in the new consumer local PEL. */
+                raxInsert(consumer->pel,buf,sizeof(buf),nack,NULL);
+            } else if (retval == 1) {
+                serverPanic("NACK half-created. Should not be possible.");
+            }
             /* Propagate as XCLAIM. */
             if (spi) {
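
The streamReplyWithRange() hunk above replaces "assert the pending entry is new" with "try to insert it, and if it already exists, hand it over to the new consumer". The pivot is raxTryInsert(), which, as used here, leaves an existing key untouched and reports the collision, so the caller can free the speculative NACK and reuse the stored one. A condensed sketch of that pattern follows, with the surrounding XCLAIM propagation stripped away; it assumes the rax API behaves exactly as the hunk uses it, and the helper name claimNACK() is made up for illustration.

    /* Sketch only: claim ownership of a per-message NACK record, creating
     * it if it does not exist yet, or re-pointing it at 'consumer' if it
     * already does. */
    streamNACK *claimNACK(streamCG *group, streamConsumer *consumer,
                          unsigned char *key, size_t keylen) {
        streamNACK *nack = streamCreateNACK(consumer);
        if (raxTryInsert(group->pel,key,keylen,nack,NULL)) {
            /* Fresh message: also track it in the consumer's own PEL. */
            raxInsert(consumer->pel,key,keylen,nack,NULL);
            return nack;
        }
        /* Already pending: drop the speculative record and move the stored
         * one from the previous owner's PEL to the new consumer's PEL. */
        streamFreeNACK(nack);
        nack = raxFind(group->pel,key,keylen);
        raxRemove(nack->consumer->pel,key,keylen,NULL);
        nack->consumer = consumer;
        raxInsert(consumer->pel,key,keylen,nack,NULL);
        return nack;
    }
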
@@ -899,7 +945,7 @@ size_t streamReplyWithRange(client *c, stream *s, streamID *start, streamID *end
 /* This is an helper function for streamReplyWithRange() when called with
  * group and consumer arguments, but with a range that is referring to already
  * delivered messages. In this case we just emit messages that are already
- * in the history of the conusmer, fetching the IDs from its PEL.
+ * in the history of the consumer, fetching the IDs from its PEL.
  *
  * Note that this function does not have a 'rev' argument because it's not
  * possible to iterate in reverse using a group. Basically this function

@@ -1035,7 +1081,7 @@ invalid:
 void xaddCommand(client *c) {
     streamID id;
     int id_given = 0; /* Was an ID different than "*" specified? */
-    long long maxlen = 0;  /* 0 means no maximum length. */
+    long long maxlen = -1; /* If left to -1 no trimming is performed. */
     int approx_maxlen = 0; /* If 1 only delete whole radix tree nodes, so
                               the maxium length is not applied verbatim. */
     int maxlen_arg_idx = 0; /* Index of the count in MAXLEN, for rewriting. */

@@ -1059,6 +1105,11 @@ void xaddCommand(client *c) {
             }
             if (getLongLongFromObjectOrReply(c,c->argv[i+1],&maxlen,NULL)
                 != C_OK) return;
+
+            if (maxlen < 0) {
+                addReplyError(c,"The MAXLEN argument must be >= 0.");
+                return;
+            }
             i++;
             maxlen_arg_idx = i;
         } else {

@@ -1098,7 +1149,7 @@ void xaddCommand(client *c) {
     server.dirty++;
     /* Remove older elements if MAXLEN was specified. */
-    if (maxlen) {
+    if (maxlen >= 0) {
         if (!streamTrimByLength(s,maxlen,approx_maxlen)) {
             /* If no trimming was performed, for instance because approximated
              * trimming length was specified, rewrite the MAXLEN argument

@@ -1269,14 +1320,13 @@ void xreadCommand(client *c) {
                  * starting from now. */
                 int id_idx = i - streams_arg - streams_count;
                 robj *key = c->argv[i-streams_count];
-                robj *o;
+                robj *o = lookupKeyRead(c->db,key);
+                if (o && checkType(c,o,OBJ_STREAM)) goto cleanup;
                 streamCG *group = NULL;
                 /* If a group was specified, than we need to be sure that the
                  * key and group actually exist. */
                 if (groupname) {
-                    o = lookupKeyRead(c->db,key);
-                    if (o && checkType(c,o,OBJ_STREAM)) goto cleanup;
                     if (o == NULL ||
                         (group = streamLookupCG(o->ptr,groupname->ptr)) == NULL)
                     {

@@ -1290,8 +1340,6 @@ void xreadCommand(client *c) {
                 }
                 if (strcmp(c->argv[i]->ptr,"$") == 0) {
-                    o = lookupKeyRead(c->db,key);
-                    if (o && checkType(c,o,OBJ_STREAM)) goto cleanup;
                     if (o) {
                         stream *s = o->ptr;
                         ids[id_idx] = s->last_id;

@@ -1336,7 +1384,7 @@ void xreadCommand(client *c) {
             /* Emit the two elements sub-array consisting of the name
              * of the stream and the data we extracted from it. */
             addReplyMultiBulkLen(c,2);
-            addReplyBulk(c,c->argv[i+streams_arg]);
+            addReplyBulk(c,c->argv[streams_arg+i]);
             streamConsumer *consumer = NULL;
             if (groups) consumer = streamLookupConsumer(groups[i],
                                                         consumername->ptr,1);

@@ -1516,14 +1564,14 @@ uint64_t streamDelConsumer(streamCG *cg, sds name) {
 /* XGROUP CREATE <key> <groupname> <id or $>
  * XGROUP SETID <key> <id or $>
- * XGROUP DELGROUP <key> <groupname>
+ * XGROUP DESTROY <key> <groupname>
  * XGROUP DELCONSUMER <key> <groupname> <consumername> */
 void xgroupCommand(client *c) {
     const char *help[] = {
 "CREATE <key> <groupname> <id or $> -- Create a new consumer group.",
 "SETID <key> <groupname> <id or $> -- Set the current group ID.",
-"DELGROUP <key> <groupname> -- Remove the specified group.",
-"DELCONSUMER <key> <groupname> <consumer> -- Remove the specified conusmer.",
+"DESTROY <key> <groupname> -- Remove the specified group.",
+"DELCONSUMER <key> <groupname> <consumer> -- Remove the specified consumer.",
 "HELP -- Prints this help.",
 NULL
     };

@@ -1535,14 +1583,13 @@ NULL
     /* Lookup the key now, this is common for all the subcommands but HELP. */
     if (c->argc >= 4) {
         robj *o = lookupKeyWriteOrReply(c,c->argv[2],shared.nokeyerr);
-        if (o == NULL) return;
+        if (o == NULL || checkType(c,o,OBJ_STREAM)) return;
         s = o->ptr;
         grpname = c->argv[3]->ptr;
         /* Certain subcommands require the group to exist. */
         if ((cg = streamLookupCG(s,grpname)) == NULL &&
             (!strcasecmp(opt,"SETID") ||
-             !strcasecmp(opt,"DELGROUP") ||
              !strcasecmp(opt,"DELCONSUMER")))
         {
             addReplyErrorFormat(c, "-NOGROUP No such consumer group '%s' "

@@ -1564,22 +1611,46 @@ NULL
         if (cg) {
             addReply(c,shared.ok);
             server.dirty++;
+            notifyKeyspaceEvent(NOTIFY_STREAM,"xgroup-create",c->argv[2],c->db->id);
         } else {
             addReplySds(c,
                 sdsnew("-BUSYGROUP Consumer Group name already exists\r\n"));
         }
     } else if (!strcasecmp(opt,"SETID") && c->argc == 5) {
-    } else if (!strcasecmp(opt,"DELGROUP") && c->argc == 4) {
+        streamID id;
+        if (!strcmp(c->argv[4]->ptr,"$")) {
+            id = s->last_id;
+        } else if (streamParseIDOrReply(c,c->argv[4],&id,0) != C_OK) {
+            return;
+        }
+        cg->last_id = id;
+        addReply(c,shared.ok);
+        server.dirty++;
+        notifyKeyspaceEvent(NOTIFY_STREAM,"xgroup-setid",c->argv[2],c->db->id);
+    } else if (!strcasecmp(opt,"DESTROY") && c->argc == 4) {
+        if (cg) {
+            raxRemove(s->cgroups,(unsigned char*)grpname,sdslen(grpname),NULL);
+            streamFreeCG(cg);
+            addReply(c,shared.cone);
+            server.dirty++;
+            notifyKeyspaceEvent(NOTIFY_STREAM,"xgroup-destroy",c->argv[2],c->db->id);
+        } else {
+            addReply(c,shared.czero);
+        }
     } else if (!strcasecmp(opt,"DELCONSUMER") && c->argc == 5) {
         /* Delete the consumer and returns the number of pending messages
          * that were yet associated with such a consumer. */
         long long pending = streamDelConsumer(cg,c->argv[4]->ptr);
         addReplyLongLong(c,pending);
         server.dirty++;
+        notifyKeyspaceEvent(NOTIFY_STREAM,"xgroup-delconsumer",c->argv[2],c->db->id);
     } else if (!strcasecmp(opt,"HELP")) {
         addReplyHelp(c, help);
     } else {
-        addReply(c,shared.syntaxerr);
+        addReplySubcommandSyntaxError(c);
     }
 }

@@ -1728,8 +1799,10 @@ void xpendingCommand(client *c) {
     /* If a consumer name was mentioned but it does not exist, we can
      * just return an empty array. */
-    if (consumername && consumer == NULL)
+    if (consumername && consumer == NULL) {
         addReplyMultiBulkLen(c,0);
+        return;
+    }
     rax *pel = consumer ? consumer->pel : group->pel;
     unsigned char startkey[sizeof(streamID)];

@@ -1785,7 +1858,7 @@ void xpendingCommand(client *c) {
  * becomes the specified <consumer>. If the minimum idle time specified
  * is zero, messages are claimed regardless of their idle time.
  *
- * All the messages that cannot be found inside the pending entires list
+ * All the messages that cannot be found inside the pending entries list
  * are ignored, but in case the FORCE option is used. In that case we
  * create the NACK (representing a not yet acknowledged message) entry in
  * the consumer group PEL.

@@ -1970,7 +2043,7 @@ void xclaimCommand(client *c) {
             nack->delivery_time = deliverytime;
             /* Set the delivery attempts counter if given. */
             if (retrycount >= 0) nack->delivery_count = retrycount;
-            /* Add the entry in the new cosnumer local PEL. */
+            /* Add the entry in the new consumer local PEL. */
             raxInsert(consumer->pel,buf,sizeof(buf),nack,NULL);
             /* Send the reply for this entry. */
             if (justid) {

@@ -1999,7 +2072,7 @@ void xclaimCommand(client *c) {
 void xdelCommand(client *c) {
     robj *o;
-    if ((o = lookupKeyReadOrReply(c,c->argv[1],shared.czero)) == NULL
+    if ((o = lookupKeyWriteOrReply(c,c->argv[1],shared.czero)) == NULL
         || checkType(c,o,OBJ_STREAM)) return;
     stream *s = o->ptr;

@@ -2040,7 +2113,7 @@ void xtrimCommand(client *c) {
     /* If the key does not exist, we are ok returning zero, that is, the
      * number of elements removed from the stream. */
-    if ((o = lookupKeyReadOrReply(c,c->argv[1],shared.czero)) == NULL
+    if ((o = lookupKeyWriteOrReply(c,c->argv[1],shared.czero)) == NULL
         || checkType(c,o,OBJ_STREAM)) return;
     stream *s = o->ptr;

@@ -2093,14 +2166,12 @@ void xtrimCommand(client *c) {
 /* XINFO CONSUMERS key group
  * XINFO GROUPS <key>
  * XINFO STREAM <key>
- * XINFO <key> (alias of XINFO STREAM key)
  * XINFO HELP. */
 void xinfoCommand(client *c) {
     const char *help[] = {
 "CONSUMERS <key> <groupname> -- Show consumer groups of group <groupname>.",
 "GROUPS <key> -- Show the stream consumer groups.",
 "STREAM <key> -- Show information about the stream.",
-"<key> -- Alias for STREAM <key>.",
 "HELP -- Print this help.",
 NULL
     };

@@ -2112,20 +2183,19 @@ NULL
     if (!strcasecmp(c->argv[1]->ptr,"HELP")) {
         addReplyHelp(c, help);
         return;
+    } else if (c->argc < 3) {
+        addReplyError(c,"syntax error, try 'XINFO HELP'");
+        return;
     }
-    /* Handle the fact that no subcommand means "STREAM". */
-    if (c->argc == 2) {
-        opt = "STREAM";
-        key = c->argv[1];
-    } else {
-        opt = c->argv[1]->ptr;
-        key = c->argv[2];
-    }
+    /* With the exception of HELP handled before any other sub commands, all
+     * the ones are in the form of "<subcommand> <key>". */
+    opt = c->argv[1]->ptr;
+    key = c->argv[2];
     /* Lookup the key now, this is common for all the subcommands but HELP. */
     robj *o = lookupKeyWriteOrReply(c,key,shared.nokeyerr);
-    if (o == NULL) return;
+    if (o == NULL || checkType(c,o,OBJ_STREAM)) return;
     s = o->ptr;
     /* Dispatch the different subcommands. */

@@ -2180,9 +2250,7 @@ NULL
             addReplyLongLong(c,raxSize(cg->pel));
         }
         raxStop(&ri);
-    } else if (c->argc == 2 ||
-               (!strcasecmp(opt,"STREAM") && c->argc == 3)) {
+    } else if (!strcasecmp(opt,"STREAM") && c->argc == 3) {
         /* XINFO STREAM <key> (or the alias XINFO <key>). */
         addReplyMultiBulkLen(c,12);
         addReplyStatus(c,"length");

@@ -2209,7 +2277,7 @@ NULL
                                  STREAM_RWR_RAWENTRIES,NULL);
         if (!count) addReply(c,shared.nullbulk);
     } else {
-        addReplyError(c,"syntax error, try 'XINFO HELP'");
+        addReplySubcommandSyntaxError(c);
     }
 }
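
A small point worth spelling out from the xaddCommand() hunks earlier in this file: maxlen now starts at -1 so the code can tell "no MAXLEN option given" apart from an explicit "MAXLEN 0", negative values are rejected with an error reply, and trimming runs whenever maxlen >= 0. A tiny standalone sketch of that sentinel convention; the names are illustrative, not Redis code.

    #include <stdio.h>

    /* -1 means "no MAXLEN option given"; 0 and above are explicit limits. */
    static long long parse_maxlen(int given, long long value) {
        if (!given) return -1;      /* leave trimming disabled */
        if (value < 0) return -2;   /* reject, like the new error reply */
        return value;               /* 0 is a legal, very aggressive limit */
    }

    int main(void) {
        printf("%lld\n", parse_maxlen(0, 0));   /* -1: no trimming at all */
        printf("%lld\n", parse_maxlen(1, 0));   /*  0: trim stream to empty */
        printf("%lld\n", parse_maxlen(1, 100)); /* 100: keep about 100 entries */
        return 0;
    }
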
src/t_string.c

@@ -361,7 +361,7 @@ void incrDecrCommand(client *c, long long incr) {
         new = o;
         o->ptr = (void*)((long)value);
     } else {
-        new = createStringObjectFromLongLong(value);
+        new = createStringObjectFromLongLongForValue(value);
         if (o) {
             dbOverwrite(c->db,c->argv[1],new);
         } else {
src/t_zset.c

@@ -1100,8 +1100,8 @@ unsigned char *zzlDeleteRangeByRank(unsigned char *zl, unsigned int start, unsig
  * Common sorted set API
  *----------------------------------------------------------------------------*/

-unsigned int zsetLength(const robj *zobj) {
-    int length = -1;
+unsigned long zsetLength(const robj *zobj) {
+    unsigned long length = 0;
     if (zobj->encoding == OBJ_ENCODING_ZIPLIST) {
         length = zzlLength(zobj->ptr);
     } else if (zobj->encoding == OBJ_ENCODING_SKIPLIST) {

@@ -1878,7 +1878,7 @@ void zuiClearIterator(zsetopsrc *op) {
     }
 }

-int zuiLength(zsetopsrc *op) {
+unsigned long zuiLength(zsetopsrc *op) {
     if (op->subject == NULL)
         return 0;

@@ -2085,7 +2085,11 @@ int zuiFind(zsetopsrc *op, zsetopval *val, double *score) {
 }

 int zuiCompareByCardinality(const void *s1, const void *s2) {
-    return zuiLength((zsetopsrc*)s1) - zuiLength((zsetopsrc*)s2);
+    unsigned long first = zuiLength((zsetopsrc*)s1);
+    unsigned long second = zuiLength((zsetopsrc*)s2);
+    if (first > second) return 1;
+    if (first < second) return -1;
+    return 0;
 }

 #define REDIS_AGGR_SUM 1
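
The zuiCompareByCardinality() change above pairs with zuiLength() now returning unsigned long: a subtraction comparator whose result is truncated to int can report the wrong sign, or zero, once cardinalities stop fitting in 32 bits, which would make the sort over ZUNIONSTORE/ZINTERSTORE sources pick the wrong smallest set. A small self-contained demonstration of the failure mode, independent of Redis (it assumes a 64-bit unsigned long, i.e. an LP64 platform):

    #include <stdio.h>

    /* Subtraction-based comparators misbehave once values exceed int range. */
    static int bad_cmp(unsigned long a, unsigned long b)  { return (int)(a - b); }
    static int good_cmp(unsigned long a, unsigned long b) {
        if (a > b) return 1;
        if (a < b) return -1;
        return 0;
    }

    int main(void) {
        unsigned long a = 0, b = 0x100000000UL; /* b is larger by 2^32 */
        /* a - b wraps to a huge unsigned value whose low 32 bits are all
         * zero, so the cast to int yields 0 and the two cardinalities
         * wrongly compare as "equal". */
        printf("bad: %d  good: %d\n", bad_cmp(a, b), good_cmp(a, b));
        return 0;
    }
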
@@ -2129,7 +2133,7 @@ void zunionInterGenericCommand(client *c, robj *dstkey, int op) {
     zsetopsrc *src;
     zsetopval zval;
     sds tmp;
-    unsigned int maxelelen = 0;
+    size_t maxelelen = 0;
     robj *dstobj;
     zset *dstzset;
     zskiplistNode *znode;

@@ -2363,8 +2367,8 @@ void zrangeGenericCommand(client *c, int reverse) {
     int withscores = 0;
     long start;
     long end;
-    int llen;
-    int rangelen;
+    long llen;
+    long rangelen;

     if ((getLongFromObjectOrReply(c, c->argv[2], &start, NULL) != C_OK) ||
         (getLongFromObjectOrReply(c, c->argv[3], &end, NULL) != C_OK)) return;

@@ -2671,7 +2675,7 @@ void zcountCommand(client *c) {
     robj *key = c->argv[1];
     robj *zobj;
     zrangespec range;
-    int count = 0;
+    unsigned long count = 0;

     /* Parse the range arguments */
     if (zslParseRange(c->argv[2],c->argv[3],&range) != C_OK) {

@@ -2748,7 +2752,7 @@ void zlexcountCommand(client *c) {
     robj *key = c->argv[1];
     robj *zobj;
     zlexrangespec range;
-    int count = 0;
+    unsigned long count = 0;

     /* Parse the range arguments */
     if (zslParseLexRange(c->argv[2],c->argv[3],&range) != C_OK) {

@@ -3163,8 +3167,8 @@ void genericZpopCommand(client *c, robj **keyv, int keyc, int where, int emitkey
             signalModifiedKey(c->db,key);
         }
-        addReplyDouble(c,score);
         addReplyBulkCBuffer(c,ele,sdslen(ele));
+        addReplyDouble(c,score);
         sdsfree(ele);
         arraylen += 2;

@@ -3216,9 +3220,9 @@ void blockingGenericZpopCommand(client *c, int where) {
             return;
         } else {
             if (zsetLength(o) != 0) {
-                /* Non empty zset, this is like a normal Z[REV]POP. */
+                /* Non empty zset, this is like a normal ZPOP[MIN|MAX]. */
                 genericZpopCommand(c,&c->argv[j],1,where,1,NULL);
-                /* Replicate it as an Z[REV]POP instead of BZ[REV]POP. */
+                /* Replicate it as an ZPOP[MIN|MAX] instead of BZPOP[MIN|MAX]. */
                 rewriteClientCommandVector(c,2,
                     where == ZSET_MAX ? shared.zpopmax : shared.zpopmin,
                     c->argv[j]);
src/ziplist.c

@@ -27,7 +27,7 @@
  * traversal.
  *
  * <uint16_t zllen> is the number of entries. When there are more than
- * 2^16-2 entires, this value is set to 2^16-1 and we need to traverse the
+ * 2^16-2 entries, this value is set to 2^16-1 and we need to traverse the
  * entire list to know how many items it holds.
  *
  * <uint8_t zlend> is a special entry representing the end of the ziplist.

@@ -256,7 +256,7 @@
 #define ZIPLIST_ENTRY_END(zl)   ((zl)+intrev32ifbe(ZIPLIST_BYTES(zl))-1)

 /* Increment the number of items field in the ziplist header. Note that this
- * macro should never overflow the unsigned 16 bit integer, since entires are
+ * macro should never overflow the unsigned 16 bit integer, since entries are
  * always pushed one at a time. When UINT16_MAX is reached we want the count
  * to stay there to signal that a full scan is needed to get the number of
  * items inside the ziplist. */
src/zmalloc.c
View file @
cbb2ac07
...
@@ -30,6 +30,7 @@
...
@@ -30,6 +30,7 @@
#include <stdio.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdlib.h>
#include <stdint.h>
/* This function provide us access to the original libc free(). This is useful
/* This function provide us access to the original libc free(). This is useful
* for instance to free results obtained by backtrace_symbols(). We need
* for instance to free results obtained by backtrace_symbols(). We need
...
@@ -164,7 +165,7 @@ void *zrealloc(void *ptr, size_t size) {
     *((size_t*)newptr) = size;
     update_zmalloc_stat_free(oldsize);
-    update_zmalloc_stat_alloc(size);
+    update_zmalloc_stat_alloc(size+PREFIX_SIZE);
     return (char*)newptr+PREFIX_SIZE;
 #endif
 }
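
On libc builds zmalloc keeps the requested size in a PREFIX_SIZE header placed just before the pointer it returns, and the used-memory statistic is supposed to cover header plus payload; the fix above adds the header bytes that the realloc path had been dropping from the counter. A minimal, self-contained sketch of that bookkeeping pattern (xmalloc/xrealloc/track_alloc are illustrative names, not the Redis API):

#include <stdio.h>
#include <stdlib.h>

#define PREFIX_SIZE sizeof(size_t)

static size_t used_memory = 0;
static void track_alloc(size_t n) { used_memory += n; }
static void track_free(size_t n)  { used_memory -= n; }

/* Allocate payload plus a size header, and account for both. */
static void *xmalloc(size_t size) {
    void *ptr = malloc(size + PREFIX_SIZE);
    if (!ptr) abort();
    *((size_t*)ptr) = size;
    track_alloc(size + PREFIX_SIZE);
    return (char*)ptr + PREFIX_SIZE;
}

/* Reallocate: forget the old payload+header, account for the new one. */
static void *xrealloc(void *ptr, size_t size) {
    void *realptr = (char*)ptr - PREFIX_SIZE;
    size_t oldsize = *((size_t*)realptr);
    void *newptr = realloc(realptr, size + PREFIX_SIZE);
    if (!newptr) abort();
    *((size_t*)newptr) = size;
    track_free(oldsize + PREFIX_SIZE);
    track_alloc(size + PREFIX_SIZE);  /* the header must be counted here too */
    return (char*)newptr + PREFIX_SIZE;
}

int main(void) {
    char *p = xmalloc(100);
    p = xrealloc(p, 200);
    printf("tracked bytes: %zu\n", used_memory);
    free((char*)p - PREFIX_SIZE);
    return 0;
}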
...
@@ -418,7 +419,7 @@ size_t zmalloc_get_memory_size(void) {
     mib[0] = CTL_HW;
 #if defined(HW_REALMEM)
     mib[1] = HW_REALMEM;        /* FreeBSD. ----------------- */
-#elif defined(HW_PYSMEM)
+#elif defined(HW_PHYSMEM)
     mib[1] = HW_PHYSMEM;        /* Others. ------------------ */
 #endif
     unsigned int size = 0;      /* 32-bit */
...
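The corrected guard makes the HW_PHYSMEM branch reachable again; the misspelled HW_PYSMEM was never defined, so only FreeBSD's HW_REALMEM path was being compiled. A standalone sketch of the same sysctl() lookup; it assumes a BSD-style platform that exposes CTL_HW (Linux does not), and on systems where this sysctl is 32-bit the reported value may be truncated:

#include <stdio.h>
#include <stddef.h>
#include <sys/types.h>
#include <sys/sysctl.h>

int main(void) {
#if defined(CTL_HW) && (defined(HW_PHYSMEM) || defined(HW_REALMEM))
    int mib[2];
    mib[0] = CTL_HW;
#if defined(HW_REALMEM)
    mib[1] = HW_REALMEM;   /* FreeBSD reports "real" memory here. */
#else
    mib[1] = HW_PHYSMEM;   /* Other BSD-style systems. */
#endif
    unsigned long mem = 0;
    size_t len = sizeof(mem);
    if (sysctl(mib, 2, &mem, &len, NULL, 0) == 0)
        printf("physical memory: %lu bytes\n", mem);
    else
        perror("sysctl");
#else
    printf("CTL_HW/HW_PHYSMEM not available on this platform\n");
#endif
    return 0;
}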
src/zmalloc.h View file @ cbb2ac07
...
@@ -63,6 +63,11 @@
 #ifndef ZMALLOC_LIB
 #define ZMALLOC_LIB "libc"
+#ifdef __GLIBC__
+#include <malloc.h>
+#define HAVE_MALLOC_SIZE 1
+#define zmalloc_size(p) malloc_usable_size(p)
+#endif
 #endif

 /* We can enable the Redis defrag capabilities only if we are using Jemalloc
...
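With this block, glibc builds gain a working zmalloc_size() backed by malloc_usable_size(), which reports how many bytes are actually usable behind a pointer returned by malloc (always at least the requested amount, often more after allocator rounding). A tiny glibc-only example of the call:

#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>   /* glibc: malloc_usable_size() */

int main(void) {
    void *p = malloc(100);
    if (!p) return 1;
    /* The usable size may be rounded up by the allocator,
     * so it is >= the 100 bytes we asked for. */
    printf("requested: 100, usable: %zu\n", malloc_usable_size(p));
    free(p);
    return 0;
}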
tests/integration/rdb.tcl View file @ cbb2ac07
...
@@ -39,6 +39,25 @@ start_server [list overrides [list "dir" $server_path]] {
     } {0000000000000000000000000000000000000000}
 }

+start_server [list overrides [list "dir" $server_path]] {
+    test {Test RDB stream encoding} {
+        for {set j 0} {$j < 1000} {incr j} {
+            if {rand() < 0.9} {
+                r xadd stream * foo $j
+            } else {
+                r xadd stream * bar $j
+            }
+        }
+        r xgroup create stream mygroup 0
+        r xreadgroup GROUP mygroup Alice COUNT 1 STREAMS stream >
+        set digest [r debug digest]
+        r debug reload
+        set newdigest [r debug digest]
+        assert {$digest eq $newdigest}
+        r del stream
+    }
+}
+
 # Helper function to start a server and kill it, just to check the error
 # logged.
 set defaults {}
...
tests/sentinel/tests/07-down-conditions.tcl View file @ cbb2ac07
...
@@ -66,3 +66,13 @@ test "SDOWN is triggered by misconfigured instance repling with errors" {
     R 0 bgsave
     ensure_master_up
 }
+
+# We use this test setup to also test command renaming, as a side
+# effect of the master going down if we send PONG instead of PING
+test "SDOWN is triggered if we rename PING to PONG" {
+    ensure_master_up
+    S 4 SENTINEL SET mymaster rename-command PING PONG
+    ensure_master_down
+    S 4 SENTINEL SET mymaster rename-command PING PING
+    ensure_master_up
+}
tests/support/server.tcl View file @ cbb2ac07
...
@@ -276,6 +276,12 @@ proc start_server {options {code undefined}} {
         error_and_quit $config_file $line
     }

+    if {$::wait_server} {
+        set msg "server started PID: [dict get $srv "pid"]. press any key to continue..."
+        puts $msg
+        read stdin 1
+    }
+
     while 1 {
         # check that the server actually started and is ready for connections
         if {[exec grep -i "Ready to accept" | wc -l < $stdout] > 0} {
...
tests/test_helper.tcl View file @ cbb2ac07
...
@@ -83,6 +83,8 @@ set ::force_failure 0
 set ::timeout 600; # 10 minutes without progresses will quit the test.
 set ::last_progress [clock seconds]
 set ::active_servers {} ; # Pids of active Redis instances.
+set ::dont_clean 0
+set ::wait_server 0
 # Set to 1 when we are running in client mode. The Redis test uses a
 # server-client model to run tests simultaneously. The server instance
...
@@ -176,6 +178,9 @@ proc s {args} {
 }

 proc cleanup {} {
+    if {$::dont_clean} {
+        return
+    }
     if {!$::quiet} {puts -nonewline "Cleanup: may take some time... "}
     flush stdout
     catch {exec rm -rf {*}[glob tests/tmp/redis.conf.*]}
...
@@ -225,6 +230,7 @@ proc test_server_cron {} {
     if {$elapsed > $::timeout} {
         set err "\[[colorstr red TIMEOUT]\]: clients state report follows."
         puts $err
+        lappend ::failed_tests $err
         show_clients_state
         kill_clients
         force_kill_all_servers
...
@@ -411,6 +417,8 @@ proc print_help_screen {} {
     "--clients <num>    Number of test clients (default 16)."
     "--timeout <sec>    Test timeout in seconds (default 10 min)."
     "--force-failure    Force the execution of a test that always fails."
+    "--dont-clean       don't delete redis log files after the run"
+    "--wait-server      wait after server is started (so that you can attach a debugger)"
     "--help             Print this help screen."
 } "\n"]
 }
...
@@ -464,6 +472,10 @@ for {set j 0} {$j < [llength $argv]} {incr j} {
     } elseif {$opt eq {--clients}} {
         set ::numclients $arg
         incr j
+    } elseif {$opt eq {--dont-clean}} {
+        set ::dont_clean 1
+    } elseif {$opt eq {--wait-server}} {
+        set ::wait_server 1
     } elseif {$opt eq {--timeout}} {
         set ::timeout $arg
         incr j
...
tests/unit/dump.tcl View file @ cbb2ac07
...
@@ -25,6 +25,39 @@ start_server {tags {"dump"}} {
         assert {$ttl >= (2569591501-3000) && $ttl <= 2569591501}
         r get foo
     } {bar}

+    test {RESTORE can set an absolute expire} {
+        r set foo bar
+        set encoded [r dump foo]
+        r del foo
+        set now [clock milliseconds]
+        r restore foo [expr $now+3000] $encoded absttl
+        set ttl [r pttl foo]
+        assert {$ttl >= 2998 && $ttl <= 3000}
+        r get foo
+    } {bar}
+
+    test {RESTORE can set LRU} {
+        r set foo bar
+        set encoded [r dump foo]
+        r del foo
+        r config set maxmemory-policy allkeys-lru
+        r restore foo 0 $encoded idletime 1000
+        set idle [r object idletime foo]
+        assert {$idle >= 1000 && $idle <= 1002}
+        r get foo
+    } {bar}
+
+    test {RESTORE can set LFU} {
+        r set foo bar
+        set encoded [r dump foo]
+        r del foo
+        r config set maxmemory-policy allkeys-lfu
+        r restore foo 0 $encoded freq 100
+        set freq [r object freq foo]
+        assert {$freq == 100}
+        r get foo
+    } {bar}
+
     test {RESTORE returns an error of the key already exists} {
         r set foo bar
...
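These new tests cover RESTORE's ABSTTL, IDLETIME and FREQ options; with ABSTTL the TTL argument is an absolute Unix time in milliseconds rather than a relative expire. A hedged hiredis sketch of DUMP followed by RESTORE with ABSTTL; the connection details are illustrative and error handling is kept minimal:

#include <stdio.h>
#include <time.h>
#include <hiredis/hiredis.h>

int main(void) {
    redisContext *ctx = redisConnect("127.0.0.1", 6379);
    if (ctx == NULL || ctx->err) return 1;

    freeReplyObject(redisCommand(ctx, "SET foo bar"));

    /* DUMP returns an opaque binary payload; keep its exact length. */
    redisReply *dump = redisCommand(ctx, "DUMP foo");
    if (!dump || dump->type != REDIS_REPLY_STRING) return 1;
    freeReplyObject(redisCommand(ctx, "DEL foo"));

    /* ABSTTL: the expire is an absolute unix time in milliseconds. */
    char ttlbuf[32];
    snprintf(ttlbuf, sizeof(ttlbuf), "%lld",
             ((long long)time(NULL) + 10) * 1000); /* 10 seconds from now */
    redisReply *restored = redisCommand(ctx, "RESTORE foo %s %b ABSTTL",
                                        ttlbuf, dump->str, (size_t)dump->len);
    if (restored) printf("RESTORE: %s\n", restored->str);

    freeReplyObject(dump);
    freeReplyObject(restored);
    redisFree(ctx);
    return 0;
}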
tests/unit/expire.tcl View file @ cbb2ac07
...
@@ -121,7 +121,7 @@ start_server {tags {"expire"}} {
         list $a $b
     } {somevalue {}}

-    test {TTL returns tiem to live in seconds} {
+    test {TTL returns time to live in seconds} {
         r del x
         r setex x 10 somevalue
         set ttl [r ttl x]
...
tests/unit/memefficiency.tcl View file @ cbb2ac07
...
@@ -97,10 +97,15 @@ start_server {tags {"defrag"}} {
         r config set active-defrag-ignore-bytes 2mb
         r config set maxmemory 0
         r config set list-max-ziplist-size 5 ;# list of 10k items will have 2000 quicklist nodes
+        r config set stream-node-max-entries 5
         r hmset hash h1 v1 h2 v2 h3 v3
         r lpush list a b c d
         r zadd zset 0 a 1 b 2 c 3 d
         r sadd set a b c d
+        r xadd stream * item 1 value a
+        r xadd stream * item 2 value b
+        r xgroup create stream mygroup 0
+        r xreadgroup GROUP mygroup Alice COUNT 1 STREAMS stream >
         # create big keys with 10k items
         set rd [redis_deferring_client]
...
@@ -109,8 +114,9 @@ start_server {tags {"defrag"}} {
             $rd lpush biglist [concat "asdfasdfasdf" $j]
             $rd zadd bigzset $j [concat "asdfasdfasdf" $j]
             $rd sadd bigset [concat "asdfasdfasdf" $j]
+            $rd xadd bigstream * item 1 value a
         }
-        for {set j 0} {$j < 40000} {incr j} {
+        for {set j 0} {$j < 50000} {incr j} {
             $rd read ; # Discard replies
         }
...
@@ -134,7 +140,7 @@ start_server {tags {"defrag"}} {
         for {set j 0} {$j < 500000} {incr j} {
             $rd read ; # Discard replies
         }
-        assert {[r dbsize] == 500008}
+        assert {[r dbsize] == 500010}

         # create some fragmentation
         for {set j 0} {$j < 500000} {incr j 2} {
...
@@ -143,7 +149,7 @@ start_server {tags {"defrag"}} {
         for {set j 0} {$j < 500000} {incr j 2} {
             $rd read ; # Discard replies
         }
-        assert {[r dbsize] == 250008}
+        assert {[r dbsize] == 250010}

         # start defrag
         after 120 ;# serverCron only updates the info once in 100ms
...
@@ -155,6 +161,7 @@ start_server {tags {"defrag"}} {
         r config set latency-monitor-threshold 5
         r latency reset
+        set digest [r debug digest]
         catch {r config set activedefrag yes} e
         if {![string match {DISABLED*} $e]} {
             # wait for the active defrag to start working (decision once a second)
...
@@ -193,9 +200,11 @@ start_server {tags {"defrag"}} {
             # due to high fragmentation, 10hz, and active-defrag-cycle-max set to 75,
             # we expect max latency to be not much higher than 75ms
             assert {$max_latency <= 80}
-        }
-    } {}
+        } else {
+            set _ ""
+        }
+        # verify the data isn't corrupted or changed
+        set newdigest [r debug digest]
+        assert {$digest eq $newdigest}
+        r save ;# saving an rdb iterates over all the data / pointers
+    } {OK}
 }
 }
tests/unit/scan.tcl View file @ cbb2ac07
...
@@ -236,4 +236,50 @@ start_server {tags {"scan"}} {
         set first_score [lindex $res 1]
         assert {$first_score != 0}
     }
+
+    test "SCAN regression test for issue #4906" {
+        for {set k 0} {$k < 100} {incr k} {
+            r del set
+            r sadd set x; # Make sure it's not intset encoded
+            set toremove {}
+            unset -nocomplain found
+            array set found {}
+
+            # Populate the set
+            set numele [expr {101+[randomInt 1000]}]
+            for {set j 0} {$j < $numele} {incr j} {
+                r sadd set $j
+                if {$j >= 100} {
+                    lappend toremove $j
+                }
+            }
+
+            # Start scanning
+            set cursor 0
+            set iteration 0
+            set del_iteration [randomInt 10]
+            while {!($cursor == 0 && $iteration != 0)} {
+                lassign [r sscan set $cursor] cursor items
+
+                # Mark found items. We expect to find from 0 to 99 at the end
+                # since those elements will never be removed during the scanning.
+                foreach i $items {
+                    set found($i) 1
+                }
+                incr iteration
+
+                # At some point remove most of the items to trigger the
+                # rehashing to a smaller hash table.
+                if {$iteration == $del_iteration} {
+                    r srem set {*}$toremove
+                }
+            }
+
+            # Verify that SSCAN reported everything from 0 to 99
+            for {set j 0} {$j < 100} {incr j} {
+                if {![info exists found($j)]} {
+                    fail "SSCAN element missing $j"
+                }
+            }
+        }
+    }
 }
tests/unit/type/stream.tcl View file @ cbb2ac07
...
@@ -253,4 +253,20 @@ start_server {
     }
     }
 }
+
+    test {XREVRANGE regression test for issue #5006} {
+        # Add non compressed entries
+        r xadd teststream 1234567891230 key1 value1
+        r xadd teststream 1234567891240 key2 value2
+        r xadd teststream 1234567891250 key3 value3
+
+        # Add SAMEFIELD compressed entries
+        r xadd teststream2 1234567891230 key1 value1
+        r xadd teststream2 1234567891240 key1 value2
+        r xadd teststream2 1234567891250 key1 value3
+        assert_equal [r xrevrange teststream 1234567891245 -] {{1234567891240-0 {key2 value2}} {1234567891230-0 {key1 value1}}}
+        assert_equal [r xrevrange teststream2 1234567891245 -] {{1234567891240-0 {key1 value2}} {1234567891230-0 {key1 value1}}}
+    }
 }
tests/unit/type/zset.tcl View file @ cbb2ac07
...
@@ -653,11 +653,11 @@ start_server {tags {"zset"}} {
         r del zset
         assert_equal {} [r zpopmin zset]
         create_zset zset {-1 a 1 b 2 c 3 d 4 e}
-        assert_equal {-1 a} [r zpopmin zset]
+        assert_equal {a -1} [r zpopmin zset]
-        assert_equal {1 b} [r zpopmin zset]
+        assert_equal {b 1} [r zpopmin zset]
-        assert_equal {4 e} [r zpopmax zset]
+        assert_equal {e 4} [r zpopmax zset]
-        assert_equal {3 d} [r zpopmax zset]
+        assert_equal {d 3} [r zpopmax zset]
-        assert_equal {2 c} [r zpopmin zset]
+        assert_equal {c 2} [r zpopmin zset]
         assert_equal 0 [r exists zset]
         r set foo bar
         assert_error "*WRONGTYPE*" {r zpopmin foo}
...
@@ -669,8 +669,8 @@ start_server {tags {"zset"}} {
         assert_equal {} [r zpopmin z1 2]
         assert_error "*WRONGTYPE*" {r zpopmin foo 2}
         create_zset z1 {0 a 1 b 2 c 3 d}
-        assert_equal {0 a 1 b} [r zpopmin z1 2]
+        assert_equal {a 0 b 1} [r zpopmin z1 2]
-        assert_equal {3 d 2 c} [r zpopmax z1 2]
+        assert_equal {d 3 c 2} [r zpopmax z1 2]
     }

     test "BZPOP with a single existing sorted set - $encoding" {
...
@@ -678,11 +678,11 @@ start_server {tags {"zset"}} {
         create_zset zset {0 a 1 b 2 c}

         $rd bzpopmin zset 5
-        assert_equal {zset 0 a} [$rd read]
+        assert_equal {zset a 0} [$rd read]
         $rd bzpopmin zset 5
-        assert_equal {zset 1 b} [$rd read]
+        assert_equal {zset b 1} [$rd read]
         $rd bzpopmax zset 5
-        assert_equal {zset 2 c} [$rd read]
+        assert_equal {zset c 2} [$rd read]
         assert_equal 0 [r exists zset]
     }
...
@@ -692,16 +692,16 @@ start_server {tags {"zset"}} {
         create_zset z2 {3 d 4 e 5 f}

         $rd bzpopmin z1 z2 5
-        assert_equal {z1 0 a} [$rd read]
+        assert_equal {z1 a 0} [$rd read]
         $rd bzpopmax z1 z2 5
-        assert_equal {z1 2 c} [$rd read]
+        assert_equal {z1 c 2} [$rd read]
         assert_equal 1 [r zcard z1]
         assert_equal 3 [r zcard z2]
         $rd bzpopmax z2 z1 5
-        assert_equal {z2 5 f} [$rd read]
+        assert_equal {z2 f 5} [$rd read]
         $rd bzpopmin z2 z1 5
-        assert_equal {z2 3 d} [$rd read]
+        assert_equal {z2 d 3} [$rd read]
         assert_equal 1 [r zcard z1]
         assert_equal 1 [r zcard z2]
     }
...
@@ -711,9 +711,9 @@ start_server {tags {"zset"}} {
         r del z1
         create_zset z2 {3 d 4 e 5 f}

         $rd bzpopmax z1 z2 5
-        assert_equal {z2 5 f} [$rd read]
+        assert_equal {z2 f 5} [$rd read]
         $rd bzpopmin z2 z1 5
-        assert_equal {z2 3 d} [$rd read]
+        assert_equal {z2 d 3} [$rd read]
         assert_equal 0 [r zcard z1]
         assert_equal 1 [r zcard z2]
     }
...
@@ -1107,7 +1107,7 @@ start_server {tags {"zset"}} {
         r del zset
         r zadd zset 1 bar
         $rd read
-    } {zset 1 bar}
+    } {zset bar 1}

     test "BZPOPMIN, ZADD + DEL + SET should not awake blocked client" {
         set rd [redis_deferring_client]
...
@@ -1124,7 +1124,7 @@ start_server {tags {"zset"}} {
         r del zset
         r zadd zset 1 bar
         $rd read
-    } {zset 1 bar}
+    } {zset bar 1}

     test "BZPOPMIN with same key multiple times should work" {
         set rd [redis_deferring_client]
...
@@ -1133,18 +1133,18 @@ start_server {tags {"zset"}} {
         # Data arriving after the BZPOPMIN.
         $rd bzpopmin z1 z2 z2 z1 0
         r zadd z1 0 a
-        assert_equal [$rd read] {z1 0 a}
+        assert_equal [$rd read] {z1 a 0}
         $rd bzpopmin z1 z2 z2 z1 0
         r zadd z2 1 b
-        assert_equal [$rd read] {z2 1 b}
+        assert_equal [$rd read] {z2 b 1}

         # Data already there.
         r zadd z1 0 a
         r zadd z2 1 b
         $rd bzpopmin z1 z2 z2 z1 0
-        assert_equal [$rd read] {z1 0 a}
+        assert_equal [$rd read] {z1 a 0}
         $rd bzpopmin z1 z2 z2 z1 0
-        assert_equal [$rd read] {z2 1 b}
+        assert_equal [$rd read] {z2 b 1}
     }

     test "MULTI/EXEC is isolated from the point of view of BZPOPMIN" {
...
@@ -1157,7 +1157,7 @@ start_server {tags {"zset"}} {
         r zadd zset 2 c
         r exec
         $rd read
-    } {zset 0 a}
+    } {zset a 0}

     test "BZPOPMIN with variadic ZADD" {
         set rd [redis_deferring_client]
...
@@ -1167,7 +1167,7 @@ start_server {tags {"zset"}} {
         if {$::valgrind} {after 100}
         assert_equal 2 [r zadd zset -1 foo 1 bar]
         if {$::valgrind} {after 100}
-        assert_equal {zset -1 foo} [$rd read]
+        assert_equal {zset foo -1} [$rd read]
         assert_equal {bar} [r zrange zset 0 -1]
     }
...
@@ -1177,7 +1177,7 @@ start_server {tags {"zset"}} {
         $rd bzpopmin zset 0
         after 1000
         r zadd zset 0 foo
-        assert_equal {zset 0 foo} [$rd read]
+        assert_equal {zset foo 0} [$rd read]
     }
 }
...