Unverified commit cbb2ac07 authored by chendianqiang, committed by GitHub

Merge branch 'unstable' into pending-querybuf

parents 7de1ada0 2edcafb3
......@@ -142,6 +142,7 @@ typedef long long mstime_t; /* millisecond time type. */
#define CONFIG_DEFAULT_AOF_USE_RDB_PREAMBLE 1
#define CONFIG_DEFAULT_ACTIVE_REHASHING 1
#define CONFIG_DEFAULT_AOF_REWRITE_INCREMENTAL_FSYNC 1
#define CONFIG_DEFAULT_RDB_SAVE_INCREMENTAL_FSYNC 1
#define CONFIG_DEFAULT_MIN_SLAVES_TO_WRITE 0
#define CONFIG_DEFAULT_MIN_SLAVES_MAX_LAG 10
#define NET_IP_STR_LEN 46 /* INET6_ADDRSTRLEN is 46, but we need to be sure */
......@@ -183,7 +184,8 @@ typedef long long mstime_t; /* millisecond time type. */
#define PROTO_INLINE_MAX_SIZE (1024*64) /* Max size of inline reads */
#define PROTO_MBULK_BIG_ARG (1024*32)
#define LONG_STR_SIZE 21 /* Bytes needed for long -> str + '\0' */
#define AOF_AUTOSYNC_BYTES (1024*1024*32) /* fdatasync every 32MB */
#define REDIS_AUTOSYNC_BYTES (1024*1024*32) /* fdatasync every 32MB */
#define LIMIT_PENDING_QUERYBUF (4*1024*1024) /* 4mb */
/* When configuring the server eventloop, we setup it so that the total number
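For context on the new rdb-save-incremental-fsync default above (and the AOF_AUTOSYNC_BYTES -> REDIS_AUTOSYNC_BYTES rename): the option spreads the cost of flushing a large file by calling fdatasync() every 32 MB instead of once at the end of the save. A minimal sketch of that pattern, using hypothetical names (write_chunk, incremental_fsync) rather than the real rio API:

#include <unistd.h>

#define AUTOSYNC_BYTES (1024*1024*32)      /* flush OS buffers every 32 MB */

static size_t written_since_sync = 0;

/* Write a buffer and, when incremental fsync is enabled, pay the disk-flush
 * cost a little at a time during the save instead of all at once. */
static ssize_t write_chunk(int fd, const void *buf, size_t len,
                           int incremental_fsync) {
    ssize_t nwritten = write(fd, buf, len);
    if (nwritten > 0 && incremental_fsync) {
        written_since_sync += (size_t)nwritten;
        if (written_since_sync >= AUTOSYNC_BYTES) {
            fdatasync(fd);
            written_since_sync = 0;
        }
    }
    return nwritten;
}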
......@@ -340,7 +342,7 @@ typedef long long mstime_t; /* millisecond time type. */
/* Anti-warning macro... */
#define UNUSED(V) ((void) V)
#define ZSKIPLIST_MAXLEVEL 32 /* Should be enough for 2^32 elements */
#define ZSKIPLIST_MAXLEVEL 64 /* Should be enough for 2^64 elements */
#define ZSKIPLIST_P 0.25 /* Skiplist P = 1/4 */
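A quick sanity check on the ZSKIPLIST_MAXLEVEL bump (an editorial aside, not from the commit): with ZSKIPLIST_P = 0.25 the expected height of a skiplist holding N elements is roughly log(N)/log(1/P), so 2^64 elements need about log4(2^64) = 32 levels on average, and a hard cap of 64 leaves ample headroom.

#include <math.h>
#include <stdio.h>

int main(void) {
    double p = 0.25;                              /* ZSKIPLIST_P */
    double levels = log(pow(2.0, 64)) / log(1.0 / p);
    printf("expected levels for 2^64 elements: ~%.0f\n", levels);  /* ~32 */
    return 0;
}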
/* Append only defines */
......@@ -349,12 +351,14 @@ typedef long long mstime_t; /* millisecond time type. */
#define AOF_FSYNC_EVERYSEC 2
#define CONFIG_DEFAULT_AOF_FSYNC AOF_FSYNC_EVERYSEC
/* Zip structure related defaults */
/* Zipped structures related defaults */
#define OBJ_HASH_MAX_ZIPLIST_ENTRIES 512
#define OBJ_HASH_MAX_ZIPLIST_VALUE 64
#define OBJ_SET_MAX_INTSET_ENTRIES 512
#define OBJ_ZSET_MAX_ZIPLIST_ENTRIES 128
#define OBJ_ZSET_MAX_ZIPLIST_VALUE 64
#define OBJ_STREAM_NODE_MAX_BYTES 4096
#define OBJ_STREAM_NODE_MAX_ENTRIES 100
/* List defaults */
#define OBJ_LIST_MAX_ZIPLIST_SIZE -2
......@@ -781,7 +785,7 @@ typedef struct zskiplistNode {
struct zskiplistNode *backward;
struct zskiplistLevel {
struct zskiplistNode *forward;
unsigned int span;
unsigned long span;
} level[];
} zskiplistNode;
......@@ -880,13 +884,13 @@ typedef struct rdbSaveInfo {
#define RDB_SAVE_INFO_INIT {-1,0,"000000000000000000000000000000",-1}
typedef struct malloc_stats {
struct malloc_stats {
size_t zmalloc_used;
size_t process_rss;
size_t allocator_allocated;
size_t allocator_active;
size_t allocator_resident;
} malloc_stats;
};
/*-----------------------------------------------------------------------------
* Global server state
......@@ -950,6 +954,7 @@ struct redisServer {
list *clients_pending_write; /* There is to write or install handler. */
list *slaves, *monitors; /* List of slaves and MONITORs */
client *current_client; /* Current client, only used on crash report */
rax *clients_index; /* Active clients dictionary by client ID. */
int clients_paused; /* True if clients are currently paused */
mstime_t clients_pause_end_time; /* Time when we undo clients_paused */
char neterr[ANET_ERR_LEN]; /* Error buffer for anet.c */
......@@ -993,7 +998,7 @@ struct redisServer {
long long slowlog_entry_id; /* SLOWLOG current entry ID */
long long slowlog_log_slower_than; /* SLOWLOG time limit (to get logged) */
unsigned long slowlog_max_len; /* SLOWLOG max number of items logged */
malloc_stats cron_malloc_stats; /* sampled in serverCron(). */
struct malloc_stats cron_malloc_stats; /* sampled in serverCron(). */
long long stat_net_input_bytes; /* Bytes read from network. */
long long stat_net_output_bytes; /* Bytes written to network. */
size_t stat_rdb_cow_bytes; /* Copy on write bytes during RDB saving. */
......@@ -1045,7 +1050,8 @@ struct redisServer {
time_t aof_rewrite_time_start; /* Current AOF rewrite start time. */
int aof_lastbgrewrite_status; /* C_OK or C_ERR */
unsigned long aof_delayed_fsync; /* delayed AOF fsync() counter */
int aof_rewrite_incremental_fsync;/* fsync incrementally while rewriting? */
int aof_rewrite_incremental_fsync;/* fsync incrementally while aof rewriting? */
int rdb_save_incremental_fsync; /* fsync incrementally while rdb saving? */
int aof_last_write_status; /* C_OK or C_ERR */
int aof_last_write_errno; /* Valid if aof_last_write_status is ERR */
int aof_load_truncated; /* Don't stop on unexpected AOF EOF. */
......@@ -1178,6 +1184,8 @@ struct redisServer {
size_t zset_max_ziplist_entries;
size_t zset_max_ziplist_value;
size_t hll_sparse_max_bytes;
size_t stream_node_max_bytes;
int64_t stream_node_max_entries;
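These two fields back the new stream-node-max-bytes / stream-node-max-entries settings (defaults OBJ_STREAM_NODE_MAX_BYTES = 4096 and OBJ_STREAM_NODE_MAX_ENTRIES = 100 above). The streamAppendItem() hunk further down uses them to decide when the current stream macro node (listpack) is full; a condensed sketch of that rule, with a hypothetical helper name:

#include <stddef.h>
#include <stdint.h>

/* A macro node is considered full once it exceeds either configured limit;
 * a limit of 0 disables that particular check. */
static int streamNodeIsFull(size_t lp_bytes, int64_t lp_count,
                            size_t max_bytes, int64_t max_entries) {
    if (max_bytes && lp_bytes > max_bytes) return 1;
    if (max_entries && lp_count > max_entries) return 1;
    return 0;
}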
/* List parameters */
int list_max_ziplist_size;
int list_compress_depth;
......@@ -1407,6 +1415,7 @@ void addReplyHumanLongDouble(client *c, long double d);
void addReplyLongLong(client *c, long long ll);
void addReplyMultiBulkLen(client *c, long length);
void addReplyHelp(client *c, const char **help);
void addReplySubcommandSyntaxError(client *c);
void copyClientOutputBuffer(client *dst, client *src);
size_t sdsZmallocSize(sds s);
size_t getStringObjectSdsUsedMemory(robj *o);
......@@ -1415,7 +1424,7 @@ void getClientsMaxBuffers(unsigned long *longest_output_list,
unsigned long *biggest_input_buffer);
char *getClientPeerId(client *client);
sds catClientInfoString(sds s, client *client);
sds getAllClientsInfoString(void);
sds getAllClientsInfoString(int type);
void rewriteClientCommandVector(client *c, int argc, ...);
void rewriteClientCommandArgument(client *c, int i, robj *newval);
void replaceClientCommandVector(client *c, int argc, robj **argv);
......@@ -1496,6 +1505,7 @@ robj *tryObjectEncoding(robj *o);
robj *getDecodedObject(robj *o);
size_t stringObjectLen(robj *o);
robj *createStringObjectFromLongLong(long long value);
robj *createStringObjectFromLongLongForValue(long long value);
robj *createStringObjectFromLongDouble(long double value, int humanfriendly);
robj *createQuicklistObject(void);
robj *createZiplistObject(void);
......@@ -1625,7 +1635,7 @@ void zzlNext(unsigned char *zl, unsigned char **eptr, unsigned char **sptr);
void zzlPrev(unsigned char *zl, unsigned char **eptr, unsigned char **sptr);
unsigned char *zzlFirstInRange(unsigned char *zl, zrangespec *range);
unsigned char *zzlLastInRange(unsigned char *zl, zrangespec *range);
unsigned int zsetLength(const robj *zobj);
unsigned long zsetLength(const robj *zobj);
void zsetConvert(robj *zobj, int encoding);
void zsetConvertToZiplistIfNeeded(robj *zobj, size_t maxelelen);
int zsetScore(robj *zobj, sds member, double *score);
......@@ -1766,6 +1776,8 @@ robj *lookupKeyWriteOrReply(client *c, robj *key, robj *reply);
robj *lookupKeyReadWithFlags(redisDb *db, robj *key, int flags);
robj *objectCommandLookup(client *c, robj *key);
robj *objectCommandLookupOrReply(client *c, robj *key, robj *reply);
void objectSetLRUOrLFU(robj *val, long long lfu_freq, long long lru_idle,
long long lru_clock);
#define LOOKUP_NONE 0
#define LOOKUP_NOTOUCH (1<<0)
void dbAdd(redisDb *db, robj *key, robj *val);
......
......@@ -142,12 +142,12 @@ uint64_t siphash(const uint8_t *in, const size_t inlen, const uint8_t *k) {
}
switch (left) {
case 7: b |= ((uint64_t)in[6]) << 48;
case 6: b |= ((uint64_t)in[5]) << 40;
case 5: b |= ((uint64_t)in[4]) << 32;
case 4: b |= ((uint64_t)in[3]) << 24;
case 3: b |= ((uint64_t)in[2]) << 16;
case 2: b |= ((uint64_t)in[1]) << 8;
case 7: b |= ((uint64_t)in[6]) << 48; /* fall-thru */
case 6: b |= ((uint64_t)in[5]) << 40; /* fall-thru */
case 5: b |= ((uint64_t)in[4]) << 32; /* fall-thru */
case 4: b |= ((uint64_t)in[3]) << 24; /* fall-thru */
case 3: b |= ((uint64_t)in[2]) << 16; /* fall-thru */
case 2: b |= ((uint64_t)in[1]) << 8; /* fall-thru */
case 1: b |= ((uint64_t)in[0]); break;
case 0: break;
}
......@@ -202,12 +202,12 @@ uint64_t siphash_nocase(const uint8_t *in, const size_t inlen, const uint8_t *k)
}
switch (left) {
case 7: b |= ((uint64_t)siptlw(in[6])) << 48;
case 6: b |= ((uint64_t)siptlw(in[5])) << 40;
case 5: b |= ((uint64_t)siptlw(in[4])) << 32;
case 4: b |= ((uint64_t)siptlw(in[3])) << 24;
case 3: b |= ((uint64_t)siptlw(in[2])) << 16;
case 2: b |= ((uint64_t)siptlw(in[1])) << 8;
case 7: b |= ((uint64_t)siptlw(in[6])) << 48; /* fall-thru */
case 6: b |= ((uint64_t)siptlw(in[5])) << 40; /* fall-thru */
case 5: b |= ((uint64_t)siptlw(in[4])) << 32; /* fall-thru */
case 4: b |= ((uint64_t)siptlw(in[3])) << 24; /* fall-thru */
case 3: b |= ((uint64_t)siptlw(in[2])) << 16; /* fall-thru */
case 2: b |= ((uint64_t)siptlw(in[1])) << 8; /* fall-thru */
case 1: b |= ((uint64_t)siptlw(in[0])); break;
case 0: break;
}
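The only change in these two switch statements is the added /* fall-thru */ comments, presumably to silence -Wimplicit-fallthrough, which GCC 7+ enables as part of -Wextra and which, at its default level, accepts a fall-through comment as an annotation. A tiny self-contained illustration of the pattern (not from the diff):

/* Compile with: gcc -Wextra -c fallthru.c
 * Without the marker comment, GCC 7+ warns that case 2 may fall through. */
unsigned accumulate(int left) {
    unsigned b = 0;
    switch (left) {
    case 2: b |= 1u << 8;   /* fall-thru */
    case 1: b |= 1u;        break;
    default:                break;
    }
    return b;
}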
......
......@@ -142,11 +142,11 @@ void slowlogReset(void) {
void slowlogCommand(client *c) {
if (c->argc == 2 && !strcasecmp(c->argv[1]->ptr,"help")) {
const char *help[] = {
"get [count] -- Return top entries from the slowlog (default: 10)."
"GET [count] -- Return top entries from the slowlog (default: 10)."
" Entries are made of:",
" id, timestamp, time in microseconds, arguments array, client IP and port, client name",
"len -- Return the length of the slowlog.",
"reset -- Reset the slowlog.",
"LEN -- Return the length of the slowlog.",
"RESET -- Reset the slowlog.",
NULL
};
addReplyHelp(c, help);
......@@ -187,6 +187,6 @@ NULL
}
setDeferredMultiBulkLength(c,totentries,sent);
} else {
addReplyErrorFormat(c, "Unknown subcommand or wrong number of arguments for '%s'. Try SLOWLOG HELP", (char*)c->argv[1]->ptr);
addReplySubcommandSyntaxError(c);
}
}
......@@ -447,7 +447,7 @@ void sortCommand(client *c) {
serverAssertWithInfo(c,sortval,j == vectorlen);
/* Now it's time to load the right scores in the sorting vector */
if (dontsort == 0) {
if (!dontsort) {
for (j = 0; j < vectorlen; j++) {
robj *byval;
if (sortby) {
......@@ -487,9 +487,7 @@ void sortCommand(client *c) {
decrRefCount(byval);
}
}
}
if (dontsort == 0) {
server.sort_desc = desc;
server.sort_alpha = alpha;
server.sort_bypattern = sortby ? 1 : 0;
......
......@@ -41,6 +41,7 @@
#define STREAM_ITEM_FLAG_SAMEFIELDS (1<<1) /* Same fields as master entry. */
void streamFreeCG(streamCG *cg);
void streamFreeNACK(streamNACK *na);
size_t streamReplyWithRangeFromConsumerPEL(client *c, stream *s, streamID *start, streamID *end, size_t count, streamConsumer *consumer);
/* -----------------------------------------------------------------------
......@@ -171,7 +172,7 @@ int streamCompareID(streamID *a, streamID *b) {
* if the ID was generated by the function. However the function may return
* C_ERR if an ID was given via 'use_id', but adding it failed since the
* current top ID is greater or equal. */
int streamAppendItem(stream *s, robj **argv, int numfields, streamID *added_id, streamID *use_id) {
int streamAppendItem(stream *s, robj **argv, int64_t numfields, streamID *added_id, streamID *use_id) {
/* If an ID was given, check that it's greater than the last entry ID
* or return an error. */
if (use_id && streamCompareID(use_id,&s->last_id) <= 0) return C_ERR;
......@@ -221,7 +222,7 @@ int streamAppendItem(stream *s, robj **argv, int numfields, streamID *added_id,
* +-------+---------+------------+---------+--/--+---------+---------+-+
*
* count and deleted just represent respectively the total number of
* entires inside the listpack that are valid, and marked as deleted
* entries inside the listpack that are valid, and marked as deleted
* (deleted flag in the entry flags set). So the total number of items
* actually inside the listpack (both deleted and not) is count+deleted.
*
......@@ -234,10 +235,24 @@ int streamAppendItem(stream *s, robj **argv, int numfields, streamID *added_id,
*
* The "0" entry at the end is the same as the 'lp-count' entry in the
* regular stream entries (see below), and marks the fact that there are
* no more entires, when we scan the stream from right to left. */
* no more entries, when we scan the stream from right to left. */
/* First of all, check if we can append to the current macro node or
* if we need to switch to the next one. 'lp' will be set to NULL if
* the current node is full. */
if (lp != NULL) {
if (server.stream_node_max_bytes &&
lp_bytes > server.stream_node_max_bytes)
{
lp = NULL;
} else if (server.stream_node_max_entries) {
int64_t count = lpGetInteger(lpFirst(lp));
if (count > server.stream_node_max_entries) lp = NULL;
}
}
int flags = STREAM_ITEM_FLAG_NONE;
if (lp == NULL || lp_bytes > STREAM_BYTES_PER_LISTPACK) {
if (lp == NULL || lp_bytes > server.stream_node_max_bytes) {
master_id = id;
streamEncodeID(rax_key,&id);
/* Create the listpack having the master entry ID and fields. */
......@@ -245,7 +260,7 @@ int streamAppendItem(stream *s, robj **argv, int numfields, streamID *added_id,
lp = lpAppendInteger(lp,1); /* One item, the one we are adding. */
lp = lpAppendInteger(lp,0); /* Zero deleted so far. */
lp = lpAppendInteger(lp,numfields);
for (int i = 0; i < numfields; i++) {
for (int64_t i = 0; i < numfields; i++) {
sds field = argv[i*2]->ptr;
lp = lpAppend(lp,(unsigned char*)field,sdslen(field));
}
......@@ -270,10 +285,10 @@ int streamAppendItem(stream *s, robj **argv, int numfields, streamID *added_id,
/* Check if the entry we are adding, have the same fields
* as the master entry. */
int master_fields_count = lpGetInteger(lp_ele);
int64_t master_fields_count = lpGetInteger(lp_ele);
lp_ele = lpNext(lp,lp_ele);
if (numfields == master_fields_count) {
int i;
int64_t i;
for (i = 0; i < master_fields_count; i++) {
sds field = argv[i*2]->ptr;
int64_t e_len;
......@@ -317,14 +332,14 @@ int streamAppendItem(stream *s, robj **argv, int numfields, streamID *added_id,
lp = lpAppendInteger(lp,id.seq - master_id.seq);
if (!(flags & STREAM_ITEM_FLAG_SAMEFIELDS))
lp = lpAppendInteger(lp,numfields);
for (int i = 0; i < numfields; i++) {
for (int64_t i = 0; i < numfields; i++) {
sds field = argv[i*2]->ptr, value = argv[i*2+1]->ptr;
if (!(flags & STREAM_ITEM_FLAG_SAMEFIELDS))
lp = lpAppend(lp,(unsigned char*)field,sdslen(field));
lp = lpAppend(lp,(unsigned char*)value,sdslen(value));
}
/* Compute and store the lp-count field. */
int lp_count = numfields;
int64_t lp_count = numfields;
lp_count += 3; /* Add the 3 fixed fields flags + ms-diff + seq-diff. */
if (!(flags & STREAM_ITEM_FLAG_SAMEFIELDS)) {
/* If the item is not compressed, it also has the fields other than
......@@ -564,7 +579,7 @@ int streamIteratorGetID(streamIterator *si, streamID *id, int64_t *numfields) {
/* If we are going backward, read the number of elements this
* entry is composed of, and jump backward N times to seek
* its start. */
int lp_count = lpGetInteger(si->lp_ele);
int64_t lp_count = lpGetInteger(si->lp_ele);
if (lp_count == 0) { /* We reached the master entry. */
si->lp = NULL;
si->lp_ele = NULL;
......@@ -627,12 +642,17 @@ int streamIteratorGetID(streamIterator *si, streamID *id, int64_t *numfields) {
* forward, or seek the previous entry if we are going
* backward. */
if (!si->rev) {
int to_discard = (flags & STREAM_ITEM_FLAG_SAMEFIELDS) ?
*numfields : *numfields*2;
int64_t to_discard = (flags & STREAM_ITEM_FLAG_SAMEFIELDS) ?
*numfields : *numfields*2;
for (int64_t i = 0; i < to_discard; i++)
si->lp_ele = lpNext(si->lp,si->lp_ele);
} else {
int prev_times = 4; /* flag + id ms/seq diff + numfields. */
int64_t prev_times = 4; /* flag + id ms + id seq + one more to
go back to the previous entry "count"
field. */
/* If the entry was not flagged SAMEFIELD we also read the
* number of fields, so go back one more. */
if (!(flags & STREAM_ITEM_FLAG_SAMEFIELDS)) prev_times++;
while(prev_times--) si->lp_ele = lpPrev(si->lp,si->lp_ele);
}
}
......@@ -690,6 +710,9 @@ void streamIteratorRemoveEntry(streamIterator *si, streamID *current) {
aux = lpGetInteger(p);
lp = lpReplaceInteger(lp,&p,aux+1);
/* Update the number of entries counter. */
si->stream->length--;
/* Re-seek the iterator to fix the now messed up state. */
streamID start, end;
if (si->rev) {
......@@ -814,7 +837,7 @@ void streamPropagateXCLAIM(client *c, robj *key, robj *group, robj *id, streamNA
* Note that this function is recursive in certain cases. When it's called
* with a non NULL group and consumer argument, it may call
* streamReplyWithRangeFromConsumerPEL() in order to get entries from the
* consumer pending entires list. However such a function will then call
* consumer pending entries list. However such a function will then call
* streamReplyWithRange() in order to emit single entries (found in the
* PEL by ID) to the client. This is the use case for the STREAM_RWR_RAWENTRIES
* flag.
......@@ -867,18 +890,41 @@ size_t streamReplyWithRange(client *c, stream *s, streamID *start, streamID *end
/* If a group is passed, we need to create an entry in the
* PEL (pending entries list) of this group *and* this consumer.
* Note that we are sure about the fact the message is not already
* associated with some other consumer, because if we reached this
* loop the IDs the user is requesting are greater than any message
* delivered for this group. */
*
* Note that we cannot be sure about the fact the message is not
* already owned by another consumer, because the admin is able
* to change the consumer group last delivered ID using the
* XGROUP SETID command. So if we find that there is already
* a NACK for the entry, we need to associate it to the new
* consumer. */
if (group && !(flags & STREAM_RWR_NOACK)) {
unsigned char buf[sizeof(streamID)];
streamEncodeID(buf,&id);
/* Try to add a new NACK. Most of the time this will work and
* will not require extra lookups. We'll fix the problem later
* if we find that there is already a entry for this ID. */
streamNACK *nack = streamCreateNACK(consumer);
int retval = 0;
retval += raxInsert(group->pel,buf,sizeof(buf),nack,NULL);
retval += raxInsert(consumer->pel,buf,sizeof(buf),nack,NULL);
serverAssert(retval == 2); /* Make sure entry was inserted. */
retval += raxTryInsert(group->pel,buf,sizeof(buf),nack,NULL);
retval += raxTryInsert(consumer->pel,buf,sizeof(buf),nack,NULL);
/* Now we can check if the entry was already busy, and
* in that case reassign the entry to the new consumer. */
if (retval == 0) {
streamFreeNACK(nack);
nack = raxFind(group->pel,buf,sizeof(buf));
serverAssert(nack != raxNotFound);
raxRemove(nack->consumer->pel,buf,sizeof(buf),NULL);
/* Update the consumer and idle time. */
nack->consumer = consumer;
nack->delivery_time = mstime();
nack->delivery_count++;
/* Add the entry in the new consumer local PEL. */
raxInsert(consumer->pel,buf,sizeof(buf),nack,NULL);
} else if (retval == 1) {
serverPanic("NACK half-created. Should not be possible.");
}
/* Propagate as XCLAIM. */
if (spi) {
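The switch from raxInsert() to raxTryInsert() above is what makes the reassignment logic work: per rax.h in the Redis tree, raxTryInsert() returns 1 when the key was actually inserted and 0 when it already existed (leaving the stored value untouched). So retval == 2 means both PELs received a fresh NACK, retval == 0 means the ID was already owned by some consumer and must be reassigned, and retval == 1 should be impossible. A small usage sketch of that contract (illustration only, not from the diff):

#include <assert.h>
#include "rax.h"   /* Redis radix tree */

void rax_try_insert_contract(void) {
    rax *r = raxNew();
    unsigned char key[] = "id";
    const char *v1 = "first", *v2 = "second";

    /* Key is new: the value is stored and raxTryInsert() returns 1. */
    assert(raxTryInsert(r, key, sizeof(key), (void*)v1, NULL) == 1);

    /* Key exists: the old value is kept and raxTryInsert() returns 0. */
    assert(raxTryInsert(r, key, sizeof(key), (void*)v2, NULL) == 0);
    assert(raxFind(r, key, sizeof(key)) == (void*)v1);

    raxFree(r);
}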
......@@ -899,7 +945,7 @@ size_t streamReplyWithRange(client *c, stream *s, streamID *start, streamID *end
/* This is a helper function for streamReplyWithRange() when called with
* group and consumer arguments, but with a range that is referring to already
* delivered messages. In this case we just emit messages that are already
* in the history of the conusmer, fetching the IDs from its PEL.
* in the history of the consumer, fetching the IDs from its PEL.
*
* Note that this function does not have a 'rev' argument because it's not
* possible to iterate in reverse using a group. Basically this function
......@@ -1035,7 +1081,7 @@ invalid:
void xaddCommand(client *c) {
streamID id;
int id_given = 0; /* Was an ID different than "*" specified? */
long long maxlen = 0; /* 0 means no maximum length. */
long long maxlen = -1; /* If left to -1 no trimming is performed. */
int approx_maxlen = 0; /* If 1 only delete whole radix tree nodes, so
the maximum length is not applied verbatim. */
int maxlen_arg_idx = 0; /* Index of the count in MAXLEN, for rewriting. */
......@@ -1059,6 +1105,11 @@ void xaddCommand(client *c) {
}
if (getLongLongFromObjectOrReply(c,c->argv[i+1],&maxlen,NULL)
!= C_OK) return;
if (maxlen < 0) {
addReplyError(c,"The MAXLEN argument must be >= 0.");
return;
}
i++;
maxlen_arg_idx = i;
} else {
......@@ -1098,7 +1149,7 @@ void xaddCommand(client *c) {
server.dirty++;
/* Remove older elements if MAXLEN was specified. */
if (maxlen) {
if (maxlen >= 0) {
if (!streamTrimByLength(s,maxlen,approx_maxlen)) {
/* If no trimming was performed, for instance because approximated
* trimming length was specified, rewrite the MAXLEN argument
......@@ -1269,14 +1320,13 @@ void xreadCommand(client *c) {
* starting from now. */
int id_idx = i - streams_arg - streams_count;
robj *key = c->argv[i-streams_count];
robj *o;
robj *o = lookupKeyRead(c->db,key);
if (o && checkType(c,o,OBJ_STREAM)) goto cleanup;
streamCG *group = NULL;
/* If a group was specified, than we need to be sure that the
* key and group actually exist. */
if (groupname) {
o = lookupKeyRead(c->db,key);
if (o && checkType(c,o,OBJ_STREAM)) goto cleanup;
if (o == NULL ||
(group = streamLookupCG(o->ptr,groupname->ptr)) == NULL)
{
......@@ -1290,8 +1340,6 @@ void xreadCommand(client *c) {
}
if (strcmp(c->argv[i]->ptr,"$") == 0) {
o = lookupKeyRead(c->db,key);
if (o && checkType(c,o,OBJ_STREAM)) goto cleanup;
if (o) {
stream *s = o->ptr;
ids[id_idx] = s->last_id;
......@@ -1336,7 +1384,7 @@ void xreadCommand(client *c) {
/* Emit the two elements sub-array consisting of the name
* of the stream and the data we extracted from it. */
addReplyMultiBulkLen(c,2);
addReplyBulk(c,c->argv[i+streams_arg]);
addReplyBulk(c,c->argv[streams_arg+i]);
streamConsumer *consumer = NULL;
if (groups) consumer = streamLookupConsumer(groups[i],
consumername->ptr,1);
......@@ -1516,14 +1564,14 @@ uint64_t streamDelConsumer(streamCG *cg, sds name) {
/* XGROUP CREATE <key> <groupname> <id or $>
* XGROUP SETID <key> <id or $>
* XGROUP DELGROUP <key> <groupname>
* XGROUP DESTROY <key> <groupname>
* XGROUP DELCONSUMER <key> <groupname> <consumername> */
void xgroupCommand(client *c) {
const char *help[] = {
"CREATE <key> <groupname> <id or $> -- Create a new consumer group.",
"SETID <key> <groupname> <id or $> -- Set the current group ID.",
"DELGROUP <key> <groupname> -- Remove the specified group.",
"DELCONSUMER <key> <groupname> <consumer> -- Remove the specified conusmer.",
"DESTROY <key> <groupname> -- Remove the specified group.",
"DELCONSUMER <key> <groupname> <consumer> -- Remove the specified consumer.",
"HELP -- Prints this help.",
NULL
};
......@@ -1535,14 +1583,13 @@ NULL
/* Lookup the key now, this is common for all the subcommands but HELP. */
if (c->argc >= 4) {
robj *o = lookupKeyWriteOrReply(c,c->argv[2],shared.nokeyerr);
if (o == NULL) return;
if (o == NULL || checkType(c,o,OBJ_STREAM)) return;
s = o->ptr;
grpname = c->argv[3]->ptr;
/* Certain subcommands require the group to exist. */
if ((cg = streamLookupCG(s,grpname)) == NULL &&
(!strcasecmp(opt,"SETID") ||
!strcasecmp(opt,"DELGROUP") ||
!strcasecmp(opt,"DELCONSUMER")))
{
addReplyErrorFormat(c, "-NOGROUP No such consumer group '%s' "
......@@ -1564,22 +1611,46 @@ NULL
if (cg) {
addReply(c,shared.ok);
server.dirty++;
notifyKeyspaceEvent(NOTIFY_STREAM,"xgroup-create",
c->argv[2],c->db->id);
} else {
addReplySds(c,
sdsnew("-BUSYGROUP Consumer Group name already exists\r\n"));
}
} else if (!strcasecmp(opt,"SETID") && c->argc == 5) {
} else if (!strcasecmp(opt,"DELGROUP") && c->argc == 4) {
streamID id;
if (!strcmp(c->argv[4]->ptr,"$")) {
id = s->last_id;
} else if (streamParseIDOrReply(c,c->argv[4],&id,0) != C_OK) {
return;
}
cg->last_id = id;
addReply(c,shared.ok);
server.dirty++;
notifyKeyspaceEvent(NOTIFY_STREAM,"xgroup-setid",c->argv[2],c->db->id);
} else if (!strcasecmp(opt,"DESTROY") && c->argc == 4) {
if (cg) {
raxRemove(s->cgroups,(unsigned char*)grpname,sdslen(grpname),NULL);
streamFreeCG(cg);
addReply(c,shared.cone);
server.dirty++;
notifyKeyspaceEvent(NOTIFY_STREAM,"xgroup-destroy",
c->argv[2],c->db->id);
} else {
addReply(c,shared.czero);
}
} else if (!strcasecmp(opt,"DELCONSUMER") && c->argc == 5) {
/* Delete the consumer and returns the number of pending messages
* that were yet associated with such a consumer. */
long long pending = streamDelConsumer(cg,c->argv[4]->ptr);
addReplyLongLong(c,pending);
server.dirty++;
notifyKeyspaceEvent(NOTIFY_STREAM,"xgroup-delconsumer",
c->argv[2],c->db->id);
} else if (!strcasecmp(opt,"HELP")) {
addReplyHelp(c, help);
} else {
addReply(c,shared.syntaxerr);
addReplySubcommandSyntaxError(c);
}
}
......@@ -1728,8 +1799,10 @@ void xpendingCommand(client *c) {
/* If a consumer name was mentioned but it does not exist, we can
* just return an empty array. */
if (consumername && consumer == NULL)
if (consumername && consumer == NULL) {
addReplyMultiBulkLen(c,0);
return;
}
rax *pel = consumer ? consumer->pel : group->pel;
unsigned char startkey[sizeof(streamID)];
......@@ -1785,7 +1858,7 @@ void xpendingCommand(client *c) {
* becomes the specified <consumer>. If the minimum idle time specified
* is zero, messages are claimed regardless of their idle time.
*
* All the messages that cannot be found inside the pending entires list
* All the messages that cannot be found inside the pending entries list
* are ignored, but in case the FORCE option is used. In that case we
* create the NACK (representing a not yet acknowledged message) entry in
* the consumer group PEL.
......@@ -1970,7 +2043,7 @@ void xclaimCommand(client *c) {
nack->delivery_time = deliverytime;
/* Set the delivery attempts counter if given. */
if (retrycount >= 0) nack->delivery_count = retrycount;
/* Add the entry in the new cosnumer local PEL. */
/* Add the entry in the new consumer local PEL. */
raxInsert(consumer->pel,buf,sizeof(buf),nack,NULL);
/* Send the reply for this entry. */
if (justid) {
......@@ -1999,7 +2072,7 @@ void xclaimCommand(client *c) {
void xdelCommand(client *c) {
robj *o;
if ((o = lookupKeyReadOrReply(c,c->argv[1],shared.czero)) == NULL
if ((o = lookupKeyWriteOrReply(c,c->argv[1],shared.czero)) == NULL
|| checkType(c,o,OBJ_STREAM)) return;
stream *s = o->ptr;
......@@ -2040,7 +2113,7 @@ void xtrimCommand(client *c) {
/* If the key does not exist, we are ok returning zero, that is, the
* number of elements removed from the stream. */
if ((o = lookupKeyReadOrReply(c,c->argv[1],shared.czero)) == NULL
if ((o = lookupKeyWriteOrReply(c,c->argv[1],shared.czero)) == NULL
|| checkType(c,o,OBJ_STREAM)) return;
stream *s = o->ptr;
......@@ -2093,14 +2166,12 @@ void xtrimCommand(client *c) {
/* XINFO CONSUMERS key group
* XINFO GROUPS <key>
* XINFO STREAM <key>
* XINFO <key> (alias of XINFO STREAM key)
* XINFO HELP. */
void xinfoCommand(client *c) {
const char *help[] = {
"CONSUMERS <key> <groupname> -- Show consumer groups of group <groupname>.",
"GROUPS <key> -- Show the stream consumer groups.",
"STREAM <key> -- Show information about the stream.",
"<key> -- Alias for STREAM <key>.",
"HELP -- Print this help.",
NULL
};
......@@ -2112,20 +2183,19 @@ NULL
if (!strcasecmp(c->argv[1]->ptr,"HELP")) {
addReplyHelp(c, help);
return;
} else if (c->argc < 3) {
addReplyError(c,"syntax error, try 'XINFO HELP'");
return;
}
/* Handle the fact that no subcommand means "STREAM". */
if (c->argc == 2) {
opt = "STREAM";
key = c->argv[1];
} else {
opt = c->argv[1]->ptr;
key = c->argv[2];
}
/* With the exception of HELP handled before any other sub commands, all
* the ones are in the form of "<subcommand> <key>". */
opt = c->argv[1]->ptr;
key = c->argv[2];
/* Lookup the key now, this is common for all the subcommands but HELP. */
robj *o = lookupKeyWriteOrReply(c,key,shared.nokeyerr);
if (o == NULL) return;
if (o == NULL || checkType(c,o,OBJ_STREAM)) return;
s = o->ptr;
/* Dispatch the different subcommands. */
......@@ -2180,9 +2250,7 @@ NULL
addReplyLongLong(c,raxSize(cg->pel));
}
raxStop(&ri);
} else if (c->argc == 2 ||
(!strcasecmp(opt,"STREAM") && c->argc == 3))
{
} else if (!strcasecmp(opt,"STREAM") && c->argc == 3) {
/* XINFO STREAM <key> (or the alias XINFO <key>). */
addReplyMultiBulkLen(c,12);
addReplyStatus(c,"length");
......@@ -2209,7 +2277,7 @@ NULL
STREAM_RWR_RAWENTRIES,NULL);
if (!count) addReply(c,shared.nullbulk);
} else {
addReplyError(c,"syntax error, try 'XINFO HELP'");
addReplySubcommandSyntaxError(c);
}
}
......@@ -361,7 +361,7 @@ void incrDecrCommand(client *c, long long incr) {
new = o;
o->ptr = (void*)((long)value);
} else {
new = createStringObjectFromLongLong(value);
new = createStringObjectFromLongLongForValue(value);
if (o) {
dbOverwrite(c->db,c->argv[1],new);
} else {
......
......@@ -1100,8 +1100,8 @@ unsigned char *zzlDeleteRangeByRank(unsigned char *zl, unsigned int start, unsig
* Common sorted set API
*----------------------------------------------------------------------------*/
unsigned int zsetLength(const robj *zobj) {
int length = -1;
unsigned long zsetLength(const robj *zobj) {
unsigned long length = 0;
if (zobj->encoding == OBJ_ENCODING_ZIPLIST) {
length = zzlLength(zobj->ptr);
} else if (zobj->encoding == OBJ_ENCODING_SKIPLIST) {
......@@ -1878,7 +1878,7 @@ void zuiClearIterator(zsetopsrc *op) {
}
}
int zuiLength(zsetopsrc *op) {
unsigned long zuiLength(zsetopsrc *op) {
if (op->subject == NULL)
return 0;
......@@ -2085,7 +2085,11 @@ int zuiFind(zsetopsrc *op, zsetopval *val, double *score) {
}
int zuiCompareByCardinality(const void *s1, const void *s2) {
return zuiLength((zsetopsrc*)s1) - zuiLength((zsetopsrc*)s2);
unsigned long first = zuiLength((zsetopsrc*)s1);
unsigned long second = zuiLength((zsetopsrc*)s2);
if (first > second) return 1;
if (first < second) return -1;
return 0;
}
#define REDIS_AGGR_SUM 1
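The comparator rewrite above avoids a classic qsort() pitfall: with zuiLength() now returning unsigned long, returning the raw difference truncated to int can wrap for large cardinalities and give qsort() an inconsistent ordering. A self-contained illustration (editorial example, not part of the diff):

#include <limits.h>
#include <stdio.h>

int main(void) {
    unsigned long first  = (unsigned long)INT_MAX + 2;   /* huge cardinality */
    unsigned long second = 1;

    int by_subtraction = (int)(first - second);  /* wraps negative on typical
                                                    two's-complement targets */
    int by_comparison  = (first > second) ? 1 :
                         (first < second) ? -1 : 0;

    printf("subtraction says %d, comparison says %d\n",
           by_subtraction, by_comparison);
    return 0;
}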
......@@ -2129,7 +2133,7 @@ void zunionInterGenericCommand(client *c, robj *dstkey, int op) {
zsetopsrc *src;
zsetopval zval;
sds tmp;
unsigned int maxelelen = 0;
size_t maxelelen = 0;
robj *dstobj;
zset *dstzset;
zskiplistNode *znode;
......@@ -2363,8 +2367,8 @@ void zrangeGenericCommand(client *c, int reverse) {
int withscores = 0;
long start;
long end;
int llen;
int rangelen;
long llen;
long rangelen;
if ((getLongFromObjectOrReply(c, c->argv[2], &start, NULL) != C_OK) ||
(getLongFromObjectOrReply(c, c->argv[3], &end, NULL) != C_OK)) return;
......@@ -2671,7 +2675,7 @@ void zcountCommand(client *c) {
robj *key = c->argv[1];
robj *zobj;
zrangespec range;
int count = 0;
unsigned long count = 0;
/* Parse the range arguments */
if (zslParseRange(c->argv[2],c->argv[3],&range) != C_OK) {
......@@ -2748,7 +2752,7 @@ void zlexcountCommand(client *c) {
robj *key = c->argv[1];
robj *zobj;
zlexrangespec range;
int count = 0;
unsigned long count = 0;
/* Parse the range arguments */
if (zslParseLexRange(c->argv[2],c->argv[3],&range) != C_OK) {
......@@ -3163,8 +3167,8 @@ void genericZpopCommand(client *c, robj **keyv, int keyc, int where, int emitkey
signalModifiedKey(c->db,key);
}
addReplyDouble(c,score);
addReplyBulkCBuffer(c,ele,sdslen(ele));
addReplyDouble(c,score);
sdsfree(ele);
arraylen += 2;
......@@ -3216,9 +3220,9 @@ void blockingGenericZpopCommand(client *c, int where) {
return;
} else {
if (zsetLength(o) != 0) {
/* Non empty zset, this is like a normal Z[REV]POP. */
/* Non empty zset, this is like a normal ZPOP[MIN|MAX]. */
genericZpopCommand(c,&c->argv[j],1,where,1,NULL);
/* Replicate it as an Z[REV]POP instead of BZ[REV]POP. */
/* Replicate it as an ZPOP[MIN|MAX] instead of BZPOP[MIN|MAX]. */
rewriteClientCommandVector(c,2,
where == ZSET_MAX ? shared.zpopmax : shared.zpopmin,
c->argv[j]);
......
......@@ -27,7 +27,7 @@
* traversal.
*
* <uint16_t zllen> is the number of entries. When there are more than
* 2^16-2 entires, this value is set to 2^16-1 and we need to traverse the
* 2^16-2 entries, this value is set to 2^16-1 and we need to traverse the
* entire list to know how many items it holds.
*
* <uint8_t zlend> is a special entry representing the end of the ziplist.
......@@ -256,7 +256,7 @@
#define ZIPLIST_ENTRY_END(zl) ((zl)+intrev32ifbe(ZIPLIST_BYTES(zl))-1)
/* Increment the number of items field in the ziplist header. Note that this
* macro should never overflow the unsigned 16 bit integer, since entires are
* macro should never overflow the unsigned 16 bit integer, since entries are
* always pushed one at a time. When UINT16_MAX is reached we want the count
* to stay there to signal that a full scan is needed to get the number of
* items inside the ziplist. */
......
......@@ -30,6 +30,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
/* This function provide us access to the original libc free(). This is useful
* for instance to free results obtained by backtrace_symbols(). We need
......@@ -164,7 +165,7 @@ void *zrealloc(void *ptr, size_t size) {
*((size_t*)newptr) = size;
update_zmalloc_stat_free(oldsize);
update_zmalloc_stat_alloc(size);
update_zmalloc_stat_alloc(size+PREFIX_SIZE);
return (char*)newptr+PREFIX_SIZE;
#endif
}
......@@ -418,7 +419,7 @@ size_t zmalloc_get_memory_size(void) {
mib[0] = CTL_HW;
#if defined(HW_REALMEM)
mib[1] = HW_REALMEM; /* FreeBSD. ----------------- */
#elif defined(HW_PYSMEM)
#elif defined(HW_PHYSMEM)
mib[1] = HW_PHYSMEM; /* Others. ------------------ */
#endif
unsigned int size = 0; /* 32-bit */
......
......@@ -63,6 +63,11 @@
#ifndef ZMALLOC_LIB
#define ZMALLOC_LIB "libc"
#ifdef __GLIBC__
#include <malloc.h>
#define HAVE_MALLOC_SIZE 1
#define zmalloc_size(p) malloc_usable_size(p)
#endif
#endif
/* We can enable the Redis defrag capabilities only if we are using Jemalloc
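This block lets builds that use the plain libc allocator report real allocation sizes under glibc via malloc_usable_size(), instead of tracking sizes in a prefix header. A small illustration of what that call returns (the exact value depends on the allocator):

#include <malloc.h>   /* malloc_usable_size() is a glibc extension */
#include <stdio.h>
#include <stdlib.h>

int main(void) {
    void *p = malloc(100);
    if (p == NULL) return 1;
    /* glibc may round the request up to its bin size, so the usable size is
     * at least the 100 bytes requested, often a bit more. */
    printf("requested 100 bytes, usable %zu bytes\n", malloc_usable_size(p));
    free(p);
    return 0;
}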
......
......@@ -39,6 +39,25 @@ start_server [list overrides [list "dir" $server_path]] {
} {0000000000000000000000000000000000000000}
}
start_server [list overrides [list "dir" $server_path]] {
test {Test RDB stream encoding} {
for {set j 0} {$j < 1000} {incr j} {
if {rand() < 0.9} {
r xadd stream * foo $j
} else {
r xadd stream * bar $j
}
}
r xgroup create stream mygroup 0
r xreadgroup GROUP mygroup Alice COUNT 1 STREAMS stream >
set digest [r debug digest]
r debug reload
set newdigest [r debug digest]
assert {$digest eq $newdigest}
r del stream
}
}
# Helper function to start a server and kill it, just to check the error
# logged.
set defaults {}
......
......@@ -66,3 +66,13 @@ test "SDOWN is triggered by misconfigured instance repling with errors" {
R 0 bgsave
ensure_master_up
}
# We use this test setup to also test command renaming, as a side
# effect of the master going down if we send PONG instead of PING
test "SDOWN is triggered if we rename PING to PONG" {
ensure_master_up
S 4 SENTINEL SET mymaster rename-command PING PONG
ensure_master_down
S 4 SENTINEL SET mymaster rename-command PING PING
ensure_master_up
}
......@@ -276,6 +276,12 @@ proc start_server {options {code undefined}} {
error_and_quit $config_file $line
}
if {$::wait_server} {
set msg "server started PID: [dict get $srv "pid"]. press any key to continue..."
puts $msg
read stdin 1
}
while 1 {
# check that the server actually started and is ready for connections
if {[exec grep -i "Ready to accept" | wc -l < $stdout] > 0} {
......
......@@ -83,6 +83,8 @@ set ::force_failure 0
set ::timeout 600; # 10 minutes without progresses will quit the test.
set ::last_progress [clock seconds]
set ::active_servers {} ; # Pids of active Redis instances.
set ::dont_clean 0
set ::wait_server 0
# Set to 1 when we are running in client mode. The Redis test uses a
# server-client model to run tests simultaneously. The server instance
......@@ -176,6 +178,9 @@ proc s {args} {
}
proc cleanup {} {
if {$::dont_clean} {
return
}
if {!$::quiet} {puts -nonewline "Cleanup: may take some time... "}
flush stdout
catch {exec rm -rf {*}[glob tests/tmp/redis.conf.*]}
......@@ -225,6 +230,7 @@ proc test_server_cron {} {
if {$elapsed > $::timeout} {
set err "\[[colorstr red TIMEOUT]\]: clients state report follows."
puts $err
lappend ::failed_tests $err
show_clients_state
kill_clients
force_kill_all_servers
......@@ -411,6 +417,8 @@ proc print_help_screen {} {
"--clients <num> Number of test clients (default 16)."
"--timeout <sec> Test timeout in seconds (default 10 min)."
"--force-failure Force the execution of a test that always fails."
"--dont-clean don't delete redis log files after the run"
"--wait-server wait after server is started (so that you can attach a debugger)"
"--help Print this help screen."
} "\n"]
}
......@@ -464,6 +472,10 @@ for {set j 0} {$j < [llength $argv]} {incr j} {
} elseif {$opt eq {--clients}} {
set ::numclients $arg
incr j
} elseif {$opt eq {--dont-clean}} {
set ::dont_clean 1
} elseif {$opt eq {--wait-server}} {
set ::wait_server 1
} elseif {$opt eq {--timeout}} {
set ::timeout $arg
incr j
......
......@@ -25,6 +25,39 @@ start_server {tags {"dump"}} {
assert {$ttl >= (2569591501-3000) && $ttl <= 2569591501}
r get foo
} {bar}
test {RESTORE can set an absolute expire} {
r set foo bar
set encoded [r dump foo]
r del foo
set now [clock milliseconds]
r restore foo [expr $now+3000] $encoded absttl
set ttl [r pttl foo]
assert {$ttl >= 2998 && $ttl <= 3000}
r get foo
} {bar}
test {RESTORE can set LRU} {
r set foo bar
set encoded [r dump foo]
r del foo
r config set maxmemory-policy allkeys-lru
r restore foo 0 $encoded idletime 1000
set idle [r object idletime foo]
assert {$idle >= 1000 && $idle <= 1002}
r get foo
} {bar}
test {RESTORE can set LFU} {
r set foo bar
set encoded [r dump foo]
r del foo
r config set maxmemory-policy allkeys-lfu
r restore foo 0 $encoded freq 100
set freq [r object freq foo]
assert {$freq == 100}
r get foo
} {bar}
test {RESTORE returns an error of the key already exists} {
r set foo bar
......
......@@ -121,7 +121,7 @@ start_server {tags {"expire"}} {
list $a $b
} {somevalue {}}
test {TTL returns tiem to live in seconds} {
test {TTL returns time to live in seconds} {
r del x
r setex x 10 somevalue
set ttl [r ttl x]
......
......@@ -97,10 +97,15 @@ start_server {tags {"defrag"}} {
r config set active-defrag-ignore-bytes 2mb
r config set maxmemory 0
r config set list-max-ziplist-size 5 ;# list of 10k items will have 2000 quicklist nodes
r config set stream-node-max-entries 5
r hmset hash h1 v1 h2 v2 h3 v3
r lpush list a b c d
r zadd zset 0 a 1 b 2 c 3 d
r sadd set a b c d
r xadd stream * item 1 value a
r xadd stream * item 2 value b
r xgroup create stream mygroup 0
r xreadgroup GROUP mygroup Alice COUNT 1 STREAMS stream >
# create big keys with 10k items
set rd [redis_deferring_client]
......@@ -109,8 +114,9 @@ start_server {tags {"defrag"}} {
$rd lpush biglist [concat "asdfasdfasdf" $j]
$rd zadd bigzset $j [concat "asdfasdfasdf" $j]
$rd sadd bigset [concat "asdfasdfasdf" $j]
$rd xadd bigstream * item 1 value a
}
for {set j 0} {$j < 40000} {incr j} {
for {set j 0} {$j < 50000} {incr j} {
$rd read ; # Discard replies
}
......@@ -134,7 +140,7 @@ start_server {tags {"defrag"}} {
for {set j 0} {$j < 500000} {incr j} {
$rd read ; # Discard replies
}
assert {[r dbsize] == 500008}
assert {[r dbsize] == 500010}
# create some fragmentation
for {set j 0} {$j < 500000} {incr j 2} {
......@@ -143,7 +149,7 @@ start_server {tags {"defrag"}} {
for {set j 0} {$j < 500000} {incr j 2} {
$rd read ; # Discard replies
}
assert {[r dbsize] == 250008}
assert {[r dbsize] == 250010}
# start defrag
after 120 ;# serverCron only updates the info once in 100ms
......@@ -155,6 +161,7 @@ start_server {tags {"defrag"}} {
r config set latency-monitor-threshold 5
r latency reset
set digest [r debug digest]
catch {r config set activedefrag yes} e
if {![string match {DISABLED*} $e]} {
# wait for the active defrag to start working (decision once a second)
......@@ -193,9 +200,11 @@ start_server {tags {"defrag"}} {
# due to high fragmentation, 10hz, and active-defrag-cycle-max set to 75,
# we expect max latency to be not much higher than 75ms
assert {$max_latency <= 80}
} else {
set _ ""
}
} {}
# verify the data isn't corrupted or changed
set newdigest [r debug digest]
assert {$digest eq $newdigest}
r save ;# saving an rdb iterates over all the data / pointers
} {OK}
}
}
......@@ -236,4 +236,50 @@ start_server {tags {"scan"}} {
set first_score [lindex $res 1]
assert {$first_score != 0}
}
test "SCAN regression test for issue #4906" {
for {set k 0} {$k < 100} {incr k} {
r del set
r sadd set x; # Make sure it's not intset encoded
set toremove {}
unset -nocomplain found
array set found {}
# Populate the set
set numele [expr {101+[randomInt 1000]}]
for {set j 0} {$j < $numele} {incr j} {
r sadd set $j
if {$j >= 100} {
lappend toremove $j
}
}
# Start scanning
set cursor 0
set iteration 0
set del_iteration [randomInt 10]
while {!($cursor == 0 && $iteration != 0)} {
lassign [r sscan set $cursor] cursor items
# Mark found items. We expect to find from 0 to 99 at the end
# since those elements will never be removed during the scanning.
foreach i $items {
set found($i) 1
}
incr iteration
# At some point remove most of the items to trigger the
# rehashing to a smaller hash table.
if {$iteration == $del_iteration} {
r srem set {*}$toremove
}
}
# Verify that SSCAN reported everything from 0 to 99
for {set j 0} {$j < 100} {incr j} {
if {![info exists found($j)]} {
fail "SSCAN element missing $j"
}
}
}
}
}
......@@ -253,4 +253,20 @@ start_server {
}
}
}
test {XREVRANGE regression test for issue #5006} {
# Add non compressed entries
r xadd teststream 1234567891230 key1 value1
r xadd teststream 1234567891240 key2 value2
r xadd teststream 1234567891250 key3 value3
# Add SAMEFIELD compressed entries
r xadd teststream2 1234567891230 key1 value1
r xadd teststream2 1234567891240 key1 value2
r xadd teststream2 1234567891250 key1 value3
assert_equal [r xrevrange teststream 1234567891245 -] {{1234567891240-0 {key2 value2}} {1234567891230-0 {key1 value1}}}
assert_equal [r xrevrange teststream2 1234567891245 -] {{1234567891240-0 {key1 value2}} {1234567891230-0 {key1 value1}}}
}
}
......@@ -653,11 +653,11 @@ start_server {tags {"zset"}} {
r del zset
assert_equal {} [r zpopmin zset]
create_zset zset {-1 a 1 b 2 c 3 d 4 e}
assert_equal {-1 a} [r zpopmin zset]
assert_equal {1 b} [r zpopmin zset]
assert_equal {4 e} [r zpopmax zset]
assert_equal {3 d} [r zpopmax zset]
assert_equal {2 c} [r zpopmin zset]
assert_equal {a -1} [r zpopmin zset]
assert_equal {b 1} [r zpopmin zset]
assert_equal {e 4} [r zpopmax zset]
assert_equal {d 3} [r zpopmax zset]
assert_equal {c 2} [r zpopmin zset]
assert_equal 0 [r exists zset]
r set foo bar
assert_error "*WRONGTYPE*" {r zpopmin foo}
......@@ -669,8 +669,8 @@ start_server {tags {"zset"}} {
assert_equal {} [r zpopmin z1 2]
assert_error "*WRONGTYPE*" {r zpopmin foo 2}
create_zset z1 {0 a 1 b 2 c 3 d}
assert_equal {0 a 1 b} [r zpopmin z1 2]
assert_equal {3 d 2 c} [r zpopmax z1 2]
assert_equal {a 0 b 1} [r zpopmin z1 2]
assert_equal {d 3 c 2} [r zpopmax z1 2]
}
test "BZPOP with a single existing sorted set - $encoding" {
......@@ -678,11 +678,11 @@ start_server {tags {"zset"}} {
create_zset zset {0 a 1 b 2 c}
$rd bzpopmin zset 5
assert_equal {zset 0 a} [$rd read]
assert_equal {zset a 0} [$rd read]
$rd bzpopmin zset 5
assert_equal {zset 1 b} [$rd read]
assert_equal {zset b 1} [$rd read]
$rd bzpopmax zset 5
assert_equal {zset 2 c} [$rd read]
assert_equal {zset c 2} [$rd read]
assert_equal 0 [r exists zset]
}
......@@ -692,16 +692,16 @@ start_server {tags {"zset"}} {
create_zset z2 {3 d 4 e 5 f}
$rd bzpopmin z1 z2 5
assert_equal {z1 0 a} [$rd read]
assert_equal {z1 a 0} [$rd read]
$rd bzpopmax z1 z2 5
assert_equal {z1 2 c} [$rd read]
assert_equal {z1 c 2} [$rd read]
assert_equal 1 [r zcard z1]
assert_equal 3 [r zcard z2]
$rd bzpopmax z2 z1 5
assert_equal {z2 5 f} [$rd read]
assert_equal {z2 f 5} [$rd read]
$rd bzpopmin z2 z1 5
assert_equal {z2 3 d} [$rd read]
assert_equal {z2 d 3} [$rd read]
assert_equal 1 [r zcard z1]
assert_equal 1 [r zcard z2]
}
......@@ -711,9 +711,9 @@ start_server {tags {"zset"}} {
r del z1
create_zset z2 {3 d 4 e 5 f}
$rd bzpopmax z1 z2 5
assert_equal {z2 5 f} [$rd read]
assert_equal {z2 f 5} [$rd read]
$rd bzpopmin z2 z1 5
assert_equal {z2 3 d} [$rd read]
assert_equal {z2 d 3} [$rd read]
assert_equal 0 [r zcard z1]
assert_equal 1 [r zcard z2]
}
......@@ -1107,7 +1107,7 @@ start_server {tags {"zset"}} {
r del zset
r zadd zset 1 bar
$rd read
} {zset 1 bar}
} {zset bar 1}
test "BZPOPMIN, ZADD + DEL + SET should not awake blocked client" {
set rd [redis_deferring_client]
......@@ -1124,7 +1124,7 @@ start_server {tags {"zset"}} {
r del zset
r zadd zset 1 bar
$rd read
} {zset 1 bar}
} {zset bar 1}
test "BZPOPMIN with same key multiple times should work" {
set rd [redis_deferring_client]
......@@ -1133,18 +1133,18 @@ start_server {tags {"zset"}} {
# Data arriving after the BZPOPMIN.
$rd bzpopmin z1 z2 z2 z1 0
r zadd z1 0 a
assert_equal [$rd read] {z1 0 a}
assert_equal [$rd read] {z1 a 0}
$rd bzpopmin z1 z2 z2 z1 0
r zadd z2 1 b
assert_equal [$rd read] {z2 1 b}
assert_equal [$rd read] {z2 b 1}
# Data already there.
r zadd z1 0 a
r zadd z2 1 b
$rd bzpopmin z1 z2 z2 z1 0
assert_equal [$rd read] {z1 0 a}
assert_equal [$rd read] {z1 a 0}
$rd bzpopmin z1 z2 z2 z1 0
assert_equal [$rd read] {z2 1 b}
assert_equal [$rd read] {z2 b 1}
}
test "MULTI/EXEC is isolated from the point of view of BZPOPMIN" {
......@@ -1157,7 +1157,7 @@ start_server {tags {"zset"}} {
r zadd zset 2 c
r exec
$rd read
} {zset 0 a}
} {zset a 0}
test "BZPOPMIN with variadic ZADD" {
set rd [redis_deferring_client]
......@@ -1167,7 +1167,7 @@ start_server {tags {"zset"}} {
if {$::valgrind} {after 100}
assert_equal 2 [r zadd zset -1 foo 1 bar]
if {$::valgrind} {after 100}
assert_equal {zset -1 foo} [$rd read]
assert_equal {zset foo -1} [$rd read]
assert_equal {bar} [r zrange zset 0 -1]
}
......@@ -1177,7 +1177,7 @@ start_server {tags {"zset"}} {
$rd bzpopmin zset 0
after 1000
r zadd zset 0 foo
assert_equal {zset 0 foo} [$rd read]
assert_equal {zset foo 0} [$rd read]
}
}
......