Commit 77a7ec72 authored by antirez

Merge branch 'unstable' into 5.0 branch

parents 48dfd42d 4ff47a0b
......@@ -8,7 +8,9 @@ each source file that you contribute.
# IMPORTANT: HOW TO USE REDIS GITHUB ISSUES
* Github issues SHOULD ONLY BE USED to report bugs, and for DETAILED feature
requests. Everything else belongs to the Redis Google Group.
requests. Everything else belongs to the Redis Google Group:
https://groups.google.com/forum/m/#!forum/Redis-db
PLEASE DO NOT POST GENERAL QUESTIONS that are not about bugs or suspected
bugs in the Github issues system. We'll be very happy to help you and provide
......@@ -30,7 +32,7 @@ each source file that you contribute.
a. Fork Redis on github ( http://help.github.com/fork-a-repo/ )
b. Create a topic branch (git checkout -b my_branch)
c. Push to your branch (git push origin my_branch)
d. Initiate a pull request on github ( http://help.github.com/send-pull-requests/ )
d. Initiate a pull request on github ( https://help.github.com/articles/creating-a-pull-request/ )
e. Done :)
For minor fixes just open a pull request on Github.
......
......@@ -435,7 +435,7 @@ top comment inside `server.c`.
After the command operates in some way, it returns a reply to the client,
usually using `addReply()` or a similar function defined inside `networking.c`.
There are tons of commands implementations inside th Redis source code
There are tons of commands implementations inside the Redis source code
that can serve as examples of actual commands implementations. To write
a few toy commands can be a good exercise to familiarize with the code base.
......
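As a concrete illustration of the paragraph above, here is a minimal toy command sketch (the command name is hypothetical and it is not part of Redis): it simply replies with its single argument using addReplyBulk() from networking.c, which is exactly the kind of small exercise the text suggests.

    /* ECHOTHIS <value> -- toy command: reply with the argument as a bulk string.
     * Assumes a command table entry declaring arity 2. */
    void echothisCommand(client *c) {
        addReplyBulk(c, c->argv[1]);
    }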
......@@ -22,7 +22,7 @@ just following those steps:
1. Remove the jemalloc directory.
2. Substitute it with the new jemalloc source tree.
3. Edit the Makefile localted in the same directoy as the README you are
3. Edit the Makefile located in the same directory as the README you are
reading, and change the --with-version in the Jemalloc configure script
options with the version you are using. This is required because otherwise
Jemalloc configuration script is broken and will not work nested in another
......@@ -50,7 +50,7 @@ This is never upgraded since it's part of the Redis project. If there are change
Hiredis
---
Hiredis uses the SDS string library, that must be the same version used inside Redis itself. Hiredis is also very critical for Sentinel. Historically Redis often used forked versions of hiredis in a way or the other. In order to upgrade it is adviced to take a lot of care:
Hiredis uses the SDS string library, that must be the same version used inside Redis itself. Hiredis is also very critical for Sentinel. Historically Redis often used forked versions of hiredis in a way or the other. In order to upgrade it is advised to take a lot of care:
1. Check with diff if hiredis API changed and what impact it could have in Redis.
2. Make sure that the SDS library inside Hiredis and inside Redis are compatible.
......@@ -83,6 +83,6 @@ and our version:
1. Makefile is modified to allow a different compiler than GCC.
2. We have the implementation source code, and directly link to the following external libraries: `lua_cjson.o`, `lua_struct.o`, `lua_cmsgpack.o` and `lua_bit.o`.
3. There is a security fix in `ldo.c`, line 498: The check for `LUA_SIGNATURE[0]` is removed in order toa void direct bytecode exectuion.
3. There is a security fix in `ldo.c`, line 498: The check for `LUA_SIGNATURE[0]` is removed in order to avoid direct bytecode execution.
......@@ -215,4 +215,32 @@ ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
return arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero);
}
JEMALLOC_ALWAYS_INLINE int
iget_defrag_hint(tsdn_t *tsdn, void* ptr, int *bin_util, int *run_util) {
int defrag = 0;
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
szind_t szind;
bool is_slab;
rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, true, &szind, &is_slab);
if (likely(is_slab)) {
/* Small allocation. */
extent_t *slab = iealloc(tsdn, ptr);
arena_t *arena = extent_arena_get(slab);
szind_t binind = extent_szind_get(slab);
bin_t *bin = &arena->bins[binind];
malloc_mutex_lock(tsdn, &bin->lock);
/* don't bother moving allocations from the slab currently used for new allocations */
if (slab != bin->slabcur) {
const bin_info_t *bin_info = &bin_infos[binind];
size_t availregs = bin_info->nregs * bin->stats.curslabs;
*bin_util = ((long long)bin->stats.curregs<<16) / availregs;
*run_util = ((long long)(bin_info->nregs - extent_nfree_get(slab))<<16) / bin_info->nregs;
defrag = 1;
}
malloc_mutex_unlock(tsdn, &bin->lock);
}
return defrag;
}
#endif /* JEMALLOC_INTERNAL_INLINES_C_H */
......@@ -120,3 +120,7 @@
# define JEMALLOC_RESTRICT_RETURN
# define JEMALLOC_ALLOCATOR
#endif
/* This version of Jemalloc, modified for Redis, has the je_get_defrag_hint()
* function. */
#define JEMALLOC_FRAG_HINT
......@@ -3324,3 +3324,14 @@ jemalloc_postfork_child(void) {
}
/******************************************************************************/
/* Helps the application decide if a pointer is worth re-allocating in order to reduce fragmentation.
* returns 0 if the allocation is in the currently active run,
* or when it is not causing any frag issue (large or huge bin)
* returns the bin utilization and run utilization both in fixed point 16:16.
* If the application decides to re-allocate it should use MALLOCX_TCACHE_NONE when doing so. */
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
get_defrag_hint(void* ptr, int *bin_util, int *run_util) {
assert(ptr != NULL);
return iget_defrag_hint(TSDN_NULL, ptr, bin_util, run_util);
}
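The exported wrapper above is picked up with the usual je_ name mangling, so applications see it as je_get_defrag_hint(). A hedged sketch of how a caller such as an active defragmenter might interpret the two 16:16 fixed-point utilizations (the policy shown is an assumption, not part of this commit; 1<<16 means 100% utilization, 32768 means 50%):

    /* Sketch only: decide whether moving an allocation is likely to help. */
    int worth_moving(void *ptr) {
        int bin_util, run_util;
        if (!je_get_defrag_hint(ptr, &bin_util, &run_util))
            return 0; /* Large/huge allocation, or in the active slab: skip. */
        /* Prefer moving allocations out of under-utilized slabs. If the caller
         * does re-allocate, it should use MALLOCX_TCACHE_NONE as noted above. */
        return run_util < bin_util && run_util != (1 << 16);
    }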
......@@ -639,7 +639,7 @@ slave-priority 100
# it with the specified string.
# 4) During replication, when a slave performs a full resynchronization with
# its master, the content of the whole database is removed in order to
# load the RDB file just transfered.
# load the RDB file just transferred.
#
# In all the above cases the default is to delete objects in a blocking way,
# like if DEL was called. However you can configure each case specifically
......@@ -1211,6 +1211,12 @@ hz 10
# big latency spikes.
aof-rewrite-incremental-fsync yes
# When redis saves RDB file, if the following option is enabled
# the file will be fsync-ed every 32 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
rdb-save-incremental-fsync yes
# Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good
# idea to start with the default settings and only change them after investigating
# how to improve the performances and how the keys LFU change over time, which
......
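The rdb-save-incremental-fsync option added above follows the same idea as the existing AOF setting. A minimal sketch of the underlying pattern, using plain POSIX calls for illustration (the real code goes through the rio layer and rioSetAutoSync(), which appears later in this diff):

    /* Illustrative only: fsync every AUTOSYNC_BYTES written instead of once at
     * the end, trading a few extra syscalls for smaller latency spikes. */
    #include <unistd.h>

    #define AUTOSYNC_BYTES (32*1024*1024)   /* 32 MB, matching the comment above. */

    ssize_t write_with_autosync(int fd, const void *buf, size_t len,
                                size_t *bytes_since_fsync) {
        ssize_t nwritten = write(fd, buf, len);
        if (nwritten > 0) {
            *bytes_since_fsync += nwritten;
            if (*bytes_since_fsync >= AUTOSYNC_BYTES) {
                fsync(fd);                  /* Commit what we have so far. */
                *bytes_since_fsync = 0;
            }
        }
        return nwritten;
    }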
......@@ -194,3 +194,31 @@ sentinel failover-timeout mymaster 180000
#
# sentinel client-reconfig-script mymaster /var/redis/reconfig.sh
# SECURITY
#
# By default SENTINEL SET will not be able to change the notification-script
# and client-reconfig-script at runtime. This avoids a trivial security issue
# where clients can set the script to anything and trigger a failover in order
# to get the program executed.
sentinel deny-scripts-reconfig yes
# REDIS COMMANDS RENAMING
#
# Sometimes the Redis server has certain commands, that are needed for Sentinel
# to work correctly, renamed to unguessable strings. This is often the case
# of CONFIG and SLAVEOF in the context of providers that provide Redis as
# a service, and don't want the customers to reconfigure the instances outside
# of the administration console.
#
# In such case it is possible to tell Sentinel to use different command names
# instead of the normal ones. For example if the master "mymaster", and the
# associated slaves, have "CONFIG" all renamed to "GUESSME", I could use:
#
# sentinel rename-command mymaster CONFIG GUESSME
#
# After such configuration is set, every time Sentinel would use CONFIG it will
# use GUESSME instead. Note that there is no actual need to respect the command
# case, so writing "config guessme" is the same in the example above.
#
# SENTINEL SET can also be used in order to perform this configuration at runtime.
......@@ -144,7 +144,7 @@ endif
REDIS_SERVER_NAME=redis-server
REDIS_SENTINEL_NAME=redis-sentinel
REDIS_SERVER_OBJ=adlist.o quicklist.o ae.o anet.o dict.o server.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endianconv.o slowlog.o scripting.o bio.o rio.o rand.o memtest.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o redis-check-rdb.o redis-check-aof.o geo.o lazyfree.o module.o evict.o expire.o geohash.o geohash_helper.o childinfo.o defrag.o siphash.o rax.o t_stream.o listpack.o
REDIS_SERVER_OBJ=adlist.o quicklist.o ae.o anet.o dict.o server.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endianconv.o slowlog.o scripting.o bio.o rio.o rand.o memtest.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o redis-check-rdb.o redis-check-aof.o geo.o lazyfree.o module.o evict.o expire.o geohash.o geohash_helper.o childinfo.o defrag.o siphash.o rax.o t_stream.o listpack.o localtime.o
REDIS_CLI_NAME=redis-cli
REDIS_CLI_OBJ=anet.o adlist.o dict.o redis-cli.o zmalloc.o release.o anet.o ae.o crc64.o siphash.o crc16.o
REDIS_BENCHMARK_NAME=redis-benchmark
......
......@@ -433,7 +433,7 @@ int aeProcessEvents(aeEventLoop *eventLoop, int flags)
* before replying to a client. */
int invert = fe->mask & AE_BARRIER;
/* Note the "fe->mask & mask & ..." code: maybe an already
/* Note the "fe->mask & mask & ..." code: maybe an already
* processed event removed an element that fired and we still
* didn't processed, so we check if the event is still valid.
*
......@@ -485,7 +485,7 @@ int aeWait(int fd, int mask, long long milliseconds) {
if ((retval = poll(&pfd, 1, milliseconds))== 1) {
if (pfd.revents & POLLIN) retmask |= AE_READABLE;
if (pfd.revents & POLLOUT) retmask |= AE_WRITABLE;
if (pfd.revents & POLLERR) retmask |= AE_WRITABLE;
if (pfd.revents & POLLERR) retmask |= AE_WRITABLE;
if (pfd.revents & POLLHUP) retmask |= AE_WRITABLE;
return retmask;
} else {
......
......@@ -228,7 +228,7 @@ static void killAppendOnlyChild(void) {
void stopAppendOnly(void) {
serverAssert(server.aof_state != AOF_OFF);
flushAppendOnlyFile(1);
aof_fsync(server.aof_fd);
redis_fsync(server.aof_fd);
close(server.aof_fd);
server.aof_fd = -1;
......@@ -261,7 +261,7 @@ int startAppendOnly(void) {
serverLog(LL_WARNING,"AOF was enabled but there is already a child process saving an RDB file on disk. An AOF background was scheduled to start when possible.");
} else {
/* If there is a pending AOF rewrite, we need to switch it off and
* start a new one: the old one cannot be reused becuase it is not
* start a new one: the old one cannot be reused because it is not
* accumulating the AOF buffer. */
if (server.aof_child_pid != -1) {
serverLog(LL_WARNING,"AOF was enabled but there is already an AOF rewriting in background. Stopping background AOF and starting a rewrite now.");
......@@ -476,10 +476,10 @@ void flushAppendOnlyFile(int force) {
/* Perform the fsync if needed. */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* aof_fsync is defined as fdatasync() for Linux in order to avoid
/* redis_fsync is defined as fdatasync() for Linux in order to avoid
* flushing metadata. */
latencyStartMonitor(latency);
aof_fsync(server.aof_fd); /* Let's try to get this data on the disk */
redis_fsync(server.aof_fd); /* Let's try to get this data on the disk */
latencyEndMonitor(latency);
latencyAddSampleIfNeeded("aof-fsync-always",latency);
server.aof_last_fsync = server.unixtime;
......@@ -645,7 +645,7 @@ struct client *createFakeClient(void) {
c->obuf_soft_limit_reached_time = 0;
c->watched_keys = listCreate();
c->peerid = NULL;
listSetFreeMethod(c->reply,decrRefCountVoid);
listSetFreeMethod(c->reply,freeClientReplyValue);
listSetDupMethod(c->reply,dupClientReplyValue);
initClientMultiState(c);
return c;
......@@ -683,7 +683,7 @@ int loadAppendOnlyFile(char *filename) {
exit(1);
}
/* Handle a zero-length AOF file as a special case. An emtpy AOF file
/* Handle a zero-length AOF file as a special case. An empty AOF file
* is a valid AOF because an empty server with AOF enabled will create
* a zero length file at startup, that will remain like that if no write
* operation is received. */
......@@ -1221,7 +1221,6 @@ int rewriteAppendOnlyFileRio(rio *aof) {
dictIterator *di = NULL;
dictEntry *de;
size_t processed = 0;
long long now = mstime();
int j;
for (j = 0; j < server.dbnum; j++) {
......@@ -1247,9 +1246,6 @@ int rewriteAppendOnlyFileRio(rio *aof) {
expiretime = getExpire(db,&key);
/* If this key is already expired skip it */
if (expiretime != -1 && expiretime < now) continue;
/* Save the key and associated value */
if (o->type == OBJ_STRING) {
/* Emit a SET command */
......@@ -1322,7 +1318,7 @@ int rewriteAppendOnlyFile(char *filename) {
rioInitWithFile(&aof,fp);
if (server.aof_rewrite_incremental_fsync)
rioSetAutoSync(&aof,AOF_AUTOSYNC_BYTES);
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
if (server.aof_use_rdb_preamble) {
int error;
......@@ -1690,7 +1686,7 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) {
oldfd = server.aof_fd;
server.aof_fd = newfd;
if (server.aof_fsync == AOF_FSYNC_ALWAYS)
aof_fsync(newfd);
redis_fsync(newfd);
else if (server.aof_fsync == AOF_FSYNC_EVERYSEC)
aof_background_fsync(newfd);
server.aof_selected_db = -1; /* Make sure SELECT is re-issued */
......@@ -1717,7 +1713,7 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) {
"Background AOF rewrite signal handler took %lldus", ustime()-now);
} else if (!bysignal && exitcode != 0) {
/* SIGUSR1 is whitelisted, so we have a way to kill a child without
* tirggering an error conditon. */
* triggering an error condition. */
if (bysignal != SIGUSR1)
server.aof_lastbgrewrite_status = C_ERR;
serverLog(LL_WARNING,
......
......@@ -16,7 +16,7 @@
* pthread_mutex_t myvar_mutex;
* atomicSet(myvar,12345);
*
* If atomic primitives are availble (tested in config.h) the mutex
* If atomic primitives are available (tested in config.h) the mutex
* is not used.
*
* Never use return value from the macros, instead use the AtomicGetIncr()
......
......@@ -187,7 +187,7 @@ void *bioProcessBackgroundJobs(void *arg) {
if (type == BIO_CLOSE_FILE) {
close((long)job->arg1);
} else if (type == BIO_AOF_FSYNC) {
aof_fsync((long)job->arg1);
redis_fsync((long)job->arg1);
} else if (type == BIO_LAZY_FREE) {
/* What we free changes depending on what arguments are set:
* arg1 -> free the object at pointer.
......
......@@ -918,7 +918,7 @@ void bitfieldCommand(client *c) {
struct bitfieldOp *ops = NULL; /* Array of ops to execute at end. */
int owtype = BFOVERFLOW_WRAP; /* Overflow type. */
int readonly = 1;
size_t higest_write_offset = 0;
size_t highest_write_offset = 0;
for (j = 2; j < c->argc; j++) {
int remargs = c->argc-j-1; /* Remaining args other than current. */
......@@ -968,8 +968,8 @@ void bitfieldCommand(client *c) {
if (opcode != BITFIELDOP_GET) {
readonly = 0;
if (higest_write_offset < bitoffset + bits - 1)
higest_write_offset = bitoffset + bits - 1;
if (highest_write_offset < bitoffset + bits - 1)
highest_write_offset = bitoffset + bits - 1;
/* INCRBY and SET require another argument. */
if (getLongLongFromObjectOrReply(c,c->argv[j+3],&i64,NULL) != C_OK){
zfree(ops);
......@@ -999,7 +999,7 @@ void bitfieldCommand(client *c) {
/* Lookup by making room up to the farthest bit reached by
* this operation. */
if ((o = lookupStringForBitCommand(c,
higest_write_offset)) == NULL) return;
highest_write_offset)) == NULL) return;
}
addReplyMultiBulkLen(c,numops);
......
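A small worked example of the offset bookkeeping above (values assumed for illustration): for BITFIELD mykey SET u8 #12 255 the bit offset is 12*8 = 96 and bits is 8, so highest_write_offset becomes 96+8-1 = 103; lookupStringForBitCommand() then grows the string to cover byte 103>>3 = 12, i.e. 13 bytes, before the ops are executed.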
......@@ -370,32 +370,46 @@ void handleClientsBlockedOnKeys(void) {
if (receiver->btype != BLOCKED_STREAM) continue;
streamID *gt = dictFetchValue(receiver->bpop.keys,
rl->key);
if (s->last_id.ms > gt->ms ||
(s->last_id.ms == gt->ms &&
s->last_id.seq > gt->seq))
{
/* If we blocked in the context of a consumer
* group, we need to resolve the group and update the
* last ID the client is blocked for: this is needed
* because serving other clients in the same consumer
* group will alter the "last ID" of the consumer
* group, and clients blocked in a consumer group are
* always blocked for the ">" ID: we need to deliver
* only new messages and avoid unblocking the client
* otherwise. */
streamCG *group = NULL;
if (receiver->bpop.xread_group) {
group = streamLookupCG(s,
receiver->bpop.xread_group->ptr);
/* If the group was not found, send an error
* to the consumer. */
if (!group) {
addReplyError(receiver,
"-NOGROUP the consumer group this client "
"was blocked on no longer exists");
unblockClient(receiver);
continue;
} else {
*gt = group->last_id;
}
}
if (streamCompareID(&s->last_id, gt) > 0) {
streamID start = *gt;
start.seq++; /* Can't overflow, it's an uint64_t */
/* If we blocked in the context of a consumer
* group, we need to resolve the group and
* consumer here. */
streamCG *group = NULL;
/* Lookup the consumer for the group, if any. */
streamConsumer *consumer = NULL;
if (receiver->bpop.xread_group) {
group = streamLookupCG(s,
receiver->bpop.xread_group->ptr);
/* In theory if the group is not found we
* just perform the read without the group,
* but actually when the group, or the key
* itself is deleted (triggering the removal
* of the group), we check for blocked clients
* and send them an error. */
}
int noack = 0;
if (group) {
consumer = streamLookupConsumer(group,
receiver->bpop.xread_consumer->ptr,
1);
noack = receiver->bpop.xread_group_noack;
}
/* Emit the two elements sub-array consisting of
......@@ -412,7 +426,7 @@ void handleClientsBlockedOnKeys(void) {
};
streamReplyWithRange(receiver,s,&start,NULL,
receiver->bpop.xread_count,
0, group, consumer, 0, &pi);
0, group, consumer, noack, &pi);
/* Note that after we unblock the client, 'gt'
* and other receiver->bpop stuff are no longer
......
......@@ -3100,7 +3100,7 @@ void clusterHandleSlaveFailover(void) {
(unsigned long long) myself->configEpoch);
}
/* Take responsability for the cluster slots. */
/* Take responsibility for the cluster slots. */
clusterFailoverReplaceYourMaster();
} else {
clusterLogCantFailover(CLUSTER_CANT_FAILOVER_WAITING_VOTES);
......@@ -3151,11 +3151,11 @@ void clusterHandleSlaveMigration(int max_slaves) {
!nodeTimedOut(mymaster->slaves[j])) okslaves++;
if (okslaves <= server.cluster_migration_barrier) return;
/* Step 3: Idenitfy a candidate for migration, and check if among the
/* Step 3: Identify a candidate for migration, and check if among the
* masters with the greatest number of ok slaves, I'm the one with the
* smallest node ID (the "candidate slave").
*
* Note: this means that eventually a replica migration will occurr
* Note: this means that eventually a replica migration will occur
* since slaves that are reachable again always have their FAIL flag
* cleared, so eventually there must be a candidate. At the same time
* this does not mean that there are no race conditions possible (two
......@@ -3736,7 +3736,7 @@ void clusterCloseAllSlots(void) {
* -------------------------------------------------------------------------- */
/* The following are defines that are only used in the evaluation function
* and are based on heuristics. Actaully the main point about the rejoin and
* and are based on heuristics. Actually the main point about the rejoin and
* writable delay is that they should be a few orders of magnitude larger
* than the network latency. */
#define CLUSTER_MAX_REJOIN_DELAY 5000
......@@ -4178,27 +4178,27 @@ void clusterCommand(client *c) {
if (c->argc == 2 && !strcasecmp(c->argv[1]->ptr,"help")) {
const char *help[] = {
"addslots <slot> [slot ...] -- Assign slots to current node.",
"bumpepoch -- Advance the cluster config epoch.",
"count-failure-reports <node-id> -- Return number of failure reports for <node-id>.",
"countkeysinslot <slot> - Return the number of keys in <slot>.",
"delslots <slot> [slot ...] -- Delete slots information from current node.",
"failover [force|takeover] -- Promote current slave node to being a master.",
"forget <node-id> -- Remove a node from the cluster.",
"getkeysinslot <slot> <count> -- Return key names stored by current node in a slot.",
"flushslots -- Delete current node own slots information.",
"info - Return onformation about the cluster.",
"keyslot <key> -- Return the hash slot for <key>.",
"meet <ip> <port> [bus-port] -- Connect nodes into a working cluster.",
"myid -- Return the node id.",
"nodes -- Return cluster configuration seen by node. Output format:",
"ADDSLOTS <slot> [slot ...] -- Assign slots to current node.",
"BUMPEPOCH -- Advance the cluster config epoch.",
"COUNT-failure-reports <node-id> -- Return number of failure reports for <node-id>.",
"COUNTKEYSINSLOT <slot> - Return the number of keys in <slot>.",
"DELSLOTS <slot> [slot ...] -- Delete slots information from current node.",
"FAILOVER [force|takeover] -- Promote current slave node to being a master.",
"FORGET <node-id> -- Remove a node from the cluster.",
"GETKEYSINSLOT <slot> <count> -- Return key names stored by current node in a slot.",
"FLUSHSLOTS -- Delete current node own slots information.",
"INFO - Return onformation about the cluster.",
"KEYSLOT <key> -- Return the hash slot for <key>.",
"MEET <ip> <port> [bus-port] -- Connect nodes into a working cluster.",
"MYID -- Return the node id.",
"NODES -- Return cluster configuration seen by node. Output format:",
" <id> <ip:port> <flags> <master> <pings> <pongs> <epoch> <link> <slot> ... <slot>",
"replicate <node-id> -- Configure current node as slave to <node-id>.",
"reset [hard|soft] -- Reset current node (default: soft).",
"set-config-epoch <epoch> - Set config epoch of current node.",
"setslot <slot> (importing|migrating|stable|node <node-id>) -- Set slot state.",
"slaves <node-id> -- Return <node-id> slaves.",
"slots -- Return information about slots range mappings. Each range is made of:",
"REPLICATE <node-id> -- Configure current node as slave to <node-id>.",
"RESET [hard|soft] -- Reset current node (default: soft).",
"SET-config-epoch <epoch> - Set config epoch of current node.",
"SETSLOT <slot> (importing|migrating|stable|node <node-id>) -- Set slot state.",
"SLAVES <node-id> -- Return <node-id> slaves.",
"SLOTS -- Return information about slots range mappings. Each range is made of:",
" start, end, master and replicas IP addresses, ports and ids",
NULL
};
......@@ -4746,8 +4746,7 @@ NULL
clusterReset(hard);
addReply(c,shared.ok);
} else {
addReplyErrorFormat(c, "Unknown subcommand or wrong number of arguments for '%s'. Try CLUSTER HELP",
(char*)c->argv[1]->ptr);
addReplySubcommandSyntaxError(c);
return;
}
}
......@@ -4835,15 +4834,39 @@ void dumpCommand(client *c) {
/* RESTORE key ttl serialized-value [REPLACE] */
void restoreCommand(client *c) {
long long ttl;
long long ttl, lfu_freq = -1, lru_idle = -1, lru_clock = -1;
rio payload;
int j, type, replace = 0;
int j, type, replace = 0, absttl = 0;
robj *obj;
/* Parse additional options */
for (j = 4; j < c->argc; j++) {
int additional = c->argc-j-1;
if (!strcasecmp(c->argv[j]->ptr,"replace")) {
replace = 1;
} else if (!strcasecmp(c->argv[j]->ptr,"absttl")) {
absttl = 1;
} else if (!strcasecmp(c->argv[j]->ptr,"idletime") && additional >= 1 &&
lfu_freq == -1)
{
if (getLongLongFromObjectOrReply(c,c->argv[j+1],&lru_idle,NULL)
!= C_OK) return;
if (lru_idle < 0) {
addReplyError(c,"Invalid IDLETIME value, must be >= 0");
return;
}
lru_clock = LRU_CLOCK();
j++; /* Consume additional arg. */
} else if (!strcasecmp(c->argv[j]->ptr,"freq") && additional >= 1 &&
lru_idle == -1)
{
if (getLongLongFromObjectOrReply(c,c->argv[j+1],&lfu_freq,NULL)
!= C_OK) return;
if (lfu_freq < 0 || lfu_freq > 255) {
addReplyError(c,"Invalid FREQ value, must be >= 0 and <= 255");
return;
}
j++; /* Consume additional arg. */
} else {
addReply(c,shared.syntaxerr);
return;
......@@ -4884,7 +4907,11 @@ void restoreCommand(client *c) {
/* Create the key and set the TTL if any */
dbAdd(c->db,c->argv[1],obj);
if (ttl) setExpire(c,c->db,c->argv[1],mstime()+ttl);
if (ttl) {
if (!absttl) ttl+=mstime();
setExpire(c,c->db,c->argv[1],ttl);
}
objectSetLRUOrLFU(obj,lfu_freq,lru_idle,lru_clock);
signalModifiedKey(c->db,c->argv[1]);
addReply(c,shared.ok);
server.dirty++;
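A hedged usage sketch for the new RESTORE options, written as hiredis client code (not part of this commit): ABSTTL makes <ttl> an absolute Unix time in milliseconds, IDLETIME seeds the LRU idle time in seconds, and FREQ seeds the LFU counter (0..255, effective when an LFU maxmemory policy is in use); FREQ and IDLETIME are mutually exclusive since a key is tracked either by LRU or by LFU.

    /* Sketch only: restore a serialized value, replacing any existing key and
     * seeding its LFU counter. Assumes an already connected hiredis context. */
    #include <hiredis/hiredis.h>

    int restore_with_freq(redisContext *c, const char *key,
                          const char *payload, size_t payload_len) {
        redisReply *r = redisCommand(c, "RESTORE %s 0 %b REPLACE FREQ 5",
                                     key, payload, payload_len);
        int ok = (r != NULL && r->type == REDIS_REPLY_STATUS);
        if (r) freeReplyObject(r);
        return ok;
    }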
......@@ -5557,7 +5584,7 @@ void clusterRedirectClient(client *c, clusterNode *n, int hashslot, int error_co
if (error_code == CLUSTER_REDIR_CROSS_SLOT) {
addReplySds(c,sdsnew("-CROSSSLOT Keys in request don't hash to the same slot\r\n"));
} else if (error_code == CLUSTER_REDIR_UNSTABLE) {
/* The request spawns mutliple keys in the same slot,
/* The request spawns multiple keys in the same slot,
* but the slot is not "stable" currently as there is
* a migration or import in progress. */
addReplySds(c,sdsnew("-TRYAGAIN Multiple keys request during rehashing of slot\r\n"));
......@@ -5589,7 +5616,11 @@ void clusterRedirectClient(client *c, clusterNode *n, int hashslot, int error_co
* longer handles, the client is sent a redirection error, and the function
* returns 1. Otherwise 0 is returned and no operation is performed. */
int clusterRedirectBlockedClientIfNeeded(client *c) {
if (c->flags & CLIENT_BLOCKED && c->btype == BLOCKED_LIST) {
if (c->flags & CLIENT_BLOCKED &&
(c->btype == BLOCKED_LIST ||
c->btype == BLOCKED_ZSET ||
c->btype == BLOCKED_STREAM))
{
dictEntry *de;
dictIterator *di;
......
......@@ -243,7 +243,7 @@ union clusterMsgData {
#define CLUSTER_PROTO_VER 1 /* Cluster bus protocol version. */
typedef struct {
char sig[4]; /* Siganture "RCmb" (Redis Cluster message bus). */
char sig[4]; /* Signature "RCmb" (Redis Cluster message bus). */
uint32_t totlen; /* Total length of this message */
uint16_t ver; /* Protocol version, currently set to 1. */
uint16_t port; /* TCP base port number. */
......
......@@ -390,7 +390,7 @@ void loadServerConfigFromString(char *config) {
}
} else if (!strcasecmp(argv[0],"masterauth") && argc == 2) {
zfree(server.masterauth);
server.masterauth = zstrdup(argv[1]);
server.masterauth = argv[1][0] ? zstrdup(argv[1]) : NULL;
} else if (!strcasecmp(argv[0],"slave-serve-stale-data") && argc == 2) {
if ((server.repl_serve_stale_data = yesnotoi(argv[1])) == -1) {
err = "argument must be 'yes' or 'no'"; goto loaderr;
......@@ -431,6 +431,11 @@ void loadServerConfigFromString(char *config) {
if ((server.active_defrag_enabled = yesnotoi(argv[1])) == -1) {
err = "argument must be 'yes' or 'no'"; goto loaderr;
}
if (server.active_defrag_enabled) {
#ifndef HAVE_DEFRAG
err = "active defrag can't be enabled without proper jemalloc support"; goto loaderr;
#endif
}
} else if (!strcasecmp(argv[0],"daemonize") && argc == 2) {
if ((server.daemonize = yesnotoi(argv[1])) == -1) {
err = "argument must be 'yes' or 'no'"; goto loaderr;
......@@ -483,6 +488,13 @@ void loadServerConfigFromString(char *config) {
yesnotoi(argv[1])) == -1) {
err = "argument must be 'yes' or 'no'"; goto loaderr;
}
} else if (!strcasecmp(argv[0],"rdb-save-incremental-fsync") &&
argc == 2)
{
if ((server.rdb_save_incremental_fsync =
yesnotoi(argv[1])) == -1) {
err = "argument must be 'yes' or 'no'"; goto loaderr;
}
} else if (!strcasecmp(argv[0],"aof-load-truncated") && argc == 2) {
if ((server.aof_load_truncated = yesnotoi(argv[1])) == -1) {
err = "argument must be 'yes' or 'no'"; goto loaderr;
......@@ -496,7 +508,7 @@ void loadServerConfigFromString(char *config) {
err = "Password is longer than CONFIG_AUTHPASS_MAX_LEN";
goto loaderr;
}
server.requirepass = zstrdup(argv[1]);
server.requirepass = argv[1][0] ? zstrdup(argv[1]) : NULL;
} else if (!strcasecmp(argv[0],"pidfile") && argc == 2) {
zfree(server.pidfile);
server.pidfile = zstrdup(argv[1]);
......@@ -1021,6 +1033,8 @@ void configSetCommand(client *c) {
"cluster-slave-no-failover",server.cluster_slave_no_failover) {
} config_set_bool_field(
"aof-rewrite-incremental-fsync",server.aof_rewrite_incremental_fsync) {
} config_set_bool_field(
"rdb-save-incremental-fsync",server.rdb_save_incremental_fsync) {
} config_set_bool_field(
"aof-load-truncated",server.aof_load_truncated) {
} config_set_bool_field(
......@@ -1062,15 +1076,15 @@ void configSetCommand(client *c) {
/* Numerical fields.
* config_set_numerical_field(name,var,min,max) */
} config_set_numerical_field(
"tcp-keepalive",server.tcpkeepalive,0,LLONG_MAX) {
"tcp-keepalive",server.tcpkeepalive,0,INT_MAX) {
} config_set_numerical_field(
"maxmemory-samples",server.maxmemory_samples,1,LLONG_MAX) {
"maxmemory-samples",server.maxmemory_samples,1,INT_MAX) {
} config_set_numerical_field(
"lfu-log-factor",server.lfu_log_factor,0,LLONG_MAX) {
"lfu-log-factor",server.lfu_log_factor,0,INT_MAX) {
} config_set_numerical_field(
"lfu-decay-time",server.lfu_decay_time,0,LLONG_MAX) {
"lfu-decay-time",server.lfu_decay_time,0,INT_MAX) {
} config_set_numerical_field(
"timeout",server.maxidletime,0,LONG_MAX) {
"timeout",server.maxidletime,0,INT_MAX) {
} config_set_numerical_field(
"active-defrag-threshold-lower",server.active_defrag_threshold_lower,0,1000) {
} config_set_numerical_field(
......@@ -1082,13 +1096,17 @@ void configSetCommand(client *c) {
} config_set_numerical_field(
"active-defrag-cycle-max",server.active_defrag_cycle_max,1,99) {
} config_set_numerical_field(
"active-defrag-max-scan-fields",server.active_defrag_max_scan_fields,1,LLONG_MAX) {
"active-defrag-max-scan-fields",server.active_defrag_max_scan_fields,1,LONG_MAX) {
} config_set_numerical_field(
"auto-aof-rewrite-percentage",server.aof_rewrite_perc,0,LLONG_MAX){
"auto-aof-rewrite-percentage",server.aof_rewrite_perc,0,INT_MAX){
} config_set_numerical_field(
"hash-max-ziplist-entries",server.hash_max_ziplist_entries,0,LLONG_MAX) {
"hash-max-ziplist-entries",server.hash_max_ziplist_entries,0,LONG_MAX) {
} config_set_numerical_field(
"hash-max-ziplist-value",server.hash_max_ziplist_value,0,LLONG_MAX) {
"hash-max-ziplist-value",server.hash_max_ziplist_value,0,LONG_MAX) {
} config_set_numerical_field(
"stream-node-max-bytes",server.stream_node_max_bytes,0,LONG_MAX) {
} config_set_numerical_field(
"stream-node-max-entries",server.stream_node_max_entries,0,LLONG_MAX) {
} config_set_numerical_field(
"stream-node-max-bytes",server.stream_node_max_bytes,0,LLONG_MAX) {
} config_set_numerical_field(
......@@ -1098,40 +1116,40 @@ void configSetCommand(client *c) {
} config_set_numerical_field(
"list-compress-depth",server.list_compress_depth,0,INT_MAX) {
} config_set_numerical_field(
"set-max-intset-entries",server.set_max_intset_entries,0,LLONG_MAX) {
"set-max-intset-entries",server.set_max_intset_entries,0,LONG_MAX) {
} config_set_numerical_field(
"zset-max-ziplist-entries",server.zset_max_ziplist_entries,0,LLONG_MAX) {
"zset-max-ziplist-entries",server.zset_max_ziplist_entries,0,LONG_MAX) {
} config_set_numerical_field(
"zset-max-ziplist-value",server.zset_max_ziplist_value,0,LLONG_MAX) {
"zset-max-ziplist-value",server.zset_max_ziplist_value,0,LONG_MAX) {
} config_set_numerical_field(
"hll-sparse-max-bytes",server.hll_sparse_max_bytes,0,LLONG_MAX) {
"hll-sparse-max-bytes",server.hll_sparse_max_bytes,0,LONG_MAX) {
} config_set_numerical_field(
"lua-time-limit",server.lua_time_limit,0,LLONG_MAX) {
"lua-time-limit",server.lua_time_limit,0,LONG_MAX) {
} config_set_numerical_field(
"slowlog-log-slower-than",server.slowlog_log_slower_than,0,LLONG_MAX) {
"slowlog-log-slower-than",server.slowlog_log_slower_than,-1,LLONG_MAX) {
} config_set_numerical_field(
"slowlog-max-len",ll,0,LLONG_MAX) {
"slowlog-max-len",ll,0,LONG_MAX) {
/* Cast to unsigned. */
server.slowlog_max_len = (unsigned long)ll;
} config_set_numerical_field(
"latency-monitor-threshold",server.latency_monitor_threshold,0,LLONG_MAX){
} config_set_numerical_field(
"repl-ping-slave-period",server.repl_ping_slave_period,1,LLONG_MAX) {
"repl-ping-slave-period",server.repl_ping_slave_period,1,INT_MAX) {
} config_set_numerical_field(
"repl-timeout",server.repl_timeout,1,LLONG_MAX) {
"repl-timeout",server.repl_timeout,1,INT_MAX) {
} config_set_numerical_field(
"repl-backlog-ttl",server.repl_backlog_time_limit,0,LLONG_MAX) {
"repl-backlog-ttl",server.repl_backlog_time_limit,0,LONG_MAX) {
} config_set_numerical_field(
"repl-diskless-sync-delay",server.repl_diskless_sync_delay,0,LLONG_MAX) {
"repl-diskless-sync-delay",server.repl_diskless_sync_delay,0,INT_MAX) {
} config_set_numerical_field(
"slave-priority",server.slave_priority,0,LLONG_MAX) {
"slave-priority",server.slave_priority,0,INT_MAX) {
} config_set_numerical_field(
"slave-announce-port",server.slave_announce_port,0,65535) {
} config_set_numerical_field(
"min-slaves-to-write",server.repl_min_slaves_to_write,0,LLONG_MAX) {
"min-slaves-to-write",server.repl_min_slaves_to_write,0,INT_MAX) {
refreshGoodSlavesCount();
} config_set_numerical_field(
"min-slaves-max-lag",server.repl_min_slaves_max_lag,0,LLONG_MAX) {
"min-slaves-max-lag",server.repl_min_slaves_max_lag,0,INT_MAX) {
refreshGoodSlavesCount();
} config_set_numerical_field(
"cluster-node-timeout",server.cluster_node_timeout,0,LLONG_MAX) {
......@@ -1140,17 +1158,17 @@ void configSetCommand(client *c) {
} config_set_numerical_field(
"cluster-announce-bus-port",server.cluster_announce_bus_port,0,65535) {
} config_set_numerical_field(
"cluster-migration-barrier",server.cluster_migration_barrier,0,LLONG_MAX){
"cluster-migration-barrier",server.cluster_migration_barrier,0,INT_MAX){
} config_set_numerical_field(
"cluster-slave-validity-factor",server.cluster_slave_validity_factor,0,LLONG_MAX) {
"cluster-slave-validity-factor",server.cluster_slave_validity_factor,0,INT_MAX) {
} config_set_numerical_field(
"hz",server.hz,0,LLONG_MAX) {
"hz",server.hz,0,INT_MAX) {
/* Hz is more an hint from the user, so we accept values out of range
* but cap them to reasonable values. */
if (server.hz < CONFIG_MIN_HZ) server.hz = CONFIG_MIN_HZ;
if (server.hz > CONFIG_MAX_HZ) server.hz = CONFIG_MAX_HZ;
} config_set_numerical_field(
"watchdog-period",ll,0,LLONG_MAX) {
"watchdog-period",ll,0,INT_MAX) {
if (ll)
enableWatchdog(ll);
else
......@@ -1347,6 +1365,8 @@ void configGetCommand(client *c) {
server.repl_diskless_sync);
config_get_bool_field("aof-rewrite-incremental-fsync",
server.aof_rewrite_incremental_fsync);
config_get_bool_field("rdb-save-incremental-fsync",
server.rdb_save_incremental_fsync);
config_get_bool_field("aof-load-truncated",
server.aof_load_truncated);
config_get_bool_field("aof-use-rdb-preamble",
......@@ -2084,6 +2104,7 @@ int rewriteConfig(char *path) {
rewriteConfigClientoutputbufferlimitOption(state);
rewriteConfigNumericalOption(state,"hz",server.hz,CONFIG_DEFAULT_HZ);
rewriteConfigYesNoOption(state,"aof-rewrite-incremental-fsync",server.aof_rewrite_incremental_fsync,CONFIG_DEFAULT_AOF_REWRITE_INCREMENTAL_FSYNC);
rewriteConfigYesNoOption(state,"rdb-save-incremental-fsync",server.rdb_save_incremental_fsync,CONFIG_DEFAULT_RDB_SAVE_INCREMENTAL_FSYNC);
rewriteConfigYesNoOption(state,"aof-load-truncated",server.aof_load_truncated,CONFIG_DEFAULT_AOF_LOAD_TRUNCATED);
rewriteConfigYesNoOption(state,"aof-use-rdb-preamble",server.aof_use_rdb_preamble,CONFIG_DEFAULT_AOF_USE_RDB_PREAMBLE);
rewriteConfigEnumOption(state,"supervised",server.supervised_mode,supervised_mode_enum,SUPERVISED_NONE);
......@@ -2123,10 +2144,10 @@ void configCommand(client *c) {
if (c->argc == 2 && !strcasecmp(c->argv[1]->ptr,"help")) {
const char *help[] = {
"get <pattern> -- Return parameters matching the glob-like <pattern> and their values.",
"set <parameter> <value> -- Set parameter to value.",
"resetstat -- Reset statistics reported by INFO.",
"rewrite -- Rewrite the configuration file.",
"GET <pattern> -- Return parameters matching the glob-like <pattern> and their values.",
"SET <parameter> <value> -- Set parameter to value.",
"RESETSTAT -- Reset statistics reported by INFO.",
"REWRITE -- Rewrite the configuration file.",
NULL
};
addReplyHelp(c, help);
......@@ -2151,8 +2172,7 @@ NULL
addReply(c,shared.ok);
}
} else {
addReplyErrorFormat(c, "Unknown subcommand or wrong number of arguments for '%s'. Try CONFIG HELP",
(char*)c->argv[1]->ptr);
addReplySubcommandSyntaxError(c);
return;
}
}
......@@ -87,11 +87,11 @@
#endif
#endif
/* Define aof_fsync to fdatasync() in Linux and fsync() for all the rest */
/* Define redis_fsync to fdatasync() in Linux and fsync() for all the rest */
#ifdef __linux__
#define aof_fsync fdatasync
#define redis_fsync fdatasync
#else
#define aof_fsync fsync
#define redis_fsync fsync
#endif
/* Define rdb_fsync_range to sync_file_range() on Linux, otherwise we use
......
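The aof_fsync to redis_fsync rename is mechanical: redis_fsync stays a compile-time alias, so call sites are unchanged while Linux keeps the cheaper fdatasync() that skips flushing metadata such as mtime. A minimal call-site sketch, assuming <unistd.h> (illustrative, not taken from the diff):

    /* Illustrative only: append a buffer and make the data durable. On Linux
     * redis_fsync expands to fdatasync(), elsewhere to fsync(). */
    static int append_and_sync(int fd, const void *buf, size_t len) {
        if (write(fd, buf, len) != (ssize_t)len) return -1;
        return redis_fsync(fd);
    }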
......@@ -90,7 +90,7 @@ robj *lookupKey(redisDb *db, robj *key, int flags) {
* LOOKUP_NONE (or zero): no special flags are passed.
* LOOKUP_NOTOUCH: don't alter the last access time of the key.
*
* Note: this function also returns NULL is the key is logically expired
* Note: this function also returns NULL if the key is logically expired
* but still existing, in case this is a slave, since this API is called only
* for read operations. Even if the key expiry is master-driven, we can
* correctly report a key is expired on slaves even if the master is lagging
......@@ -113,7 +113,7 @@ robj *lookupKeyReadWithFlags(redisDb *db, robj *key, int flags) {
* safety measure, the command invoked is a read-only command, we can
* safely return NULL here, and provide a more consistent behavior
* to clients accessing expired values in a read-only fashion, that
* will say the key as non exisitng.
* will say the key as non existing.
*
* Notably this covers GETs when slaves are used to scale reads. */
if (server.current_client &&
......@@ -223,6 +223,8 @@ int dbExists(redisDb *db, robj *key) {
* The function makes sure to return keys not already expired. */
robj *dbRandomKey(redisDb *db) {
dictEntry *de;
int maxtries = 100;
int allvolatile = dictSize(db->dict) == dictSize(db->expires);
while(1) {
sds key;
......@@ -234,6 +236,17 @@ robj *dbRandomKey(redisDb *db) {
key = dictGetKey(de);
keyobj = createStringObject(key,sdslen(key));
if (dictFind(db->expires,key)) {
if (allvolatile && server.masterhost && --maxtries == 0) {
/* If the DB is composed only of keys with an expire set,
* it could happen that all the keys are already logically
* expired in the slave, so the function cannot stop because
* expireIfNeeded() is false, nor it can stop because
* dictGetRandomKey() returns NULL (there are keys to return).
* To prevent the infinite loop we do some tries, but if there
* are the conditions for an infinite loop, eventually we
* return a key name that may be already expired. */
return keyobj;
}
if (expireIfNeeded(db,keyobj)) {
decrRefCount(keyobj);
continue; /* search for another key. This expired. */
......@@ -305,7 +318,7 @@ robj *dbUnshareStringValue(redisDb *db, robj *key, robj *o) {
* If callback is given the function is called from time to time to
* signal that work is in progress.
*
* The dbnum can be -1 if all teh DBs should be flushed, or the specified
* The dbnum can be -1 if all the DBs should be flushed, or the specified
* DB number if we want to flush only a single Redis database number.
*
* Flags are be EMPTYDB_NO_FLAGS if no special flags are specified or
......@@ -467,8 +480,7 @@ void existsCommand(client *c) {
int j;
for (j = 1; j < c->argc; j++) {
expireIfNeeded(c->db,c->argv[j]);
if (dbExists(c->db,c->argv[j])) count++;
if (lookupKeyRead(c->db,c->argv[j])) count++;
}
addReplyLongLong(c,count);
}
......@@ -1173,7 +1185,7 @@ int *getKeysUsingCommandTable(struct redisCommand *cmd,robj **argv, int argc, in
for (j = cmd->firstkey; j <= last; j += cmd->keystep) {
if (j >= argc) {
/* Modules commands, and standard commands with a not fixed number
* of arugments (negative arity parameter) do not have dispatch
* of arguments (negative arity parameter) do not have dispatch
* time arity checks, so we need to handle the case where the user
* passed an invalid number of arguments here. In this case we
* return no keys and expect the command implementation to report
......@@ -1228,7 +1240,7 @@ int *zunionInterGetKeys(struct redisCommand *cmd, robj **argv, int argc, int *nu
num = atoi(argv[2]->ptr);
/* Sanity check. Don't return any key if the command is going to
* reply with syntax error. */
if (num > (argc-3)) {
if (num < 1 || num > (argc-3)) {
*numkeys = 0;
return NULL;
}
......@@ -1257,7 +1269,7 @@ int *evalGetKeys(struct redisCommand *cmd, robj **argv, int argc, int *numkeys)
num = atoi(argv[2]->ptr);
/* Sanity check. Don't return any key if the command is going to
* reply with syntax error. */
if (num > (argc-3)) {
if (num <= 0 || num > (argc-3)) {
*numkeys = 0;
return NULL;
}
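A quick worked example of the tightened check (command line assumed for illustration): for EVAL <script> 2 k1 k2 arg, argc is 6 and num is 2, which passes num <= 0 || num > (argc-3) and yields key indexes 3 and 4; a malformed EVAL <script> -1 k1 previously slipped past the old num > (argc-3) test and produced a negative key count, while it now falls into the error branch. The num < 1 guard added to zunionInterGetKeys() above closes the same hole for ZUNIONSTORE/ZINTERSTORE.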
......@@ -1386,23 +1398,37 @@ int *georadiusGetKeys(struct redisCommand *cmd, robj **argv, int argc, int *numk
}
/* XREAD [BLOCK <milliseconds>] [COUNT <count>] [GROUP <groupname> <ttl>]
* [RETRY <milliseconds> <ttl>] STREAMS key_1 key_2 ... key_N
* ID_1 ID_2 ... ID_N */
* STREAMS key_1 key_2 ... key_N ID_1 ID_2 ... ID_N */
int *xreadGetKeys(struct redisCommand *cmd, robj **argv, int argc, int *numkeys) {
int i, num, *keys;
int i, num = 0, *keys;
UNUSED(cmd);
/* We need to seek the last argument that contains "STREAMS", because other
* arguments before may contain it (for example the group name). */
/* We need to parse the options of the command in order to seek the first
* "STREAMS" string which is actually the option. This is needed because
* "STREAMS" could also be the name of the consumer group and even the
* name of the stream key. */
int streams_pos = -1;
for (i = 1; i < argc; i++) {
char *arg = argv[i]->ptr;
if (!strcasecmp(arg, "streams")) streams_pos = i;
if (!strcasecmp(arg, "block")) {
i++; /* Skip option argument. */
} else if (!strcasecmp(arg, "count")) {
i++; /* Skip option argument. */
} else if (!strcasecmp(arg, "group")) {
i += 2; /* Skip option argument. */
} else if (!strcasecmp(arg, "noack")) {
/* Nothing to do. */
} else if (!strcasecmp(arg, "streams")) {
streams_pos = i;
break;
} else {
break; /* Syntax error. */
}
}
if (streams_pos != -1) num = argc - streams_pos - 1;
/* Syntax error. */
if (streams_pos == -1 || num % 2 != 0) {
if (streams_pos == -1 || num == 0 || num % 2 != 0) {
*numkeys = 0;
return NULL;
}
......@@ -1410,7 +1436,7 @@ int *xreadGetKeys(struct redisCommand *cmd, robj **argv, int argc, int *numkeys)
there are also the IDs, one per key. */
keys = zmalloc(sizeof(int) * num);
for (i = streams_pos+1; i < argc; i++) keys[i-streams_pos-1] = i;
for (i = streams_pos+1; i < argc-num; i++) keys[i-streams_pos-1] = i;
*numkeys = num;
return keys;
}
......
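A worked example of why the options must be parsed in order (command line assumed for illustration): a stream key may itself be called "streams", which broke the old "take the last STREAMS argument" heuristic. For

    argv[0]=XREAD argv[1]=COUNT argv[2]=10 argv[3]=STREAMS argv[4]=streams argv[5]=0

the old scan picked the last case-insensitive match (index 4) and then rejected the command because an odd number of arguments followed, while the loop above skips COUNT and its value, stops at index 3, computes num = 6-3-1 = 2 (one key plus one ID, halved by the unchanged code just below this hunk), and correctly returns the single key index 4.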