Unverified Commit 49816941 authored by chendianqiang, committed by GitHub

Merge pull request #2 from antirez/unstable

merge from redis
parents 68ceb466 f311a529
@@ -11,4 +11,4 @@ then
echo "You need tcl 8.5 or newer in order to run the Redis test"
exit 1
fi
-$TCLSH tests/test_helper.tcl $*
+$TCLSH tests/test_helper.tcl "${@}"
@@ -20,6 +20,21 @@
# The port that this sentinel instance will run on
port 26379
+# By default Redis Sentinel does not run as a daemon. Use 'yes' if you need it.
+# Note that Redis will write a pid file in /var/run/redis-sentinel.pid when
+# daemonized.
+daemonize no
+# When running daemonized, Redis Sentinel writes a pid file in
+# /var/run/redis-sentinel.pid by default. You can specify a custom pid file
+# location here.
+pidfile /var/run/redis-sentinel.pid
+# Specify the log file name. Also the empty string can be used to force
+# Sentinel to log on the standard output. Note that if you use standard
+# output for logging but daemonize, logs will be sent to /dev/null
+logfile ""
# sentinel announce-ip <ip>
# sentinel announce-port <port>
#
@@ -58,11 +73,11 @@ dir /tmp
# be elected by the majority of the known Sentinels in order to
# start a failover, so no failover can be performed in minority.
#
-# Slaves are auto-discovered, so you don't need to specify slaves in
+# Replicas are auto-discovered, so you don't need to specify replicas in
# any way. Sentinel itself will rewrite this configuration file adding
-# the slaves using additional configuration options.
+# the replicas using additional configuration options.
# Also note that the configuration file is rewritten when a
-# slave is promoted to master.
+# replica is promoted to master.
#
# Note: master name should not include special characters or spaces.
# The valid charset is A-z 0-9 and the three characters ".-_".
@@ -70,11 +85,11 @@ sentinel monitor mymaster 127.0.0.1 6379 2
# sentinel auth-pass <master-name> <password>
#
-# Set the password to use to authenticate with the master and slaves.
+# Set the password to use to authenticate with the master and replicas.
# Useful if there is a password set in the Redis instances to monitor.
#
-# Note that the master password is also used for slaves, so it is not
-# possible to set a different password in masters and slaves instances
+# Note that the master password is also used for replicas, so it is not
+# possible to set a different password in masters and replicas instances
# if you want to be able to monitor these instances with Sentinel.
#
# However you can have Redis instances without the authentication enabled
@@ -89,7 +104,7 @@ sentinel monitor mymaster 127.0.0.1 6379 2
# sentinel down-after-milliseconds <master-name> <milliseconds>
#
-# Number of milliseconds the master (or any attached slave or sentinel) should
+# Number of milliseconds the master (or any attached replica or sentinel) should
# be unreachable (as in, not acceptable reply to PING, continuously, for the
# specified period) in order to consider it in S_DOWN state (Subjectively
# Down).
@@ -97,11 +112,11 @@ sentinel monitor mymaster 127.0.0.1 6379 2
# Default is 30 seconds.
sentinel down-after-milliseconds mymaster 30000
-# sentinel parallel-syncs <master-name> <numslaves>
+# sentinel parallel-syncs <master-name> <numreplicas>
#
-# How many slaves we can reconfigure to point to the new slave simultaneously
-# during the failover. Use a low number if you use the slaves to serve query
-# to avoid that all the slaves will be unreachable at about the same
+# How many replicas we can reconfigure to point to the new replica simultaneously
+# during the failover. Use a low number if you use the replicas to serve query
+# to avoid that all the replicas will be unreachable at about the same
# time while performing the synchronization with the master.
sentinel parallel-syncs mymaster 1
@@ -113,18 +128,18 @@ sentinel parallel-syncs mymaster 1
# already tried against the same master by a given Sentinel, is two
# times the failover timeout.
#
-# - The time needed for a slave replicating to a wrong master according
+# - The time needed for a replica replicating to a wrong master according
# to a Sentinel current configuration, to be forced to replicate
# with the right master, is exactly the failover timeout (counting since
# the moment a Sentinel detected the misconfiguration).
#
# - The time needed to cancel a failover that is already in progress but
# did not produced any configuration change (SLAVEOF NO ONE yet not
-# acknowledged by the promoted slave).
+# acknowledged by the promoted replica).
#
-# - The maximum time a failover in progress waits for all the slaves to be
-# reconfigured as slaves of the new master. However even after this time
-# the slaves will be reconfigured by the Sentinels anyway, but not with
+# - The maximum time a failover in progress waits for all the replicas to be
+# reconfigured as replicas of the new master. However even after this time
+# the replicas will be reconfigured by the Sentinels anyway, but not with
# the exact parallel-syncs progression as specified.
#
# Default is 3 minutes.
@@ -185,7 +200,7 @@ sentinel failover-timeout mymaster 180000
# <role> is either "leader" or "observer"
#
# The arguments from-ip, from-port, to-ip, to-port are used to communicate
-# the old address of the master and the new address of the elected slave
+# the old address of the master and the new address of the elected replica
# (now a master).
#
# This script should be resistant to multiple invocations.
@@ -213,12 +228,17 @@ sentinel deny-scripts-reconfig yes
#
# In such case it is possible to tell Sentinel to use different command names
# instead of the normal ones. For example if the master "mymaster", and the
-# associated slaves, have "CONFIG" all renamed to "GUESSME", I could use:
+# associated replicas, have "CONFIG" all renamed to "GUESSME", I could use:
#
-# sentinel rename-command mymaster CONFIG GUESSME
+# SENTINEL rename-command mymaster CONFIG GUESSME
#
# After such configuration is set, every time Sentinel would use CONFIG it will
# use GUESSME instead. Note that there is no actual need to respect the command
# case, so writing "config guessme" is the same in the example above.
#
# SENTINEL SET can also be used in order to perform this configuration at runtime.
+#
+# In order to set a command back to its original name (undo the renaming), it
+# is possible to just rename a command to itsef:
+#
+# SENTINEL rename-command mymaster CONFIG CONFIG
@@ -21,6 +21,11 @@ NODEPS:=clean distclean
# Default settings
STD=-std=c99 -pedantic -DREDIS_STATIC=''
+ifneq (,$(findstring clang,$(CC)))
+ifneq (,$(findstring FreeBSD,$(uname_S)))
+STD+=-Wno-c11-extensions
+endif
+endif
WARN=-Wall -W -Wno-missing-field-initializers
OPT=$(OPTIMIZATION)
@@ -41,6 +46,10 @@ endif
# To get ARM stack traces if Redis crashes we need a special C flag.
ifneq (,$(filter aarch64 armv,$(uname_M)))
CFLAGS+=-funwind-tables
+else
+ifneq (,$(findstring armv,$(uname_M)))
+CFLAGS+=-funwind-tables
+endif
endif
# Backwards compatibility for selecting an allocator
@@ -93,10 +102,20 @@ else
ifeq ($(uname_S),OpenBSD)
# OpenBSD
FINAL_LIBS+= -lpthread
+ifeq ($(USE_BACKTRACE),yes)
+FINAL_CFLAGS+= -DUSE_BACKTRACE -I/usr/local/include
+FINAL_LDFLAGS+= -L/usr/local/lib
+FINAL_LIBS+= -lexecinfo
+endif
else
ifeq ($(uname_S),FreeBSD)
# FreeBSD
-FINAL_LIBS+= -lpthread
+FINAL_LIBS+= -lpthread -lexecinfo
+else
+ifeq ($(uname_S),DragonFly)
+# FreeBSD
+FINAL_LIBS+= -lpthread -lexecinfo
else
# All the other OSes (notably Linux)
FINAL_LDFLAGS+= -rdynamic
@@ -106,6 +125,7 @@ endif
endif
endif
endif
+endif
# Include paths to dependencies
FINAL_CFLAGS+= -I../deps/hiredis -I../deps/linenoise -I../deps/lua/src
@@ -144,7 +164,7 @@ endif
REDIS_SERVER_NAME=redis-server
REDIS_SENTINEL_NAME=redis-sentinel
-REDIS_SERVER_OBJ=adlist.o quicklist.o ae.o anet.o dict.o server.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endianconv.o slowlog.o scripting.o bio.o rio.o rand.o memtest.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o redis-check-rdb.o redis-check-aof.o geo.o lazyfree.o module.o evict.o expire.o geohash.o geohash_helper.o childinfo.o defrag.o siphash.o rax.o t_stream.o listpack.o localtime.o
+REDIS_SERVER_OBJ=adlist.o quicklist.o ae.o anet.o dict.o server.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endianconv.o slowlog.o scripting.o bio.o rio.o rand.o memtest.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o redis-check-rdb.o redis-check-aof.o geo.o lazyfree.o module.o evict.o expire.o geohash.o geohash_helper.o childinfo.o defrag.o siphash.o rax.o t_stream.o listpack.o localtime.o lolwut.o lolwut5.o acl.o gopher.o
REDIS_CLI_NAME=redis-cli
REDIS_CLI_OBJ=anet.o adlist.o dict.o redis-cli.o zmalloc.o release.o anet.o ae.o crc64.o siphash.o crc16.o
REDIS_BENCHMARK_NAME=redis-benchmark
...
@@ -351,8 +351,8 @@ static int processTimeEvents(aeEventLoop *eventLoop) {
* if flags has AE_FILE_EVENTS set, file events are processed.
* if flags has AE_TIME_EVENTS set, time events are processed.
* if flags has AE_DONT_WAIT set the function returns ASAP until all
-* if flags has AE_CALL_AFTER_SLEEP set, the aftersleep callback is called.
* the events that's possible to process without to wait are processed.
+* if flags has AE_CALL_AFTER_SLEEP set, the aftersleep callback is called.
*
* The function returns the number of events processed. */
int aeProcessEvents(aeEventLoop *eventLoop, int flags)
...
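
The reordered comment above documents the aeProcessEvents() flags. A rough illustration of how a caller can combine them; only aeProcessEvents() and the AE_* flags come from ae.h, the two helper functions are hypothetical:

#include "ae.h"

/* Hypothetical helpers, shown only to illustrate the flag semantics
 * described in the comment above. */
void runLoopIteration(aeEventLoop *loop) {
    /* Process both file and time events; AE_CALL_AFTER_SLEEP additionally
     * runs the registered aftersleep callback once polling returns. */
    aeProcessEvents(loop, AE_ALL_EVENTS | AE_CALL_AFTER_SLEEP);
}

void drainReadyFileEvents(aeEventLoop *loop) {
    /* AE_DONT_WAIT returns ASAP: only events that are already ready are
     * processed, without sleeping in the poll syscall. */
    aeProcessEvents(loop, AE_FILE_EVENTS | AE_DONT_WAIT);
}
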
@@ -204,7 +204,7 @@ void aof_background_fsync(int fd) {
}
/* Kills an AOFRW child process if exists */
-static void killAppendOnlyChild(void) {
+void killAppendOnlyChild(void) {
int statloc;
/* No AOFRW child? return. */
if (server.aof_child_pid == -1) return;
@@ -221,6 +221,8 @@ static void killAppendOnlyChild(void) {
server.aof_rewrite_time_start = -1;
/* Close pipes used for IPC between the two processes. */
aofClosePipes();
+closeChildInfoPipe();
+updateDictResizePolicy();
}
/* Called when the user switches from "appendonly yes" to "appendonly no"
@@ -645,6 +647,8 @@ struct client *createFakeClient(void) {
c->obuf_soft_limit_reached_time = 0;
c->watched_keys = listCreate();
c->peerid = NULL;
+c->resp = 2;
+c->user = NULL;
listSetFreeMethod(c->reply,freeClientReplyValue);
listSetDupMethod(c->reply,dupClientReplyValue);
initClientMultiState(c);
@@ -677,6 +681,7 @@ int loadAppendOnlyFile(char *filename) {
int old_aof_state = server.aof_state;
long loops = 0;
off_t valid_up_to = 0; /* Offset of latest well-formed command loaded. */
+off_t valid_before_multi = 0; /* Offset before MULTI command loaded. */
if (fp == NULL) {
serverLog(LL_WARNING,"Fatal error: can't open the append log file for reading: %s",strerror(errno));
@@ -777,16 +782,28 @@ int loadAppendOnlyFile(char *filename) {
/* Command lookup */
cmd = lookupCommand(argv[0]->ptr);
if (!cmd) {
-serverLog(LL_WARNING,"Unknown command '%s' reading the append only file", (char*)argv[0]->ptr);
+serverLog(LL_WARNING,
+"Unknown command '%s' reading the append only file",
+(char*)argv[0]->ptr);
exit(1);
}
+if (cmd == server.multiCommand) valid_before_multi = valid_up_to;
/* Run the command in the context of a fake client */
fakeClient->cmd = cmd;
+if (fakeClient->flags & CLIENT_MULTI &&
+fakeClient->cmd->proc != execCommand)
+{
+queueMultiCommand(fakeClient);
+} else {
cmd->proc(fakeClient);
+}
/* The fake client should not have a reply */
-serverAssert(fakeClient->bufpos == 0 && listLength(fakeClient->reply) == 0);
+serverAssert(fakeClient->bufpos == 0 &&
+listLength(fakeClient->reply) == 0);
/* The fake client should never get blocked */
serverAssert((fakeClient->flags & CLIENT_BLOCKED) == 0);
@@ -798,8 +815,15 @@ int loadAppendOnlyFile(char *filename) {
}
/* This point can only be reached when EOF is reached without errors.
-* If the client is in the middle of a MULTI/EXEC, log error and quit. */
-if (fakeClient->flags & CLIENT_MULTI) goto uxeof;
+* If the client is in the middle of a MULTI/EXEC, handle it as it was
+* a short read, even if technically the protocol is correct: we want
+* to remove the unprocessed tail and continue. */
+if (fakeClient->flags & CLIENT_MULTI) {
+serverLog(LL_WARNING,
+"Revert incomplete MULTI/EXEC transaction in AOF file");
+valid_up_to = valid_before_multi;
+goto uxeof;
+}
loaded_ok: /* DB loaded, cleanup and return C_OK to the caller. */
fclose(fp);
@@ -1119,6 +1143,7 @@ int rewriteStreamObject(rio *r, robj *key, robj *o) {
streamID id;
int64_t numfields;
+if (s->length) {
/* Reconstruct the stream data using XADD commands. */
while(streamIteratorGetID(&si,&id,&numfields)) {
/* Emit a two elements array for each item. The first is
@@ -1137,6 +1162,27 @@ int rewriteStreamObject(rio *r, robj *key, robj *o) {
if (rioWriteBulkString(r,(char*)value,value_len) == 0) return 0;
}
}
+} else {
+/* Use the XADD MAXLEN 0 trick to generate an empty stream if
+* the key we are serializing is an empty string, which is possible
+* for the Stream type. */
+if (rioWriteBulkCount(r,'*',7) == 0) return 0;
+if (rioWriteBulkString(r,"XADD",4) == 0) return 0;
+if (rioWriteBulkObject(r,key) == 0) return 0;
+if (rioWriteBulkString(r,"MAXLEN",6) == 0) return 0;
+if (rioWriteBulkString(r,"0",1) == 0) return 0;
+if (rioWriteBulkStreamID(r,&s->last_id) == 0) return 0;
+if (rioWriteBulkString(r,"x",1) == 0) return 0;
+if (rioWriteBulkString(r,"y",1) == 0) return 0;
+}
+/* Append XSETID after XADD, make sure lastid is correct,
+* in case of XDEL lastid. */
+if (rioWriteBulkCount(r,'*',3) == 0) return 0;
+if (rioWriteBulkString(r,"XSETID",6) == 0) return 0;
+if (rioWriteBulkObject(r,key) == 0) return 0;
+if (rioWriteBulkStreamID(r,&s->last_id) == 0) return 0;
/* Create all the stream consumer groups. */
if (s->cgroups) {
...
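
The loadAppendOnlyFile() hunks above queue MULTI-wrapped commands and, when the file ends inside an open transaction, roll the valid offset back to just before the MULTI. A generic sketch of that bookkeeping with hypothetical names; this is not the aof.c code itself, only the pattern:

#include <strings.h>

/* Hypothetical state; aof.c keeps the equivalent in local variables
 * (valid_up_to, valid_before_multi) and in the fake client's flags. */
typedef struct {
    long long valid_up_to;        /* Offset after the last fully applied command. */
    long long valid_before_multi; /* Offset remembered when a MULTI was seen. */
    int in_multi;                 /* Inside an open MULTI ... EXEC block? */
} AofLoadState;

void noteCommandLoaded(AofLoadState *st, const char *name, long long offset_after) {
    if (strcasecmp(name, "MULTI") == 0) {
        st->valid_before_multi = st->valid_up_to; /* Possible rollback point. */
        st->in_multi = 1;
    } else if (strcasecmp(name, "EXEC") == 0) {
        st->in_multi = 0;                         /* Transaction closed. */
    }
    st->valid_up_to = offset_after;
}

long long truncationOffsetAtEof(const AofLoadState *st) {
    /* EOF inside MULTI ... EXEC is treated like a short read: keep only
     * the bytes written before the transaction started. */
    return st->in_multi ? st->valid_before_multi : st->valid_up_to;
}
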
/* This file implements atomic counters using __atomic or __sync macros if
* available, otherwise synchronizing different threads using a mutex.
*
-* The exported interaface is composed of three macros:
+* The exported interface is composed of three macros:
*
* atomicIncr(var,count) -- Increment the atomic counter
* atomicGetIncr(var,oldvalue_var,count) -- Get and increment the atomic counter
...
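
The header comment above lists the exported counter macros. A minimal sketch of how such macros can be built on the GCC/Clang __atomic builtins; the my* names are illustrative only, and the real atomicvar.h also falls back to __sync or a mutex:

#include <stdio.h>

/* Illustrative definitions only; not the actual atomicvar.h macros. */
#define myAtomicIncr(var, count) \
    __atomic_add_fetch(&(var), (count), __ATOMIC_RELAXED)
#define myAtomicGetIncr(var, oldvalue_var, count) do { \
    (oldvalue_var) = __atomic_fetch_add(&(var), (count), __ATOMIC_RELAXED); \
} while (0)

int main(void) {
    long counter = 0, old;
    myAtomicIncr(counter, 10);        /* counter becomes 10. */
    myAtomicGetIncr(counter, old, 5); /* old = 10, counter becomes 15. */
    printf("%ld %ld\n", old, counter);
    return 0;
}
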
@@ -17,7 +17,7 @@
*
* The design is trivial, we have a structure representing a job to perform
* and a different thread and job queue for every job type.
-* Every thread wait for new jobs in its queue, and process every job
+* Every thread waits for new jobs in its queue, and process every job
* sequentially.
*
* Jobs of the same type are guaranteed to be processed from the least
@@ -204,14 +204,14 @@ void *bioProcessBackgroundJobs(void *arg) {
}
zfree(job);
-/* Unblock threads blocked on bioWaitStepOfType() if any. */
-pthread_cond_broadcast(&bio_step_cond[type]);
/* Lock again before reiterating the loop, if there are no longer
* jobs to process we'll block again in pthread_cond_wait(). */
pthread_mutex_lock(&bio_mutex[type]);
listDelNode(bio_jobs[type],ln);
bio_pending[type]--;
+/* Unblock threads blocked on bioWaitStepOfType() if any. */
+pthread_cond_broadcast(&bio_step_cond[type]);
}
}
...
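
The bio.c hunk above moves pthread_cond_broadcast() so that it runs after the mutex is re-acquired and the finished job has been removed and the pending counter decremented. A condensed sketch of why that ordering matters for waiters such as bioWaitStepOfType(); the names below are hypothetical stand-ins, not the bio.c variables:

#include <pthread.h>

/* Hypothetical stand-ins for bio_mutex / bio_step_cond / bio_pending. */
static pthread_mutex_t job_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  step_cond = PTHREAD_COND_INITIALIZER;
static unsigned long   pending_jobs = 1;

/* Worker side: update the shared counter under the mutex first, then wake
 * waiters. If the broadcast happened before the decrement, a waiter could
 * wake up, still see the old count, and sleep again with no further wakeup
 * guaranteed. */
void jobDone(void) {
    pthread_mutex_lock(&job_mutex);
    pending_jobs--;
    pthread_cond_broadcast(&step_cond);
    pthread_mutex_unlock(&job_mutex);
}

/* Waiter side, in the spirit of bioWaitStepOfType(): re-check the counter
 * under the same mutex every time the condition variable fires. */
void waitForAllJobs(void) {
    pthread_mutex_lock(&job_mutex);
    while (pending_jobs > 0)
        pthread_cond_wait(&step_cond, &job_mutex);
    pthread_mutex_unlock(&job_mutex);
}
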
@@ -1002,7 +1002,7 @@ void bitfieldCommand(client *c) {
highest_write_offset)) == NULL) return;
}
-addReplyMultiBulkLen(c,numops);
+addReplyArrayLen(c,numops);
/* Actually process the operations. */
for (j = 0; j < numops; j++) {
@@ -1047,7 +1047,7 @@ void bitfieldCommand(client *c) {
setSignedBitfield(o->ptr,thisop->offset,
thisop->bits,newval);
} else {
-addReply(c,shared.nullbulk);
+addReplyNull(c);
}
} else {
uint64_t oldval, newval, wrapped, retval;
@@ -1076,7 +1076,7 @@ void bitfieldCommand(client *c) {
setUnsignedBitfield(o->ptr,thisop->offset,
thisop->bits,newval);
} else {
-addReply(c,shared.nullbulk);
+addReplyNull(c);
}
}
changes++;
...
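
The bitops.c hunks swap the RESP2-only helpers (shared.nullbulk, addReplyMultiBulkLen) for the protocol-aware ones introduced alongside RESP3. As a rough guide to what the new helpers put on the wire (bytes per the RESP2/RESP3 specs; the helper names are the ones used above, the wrapper is hypothetical):

#include "server.h"

/* Hypothetical wrapper, only to annotate the wire-level difference. */
void replyMissing(client *c, int as_array) {
    if (as_array)
        addReplyNullArray(c); /* RESP2: "*-1\r\n"   RESP3: "_\r\n" */
    else
        addReplyNull(c);      /* RESP2: "$-1\r\n"   RESP3: "_\r\n" */
}
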
@@ -126,12 +126,37 @@ void processUnblockedClients(void) {
* the code is conceptually more correct this way. */
if (!(c->flags & CLIENT_BLOCKED)) {
if (c->querybuf && sdslen(c->querybuf) > 0) {
-processInputBuffer(c);
+processInputBufferAndReplicate(c);
}
}
}
}
+/* This function will schedule the client for reprocessing at a safe time.
+*
+* This is useful when a client was blocked for some reason (blocking opeation,
+* CLIENT PAUSE, or whatever), because it may end with some accumulated query
+* buffer that needs to be processed ASAP:
+*
+* 1. When a client is blocked, its readable handler is still active.
+* 2. However in this case it only gets data into the query buffer, but the
+* query is not parsed or executed once there is enough to proceed as
+* usually (because the client is blocked... so we can't execute commands).
+* 3. When the client is unblocked, without this function, the client would
+* have to write some query in order for the readable handler to finally
+* call processQueryBuffer*() on it.
+* 4. With this function instead we can put the client in a queue that will
+* process it for queries ready to be executed at a safe time.
+*/
+void queueClientForReprocessing(client *c) {
+/* The client may already be into the unblocked list because of a previous
+* blocking operation, don't add back it into the list multiple times. */
+if (!(c->flags & CLIENT_UNBLOCKED)) {
+c->flags |= CLIENT_UNBLOCKED;
+listAddNodeTail(server.unblocked_clients,c);
+}
+}
/* Unblock a client calling the right function depending on the kind
* of operation the client is blocking for. */
void unblockClient(client *c) {
@@ -152,12 +177,7 @@ void unblockClient(client *c) {
server.blocked_clients_by_type[c->btype]--;
c->flags &= ~CLIENT_BLOCKED;
c->btype = BLOCKED_NONE;
-/* The client may already be into the unblocked list because of a previous
-* blocking operation, don't add back it into the list multiple times. */
-if (!(c->flags & CLIENT_UNBLOCKED)) {
-c->flags |= CLIENT_UNBLOCKED;
-listAddNodeTail(server.unblocked_clients,c);
-}
+queueClientForReprocessing(c);
}
/* This function gets called when a blocked client timed out in order to
@@ -167,7 +187,7 @@ void replyToBlockedClientTimedOut(client *c) {
if (c->btype == BLOCKED_LIST ||
c->btype == BLOCKED_ZSET ||
c->btype == BLOCKED_STREAM) {
-addReply(c,shared.nullmultibulk);
+addReplyNullArray(c);
} else if (c->btype == BLOCKED_WAIT) {
addReplyLongLong(c,replicationCountAcksByOffset(c->bpop.reploffset));
} else if (c->btype == BLOCKED_MODULE) {
@@ -195,7 +215,7 @@ void disconnectAllBlockedClients(void) {
if (c->flags & CLIENT_BLOCKED) {
addReplySds(c,sdsnew(
"-UNBLOCKED force unblock from blocking operation, "
-"instance state changed (master -> slave?)\r\n"));
+"instance state changed (master -> replica?)\r\n"));
unblockClient(c);
c->flags |= CLIENT_CLOSE_AFTER_REPLY;
}
@@ -416,8 +436,12 @@ void handleClientsBlockedOnKeys(void) {
* the name of the stream and the data we
* extracted from it. Wrapped in a single-item
* array, since we have just one key. */
-addReplyMultiBulkLen(receiver,1);
-addReplyMultiBulkLen(receiver,2);
+if (receiver->resp == 2) {
+addReplyArrayLen(receiver,1);
+addReplyArrayLen(receiver,2);
+} else {
+addReplyMapLen(receiver,1);
+}
addReplyBulk(receiver,rl->key);
streamPropInfo pi = {
...
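
The handleClientsBlockedOnKeys() hunk above frames the same stream payload differently per protocol version: a one-element array of [key, data] under RESP2, a one-element map keyed by the stream name under RESP3. A stripped-down sketch of that pattern; the helper is hypothetical, the addReply* functions are the ones used above:

#include "server.h"

/* Hypothetical helper illustrating the RESP2/RESP3 framing used above. */
void replySingleKeyedPayload(client *c, robj *key, robj *payload) {
    if (c->resp == 2) {
        addReplyArrayLen(c, 1);   /* One [key, payload] pair... */
        addReplyArrayLen(c, 2);   /* ...encoded as a nested array. */
    } else {
        addReplyMapLen(c, 1);     /* RESP3: a map with a single entry. */
    }
    addReplyBulk(c, key);
    addReplyBulk(c, payload);
}
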
@@ -1230,7 +1230,7 @@ void clearNodeFailureIfNeeded(clusterNode *node) {
serverLog(LL_NOTICE,
"Clear FAIL state for node %.40s: %s is reachable again.",
node->name,
-nodeIsSlave(node) ? "slave" : "master without slots");
+nodeIsSlave(node) ? "replica" : "master without slots");
node->flags &= ~CLUSTER_NODE_FAIL;
clusterDoBeforeSleep(CLUSTER_TODO_UPDATE_STATE|CLUSTER_TODO_SAVE_CONFIG);
}
@@ -1589,6 +1589,12 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc
}
}
+/* After updating the slots configuration, don't do any actual change
+* in the state of the server if a module disabled Redis Cluster
+* keys redirections. */
+if (server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_REDIRECTION)
+return;
/* If at least one slot was reassigned from a node to another node
* with a greater configEpoch, it is possible that:
* 1) We are a master left without slots. This means that we were
@@ -2059,7 +2065,7 @@ int clusterProcessPacket(clusterLink *link) {
server.cluster->mf_end = mstime() + CLUSTER_MF_TIMEOUT;
server.cluster->mf_slave = sender;
pauseClients(mstime()+(CLUSTER_MF_TIMEOUT*2));
-serverLog(LL_WARNING,"Manual failover requested by slave %.40s.",
+serverLog(LL_WARNING,"Manual failover requested by replica %.40s.",
sender->name);
} else if (type == CLUSTERMSG_TYPE_UPDATE) {
clusterNode *n; /* The node the update is about. */
@@ -2873,7 +2879,7 @@ void clusterLogCantFailover(int reason) {
switch(reason) {
case CLUSTER_CANT_FAILOVER_DATA_AGE:
msg = "Disconnected from master for longer than allowed. "
-"Please check the 'cluster-slave-validity-factor' configuration "
+"Please check the 'cluster-replica-validity-factor' configuration "
"option.";
break;
case CLUSTER_CANT_FAILOVER_WAITING_DELAY:
@@ -3054,7 +3060,7 @@ void clusterHandleSlaveFailover(void) {
server.cluster->failover_auth_time += added_delay;
server.cluster->failover_auth_rank = newrank;
serverLog(LL_WARNING,
-"Slave rank updated to #%d, added %lld milliseconds of delay.",
+"Replica rank updated to #%d, added %lld milliseconds of delay.",
newrank, added_delay);
}
}
@@ -3210,7 +3216,8 @@ void clusterHandleSlaveMigration(int max_slaves) {
* the natural slaves of this instance to advertise their switch from
* the old master to the new one. */
if (target && candidate == myself &&
-(mstime()-target->orphaned_time) > CLUSTER_SLAVE_MIGRATION_DELAY)
+(mstime()-target->orphaned_time) > CLUSTER_SLAVE_MIGRATION_DELAY &&
+!(server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_FAILOVER))
{
serverLog(LL_WARNING,"Migrating to orphaned master %.40s",
target->name);
@@ -3321,14 +3328,18 @@ void clusterCron(void) {
int changed = 0;
if (prev_ip == NULL && curr_ip != NULL) changed = 1;
-if (prev_ip != NULL && curr_ip == NULL) changed = 1;
-if (prev_ip && curr_ip && strcmp(prev_ip,curr_ip)) changed = 1;
+else if (prev_ip != NULL && curr_ip == NULL) changed = 1;
+else if (prev_ip && curr_ip && strcmp(prev_ip,curr_ip)) changed = 1;
if (changed) {
+if (prev_ip) zfree(prev_ip);
prev_ip = curr_ip;
-if (prev_ip) prev_ip = zstrdup(prev_ip);
if (curr_ip) {
+/* We always take a copy of the previous IP address, by
+* duplicating the string. This way later we can check if
+* the address really changed. */
+prev_ip = zstrdup(prev_ip);
strncpy(myself->ip,server.cluster_announce_ip,NET_IP_STR_LEN);
myself->ip[NET_IP_STR_LEN-1] = '\0';
} else {
@@ -3559,6 +3570,7 @@ void clusterCron(void) {
if (nodeIsSlave(myself)) {
clusterHandleManualFailover();
+if (!(server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_FAILOVER))
clusterHandleSlaveFailover();
/* If there are orphaned slaves, and we are a slave among the masters
* with the max number of non-failing slaves, consider migrating to
@@ -3865,6 +3877,11 @@ int verifyClusterConfigWithData(void) {
int j;
int update_config = 0;
+/* Return ASAP if a module disabled cluster redirections. In that case
+* every master can store keys about every possible hash slot. */
+if (server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_REDIRECTION)
+return C_OK;
/* If this node is a slave, don't perform the check at all as we
* completely depend on the replication stream. */
if (nodeIsSlave(myself)) return C_OK;
@@ -4109,7 +4126,7 @@ void clusterReplyMultiBulkSlots(client *c) {
*/
int num_masters = 0;
-void *slot_replylen = addDeferredMultiBulkLength(c);
+void *slot_replylen = addReplyDeferredLen(c);
dictEntry *de;
dictIterator *di = dictGetSafeIterator(server.cluster->nodes);
@@ -4129,7 +4146,7 @@ void clusterReplyMultiBulkSlots(client *c) {
}
if (start != -1 && (!bit || j == CLUSTER_SLOTS-1)) {
int nested_elements = 3; /* slots (2) + master addr (1). */
-void *nested_replylen = addDeferredMultiBulkLength(c);
+void *nested_replylen = addReplyDeferredLen(c);
if (bit && j == CLUSTER_SLOTS-1) j++;
@@ -4145,7 +4162,7 @@ void clusterReplyMultiBulkSlots(client *c) {
start = -1;
/* First node reply position is always the master */
-addReplyMultiBulkLen(c, 3);
+addReplyArrayLen(c, 3);
addReplyBulkCString(c, node->ip);
addReplyLongLong(c, node->port);
addReplyBulkCBuffer(c, node->name, CLUSTER_NAMELEN);
@@ -4155,19 +4172,19 @@ void clusterReplyMultiBulkSlots(client *c) {
/* This loop is copy/pasted from clusterGenNodeDescription()
* with modifications for per-slot node aggregation */
if (nodeFailed(node->slaves[i])) continue;
-addReplyMultiBulkLen(c, 3);
+addReplyArrayLen(c, 3);
addReplyBulkCString(c, node->slaves[i]->ip);
addReplyLongLong(c, node->slaves[i]->port);
addReplyBulkCBuffer(c, node->slaves[i]->name, CLUSTER_NAMELEN);
nested_elements++;
}
-setDeferredMultiBulkLength(c, nested_replylen, nested_elements);
+setDeferredArrayLen(c, nested_replylen, nested_elements);
num_masters++;
}
}
}
dictReleaseIterator(di);
-setDeferredMultiBulkLength(c, slot_replylen, num_masters);
+setDeferredArrayLen(c, slot_replylen, num_masters);
}
void clusterCommand(client *c) {
@@ -4183,7 +4200,7 @@ void clusterCommand(client *c) {
"COUNT-failure-reports <node-id> -- Return number of failure reports for <node-id>.",
"COUNTKEYSINSLOT <slot> - Return the number of keys in <slot>.",
"DELSLOTS <slot> [slot ...] -- Delete slots information from current node.",
-"FAILOVER [force|takeover] -- Promote current slave node to being a master.",
+"FAILOVER [force|takeover] -- Promote current replica node to being a master.",
"FORGET <node-id> -- Remove a node from the cluster.",
"GETKEYSINSLOT <slot> <count> -- Return key names stored by current node in a slot.",
"FLUSHSLOTS -- Delete current node own slots information.",
@@ -4193,11 +4210,11 @@ void clusterCommand(client *c) {
"MYID -- Return the node id.",
"NODES -- Return cluster configuration seen by node. Output format:",
" <id> <ip:port> <flags> <master> <pings> <pongs> <epoch> <link> <slot> ... <slot>",
-"REPLICATE <node-id> -- Configure current node as slave to <node-id>.",
+"REPLICATE <node-id> -- Configure current node as replica to <node-id>.",
"RESET [hard|soft] -- Reset current node (default: soft).",
"SET-config-epoch <epoch> - Set config epoch of current node.",
"SETSLOT <slot> (importing|migrating|stable|node <node-id>) -- Set slot state.",
-"SLAVES <node-id> -- Return <node-id> slaves.",
+"REPLICAS <node-id> -- Return <node-id> replicas.",
"SLOTS -- Return information about slots range mappings. Each range is made of:",
" start, end, master and replicas IP addresses, ports and ids",
NULL
@@ -4531,7 +4548,7 @@ NULL
keys = zmalloc(sizeof(robj*)*maxkeys);
numkeys = getKeysInSlot(slot, keys, maxkeys);
-addReplyMultiBulkLen(c,numkeys);
+addReplyArrayLen(c,numkeys);
for (j = 0; j < numkeys; j++) {
addReplyBulk(c,keys[j]);
decrRefCount(keys[j]);
@@ -4574,7 +4591,7 @@ NULL
/* Can't replicate a slave. */
if (nodeIsSlave(n)) {
-addReplyError(c,"I can only replicate a master, not a slave.");
+addReplyError(c,"I can only replicate a master, not a replica.");
return;
}
@@ -4593,7 +4610,8 @@ NULL
clusterSetMaster(n);
clusterDoBeforeSleep(CLUSTER_TODO_UPDATE_STATE|CLUSTER_TODO_SAVE_CONFIG);
addReply(c,shared.ok);
-} else if (!strcasecmp(c->argv[1]->ptr,"slaves") && c->argc == 3) {
+} else if ((!strcasecmp(c->argv[1]->ptr,"slaves") ||
+!strcasecmp(c->argv[1]->ptr,"replicas")) && c->argc == 3) {
/* CLUSTER SLAVES <NODE ID> */
clusterNode *n = clusterLookupNode(c->argv[2]->ptr);
int j;
@@ -4609,7 +4627,7 @@ NULL
return;
}
-addReplyMultiBulkLen(c,n->numslaves);
+addReplyArrayLen(c,n->numslaves);
for (j = 0; j < n->numslaves; j++) {
sds ni = clusterGenNodeDescription(n->slaves[j]);
addReplyBulkCString(c,ni);
@@ -4647,10 +4665,10 @@ NULL
/* Check preconditions. */
if (nodeIsMaster(myself)) {
-addReplyError(c,"You should send CLUSTER FAILOVER to a slave");
+addReplyError(c,"You should send CLUSTER FAILOVER to a replica");
return;
} else if (myself->slaveof == NULL) {
-addReplyError(c,"I'm a slave but my master is unknown to me");
+addReplyError(c,"I'm a replica but my master is unknown to me");
return;
} else if (!force &&
(nodeFailed(myself->slaveof) ||
@@ -4818,7 +4836,7 @@ void dumpCommand(client *c) {
/* Check if the key is here. */
if ((o = lookupKeyRead(c->db,c->argv[1])) == NULL) {
-addReply(c,shared.nullbulk);
+addReplyNull(c);
return;
}
@@ -5146,6 +5164,11 @@ try_again:
serverAssertWithInfo(c,NULL,rioWriteBulkLongLong(&cmd,dbid));
}
+int non_expired = 0; /* Number of keys that we'll find non expired.
+Note that serializing large keys may take some time
+so certain keys that were found non expired by the
+lookupKey() function, may be expired later. */
/* Create RESTORE payload and generate the protocol to call the command. */
for (j = 0; j < num_keys; j++) {
long long ttl = 0;
@@ -5153,8 +5176,17 @@ try_again:
if (expireat != -1) {
ttl = expireat-mstime();
+if (ttl < 0) {
+continue;
+}
if (ttl < 1) ttl = 1;
}
+/* Relocate valid (non expired) keys into the array in successive
+* positions to remove holes created by the keys that were present
+* in the first lookup but are now expired after the second lookup. */
+kv[non_expired++] = kv[j];
serverAssertWithInfo(c,NULL,
rioWriteBulkCount(&cmd,'*',replace ? 5 : 4));
@@ -5182,6 +5214,9 @@ try_again:
serverAssertWithInfo(c,NULL,rioWriteBulkString(&cmd,"REPLACE",7));
}
+/* Fix the actual number of keys we are migrating. */
+num_keys = non_expired;
/* Transfer the query to the other node in 64K chunks. */
errno = 0;
{
@@ -5217,6 +5252,10 @@ try_again:
int socket_error = 0;
int del_idx = 1; /* Index of the key argument for the replicated DEL op. */
+/* Allocate the new argument vector that will replace the current command,
+* to propagate the MIGRATE as a DEL command (if no COPY option was given).
+* We allocate num_keys+1 because the additional argument is for "DEL"
+* command name itself. */
if (!copy) newargv = zmalloc(sizeof(robj*)*(num_keys+1));
for (j = 0; j < num_keys; j++) {
@@ -5417,9 +5456,17 @@ clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, in
multiCmd mc;
int i, slot = 0, migrating_slot = 0, importing_slot = 0, missing_keys = 0;
+/* Allow any key to be set if a module disabled cluster redirections. */
+if (server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_REDIRECTION)
+return myself;
/* Set error code optimistically for the base case. */
if (error_code) *error_code = CLUSTER_REDIR_NONE;
+/* Modules can turn off Redis Cluster redirection: this is useful
+* when writing a module that implements a completely different
+* distributed system. */
/* We handle all the cases as if they were EXEC commands, so we have
* a common code path for everything */
if (cmd->proc == execCommand) {
...
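
The MIGRATE hunks above slide still-valid keys toward the front of the kv array so that keys found expired during serialization leave no holes, then shrink num_keys accordingly. The same in-place compaction pattern in isolation, as a generic hypothetical helper rather than the migrateCommand() code:

#include <stddef.h>

/* Keep only the elements that still pass a predicate, preserving order,
 * and return how many were kept. */
size_t compactInPlace(void **items, size_t num, int (*keep)(void *)) {
    size_t kept = 0;
    for (size_t j = 0; j < num; j++) {
        if (!keep(items[j])) continue; /* Drop entries that expired meanwhile. */
        items[kept++] = items[j];      /* Slide survivors left. */
    }
    return kept;                       /* Caller updates its element count. */
}
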
@@ -100,6 +100,13 @@ typedef struct clusterLink {
#define CLUSTERMSG_TYPE_MODULE 9 /* Module cluster API message. */
#define CLUSTERMSG_TYPE_COUNT 10 /* Total number of message types. */
+/* Flags that a module can set in order to prevent certain Redis Cluster
+* features to be enabled. Useful when implementing a different distributed
+* system on top of Redis Cluster message bus, using modules. */
+#define CLUSTER_MODULE_FLAG_NONE 0
+#define CLUSTER_MODULE_FLAG_NO_FAILOVER (1<<1)
+#define CLUSTER_MODULE_FLAG_NO_REDIRECTION (1<<2)
/* This structure represent elements of node->fail_reports. */
typedef struct clusterNodeFailReport {
struct clusterNode *node; /* Node reporting the failure condition. */
...
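
The CLUSTER_MODULE_FLAG_* bits above are set on behalf of modules; in the Redis 5 modules API that is exposed through RedisModule_SetClusterFlags(). A minimal module sketch, assuming that API and its REDISMODULE_CLUSTER_FLAG_* constants:

#include "redismodule.h"

int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    REDISMODULE_NOT_USED(argv);
    REDISMODULE_NOT_USED(argc);
    if (RedisModule_Init(ctx, "mydistsys", 1, REDISMODULE_APIVER_1)
        == REDISMODULE_ERR) return REDISMODULE_ERR;

    /* Tell Redis Cluster that this module implements its own distributed
     * system: don't fail over this node and don't redirect clients. */
    RedisModule_SetClusterFlags(ctx,
        REDISMODULE_CLUSTER_FLAG_NO_FAILOVER |
        REDISMODULE_CLUSTER_FLAG_NO_REDIRECTION);
    return REDISMODULE_OK;
}
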
@@ -62,7 +62,9 @@
#endif
/* Test for backtrace() */
-#if defined(__APPLE__) || (defined(__linux__) && defined(__GLIBC__))
+#if defined(__APPLE__) || (defined(__linux__) && defined(__GLIBC__)) || \
+defined(__FreeBSD__) || (defined(__OpenBSD__) && defined(USE_BACKTRACE))\
+|| defined(__DragonFly__)
#define HAVE_BACKTRACE 1
#endif
...
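
HAVE_BACKTRACE now also covers FreeBSD, DragonFly and (with USE_BACKTRACE) OpenBSD, where the Makefile hunks above link -lexecinfo. A minimal sketch of the guarded usage pattern the macro enables; illustrative only, not the actual debug.c code:

#include "config.h"
#ifdef HAVE_BACKTRACE
#include <execinfo.h>
#endif

void dumpStackToStderr(void) {
#ifdef HAVE_BACKTRACE
    void *frames[64];
    int depth = backtrace(frames, 64);
    backtrace_symbols_fd(frames, depth, 2); /* 2 == stderr */
#endif
}
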
@@ -38,6 +38,8 @@
* C-level DB API
*----------------------------------------------------------------------------*/
+int keyIsExpired(redisDb *db, robj *key);
/* Update LFU when an object is accessed.
* Firstly, decrement the counter if the decrement time is reached.
* Then logarithmically increment the counter, and update the access time. */
@@ -102,7 +104,10 @@ robj *lookupKeyReadWithFlags(redisDb *db, robj *key, int flags) {
/* Key expired. If we are in the context of a master, expireIfNeeded()
* returns 0 only when the key does not exist at all, so it's safe
* to return NULL ASAP. */
-if (server.masterhost == NULL) return NULL;
+if (server.masterhost == NULL) {
+server.stat_keyspace_misses++;
+return NULL;
+}
/* However if we are in the context of a slave, expireIfNeeded() will
* not really try to expire the key, it only returns information
@@ -121,6 +126,7 @@ robj *lookupKeyReadWithFlags(redisDb *db, robj *key, int flags) {
server.current_client->cmd &&
server.current_client->cmd->flags & CMD_READONLY)
{
+server.stat_keyspace_misses++;
return NULL;
}
}
@@ -184,14 +190,19 @@ void dbOverwrite(redisDb *db, robj *key, robj *val) {
dictEntry *de = dictFind(db->dict,key->ptr);
serverAssertWithInfo(NULL,key,de != NULL);
-if (server.maxmemory_policy & MAXMEMORY_FLAG_LFU) {
+dictEntry auxentry = *de;
robj *old = dictGetVal(de);
-int saved_lru = old->lru;
-dictReplace(db->dict, key->ptr, val);
-val->lru = saved_lru;
-} else {
-dictReplace(db->dict, key->ptr, val);
-}
+if (server.maxmemory_policy & MAXMEMORY_FLAG_LFU) {
+val->lru = old->lru;
+}
+dictSetVal(db->dict, de, val);
+if (server.lazyfree_lazy_server_del) {
+freeObjAsync(old);
+dictSetVal(db->dict, &auxentry, NULL);
+}
+dictFreeVal(db->dict, &auxentry);
}
/* High level Set operation. This function can be used in order to set
@@ -201,7 +212,7 @@ void dbOverwrite(redisDb *db, robj *key, robj *val) {
* 2) clients WATCHing for the destination key notified.
* 3) The expire time of the key is reset (the key is made persistent).
*
-* All the new keys in the database should be craeted via this interface. */
+* All the new keys in the database should be created via this interface. */
void setKey(redisDb *db, robj *key, robj *val) {
if (lookupKeyWrite(db,key) == NULL) {
dbAdd(db,key,val);
@@ -230,7 +241,7 @@ robj *dbRandomKey(redisDb *db) {
sds key;
robj *keyobj;
-de = dictGetRandomKey(db->dict);
+de = dictGetFairRandomKey(db->dict);
if (de == NULL) return NULL;
key = dictGetKey(de);
@@ -329,7 +340,7 @@ robj *dbUnshareStringValue(redisDb *db, robj *key, robj *o) {
* database(s). Otherwise -1 is returned in the specific case the
* DB number is out of range, and errno is set to EINVAL. */
long long emptyDb(int dbnum, int flags, void(callback)(void*)) {
-int j, async = (flags & EMPTYDB_ASYNC);
+int async = (flags & EMPTYDB_ASYNC);
long long removed = 0;
if (dbnum < -1 || dbnum >= server.dbnum) {
@@ -337,8 +348,15 @@ long long emptyDb(int dbnum, int flags, void(callback)(void*)) {
return -1;
}
-for (j = 0; j < server.dbnum; j++) {
-if (dbnum != -1 && dbnum != j) continue;
+int startdb, enddb;
+if (dbnum == -1) {
+startdb = 0;
+enddb = server.dbnum-1;
+} else {
+startdb = enddb = dbnum;
+}
+for (int j = startdb; j <= enddb; j++) {
removed += dictSize(server.db[j].dict);
if (async) {
emptyDbAsync(&server.db[j]);
@@ -430,10 +448,7 @@ void flushallCommand(client *c) {
signalFlushedDb(-1);
server.dirty += emptyDb(-1,flags,NULL);
addReply(c,shared.ok);
-if (server.rdb_child_pid != -1) {
-kill(server.rdb_child_pid,SIGUSR1);
-rdbRemoveTempFile(server.rdb_child_pid);
-}
+if (server.rdb_child_pid != -1) killRDBChild();
if (server.saveparamslen > 0) {
/* Normally rdbSave() will reset dirty, but we don't want this here
* as otherwise FLUSHALL will not be replicated nor put into the AOF. */
@@ -507,7 +522,7 @@ void randomkeyCommand(client *c) {
robj *key;
if ((key = dbRandomKey(c->db)) == NULL) {
-addReply(c,shared.nullbulk);
+addReplyNull(c);
return;
}
@@ -521,7 +536,7 @@ void keysCommand(client *c) {
sds pattern = c->argv[1]->ptr;
int plen = sdslen(pattern), allkeys;
unsigned long numkeys = 0;
-void *replylen = addDeferredMultiBulkLength(c);
+void *replylen = addReplyDeferredLen(c);
di = dictGetSafeIterator(c->db->dict);
allkeys = (pattern[0] == '*' && pattern[1] == '\0');
@@ -531,7 +546,7 @@ void keysCommand(client *c) {
if (allkeys || stringmatchlen(pattern,plen,key,sdslen(key),0)) {
keyobj = createStringObject(key,sdslen(key));
-if (expireIfNeeded(c->db,keyobj) == 0) {
+if (!keyIsExpired(c->db,keyobj)) {
addReplyBulk(c,keyobj);
numkeys++;
}
@@ -539,7 +554,7 @@ void keysCommand(client *c) {
}
}
dictReleaseIterator(di);
-setDeferredMultiBulkLength(c,replylen,numkeys);
+setDeferredArrayLen(c,replylen,numkeys);
}
/* This callback is used by scanGenericCommand in order to collect elements
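
keysCommand() above, like clusterReplyMultiBulkSlots() earlier, relies on the deferred-length reply pattern: reserve the array header with addReplyDeferredLen(), emit elements as they qualify, then patch the count with setDeferredArrayLen(). A compact sketch of the same pattern; the helper is hypothetical, the reply and list API names are the real ones:

#include "server.h"

/* Hypothetical helper: reply with only the list elements that match. */
void replyMatching(client *c, list *items, int (*matches)(robj *)) {
    void *replylen = addReplyDeferredLen(c); /* Header emitted later. */
    long emitted = 0;
    listIter li;
    listNode *ln;

    listRewind(items, &li);
    while ((ln = listNext(&li)) != NULL) {
        robj *o = listNodeValue(ln);
        if (!matches(o)) continue;
        addReplyBulk(c, o);
        emitted++;
    }
    setDeferredArrayLen(c, replylen, emitted); /* Patch in the real count. */
}
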
...@@ -764,10 +779,10 @@ void scanGenericCommand(client *c, robj *o, unsigned long cursor) { ...@@ -764,10 +779,10 @@ void scanGenericCommand(client *c, robj *o, unsigned long cursor) {
} }
/* Step 4: Reply to the client. */ /* Step 4: Reply to the client. */
addReplyMultiBulkLen(c, 2); addReplyArrayLen(c, 2);
addReplyBulkLongLong(c,cursor); addReplyBulkLongLong(c,cursor);
addReplyMultiBulkLen(c, listLength(keys)); addReplyArrayLen(c, listLength(keys));
while ((node = listFirst(keys)) != NULL) { while ((node = listFirst(keys)) != NULL) {
robj *kobj = listNodeValue(node); robj *kobj = listNodeValue(node);
addReplyBulk(c, kobj); addReplyBulk(c, kobj);
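The renamed helpers (addReplyMultiBulkLen becoming addReplyArrayLen, plus the deferred-length and null-reply variants seen above) appear to come from the protocol-agnostic reply API; the shape of the SCAN reply itself is unchanged: a two-element array holding the next cursor and the array of returned keys. As a reference, this standalone snippet prints the RESP2 wire form of such a reply for a made-up cursor and two made-up keys:

#include <stdio.h>
#include <string.h>

/* Prints the RESP2 encoding of a SCAN-style reply:
 *   *2        outer array of two elements
 *   $.. 17    the next cursor as a bulk string
 *   *N        the array of keys
 *   $.. key   one bulk string per key
 */
int main(void) {
    const char *keys[] = { "user:1", "user:2" };
    size_t nkeys = sizeof(keys) / sizeof(keys[0]);
    unsigned long cursor = 17;
    char cbuf[32];
    int clen = snprintf(cbuf, sizeof(cbuf), "%lu", cursor);

    printf("*2\r\n");
    printf("$%d\r\n%s\r\n", clen, cbuf);
    printf("*%zu\r\n", nkeys);
    for (size_t i = 0; i < nkeys; i++)
        printf("$%zu\r\n%s\r\n", strlen(keys[i]), keys[i]);
    return 0;
}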
@@ -1108,6 +1123,25 @@ void propagateExpire(redisDb *db, robj *key, int lazy) {
     decrRefCount(argv[1]);
 }
 
+/* Check if the key is expired. */
+int keyIsExpired(redisDb *db, robj *key) {
+    mstime_t when = getExpire(db,key);
+
+    if (when < 0) return 0; /* No expire for this key */
+
+    /* Don't expire anything while loading. It will be done later. */
+    if (server.loading) return 0;
+
+    /* If we are in the context of a Lua script, we pretend that time is
+     * blocked to when the Lua script started. This way a key can expire
+     * only the first time it is accessed and not in the middle of the
+     * script execution, making propagation to slaves / AOF consistent.
+     * See issue #1525 on Github for more information. */
+    mstime_t now = server.lua_caller ? server.lua_time_start : mstime();
+
+    return now > when;
+}
+
 /* This function is called when we are going to perform some operation
  * in a given key, but such key may be already logically expired even if
  * it still exists in the database. The main way this function is called
@@ -1128,32 +1162,17 @@ void propagateExpire(redisDb *db, robj *key, int lazy) {
  * The return value of the function is 0 if the key is still valid,
  * otherwise the function returns 1 if the key is expired. */
 int expireIfNeeded(redisDb *db, robj *key) {
-    mstime_t when = getExpire(db,key);
-    mstime_t now;
-
-    if (when < 0) return 0; /* No expire for this key */
-
-    /* Don't expire anything while loading. It will be done later. */
-    if (server.loading) return 0;
-
-    /* If we are in the context of a Lua script, we pretend that time is
-     * blocked to when the Lua script started. This way a key can expire
-     * only the first time it is accessed and not in the middle of the
-     * script execution, making propagation to slaves / AOF consistent.
-     * See issue #1525 on Github for more information. */
-    now = server.lua_caller ? server.lua_time_start : mstime();
+    if (!keyIsExpired(db,key)) return 0;
 
-    /* If we are running in the context of a slave, return ASAP:
+    /* If we are running in the context of a slave, instead of
+     * evicting the expired key from the database, we return ASAP:
      * the slave key expiration is controlled by the master that will
      * send us synthesized DEL operations for expired keys.
      *
      * Still we try to return the right information to the caller,
      * that is, 0 if we think the key should be still valid, 1 if
      * we think the key is expired at this time. */
-    if (server.masterhost != NULL) return now > when;
-
-    /* Return when this key has not expired */
-    if (now <= when) return 0;
+    if (server.masterhost != NULL) return 1;
 
     /* Delete the key */
     server.stat_expiredkeys++;
...
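One non-obvious part of the extracted keyIsExpired() is the Lua rule: while a script is running, "now" is pinned to the script's start time, so a key cannot flip from live to expired halfway through the script, keeping what gets propagated to replicas and the AOF consistent. A toy timeline of that behaviour, with made-up numbers:

#include <stdio.h>

/* Toy version of the "virtual time" rule in keyIsExpired(): inside a script,
 * 'now' is the script start time; outside, it is the real clock. */
typedef struct { int in_script; long long lua_time_start; } toy_server;

static long long real_clock;

static long long toy_now(const toy_server *s) {
    return s->in_script ? s->lua_time_start : real_clock;
}

static int toy_is_expired(const toy_server *s, long long expire_at) {
    if (expire_at < 0) return 0;   /* no expire set */
    return toy_now(s) > expire_at;
}

int main(void) {
    toy_server s = { .in_script = 1, .lua_time_start = 100 };
    long long expire_at = 150;

    real_clock = 120;   /* first access, early in the script */
    printf("%d\n", toy_is_expired(&s, expire_at));   /* 0: still live */

    real_clock = 200;   /* wall clock passes the expire time mid-script */
    printf("%d\n", toy_is_expired(&s, expire_at));   /* still 0 in-script */

    s.in_script = 0;    /* outside the script, real time applies again */
    printf("%d\n", toy_is_expired(&s, expire_at));   /* 1: expired */
    return 0;
}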
@@ -739,6 +739,30 @@ unsigned int dictGetSomeKeys(dict *d, dictEntry **des, unsigned int count) {
     return stored;
 }
 
+/* This is like dictGetRandomKey() from the point of view of the API, but it
+ * does more work to ensure a better distribution of the returned element.
+ *
+ * This function improves the distribution because the problem with
+ * dictGetRandomKey() is that it selects a random bucket, then it selects a
+ * random element from the chain in that bucket. However, elements in chains
+ * of different lengths have different probabilities of being reported. With
+ * this function we instead consider a "linear" range of the table that may be
+ * made of N buckets with chains of different lengths appearing one after the
+ * other, and we report a random element from that range. In this way we
+ * smooth away the problem of different chain lengths. */
+#define GETFAIR_NUM_ENTRIES 15
+dictEntry *dictGetFairRandomKey(dict *d) {
+    dictEntry *entries[GETFAIR_NUM_ENTRIES];
+    unsigned int count = dictGetSomeKeys(d,entries,GETFAIR_NUM_ENTRIES);
+    /* Note that dictGetSomeKeys() may return zero elements in an unlucky
+     * run even if there are actually elements inside the hash table. So
+     * when we get zero, we call the true dictGetRandomKey() that will always
+     * yield an element if the hash table has at least one. */
+    if (count == 0) return dictGetRandomKey(d);
+    unsigned int idx = rand() % count;
+    return entries[idx];
+}
+
 /* Function to reverse bits. Algorithm from:
  * http://graphics.stanford.edu/~seander/bithacks.html#ReverseParallel */
 static unsigned long rev(unsigned long v) {
...
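The bias described in the comment above is easiest to see with numbers. Suppose one bucket's chain holds a single element and another holds nine: picking a bucket uniformly and then an element within it gives the lone element a 50% chance and each element of the long chain roughly 5.6%, while a fair draw over all ten elements would give each 10%. The sketch below just computes those probabilities (the chain lengths are made up):

#include <stdio.h>

int main(void) {
    int chain_len[] = { 1, 9 };          /* made-up chain lengths per bucket */
    int buckets = 2;
    int total = chain_len[0] + chain_len[1];

    for (int b = 0; b < buckets; b++) {
        double p_biased = (1.0 / buckets) * (1.0 / chain_len[b]);
        double p_fair = 1.0 / total;
        printf("bucket %d: per-element probability %.3f biased vs %.3f fair\n",
               b, p_biased, p_fair);
    }
    return 0;
}

Because dictGetSomeKeys() collects entries from a contiguous run of buckets and the final pick is uniform over the collected entries, averaging over up to GETFAIR_NUM_ENTRIES candidates pulls the per-element probabilities toward the fair value.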
@@ -166,6 +166,7 @@ dictIterator *dictGetSafeIterator(dict *d);
 dictEntry *dictNext(dictIterator *iter);
 void dictReleaseIterator(dictIterator *iter);
 dictEntry *dictGetRandomKey(dict *d);
+dictEntry *dictGetFairRandomKey(dict *d);
 unsigned int dictGetSomeKeys(dict *d, dictEntry **des, unsigned int count);
 void dictGetStats(char *buf, size_t bufsize, dict *d);
 uint64_t dictGenHashFunction(const void *key, int len);
...
@@ -364,7 +364,7 @@ size_t freeMemoryGetNotCountedMemory(void) {
         }
     }
     if (server.aof_state != AOF_OFF) {
-        overhead += sdslen(server.aof_buf)+aofRewriteBufferSize();
+        overhead += sdsalloc(server.aof_buf)+aofRewriteBufferSize();
     }
     return overhead;
 }
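The switch from sdslen() to sdsalloc() discounts the AOF buffer by its allocated capacity rather than by the bytes currently used, which better matches what the allocator actually charged for the buffer; an sds string keeps both figures in its header. A toy contrast of the two quantities (a plain struct, not the real sds header):

#include <stdio.h>

/* Toy stand-in for an sds header: 'len' is what sdslen() reports (used
 * bytes), 'alloc' is what sdsalloc() reports (allocated capacity). */
typedef struct { size_t len; size_t alloc; } toy_sds_hdr;

int main(void) {
    /* e.g. an AOF buffer that grew to 1 MB earlier but currently holds 10 KB */
    toy_sds_hdr aof_buf = { .len = 10 * 1024, .alloc = 1024 * 1024 };

    printf("sdslen-style discount:   %zu bytes\n", aof_buf.len);
    printf("sdsalloc-style discount: %zu bytes\n", aof_buf.alloc);
    return 0;
}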
...@@ -444,6 +444,10 @@ int getMaxmemoryState(size_t *total, size_t *logical, size_t *tofree, float *lev ...@@ -444,6 +444,10 @@ int getMaxmemoryState(size_t *total, size_t *logical, size_t *tofree, float *lev
* Otehrwise if we are over the memory limit, but not enough memory * Otehrwise if we are over the memory limit, but not enough memory
* was freed to return back under the limit, the function returns C_ERR. */ * was freed to return back under the limit, the function returns C_ERR. */
int freeMemoryIfNeeded(void) { int freeMemoryIfNeeded(void) {
/* By default replicas should ignore maxmemory
* and just be masters exact copies. */
if (server.masterhost && server.repl_slave_ignore_maxmemory) return C_OK;
size_t mem_reported, mem_tofree, mem_freed; size_t mem_reported, mem_tofree, mem_freed;
mstime_t latency, eviction_latency; mstime_t latency, eviction_latency;
long long delta; long long delta;
@@ -618,3 +622,14 @@ cant_free:
     return C_ERR;
 }
 
+/* This is a wrapper for freeMemoryIfNeeded() that only really calls the
+ * function if the conditions to do so safely hold right now:
+ *
+ * - There must be no script in a timeout condition.
+ * - We must not be loading data right now.
+ *
+ */
+int freeMemoryIfNeededAndSafe(void) {
+    if (server.lua_timedout || server.loading) return C_OK;
+    return freeMemoryIfNeeded();
+}
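The wrapper lets call sites ask for eviction unconditionally while the "is it safe right now" policy stays in one place: no eviction while a script has hit the busy-script timeout, and none while loading. A toy sketch of the same guard-then-delegate shape, with stub flags standing in for server.lua_timedout and server.loading:

#include <stdio.h>

#define C_OK  0
#define C_ERR -1

/* Stub flags standing in for server.lua_timedout and server.loading. */
static int lua_timedout = 0;
static int loading = 0;

/* Stand-in for the real eviction pass. */
static int free_memory_if_needed(void) {
    printf("eviction pass runs\n");
    return C_OK;
}

/* Same shape as freeMemoryIfNeededAndSafe(): skip the pass entirely when it
 * would not be safe, otherwise delegate. */
static int free_memory_if_needed_and_safe(void) {
    if (lua_timedout || loading) return C_OK;
    return free_memory_if_needed();
}

int main(void) {
    free_memory_if_needed_and_safe();   /* prints: eviction pass runs */
    loading = 1;
    free_memory_if_needed_and_safe();   /* silently skipped while loading */
    return 0;
}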