Unverified Commit 49816941 authored by chendianqiang, committed by GitHub

Merge pull request #2 from antirez/unstable

merge from redis
parents 68ceb466 f311a529
......@@ -11,4 +11,4 @@ then
echo "You need tcl 8.5 or newer in order to run the Redis test"
exit 1
fi
$TCLSH tests/test_helper.tcl $*
$TCLSH tests/test_helper.tcl "${@}"
......@@ -20,6 +20,21 @@
# The port that this sentinel instance will run on
port 26379
# By default Redis Sentinel does not run as a daemon. Use 'yes' if you need it.
# Note that Redis will write a pid file in /var/run/redis-sentinel.pid when
# daemonized.
daemonize no
# When running daemonized, Redis Sentinel writes a pid file in
# /var/run/redis-sentinel.pid by default. You can specify a custom pid file
# location here.
pidfile /var/run/redis-sentinel.pid
# Specify the log file name. Also the empty string can be used to force
# Sentinel to log on the standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
logfile ""
# sentinel announce-ip <ip>
# sentinel announce-port <port>
#
......@@ -58,11 +73,11 @@ dir /tmp
# be elected by the majority of the known Sentinels in order to
# start a failover, so no failover can be performed in minority.
#
# Slaves are auto-discovered, so you don't need to specify slaves in
# Replicas are auto-discovered, so you don't need to specify replicas in
# any way. Sentinel itself will rewrite this configuration file adding
# the slaves using additional configuration options.
# the replicas using additional configuration options.
# Also note that the configuration file is rewritten when a
# slave is promoted to master.
# replica is promoted to master.
#
# Note: master name should not include special characters or spaces.
# The valid charset is A-z 0-9 and the three characters ".-_".
......@@ -70,11 +85,11 @@ sentinel monitor mymaster 127.0.0.1 6379 2
# sentinel auth-pass <master-name> <password>
#
# Set the password to use to authenticate with the master and slaves.
# Set the password to use to authenticate with the master and replicas.
# Useful if there is a password set in the Redis instances to monitor.
#
# Note that the master password is also used for slaves, so it is not
# possible to set a different password in masters and slaves instances
# Note that the master password is also used for replicas, so it is not
# possible to set a different password in masters and replicas instances
# if you want to be able to monitor these instances with Sentinel.
#
# However you can have Redis instances without the authentication enabled
......@@ -89,7 +104,7 @@ sentinel monitor mymaster 127.0.0.1 6379 2
# sentinel down-after-milliseconds <master-name> <milliseconds>
#
# Number of milliseconds the master (or any attached slave or sentinel) should
# Number of milliseconds the master (or any attached replica or sentinel) should
# be unreachable (as in, no acceptable reply to PING, continuously, for the
# specified period) in order to consider it in S_DOWN state (Subjectively
# Down).
......@@ -97,11 +112,11 @@ sentinel monitor mymaster 127.0.0.1 6379 2
# Default is 30 seconds.
sentinel down-after-milliseconds mymaster 30000
# sentinel parallel-syncs <master-name> <numslaves>
# sentinel parallel-syncs <master-name> <numreplicas>
#
# How many slaves we can reconfigure to point to the new slave simultaneously
# during the failover. Use a low number if you use the slaves to serve query
# to avoid that all the slaves will be unreachable at about the same
# How many replicas we can reconfigure to point to the new replica simultaneously
# during the failover. Use a low number if you use the replicas to serve queries,
# to avoid having all the replicas unreachable at about the same
# time while performing the synchronization with the master.
sentinel parallel-syncs mymaster 1
......@@ -113,18 +128,18 @@ sentinel parallel-syncs mymaster 1
# already tried against the same master by a given Sentinel, is two
# times the failover timeout.
#
# - The time needed for a slave replicating to a wrong master according
# - The time needed for a replica replicating to a wrong master according
# to a Sentinel current configuration, to be forced to replicate
# with the right master, is exactly the failover timeout (counting since
# the moment a Sentinel detected the misconfiguration).
#
# - The time needed to cancel a failover that is already in progress but
# did not produce any configuration change (SLAVEOF NO ONE yet not
# acknowledged by the promoted slave).
# acknowledged by the promoted replica).
#
# - The maximum time a failover in progress waits for all the slaves to be
# reconfigured as slaves of the new master. However even after this time
# the slaves will be reconfigured by the Sentinels anyway, but not with
# - The maximum time a failover in progress waits for all the replicas to be
# reconfigured as replicas of the new master. However even after this time
# the replicas will be reconfigured by the Sentinels anyway, but not with
# the exact parallel-syncs progression as specified.
#
# Default is 3 minutes.
......@@ -185,7 +200,7 @@ sentinel failover-timeout mymaster 180000
# <role> is either "leader" or "observer"
#
# The arguments from-ip, from-port, to-ip, to-port are used to communicate
# the old address of the master and the new address of the elected slave
# the old address of the master and the new address of the elected replica
# (now a master).
#
# This script should be resistant to multiple invocations.
......@@ -213,12 +228,17 @@ sentinel deny-scripts-reconfig yes
#
# In such case it is possible to tell Sentinel to use different command names
# instead of the normal ones. For example if the master "mymaster", and the
# associated slaves, have "CONFIG" all renamed to "GUESSME", I could use:
# associated replicas, have "CONFIG" all renamed to "GUESSME", I could use:
#
# sentinel rename-command mymaster CONFIG GUESSME
# SENTINEL rename-command mymaster CONFIG GUESSME
#
# After such configuration is set, every time Sentinel would use CONFIG it will
# use GUESSME instead. Note that there is no actual need to respect the command
# case, so writing "config guessme" is the same in the example above.
#
# SENTINEL SET can also be used in order to perform this configuration at runtime.
#
# In order to set a command back to its original name (undo the renaming), it
# is possible to just rename a command to itself:
#
# SENTINEL rename-command mymaster CONFIG CONFIG
......@@ -21,6 +21,11 @@ NODEPS:=clean distclean
# Default settings
STD=-std=c99 -pedantic -DREDIS_STATIC=''
ifneq (,$(findstring clang,$(CC)))
ifneq (,$(findstring FreeBSD,$(uname_S)))
STD+=-Wno-c11-extensions
endif
endif
WARN=-Wall -W -Wno-missing-field-initializers
OPT=$(OPTIMIZATION)
......@@ -41,6 +46,10 @@ endif
# To get ARM stack traces if Redis crashes we need a special C flag.
ifneq (,$(filter aarch64 armv,$(uname_M)))
CFLAGS+=-funwind-tables
else
ifneq (,$(findstring armv,$(uname_M)))
CFLAGS+=-funwind-tables
endif
endif
# Backwards compatibility for selecting an allocator
......@@ -93,10 +102,20 @@ else
ifeq ($(uname_S),OpenBSD)
# OpenBSD
FINAL_LIBS+= -lpthread
ifeq ($(USE_BACKTRACE),yes)
FINAL_CFLAGS+= -DUSE_BACKTRACE -I/usr/local/include
FINAL_LDFLAGS+= -L/usr/local/lib
FINAL_LIBS+= -lexecinfo
endif
else
ifeq ($(uname_S),FreeBSD)
# FreeBSD
FINAL_LIBS+= -lpthread
FINAL_LIBS+= -lpthread -lexecinfo
else
ifeq ($(uname_S),DragonFly)
# DragonFly
FINAL_LIBS+= -lpthread -lexecinfo
else
# All the other OSes (notably Linux)
FINAL_LDFLAGS+= -rdynamic
......@@ -106,6 +125,7 @@ endif
endif
endif
endif
endif
# Include paths to dependencies
FINAL_CFLAGS+= -I../deps/hiredis -I../deps/linenoise -I../deps/lua/src
......@@ -144,7 +164,7 @@ endif
REDIS_SERVER_NAME=redis-server
REDIS_SENTINEL_NAME=redis-sentinel
REDIS_SERVER_OBJ=adlist.o quicklist.o ae.o anet.o dict.o server.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endianconv.o slowlog.o scripting.o bio.o rio.o rand.o memtest.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o redis-check-rdb.o redis-check-aof.o geo.o lazyfree.o module.o evict.o expire.o geohash.o geohash_helper.o childinfo.o defrag.o siphash.o rax.o t_stream.o listpack.o localtime.o
REDIS_SERVER_OBJ=adlist.o quicklist.o ae.o anet.o dict.o server.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endianconv.o slowlog.o scripting.o bio.o rio.o rand.o memtest.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o redis-check-rdb.o redis-check-aof.o geo.o lazyfree.o module.o evict.o expire.o geohash.o geohash_helper.o childinfo.o defrag.o siphash.o rax.o t_stream.o listpack.o localtime.o lolwut.o lolwut5.o acl.o gopher.o
REDIS_CLI_NAME=redis-cli
REDIS_CLI_OBJ=anet.o adlist.o dict.o redis-cli.o zmalloc.o release.o anet.o ae.o crc64.o siphash.o crc16.o
REDIS_BENCHMARK_NAME=redis-benchmark
......
......@@ -351,8 +351,8 @@ static int processTimeEvents(aeEventLoop *eventLoop) {
* if flags has AE_FILE_EVENTS set, file events are processed.
* if flags has AE_TIME_EVENTS set, time events are processed.
* if flags has AE_DONT_WAIT set the function returns ASAP until all
* if flags has AE_CALL_AFTER_SLEEP set, the aftersleep callback is called.
* the events that's possible to process without to wait are processed.
* if flags has AE_CALL_AFTER_SLEEP set, the aftersleep callback is called.
*
* The function returns the number of events processed. */
int aeProcessEvents(aeEventLoop *eventLoop, int flags)
......
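For illustration, here is a minimal sketch of an event-loop driver in the spirit of aeMain(), showing how the flags documented above are typically combined. The runEventLoop name is made up; aeEventLoop, aeProcessEvents, AE_ALL_EVENTS and AE_CALL_AFTER_SLEEP come from ae.h.

    #include "ae.h"

    /* Drive the loop until someone sets eventLoop->stop: process file and time
     * events on every iteration and fire the aftersleep callback once the
     * poll call returns, as described in the comment above. */
    void runEventLoop(aeEventLoop *eventLoop) {
        eventLoop->stop = 0;
        while (!eventLoop->stop) {
            if (eventLoop->beforesleep != NULL)
                eventLoop->beforesleep(eventLoop);
            aeProcessEvents(eventLoop, AE_ALL_EVENTS|AE_CALL_AFTER_SLEEP);
        }
    }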
......@@ -204,7 +204,7 @@ void aof_background_fsync(int fd) {
}
/* Kills an AOFRW child process if exists */
static void killAppendOnlyChild(void) {
void killAppendOnlyChild(void) {
int statloc;
/* No AOFRW child? return. */
if (server.aof_child_pid == -1) return;
......@@ -221,6 +221,8 @@ static void killAppendOnlyChild(void) {
server.aof_rewrite_time_start = -1;
/* Close pipes used for IPC between the two processes. */
aofClosePipes();
closeChildInfoPipe();
updateDictResizePolicy();
}
/* Called when the user switches from "appendonly yes" to "appendonly no"
......@@ -645,6 +647,8 @@ struct client *createFakeClient(void) {
c->obuf_soft_limit_reached_time = 0;
c->watched_keys = listCreate();
c->peerid = NULL;
c->resp = 2;
c->user = NULL;
listSetFreeMethod(c->reply,freeClientReplyValue);
listSetDupMethod(c->reply,dupClientReplyValue);
initClientMultiState(c);
......@@ -677,6 +681,7 @@ int loadAppendOnlyFile(char *filename) {
int old_aof_state = server.aof_state;
long loops = 0;
off_t valid_up_to = 0; /* Offset of latest well-formed command loaded. */
off_t valid_before_multi = 0; /* Offset before MULTI command loaded. */
if (fp == NULL) {
serverLog(LL_WARNING,"Fatal error: can't open the append log file for reading: %s",strerror(errno));
......@@ -777,16 +782,28 @@ int loadAppendOnlyFile(char *filename) {
/* Command lookup */
cmd = lookupCommand(argv[0]->ptr);
if (!cmd) {
serverLog(LL_WARNING,"Unknown command '%s' reading the append only file", (char*)argv[0]->ptr);
serverLog(LL_WARNING,
"Unknown command '%s' reading the append only file",
(char*)argv[0]->ptr);
exit(1);
}
if (cmd == server.multiCommand) valid_before_multi = valid_up_to;
/* Run the command in the context of a fake client */
fakeClient->cmd = cmd;
cmd->proc(fakeClient);
if (fakeClient->flags & CLIENT_MULTI &&
fakeClient->cmd->proc != execCommand)
{
queueMultiCommand(fakeClient);
} else {
cmd->proc(fakeClient);
}
/* The fake client should not have a reply */
serverAssert(fakeClient->bufpos == 0 && listLength(fakeClient->reply) == 0);
serverAssert(fakeClient->bufpos == 0 &&
listLength(fakeClient->reply) == 0);
/* The fake client should never get blocked */
serverAssert((fakeClient->flags & CLIENT_BLOCKED) == 0);
......@@ -798,8 +815,15 @@ int loadAppendOnlyFile(char *filename) {
}
/* This point can only be reached when EOF is reached without errors.
* If the client is in the middle of a MULTI/EXEC, log error and quit. */
if (fakeClient->flags & CLIENT_MULTI) goto uxeof;
* If the client is in the middle of a MULTI/EXEC, handle it as it was
* a short read, even if technically the protocol is correct: we want
* to remove the unprocessed tail and continue. */
if (fakeClient->flags & CLIENT_MULTI) {
serverLog(LL_WARNING,
"Revert incomplete MULTI/EXEC transaction in AOF file");
valid_up_to = valid_before_multi;
goto uxeof;
}
loaded_ok: /* DB loaded, cleanup and return C_OK to the caller. */
fclose(fp);
......@@ -1119,25 +1143,47 @@ int rewriteStreamObject(rio *r, robj *key, robj *o) {
streamID id;
int64_t numfields;
/* Reconstruct the stream data using XADD commands. */
while(streamIteratorGetID(&si,&id,&numfields)) {
/* Emit a two elements array for each item. The first is
* the ID, the second is an array of field-value pairs. */
if (s->length) {
/* Reconstruct the stream data using XADD commands. */
while(streamIteratorGetID(&si,&id,&numfields)) {
/* Emit a two elements array for each item. The first is
* the ID, the second is an array of field-value pairs. */
/* Emit the XADD <key> <id> ...fields... command. */
if (rioWriteBulkCount(r,'*',3+numfields*2) == 0) return 0;
/* Emit the XADD <key> <id> ...fields... command. */
if (rioWriteBulkCount(r,'*',3+numfields*2) == 0) return 0;
if (rioWriteBulkString(r,"XADD",4) == 0) return 0;
if (rioWriteBulkObject(r,key) == 0) return 0;
if (rioWriteBulkStreamID(r,&id) == 0) return 0;
while(numfields--) {
unsigned char *field, *value;
int64_t field_len, value_len;
streamIteratorGetField(&si,&field,&value,&field_len,&value_len);
if (rioWriteBulkString(r,(char*)field,field_len) == 0) return 0;
if (rioWriteBulkString(r,(char*)value,value_len) == 0) return 0;
}
}
} else {
/* Use the XADD MAXLEN 0 trick to generate an empty stream if
* the key we are serializing is an empty string, which is possible
* for the Stream type. */
if (rioWriteBulkCount(r,'*',7) == 0) return 0;
if (rioWriteBulkString(r,"XADD",4) == 0) return 0;
if (rioWriteBulkObject(r,key) == 0) return 0;
if (rioWriteBulkStreamID(r,&id) == 0) return 0;
while(numfields--) {
unsigned char *field, *value;
int64_t field_len, value_len;
streamIteratorGetField(&si,&field,&value,&field_len,&value_len);
if (rioWriteBulkString(r,(char*)field,field_len) == 0) return 0;
if (rioWriteBulkString(r,(char*)value,value_len) == 0) return 0;
}
if (rioWriteBulkString(r,"MAXLEN",6) == 0) return 0;
if (rioWriteBulkString(r,"0",1) == 0) return 0;
if (rioWriteBulkStreamID(r,&s->last_id) == 0) return 0;
if (rioWriteBulkString(r,"x",1) == 0) return 0;
if (rioWriteBulkString(r,"y",1) == 0) return 0;
}
/* Append XSETID after XADD, to make sure the last ID is correct
* in case the entry with the last ID was removed with XDEL. */
if (rioWriteBulkCount(r,'*',3) == 0) return 0;
if (rioWriteBulkString(r,"XSETID",6) == 0) return 0;
if (rioWriteBulkObject(r,key) == 0) return 0;
if (rioWriteBulkStreamID(r,&s->last_id) == 0) return 0;
/* Create all the stream consumer groups. */
if (s->cgroups) {
raxIterator ri;
......
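To make the empty-stream path above concrete, here is a hedged illustration of the commands the rewritten AOF would carry for a hypothetical empty stream mystream whose last generated ID is 5-1 (the key name and ID are made up):

    XADD mystream MAXLEN 0 5-1 x y
    XSETID mystream 5-1

The XADD creates a dummy x/y entry at the last ID and the MAXLEN 0 option trims it away immediately, leaving an empty stream; the trailing XSETID, which is emitted for every stream, then pins the last ID so that IDs keep increasing after the AOF is reloaded.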
/* This file implements atomic counters using __atomic or __sync macros if
* available, otherwise synchronizing different threads using a mutex.
*
* The exported interaface is composed of three macros:
* The exported interface is composed of three macros:
*
* atomicIncr(var,count) -- Increment the atomic counter
* atomicGetIncr(var,oldvalue_var,count) -- Get and increment the atomic counter
......
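A short usage sketch of the counter macros listed above. The function names here are illustrative, and the sketch assumes the __atomic/__sync builtins are available, since the mutex fallback may additionally require a companion mutex for the variable.

    #include "atomicvar.h"

    static long long ops_done = 0;   /* counter shared across threads */

    void record_op(void) {
        /* Increment the shared counter by one; safe from any thread. */
        atomicIncr(ops_done, 1);
    }

    long long fetch_and_bump(void) {
        long long old;
        /* Read the current value and increment it in a single atomic step. */
        atomicGetIncr(ops_done, old, 1);
        return old;
    }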
......@@ -17,7 +17,7 @@
*
* The design is trivial, we have a structure representing a job to perform
* and a different thread and job queue for every job type.
* Every thread wait for new jobs in its queue, and process every job
* Every thread waits for new jobs in its queue, and processes every job
* sequentially.
*
* Jobs of the same type are guaranteed to be processed from the least
......@@ -204,14 +204,14 @@ void *bioProcessBackgroundJobs(void *arg) {
}
zfree(job);
/* Unblock threads blocked on bioWaitStepOfType() if any. */
pthread_cond_broadcast(&bio_step_cond[type]);
/* Lock again before reiterating the loop, if there are no longer
* jobs to process we'll block again in pthread_cond_wait(). */
pthread_mutex_lock(&bio_mutex[type]);
listDelNode(bio_jobs[type],ln);
bio_pending[type]--;
/* Unblock threads blocked on bioWaitStepOfType() if any. */
pthread_cond_broadcast(&bio_step_cond[type]);
}
}
......
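The per-job-type worker design described above boils down to a classic condition-variable consumer loop. A stand-alone sketch follows (plain pthreads, not the actual bio.c code; all names are illustrative) that also reflects the ordering fixed in this hunk, where waiters are woken only after the job list and pending counter have been updated under the lock.

    #include <pthread.h>
    #include <stdlib.h>

    typedef struct job { struct job *next; void (*fn)(void *); void *arg; } job;

    static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t newjob_cond = PTHREAD_COND_INITIALIZER;
    static pthread_cond_t step_cond = PTHREAD_COND_INITIALIZER;
    static job *head = NULL;            /* oldest job first */
    static unsigned long pending = 0;

    void *worker(void *unused) {
        (void)unused;
        pthread_mutex_lock(&mutex);
        while (1) {
            /* Sleep until a job is queued for this worker. */
            while (head == NULL) pthread_cond_wait(&newjob_cond, &mutex);
            job *j = head;
            pthread_mutex_unlock(&mutex);
            j->fn(j->arg);              /* run the job without holding the lock */
            pthread_mutex_lock(&mutex);
            head = j->next;             /* remove it and update the counter... */
            pending--;
            free(j);
            /* ...and only then wake anyone waiting for a processing step. */
            pthread_cond_broadcast(&step_cond);
        }
        return NULL;
    }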
......@@ -1002,7 +1002,7 @@ void bitfieldCommand(client *c) {
highest_write_offset)) == NULL) return;
}
addReplyMultiBulkLen(c,numops);
addReplyArrayLen(c,numops);
/* Actually process the operations. */
for (j = 0; j < numops; j++) {
......@@ -1047,7 +1047,7 @@ void bitfieldCommand(client *c) {
setSignedBitfield(o->ptr,thisop->offset,
thisop->bits,newval);
} else {
addReply(c,shared.nullbulk);
addReplyNull(c);
}
} else {
uint64_t oldval, newval, wrapped, retval;
......@@ -1076,7 +1076,7 @@ void bitfieldCommand(client *c) {
setUnsignedBitfield(o->ptr,thisop->offset,
thisop->bits,newval);
} else {
addReply(c,shared.nullbulk);
addReplyNull(c);
}
}
changes++;
......
......@@ -126,12 +126,37 @@ void processUnblockedClients(void) {
* the code is conceptually more correct this way. */
if (!(c->flags & CLIENT_BLOCKED)) {
if (c->querybuf && sdslen(c->querybuf) > 0) {
processInputBuffer(c);
processInputBufferAndReplicate(c);
}
}
}
}
/* This function will schedule the client for reprocessing at a safe time.
*
* This is useful when a client was blocked for some reason (blocking operation,
* CLIENT PAUSE, or whatever), because it may end with some accumulated query
* buffer that needs to be processed ASAP:
*
* 1. When a client is blocked, its readable handler is still active.
* 2. However in this case it only gets data into the query buffer, but the
* query is not parsed or executed once there is enough to proceed as
* usual (because the client is blocked... so we can't execute commands).
* 3. When the client is unblocked, without this function, the client would
* have to write some query in order for the readable handler to finally
* call processQueryBuffer*() on it.
* 4. With this function instead we can put the client in a queue that will
* process it for queries ready to be executed at a safe time.
*/
void queueClientForReprocessing(client *c) {
/* The client may already be in the unblocked list because of a previous
* blocking operation; don't add it back into the list multiple times. */
if (!(c->flags & CLIENT_UNBLOCKED)) {
c->flags |= CLIENT_UNBLOCKED;
listAddNodeTail(server.unblocked_clients,c);
}
}
/* Unblock a client calling the right function depending on the kind
* of operation the client is blocking for. */
void unblockClient(client *c) {
......@@ -152,12 +177,7 @@ void unblockClient(client *c) {
server.blocked_clients_by_type[c->btype]--;
c->flags &= ~CLIENT_BLOCKED;
c->btype = BLOCKED_NONE;
/* The client may already be into the unblocked list because of a previous
* blocking operation, don't add back it into the list multiple times. */
if (!(c->flags & CLIENT_UNBLOCKED)) {
c->flags |= CLIENT_UNBLOCKED;
listAddNodeTail(server.unblocked_clients,c);
}
queueClientForReprocessing(c);
}
/* This function gets called when a blocked client timed out in order to
......@@ -167,7 +187,7 @@ void replyToBlockedClientTimedOut(client *c) {
if (c->btype == BLOCKED_LIST ||
c->btype == BLOCKED_ZSET ||
c->btype == BLOCKED_STREAM) {
addReply(c,shared.nullmultibulk);
addReplyNullArray(c);
} else if (c->btype == BLOCKED_WAIT) {
addReplyLongLong(c,replicationCountAcksByOffset(c->bpop.reploffset));
} else if (c->btype == BLOCKED_MODULE) {
......@@ -195,7 +215,7 @@ void disconnectAllBlockedClients(void) {
if (c->flags & CLIENT_BLOCKED) {
addReplySds(c,sdsnew(
"-UNBLOCKED force unblock from blocking operation, "
"instance state changed (master -> slave?)\r\n"));
"instance state changed (master -> replica?)\r\n"));
unblockClient(c);
c->flags |= CLIENT_CLOSE_AFTER_REPLY;
}
......@@ -269,7 +289,7 @@ void handleClientsBlockedOnKeys(void) {
robj *dstkey = receiver->bpop.target;
int where = (receiver->lastcmd &&
receiver->lastcmd->proc == blpopCommand) ?
LIST_HEAD : LIST_TAIL;
LIST_HEAD : LIST_TAIL;
robj *value = listTypePop(o,where);
if (value) {
......@@ -285,7 +305,7 @@ void handleClientsBlockedOnKeys(void) {
{
/* If we failed serving the client we need
* to also undo the POP operation. */
listTypePush(o,value,where);
listTypePush(o,value,where);
}
if (dstkey) decrRefCount(dstkey);
......@@ -416,8 +436,12 @@ void handleClientsBlockedOnKeys(void) {
* the name of the stream and the data we
* extracted from it. Wrapped in a single-item
* array, since we have just one key. */
addReplyMultiBulkLen(receiver,1);
addReplyMultiBulkLen(receiver,2);
if (receiver->resp == 2) {
addReplyArrayLen(receiver,1);
addReplyArrayLen(receiver,2);
} else {
addReplyMapLen(receiver,1);
}
addReplyBulk(receiver,rl->key);
streamPropInfo pi = {
......
......@@ -1230,7 +1230,7 @@ void clearNodeFailureIfNeeded(clusterNode *node) {
serverLog(LL_NOTICE,
"Clear FAIL state for node %.40s: %s is reachable again.",
node->name,
nodeIsSlave(node) ? "slave" : "master without slots");
nodeIsSlave(node) ? "replica" : "master without slots");
node->flags &= ~CLUSTER_NODE_FAIL;
clusterDoBeforeSleep(CLUSTER_TODO_UPDATE_STATE|CLUSTER_TODO_SAVE_CONFIG);
}
......@@ -1589,6 +1589,12 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc
}
}
/* After updating the slots configuration, don't do any actual change
* in the state of the server if a module disabled Redis Cluster
* keys redirections. */
if (server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_REDIRECTION)
return;
/* If at least one slot was reassigned from a node to another node
* with a greater configEpoch, it is possible that:
* 1) We are a master left without slots. This means that we were
......@@ -2059,7 +2065,7 @@ int clusterProcessPacket(clusterLink *link) {
server.cluster->mf_end = mstime() + CLUSTER_MF_TIMEOUT;
server.cluster->mf_slave = sender;
pauseClients(mstime()+(CLUSTER_MF_TIMEOUT*2));
serverLog(LL_WARNING,"Manual failover requested by slave %.40s.",
serverLog(LL_WARNING,"Manual failover requested by replica %.40s.",
sender->name);
} else if (type == CLUSTERMSG_TYPE_UPDATE) {
clusterNode *n; /* The node the update is about. */
......@@ -2873,7 +2879,7 @@ void clusterLogCantFailover(int reason) {
switch(reason) {
case CLUSTER_CANT_FAILOVER_DATA_AGE:
msg = "Disconnected from master for longer than allowed. "
"Please check the 'cluster-slave-validity-factor' configuration "
"Please check the 'cluster-replica-validity-factor' configuration "
"option.";
break;
case CLUSTER_CANT_FAILOVER_WAITING_DELAY:
......@@ -3054,7 +3060,7 @@ void clusterHandleSlaveFailover(void) {
server.cluster->failover_auth_time += added_delay;
server.cluster->failover_auth_rank = newrank;
serverLog(LL_WARNING,
"Slave rank updated to #%d, added %lld milliseconds of delay.",
"Replica rank updated to #%d, added %lld milliseconds of delay.",
newrank, added_delay);
}
}
......@@ -3210,7 +3216,8 @@ void clusterHandleSlaveMigration(int max_slaves) {
* the natural slaves of this instance to advertise their switch from
* the old master to the new one. */
if (target && candidate == myself &&
(mstime()-target->orphaned_time) > CLUSTER_SLAVE_MIGRATION_DELAY)
(mstime()-target->orphaned_time) > CLUSTER_SLAVE_MIGRATION_DELAY &&
!(server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_FAILOVER))
{
serverLog(LL_WARNING,"Migrating to orphaned master %.40s",
target->name);
......@@ -3321,14 +3328,18 @@ void clusterCron(void) {
int changed = 0;
if (prev_ip == NULL && curr_ip != NULL) changed = 1;
if (prev_ip != NULL && curr_ip == NULL) changed = 1;
if (prev_ip && curr_ip && strcmp(prev_ip,curr_ip)) changed = 1;
else if (prev_ip != NULL && curr_ip == NULL) changed = 1;
else if (prev_ip && curr_ip && strcmp(prev_ip,curr_ip)) changed = 1;
if (changed) {
if (prev_ip) zfree(prev_ip);
prev_ip = curr_ip;
if (prev_ip) prev_ip = zstrdup(prev_ip);
if (curr_ip) {
/* We always take a copy of the previous IP address, by
* duplicating the string. This way later we can check if
* the address really changed. */
prev_ip = zstrdup(prev_ip);
strncpy(myself->ip,server.cluster_announce_ip,NET_IP_STR_LEN);
myself->ip[NET_IP_STR_LEN-1] = '\0';
} else {
......@@ -3559,7 +3570,8 @@ void clusterCron(void) {
if (nodeIsSlave(myself)) {
clusterHandleManualFailover();
clusterHandleSlaveFailover();
if (!(server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_FAILOVER))
clusterHandleSlaveFailover();
/* If there are orphaned slaves, and we are a slave among the masters
* with the max number of non-failing slaves, consider migrating to
* the orphaned masters. Note that it does not make sense to try
......@@ -3865,6 +3877,11 @@ int verifyClusterConfigWithData(void) {
int j;
int update_config = 0;
/* Return ASAP if a module disabled cluster redirections. In that case
* every master can store keys about every possible hash slot. */
if (server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_REDIRECTION)
return C_OK;
/* If this node is a slave, don't perform the check at all as we
* completely depend on the replication stream. */
if (nodeIsSlave(myself)) return C_OK;
......@@ -4109,7 +4126,7 @@ void clusterReplyMultiBulkSlots(client *c) {
*/
int num_masters = 0;
void *slot_replylen = addDeferredMultiBulkLength(c);
void *slot_replylen = addReplyDeferredLen(c);
dictEntry *de;
dictIterator *di = dictGetSafeIterator(server.cluster->nodes);
......@@ -4129,7 +4146,7 @@ void clusterReplyMultiBulkSlots(client *c) {
}
if (start != -1 && (!bit || j == CLUSTER_SLOTS-1)) {
int nested_elements = 3; /* slots (2) + master addr (1). */
void *nested_replylen = addDeferredMultiBulkLength(c);
void *nested_replylen = addReplyDeferredLen(c);
if (bit && j == CLUSTER_SLOTS-1) j++;
......@@ -4145,7 +4162,7 @@ void clusterReplyMultiBulkSlots(client *c) {
start = -1;
/* First node reply position is always the master */
addReplyMultiBulkLen(c, 3);
addReplyArrayLen(c, 3);
addReplyBulkCString(c, node->ip);
addReplyLongLong(c, node->port);
addReplyBulkCBuffer(c, node->name, CLUSTER_NAMELEN);
......@@ -4155,19 +4172,19 @@ void clusterReplyMultiBulkSlots(client *c) {
/* This loop is copy/pasted from clusterGenNodeDescription()
* with modifications for per-slot node aggregation */
if (nodeFailed(node->slaves[i])) continue;
addReplyMultiBulkLen(c, 3);
addReplyArrayLen(c, 3);
addReplyBulkCString(c, node->slaves[i]->ip);
addReplyLongLong(c, node->slaves[i]->port);
addReplyBulkCBuffer(c, node->slaves[i]->name, CLUSTER_NAMELEN);
nested_elements++;
}
setDeferredMultiBulkLength(c, nested_replylen, nested_elements);
setDeferredArrayLen(c, nested_replylen, nested_elements);
num_masters++;
}
}
}
dictReleaseIterator(di);
setDeferredMultiBulkLength(c, slot_replylen, num_masters);
setDeferredArrayLen(c, slot_replylen, num_masters);
}
void clusterCommand(client *c) {
......@@ -4183,7 +4200,7 @@ void clusterCommand(client *c) {
"COUNT-failure-reports <node-id> -- Return number of failure reports for <node-id>.",
"COUNTKEYSINSLOT <slot> - Return the number of keys in <slot>.",
"DELSLOTS <slot> [slot ...] -- Delete slots information from current node.",
"FAILOVER [force|takeover] -- Promote current slave node to being a master.",
"FAILOVER [force|takeover] -- Promote current replica node to being a master.",
"FORGET <node-id> -- Remove a node from the cluster.",
"GETKEYSINSLOT <slot> <count> -- Return key names stored by current node in a slot.",
"FLUSHSLOTS -- Delete current node own slots information.",
......@@ -4193,11 +4210,11 @@ void clusterCommand(client *c) {
"MYID -- Return the node id.",
"NODES -- Return cluster configuration seen by node. Output format:",
" <id> <ip:port> <flags> <master> <pings> <pongs> <epoch> <link> <slot> ... <slot>",
"REPLICATE <node-id> -- Configure current node as slave to <node-id>.",
"REPLICATE <node-id> -- Configure current node as replica to <node-id>.",
"RESET [hard|soft] -- Reset current node (default: soft).",
"SET-config-epoch <epoch> - Set config epoch of current node.",
"SETSLOT <slot> (importing|migrating|stable|node <node-id>) -- Set slot state.",
"SLAVES <node-id> -- Return <node-id> slaves.",
"REPLICAS <node-id> -- Return <node-id> replicas.",
"SLOTS -- Return information about slots range mappings. Each range is made of:",
" start, end, master and replicas IP addresses, ports and ids",
NULL
......@@ -4531,7 +4548,7 @@ NULL
keys = zmalloc(sizeof(robj*)*maxkeys);
numkeys = getKeysInSlot(slot, keys, maxkeys);
addReplyMultiBulkLen(c,numkeys);
addReplyArrayLen(c,numkeys);
for (j = 0; j < numkeys; j++) {
addReplyBulk(c,keys[j]);
decrRefCount(keys[j]);
......@@ -4574,7 +4591,7 @@ NULL
/* Can't replicate a slave. */
if (nodeIsSlave(n)) {
addReplyError(c,"I can only replicate a master, not a slave.");
addReplyError(c,"I can only replicate a master, not a replica.");
return;
}
......@@ -4593,7 +4610,8 @@ NULL
clusterSetMaster(n);
clusterDoBeforeSleep(CLUSTER_TODO_UPDATE_STATE|CLUSTER_TODO_SAVE_CONFIG);
addReply(c,shared.ok);
} else if (!strcasecmp(c->argv[1]->ptr,"slaves") && c->argc == 3) {
} else if ((!strcasecmp(c->argv[1]->ptr,"slaves") ||
!strcasecmp(c->argv[1]->ptr,"replicas")) && c->argc == 3) {
/* CLUSTER SLAVES <NODE ID> */
clusterNode *n = clusterLookupNode(c->argv[2]->ptr);
int j;
......@@ -4609,7 +4627,7 @@ NULL
return;
}
addReplyMultiBulkLen(c,n->numslaves);
addReplyArrayLen(c,n->numslaves);
for (j = 0; j < n->numslaves; j++) {
sds ni = clusterGenNodeDescription(n->slaves[j]);
addReplyBulkCString(c,ni);
......@@ -4647,10 +4665,10 @@ NULL
/* Check preconditions. */
if (nodeIsMaster(myself)) {
addReplyError(c,"You should send CLUSTER FAILOVER to a slave");
addReplyError(c,"You should send CLUSTER FAILOVER to a replica");
return;
} else if (myself->slaveof == NULL) {
addReplyError(c,"I'm a slave but my master is unknown to me");
addReplyError(c,"I'm a replica but my master is unknown to me");
return;
} else if (!force &&
(nodeFailed(myself->slaveof) ||
......@@ -4818,7 +4836,7 @@ void dumpCommand(client *c) {
/* Check if the key is here. */
if ((o = lookupKeyRead(c->db,c->argv[1])) == NULL) {
addReply(c,shared.nullbulk);
addReplyNull(c);
return;
}
......@@ -5146,6 +5164,11 @@ try_again:
serverAssertWithInfo(c,NULL,rioWriteBulkLongLong(&cmd,dbid));
}
int non_expired = 0; /* Number of keys that we'll find non expired.
Note that serializing large keys may take some time
so certain keys that were found non expired by the
lookupKey() function, may be expired later. */
/* Create RESTORE payload and generate the protocol to call the command. */
for (j = 0; j < num_keys; j++) {
long long ttl = 0;
......@@ -5153,8 +5176,17 @@ try_again:
if (expireat != -1) {
ttl = expireat-mstime();
if (ttl < 0) {
continue;
}
if (ttl < 1) ttl = 1;
}
/* Relocate valid (non expired) keys into the array in successive
* positions to remove holes created by the keys that were present
* in the first lookup but are now expired after the second lookup. */
kv[non_expired++] = kv[j];
serverAssertWithInfo(c,NULL,
rioWriteBulkCount(&cmd,'*',replace ? 5 : 4));
......@@ -5182,6 +5214,9 @@ try_again:
serverAssertWithInfo(c,NULL,rioWriteBulkString(&cmd,"REPLACE",7));
}
/* Fix the actual number of keys we are migrating. */
num_keys = non_expired;
/* Transfer the query to the other node in 64K chunks. */
errno = 0;
{
......@@ -5217,6 +5252,10 @@ try_again:
int socket_error = 0;
int del_idx = 1; /* Index of the key argument for the replicated DEL op. */
/* Allocate the new argument vector that will replace the current command,
* to propagate the MIGRATE as a DEL command (if no COPY option was given).
* We allocate num_keys+1 because the additional argument is for "DEL"
* command name itself. */
if (!copy) newargv = zmalloc(sizeof(robj*)*(num_keys+1));
for (j = 0; j < num_keys; j++) {
......@@ -5417,9 +5456,17 @@ clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, in
multiCmd mc;
int i, slot = 0, migrating_slot = 0, importing_slot = 0, missing_keys = 0;
/* Allow any key to be set if a module disabled cluster redirections. */
if (server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_REDIRECTION)
return myself;
/* Set error code optimistically for the base case. */
if (error_code) *error_code = CLUSTER_REDIR_NONE;
/* Modules can turn off Redis Cluster redirection: this is useful
* when writing a module that implements a completely different
* distributed system. */
/* We handle all the cases as if they were EXEC commands, so we have
* a common code path for everything */
if (cmd->proc == execCommand) {
......
......@@ -100,6 +100,13 @@ typedef struct clusterLink {
#define CLUSTERMSG_TYPE_MODULE 9 /* Module cluster API message. */
#define CLUSTERMSG_TYPE_COUNT 10 /* Total number of message types. */
/* Flags that a module can set in order to prevent certain Redis Cluster
* features from being enabled. Useful when implementing a different distributed
* system on top of the Redis Cluster message bus, using modules.
#define CLUSTER_MODULE_FLAG_NONE 0
#define CLUSTER_MODULE_FLAG_NO_FAILOVER (1<<1)
#define CLUSTER_MODULE_FLAG_NO_REDIRECTION (1<<2)
/* This structure represent elements of node->fail_reports. */
typedef struct clusterNodeFailReport {
struct clusterNode *node; /* Node reporting the failure condition. */
......
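On the module side these flags are meant to be set through the modules API. A hedged sketch of a module that opts out of both failover and redirection follows; the module name "mydistsys" is made up, and RedisModule_SetClusterFlags together with the REDISMODULE_CLUSTER_FLAG_* constants are assumed to be the public counterparts of the defines above, as used in the bundled hello* module examples.

    #include "redismodule.h"

    int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
        REDISMODULE_NOT_USED(argv);
        REDISMODULE_NOT_USED(argc);
        if (RedisModule_Init(ctx,"mydistsys",1,REDISMODULE_APIVER_1) == REDISMODULE_ERR)
            return REDISMODULE_ERR;
        /* Ask Redis Cluster not to fail over this node and not to redirect
         * clients based on hash slots, as described in the comment above. */
        RedisModule_SetClusterFlags(ctx,
            REDISMODULE_CLUSTER_FLAG_NO_FAILOVER |
            REDISMODULE_CLUSTER_FLAG_NO_REDIRECTION);
        return REDISMODULE_OK;
    }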
......@@ -62,7 +62,9 @@
#endif
/* Test for backtrace() */
#if defined(__APPLE__) || (defined(__linux__) && defined(__GLIBC__))
#if defined(__APPLE__) || (defined(__linux__) && defined(__GLIBC__)) || \
defined(__FreeBSD__) || (defined(__OpenBSD__) && defined(USE_BACKTRACE))\
|| defined(__DragonFly__)
#define HAVE_BACKTRACE 1
#endif
......
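HAVE_BACKTRACE, now also defined for FreeBSD, DragonFly and (with USE_BACKTRACE) OpenBSD, gates the backtrace() API that the Makefile links via -lexecinfo on those systems. A minimal sketch of the kind of use it enables (the function name is illustrative):

    #ifdef HAVE_BACKTRACE
    #include <execinfo.h>

    /* Write the current call stack to stderr, symbolizing what it can. */
    static void dump_stack(void) {
        void *frames[64];
        int n = backtrace(frames, 64);
        backtrace_symbols_fd(frames, n, 2 /* stderr */);
    }
    #endif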
......@@ -739,6 +739,30 @@ unsigned int dictGetSomeKeys(dict *d, dictEntry **des, unsigned int count) {
return stored;
}
/* This is like dictGetRandomKey() from the POV of the API, but will do more
* work to ensure a better distribution of the returned element.
*
* This function improves the distribution because the dictGetRandomKey()
* problem is that it selects a random bucket, then it selects a random
* element from the chain in the bucket. However elements being in different
* chain lengths will have different probabilities of being reported. With
* this function instead what we do is to consider a "linear" range of the table
* that may be constituted of N buckets with chains of different lengths
* appearing one after the other. Then we report a random element in the range.
* In this way we smooth away the problem of different chain lengths. */
#define GETFAIR_NUM_ENTRIES 15
dictEntry *dictGetFairRandomKey(dict *d) {
dictEntry *entries[GETFAIR_NUM_ENTRIES];
unsigned int count = dictGetSomeKeys(d,entries,GETFAIR_NUM_ENTRIES);
/* Note that dictGetSomeKeys() may return zero elements in an unlucky
* run, even if there are actually elements inside the hash table. So
* when we get zero, we call the true dictGetRandomKey() that will always
* yield the element if the hash table has at least one. */
if (count == 0) return dictGetRandomKey(d);
unsigned int idx = rand() % count;
return entries[idx];
}
/* Function to reverse bits. Algorithm from:
* http://graphics.stanford.edu/~seander/bithacks.html#ReverseParallel */
static unsigned long rev(unsigned long v) {
......
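To see the bias that dictGetFairRandomKey() smooths away, here is a stand-alone simulation (not Redis code) of the bucket-then-chain sampling used by dictGetRandomKey(), over a toy table with one chain of length 1 and one of length 3. The lone element comes back roughly three times as often as each element of the longer chain.

    #include <stdio.h>
    #include <stdlib.h>

    int main(void) {
        const int chain_len[2] = {1, 3};   /* two buckets with different chain lengths */
        long hits[4] = {0, 0, 0, 0};       /* element 0 lives in bucket 0; 1..3 in bucket 1 */
        const long samples = 1000000;

        for (long i = 0; i < samples; i++) {
            int b = rand() % 2;            /* pick a random bucket... */
            int e = rand() % chain_len[b]; /* ...then a random element in its chain */
            hits[b == 0 ? 0 : 1 + e]++;
        }
        for (int k = 0; k < 4; k++)
            printf("element %d reported with frequency %.3f\n",
                   k, (double)hits[k] / samples);
        return 0;
    }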
......@@ -166,6 +166,7 @@ dictIterator *dictGetSafeIterator(dict *d);
dictEntry *dictNext(dictIterator *iter);
void dictReleaseIterator(dictIterator *iter);
dictEntry *dictGetRandomKey(dict *d);
dictEntry *dictGetFairRandomKey(dict *d);
unsigned int dictGetSomeKeys(dict *d, dictEntry **des, unsigned int count);
void dictGetStats(char *buf, size_t bufsize, dict *d);
uint64_t dictGenHashFunction(const void *key, int len);
......
......@@ -364,7 +364,7 @@ size_t freeMemoryGetNotCountedMemory(void) {
}
}
if (server.aof_state != AOF_OFF) {
overhead += sdslen(server.aof_buf)+aofRewriteBufferSize();
overhead += sdsalloc(server.aof_buf)+aofRewriteBufferSize();
}
return overhead;
}
......@@ -444,6 +444,10 @@ int getMaxmemoryState(size_t *total, size_t *logical, size_t *tofree, float *lev
* Otherwise if we are over the memory limit, but not enough memory
* was freed to return back under the limit, the function returns C_ERR. */
int freeMemoryIfNeeded(void) {
/* By default replicas should ignore maxmemory
* and just be exact copies of their masters. */
if (server.masterhost && server.repl_slave_ignore_maxmemory) return C_OK;
size_t mem_reported, mem_tofree, mem_freed;
mstime_t latency, eviction_latency;
long long delta;
......@@ -618,3 +622,14 @@ cant_free:
return C_ERR;
}
/* This is a wrapper for freeMemoryIfNeeded() that only really calls the
* function if the conditions to do so safely are met right now:
*
* - There must be no script in a timeout condition.
* - We must not be loading data right now.
*/
int freeMemoryIfNeededAndSafe(void) {
if (server.lua_timedout || server.loading) return C_OK;
return freeMemoryIfNeeded();
}
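A hedged sketch of the intended call site for the wrapper, simplified from the command-processing path; the helper name is made up, while server.maxmemory, C_OK and freeMemoryIfNeededAndSafe() are the primitives shown or referenced above.

    #include "server.h"

    /* Returns 1 if the command may proceed, 0 if we are still over the memory
     * limit after (safely) attempting to free memory. */
    static int maybeEvictBeforeCommand(void) {
        if (server.maxmemory == 0) return 1;   /* no limit configured */
        return freeMemoryIfNeededAndSafe() == C_OK;
    }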
......@@ -466,7 +466,7 @@ void georadiusGeneric(client *c, int flags) {
/* Look up the requested zset */
robj *zobj = NULL;
if ((zobj = lookupKeyReadOrReply(c, key, shared.emptymultibulk)) == NULL ||
if ((zobj = lookupKeyReadOrReply(c, key, shared.null[c->resp])) == NULL ||
checkType(c, zobj, OBJ_ZSET)) {
return;
}
......@@ -566,7 +566,7 @@ void georadiusGeneric(client *c, int flags) {
/* If no matching results, the user gets an empty reply. */
if (ga->used == 0 && storekey == NULL) {
addReply(c, shared.emptymultibulk);
addReplyNull(c);
geoArrayFree(ga);
return;
}
......@@ -597,11 +597,11 @@ void georadiusGeneric(client *c, int flags) {
if (withhash)
option_length++;
/* The multibulk len we send is exactly result_length. The result is
/* The array len we send is exactly result_length. The result is
* either all strings of just zset members *or* a nested multi-bulk
* reply containing the zset member string _and_ all the additional
* options the user enabled for this request. */
addReplyMultiBulkLen(c, returned_items);
addReplyArrayLen(c, returned_items);
/* Finally send results back to the caller */
int i;
......@@ -613,7 +613,7 @@ void georadiusGeneric(client *c, int flags) {
* as a nested multi-bulk. Add 1 to account for result value
* itself. */
if (option_length)
addReplyMultiBulkLen(c, option_length + 1);
addReplyArrayLen(c, option_length + 1);
addReplyBulkSds(c,gp->member);
gp->member = NULL;
......@@ -625,7 +625,7 @@ void georadiusGeneric(client *c, int flags) {
addReplyLongLong(c, gp->score);
if (withcoords) {
addReplyMultiBulkLen(c, 2);
addReplyArrayLen(c, 2);
addReplyHumanLongDouble(c, gp->longitude);
addReplyHumanLongDouble(c, gp->latitude);
}
......@@ -706,11 +706,11 @@ void geohashCommand(client *c) {
/* Geohash elements one after the other, using a null bulk reply for
* missing elements. */
addReplyMultiBulkLen(c,c->argc-2);
addReplyArrayLen(c,c->argc-2);
for (j = 2; j < c->argc; j++) {
double score;
if (!zobj || zsetScore(zobj, c->argv[j]->ptr, &score) == C_ERR) {
addReply(c,shared.nullbulk);
addReplyNull(c);
} else {
/* The internal format we use for geocoding is a bit different
* than the standard, since we use as initial latitude range
......@@ -721,7 +721,7 @@ void geohashCommand(client *c) {
/* Decode... */
double xy[2];
if (!decodeGeohash(score,xy)) {
addReply(c,shared.nullbulk);
addReplyNull(c);
continue;
}
......@@ -759,19 +759,19 @@ void geoposCommand(client *c) {
/* Report elements one after the other, using a null bulk reply for
* missing elements. */
addReplyMultiBulkLen(c,c->argc-2);
addReplyArrayLen(c,c->argc-2);
for (j = 2; j < c->argc; j++) {
double score;
if (!zobj || zsetScore(zobj, c->argv[j]->ptr, &score) == C_ERR) {
addReply(c,shared.nullmultibulk);
addReplyNullArray(c);
} else {
/* Decode... */
double xy[2];
if (!decodeGeohash(score,xy)) {
addReply(c,shared.nullmultibulk);
addReplyNullArray(c);
continue;
}
addReplyMultiBulkLen(c,2);
addReplyArrayLen(c,2);
addReplyHumanLongDouble(c,xy[0]);
addReplyHumanLongDouble(c,xy[1]);
}
......@@ -797,7 +797,7 @@ void geodistCommand(client *c) {
/* Look up the requested zset */
robj *zobj = NULL;
if ((zobj = lookupKeyReadOrReply(c, c->argv[1], shared.nullbulk))
if ((zobj = lookupKeyReadOrReply(c, c->argv[1], shared.null[c->resp]))
== NULL || checkType(c, zobj, OBJ_ZSET)) return;
/* Get the scores. We need both otherwise NULL is returned. */
......@@ -805,13 +805,13 @@ void geodistCommand(client *c) {
if (zsetScore(zobj, c->argv[2]->ptr, &score1) == C_ERR ||
zsetScore(zobj, c->argv[3]->ptr, &score2) == C_ERR)
{
addReply(c,shared.nullbulk);
addReplyNull(c);
return;
}
/* Decode & compute the distance. */
if (!decodeGeohash(score1,xyxy) || !decodeGeohash(score2,xyxy+2))
addReply(c,shared.nullbulk);
addReplyNull(c);
else
addReplyDoubleDistance(c,
geohashGetDistance(xyxy[0],xyxy[1],xyxy[2],xyxy[3]) / to_meter);
......