Commit d7971f96 authored by Oran Agra

Merge remote-tracking branch 'origin/unstable' into 7.0

parents d2b5a579 acfb4f7a
@@ -7,7 +7,7 @@ jobs:
 test-ubuntu-latest:
 runs-on: ubuntu-latest
 steps:
-- uses: actions/checkout@v2
+- uses: actions/checkout@v3
 - name: make
 # Fail build if there are warnings
 # build with TLS just for compilation coverage
@@ -22,7 +22,7 @@ jobs:
 test-sanitizer-address:
 runs-on: ubuntu-latest
 steps:
-- uses: actions/checkout@v2
+- uses: actions/checkout@v3
 - name: make
 run: make SANITIZER=address REDIS_CFLAGS='-Werror'
 - name: testprep
@@ -36,7 +36,7 @@ jobs:
 runs-on: ubuntu-latest
 container: debian:oldoldstable
 steps:
-- uses: actions/checkout@v2
+- uses: actions/checkout@v3
 - name: make
 run: |
 apt-get update && apt-get install -y build-essential
@@ -45,14 +45,14 @@ jobs:
 build-macos-latest:
 runs-on: macos-latest
 steps:
-- uses: actions/checkout@v2
+- uses: actions/checkout@v3
 - name: make
 run: make REDIS_CFLAGS='-Werror'
 build-32bit:
 runs-on: ubuntu-latest
 steps:
-- uses: actions/checkout@v2
+- uses: actions/checkout@v3
 - name: make
 run: |
 sudo apt-get update && sudo apt-get install libc6-dev-i386
@@ -61,7 +61,7 @@ jobs:
 build-libc-malloc:
 runs-on: ubuntu-latest
 steps:
-- uses: actions/checkout@v2
+- uses: actions/checkout@v3
 - name: make
 run: make REDIS_CFLAGS='-Werror' MALLOC=libc
@@ -69,7 +69,7 @@ jobs:
 runs-on: ubuntu-latest
 container: centos:7
 steps:
-- uses: actions/checkout@v2
+- uses: actions/checkout@v3
 - name: make
 run: |
 yum -y install gcc make
...
@@ -4,7 +4,7 @@ on:
 push:
 pull_request:
 schedule:
-# run weekly new vulnerability was added to the the database
+# run weekly new vulnerability was added to the database
 - cron: '0 0 * * 0'
 jobs:
@@ -20,7 +20,7 @@ jobs:
 steps:
 - name: Checkout repository
-uses: actions/checkout@v2
+uses: actions/checkout@v3
 - name: Initialize CodeQL
 uses: github/codeql-action/init@v1
...
@@ -12,7 +12,7 @@ jobs:
 if: github.event_name != 'schedule' || github.repository == 'redis/redis'
 timeout-minutes: 14400
 steps:
-- uses: actions/checkout@v2
+- uses: actions/checkout@v3
 - name: Build
 run: make REDIS_CFLAGS=-Werror
 - name: Start redis-server
@@ -36,7 +36,7 @@ jobs:
 if: github.event_name != 'schedule' || github.repository == 'redis/redis'
 timeout-minutes: 14400
 steps:
-- uses: actions/checkout@v2
+- uses: actions/checkout@v3
 - name: Build
 run: make REDIS_CFLAGS=-Werror
 - name: Start redis-server
@@ -63,7 +63,7 @@ jobs:
 if: github.event_name != 'schedule' || github.repository == 'redis/redis'
 timeout-minutes: 14400
 steps:
-- uses: actions/checkout@v2
+- uses: actions/checkout@v3
 - name: Build
 run: make REDIS_CFLAGS=-Werror
 - name: Start redis-server
...
@@ -16,10 +16,10 @@ jobs:
 steps:
 - name: Checkout repository
-uses: actions/checkout@v2
+uses: actions/checkout@v3
 - name: pip cache
-uses: actions/cache@v2
+uses: actions/cache@v3
 with:
 path: ~/.cache/pip
 key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}
...
@@ -126,7 +126,7 @@ protected-mode yes
 #
 # no - Block for any connection (remain immutable)
 # yes - Allow for any connection (no protection)
-# local - Allow only for local local connections. Ones originating from the
+# local - Allow only for local connections. Ones originating from the
 # IPv4 address (127.0.0.1), IPv6 address (::1) or Unix domain sockets.
 #
 # enable-protected-configs no
@@ -627,7 +627,7 @@ repl-diskless-sync-max-replicas 0
 #
 # In many cases the disk is slower than the network, and storing and loading
 # the RDB file may increase replication time (and even increase the master's
-# Copy on Write memory and salve buffers).
+# Copy on Write memory and replica buffers).
 # However, parsing the RDB file directly from the socket may mean that we have
 # to flush the contents of the current database before the full rdb was
 # received. For this reason we have the following options:
@@ -1224,7 +1224,7 @@ replica-lazy-flush no
 lazyfree-lazy-user-del no
-# FLUSHDB, FLUSHALL, and SCRIPT FLUSH support both asynchronous and synchronous
+# FLUSHDB, FLUSHALL, SCRIPT FLUSH and FUNCTION FLUSH support both asynchronous and synchronous
 # deletion, which can be controlled by passing the [SYNC|ASYNC] flags into the
 # commands. When neither flag is passed, this directive will be used to determine
 # if the data should be deleted asynchronously.
@@ -1287,7 +1287,7 @@ lazyfree-lazy-user-flush no
 # attempt to have background child processes killed before all others, and
 # replicas killed before masters.
 #
-# Redis supports three options:
+# Redis supports these options:
 #
 # no: Don't make changes to oom-score-adj (default).
 # yes: Alias to "relative" see below.
@@ -1640,7 +1640,7 @@ aof-timestamp-enabled no
 # cluster-replica-no-failover no
 # This option, when set to yes, allows nodes to serve read traffic while the
-# the cluster is in a down state, as long as it believes it owns the slots.
+# cluster is in a down state, as long as it believes it owns the slots.
 #
 # This is useful for two cases. The first case is for when an application
 # doesn't require consistency of data during node failures or network partitions.
@@ -1958,7 +1958,7 @@ activerehashing yes
 # The limit can be set differently for the three different classes of clients:
 #
 # normal -> normal clients including MONITOR clients
 # replica -> replica clients
 # pubsub -> clients subscribed to at least one pubsub channel or pattern
 #
 # The syntax of every client-output-buffer-limit directive is the following:
@@ -2164,7 +2164,7 @@ rdb-save-incremental-fsync yes
 # defragmentation process. If you are not sure about what they mean it is
 # a good idea to leave the defaults untouched.
-# Enabled active defragmentation
+# Active defragmentation is disabled by default
 # activedefrag no
 # Minimum amount of fragmentation waste to start active defrag
...
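The lazyfree hunk above now lists FUNCTION FLUSH next to FLUSHDB, FLUSHALL and SCRIPT FLUSH. As a rough illustration (not part of this commit), a hiredis client can pass the SYNC/ASYNC flag explicitly; host, port and the choice of flags below are placeholder assumptions, and when no flag is given the lazyfree-lazy-user-flush directive decides the behavior.

#include <stdio.h>
#include <hiredis/hiredis.h>

int main(void) {
    redisContext *c = redisConnect("127.0.0.1", 6379); /* placeholder server */
    if (c == NULL || c->err) return 1;

    /* Explicit ASYNC: keys are reclaimed by a background thread. */
    redisReply *r = redisCommand(c, "FLUSHALL ASYNC");
    if (r) { printf("FLUSHALL ASYNC -> %s\n", r->str); freeReplyObject(r); }

    /* FUNCTION FLUSH accepts the same SYNC/ASYNC flags in Redis 7.0. */
    r = redisCommand(c, "FUNCTION FLUSH SYNC");
    if (r) { printf("FUNCTION FLUSH SYNC -> %s\n", r->str); freeReplyObject(r); }

    redisFree(c);
    return 0;
}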
@@ -20,6 +20,7 @@ $TCLSH tests/test_helper.tcl \
 --single unit/moduleapi/fork \
 --single unit/moduleapi/testrdb \
 --single unit/moduleapi/infotest \
+--single unit/moduleapi/moduleconfigs \
 --single unit/moduleapi/infra \
 --single unit/moduleapi/propagate \
 --single unit/moduleapi/hooks \
...
@@ -120,10 +120,7 @@ typedef struct {
 * understand if the command can be executed. */
 uint64_t allowed_commands[USER_COMMAND_BITS_COUNT/64];
 /* allowed_firstargs is used by ACL rules to block access to a command unless a
-* specific argv[1] is given (or argv[2] in case it is applied on a sub-command).
-* For example, a user can use the rule "-select +select|0" to block all
-* SELECT commands, except "SELECT 0".
-* And for a sub-command: "+config -config|set +config|set|loglevel"
+* specific argv[1] is given.
 *
 * For each command ID (corresponding to the command bit set in allowed_commands),
 * This array points to an array of SDS strings, terminated by a NULL pointer,
@@ -1531,6 +1528,37 @@ static int ACLSelectorCheckKey(aclSelector *selector, const char *key, int keyle
 return ACL_DENIED_KEY;
 }
+/* Checks if the provided selector selector has access specified in flags
+* to all keys in the keyspace. For example, CMD_KEY_READ access requires either
+* '%R~*', '~*', or allkeys to be granted to the selector. Returns 1 if all
+* the access flags are satisfied with this selector or 0 otherwise.
+*/
+static int ACLSelectorHasUnrestrictedKeyAccess(aclSelector *selector, int flags) {
+/* The selector can access any key */
+if (selector->flags & SELECTOR_FLAG_ALLKEYS) return 1;
+listIter li;
+listNode *ln;
+listRewind(selector->patterns,&li);
+int access_flags = 0;
+if (flags & CMD_KEY_ACCESS) access_flags |= ACL_READ_PERMISSION;
+if (flags & CMD_KEY_INSERT) access_flags |= ACL_WRITE_PERMISSION;
+if (flags & CMD_KEY_DELETE) access_flags |= ACL_WRITE_PERMISSION;
+if (flags & CMD_KEY_UPDATE) access_flags |= ACL_WRITE_PERMISSION;
+/* Test this key against every pattern. */
+while((ln = listNext(&li))) {
+keyPattern *pattern = listNodeValue(ln);
+if ((pattern->flags & access_flags) != access_flags)
+continue;
+if (!strcmp(pattern->pattern,"*")) {
+return 1;
+}
+}
+return 0;
+}
 /* Checks a channel against a provided list of channels. The is_pattern
 * argument should only be used when subscribing (not when publishing)
 * and controls whether the input channel is evaluated as a channel pattern
@@ -1675,6 +1703,39 @@ int ACLUserCheckKeyPerm(user *u, const char *key, int keylen, int flags) {
 return ACL_DENIED_KEY;
 }
+/* Checks if the user can execute the given command with the added restriction
+* it must also have the access specified in flags to any key in the key space.
+* For example, CMD_KEY_READ access requires either '%R~*', '~*', or allkeys to be
+* granted in addition to the access required by the command. Returns 1
+* if the user has access or 0 otherwise.
+*/
+int ACLUserCheckCmdWithUnrestrictedKeyAccess(user *u, struct redisCommand *cmd, robj **argv, int argc, int flags) {
+listIter li;
+listNode *ln;
+int local_idxptr;
+/* If there is no associated user, the connection can run anything. */
+if (u == NULL) return 1;
+/* For multiple selectors, we cache the key result in between selector
+* calls to prevent duplicate lookups. */
+aclKeyResultCache cache;
+initACLKeyResultCache(&cache);
+/* Check each selector sequentially */
+listRewind(u->selectors,&li);
+while((ln = listNext(&li))) {
+aclSelector *s = (aclSelector *) listNodeValue(ln);
+int acl_retval = ACLSelectorCheckCmd(s, cmd, argv, argc, &local_idxptr, &cache);
+if (acl_retval == ACL_OK && ACLSelectorHasUnrestrictedKeyAccess(s, flags)) {
+cleanupACLKeyResultCache(&cache);
+return 1;
+}
+}
+cleanupACLKeyResultCache(&cache);
+return 0;
+}
 /* Check if the channel can be accessed by the client according to
 * the ACLs associated with the specified user.
 *
@@ -2411,6 +2472,22 @@ void addACLLogEntry(client *c, int reason, int context, int argpos, sds username
 }
 }
+const char* getAclErrorMessage(int acl_res) {
+/* Notice that a variant of this code also exists on aclCommand so
+* it also need to be updated on changed. */
+switch (acl_res) {
+case ACL_DENIED_CMD:
+return "can't run this command or subcommand";
+case ACL_DENIED_KEY:
+return "can't access at least one of the keys mentioned in the command arguments";
+case ACL_DENIED_CHANNEL:
+return "can't publish to the channel mentioned in the command";
+default:
+return "lacking the permissions for the command";
+}
+serverPanic("Reached deadcode on getAclErrorMessage");
+}
 /* =============================================================================
 * ACL related commands
 * ==========================================================================*/
@@ -2793,13 +2870,22 @@ setuser_cleanup:
 return;
 }
+if ((cmd->arity > 0 && cmd->arity != c->argc-3) ||
+(c->argc-3 < -cmd->arity))
+{
+addReplyErrorFormat(c,"wrong number of arguments for '%s' command", cmd->fullname);
+return;
+}
 int idx;
 int result = ACLCheckAllUserCommandPerm(u, cmd, c->argv + 3, c->argc - 3, &idx);
+/* Notice that a variant of this code also exists on getAclErrorMessage so
+* it also need to be updated on changed. */
 if (result != ACL_OK) {
 sds err = sdsempty();
 if (result == ACL_DENIED_CMD) {
 err = sdscatfmt(err, "This user has no permissions to run "
-"the '%s' command", c->cmd->fullname);
+"the '%s' command", cmd->fullname);
 } else if (result == ACL_DENIED_KEY) {
 err = sdscatfmt(err, "This user has no permissions to access "
 "the '%s' key", c->argv[idx + 3]->ptr);
...
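The new ACL helpers above decide whether a selector grants unrestricted key access of a given kind. The following standalone sketch (not Redis code; all type names and constant values are illustrative stand-ins, not the real Redis definitions) shows the core idea: command key flags are folded into read/write permission bits, and access is unrestricted only if a '*' pattern carries all of those bits.

#include <stdio.h>
#include <string.h>

/* Illustrative flag values only; the real constants live in Redis headers. */
#define CMD_KEY_ACCESS  (1<<0)
#define CMD_KEY_INSERT  (1<<1)
#define CMD_KEY_DELETE  (1<<2)
#define CMD_KEY_UPDATE  (1<<3)
#define ACL_READ_PERMISSION  (1<<0)
#define ACL_WRITE_PERMISSION (1<<1)

typedef struct { const char *pattern; int flags; } keyPatternSketch;

/* Unrestricted access means a '*' pattern grants every permission bit
 * implied by the command's key flags. */
static int hasUnrestrictedKeyAccess(const keyPatternSketch *pats, int npats, int cmd_flags) {
    int access_flags = 0;
    if (cmd_flags & CMD_KEY_ACCESS) access_flags |= ACL_READ_PERMISSION;
    if (cmd_flags & (CMD_KEY_INSERT|CMD_KEY_DELETE|CMD_KEY_UPDATE))
        access_flags |= ACL_WRITE_PERMISSION;
    for (int i = 0; i < npats; i++) {
        if ((pats[i].flags & access_flags) != access_flags) continue;
        if (!strcmp(pats[i].pattern, "*")) return 1; /* like '~*' or '%R~*' */
    }
    return 0;
}

int main(void) {
    keyPatternSketch read_all[]  = { { "*",      ACL_READ_PERMISSION } }; /* like '%R~*' */
    keyPatternSketch some_keys[] = { { "user:*", ACL_READ_PERMISSION } };
    printf("%d\n", hasUnrestrictedKeyAccess(read_all, 1, CMD_KEY_ACCESS));  /* 1 */
    printf("%d\n", hasUnrestrictedKeyAccess(read_all, 1, CMD_KEY_UPDATE));  /* 0: writes not granted */
    printf("%d\n", hasUnrestrictedKeyAccess(some_keys, 1, CMD_KEY_ACCESS)); /* 0: not all keys */
    return 0;
}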
@@ -813,10 +813,10 @@ int openNewIncrAofForAppend(void) {
 * AOFs has not reached the limit threshold.
 * */
 #define AOF_REWRITE_LIMITE_THRESHOLD 3
-#define AOF_REWRITE_LIMITE_NAX_MINUTES 60 /* 1 hour */
+#define AOF_REWRITE_LIMITE_MAX_MINUTES 60 /* 1 hour */
 int aofRewriteLimited(void) {
 int limit = 0;
-static int limit_deley_minutes = 0;
+static int limit_delay_minutes = 0;
 static time_t next_rewrite_time = 0;
 unsigned long incr_aof_num = listLength(server.aof_manifest->incr_aof_list);
@@ -824,25 +824,25 @@ int aofRewriteLimited(void) {
 if (server.unixtime < next_rewrite_time) {
 limit = 1;
 } else {
-if (limit_deley_minutes == 0) {
+if (limit_delay_minutes == 0) {
 limit = 1;
-limit_deley_minutes = 1;
+limit_delay_minutes = 1;
 } else {
-limit_deley_minutes *= 2;
+limit_delay_minutes *= 2;
 }
-if (limit_deley_minutes > AOF_REWRITE_LIMITE_NAX_MINUTES) {
-limit_deley_minutes = AOF_REWRITE_LIMITE_NAX_MINUTES;
+if (limit_delay_minutes > AOF_REWRITE_LIMITE_MAX_MINUTES) {
+limit_delay_minutes = AOF_REWRITE_LIMITE_MAX_MINUTES;
 }
-next_rewrite_time = server.unixtime + limit_deley_minutes * 60;
+next_rewrite_time = server.unixtime + limit_delay_minutes * 60;
 serverLog(LL_WARNING,
 "Background AOF rewrite has repeatedly failed %ld times and triggered the limit, will retry in %d minutes",
-incr_aof_num, limit_deley_minutes);
+incr_aof_num, limit_delay_minutes);
 }
 } else {
-limit_deley_minutes = 0;
+limit_delay_minutes = 0;
 next_rewrite_time = 0;
 }
@@ -2142,19 +2142,9 @@ static int rewriteFunctions(rio *aof) {
 dictEntry *entry = NULL;
 while ((entry = dictNext(iter))) {
 functionLibInfo *li = dictGetVal(entry);
-if (li->desc) {
-if (rioWrite(aof, "*7\r\n", 4) == 0) goto werr;
-} else {
-if (rioWrite(aof, "*5\r\n", 4) == 0) goto werr;
-}
+if (rioWrite(aof, "*3\r\n", 4) == 0) goto werr;
 char function_load[] = "$8\r\nFUNCTION\r\n$4\r\nLOAD\r\n";
 if (rioWrite(aof, function_load, sizeof(function_load) - 1) == 0) goto werr;
-if (rioWriteBulkString(aof, li->ei->name, sdslen(li->ei->name)) == 0) goto werr;
-if (rioWriteBulkString(aof, li->name, sdslen(li->name)) == 0) goto werr;
-if (li->desc) {
-if (rioWriteBulkString(aof, "description", 11) == 0) goto werr;
-if (rioWriteBulkString(aof, li->desc, sdslen(li->desc)) == 0) goto werr;
-}
 if (rioWriteBulkString(aof, li->code, sdslen(li->code)) == 0) goto werr;
 }
 dictReleaseIterator(iter);
...
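aofRewriteLimited() above applies an exponential back-off once background AOF rewrites keep failing. The standalone sketch below (not Redis code, and ignoring the AOF_REWRITE_LIMITE_THRESHOLD pre-condition) just prints how the retry delay grows: one minute, then doubling, capped at 60 minutes.

#include <stdio.h>

#define REWRITE_LIMIT_MAX_MINUTES 60 /* mirrors AOF_REWRITE_LIMITE_MAX_MINUTES */

int main(void) {
    int delay_minutes = 0;
    for (int failure = 1; failure <= 10; failure++) {
        /* First failure waits 1 minute, every further failure doubles the wait. */
        if (delay_minutes == 0) delay_minutes = 1;
        else delay_minutes *= 2;
        if (delay_minutes > REWRITE_LIMIT_MAX_MINUTES)
            delay_minutes = REWRITE_LIMIT_MAX_MINUTES;
        printf("failure %2d -> retry in %2d minutes\n", failure, delay_minutes);
    }
    return 0; /* prints 1, 2, 4, 8, 16, 32, 60, 60, ... */
}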
@@ -478,19 +478,21 @@ int getBitfieldTypeFromArgument(client *c, robj *o, int *sign, int *bits) {
 * so that the 'maxbit' bit can be addressed. The object is finally
 * returned. Otherwise if the key holds a wrong type NULL is returned and
 * an error is sent to the client. */
-robj *lookupStringForBitCommand(client *c, uint64_t maxbit, int *created) {
+robj *lookupStringForBitCommand(client *c, uint64_t maxbit, int *dirty) {
 size_t byte = maxbit >> 3;
 robj *o = lookupKeyWrite(c->db,c->argv[1]);
 if (checkType(c,o,OBJ_STRING)) return NULL;
+if (dirty) *dirty = 0;
 if (o == NULL) {
-if (created) *created = 1;
 o = createObject(OBJ_STRING,sdsnewlen(NULL, byte+1));
 dbAdd(c->db,c->argv[1],o);
+if (dirty) *dirty = 1;
 } else {
-if (created) *created = 0;
 o = dbUnshareStringValue(c->db,c->argv[1],o);
+size_t oldlen = sdslen(o->ptr);
 o->ptr = sdsgrowzero(o->ptr,byte+1);
+if (dirty && oldlen != sdslen(o->ptr)) *dirty = 1;
 }
 return o;
 }
@@ -547,8 +549,8 @@ void setbitCommand(client *c) {
 return;
 }
-int created;
-if ((o = lookupStringForBitCommand(c,bitoffset,&created)) == NULL) return;
+int dirty;
+if ((o = lookupStringForBitCommand(c,bitoffset,&dirty)) == NULL) return;
 /* Get current values */
 byte = bitoffset >> 3;
@@ -556,10 +558,10 @@ void setbitCommand(client *c) {
 bit = 7 - (bitoffset & 0x7);
 bitval = byteval & (1 << bit);
-/* Either it is newly created, or the bit changes before and after.
+/* Either it is newly created, changed length, or the bit changes before and after.
 * Note that the bitval here is actually a decimal number.
 * So we need to use `!!` to convert it to 0 or 1 for comparison. */
-if (created || (!!bitval != on)) {
+if (dirty || (!!bitval != on)) {
 /* Update byte with new bit value. */
 byteval &= ~(1 << bit);
 byteval |= ((on & 0x1) << bit);
@@ -1028,7 +1030,7 @@ struct bitfieldOp {
 void bitfieldGeneric(client *c, int flags) {
 robj *o;
 uint64_t bitoffset;
-int j, numops = 0, changes = 0, created = 0;
+int j, numops = 0, changes = 0, dirty = 0;
 struct bitfieldOp *ops = NULL; /* Array of ops to execute at end. */
 int owtype = BFOVERFLOW_WRAP; /* Overflow type. */
 int readonly = 1;
@@ -1122,7 +1124,7 @@ void bitfieldGeneric(client *c, int flags) {
 /* Lookup by making room up to the farthest bit reached by
 * this operation. */
 if ((o = lookupStringForBitCommand(c,
-highest_write_offset,&created)) == NULL) {
+highest_write_offset,&dirty)) == NULL) {
 zfree(ops);
 return;
 }
@@ -1172,7 +1174,7 @@ void bitfieldGeneric(client *c, int flags) {
 setSignedBitfield(o->ptr,thisop->offset,
 thisop->bits,newval);
-if (created || (oldval != newval))
+if (dirty || (oldval != newval))
 changes++;
 } else {
 addReplyNull(c);
@@ -1204,7 +1206,7 @@ void bitfieldGeneric(client *c, int flags) {
 setUnsignedBitfield(o->ptr,thisop->offset,
 thisop->bits,newval);
-if (created || (oldval != newval))
+if (dirty || (oldval != newval))
 changes++;
 } else {
 addReplyNull(c);
...
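The bitops changes above replace the created flag with a broader dirty flag: a write counts as a modification when the key is newly created, when the string had to grow, or when a bit actually flips. Below is a standalone sketch of that rule only, using plain calloc/realloc instead of sds and with no relation to the real Redis data structures.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { unsigned char *buf; size_t len; } str;

/* Grow the buffer with zero padding so bit `bitoffset` is addressable,
 * reporting whether the length changed (one of the "dirty" conditions). */
static int grow_for_bit(str *s, size_t bitoffset) {
    size_t needed = (bitoffset >> 3) + 1;
    if (needed <= s->len) return 0;
    unsigned char *nb = realloc(s->buf, needed);
    if (!nb) exit(1);
    memset(nb + s->len, 0, needed - s->len);
    s->buf = nb;
    s->len = needed;
    return 1;
}

/* Returns 1 if the write is considered a modification. */
static int setbit_dirty(str *s, size_t bitoffset, int on) {
    int dirty = grow_for_bit(s, bitoffset);
    size_t byte = bitoffset >> 3;
    int bit = 7 - (bitoffset & 0x7);
    int oldbit = (s->buf[byte] >> bit) & 1;
    if (dirty || oldbit != on) {
        s->buf[byte] = (unsigned char)((s->buf[byte] & ~(1 << bit)) | ((on & 1) << bit));
        return 1;
    }
    return 0;
}

int main(void) {
    str s = { calloc(1, 1), 1 };
    printf("%d\n", setbit_dirty(&s, 7, 0));   /* 0: same length, bit unchanged */
    printf("%d\n", setbit_dirty(&s, 100, 0)); /* 1: buffer grew, dirty although the bit stays 0 */
    printf("%d\n", setbit_dirty(&s, 3, 1));   /* 1: bit flipped */
    free(s.buf);
    return 0;
}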
@@ -141,12 +141,7 @@ void processUnblockedClients(void) {
 * the code is conceptually more correct this way. */
 if (!(c->flags & CLIENT_BLOCKED)) {
 /* If we have a queued command, execute it now. */
-if (processPendingCommandsAndResetClient(c) == C_OK) {
-/* Now process client if it has more data in it's buffer. */
-if (c->querybuf && sdslen(c->querybuf) > 0) {
-if (processInputBuffer(c) == C_ERR) c = NULL;
-}
-} else {
+if (processPendingCommandAndInputBuffer(c) == C_ERR) {
 c = NULL;
 }
 }
@@ -204,7 +199,7 @@ void unblockClient(client *c) {
 * we do not do it immediately after the command returns (when the
 * client got blocked) in order to be still able to access the argument
 * vector from module callbacks and updateStatsOnUnblock. */
-if (c->btype != BLOCKED_POSTPONE) {
+if (c->btype != BLOCKED_POSTPONE && c->btype != BLOCKED_SHUTDOWN) {
 freeClientOriginalArgv(c);
 resetClient(c);
 }
@@ -288,25 +283,24 @@ void disconnectAllBlockedClients(void) {
 * when there may be clients blocked on a list key, and there may be new
 * data to fetch (the key is ready). */
 void serveClientsBlockedOnListKey(robj *o, readyList *rl) {
+/* Optimization: If no clients are in type BLOCKED_LIST,
+* we can skip this loop. */
+if (!server.blocked_clients_by_type[BLOCKED_LIST]) return;
 /* We serve clients in the same order they blocked for
 * this key, from the first blocked to the last. */
 dictEntry *de = dictFind(rl->db->blocking_keys,rl->key);
 if (de) {
 list *clients = dictGetVal(de);
-int numclients = listLength(clients);
-int deleted = 0;
-while(numclients--) {
-listNode *clientnode = listFirst(clients);
-client *receiver = clientnode->value;
-if (receiver->btype != BLOCKED_LIST) {
-/* Put at the tail, so that at the next call
-* we'll not run into it again. */
-listRotateHeadToTail(clients);
-continue;
-}
+listNode *ln;
+listIter li;
+listRewind(clients,&li);
+while((ln = listNext(&li))) {
+client *receiver = listNodeValue(ln);
+if (receiver->btype != BLOCKED_LIST) continue;
+int deleted = 0;
 robj *dstkey = receiver->bpop.target;
 int wherefrom = receiver->bpop.blockpos.wherefrom;
 int whereto = receiver->bpop.blockpos.whereto;
@@ -342,25 +336,24 @@ void serveClientsBlockedOnListKey(robj *o, readyList *rl) {
 * when there may be clients blocked on a sorted set key, and there may be new
 * data to fetch (the key is ready). */
 void serveClientsBlockedOnSortedSetKey(robj *o, readyList *rl) {
+/* Optimization: If no clients are in type BLOCKED_ZSET,
+* we can skip this loop. */
+if (!server.blocked_clients_by_type[BLOCKED_ZSET]) return;
 /* We serve clients in the same order they blocked for
 * this key, from the first blocked to the last. */
 dictEntry *de = dictFind(rl->db->blocking_keys,rl->key);
 if (de) {
 list *clients = dictGetVal(de);
-int numclients = listLength(clients);
-int deleted = 0;
-while (numclients--) {
-listNode *clientnode = listFirst(clients);
-client *receiver = clientnode->value;
-if (receiver->btype != BLOCKED_ZSET) {
-/* Put at the tail, so that at the next call
-* we'll not run into it again. */
-listRotateHeadToTail(clients);
-continue;
-}
+listNode *ln;
+listIter li;
+listRewind(clients,&li);
+while((ln = listNext(&li))) {
+client *receiver = listNodeValue(ln);
+if (receiver->btype != BLOCKED_ZSET) continue;
+int deleted = 0;
 long llen = zsetLength(o);
 long count = receiver->bpop.count;
 int where = receiver->bpop.blockpos.wherefrom;
@@ -407,6 +400,10 @@ void serveClientsBlockedOnSortedSetKey(robj *o, readyList *rl) {
 * when there may be clients blocked on a stream key, and there may be new
 * data to fetch (the key is ready). */
 void serveClientsBlockedOnStreamKey(robj *o, readyList *rl) {
+/* Optimization: If no clients are in type BLOCKED_STREAM,
+* we can skip this loop. */
+if (!server.blocked_clients_by_type[BLOCKED_STREAM]) return;
 dictEntry *de = dictFind(rl->db->blocking_keys,rl->key);
 stream *s = o->ptr;
@@ -520,30 +517,21 @@ unblock_receiver:
 * see if the key is really able to serve the client, and in that case,
 * unblock it. */
 void serveClientsBlockedOnKeyByModule(readyList *rl) {
-dictEntry *de;
 /* Optimization: If no clients are in type BLOCKED_MODULE,
 * we can skip this loop. */
 if (!server.blocked_clients_by_type[BLOCKED_MODULE]) return;
 /* We serve clients in the same order they blocked for
 * this key, from the first blocked to the last. */
-de = dictFind(rl->db->blocking_keys,rl->key);
+dictEntry *de = dictFind(rl->db->blocking_keys,rl->key);
 if (de) {
 list *clients = dictGetVal(de);
-int numclients = listLength(clients);
-while(numclients--) {
-listNode *clientnode = listFirst(clients);
-client *receiver = clientnode->value;
-/* Put at the tail, so that at the next call
-* we'll not run into it again: clients here may not be
-* ready to be served, so they'll remain in the list
-* sometimes. We want also be able to skip clients that are
-* not blocked for the MODULE type safely. */
-listRotateHeadToTail(clients);
+listNode *ln;
+listIter li;
+listRewind(clients,&li);
+while((ln = listNext(&li))) {
+client *receiver = listNodeValue(ln);
 if (receiver->btype != BLOCKED_MODULE) continue;
 /* Note that if *this* client cannot be served by this key,
@@ -566,6 +554,49 @@ void serveClientsBlockedOnKeyByModule(readyList *rl) {
 }
 }
+/* Helper function for handleClientsBlockedOnKeys(). This function is called
+* when there may be clients blocked, via XREADGROUP, on an existing stream which
+* was deleted. We need to unblock the clients in that case.
+* The idea is that a client that is blocked via XREADGROUP is different from
+* any other blocking type in the sense that it depends on the existence of both
+* the key and the group. Even if the key is deleted and then revived with XADD
+* it won't help any clients blocked on XREADGROUP because the group no longer
+* exist, so they would fail with -NOGROUP anyway.
+* The conclusion is that it's better to unblock these client (with error) upon
+* the deletion of the key, rather than waiting for the first XADD. */
+void unblockDeletedStreamReadgroupClients(readyList *rl) {
+/* Optimization: If no clients are in type BLOCKED_STREAM,
+* we can skip this loop. */
+if (!server.blocked_clients_by_type[BLOCKED_STREAM]) return;
+/* We serve clients in the same order they blocked for
+* this key, from the first blocked to the last. */
+dictEntry *de = dictFind(rl->db->blocking_keys,rl->key);
+if (de) {
+list *clients = dictGetVal(de);
+listNode *ln;
+listIter li;
+listRewind(clients,&li);
+while((ln = listNext(&li))) {
+client *receiver = listNodeValue(ln);
+if (receiver->btype != BLOCKED_STREAM || !receiver->bpop.xread_group)
+continue;
+long long prev_error_replies = server.stat_total_error_replies;
+client *old_client = server.current_client;
+server.current_client = receiver;
+monotime replyTimer;
+elapsedStart(&replyTimer);
+addReplyError(receiver, "-UNBLOCKED the stream key no longer exists");
+updateStatsOnUnblock(receiver, 0, elapsedUs(replyTimer), server.stat_total_error_replies != prev_error_replies);
+unblockClient(receiver);
+afterCommand(receiver);
+server.current_client = old_client;
+}
+}
+}
 /* This function should be called by Redis every time a single command,
 * a MULTI/EXEC block, or a Lua script, terminated its execution after
 * being called by a client. It handles serving clients blocked in
@@ -624,17 +655,27 @@ void handleClientsBlockedOnKeys(void) {
 /* Serve clients blocked on the key. */
 robj *o = lookupKeyReadWithFlags(rl->db, rl->key, LOOKUP_NONOTIFY | LOOKUP_NOSTATS);
 if (o != NULL) {
-if (o->type == OBJ_LIST)
+int objtype = o->type;
+if (objtype == OBJ_LIST)
 serveClientsBlockedOnListKey(o,rl);
-else if (o->type == OBJ_ZSET)
+else if (objtype == OBJ_ZSET)
 serveClientsBlockedOnSortedSetKey(o,rl);
-else if (o->type == OBJ_STREAM)
+else if (objtype == OBJ_STREAM)
 serveClientsBlockedOnStreamKey(o,rl);
 /* We want to serve clients blocked on module keys
 * regardless of the object type: we don't know what the
 * module is trying to accomplish right now. */
 serveClientsBlockedOnKeyByModule(rl);
+/* If we have XREADGROUP clients blocked on this key, and
+* the key is not a stream, it must mean that the key was
+* overwritten by either SET or something like
+* (MULTI, DEL key, SADD key e, EXEC).
+* In this case we need to unblock all these clients. */
+if (objtype != OBJ_STREAM)
+unblockDeletedStreamReadgroupClients(rl);
 } else {
+/* Unblock all XREADGROUP clients of this deleted key */
+unblockDeletedStreamReadgroupClients(rl);
 /* Edge case: If lookupKeyReadWithFlags decides to expire the key we have to
 * take care of the propagation here, because afterCommand wasn't called */
 if (server.also_propagate.numops > 0)
@@ -823,4 +864,3 @@ void signalKeyAsReady(redisDb *db, robj *key, int type) {
 incrRefCount(key);
 serverAssert(dictAdd(db->ready_keys,key,NULL) == DICT_OK);
 }
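The new unblockDeletedStreamReadgroupClients() above rests on the fact that deleting a stream also deletes its consumer groups, so waiting for a later XADD cannot help an XREADGROUP-blocked client. A hedged hiredis sketch of that rationale (placeholder host, key, group and consumer names; assumes a local server and is not part of the commit):

#include <stdio.h>
#include <hiredis/hiredis.h>

int main(void) {
    redisContext *c = redisConnect("127.0.0.1", 6379);
    if (c == NULL || c->err) return 1;

    redisReply *r;
    /* Build a stream and a consumer group, then delete the key:
     * the group disappears together with the stream. */
    if ((r = redisCommand(c, "XADD mystream * f v")) != NULL) freeReplyObject(r);
    if ((r = redisCommand(c, "XGROUP CREATE mystream grp 0")) != NULL) freeReplyObject(r);
    if ((r = redisCommand(c, "DEL mystream")) != NULL) freeReplyObject(r);
    /* Reviving the key does not revive the group... */
    if ((r = redisCommand(c, "XADD mystream * f v")) != NULL) freeReplyObject(r);

    /* ...so XREADGROUP now fails with -NOGROUP, which is why clients blocked
     * in XREADGROUP are unblocked with an error when the key is deleted. */
    r = redisCommand(c, "XREADGROUP GROUP grp consumer1 COUNT 1 STREAMS mystream >");
    if (r && r->type == REDIS_REPLY_ERROR) printf("expected error: %s\n", r->str);
    if (r) freeReplyObject(r);

    redisFree(c);
    return 0;
}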
@@ -525,3 +525,18 @@ CallReply *callReplyCreate(sds reply, list *deferred_error_list, void *private_d
 res->deferred_error_list = deferred_error_list;
 return res;
 }
+/* Create a new CallReply struct from the reply blob representing an error message.
+* Automatically creating deferred_error_list and set a copy of the reply in it.
+* Refer to callReplyCreate for detailed explanation. */
+CallReply *callReplyCreateError(sds reply, void *private_data) {
+sds err_buff = reply;
+if (err_buff[0] != '-') {
+err_buff = sdscatfmt(sdsempty(), "-ERR %S\r\n", reply);
+sdsfree(reply);
+}
+list *deferred_error_list = listCreate();
+listSetFreeMethod(deferred_error_list, (void (*)(void*))sdsfree);
+listAddNodeTail(deferred_error_list, sdsnew(err_buff));
+return callReplyCreate(err_buff, deferred_error_list, private_data);
+}
@@ -35,6 +35,7 @@
 typedef struct CallReply CallReply;
 CallReply *callReplyCreate(sds reply, list *deferred_error_list, void *private_data);
+CallReply *callReplyCreateError(sds reply, void *private_data);
 int callReplyType(CallReply *rep);
 const char *callReplyGetString(CallReply *rep, size_t *len);
 long long callReplyGetLongLong(CallReply *rep);
...
@@ -299,7 +299,7 @@ static sds percentDecode(const char *pe, size_t len) {
 }
 /* Parse a URI and extract the server connection information.
-* URI scheme is based on the the provisional specification[1] excluding support
+* URI scheme is based on the provisional specification[1] excluding support
 * for query parameters. Valid URIs are:
 * scheme: "redis://"
 * authority: [[<username> ":"] <password> "@"] [<hostname> [":" <port>]]
@@ -371,3 +371,28 @@ void freeCliConnInfo(cliConnInfo connInfo){
 if (connInfo.auth) sdsfree(connInfo.auth);
 if (connInfo.user) sdsfree(connInfo.user);
 }
+/*
+* Escape a Unicode string for JSON output (--json), following RFC 7159:
+* https://datatracker.ietf.org/doc/html/rfc7159#section-7
+*/
+sds escapeJsonString(sds s, const char *p, size_t len) {
+s = sdscatlen(s,"\"",1);
+while(len--) {
+switch(*p) {
+case '\\':
+case '"':
+s = sdscatprintf(s,"\\%c",*p);
+break;
+case '\n': s = sdscatlen(s,"\\n",2); break;
+case '\f': s = sdscatlen(s,"\\f",2); break;
+case '\r': s = sdscatlen(s,"\\r",2); break;
+case '\t': s = sdscatlen(s,"\\t",2); break;
+case '\b': s = sdscatlen(s,"\\b",2); break;
+default:
+s = sdscatprintf(s,(*p >= 0 && *p <= 0x1f) ? "\\u%04x" : "%c",*p);
+}
+p++;
+}
+return sdscatlen(s,"\"",1);
+}
@@ -48,4 +48,7 @@ sds unquoteCString(char *str);
 void parseRedisUri(const char *uri, const char* tool_name, cliConnInfo *connInfo, int *tls_flag);
 void freeCliConnInfo(cliConnInfo connInfo);
+sds escapeJsonString(sds s, const char *p, size_t len);
 #endif /* __CLICOMMON_H */
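escapeJsonString() above escapes strings for redis-cli's --json output. The standalone sketch below reproduces the same escaping rules with stdio instead of sds so it compiles on its own; it is an illustration, not the cli code.

#include <stdio.h>

static void print_json_string(const char *p, size_t len) {
    putchar('"');
    while (len--) {
        switch (*p) {
        case '\\': case '"': printf("\\%c", *p); break;
        case '\n': printf("\\n"); break;
        case '\f': printf("\\f"); break;
        case '\r': printf("\\r"); break;
        case '\t': printf("\\t"); break;
        case '\b': printf("\\b"); break;
        default:
            /* Other control characters become \u00XX, everything else is copied. */
            if ((unsigned char)*p <= 0x1f) printf("\\u%04x", *p);
            else putchar(*p);
        }
        p++;
    }
    putchar('"');
}

int main(void) {
    const char s[] = "say \"hi\"\n\tdone";
    print_json_string(s, sizeof(s) - 1);  /* prints: "say \"hi\"\n\tdone" */
    putchar('\n');
    return 0;
}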
@@ -118,7 +118,9 @@ typedef struct clusterNode {
 int flags; /* CLUSTER_NODE_... */
 uint64_t configEpoch; /* Last configEpoch observed for this node */
 unsigned char slots[CLUSTER_SLOTS/8]; /* slots handled by this node */
-sds slots_info; /* Slots info represented by string. */
+uint16_t *slot_info_pairs; /* Slots info represented as (start/end) pair (consecutive index). */
+int slot_info_pairs_count; /* Used number of slots in slot_info_pairs */
+int slot_info_pairs_alloc; /* Allocated number of slots in slot_info_pairs */
 int numslots; /* Number of slots handled by this node */
 int numslaves; /* Number of slave nodes, if this is a master */
 struct clusterNode **slaves; /* pointers to slave nodes */
@@ -375,7 +377,8 @@ void clusterInit(void);
 void clusterCron(void);
 void clusterBeforeSleep(void);
 clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, int argc, int *hashslot, int *ask);
-clusterNode *clusterLookupNode(const char *name);
+int verifyClusterNodeId(const char *name, int length);
+clusterNode *clusterLookupNode(const char *name, int length);
 int clusterRedirectBlockedClientIfNeeded(client *c);
 void clusterRedirectClient(client *c, clusterNode *n, int hashslot, int error_code);
 void migrateCloseTimedoutSockets(void);
...
@@ -8,10 +8,6 @@
 "container": "CLIENT",
 "function": "clientCommand",
 "history": [
-[
-"3.2.10",
-"Client pause prevents client pause and key eviction as well."
-],
 [
 "6.2.0",
 "`CLIENT PAUSE WRITE` mode added along with the `mode` option."
...
{
"SHARDS": {
"summary": "Get array of cluster slots to node mappings",
"complexity": "O(N) where N is the total number of cluster nodes",
"group": "cluster",
"since": "7.0.0",
"arity": 2,
"container": "CLUSTER",
"function": "clusterCommand",
"history": [],
"command_flags": [
"STALE"
],
"command_tips": [
"NONDETERMINISTIC_OUTPUT"
]
}
}
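The JSON above registers the new CLUSTER SHARDS command. A hedged hiredis sketch of calling it (placeholder host/port of a cluster-enabled node; the reply layout is only summarized by element count, not parsed in full):

#include <stdio.h>
#include <hiredis/hiredis.h>

int main(void) {
    redisContext *c = redisConnect("127.0.0.1", 7000); /* placeholder cluster node */
    if (c == NULL || c->err) return 1;

    redisReply *r = redisCommand(c, "CLUSTER SHARDS");
    if (r == NULL) { redisFree(c); return 1; }
    if (r->type == REDIS_REPLY_ARRAY)
        printf("cluster reports %zu shard(s)\n", r->elements); /* one entry per shard */
    else if (r->type == REDIS_REPLY_ERROR)
        printf("error: %s\n", r->str); /* e.g. cluster support disabled */
    freeReplyObject(r);
    redisFree(c);
    return 0;
}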