Commit d7971f96 authored by Oran Agra

Merge remote-tracking branch 'origin/unstable' into 7.0

parents d2b5a579 acfb4f7a
......@@ -7,7 +7,7 @@ jobs:
test-ubuntu-latest:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: make
# Fail build if there are warnings
# build with TLS just for compilation coverage
......@@ -22,7 +22,7 @@ jobs:
test-sanitizer-address:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: make
run: make SANITIZER=address REDIS_CFLAGS='-Werror'
- name: testprep
......@@ -36,7 +36,7 @@ jobs:
runs-on: ubuntu-latest
container: debian:oldoldstable
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: make
run: |
apt-get update && apt-get install -y build-essential
......@@ -45,14 +45,14 @@ jobs:
build-macos-latest:
runs-on: macos-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: make
run: make REDIS_CFLAGS='-Werror'
build-32bit:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: make
run: |
sudo apt-get update && sudo apt-get install libc6-dev-i386
......@@ -61,7 +61,7 @@ jobs:
build-libc-malloc:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: make
run: make REDIS_CFLAGS='-Werror' MALLOC=libc
......@@ -69,7 +69,7 @@ jobs:
runs-on: ubuntu-latest
container: centos:7
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: make
run: |
yum -y install gcc make
......
......@@ -4,7 +4,7 @@ on:
push:
pull_request:
schedule:
# run weekly new vulnerability was added to the the database
# run weekly new vulnerability was added to the database
- cron: '0 0 * * 0'
jobs:
......@@ -20,7 +20,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v2
uses: actions/checkout@v3
- name: Initialize CodeQL
uses: github/codeql-action/init@v1
......
......@@ -11,7 +11,7 @@ on:
inputs:
skipjobs:
description: 'jobs to skip (delete the ones you wanna keep, do not leave empty)'
default: 'valgrind,sanitizer,tls,freebsd,macos,alpine,32bit,ubuntu,centos,malloc'
default: 'valgrind,sanitizer,tls,freebsd,macos,alpine,32bit,iothreads,ubuntu,centos,malloc'
skiptests:
description: 'tests to skip (delete the ones you wanna keep, do not leave empty)'
default: 'redis,modules,sentinel,cluster,unittest'
......@@ -34,8 +34,8 @@ jobs:
test-ubuntu-jemalloc:
runs-on: ubuntu-latest
if: |
github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'redis/redis') && !contains(github.event.inputs.skipjobs, 'ubuntu')
(github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
!contains(github.event.inputs.skipjobs, 'ubuntu')
timeout-minutes: 14400
steps:
- name: prep
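The `if:` rewrites in this hunk (and the matching ones below) fix an operator-precedence bug: in GitHub Actions expressions, as in C, `&&` binds tighter than `||`, so the old unparenthesized form ran the job on every manual dispatch and ignored the skipjobs input. A minimal C sketch of the same precedence trap, with hypothetical variable names:

    #include <stdio.h>

    int main(void) {
        int is_dispatch = 1;  /* github.event_name == 'workflow_dispatch' */
        int is_schedule = 0;  /* scheduled run on redis/redis */
        int not_skipped = 0;  /* job is listed in skipjobs, so this is false */

        /* Old form parses as is_dispatch || (is_schedule && not_skipped):
         * a manual dispatch bypasses the skip list entirely. */
        int old_cond = is_dispatch || is_schedule && not_skipped;

        /* New form requires both the trigger check and the skip check. */
        int new_cond = (is_dispatch || is_schedule) && not_skipped;

        printf("old=%d new=%d\n", old_cond, new_cond); /* old=1 new=0 */
        return 0;
    }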
......@@ -44,7 +44,7 @@ jobs:
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
echo "skipping: ${{github.event.inputs.skipjobs}} and ${{github.event.inputs.skiptests}}"
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
......@@ -71,8 +71,8 @@ jobs:
test-ubuntu-libc-malloc:
runs-on: ubuntu-latest
if: |
github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'redis/redis') && !contains(github.event.inputs.skipjobs, 'malloc')
(github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
!contains(github.event.inputs.skipjobs, 'malloc')
timeout-minutes: 14400
steps:
- name: prep
......@@ -80,7 +80,7 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
......@@ -104,8 +104,8 @@ jobs:
test-ubuntu-no-malloc-usable-size:
runs-on: ubuntu-latest
if: |
github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'redis/redis') && !contains(github.event.inputs.skipjobs, 'malloc')
(github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
!contains(github.event.inputs.skipjobs, 'malloc')
timeout-minutes: 14400
steps:
- name: prep
......@@ -113,7 +113,7 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
......@@ -137,8 +137,8 @@ jobs:
test-ubuntu-32bit:
runs-on: ubuntu-latest
if: |
(github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'redis/redis')) && !contains(github.event.inputs.skipjobs, '32bit')
(github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
!contains(github.event.inputs.skipjobs, '32bit')
timeout-minutes: 14400
steps:
- name: prep
......@@ -146,7 +146,7 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
......@@ -177,8 +177,8 @@ jobs:
test-ubuntu-tls:
runs-on: ubuntu-latest
if: |
(github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'redis/redis')) && !contains(github.event.inputs.skipjobs, 'tls')
(github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
!contains(github.event.inputs.skipjobs, 'tls')
timeout-minutes: 14400
steps:
- name: prep
......@@ -186,7 +186,47 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: make
run: |
make BUILD_TLS=yes REDIS_CFLAGS='-Werror'
- name: testprep
run: |
sudo apt-get install tcl8.6 tclx tcl-tls
./utils/gen-test-certs.sh
- name: test
if: true && !contains(github.event.inputs.skiptests, 'redis')
run: |
./runtest --accurate --verbose --dump-logs --tls --dump-logs ${{github.event.inputs.test_args}}
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: |
./runtest-moduleapi --verbose --dump-logs --tls --dump-logs ${{github.event.inputs.test_args}}
- name: sentinel tests
if: true && !contains(github.event.inputs.skiptests, 'sentinel')
run: |
./runtest-sentinel --tls ${{github.event.inputs.cluster_test_args}}
- name: cluster tests
if: true && !contains(github.event.inputs.skiptests, 'cluster')
run: |
./runtest-cluster --tls ${{github.event.inputs.cluster_test_args}}
test-ubuntu-tls-no-tls:
runs-on: ubuntu-latest
if: |
(github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
!contains(github.event.inputs.skipjobs, 'tls')
timeout-minutes: 14400
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
......@@ -200,29 +240,25 @@ jobs:
- name: test
if: true && !contains(github.event.inputs.skiptests, 'redis')
run: |
./runtest --accurate --verbose --tls --dump-logs ${{github.event.inputs.test_args}}
./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: |
./runtest-moduleapi --verbose --tls --dump-logs ${{github.event.inputs.test_args}}
./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: sentinel tests
if: true && !contains(github.event.inputs.skiptests, 'sentinel')
run: |
./runtest-sentinel --tls ${{github.event.inputs.cluster_test_args}}
./runtest-sentinel ${{github.event.inputs.cluster_test_args}}
- name: cluster tests
if: true && !contains(github.event.inputs.skiptests, 'cluster')
run: |
./runtest-cluster --tls ${{github.event.inputs.cluster_test_args}}
./runtest-cluster ${{github.event.inputs.cluster_test_args}}
test-ubuntu-io-threads:
runs-on: ubuntu-latest
if: |
(github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'redis/redis')) && !contains(github.event.inputs.skipjobs, 'iothreads')
(github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
!contains(github.event.inputs.skipjobs, 'iothreads')
timeout-minutes: 14400
steps:
- name: prep
......@@ -230,7 +266,7 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
......@@ -246,11 +282,11 @@ jobs:
if: true && !contains(github.event.inputs.skiptests, 'cluster')
run: ./runtest-cluster --config io-threads 4 --config io-threads-do-reads yes ${{github.event.inputs.cluster_test_args}}
test-valgrind:
test-valgrind-test:
runs-on: ubuntu-latest
if: |
(github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'redis/redis')) && !contains(github.event.inputs.skipjobs, 'valgrind')
(github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
!contains(github.event.inputs.skipjobs, 'valgrind') && !contains(github.event.inputs.skiptests, 'redis')
timeout-minutes: 14400
steps:
- name: prep
......@@ -258,7 +294,7 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
......@@ -271,6 +307,29 @@ jobs:
- name: test
if: true && !contains(github.event.inputs.skiptests, 'redis')
run: ./runtest --valgrind --no-latency --verbose --clients 1 --timeout 2400 --dump-logs ${{github.event.inputs.test_args}}
test-valgrind-misc:
runs-on: ubuntu-latest
if: |
(github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
!contains(github.event.inputs.skipjobs, 'valgrind') && !(contains(github.event.inputs.skiptests, 'modules') && contains(github.event.inputs.skiptests, 'unittest'))
timeout-minutes: 14400
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: make
run: make valgrind REDIS_CFLAGS='-Werror -DREDIS_TEST'
- name: testprep
run: |
sudo apt-get update
sudo apt-get install tcl8.6 tclx valgrind -y
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: ./runtest-moduleapi --valgrind --no-latency --verbose --clients 1 --timeout 2400 --dump-logs ${{github.event.inputs.test_args}}
......@@ -280,11 +339,11 @@ jobs:
valgrind --track-origins=yes --suppressions=./src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full --log-file=err.txt ./src/redis-server test all
if grep -q 0x err.txt; then cat err.txt; exit 1; fi
test-valgrind-no-malloc-usable-size:
test-valgrind-no-malloc-usable-size-test:
runs-on: ubuntu-latest
if: |
(github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'redis/redis')) && !contains(github.event.inputs.skipjobs, 'valgrind')
(github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
!contains(github.event.inputs.skipjobs, 'valgrind') && !contains(github.event.inputs.skiptests, 'redis')
timeout-minutes: 14400
steps:
- name: prep
......@@ -292,12 +351,12 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: make
run: make valgrind CFLAGS="-DNO_MALLOC_USABLE_SIZE" REDIS_CFLAGS='-Werror'
run: make valgrind CFLAGS="-DNO_MALLOC_USABLE_SIZE -DREDIS_TEST" REDIS_CFLAGS='-Werror'
- name: testprep
run: |
sudo apt-get update
......@@ -305,15 +364,43 @@ jobs:
- name: test
if: true && !contains(github.event.inputs.skiptests, 'redis')
run: ./runtest --valgrind --no-latency --verbose --clients 1 --timeout 2400 --dump-logs ${{github.event.inputs.test_args}}
test-valgrind-no-malloc-usable-size-misc:
runs-on: ubuntu-latest
if: |
(github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
!contains(github.event.inputs.skipjobs, 'valgrind') && !(contains(github.event.inputs.skiptests, 'modules') && contains(github.event.inputs.skiptests, 'unittest'))
timeout-minutes: 14400
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: make
run: make valgrind CFLAGS="-DNO_MALLOC_USABLE_SIZE -DREDIS_TEST" REDIS_CFLAGS='-Werror'
- name: testprep
run: |
sudo apt-get update
sudo apt-get install tcl8.6 tclx valgrind -y
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: ./runtest-moduleapi --valgrind --no-latency --verbose --clients 1 --timeout 2400 --dump-logs ${{github.event.inputs.test_args}}
- name: unittest
if: true && !contains(github.event.inputs.skiptests, 'unittest')
run: |
valgrind --track-origins=yes --suppressions=./src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full --log-file=err.txt ./src/redis-server test all
if grep -q 0x err.txt; then cat err.txt; exit 1; fi
test-sanitizer-address:
runs-on: ubuntu-latest
if: |
(github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'redis/redis')) && !contains(github.event.inputs.skipjobs, 'sanitizer')
(github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
!contains(github.event.inputs.skipjobs, 'sanitizer')
timeout-minutes: 14400
strategy:
matrix:
......@@ -326,7 +413,7 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
......@@ -355,8 +442,8 @@ jobs:
test-sanitizer-undefined:
runs-on: ubuntu-latest
if: |
(github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'redis/redis')) && !contains(github.event.inputs.skipjobs, 'sanitizer')
(github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
!contains(github.event.inputs.skipjobs, 'sanitizer')
timeout-minutes: 14400
strategy:
matrix:
......@@ -369,7 +456,7 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
......@@ -398,8 +485,8 @@ jobs:
test-centos7-jemalloc:
runs-on: ubuntu-latest
if: |
github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'redis/redis') && !contains(github.event.inputs.skipjobs, 'centos')
(github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
!contains(github.event.inputs.skipjobs, 'centos')
container: centos:7
timeout-minutes: 14400
steps:
......@@ -408,7 +495,7 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
......@@ -434,8 +521,8 @@ jobs:
test-centos7-tls:
runs-on: ubuntu-latest
if: |
(github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'redis/redis')) && !contains(github.event.inputs.skipjobs, 'tls')
(github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
!contains(github.event.inputs.skipjobs, 'tls')
container: centos:7
timeout-minutes: 14400
steps:
......@@ -444,7 +531,50 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: make
run: |
yum -y install centos-release-scl epel-release
yum -y install devtoolset-7 openssl-devel openssl
scl enable devtoolset-7 "make BUILD_TLS=yes REDIS_CFLAGS='-Werror'"
- name: testprep
run: |
yum -y install tcl tcltls tclx
./utils/gen-test-certs.sh
- name: test
if: true && !contains(github.event.inputs.skiptests, 'redis')
run: |
./runtest --accurate --verbose --dump-logs --tls --dump-logs ${{github.event.inputs.test_args}}
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: |
./runtest-moduleapi --verbose --dump-logs --tls --dump-logs ${{github.event.inputs.test_args}}
- name: sentinel tests
if: true && !contains(github.event.inputs.skiptests, 'sentinel')
run: |
./runtest-sentinel --tls ${{github.event.inputs.cluster_test_args}}
- name: cluster tests
if: true && !contains(github.event.inputs.skiptests, 'cluster')
run: |
./runtest-cluster --tls ${{github.event.inputs.cluster_test_args}}
test-centos7-tls-no-tls:
runs-on: ubuntu-latest
if: |
(github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
!contains(github.event.inputs.skipjobs, 'tls')
container: centos:7
timeout-minutes: 14400
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
......@@ -460,29 +590,25 @@ jobs:
- name: test
if: true && !contains(github.event.inputs.skiptests, 'redis')
run: |
./runtest --accurate --verbose --tls --dump-logs ${{github.event.inputs.test_args}}
./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: |
./runtest-moduleapi --verbose --tls --dump-logs ${{github.event.inputs.test_args}}
./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: sentinel tests
if: true && !contains(github.event.inputs.skiptests, 'sentinel')
run: |
./runtest-sentinel --tls ${{github.event.inputs.cluster_test_args}}
./runtest-sentinel ${{github.event.inputs.cluster_test_args}}
- name: cluster tests
if: true && !contains(github.event.inputs.skiptests, 'cluster')
run: |
./runtest-cluster --tls ${{github.event.inputs.cluster_test_args}}
./runtest-cluster ${{github.event.inputs.cluster_test_args}}
test-macos-latest:
runs-on: macos-latest
if: |
(github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'redis/redis')) && !contains(github.event.inputs.skipjobs, 'macos')
(github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
!contains(github.event.inputs.skipjobs, 'macos') && !(contains(github.event.inputs.skiptests, 'redis') && contains(github.event.inputs.skiptests, 'modules'))
timeout-minutes: 14400
steps:
- name: prep
......@@ -490,7 +616,7 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
......@@ -502,9 +628,47 @@ jobs:
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: ./runtest-moduleapi --verbose --no-latency --dump-logs ${{github.event.inputs.test_args}}
test-macos-latest-sentinel:
runs-on: macos-latest
if: |
(github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
!contains(github.event.inputs.skipjobs, 'macos') && !contains(github.event.inputs.skiptests, 'sentinel')
timeout-minutes: 14400
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: make
run: make REDIS_CFLAGS='-Werror'
- name: sentinel tests
if: true && !contains(github.event.inputs.skiptests, 'sentinel')
run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}}
test-macos-latest-cluster:
runs-on: macos-latest
if: |
(github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
!contains(github.event.inputs.skipjobs, 'macos') && !contains(github.event.inputs.skiptests, 'cluster')
timeout-minutes: 14400
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: make
run: make REDIS_CFLAGS='-Werror'
- name: cluster tests
if: true && !contains(github.event.inputs.skiptests, 'cluster')
run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}}
......@@ -512,8 +676,8 @@ jobs:
test-freebsd:
runs-on: macos-10.15
if: |
(github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'redis/redis')) && !contains(github.event.inputs.skipjobs, 'freebsd')
(github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
!contains(github.event.inputs.skipjobs, 'freebsd') && !(contains(github.event.inputs.skiptests, 'redis') && contains(github.event.inputs.skiptests, 'modules'))
timeout-minutes: 14400
steps:
- name: prep
......@@ -521,7 +685,7 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
......@@ -536,14 +700,66 @@ jobs:
gmake || exit 1 ;
if echo "${{github.event.inputs.skiptests}}" | grep -vq redis ; then ./runtest --verbose --timeout 2400 --no-latency --dump-logs ${{github.event.inputs.test_args}} || exit 1 ; fi ;
if echo "${{github.event.inputs.skiptests}}" | grep -vq modules ; then MAKE=gmake ./runtest-moduleapi --verbose --timeout 2400 --no-latency --dump-logs ${{github.event.inputs.test_args}} || exit 1 ; fi ;
test-freebsd-sentinel:
runs-on: macos-10.15
if: |
(github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
!contains(github.event.inputs.skipjobs, 'freebsd') && !contains(github.event.inputs.skiptests, 'sentinel')
timeout-minutes: 14400
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: test
uses: vmactions/freebsd-vm@v0.1.6
with:
usesh: true
sync: rsync
copyback: false
prepare: pkg install -y bash gmake lang/tcl86 lang/tclx
run: >
gmake || exit 1 ;
if echo "${{github.event.inputs.skiptests}}" | grep -vq sentinel ; then ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} || exit 1 ; fi ;
test-freebsd-cluster:
runs-on: macos-10.15
if: |
(github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
!contains(github.event.inputs.skipjobs, 'freebsd') && !contains(github.event.inputs.skiptests, 'cluster')
timeout-minutes: 14400
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: test
uses: vmactions/freebsd-vm@v0.1.6
with:
usesh: true
sync: rsync
copyback: false
prepare: pkg install -y bash gmake lang/tcl86 lang/tclx
run: >
gmake || exit 1 ;
if echo "${{github.event.inputs.skiptests}}" | grep -vq cluster ; then ./runtest-cluster ${{github.event.inputs.cluster_test_args}} || exit 1 ; fi ;
test-alpine-jemalloc:
runs-on: ubuntu-latest
if: |
(github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'redis/redis')) && !contains(github.event.inputs.skipjobs, 'alpine')
(github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
!contains(github.event.inputs.skipjobs, 'alpine')
container: alpine:latest
steps:
- name: prep
......@@ -551,7 +767,7 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
......@@ -577,8 +793,8 @@ jobs:
test-alpine-libc-malloc:
runs-on: ubuntu-latest
if: |
(github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'redis/redis')) && !contains(github.event.inputs.skipjobs, 'alpine')
(github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
!contains(github.event.inputs.skipjobs, 'alpine')
container: alpine:latest
steps:
- name: prep
......@@ -586,7 +802,7 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
......
......@@ -12,7 +12,7 @@ jobs:
if: github.event_name != 'schedule' || github.repository == 'redis/redis'
timeout-minutes: 14400
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Build
run: make REDIS_CFLAGS=-Werror
- name: Start redis-server
......@@ -36,7 +36,7 @@ jobs:
if: github.event_name != 'schedule' || github.repository == 'redis/redis'
timeout-minutes: 14400
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Build
run: make REDIS_CFLAGS=-Werror
- name: Start redis-server
......@@ -63,7 +63,7 @@ jobs:
if: github.event_name != 'schedule' || github.repository == 'redis/redis'
timeout-minutes: 14400
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Build
run: make REDIS_CFLAGS=-Werror
- name: Start redis-server
......
......@@ -16,10 +16,10 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v2
uses: actions/checkout@v3
- name: pip cache
uses: actions/cache@v2
uses: actions/cache@v3
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}
......
......@@ -126,7 +126,7 @@ protected-mode yes
#
# no - Block for any connection (remain immutable)
# yes - Allow for any connection (no protection)
# local - Allow only for local local connections. Ones originating from the
# local - Allow only for local connections. Ones originating from the
# IPv4 address (127.0.0.1), IPv6 address (::1) or Unix domain sockets.
#
# enable-protected-configs no
......@@ -627,7 +627,7 @@ repl-diskless-sync-max-replicas 0
#
# In many cases the disk is slower than the network, and storing and loading
# the RDB file may increase replication time (and even increase the master's
# Copy on Write memory and salve buffers).
# Copy on Write memory and replica buffers).
# However, parsing the RDB file directly from the socket may mean that we have
# to flush the contents of the current database before the full rdb was
# received. For this reason we have the following options:
......@@ -1224,7 +1224,7 @@ replica-lazy-flush no
lazyfree-lazy-user-del no
# FLUSHDB, FLUSHALL, and SCRIPT FLUSH support both asynchronous and synchronous
# FLUSHDB, FLUSHALL, SCRIPT FLUSH and FUNCTION FLUSH support both asynchronous and synchronous
# deletion, which can be controlled by passing the [SYNC|ASYNC] flags into the
# commands. When neither flag is passed, this directive will be used to determine
# if the data should be deleted asynchronously.
......@@ -1287,7 +1287,7 @@ lazyfree-lazy-user-flush no
# attempt to have background child processes killed before all others, and
# replicas killed before masters.
#
# Redis supports three options:
# Redis supports these options:
#
# no: Don't make changes to oom-score-adj (default).
# yes: Alias to "relative" see below.
......@@ -1640,7 +1640,7 @@ aof-timestamp-enabled no
# cluster-replica-no-failover no
# This option, when set to yes, allows nodes to serve read traffic while the
# the cluster is in a down state, as long as it believes it owns the slots.
# cluster is in a down state, as long as it believes it owns the slots.
#
# This is useful for two cases. The first case is for when an application
# doesn't require consistency of data during node failures or network partitions.
......@@ -1958,7 +1958,7 @@ activerehashing yes
# The limit can be set differently for the three different classes of clients:
#
# normal -> normal clients including MONITOR clients
# replica -> replica clients
# replica -> replica clients
# pubsub -> clients subscribed to at least one pubsub channel or pattern
#
# The syntax of every client-output-buffer-limit directive is the following:
......@@ -2164,7 +2164,7 @@ rdb-save-incremental-fsync yes
# defragmentation process. If you are not sure about what they mean it is
# a good idea to leave the defaults untouched.
# Enabled active defragmentation
# Active defragmentation is disabled by default
# activedefrag no
# Minimum amount of fragmentation waste to start active defrag
......
......@@ -20,6 +20,7 @@ $TCLSH tests/test_helper.tcl \
--single unit/moduleapi/fork \
--single unit/moduleapi/testrdb \
--single unit/moduleapi/infotest \
--single unit/moduleapi/moduleconfigs \
--single unit/moduleapi/infra \
--single unit/moduleapi/propagate \
--single unit/moduleapi/hooks \
......
......@@ -120,10 +120,7 @@ typedef struct {
* understand if the command can be executed. */
uint64_t allowed_commands[USER_COMMAND_BITS_COUNT/64];
/* allowed_firstargs is used by ACL rules to block access to a command unless a
* specific argv[1] is given (or argv[2] in case it is applied on a sub-command).
* For example, a user can use the rule "-select +select|0" to block all
* SELECT commands, except "SELECT 0".
* And for a sub-command: "+config -config|set +config|set|loglevel"
* specific argv[1] is given.
*
* For each command ID (corresponding to the command bit set in allowed_commands),
* This array points to an array of SDS strings, terminated by a NULL pointer,
......@@ -1531,6 +1528,37 @@ static int ACLSelectorCheckKey(aclSelector *selector, const char *key, int keyle
return ACL_DENIED_KEY;
}
/* Checks if the provided selector has access specified in flags
* to all keys in the keyspace. For example, CMD_KEY_READ access requires either
* '%R~*', '~*', or allkeys to be granted to the selector. Returns 1 if all
* the access flags are satisfied with this selector or 0 otherwise.
*/
static int ACLSelectorHasUnrestrictedKeyAccess(aclSelector *selector, int flags) {
/* The selector can access any key */
if (selector->flags & SELECTOR_FLAG_ALLKEYS) return 1;
listIter li;
listNode *ln;
listRewind(selector->patterns,&li);
int access_flags = 0;
if (flags & CMD_KEY_ACCESS) access_flags |= ACL_READ_PERMISSION;
if (flags & CMD_KEY_INSERT) access_flags |= ACL_WRITE_PERMISSION;
if (flags & CMD_KEY_DELETE) access_flags |= ACL_WRITE_PERMISSION;
if (flags & CMD_KEY_UPDATE) access_flags |= ACL_WRITE_PERMISSION;
/* Test this key against every pattern. */
while((ln = listNext(&li))) {
keyPattern *pattern = listNodeValue(ln);
if ((pattern->flags & access_flags) != access_flags)
continue;
if (!strcmp(pattern->pattern,"*")) {
return 1;
}
}
return 0;
}
/* Checks a channel against a provided list of channels. The is_pattern
* argument should only be used when subscribing (not when publishing)
* and controls whether the input channel is evaluated as a channel pattern
......@@ -1675,6 +1703,39 @@ int ACLUserCheckKeyPerm(user *u, const char *key, int keylen, int flags) {
return ACL_DENIED_KEY;
}
/* Checks if the user can execute the given command with the added restriction
* that it must also have the access specified in flags to any key in the key space.
* For example, CMD_KEY_READ access requires either '%R~*', '~*', or allkeys to be
* granted in addition to the access required by the command. Returns 1
* if the user has access or 0 otherwise.
*/
int ACLUserCheckCmdWithUnrestrictedKeyAccess(user *u, struct redisCommand *cmd, robj **argv, int argc, int flags) {
listIter li;
listNode *ln;
int local_idxptr;
/* If there is no associated user, the connection can run anything. */
if (u == NULL) return 1;
/* For multiple selectors, we cache the key result in between selector
* calls to prevent duplicate lookups. */
aclKeyResultCache cache;
initACLKeyResultCache(&cache);
/* Check each selector sequentially */
listRewind(u->selectors,&li);
while((ln = listNext(&li))) {
aclSelector *s = (aclSelector *) listNodeValue(ln);
int acl_retval = ACLSelectorCheckCmd(s, cmd, argv, argc, &local_idxptr, &cache);
if (acl_retval == ACL_OK && ACLSelectorHasUnrestrictedKeyAccess(s, flags)) {
cleanupACLKeyResultCache(&cache);
return 1;
}
}
cleanupACLKeyResultCache(&cache);
return 0;
}
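A hedged usage sketch for the helper above; the caller and its purpose are assumptions for illustration, not taken from this diff:

    /* Hypothetical caller: gate a command that logically touches every key,
     * e.g. a keyspace-wide scan. CMD_KEY_ACCESS means read-only access, so a
     * selector granting '%R~*', '~*', or allkeys satisfies the check. */
    int canRunKeyspaceWideReadCommand(client *c, struct redisCommand *cmd) {
        return ACLUserCheckCmdWithUnrestrictedKeyAccess(
            c->user, cmd, c->argv, c->argc, CMD_KEY_ACCESS);
    }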
/* Check if the channel can be accessed by the client according to
* the ACLs associated with the specified user.
*
......@@ -2411,6 +2472,22 @@ void addACLLogEntry(client *c, int reason, int context, int argpos, sds username
}
}
const char* getAclErrorMessage(int acl_res) {
/* Notice that a variant of this code also exists on aclCommand so
* it also needs to be updated when this changes. */
switch (acl_res) {
case ACL_DENIED_CMD:
return "can't run this command or subcommand";
case ACL_DENIED_KEY:
return "can't access at least one of the keys mentioned in the command arguments";
case ACL_DENIED_CHANNEL:
return "can't publish to the channel mentioned in the command";
default:
return "lacking the permissions for the command";
}
serverPanic("Reached deadcode on getAclErrorMessage");
}
/* =============================================================================
* ACL related commands
* ==========================================================================*/
......@@ -2793,13 +2870,22 @@ setuser_cleanup:
return;
}
if ((cmd->arity > 0 && cmd->arity != c->argc-3) ||
(c->argc-3 < -cmd->arity))
{
addReplyErrorFormat(c,"wrong number of arguments for '%s' command", cmd->fullname);
return;
}
int idx;
int result = ACLCheckAllUserCommandPerm(u, cmd, c->argv + 3, c->argc - 3, &idx);
/* Notice that a variant of this code also exists on getAclErrorMessage so
* it also needs to be updated when this changes. */
if (result != ACL_OK) {
sds err = sdsempty();
if (result == ACL_DENIED_CMD) {
err = sdscatfmt(err, "This user has no permissions to run "
"the '%s' command", c->cmd->fullname);
"the '%s' command", cmd->fullname);
} else if (result == ACL_DENIED_KEY) {
err = sdscatfmt(err, "This user has no permissions to access "
"the '%s' key", c->argv[idx + 3]->ptr);
......
......@@ -813,10 +813,10 @@ int openNewIncrAofForAppend(void) {
* AOFs has not reached the limit threshold.
* */
#define AOF_REWRITE_LIMITE_THRESHOLD 3
#define AOF_REWRITE_LIMITE_NAX_MINUTES 60 /* 1 hour */
#define AOF_REWRITE_LIMITE_MAX_MINUTES 60 /* 1 hour */
int aofRewriteLimited(void) {
int limit = 0;
static int limit_deley_minutes = 0;
static int limit_delay_minutes = 0;
static time_t next_rewrite_time = 0;
unsigned long incr_aof_num = listLength(server.aof_manifest->incr_aof_list);
......@@ -824,25 +824,25 @@ int aofRewriteLimited(void) {
if (server.unixtime < next_rewrite_time) {
limit = 1;
} else {
if (limit_deley_minutes == 0) {
if (limit_delay_minutes == 0) {
limit = 1;
limit_deley_minutes = 1;
limit_delay_minutes = 1;
} else {
limit_deley_minutes *= 2;
limit_delay_minutes *= 2;
}
if (limit_deley_minutes > AOF_REWRITE_LIMITE_NAX_MINUTES) {
limit_deley_minutes = AOF_REWRITE_LIMITE_NAX_MINUTES;
if (limit_delay_minutes > AOF_REWRITE_LIMITE_MAX_MINUTES) {
limit_delay_minutes = AOF_REWRITE_LIMITE_MAX_MINUTES;
}
next_rewrite_time = server.unixtime + limit_deley_minutes * 60;
next_rewrite_time = server.unixtime + limit_delay_minutes * 60;
serverLog(LL_WARNING,
"Background AOF rewrite has repeatedly failed %ld times and triggered the limit, will retry in %d minutes",
incr_aof_num, limit_deley_minutes);
incr_aof_num, limit_delay_minutes);
}
} else {
limit_deley_minutes = 0;
limit_delay_minutes = 0;
next_rewrite_time = 0;
}
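The renamed `limit_delay_minutes` implements a doubling backoff capped at `AOF_REWRITE_LIMITE_MAX_MINUTES`: once the limit trips, successive failed rewrites are retried after 1, 2, 4, ... minutes, flattening out at 60. A standalone sketch of the schedule (a simplified model of the logic above, not a drop-in):

    #include <stdio.h>

    /* Doubles on each consecutive failure, capped at 60 minutes;
     * callers reset *delay to 0 on success. */
    static int next_delay_minutes(int *delay) {
        *delay = (*delay == 0) ? 1 : *delay * 2;
        if (*delay > 60) *delay = 60;
        return *delay;
    }

    int main(void) {
        int delay = 0;
        for (int failures = 1; failures <= 8; failures++)
            printf("failure %d -> retry in %d min\n",
                   failures, next_delay_minutes(&delay));
        /* 1, 2, 4, 8, 16, 32, 60, 60 */
        return 0;
    }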
......@@ -2142,19 +2142,9 @@ static int rewriteFunctions(rio *aof) {
dictEntry *entry = NULL;
while ((entry = dictNext(iter))) {
functionLibInfo *li = dictGetVal(entry);
if (li->desc) {
if (rioWrite(aof, "*7\r\n", 4) == 0) goto werr;
} else {
if (rioWrite(aof, "*5\r\n", 4) == 0) goto werr;
}
if (rioWrite(aof, "*3\r\n", 4) == 0) goto werr;
char function_load[] = "$8\r\nFUNCTION\r\n$4\r\nLOAD\r\n";
if (rioWrite(aof, function_load, sizeof(function_load) - 1) == 0) goto werr;
if (rioWriteBulkString(aof, li->ei->name, sdslen(li->ei->name)) == 0) goto werr;
if (rioWriteBulkString(aof, li->name, sdslen(li->name)) == 0) goto werr;
if (li->desc) {
if (rioWriteBulkString(aof, "description", 11) == 0) goto werr;
if (rioWriteBulkString(aof, li->desc, sdslen(li->desc)) == 0) goto werr;
}
if (rioWriteBulkString(aof, li->code, sdslen(li->code)) == 0) goto werr;
}
dictReleaseIterator(iter);
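With the description branch gone, every library is rewritten into the AOF as a fixed three-element command, FUNCTION LOAD <code>; in the 7.0 format the library's name travels inside the payload's shebang line, so the separate name and description bulk strings could be dropped. Illustrative RESP bytes for a hypothetical library (payload and length are made up):

    /* *3\r\n                                    three bulk strings
     * $8\r\nFUNCTION\r\n$4\r\nLOAD\r\n          written as one buffer above
     * $<len>\r\n#!lua name=mylib\n...\r\n       li->code as a bulk string
     */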
......
......@@ -478,19 +478,21 @@ int getBitfieldTypeFromArgument(client *c, robj *o, int *sign, int *bits) {
* so that the 'maxbit' bit can be addressed. The object is finally
* returned. Otherwise if the key holds a wrong type NULL is returned and
* an error is sent to the client. */
robj *lookupStringForBitCommand(client *c, uint64_t maxbit, int *created) {
robj *lookupStringForBitCommand(client *c, uint64_t maxbit, int *dirty) {
size_t byte = maxbit >> 3;
robj *o = lookupKeyWrite(c->db,c->argv[1]);
if (checkType(c,o,OBJ_STRING)) return NULL;
if (dirty) *dirty = 0;
if (o == NULL) {
if (created) *created = 1;
o = createObject(OBJ_STRING,sdsnewlen(NULL, byte+1));
dbAdd(c->db,c->argv[1],o);
if (dirty) *dirty = 1;
} else {
if (created) *created = 0;
o = dbUnshareStringValue(c->db,c->argv[1],o);
size_t oldlen = sdslen(o->ptr);
o->ptr = sdsgrowzero(o->ptr,byte+1);
if (dirty && oldlen != sdslen(o->ptr)) *dirty = 1;
}
return o;
}
......@@ -547,8 +549,8 @@ void setbitCommand(client *c) {
return;
}
int created;
if ((o = lookupStringForBitCommand(c,bitoffset,&created)) == NULL) return;
int dirty;
if ((o = lookupStringForBitCommand(c,bitoffset,&dirty)) == NULL) return;
/* Get current values */
byte = bitoffset >> 3;
......@@ -556,10 +558,10 @@ void setbitCommand(client *c) {
bit = 7 - (bitoffset & 0x7);
bitval = byteval & (1 << bit);
/* Either it is newly created, or the bit changes before and after.
/* Either it is newly created, changed length, or the bit changes before and after.
* Note that the bitval here is actually a decimal number.
* So we need to use `!!` to convert it to 0 or 1 for comparison. */
if (created || (!!bitval != on)) {
if (dirty || (!!bitval != on)) {
/* Update byte with new bit value. */
byteval &= ~(1 << bit);
byteval |= ((on & 0x1) << bit);
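The `created` to `dirty` rename widens the "something changed" signal: growing an existing value now counts too, so the write is propagated even when the written bit keeps its old value. A hedged example of the case the old check missed:

    /* Hypothetical session:
     *
     *   SET k "a"        value is 1 byte
     *   SETBIT k 100 0   bit 100 is already 0, but addressing it grows
     *                    the string to 13 bytes (byte = 100>>3 = 12)
     *
     * Previously created == 0 and the bit did not flip, so this was
     * treated as a no-op. Now oldlen != sdslen(o->ptr) sets *dirty = 1
     * and the length change is counted and propagated. */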
......@@ -1028,7 +1030,7 @@ struct bitfieldOp {
void bitfieldGeneric(client *c, int flags) {
robj *o;
uint64_t bitoffset;
int j, numops = 0, changes = 0, created = 0;
int j, numops = 0, changes = 0, dirty = 0;
struct bitfieldOp *ops = NULL; /* Array of ops to execute at end. */
int owtype = BFOVERFLOW_WRAP; /* Overflow type. */
int readonly = 1;
......@@ -1122,7 +1124,7 @@ void bitfieldGeneric(client *c, int flags) {
/* Lookup by making room up to the farthest bit reached by
* this operation. */
if ((o = lookupStringForBitCommand(c,
highest_write_offset,&created)) == NULL) {
highest_write_offset,&dirty)) == NULL) {
zfree(ops);
return;
}
......@@ -1172,7 +1174,7 @@ void bitfieldGeneric(client *c, int flags) {
setSignedBitfield(o->ptr,thisop->offset,
thisop->bits,newval);
if (created || (oldval != newval))
if (dirty || (oldval != newval))
changes++;
} else {
addReplyNull(c);
......@@ -1204,7 +1206,7 @@ void bitfieldGeneric(client *c, int flags) {
setUnsignedBitfield(o->ptr,thisop->offset,
thisop->bits,newval);
if (created || (oldval != newval))
if (dirty || (oldval != newval))
changes++;
} else {
addReplyNull(c);
......
......@@ -141,12 +141,7 @@ void processUnblockedClients(void) {
* the code is conceptually more correct this way. */
if (!(c->flags & CLIENT_BLOCKED)) {
/* If we have a queued command, execute it now. */
if (processPendingCommandsAndResetClient(c) == C_OK) {
/* Now process client if it has more data in it's buffer. */
if (c->querybuf && sdslen(c->querybuf) > 0) {
if (processInputBuffer(c) == C_ERR) c = NULL;
}
} else {
if (processPendingCommandAndInputBuffer(c) == C_ERR) {
c = NULL;
}
}
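The new `processPendingCommandAndInputBuffer()` folds the removed two-step sequence into one call. A hedged sketch of what such a helper would do, mirroring the inline code it replaces (the real body may differ):

    int processPendingCommandAndInputBuffer(client *c) {
        /* Execute the command that was queued while the client was blocked. */
        if (processPendingCommandsAndResetClient(c) == C_ERR) return C_ERR;
        /* The query buffer may already hold more complete commands;
         * parse and run them now. */
        if (c->querybuf && sdslen(c->querybuf) > 0)
            return processInputBuffer(c);
        return C_OK;
    }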
......@@ -204,7 +199,7 @@ void unblockClient(client *c) {
* we do not do it immediately after the command returns (when the
* client got blocked) in order to be still able to access the argument
* vector from module callbacks and updateStatsOnUnblock. */
if (c->btype != BLOCKED_POSTPONE) {
if (c->btype != BLOCKED_POSTPONE && c->btype != BLOCKED_SHUTDOWN) {
freeClientOriginalArgv(c);
resetClient(c);
}
......@@ -288,25 +283,24 @@ void disconnectAllBlockedClients(void) {
* when there may be clients blocked on a list key, and there may be new
* data to fetch (the key is ready). */
void serveClientsBlockedOnListKey(robj *o, readyList *rl) {
/* Optimization: If no clients are in type BLOCKED_LIST,
* we can skip this loop. */
if (!server.blocked_clients_by_type[BLOCKED_LIST]) return;
/* We serve clients in the same order they blocked for
* this key, from the first blocked to the last. */
dictEntry *de = dictFind(rl->db->blocking_keys,rl->key);
if (de) {
list *clients = dictGetVal(de);
int numclients = listLength(clients);
int deleted = 0;
while(numclients--) {
listNode *clientnode = listFirst(clients);
client *receiver = clientnode->value;
listNode *ln;
listIter li;
listRewind(clients,&li);
if (receiver->btype != BLOCKED_LIST) {
/* Put at the tail, so that at the next call
* we'll not run into it again. */
listRotateHeadToTail(clients);
continue;
}
while((ln = listNext(&li))) {
client *receiver = listNodeValue(ln);
if (receiver->btype != BLOCKED_LIST) continue;
int deleted = 0;
robj *dstkey = receiver->bpop.target;
int wherefrom = receiver->bpop.blockpos.wherefrom;
int whereto = receiver->bpop.blockpos.whereto;
......@@ -342,25 +336,24 @@ void serveClientsBlockedOnListKey(robj *o, readyList *rl) {
* when there may be clients blocked on a sorted set key, and there may be new
* data to fetch (the key is ready). */
void serveClientsBlockedOnSortedSetKey(robj *o, readyList *rl) {
/* Optimization: If no clients are in type BLOCKED_ZSET,
* we can skip this loop. */
if (!server.blocked_clients_by_type[BLOCKED_ZSET]) return;
/* We serve clients in the same order they blocked for
* this key, from the first blocked to the last. */
dictEntry *de = dictFind(rl->db->blocking_keys,rl->key);
if (de) {
list *clients = dictGetVal(de);
int numclients = listLength(clients);
int deleted = 0;
while (numclients--) {
listNode *clientnode = listFirst(clients);
client *receiver = clientnode->value;
listNode *ln;
listIter li;
listRewind(clients,&li);
if (receiver->btype != BLOCKED_ZSET) {
/* Put at the tail, so that at the next call
* we'll not run into it again. */
listRotateHeadToTail(clients);
continue;
}
while((ln = listNext(&li))) {
client *receiver = listNodeValue(ln);
if (receiver->btype != BLOCKED_ZSET) continue;
int deleted = 0;
long llen = zsetLength(o);
long count = receiver->bpop.count;
int where = receiver->bpop.blockpos.wherefrom;
......@@ -407,6 +400,10 @@ void serveClientsBlockedOnSortedSetKey(robj *o, readyList *rl) {
* when there may be clients blocked on a stream key, and there may be new
* data to fetch (the key is ready). */
void serveClientsBlockedOnStreamKey(robj *o, readyList *rl) {
/* Optimization: If no clients are in type BLOCKED_STREAM,
* we can skip this loop. */
if (!server.blocked_clients_by_type[BLOCKED_STREAM]) return;
dictEntry *de = dictFind(rl->db->blocking_keys,rl->key);
stream *s = o->ptr;
......@@ -520,30 +517,21 @@ unblock_receiver:
* see if the key is really able to serve the client, and in that case,
* unblock it. */
void serveClientsBlockedOnKeyByModule(readyList *rl) {
dictEntry *de;
/* Optimization: If no clients are in type BLOCKED_MODULE,
* we can skip this loop. */
if (!server.blocked_clients_by_type[BLOCKED_MODULE]) return;
/* We serve clients in the same order they blocked for
* this key, from the first blocked to the last. */
de = dictFind(rl->db->blocking_keys,rl->key);
dictEntry *de = dictFind(rl->db->blocking_keys,rl->key);
if (de) {
list *clients = dictGetVal(de);
int numclients = listLength(clients);
while(numclients--) {
listNode *clientnode = listFirst(clients);
client *receiver = clientnode->value;
/* Put at the tail, so that at the next call
* we'll not run into it again: clients here may not be
* ready to be served, so they'll remain in the list
* sometimes. We want also be able to skip clients that are
* not blocked for the MODULE type safely. */
listRotateHeadToTail(clients);
listNode *ln;
listIter li;
listRewind(clients,&li);
while((ln = listNext(&li))) {
client *receiver = listNodeValue(ln);
if (receiver->btype != BLOCKED_MODULE) continue;
/* Note that if *this* client cannot be served by this key,
......@@ -566,6 +554,49 @@ void serveClientsBlockedOnKeyByModule(readyList *rl) {
}
}
/* Helper function for handleClientsBlockedOnKeys(). This function is called
* when there may be clients blocked, via XREADGROUP, on an existing stream which
* was deleted. We need to unblock the clients in that case.
* The idea is that a client that is blocked via XREADGROUP is different from
* any other blocking type in the sense that it depends on the existence of both
* the key and the group. Even if the key is deleted and then revived with XADD
* it won't help any clients blocked on XREADGROUP because the group no longer
* exists, so they would fail with -NOGROUP anyway.
* The conclusion is that it's better to unblock these clients (with an error) upon
* the deletion of the key, rather than waiting for the first XADD. */
void unblockDeletedStreamReadgroupClients(readyList *rl) {
/* Optimization: If no clients are in type BLOCKED_STREAM,
* we can skip this loop. */
if (!server.blocked_clients_by_type[BLOCKED_STREAM]) return;
/* We serve clients in the same order they blocked for
* this key, from the first blocked to the last. */
dictEntry *de = dictFind(rl->db->blocking_keys,rl->key);
if (de) {
list *clients = dictGetVal(de);
listNode *ln;
listIter li;
listRewind(clients,&li);
while((ln = listNext(&li))) {
client *receiver = listNodeValue(ln);
if (receiver->btype != BLOCKED_STREAM || !receiver->bpop.xread_group)
continue;
long long prev_error_replies = server.stat_total_error_replies;
client *old_client = server.current_client;
server.current_client = receiver;
monotime replyTimer;
elapsedStart(&replyTimer);
addReplyError(receiver, "-UNBLOCKED the stream key no longer exists");
updateStatsOnUnblock(receiver, 0, elapsedUs(replyTimer), server.stat_total_error_replies != prev_error_replies);
unblockClient(receiver);
afterCommand(receiver);
server.current_client = old_client;
}
}
}
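A hedged timeline showing when the helper above fires:

    /* client A:  XADD s '*' f v
     *            XGROUP CREATE s g 0
     * client B:  XREADGROUP GROUP g c BLOCK 0 STREAMS s '>'   (blocks)
     * client A:  DEL s
     *
     * DEL signals the key as ready; handleClientsBlockedOnKeys() no longer
     * finds a stream object and calls unblockDeletedStreamReadgroupClients(),
     * so client B immediately receives
     * "-UNBLOCKED the stream key no longer exists"
     * instead of waiting for an XADD that could never satisfy the
     * now-deleted group. */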
/* This function should be called by Redis every time a single command,
* a MULTI/EXEC block, or a Lua script, terminated its execution after
* being called by a client. It handles serving clients blocked in
......@@ -624,17 +655,27 @@ void handleClientsBlockedOnKeys(void) {
/* Serve clients blocked on the key. */
robj *o = lookupKeyReadWithFlags(rl->db, rl->key, LOOKUP_NONOTIFY | LOOKUP_NOSTATS);
if (o != NULL) {
if (o->type == OBJ_LIST)
int objtype = o->type;
if (objtype == OBJ_LIST)
serveClientsBlockedOnListKey(o,rl);
else if (o->type == OBJ_ZSET)
else if (objtype == OBJ_ZSET)
serveClientsBlockedOnSortedSetKey(o,rl);
else if (o->type == OBJ_STREAM)
else if (objtype == OBJ_STREAM)
serveClientsBlockedOnStreamKey(o,rl);
/* We want to serve clients blocked on module keys
* regardless of the object type: we don't know what the
* module is trying to accomplish right now. */
serveClientsBlockedOnKeyByModule(rl);
/* If we have XREADGROUP clients blocked on this key, and
* the key is not a stream, it must mean that the key was
* overwritten by either SET or something like
* (MULTI, DEL key, SADD key e, EXEC).
* In this case we need to unblock all these clients. */
if (objtype != OBJ_STREAM)
unblockDeletedStreamReadgroupClients(rl);
} else {
/* Unblock all XREADGROUP clients of this deleted key */
unblockDeletedStreamReadgroupClients(rl);
/* Edge case: If lookupKeyReadWithFlags decides to expire the key we have to
* take care of the propagation here, because afterCommand wasn't called */
if (server.also_propagate.numops > 0)
......@@ -823,4 +864,3 @@ void signalKeyAsReady(redisDb *db, robj *key, int type) {
incrRefCount(key);
serverAssert(dictAdd(db->ready_keys,key,NULL) == DICT_OK);
}
......@@ -525,3 +525,18 @@ CallReply *callReplyCreate(sds reply, list *deferred_error_list, void *private_d
res->deferred_error_list = deferred_error_list;
return res;
}
/* Create a new CallReply struct from the reply blob representing an error message.
* Automatically creates the deferred_error_list and sets a copy of the reply in it.
* Refer to callReplyCreate for a detailed explanation. */
CallReply *callReplyCreateError(sds reply, void *private_data) {
sds err_buff = reply;
if (err_buff[0] != '-') {
err_buff = sdscatfmt(sdsempty(), "-ERR %S\r\n", reply);
sdsfree(reply);
}
list *deferred_error_list = listCreate();
listSetFreeMethod(deferred_error_list, (void (*)(void*))sdsfree);
listAddNodeTail(deferred_error_list, sdsnew(err_buff));
return callReplyCreate(err_buff, deferred_error_list, private_data);
}
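A short usage sketch for the new constructor; the caller is hypothetical:

    /* Wrap a bare message as an error reply. Input without a leading '-'
     * is reformatted as "-ERR <msg>\r\n"; ownership of `msg` passes to
     * the CallReply either way. */
    CallReply *makeScriptError(void *private_data) {
        sds msg = sdsnew("Blocked command called from script");
        return callReplyCreateError(msg, private_data);
    }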
......@@ -35,6 +35,7 @@
typedef struct CallReply CallReply;
CallReply *callReplyCreate(sds reply, list *deferred_error_list, void *private_data);
CallReply *callReplyCreateError(sds reply, void *private_data);
int callReplyType(CallReply *rep);
const char *callReplyGetString(CallReply *rep, size_t *len);
long long callReplyGetLongLong(CallReply *rep);
......
......@@ -299,7 +299,7 @@ static sds percentDecode(const char *pe, size_t len) {
}
/* Parse a URI and extract the server connection information.
* URI scheme is based on the the provisional specification[1] excluding support
* URI scheme is based on the provisional specification[1] excluding support
* for query parameters. Valid URIs are:
* scheme: "redis://"
* authority: [[<username> ":"] <password> "@"] [<hostname> [":" <port>]]
......@@ -371,3 +371,28 @@ void freeCliConnInfo(cliConnInfo connInfo){
if (connInfo.auth) sdsfree(connInfo.auth);
if (connInfo.user) sdsfree(connInfo.user);
}
/*
* Escape a Unicode string for JSON output (--json), following RFC 7159:
* https://datatracker.ietf.org/doc/html/rfc7159#section-7
*/
sds escapeJsonString(sds s, const char *p, size_t len) {
s = sdscatlen(s,"\"",1);
while(len--) {
switch(*p) {
case '\\':
case '"':
s = sdscatprintf(s,"\\%c",*p);
break;
case '\n': s = sdscatlen(s,"\\n",2); break;
case '\f': s = sdscatlen(s,"\\f",2); break;
case '\r': s = sdscatlen(s,"\\r",2); break;
case '\t': s = sdscatlen(s,"\\t",2); break;
case '\b': s = sdscatlen(s,"\\b",2); break;
default:
s = sdscatprintf(s,(*p >= 0 && *p <= 0x1f) ? "\\u%04x" : "%c",*p);
}
p++;
}
return sdscatlen(s,"\"",1);
}
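A quick check of the escaper above, with a hypothetical driver:

    /* Control characters without a shorthand escape come out as \u00XX,
     * per RFC 7159; the result includes the surrounding double quotes. */
    const char *raw = "tab\t quote\" ctrl\x01";
    sds out = escapeJsonString(sdsempty(), raw, strlen(raw));
    /* out: "tab\t quote\" ctrl\u0001" */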
......@@ -48,4 +48,7 @@ sds unquoteCString(char *str);
void parseRedisUri(const char *uri, const char* tool_name, cliConnInfo *connInfo, int *tls_flag);
void freeCliConnInfo(cliConnInfo connInfo);
sds escapeJsonString(sds s, const char *p, size_t len);
#endif /* __CLICOMMON_H */
......@@ -56,7 +56,6 @@ void clusterSendFailoverAuthIfNeeded(clusterNode *node, clusterMsg *request);
void clusterUpdateState(void);
int clusterNodeGetSlotBit(clusterNode *n, int slot);
sds clusterGenNodesDescription(int filter, int use_pport);
clusterNode *clusterLookupNode(const char *name);
list *clusterGetNodesServingMySlots(clusterNode *node);
int clusterNodeAddSlave(clusterNode *master, clusterNode *slave);
int clusterAddSlot(clusterNode *n, int slot);
......@@ -74,6 +73,8 @@ void clusterCloseAllSlots(void);
void clusterSetNodeAsMaster(clusterNode *n);
void clusterDelNode(clusterNode *delnode);
sds representClusterNodeFlags(sds ci, uint16_t flags);
sds representSlotInfo(sds ci, uint16_t *slot_info_pairs, int slot_info_pairs_count);
void clusterFreeNodesSlotsInfo(clusterNode *n);
uint64_t clusterGetMaxEpoch(void);
int clusterBumpConfigEpochWithoutConsensus(void);
void moduleCallClusterReceivers(const char *sender_id, uint64_t module_id, uint8_t type, const unsigned char *payload, uint32_t len);
......@@ -210,7 +211,11 @@ int clusterLoadConfig(char *filename) {
}
/* Create this node if it does not exist */
n = clusterLookupNode(argv[0]);
if (verifyClusterNodeId(argv[0], sdslen(argv[0])) == C_ERR) {
sdsfreesplitres(argv, argc);
goto fmterr;
}
n = clusterLookupNode(argv[0], sdslen(argv[0]));
if (!n) {
n = createClusterNode(argv[0],0);
clusterAddNode(n);
......@@ -218,6 +223,17 @@ int clusterLoadConfig(char *filename) {
/* Format for the node address information:
* ip:port[@cport][,hostname] */
/* Hostname is an optional argument that defines the endpoint
* that can be reported to clients instead of IP. */
char *hostname = strchr(argv[1], ',');
if (hostname) {
*hostname = '\0';
hostname++;
n->hostname = sdscpy(n->hostname, hostname);
} else if (sdslen(n->hostname) != 0) {
sdsclear(n->hostname);
}
/* Address and port */
if ((p = strrchr(argv[1],':')) == NULL) {
sdsfreesplitres(argv,argc);
......@@ -237,17 +253,6 @@ int clusterLoadConfig(char *filename) {
* base port. */
n->cport = busp ? atoi(busp) : n->port + CLUSTER_PORT_INCR;
/* Hostname is an optional argument that defines the endpoint
* that can be reported to clients instead of IP. */
char *hostname = strchr(p, ',');
if (hostname) {
*hostname = '\0';
hostname++;
n->hostname = sdscpy(n->hostname, hostname);
} else if (sdslen(n->hostname) != 0) {
sdsclear(n->hostname);
}
/* The plaintext port for client in a TLS cluster (n->pport) is not
* stored in nodes.conf. It is received later over the bus protocol. */
......@@ -286,7 +291,11 @@ int clusterLoadConfig(char *filename) {
/* Get master if any. Set the master and populate master's
* slave list. */
if (argv[3][0] != '-') {
master = clusterLookupNode(argv[3]);
if (verifyClusterNodeId(argv[3], sdslen(argv[3])) == C_ERR) {
sdsfreesplitres(argv, argc);
goto fmterr;
}
master = clusterLookupNode(argv[3], sdslen(argv[3]));
if (!master) {
master = createClusterNode(argv[3],0);
clusterAddNode(master);
......@@ -322,7 +331,14 @@ int clusterLoadConfig(char *filename) {
goto fmterr;
}
p += 3;
cn = clusterLookupNode(p);
char *pr = strchr(p, ']');
if (pr == NULL) {
sdsfreesplitres(argv, argc);
goto fmterr;
}
size_t node_len = pr - p;
if (verifyClusterNodeId(p, node_len) == C_ERR) {
sdsfreesplitres(argv, argc);
goto fmterr;
}
cn = clusterLookupNode(p, CLUSTER_NAMELEN);
if (!cn) {
cn = createClusterNode(p,0);
clusterAddNode(cn);
......@@ -796,7 +812,7 @@ void setClusterNodeToInboundClusterLink(clusterNode *node, clusterLink *link) {
* we would always process the disconnection of the existing inbound link before
accepting a new inbound link. Therefore, it's possible to have more than
* one inbound link from the same node at the same time. */
serverLog(LL_DEBUG, "Replacing inbound link fd %d from node %s with fd %d",
serverLog(LL_DEBUG, "Replacing inbound link fd %d from node %.40s with fd %d",
node->inbound_link->conn->fd, node->name, link->conn->fd);
}
node->inbound_link = link;
......@@ -942,7 +958,9 @@ clusterNode *createClusterNode(char *nodename, int flags) {
node->configEpoch = 0;
node->flags = flags;
memset(node->slots,0,sizeof(node->slots));
node->slots_info = NULL;
node->slot_info_pairs = NULL;
node->slot_info_pairs_count = 0;
node->slot_info_pairs_alloc = 0;
node->numslots = 0;
node->numslaves = 0;
node->slaves = NULL;
......@@ -1178,12 +1196,23 @@ void clusterDelNode(clusterNode *delnode) {
freeClusterNode(delnode);
}
/* Node lookup by name */
clusterNode *clusterLookupNode(const char *name) {
sds s = sdsnewlen(name, CLUSTER_NAMELEN);
dictEntry *de;
/* Cluster node sanity check. Returns C_OK if the node id
* is valid and C_ERR otherwise. */
int verifyClusterNodeId(const char *name, int length) {
if (length != CLUSTER_NAMELEN) return C_ERR;
for (int i = 0; i < length; i++) {
if (name[i] >= 'a' && name[i] <= 'z') continue;
if (name[i] >= '0' && name[i] <= '9') continue;
return C_ERR;
}
return C_OK;
}
de = dictFind(server.cluster->nodes,s);
/* Node lookup by name */
clusterNode *clusterLookupNode(const char *name, int length) {
if (verifyClusterNodeId(name, length) != C_OK) return NULL;
sds s = sdsnewlen(name, length);
dictEntry *de = dictFind(server.cluster->nodes, s);
sdsfree(s);
if (de == NULL) return NULL;
return dictGetVal(de);
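A hedged sketch of the new lookup contract (the caller and values are assumptions): callers now pass an explicit length, and an ID that fails verifyClusterNodeId() returns NULL instead of being interned into the nodes dict:
/* Hypothetical caller: a short (non 40-char) or non-alphanumeric ID
 * never reaches the dict lookup. */
clusterNode *n = clusterLookupNode("not-a-node-id", 13);
serverAssert(n == NULL);   /* length != CLUSTER_NAMELEN */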
......@@ -1599,7 +1628,7 @@ int clusterStartHandshake(char *ip, int port, int cport) {
void clusterProcessGossipSection(clusterMsg *hdr, clusterLink *link) {
uint16_t count = ntohs(hdr->count);
clusterMsgDataGossip *g = (clusterMsgDataGossip*) hdr->data.ping.gossip;
clusterNode *sender = link->node ? link->node : clusterLookupNode(hdr->sender);
clusterNode *sender = link->node ? link->node : clusterLookupNode(hdr->sender, CLUSTER_NAMELEN);
while(count--) {
uint16_t flags = ntohs(g->flags);
......@@ -1618,7 +1647,7 @@ void clusterProcessGossipSection(clusterMsg *hdr, clusterLink *link) {
}
/* Update our state according to the gossip sections */
node = clusterLookupNode(g->nodename);
node = clusterLookupNode(g->nodename, CLUSTER_NAMELEN);
if (node) {
/* We already know this node.
Handle failure reports, only when the sender is a master. */
......@@ -1895,6 +1924,17 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc
clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG|
CLUSTER_TODO_UPDATE_STATE|
CLUSTER_TODO_FSYNC_CONFIG);
} else if (myself->slaveof && myself->slaveof->slaveof) {
/* Safeguard against sub-replicas. A replica's master can turn itself
* into a replica if its last slot is removed. If no other node takes
* over the slot, there is nothing else to trigger replica migration. */
serverLog(LL_WARNING,
"I'm a sub-replica! Reconfiguring myself as a replica of grandmaster %.40s",
myself->slaveof->slaveof->name);
clusterSetMaster(myself->slaveof->slaveof);
clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG|
CLUSTER_TODO_UPDATE_STATE|
CLUSTER_TODO_FSYNC_CONFIG);
} else if (dirty_slots_count) {
/* If we are here, we received an update message which removed
* ownership for certain slots we still have keys about, but still
......@@ -1970,7 +2010,7 @@ int writeHostnamePingExt(clusterMsgPingExt **cursor) {
/* We previously validated the extensions, so this function just needs to
* process them. */
void clusterProcessPingExtensions(clusterMsg *hdr, clusterLink *link) {
clusterNode *sender = link->node ? link->node : clusterLookupNode(hdr->sender);
clusterNode *sender = link->node ? link->node : clusterLookupNode(hdr->sender, CLUSTER_NAMELEN);
char *ext_hostname = NULL;
uint16_t extensions = ntohs(hdr->extensions);
/* Loop through all the extensions and process them */
......@@ -2003,7 +2043,7 @@ static clusterNode *getNodeFromLinkAndMsg(clusterLink *link, clusterMsg *hdr) {
sender = link->node;
} else {
/* Otherwise, fetch sender based on the message */
sender = clusterLookupNode(hdr->sender);
sender = clusterLookupNode(hdr->sender, CLUSTER_NAMELEN);
/* We know the sender node but haven't associated it with the link. This must
* be an inbound link, because only for inbound links did we not know which
* node to associate with them when they were created. */
......@@ -2213,7 +2253,7 @@ int clusterProcessPacket(clusterLink *link) {
if (type == CLUSTERMSG_TYPE_PING || type == CLUSTERMSG_TYPE_PONG ||
type == CLUSTERMSG_TYPE_MEET)
{
serverLog(LL_DEBUG,"%s packet received: %s",
serverLog(LL_DEBUG,"%s packet received: %.40s",
clusterGetMessageTypeString(type),
link->node ? link->node->name : "NULL");
if (!link->inbound) {
......@@ -2314,7 +2354,7 @@ int clusterProcessPacket(clusterLink *link) {
clusterSetNodeAsMaster(sender);
} else {
/* Node is a slave. */
clusterNode *master = clusterLookupNode(hdr->slaveof);
clusterNode *master = clusterLookupNode(hdr->slaveof, CLUSTER_NAMELEN);
if (nodeIsMaster(sender)) {
/* Master turned into a slave! Reconfigure the node. */
......@@ -2429,7 +2469,7 @@ int clusterProcessPacket(clusterLink *link) {
clusterNode *failing;
if (sender) {
failing = clusterLookupNode(hdr->data.fail.about.nodename);
failing = clusterLookupNode(hdr->data.fail.about.nodename, CLUSTER_NAMELEN);
if (failing &&
!(failing->flags & (CLUSTER_NODE_FAIL|CLUSTER_NODE_MYSELF)))
{
......@@ -2517,7 +2557,7 @@ int clusterProcessPacket(clusterLink *link) {
ntohu64(hdr->data.update.nodecfg.configEpoch);
if (!sender) return 1; /* We don't know the sender. */
n = clusterLookupNode(hdr->data.update.nodecfg.nodename);
n = clusterLookupNode(hdr->data.update.nodecfg.nodename, CLUSTER_NAMELEN);
if (!n) return 1; /* We don't know the reported node. */
if (n->configEpoch >= reportedConfigEpoch) return 1; /* Nothing new. */
......@@ -3148,7 +3188,7 @@ int clusterSendModuleMessageToTarget(const char *target, uint64_t module_id, uin
clusterNode *node = NULL;
if (target != NULL) {
node = clusterLookupNode(target);
node = clusterLookupNode(target, strlen(target));
if (node == NULL || node->link == NULL) return C_ERR;
}
......@@ -4561,6 +4601,22 @@ sds representClusterNodeFlags(sds ci, uint16_t flags) {
return ci;
}
/* Concatenate the slot ownership information to the given SDS string 'ci'.
* If the owned slots form a contiguous block, they are represented as a
* start-end pair; otherwise each slot is added separately. */
sds representSlotInfo(sds ci, uint16_t *slot_info_pairs, int slot_info_pairs_count) {
for (int i = 0; i < slot_info_pairs_count; i += 2) {
/* Slot ids fit in an int, matching sdscatfmt's %i specifier. */
int start = slot_info_pairs[i];
int end = slot_info_pairs[i+1];
if (start == end) {
ci = sdscatfmt(ci, " %i", start);
} else {
ci = sdscatfmt(ci, " %i-%i", start, end);
}
}
return ci;
}
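A quick worked example with assumed values, showing how ranges and single slots render:
/* Illustrative only: not part of the patch. */
uint16_t pairs[] = {0, 100, 4096, 4096};
sds ci = representSlotInfo(sdsempty(), pairs, 4);
/* ci now holds " 0-100 4096" */
sdsfree(ci);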
/* Generate a csv-alike representation of the specified cluster node.
* See clusterGenNodesDescription() top comment for more information.
*
......@@ -4609,8 +4665,8 @@ sds clusterGenNodeDescription(clusterNode *node, int use_pport) {
/* Slots served by this instance. If we already have the slots info,
* append it directly; otherwise, generate it from the slots bitmap. */
if (node->slots_info) {
ci = sdscatsds(ci, node->slots_info);
if (node->slot_info_pairs) {
ci = representSlotInfo(ci, node->slot_info_pairs, node->slot_info_pairs_count);
} else if (node->numslots > 0) {
start = -1;
for (j = 0; j < CLUSTER_SLOTS; j++) {
......@@ -4670,12 +4726,15 @@ void clusterGenNodesSlotsInfo(int filter) {
* or end of slot. */
if (i == CLUSTER_SLOTS || n != server.cluster->slots[i]) {
if (!(n->flags & filter)) {
if (n->slots_info == NULL) n->slots_info = sdsempty();
if (start == i-1) {
n->slots_info = sdscatfmt(n->slots_info," %i",start);
} else {
n->slots_info = sdscatfmt(n->slots_info," %i-%i",start,i-1);
if (n->slot_info_pairs_count+2 > n->slot_info_pairs_alloc) {
if (n->slot_info_pairs_alloc == 0)
n->slot_info_pairs_alloc = 8;
else
n->slot_info_pairs_alloc *= 2;
n->slot_info_pairs = zrealloc(n->slot_info_pairs, n->slot_info_pairs_alloc * sizeof(uint16_t));
}
n->slot_info_pairs[n->slot_info_pairs_count++] = start;
n->slot_info_pairs[n->slot_info_pairs_count++] = i-1;
}
if (i == CLUSTER_SLOTS) break;
n = server.cluster->slots[i];
......@@ -4684,6 +4743,13 @@ void clusterGenNodesSlotsInfo(int filter) {
}
}
void clusterFreeNodesSlotsInfo(clusterNode *n) {
zfree(n->slot_info_pairs);
n->slot_info_pairs = NULL;
n->slot_info_pairs_count = 0;
n->slot_info_pairs_alloc = 0;
}
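The allocator in clusterGenNodesSlotsInfo() grows the pair array geometrically; a minimal sketch of the same policy (the helper name is an assumption, not in the patch):
/* Capacity goes 0 -> 8 -> 16 -> 32 ..., so appending one (start, end)
 * pair is amortized O(1). */
static void ensurePairCapacity(clusterNode *n) {
    if (n->slot_info_pairs_count + 2 > n->slot_info_pairs_alloc) {
        n->slot_info_pairs_alloc =
            n->slot_info_pairs_alloc ? n->slot_info_pairs_alloc * 2 : 8;
        n->slot_info_pairs = zrealloc(n->slot_info_pairs,
            n->slot_info_pairs_alloc * sizeof(uint16_t));
    }
}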
/* Generate a csv-alike representation of the nodes we are aware of,
* including the "myself" node, and return an SDS string containing the
* representation (it is up to the caller to free it).
......@@ -4718,10 +4784,7 @@ sds clusterGenNodesDescription(int filter, int use_pport) {
ci = sdscatlen(ci,"\n",1);
/* Release slots info. */
if (node->slots_info) {
sdsfree(node->slots_info);
node->slots_info = NULL;
}
clusterFreeNodesSlotsInfo(node);
}
dictReleaseIterator(di);
return ci;
......@@ -4942,6 +5005,136 @@ void addNodeReplyForClusterSlot(client *c, clusterNode *node, int start_slot, in
setDeferredArrayLen(c, nested_replylen, nested_elements);
}
/* Add detailed information of a node to the output buffer of the given client. */
void addNodeDetailsToShardReply(client *c, clusterNode *node) {
int reply_count = 0;
void *node_replylen = addReplyDeferredLen(c);
addReplyBulkCString(c, "id");
addReplyBulkCBuffer(c, node->name, CLUSTER_NAMELEN);
reply_count++;
/* We use server.tls_cluster as a proxy for whether
* the remote port is the TLS port. */
int plaintext_port = server.tls_cluster ? node->pport : node->port;
int tls_port = server.tls_cluster ? node->port : 0;
if (plaintext_port) {
addReplyBulkCString(c, "port");
addReplyLongLong(c, plaintext_port);
reply_count++;
}
if (tls_port) {
addReplyBulkCString(c, "tls-port");
addReplyLongLong(c, tls_port);
reply_count++;
}
addReplyBulkCString(c, "ip");
addReplyBulkCString(c, node->ip);
reply_count++;
addReplyBulkCString(c, "endpoint");
addReplyBulkCString(c, getPreferredEndpoint(node));
reply_count++;
if (node->hostname) {
addReplyBulkCString(c, "hostname");
addReplyBulkCString(c, node->hostname);
reply_count++;
}
long long node_offset;
if (node->flags & CLUSTER_NODE_MYSELF) {
node_offset = nodeIsSlave(node) ? replicationGetSlaveOffset() : server.master_repl_offset;
} else {
node_offset = node->repl_offset;
}
addReplyBulkCString(c, "role");
addReplyBulkCString(c, nodeIsSlave(node) ? "replica" : "master");
reply_count++;
addReplyBulkCString(c, "replication-offset");
addReplyLongLong(c, node_offset);
reply_count++;
addReplyBulkCString(c, "health");
const char *health_msg = NULL;
if (nodeFailed(node)) {
health_msg = "fail";
} else if (nodeIsSlave(node) && node_offset == 0) {
health_msg = "loading";
} else {
health_msg = "online";
}
addReplyBulkCString(c, health_msg);
reply_count++;
setDeferredMapLen(c, node_replylen, reply_count);
}
/* Add the shard reply of a single shard based off the given primary node. */
void addShardReplyForClusterShards(client *c, clusterNode *node, uint16_t *slot_info_pairs, int slot_pairs_count) {
addReplyMapLen(c, 2);
addReplyBulkCString(c, "slots");
if (slot_info_pairs) {
serverAssert((slot_pairs_count % 2) == 0);
addReplyArrayLen(c, slot_pairs_count);
for (int i = 0; i < slot_pairs_count; i++)
addReplyBulkLongLong(c, (unsigned long)slot_info_pairs[i]);
} else {
/* If no slot info pair is provided, the node owns no slots */
addReplyArrayLen(c, 0);
}
addReplyBulkCString(c, "nodes");
list *nodes_for_slot = clusterGetNodesServingMySlots(node);
/* At least the provided node should be serving its slots */
serverAssert(nodes_for_slot);
addReplyArrayLen(c, listLength(nodes_for_slot));
if (listLength(nodes_for_slot) != 0) {
listIter li;
listNode *ln;
listRewind(nodes_for_slot, &li);
while ((ln = listNext(&li))) {
clusterNode *node = listNodeValue(ln);
addNodeDetailsToShardReply(c, node);
}
listRelease(nodes_for_slot);
}
}
/* Add to the output buffer of the given client an array of the slot (start, end)
* pairs owned by the shard, as well as the primary and the set of replica(s),
* along with detailed information about each node. */
void clusterReplyShards(client *c) {
void *shard_replylen = addReplyDeferredLen(c);
int shard_count = 0;
/* This call will add slot_info_pairs to all nodes */
clusterGenNodesSlotsInfo(0);
dictIterator *di = dictGetSafeIterator(server.cluster->nodes);
dictEntry *de;
/* Iterate over all the available nodes in the cluster and generate the
* shards response for each primary node. If the primary node doesn't own
* any slots, the response contains the node-related information and an
* empty slots array. */
while((de = dictNext(di)) != NULL) {
clusterNode *n = dictGetVal(de);
if (nodeIsSlave(n)) {
/* You can force a replica to own slots, even though it'll get reverted,
* so free the slot pairs here just in case. */
clusterFreeNodesSlotsInfo(n);
continue;
}
shard_count++;
/* n->slot_info_pairs is set to NULL when the node owns no slots. */
addShardReplyForClusterShards(c, n, n->slot_info_pairs, n->slot_info_pairs_count);
clusterFreeNodesSlotsInfo(n);
}
dictReleaseIterator(di);
setDeferredArrayLen(c, shard_replylen, shard_count);
}
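The resulting reply is an array of per-shard maps; an illustrative shape (all concrete values here are assumptions):
/* CLUSTER SHARDS
 * 1) 1) "slots"
 *    2) 1) (integer) 0
 *       2) (integer) 5460
 *    3) "nodes"
 *    4) 1) 1) "id"
 *          2) "9fad...40-char-node-id"
 *          3) "port" ... followed by ip, endpoint, role,
 *             "replication-offset" and "health", as built in
 *             addNodeDetailsToShardReply() above. */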
void clusterReplyMultiBulkSlots(client * c) {
/* Format: 1) 1) start slot
* 2) end slot
......@@ -5035,6 +5228,8 @@ void clusterCommand(client *c) {
"SLOTS",
" Return information about slots range mappings. Each range is made of:",
" start, end, master and replicas IP addresses, ports and ids",
"SHARDS",
" Return information about slot range mappings and the nodes associated with them.",
"LINKS",
" Return information about all network links between this node and its peers.",
" Output format is an array where each array element is a map containing attributes of a link",
......@@ -5084,6 +5279,9 @@ NULL
} else if (!strcasecmp(c->argv[1]->ptr,"slots") && c->argc == 2) {
/* CLUSTER SLOTS */
clusterReplyMultiBulkSlots(c);
} else if (!strcasecmp(c->argv[1]->ptr,"shards") && c->argc == 2) {
/* CLUSTER SHARDS */
clusterReplyShards(c);
} else if (!strcasecmp(c->argv[1]->ptr,"flushslots") && c->argc == 2) {
/* CLUSTER FLUSHSLOTS */
if (dictSize(server.db[0].dict) != 0) {
......@@ -5181,7 +5379,8 @@ NULL
addReplyErrorFormat(c,"I'm not the owner of hash slot %u",slot);
return;
}
if ((n = clusterLookupNode(c->argv[4]->ptr)) == NULL) {
n = clusterLookupNode(c->argv[4]->ptr, sdslen(c->argv[4]->ptr));
if (n == NULL) {
addReplyErrorFormat(c,"I don't know about node %s",
(char*)c->argv[4]->ptr);
return;
......@@ -5197,7 +5396,8 @@ NULL
"I'm already the owner of hash slot %u",slot);
return;
}
if ((n = clusterLookupNode(c->argv[4]->ptr)) == NULL) {
n = clusterLookupNode(c->argv[4]->ptr, sdslen(c->argv[4]->ptr));
if (n == NULL) {
addReplyErrorFormat(c,"I don't know about node %s",
(char*)c->argv[4]->ptr);
return;
......@@ -5213,8 +5413,7 @@ NULL
server.cluster->migrating_slots_to[slot] = NULL;
} else if (!strcasecmp(c->argv[3]->ptr,"node") && c->argc == 5) {
/* CLUSTER SETSLOT <SLOT> NODE <NODE ID> */
clusterNode *n = clusterLookupNode(c->argv[4]->ptr);
n = clusterLookupNode(c->argv[4]->ptr, sdslen(c->argv[4]->ptr));
if (!n) {
addReplyErrorFormat(c,"Unknown node %s",
(char*)c->argv[4]->ptr);
......@@ -5241,9 +5440,26 @@ NULL
server.cluster->migrating_slots_to[slot])
server.cluster->migrating_slots_to[slot] = NULL;
int slot_was_mine = server.cluster->slots[slot] == myself;
clusterDelSlot(slot);
clusterAddSlot(n,slot);
/* If we are a master left without slots, we should turn into a
* replica of the new master. */
if (slot_was_mine &&
n != myself &&
myself->numslots == 0 &&
server.cluster_allow_replica_migration)
{
serverLog(LL_WARNING,
"Configuration change detected. Reconfiguring myself "
"as a replica of %.40s", n->name);
clusterSetMaster(n);
clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG |
CLUSTER_TODO_UPDATE_STATE |
CLUSTER_TODO_FSYNC_CONFIG);
}
/* If this node was importing this slot, assigning the slot to
* itself also clears the importing status. */
if (n == myself &&
......@@ -5409,8 +5625,7 @@ NULL
}
} else if (!strcasecmp(c->argv[1]->ptr,"forget") && c->argc == 3) {
/* CLUSTER FORGET <NODE ID> */
clusterNode *n = clusterLookupNode(c->argv[2]->ptr);
clusterNode *n = clusterLookupNode(c->argv[2]->ptr, sdslen(c->argv[2]->ptr));
if (!n) {
addReplyErrorFormat(c,"Unknown node %s", (char*)c->argv[2]->ptr);
return;
......@@ -5428,9 +5643,8 @@ NULL
addReply(c,shared.ok);
} else if (!strcasecmp(c->argv[1]->ptr,"replicate") && c->argc == 3) {
/* CLUSTER REPLICATE <NODE ID> */
clusterNode *n = clusterLookupNode(c->argv[2]->ptr);
/* Lookup the specified node in our table. */
clusterNode *n = clusterLookupNode(c->argv[2]->ptr, sdslen(c->argv[2]->ptr));
if (!n) {
addReplyErrorFormat(c,"Unknown node %s", (char*)c->argv[2]->ptr);
return;
......@@ -5466,7 +5680,7 @@ NULL
} else if ((!strcasecmp(c->argv[1]->ptr,"slaves") ||
!strcasecmp(c->argv[1]->ptr,"replicas")) && c->argc == 3) {
/* CLUSTER SLAVES <NODE ID> */
clusterNode *n = clusterLookupNode(c->argv[2]->ptr);
clusterNode *n = clusterLookupNode(c->argv[2]->ptr, sdslen(c->argv[2]->ptr));
int j;
/* Lookup the specified node in our table. */
......@@ -5493,7 +5707,7 @@ NULL
c->argc == 3)
{
/* CLUSTER COUNT-FAILURE-REPORTS <NODE ID> */
clusterNode *n = clusterLookupNode(c->argv[2]->ptr);
clusterNode *n = clusterLookupNode(c->argv[2]->ptr, sdslen(c->argv[2]->ptr));
if (!n) {
addReplyErrorFormat(c,"Unknown node %s", (char*)c->argv[2]->ptr);
......
......@@ -118,7 +118,9 @@ typedef struct clusterNode {
int flags; /* CLUSTER_NODE_... */
uint64_t configEpoch; /* Last configEpoch observed for this node */
unsigned char slots[CLUSTER_SLOTS/8]; /* slots handled by this node */
sds slots_info; /* Slots info represented by string. */
uint16_t *slot_info_pairs; /* Slots info represented as (start, end) pairs stored consecutively. */
int slot_info_pairs_count; /* Used number of slots in slot_info_pairs */
int slot_info_pairs_alloc; /* Allocated number of slots in slot_info_pairs */
int numslots; /* Number of slots handled by this node */
int numslaves; /* Number of slave nodes, if this is a master */
struct clusterNode **slaves; /* pointers to slave nodes */
......@@ -375,7 +377,8 @@ void clusterInit(void);
void clusterCron(void);
void clusterBeforeSleep(void);
clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, int argc, int *hashslot, int *ask);
clusterNode *clusterLookupNode(const char *name);
int verifyClusterNodeId(const char *name, int length);
clusterNode *clusterLookupNode(const char *name, int length);
int clusterRedirectBlockedClientIfNeeded(client *c);
void clusterRedirectClient(client *c, clusterNode *n, int hashslot, int error_code);
void migrateCloseTimedoutSockets(void);
......
......@@ -602,6 +602,17 @@ struct redisCommandArg CLUSTER_SETSLOT_Args[] = {
{0}
};
/********** CLUSTER SHARDS ********************/
/* CLUSTER SHARDS history */
#define CLUSTER_SHARDS_History NULL
/* CLUSTER SHARDS tips */
const char *CLUSTER_SHARDS_tips[] = {
"nondeterministic_output",
NULL
};
/********** CLUSTER SLAVES ********************/
/* CLUSTER SLAVES history */
......@@ -624,7 +635,7 @@ struct redisCommandArg CLUSTER_SLAVES_Args[] = {
/* CLUSTER SLOTS history */
commandHistory CLUSTER_SLOTS_History[] = {
{"4.0.0","Added node IDs."},
{"7.0.0","Added additional networking metadata and added support for hostnames and unknown endpoints."},
{"7.0.0","Added additional networking metadata field."},
{0}
};
......@@ -660,8 +671,9 @@ struct redisCommand CLUSTER_Subcommands[] = {
{"saveconfig","Forces the node to save cluster state on disk","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_CLUSTER,CLUSTER_SAVECONFIG_History,CLUSTER_SAVECONFIG_tips,clusterCommand,2,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE,0},
{"set-config-epoch","Set the configuration epoch in a new node","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_CLUSTER,CLUSTER_SET_CONFIG_EPOCH_History,CLUSTER_SET_CONFIG_EPOCH_tips,clusterCommand,3,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE,0,.args=CLUSTER_SET_CONFIG_EPOCH_Args},
{"setslot","Bind a hash slot to a specific node","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_CLUSTER,CLUSTER_SETSLOT_History,CLUSTER_SETSLOT_tips,clusterCommand,-4,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE,0,.args=CLUSTER_SETSLOT_Args},
{"slaves","List replica nodes of the specified master node","O(1)","3.0.0",CMD_DOC_NONE,"`CLUSTER REPLICAS`","5.0.0",COMMAND_GROUP_CLUSTER,CLUSTER_SLAVES_History,CLUSTER_SLAVES_tips,clusterCommand,3,CMD_ADMIN|CMD_STALE,0,.args=CLUSTER_SLAVES_Args},
{"slots","Get array of Cluster slot to node mappings","O(N) where N is the total number of Cluster nodes","3.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_CLUSTER,CLUSTER_SLOTS_History,CLUSTER_SLOTS_tips,clusterCommand,2,CMD_STALE,0},
{"shards","Get array of cluster slots to node mappings","O(N) where N is the total number of cluster nodes","7.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_CLUSTER,CLUSTER_SHARDS_History,CLUSTER_SHARDS_tips,clusterCommand,2,CMD_STALE,0},
{"slaves","List replica nodes of the specified master node","O(1)","3.0.0",CMD_DOC_DEPRECATED,"`CLUSTER REPLICAS`","5.0.0",COMMAND_GROUP_CLUSTER,CLUSTER_SLAVES_History,CLUSTER_SLAVES_tips,clusterCommand,3,CMD_ADMIN|CMD_STALE,0,.args=CLUSTER_SLAVES_Args},
{"slots","Get array of Cluster slot to node mappings","O(N) where N is the total number of Cluster nodes","3.0.0",CMD_DOC_DEPRECATED,"`CLUSTER SHARDS`","7.0.0",COMMAND_GROUP_CLUSTER,CLUSTER_SLOTS_History,CLUSTER_SLOTS_tips,clusterCommand,2,CMD_STALE,0},
{0}
};
......@@ -871,7 +883,6 @@ struct redisCommandArg CLIENT_NO_EVICT_Args[] = {
/* CLIENT PAUSE history */
commandHistory CLIENT_PAUSE_History[] = {
{"3.2.10","Client pause prevents client pause and key eviction as well."},
{"6.2.0","`CLIENT PAUSE WRITE` mode added along with the `mode` option."},
{0}
};
......@@ -1312,7 +1323,7 @@ struct redisCommandArg MIGRATE_username_password_Subargs[] = {
/* MIGRATE argument table */
struct redisCommandArg MIGRATE_Args[] = {
{"host",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{"port",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{"port",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{"key_or_empty_string",ARG_TYPE_ONEOF,-1,NULL,NULL,NULL,CMD_ARG_NONE,.subargs=MIGRATE_key_or_empty_string_Subargs},
{"destination-db",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{"timeout",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE},
......@@ -1404,7 +1415,10 @@ struct redisCommandArg OBJECT_IDLETIME_Args[] = {
#define OBJECT_REFCOUNT_History NULL
/* OBJECT REFCOUNT tips */
#define OBJECT_REFCOUNT_tips NULL
const char *OBJECT_REFCOUNT_tips[] = {
"nondeterministic_output",
NULL
};
/* OBJECT REFCOUNT argument table */
struct redisCommandArg OBJECT_REFCOUNT_Args[] = {
......@@ -1549,10 +1563,7 @@ NULL
/********** RENAME ********************/
/* RENAME history */
commandHistory RENAME_History[] = {
{"3.2.0","The command no longer returns an error when source and destination names are the same."},
{0}
};
#define RENAME_History NULL
/* RENAME tips */
#define RENAME_tips NULL
......@@ -1658,7 +1669,7 @@ struct redisCommandArg SORT_Args[] = {
{"key",ARG_TYPE_KEY,0,NULL,NULL,NULL,CMD_ARG_NONE},
{"pattern",ARG_TYPE_PATTERN,1,"BY",NULL,NULL,CMD_ARG_OPTIONAL},
{"offset_count",ARG_TYPE_BLOCK,-1,"LIMIT",NULL,NULL,CMD_ARG_OPTIONAL,.subargs=SORT_offset_count_Subargs},
{"pattern",ARG_TYPE_STRING,1,"GET",NULL,NULL,CMD_ARG_OPTIONAL|CMD_ARG_MULTIPLE|CMD_ARG_MULTIPLE_TOKEN},
{"pattern",ARG_TYPE_PATTERN,1,"GET",NULL,NULL,CMD_ARG_OPTIONAL|CMD_ARG_MULTIPLE|CMD_ARG_MULTIPLE_TOKEN},
{"order",ARG_TYPE_ONEOF,-1,NULL,NULL,NULL,CMD_ARG_OPTIONAL,.subargs=SORT_order_Subargs},
{"sorting",ARG_TYPE_PURE_TOKEN,-1,"ALPHA",NULL,NULL,CMD_ARG_OPTIONAL},
{"destination",ARG_TYPE_KEY,2,"STORE",NULL,NULL,CMD_ARG_OPTIONAL},
......@@ -1692,7 +1703,7 @@ struct redisCommandArg SORT_RO_Args[] = {
{"key",ARG_TYPE_KEY,0,NULL,NULL,NULL,CMD_ARG_NONE},
{"pattern",ARG_TYPE_PATTERN,1,"BY",NULL,NULL,CMD_ARG_OPTIONAL},
{"offset_count",ARG_TYPE_BLOCK,-1,"LIMIT",NULL,NULL,CMD_ARG_OPTIONAL,.subargs=SORT_RO_offset_count_Subargs},
{"pattern",ARG_TYPE_STRING,1,"GET",NULL,NULL,CMD_ARG_OPTIONAL|CMD_ARG_MULTIPLE|CMD_ARG_MULTIPLE_TOKEN},
{"pattern",ARG_TYPE_PATTERN,1,"GET",NULL,NULL,CMD_ARG_OPTIONAL|CMD_ARG_MULTIPLE|CMD_ARG_MULTIPLE_TOKEN},
{"order",ARG_TYPE_ONEOF,-1,NULL,NULL,NULL,CMD_ARG_OPTIONAL,.subargs=SORT_RO_order_Subargs},
{"sorting",ARG_TYPE_PURE_TOKEN,-1,"ALPHA",NULL,NULL,CMD_ARG_OPTIONAL},
{0}
......@@ -3036,7 +3047,7 @@ struct redisCommandArg PUBLISH_Args[] = {
/* PUBSUB CHANNELS argument table */
struct redisCommandArg PUBSUB_CHANNELS_Args[] = {
{"pattern",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_OPTIONAL},
{"pattern",ARG_TYPE_PATTERN,-1,NULL,NULL,NULL,CMD_ARG_OPTIONAL},
{0}
};
......@@ -3080,7 +3091,7 @@ struct redisCommandArg PUBSUB_NUMSUB_Args[] = {
/* PUBSUB SHARDCHANNELS argument table */
struct redisCommandArg PUBSUB_SHARDCHANNELS_Args[] = {
{"pattern",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_OPTIONAL},
{"pattern",ARG_TYPE_PATTERN,-1,NULL,NULL,NULL,CMD_ARG_OPTIONAL},
{0}
};
......@@ -3163,10 +3174,7 @@ struct redisCommandArg SSUBSCRIBE_Args[] = {
/********** SUBSCRIBE ********************/
/* SUBSCRIBE history */
commandHistory SUBSCRIBE_History[] = {
{"6.2.0","`RESET` can be called to exit subscribed state."},
{0}
};
#define SUBSCRIBE_History NULL
/* SUBSCRIBE tips */
#define SUBSCRIBE_tips NULL
......@@ -3384,7 +3392,10 @@ NULL
#define FUNCTION_LIST_History NULL
/* FUNCTION LIST tips */
#define FUNCTION_LIST_tips NULL
const char *FUNCTION_LIST_tips[] = {
"nondeterministic_output_order",
NULL
};
/* FUNCTION LIST argument table */
struct redisCommandArg FUNCTION_LIST_Args[] = {
......@@ -3407,10 +3418,7 @@ NULL
/* FUNCTION LOAD argument table */
struct redisCommandArg FUNCTION_LOAD_Args[] = {
{"engine-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{"library-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{"replace",ARG_TYPE_PURE_TOKEN,-1,"REPLACE",NULL,NULL,CMD_ARG_OPTIONAL},
{"library-description",ARG_TYPE_STRING,-1,"DESCRIPTION",NULL,NULL,CMD_ARG_OPTIONAL},
{"function-code",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{0}
};
......@@ -3449,8 +3457,9 @@ struct redisCommandArg FUNCTION_RESTORE_Args[] = {
/* FUNCTION STATS tips */
const char *FUNCTION_STATS_tips[] = {
"nondeterministic_output",
"request_policy:all_shards",
"response_policy:one_succeeded",
"response_policy:special",
NULL
};
......@@ -3462,7 +3471,7 @@ struct redisCommand FUNCTION_Subcommands[] = {
{"help","Show helpful text about the different subcommands","O(1)","7.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SCRIPTING,FUNCTION_HELP_History,FUNCTION_HELP_tips,functionHelpCommand,2,CMD_LOADING|CMD_STALE,ACL_CATEGORY_SCRIPTING},
{"kill","Kill the function currently in execution.","O(1)","7.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SCRIPTING,FUNCTION_KILL_History,FUNCTION_KILL_tips,functionKillCommand,2,CMD_NOSCRIPT|CMD_ALLOW_BUSY,ACL_CATEGORY_SCRIPTING},
{"list","List information about all the functions","O(N) where N is the number of functions","7.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SCRIPTING,FUNCTION_LIST_History,FUNCTION_LIST_tips,functionListCommand,-2,CMD_NOSCRIPT,ACL_CATEGORY_SCRIPTING,.args=FUNCTION_LIST_Args},
{"load","Create a function with the given arguments (name, code, description)","O(1) (considering compilation time is redundant)","7.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SCRIPTING,FUNCTION_LOAD_History,FUNCTION_LOAD_tips,functionLoadCommand,-5,CMD_NOSCRIPT|CMD_WRITE|CMD_DENYOOM,ACL_CATEGORY_SCRIPTING,.args=FUNCTION_LOAD_Args},
{"load","Create a function with the given arguments (name, code, description)","O(1) (considering compilation time is redundant)","7.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SCRIPTING,FUNCTION_LOAD_History,FUNCTION_LOAD_tips,functionLoadCommand,-3,CMD_NOSCRIPT|CMD_WRITE|CMD_DENYOOM,ACL_CATEGORY_SCRIPTING,.args=FUNCTION_LOAD_Args},
{"restore","Restore all the functions on the given payload","O(N) where N is the number of functions on the payload","7.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SCRIPTING,FUNCTION_RESTORE_History,FUNCTION_RESTORE_tips,functionRestoreCommand,-3,CMD_NOSCRIPT|CMD_WRITE|CMD_DENYOOM,ACL_CATEGORY_SCRIPTING,.args=FUNCTION_RESTORE_Args},
{"stats","Return information about the function currently running (name, description, duration)","O(1)","7.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SCRIPTING,FUNCTION_STATS_History,FUNCTION_STATS_tips,functionStatsCommand,2,CMD_NOSCRIPT|CMD_ALLOW_BUSY,ACL_CATEGORY_SCRIPTING},
{0}
......@@ -3520,7 +3529,7 @@ struct redisCommandArg SCRIPT_EXISTS_Args[] = {
/* SCRIPT FLUSH history */
commandHistory SCRIPT_FLUSH_History[] = {
{"6.2.0","Added the `ASYNC` and `SYNC` flushing mode modifiers, as well as the **lazyfree-lazy-user-flush** configuration directive."},
{"6.2.0","Added the `ASYNC` and `SYNC` flushing mode modifiers."},
{0}
};
......@@ -3609,6 +3618,12 @@ struct redisCommand SCRIPT_Subcommands[] = {
/* SENTINEL CKQUORUM tips */
#define SENTINEL_CKQUORUM_tips NULL
/* SENTINEL CKQUORUM argument table */
struct redisCommandArg SENTINEL_CKQUORUM_Args[] = {
{"master-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{0}
};
/********** SENTINEL CONFIG ********************/
/* SENTINEL CONFIG history */
......@@ -3617,6 +3632,26 @@ struct redisCommand SCRIPT_Subcommands[] = {
/* SENTINEL CONFIG tips */
#define SENTINEL_CONFIG_tips NULL
/* SENTINEL CONFIG set_or_get set_param_value argument table */
struct redisCommandArg SENTINEL_CONFIG_set_or_get_set_param_value_Subargs[] = {
{"parameter",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{"value",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{0}
};
/* SENTINEL CONFIG set_or_get argument table */
struct redisCommandArg SENTINEL_CONFIG_set_or_get_Subargs[] = {
{"set_param_value",ARG_TYPE_BLOCK,-1,"SET",NULL,NULL,CMD_ARG_MULTIPLE,.subargs=SENTINEL_CONFIG_set_or_get_set_param_value_Subargs},
{"parameter",ARG_TYPE_STRING,-1,"GET",NULL,NULL,CMD_ARG_MULTIPLE},
{0}
};
/* SENTINEL CONFIG argument table */
struct redisCommandArg SENTINEL_CONFIG_Args[] = {
{"set_or_get",ARG_TYPE_ONEOF,-1,NULL,NULL,NULL,CMD_ARG_NONE,.subargs=SENTINEL_CONFIG_set_or_get_Subargs},
{0}
};
/********** SENTINEL DEBUG ********************/
/* SENTINEL DEBUG history */
......@@ -3625,6 +3660,19 @@ struct redisCommand SCRIPT_Subcommands[] = {
/* SENTINEL DEBUG tips */
#define SENTINEL_DEBUG_tips NULL
/* SENTINEL DEBUG parameter_value argument table */
struct redisCommandArg SENTINEL_DEBUG_parameter_value_Subargs[] = {
{"parameter",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{"value",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{0}
};
/* SENTINEL DEBUG argument table */
struct redisCommandArg SENTINEL_DEBUG_Args[] = {
{"parameter_value",ARG_TYPE_BLOCK,-1,NULL,NULL,NULL,CMD_ARG_MULTIPLE,.subargs=SENTINEL_DEBUG_parameter_value_Subargs},
{0}
};
/********** SENTINEL FAILOVER ********************/
/* SENTINEL FAILOVER history */
......@@ -3633,6 +3681,12 @@ struct redisCommand SCRIPT_Subcommands[] = {
/* SENTINEL FAILOVER tips */
#define SENTINEL_FAILOVER_tips NULL
/* SENTINEL FAILOVER argument table */
struct redisCommandArg SENTINEL_FAILOVER_Args[] = {
{"master-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{0}
};
/********** SENTINEL FLUSHCONFIG ********************/
/* SENTINEL FLUSHCONFIG history */
......@@ -3649,6 +3703,12 @@ struct redisCommand SCRIPT_Subcommands[] = {
/* SENTINEL GET_MASTER_ADDR_BY_NAME tips */
#define SENTINEL_GET_MASTER_ADDR_BY_NAME_tips NULL
/* SENTINEL GET_MASTER_ADDR_BY_NAME argument table */
struct redisCommandArg SENTINEL_GET_MASTER_ADDR_BY_NAME_Args[] = {
{"master-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{0}
};
/********** SENTINEL HELP ********************/
/* SENTINEL HELP history */
......@@ -3665,6 +3725,12 @@ struct redisCommand SCRIPT_Subcommands[] = {
/* SENTINEL INFO_CACHE tips */
#define SENTINEL_INFO_CACHE_tips NULL
/* SENTINEL INFO_CACHE argument table */
struct redisCommandArg SENTINEL_INFO_CACHE_Args[] = {
{"nodename",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_MULTIPLE},
{0}
};
/********** SENTINEL IS_MASTER_DOWN_BY_ADDR ********************/
/* SENTINEL IS_MASTER_DOWN_BY_ADDR history */
......@@ -3673,6 +3739,15 @@ struct redisCommand SCRIPT_Subcommands[] = {
/* SENTINEL IS_MASTER_DOWN_BY_ADDR tips */
#define SENTINEL_IS_MASTER_DOWN_BY_ADDR_tips NULL
/* SENTINEL IS_MASTER_DOWN_BY_ADDR argument table */
struct redisCommandArg SENTINEL_IS_MASTER_DOWN_BY_ADDR_Args[] = {
{"ip",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{"port",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{"current-epoch",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{"runid",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{0}
};
/********** SENTINEL MASTER ********************/
/* SENTINEL MASTER history */
......@@ -3681,6 +3756,12 @@ struct redisCommand SCRIPT_Subcommands[] = {
/* SENTINEL MASTER tips */
#define SENTINEL_MASTER_tips NULL
/* SENTINEL MASTER argument table */
struct redisCommandArg SENTINEL_MASTER_Args[] = {
{"master-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{0}
};
/********** SENTINEL MASTERS ********************/
/* SENTINEL MASTERS history */
......@@ -3697,6 +3778,15 @@ struct redisCommand SCRIPT_Subcommands[] = {
/* SENTINEL MONITOR tips */
#define SENTINEL_MONITOR_tips NULL
/* SENTINEL MONITOR argument table */
struct redisCommandArg SENTINEL_MONITOR_Args[] = {
{"name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{"ip",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{"port",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{"quorum",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{0}
};
/********** SENTINEL MYID ********************/
/* SENTINEL MYID history */
......@@ -3721,6 +3811,12 @@ struct redisCommand SCRIPT_Subcommands[] = {
/* SENTINEL REMOVE tips */
#define SENTINEL_REMOVE_tips NULL
/* SENTINEL REMOVE argument table */
struct redisCommandArg SENTINEL_REMOVE_Args[] = {
{"master-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{0}
};
/********** SENTINEL REPLICAS ********************/
/* SENTINEL REPLICAS history */
......@@ -3729,6 +3825,12 @@ struct redisCommand SCRIPT_Subcommands[] = {
/* SENTINEL REPLICAS tips */
#define SENTINEL_REPLICAS_tips NULL
/* SENTINEL REPLICAS argument table */
struct redisCommandArg SENTINEL_REPLICAS_Args[] = {
{"master-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{0}
};
/********** SENTINEL RESET ********************/
/* SENTINEL RESET history */
......@@ -3737,6 +3839,12 @@ struct redisCommand SCRIPT_Subcommands[] = {
/* SENTINEL RESET tips */
#define SENTINEL_RESET_tips NULL
/* SENTINEL RESET argument table */
struct redisCommandArg SENTINEL_RESET_Args[] = {
{"pattern",ARG_TYPE_PATTERN,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{0}
};
/********** SENTINEL SENTINELS ********************/
/* SENTINEL SENTINELS history */
......@@ -3745,6 +3853,12 @@ struct redisCommand SCRIPT_Subcommands[] = {
/* SENTINEL SENTINELS tips */
#define SENTINEL_SENTINELS_tips NULL
/* SENTINEL SENTINELS argument table */
struct redisCommandArg SENTINEL_SENTINELS_Args[] = {
{"master-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{0}
};
/********** SENTINEL SET ********************/
/* SENTINEL SET history */
......@@ -3753,6 +3867,20 @@ struct redisCommand SCRIPT_Subcommands[] = {
/* SENTINEL SET tips */
#define SENTINEL_SET_tips NULL
/* SENTINEL SET option_value argument table */
struct redisCommandArg SENTINEL_SET_option_value_Subargs[] = {
{"option",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{"value",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{0}
};
/* SENTINEL SET argument table */
struct redisCommandArg SENTINEL_SET_Args[] = {
{"master-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{"option_value",ARG_TYPE_BLOCK,-1,NULL,NULL,NULL,CMD_ARG_MULTIPLE,.subargs=SENTINEL_SET_option_value_Subargs},
{0}
};
/********** SENTINEL SIMULATE_FAILURE ********************/
/* SENTINEL SIMULATE_FAILURE history */
......@@ -3761,28 +3889,42 @@ struct redisCommand SCRIPT_Subcommands[] = {
/* SENTINEL SIMULATE_FAILURE tips */
#define SENTINEL_SIMULATE_FAILURE_tips NULL
/* SENTINEL SIMULATE_FAILURE mode argument table */
struct redisCommandArg SENTINEL_SIMULATE_FAILURE_mode_Subargs[] = {
{"crash-after-election",ARG_TYPE_PURE_TOKEN,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{"crash-after-promotion",ARG_TYPE_PURE_TOKEN,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{"help",ARG_TYPE_PURE_TOKEN,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{0}
};
/* SENTINEL SIMULATE_FAILURE argument table */
struct redisCommandArg SENTINEL_SIMULATE_FAILURE_Args[] = {
{"mode",ARG_TYPE_ONEOF,-1,NULL,NULL,NULL,CMD_ARG_OPTIONAL|CMD_ARG_MULTIPLE,.subargs=SENTINEL_SIMULATE_FAILURE_mode_Subargs},
{0}
};
/* SENTINEL command table */
struct redisCommand SENTINEL_Subcommands[] = {
{"ckquorum","Check for a Sentinel quorum",NULL,"2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_CKQUORUM_History,SENTINEL_CKQUORUM_tips,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
{"config","Configure Sentinel","O(1)","6.2.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_CONFIG_History,SENTINEL_CONFIG_tips,sentinelCommand,-3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
{"debug","List or update the current configurable parameters","O(N) where N is the number of configurable parameters","7.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_DEBUG_History,SENTINEL_DEBUG_tips,sentinelCommand,-2,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
{"failover","Force a failover",NULL,"2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_FAILOVER_History,SENTINEL_FAILOVER_tips,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
{"ckquorum","Check for a Sentinel quorum",NULL,"2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_CKQUORUM_History,SENTINEL_CKQUORUM_tips,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,.args=SENTINEL_CKQUORUM_Args},
{"config","Configure Sentinel","O(1)","6.2.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_CONFIG_History,SENTINEL_CONFIG_tips,sentinelCommand,-3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,.args=SENTINEL_CONFIG_Args},
{"debug","List or update the current configurable parameters","O(N) where N is the number of configurable parameters","7.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_DEBUG_History,SENTINEL_DEBUG_tips,sentinelCommand,-2,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,.args=SENTINEL_DEBUG_Args},
{"failover","Force a failover",NULL,"2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_FAILOVER_History,SENTINEL_FAILOVER_tips,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,.args=SENTINEL_FAILOVER_Args},
{"flushconfig","Rewrite configuration file","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_FLUSHCONFIG_History,SENTINEL_FLUSHCONFIG_tips,sentinelCommand,2,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
{"get-master-addr-by-name","Get port and address of a master","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_GET_MASTER_ADDR_BY_NAME_History,SENTINEL_GET_MASTER_ADDR_BY_NAME_tips,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
{"get-master-addr-by-name","Get port and address of a master","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_GET_MASTER_ADDR_BY_NAME_History,SENTINEL_GET_MASTER_ADDR_BY_NAME_tips,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,.args=SENTINEL_GET_MASTER_ADDR_BY_NAME_Args},
{"help","Show helpful text about the different subcommands","O(1)","6.2.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_HELP_History,SENTINEL_HELP_tips,sentinelCommand,2,CMD_LOADING|CMD_STALE|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
{"info-cache","Get cached INFO from the instances in the deployment","O(N) where N is the number of instances","3.2.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_INFO_CACHE_History,SENTINEL_INFO_CACHE_tips,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
{"is-master-down-by-addr","Check if a master is down","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_IS_MASTER_DOWN_BY_ADDR_History,SENTINEL_IS_MASTER_DOWN_BY_ADDR_tips,sentinelCommand,6,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
{"master","Shows the state of a master","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_MASTER_History,SENTINEL_MASTER_tips,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
{"info-cache","Get cached INFO from the instances in the deployment","O(N) where N is the number of instances","3.2.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_INFO_CACHE_History,SENTINEL_INFO_CACHE_tips,sentinelCommand,-3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,.args=SENTINEL_INFO_CACHE_Args},
{"is-master-down-by-addr","Check if a master is down","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_IS_MASTER_DOWN_BY_ADDR_History,SENTINEL_IS_MASTER_DOWN_BY_ADDR_tips,sentinelCommand,6,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,.args=SENTINEL_IS_MASTER_DOWN_BY_ADDR_Args},
{"master","Shows the state of a master","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_MASTER_History,SENTINEL_MASTER_tips,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,.args=SENTINEL_MASTER_Args},
{"masters","List the monitored masters","O(N) where N is the number of masters","2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_MASTERS_History,SENTINEL_MASTERS_tips,sentinelCommand,2,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
{"monitor","Start monitoring","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_MONITOR_History,SENTINEL_MONITOR_tips,sentinelCommand,6,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
{"monitor","Start monitoring","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_MONITOR_History,SENTINEL_MONITOR_tips,sentinelCommand,6,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,.args=SENTINEL_MONITOR_Args},
{"myid","Get the Sentinel instance ID","O(1)","6.2.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_MYID_History,SENTINEL_MYID_tips,sentinelCommand,2,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
{"pending-scripts","Get information about pending scripts",NULL,"2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_PENDING_SCRIPTS_History,SENTINEL_PENDING_SCRIPTS_tips,sentinelCommand,2,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
{"remove","Stop monitoring","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_REMOVE_History,SENTINEL_REMOVE_tips,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
{"replicas","List the monitored replicas","O(N) where N is the number of replicas","5.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_REPLICAS_History,SENTINEL_REPLICAS_tips,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
{"reset","Reset masters by name pattern","O(N) where N is the number of monitored masters","2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_RESET_History,SENTINEL_RESET_tips,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
{"sentinels","List the Sentinel instances","O(N) where N is the number of Sentinels","2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_SENTINELS_History,SENTINEL_SENTINELS_tips,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
{"set","Change the configuration of a monitored master","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_SET_History,SENTINEL_SET_tips,sentinelCommand,-5,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
{"simulate-failure","Simulate failover scenarios",NULL,"3.2.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_SIMULATE_FAILURE_History,SENTINEL_SIMULATE_FAILURE_tips,sentinelCommand,-3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
{"remove","Stop monitoring","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_REMOVE_History,SENTINEL_REMOVE_tips,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,.args=SENTINEL_REMOVE_Args},
{"replicas","List the monitored replicas","O(N) where N is the number of replicas","5.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_REPLICAS_History,SENTINEL_REPLICAS_tips,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,.args=SENTINEL_REPLICAS_Args},
{"reset","Reset masters by name pattern","O(N) where N is the number of monitored masters","2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_RESET_History,SENTINEL_RESET_tips,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,.args=SENTINEL_RESET_Args},
{"sentinels","List the Sentinel instances","O(N) where N is the number of Sentinels","2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_SENTINELS_History,SENTINEL_SENTINELS_tips,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,.args=SENTINEL_SENTINELS_Args},
{"set","Change the configuration of a monitored master","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_SET_History,SENTINEL_SET_tips,sentinelCommand,-5,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,.args=SENTINEL_SET_Args},
{"simulate-failure","Simulate failover scenarios",NULL,"3.2.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_SIMULATE_FAILURE_History,SENTINEL_SIMULATE_FAILURE_tips,sentinelCommand,-3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,.args=SENTINEL_SIMULATE_FAILURE_Args},
{0}
};
......@@ -4023,7 +4165,10 @@ struct redisCommandArg BGSAVE_Args[] = {
#define COMMAND_DOCS_History NULL
/* COMMAND DOCS tips */
#define COMMAND_DOCS_tips NULL
const char *COMMAND_DOCS_tips[] = {
"nondeterministic_output_order",
NULL
};
/* COMMAND DOCS argument table */
struct redisCommandArg COMMAND_DOCS_Args[] = {
......@@ -4064,7 +4209,10 @@ commandHistory COMMAND_INFO_History[] = {
};
/* COMMAND INFO tips */
#define COMMAND_INFO_tips NULL
const char *COMMAND_INFO_tips[] = {
"nondeterministic_output_order",
NULL
};
/* COMMAND INFO argument table */
struct redisCommandArg COMMAND_INFO_Args[] = {
......@@ -4078,7 +4226,10 @@ struct redisCommandArg COMMAND_INFO_Args[] = {
#define COMMAND_LIST_History NULL
/* COMMAND LIST tips */
#define COMMAND_LIST_tips NULL
const char *COMMAND_LIST_tips[] = {
"nondeterministic_output_order",
NULL
};
/* COMMAND LIST filterby argument table */
struct redisCommandArg COMMAND_LIST_filterby_Subargs[] = {
......@@ -4099,7 +4250,7 @@ struct redisCommand COMMAND_Subcommands[] = {
{"count","Get total number of Redis commands","O(1)","2.8.13",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SERVER,COMMAND_COUNT_History,COMMAND_COUNT_tips,commandCountCommand,2,CMD_LOADING|CMD_STALE,ACL_CATEGORY_CONNECTION},
{"docs","Get array of specific Redis command documentation","O(N) where N is the number of commands to look up","7.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SERVER,COMMAND_DOCS_History,COMMAND_DOCS_tips,commandDocsCommand,-2,CMD_LOADING|CMD_STALE,ACL_CATEGORY_CONNECTION,.args=COMMAND_DOCS_Args},
{"getkeys","Extract keys given a full Redis command","O(N) where N is the number of arguments to the command","2.8.13",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SERVER,COMMAND_GETKEYS_History,COMMAND_GETKEYS_tips,commandGetKeysCommand,-4,CMD_LOADING|CMD_STALE,ACL_CATEGORY_CONNECTION},
{"getkeysandflags","Extract keys given a full Redis command","O(N) where N is the number of arguments to the command","7.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SERVER,COMMAND_GETKEYSANDFLAGS_History,COMMAND_GETKEYSANDFLAGS_tips,commandGetKeysAndFlagsCommand,-4,CMD_LOADING|CMD_STALE,ACL_CATEGORY_CONNECTION},
{"getkeysandflags","Extract keys and access flags given a full Redis command","O(N) where N is the number of arguments to the command","7.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SERVER,COMMAND_GETKEYSANDFLAGS_History,COMMAND_GETKEYSANDFLAGS_tips,commandGetKeysAndFlagsCommand,-4,CMD_LOADING|CMD_STALE,ACL_CATEGORY_CONNECTION},
{"help","Show helpful text about the different subcommands","O(1)","5.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SERVER,COMMAND_HELP_History,COMMAND_HELP_tips,commandHelpCommand,2,CMD_LOADING|CMD_STALE,ACL_CATEGORY_CONNECTION},
{"info","Get array of specific Redis command details, or all when no argument is given.","O(N) where N is the number of commands to look up","2.8.13",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SERVER,COMMAND_INFO_History,COMMAND_INFO_tips,commandInfoCommand,-2,CMD_LOADING|CMD_STALE,ACL_CATEGORY_CONNECTION,.args=COMMAND_INFO_Args},
{"list","Get an array of Redis command names","O(N) where N is the total number of Redis commands","7.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SERVER,COMMAND_LIST_History,COMMAND_LIST_tips,commandListCommand,-2,CMD_LOADING|CMD_STALE,ACL_CATEGORY_CONNECTION,.args=COMMAND_LIST_Args},
......@@ -4113,7 +4264,7 @@ struct redisCommand COMMAND_Subcommands[] = {
/* COMMAND tips */
const char *COMMAND_tips[] = {
"nondeterministic_output",
"nondeterministic_output_order",
NULL
};
......@@ -4259,7 +4410,7 @@ struct redisCommandArg FAILOVER_Args[] = {
/* FLUSHALL history */
commandHistory FLUSHALL_History[] = {
{"4.0.0","Added the `ASYNC` flushing mode modifier."},
{"6.2.0","Added the `SYNC` flushing mode modifier and the **lazyfree-lazy-user-flush** configuration directive."},
{"6.2.0","Added the `SYNC` flushing mode modifier."},
{0}
};
......@@ -4288,7 +4439,7 @@ struct redisCommandArg FLUSHALL_Args[] = {
/* FLUSHDB history */
commandHistory FLUSHDB_History[] = {
{"4.0.0","Added the `ASYNC` flushing mode modifier."},
{"6.2.0","Added the `SYNC` flushing mode modifier and the **lazyfree-lazy-user-flush** configuration directive."},
{"6.2.0","Added the `SYNC` flushing mode modifier."},
{0}
};
......@@ -4351,7 +4502,12 @@ NULL
#define LATENCY_DOCTOR_History NULL
/* LATENCY DOCTOR tips */
#define LATENCY_DOCTOR_tips NULL
const char *LATENCY_DOCTOR_tips[] = {
"nondeterministic_output",
"request_policy:all_nodes",
"response_policy:special",
NULL
};
/********** LATENCY GRAPH ********************/
......@@ -4359,7 +4515,12 @@ NULL
#define LATENCY_GRAPH_History NULL
/* LATENCY GRAPH tips */
#define LATENCY_GRAPH_tips NULL
const char *LATENCY_GRAPH_tips[] = {
"nondeterministic_output",
"request_policy:all_nodes",
"response_policy:special",
NULL
};
/* LATENCY GRAPH argument table */
struct redisCommandArg LATENCY_GRAPH_Args[] = {
......@@ -4381,7 +4542,12 @@ struct redisCommandArg LATENCY_GRAPH_Args[] = {
#define LATENCY_HISTOGRAM_History NULL
/* LATENCY HISTOGRAM tips */
#define LATENCY_HISTOGRAM_tips NULL
const char *LATENCY_HISTOGRAM_tips[] = {
"nondeterministic_output",
"request_policy:all_nodes",
"response_policy:special",
NULL
};
/* LATENCY HISTOGRAM argument table */
struct redisCommandArg LATENCY_HISTOGRAM_Args[] = {
......@@ -4395,7 +4561,12 @@ struct redisCommandArg LATENCY_HISTOGRAM_Args[] = {
#define LATENCY_HISTORY_History NULL
/* LATENCY HISTORY tips */
#define LATENCY_HISTORY_tips NULL
const char *LATENCY_HISTORY_tips[] = {
"nondeterministic_output",
"request_policy:all_nodes",
"response_policy:special",
NULL
};
/* LATENCY HISTORY argument table */
struct redisCommandArg LATENCY_HISTORY_Args[] = {
......@@ -4409,7 +4580,12 @@ struct redisCommandArg LATENCY_HISTORY_Args[] = {
#define LATENCY_LATEST_History NULL
/* LATENCY LATEST tips */
#define LATENCY_LATEST_tips NULL
const char *LATENCY_LATEST_tips[] = {
"nondeterministic_output",
"request_policy:all_nodes",
"response_policy:special",
NULL
};
/********** LATENCY RESET ********************/
......@@ -4417,7 +4593,11 @@ struct redisCommandArg LATENCY_HISTORY_Args[] = {
#define LATENCY_RESET_History NULL
/* LATENCY RESET tips */
#define LATENCY_RESET_tips NULL
const char *LATENCY_RESET_tips[] = {
"request_policy:all_nodes",
"response_policy:all_succeeded",
NULL
};
/* LATENCY RESET argument table */
struct redisCommandArg LATENCY_RESET_Args[] = {
......@@ -4467,6 +4647,8 @@ struct redisCommandArg LOLWUT_Args[] = {
/* MEMORY DOCTOR tips */
const char *MEMORY_DOCTOR_tips[] = {
"nondeterministic_output",
"request_policy:all_shards",
"response_policy:special",
NULL
};
......@@ -4486,6 +4668,8 @@ NULL
/* MEMORY MALLOC_STATS tips */
const char *MEMORY_MALLOC_STATS_tips[] = {
"nondeterministic_output",
"request_policy:all_shards",
"response_policy:special",
NULL
};
......@@ -4495,7 +4679,11 @@ NULL
#define MEMORY_PURGE_History NULL
/* MEMORY PURGE tips */
#define MEMORY_PURGE_tips NULL
const char *MEMORY_PURGE_tips[] = {
"request_policy:all_shards",
"response_policy:all_succeeded",
NULL
};
/********** MEMORY STATS ********************/
......@@ -4505,6 +4693,8 @@ NULL
/* MEMORY STATS tips */
const char *MEMORY_STATS_tips[] = {
"nondeterministic_output",
"request_policy:all_shards",
"response_policy:special",
NULL
};
......@@ -4556,7 +4746,10 @@ struct redisCommand MEMORY_Subcommands[] = {
#define MODULE_LIST_History NULL
/* MODULE LIST tips */
#define MODULE_LIST_tips NULL
const char *MODULE_LIST_tips[] = {
"nondeterministic_output_order",
NULL
};
/********** MODULE LOAD ********************/
......@@ -4573,6 +4766,35 @@ struct redisCommandArg MODULE_LOAD_Args[] = {
{0}
};
/********** MODULE LOADEX ********************/
/* MODULE LOADEX history */
#define MODULE_LOADEX_History NULL
/* MODULE LOADEX tips */
#define MODULE_LOADEX_tips NULL
/* MODULE LOADEX configs argument table */
struct redisCommandArg MODULE_LOADEX_configs_Subargs[] = {
{"name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{"value",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{0}
};
/* MODULE LOADEX args argument table */
struct redisCommandArg MODULE_LOADEX_args_Subargs[] = {
{"arg",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{0}
};
/* MODULE LOADEX argument table */
struct redisCommandArg MODULE_LOADEX_Args[] = {
{"path",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{"configs",ARG_TYPE_BLOCK,-1,"CONFIG",NULL,NULL,CMD_ARG_OPTIONAL|CMD_ARG_MULTIPLE,.subargs=MODULE_LOADEX_configs_Subargs},
{"args",ARG_TYPE_BLOCK,-1,"ARGS",NULL,NULL,CMD_ARG_OPTIONAL|CMD_ARG_MULTIPLE,.subargs=MODULE_LOADEX_args_Subargs},
{0}
};
/********** MODULE UNLOAD ********************/
/* MODULE UNLOAD history */
......@@ -4592,6 +4814,7 @@ struct redisCommand MODULE_Subcommands[] = {
{"help","Show helpful text about the different subcommands","O(1)","5.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SERVER,MODULE_HELP_History,MODULE_HELP_tips,moduleCommand,2,CMD_LOADING|CMD_STALE,0},
{"list","List all modules loaded by the server","O(N) where N is the number of loaded modules.","4.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SERVER,MODULE_LIST_History,MODULE_LIST_tips,moduleCommand,2,CMD_ADMIN|CMD_NOSCRIPT,0},
{"load","Load a module","O(1)","4.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SERVER,MODULE_LOAD_History,MODULE_LOAD_tips,moduleCommand,-3,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_NOSCRIPT|CMD_PROTECTED,0,.args=MODULE_LOAD_Args},
{"loadex","Load a module with extended parameters","O(1)","7.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SERVER,MODULE_LOADEX_History,MODULE_LOADEX_tips,moduleCommand,-3,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_NOSCRIPT|CMD_PROTECTED,0,.args=MODULE_LOADEX_Args},
{"unload","Unload a module","O(1)","4.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SERVER,MODULE_UNLOAD_History,MODULE_UNLOAD_tips,moduleCommand,3,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_NOSCRIPT|CMD_PROTECTED,0,.args=MODULE_UNLOAD_Args},
{0}
};
......@@ -4607,12 +4830,7 @@ struct redisCommand MODULE_Subcommands[] = {
/********** MONITOR ********************/
/* MONITOR history */
commandHistory MONITOR_History[] = {
{"6.0.0","`AUTH` excluded from the command's output."},
{"6.2.0","`RESET` can be called to exit monitor mode."},
{"6.2.4","`AUTH`, `HELLO`, `EVAL`, `EVAL_RO`, `EVALSHA` and `EVALSHA_RO` included in the command's output."},
{0}
};
#define MONITOR_History NULL
/* MONITOR tips */
#define MONITOR_tips NULL
......@@ -4627,7 +4845,7 @@ commandHistory MONITOR_History[] = {
/* PSYNC argument table */
struct redisCommandArg PSYNC_Args[] = {
{"replicationid",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{"replicationid",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{"offset",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{0}
};
......@@ -4651,7 +4869,7 @@ struct redisCommandArg PSYNC_Args[] = {
/* REPLICAOF argument table */
struct redisCommandArg REPLICAOF_Args[] = {
{"host",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{"port",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{"port",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{0}
};
......@@ -4683,7 +4901,7 @@ struct redisCommandArg REPLICAOF_Args[] = {
/* SHUTDOWN history */
commandHistory SHUTDOWN_History[] = {
{"7.0.0","Added the `NOW`, `FORCE` and `ABORT` modifiers. Introduced waiting for lagging replicas before exiting."},
{"7.0.0","Added the `NOW`, `FORCE` and `ABORT` modifiers."},
{0}
};
......@@ -4717,7 +4935,7 @@ struct redisCommandArg SHUTDOWN_Args[] = {
/* SLAVEOF argument table */
struct redisCommandArg SLAVEOF_Args[] = {
{"host",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{"port",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{"port",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{0}
};
......@@ -6652,7 +6870,11 @@ struct redisCommandArg MSET_Args[] = {
#define MSETNX_History NULL
/* MSETNX tips */
#define MSETNX_tips NULL
const char *MSETNX_tips[] = {
"request_policy:multi_shard",
"response_policy:agg_min",
NULL
};
/* MSETNX key_value argument table */
struct redisCommandArg MSETNX_key_value_Subargs[] = {
......
......@@ -8,10 +8,6 @@
"container": "CLIENT",
"function": "clientCommand",
"history": [
[
"3.2.10",
"Client pause prevents client pause and key eviction as well."
],
[
"6.2.0",
"`CLIENT PAUSE WRITE` mode added along with the `mode` option."
......
{
"SHARDS": {
"summary": "Get array of cluster slots to node mappings",
"complexity": "O(N) where N is the total number of cluster nodes",
"group": "cluster",
"since": "7.0.0",
"arity": 2,
"container": "CLUSTER",
"function": "clusterCommand",
"history": [],
"command_flags": [
"STALE"
],
"command_tips": [
"NONDETERMINISTIC_OUTPUT"
]
}
}