Unverified Commit 445aa844 authored by Oran Agra, committed by GitHub

Merge Redis 6.2.0 GA

Redis 6.2.0 GA
parents 2dba1e39 f098fe31
......@@ -14,7 +14,7 @@ jobs:
run: make REDIS_CFLAGS='-Werror' BUILD_TLS=yes
- name: test
run: |
sudo apt-get install tcl8.5
sudo apt-get install tcl8.6
./runtest --verbose
- name: module api test
run: ./runtest-moduleapi --verbose
......@@ -58,14 +58,3 @@ jobs:
run: |
yum -y install gcc make
make REDIS_CFLAGS='-Werror'
build-freebsd:
runs-on: macos-latest
steps:
- uses: actions/checkout@v2
- name: make
uses: vmactions/freebsd-vm@v0.1.0
with:
usesh: true
prepare: pkg install -y gmake
run: gmake
......@@ -20,8 +20,8 @@ jobs:
run: make
- name: test
run: |
sudo apt-get install tcl8.5
./runtest --accurate --verbose
sudo apt-get install tcl8.6
./runtest --accurate --verbose --dump-logs
- name: module api test
run: ./runtest-moduleapi --verbose
- name: sentinel tests
......@@ -39,8 +39,8 @@ jobs:
run: make MALLOC=libc
- name: test
run: |
sudo apt-get install tcl8.5
./runtest --accurate --verbose
sudo apt-get install tcl8.6
./runtest --accurate --verbose --dump-logs
- name: module api test
run: ./runtest-moduleapi --verbose
- name: sentinel tests
......@@ -60,8 +60,8 @@ jobs:
make 32bit
- name: test
run: |
sudo apt-get install tcl8.5
./runtest --accurate --verbose
sudo apt-get install tcl8.6
./runtest --accurate --verbose --dump-logs
- name: module api test
run: |
make -C tests/modules 32bit # the script below doesn't have an argument, we must build manually ahead of time
......@@ -82,10 +82,10 @@ jobs:
make BUILD_TLS=yes
- name: test
run: |
sudo apt-get install tcl8.5 tcl-tls
sudo apt-get install tcl8.6 tcl-tls
./utils/gen-test-certs.sh
./runtest --accurate --verbose --tls
./runtest --accurate --verbose
./runtest --accurate --verbose --tls --dump-logs
./runtest --accurate --verbose --dump-logs
- name: module api test
run: |
./runtest-moduleapi --verbose --tls
......@@ -110,8 +110,8 @@ jobs:
make
- name: test
run: |
sudo apt-get install tcl8.5 tcl-tls
./runtest --config io-threads 4 --config io-threads-do-reads yes --accurate --verbose --tags network
sudo apt-get install tcl8.6 tcl-tls
./runtest --config io-threads 4 --config io-threads-do-reads yes --accurate --verbose --tags network --dump-logs
- name: cluster tests
run: |
./runtest-cluster --config io-threads 4 --config io-threads-do-reads yes
......@@ -127,10 +127,10 @@ jobs:
- name: test
run: |
sudo apt-get update
sudo apt-get install tcl8.5 valgrind -y
./runtest --valgrind --verbose --clients 1
sudo apt-get install tcl8.6 valgrind -y
./runtest --valgrind --verbose --clients 1 --dump-logs
- name: module api test
run: ./runtest-moduleapi --valgrind --verbose --clients 1
run: ./runtest-moduleapi --valgrind --no-latency --verbose --clients 1
test-centos7-jemalloc:
runs-on: ubuntu-latest
......@@ -146,7 +146,7 @@ jobs:
- name: test
run: |
yum -y install which tcl
./runtest --accurate --verbose
./runtest --accurate --verbose --dump-logs
- name: module api test
run: ./runtest-moduleapi --verbose
- name: sentinel tests
......@@ -170,8 +170,8 @@ jobs:
run: |
yum -y install tcl tcltls
./utils/gen-test-certs.sh
./runtest --accurate --verbose --tls
./runtest --accurate --verbose
./runtest --accurate --verbose --tls --dump-logs
./runtest --accurate --verbose --dump-logs
- name: module api test
run: |
./runtest-moduleapi --verbose --tls
......@@ -195,7 +195,7 @@ jobs:
run: make
- name: test
run: |
./runtest --accurate --verbose --no-latency
./runtest --accurate --verbose --no-latency --dump-logs
- name: module api test
run: ./runtest-moduleapi --verbose
- name: sentinel tests
......@@ -210,13 +210,14 @@ jobs:
steps:
- uses: actions/checkout@v2
- name: test
uses: vmactions/freebsd-vm@v0.1.0
uses: vmactions/freebsd-vm@v0.1.2
with:
usesh: true
prepare: pkg install -y gmake lang/tcl85
run: |
gmake
./runtest --accurate --verbose --no-latency
MAKE=gmake ./runtest-moduleapi --verbose
./runtest-sentinel
sync: rsync
prepare: pkg install -y bash gmake lang/tcl86
run: >
gmake &&
./runtest --accurate --verbose --no-latency --dump-logs &&
MAKE=gmake ./runtest-moduleapi --verbose &&
./runtest-sentinel &&
./runtest-cluster
Redis 6.2 release notes
=======================
--------------------------------------------------------------------------------
Upgrade urgency levels:
LOW: No need to upgrade unless there are new features you want to use.
MODERATE: Program an upgrade of the server, but it's not urgent.
HIGH: There is a critical bug that may affect a subset of users. Upgrade!
CRITICAL: There is a critical bug affecting MOST USERS. Upgrade ASAP.
SECURITY: There are security fixes in the release.
--------------------------------------------------------------------------------
================================================================================
Redis 6.2.0 GA Released Tue Feb 22 14:00:00 IST 2021
================================================================================
Upgrade urgency: SECURITY if you use a 32-bit build of Redis (see below), MODERATE
if you used earlier versions of Redis 6.2, LOW otherwise.
Integer overflow on 32-bit systems (CVE-2021-21309):
Redis 4.0 or newer uses a configurable limit for the maximum supported bulk
input size. By default, it is 512MB which is a safe value for all platforms.
If the limit is significantly increased, receiving a large request from a client
may trigger several integer overflow scenarios, which would result in a buffer
overflow and heap corruption.
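To make the failure mode concrete, the standalone sketch below (not Redis code; the variable names are hypothetical) shows how 32-bit size arithmetic wraps once the configured limit is raised far above the default, which is why this release both guards the affected code paths and tightens the proto-max-bulk-len bound (see the config.c hunk further down in this diff):

    /* Illustrative only, not Redis code: on a 32-bit build, adding a small
     * header to an attacker-chosen length near 4GB wraps around, so the
     * allocation ends up far smaller than the data later copied into it. */
    #include <stdio.h>

    int main(void) {
        long long configured_limit = 4LL * 1024 * 1024 * 1024; /* limit raised well above the 512MB default */
        long long requested = configured_limit - 1;            /* client-supplied bulk length */

        unsigned int alloc_size = (unsigned int)requested + 2; /* 32-bit size arithmetic wraps to 1 */
        printf("requested=%lld bytes, computed allocation=%u bytes\n", requested, alloc_size);

        if ((long long)alloc_size < requested)
            printf("undersized buffer: writing the request would overflow the heap\n");
        return 0;
    }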
Here is a comprehensive list of changes in this release compared to 6.2 RC3,
each one includes the PR number that added it, so you can get more details
at https://github.com/redis/redis/pull/<number>
Bug fixes:
* Avoid 32-bit overflows when proto-max-bulk-len is set high (#8522)
* Fix broken protocol in client tracking tracking-redir-broken message (#8456)
* Avoid unsafe field name characters in INFO commandstats, errorstats, modules (#8492)
* XINFO able to access expired keys during CLIENT PAUSE WRITE (#8436)
* Fix allowed length for REPLCONF ip-address, needed due to Sentinel's support for hostnames (#8517)
* Fix broken protocol in redis-benchmark when used with -a or --dbnum (#8486)
* XADD counts deleted records too when considering switching to a new listpack (#8390)
Bug fixes that are only applicable to previous releases of Redis 6.2:
* Fixes in GEOSEARCH bybox (accuracy and mismatch between width and height) (#8445)
* Fix risk of OOM panic in HRANDFIELD, ZRANDMEMBER commands with huge negative count (#8429)
* Fix duplicate replicas issue in Sentinel, needed due to hostname support (#8481)
* Fix Sentinel configuration rewrite, an improvement of #8271 (#8480)
Command behavior changes:
* SRANDMEMBER uses RESP3 array type instead of set type (#8504)
* EXPIRE, EXPIREAT, SETEX, GETEX: Return error when provided expire time overflows (#8287)
Other behavior changes:
* Remove ACL subcommand validation if fully added command exists. (#8483)
Improvements:
* Optimize sorting in GEORADIUS / GEOSEARCH with COUNT (#8326)
* Optimize HRANDFIELD and ZRANDMEMBER case 4 when ziplist encoded (#8444)
* Optimize in-place replacement of elements in HSET, HINCRBY, LSET (#8493)
* Remove redundant list to store pubsub patterns (#8472)
* Add --insecure option to command line tools (#8416)
Info fields and introspection changes:
* Add INFO fields to track progress of BGSAVE, AOFRW, replication (#8414)
Modules:
* RM_ZsetRem: Delete key if empty, the bug could leave empty zset keys (#8453)
* RM_HashSet: Add COUNT_ALL flag and set errno (#8446)
================================================================================
Redis 6.2 RC3 Released Tue Feb 1 14:00:00 IST 2021
================================================================================
......@@ -305,11 +373,11 @@ and we don't get reports of serious issues for a while.
A special thank you for the amount of work put into this release by:
- Oran Agra
- Yossi Gottlieb
- Filipe Oliveira
- Viktor Söderqvist
- Yang Bodong
- Filipe Oliveira
- Guy Benoish
- Itamar Haber
- Yang Bodong
- Madelyn Olson
- Wang Yuan
- Felipe Machado
......@@ -322,15 +390,17 @@ A special thank you for the amount of work put into this release by:
- Allen Farris
- Chen Yang
- Nitai Caro
- sundb
- Meir Shpilraien
- maohuazhu
- Valentino Geron
- Zhao Zhao
- sundb
- Qu Chen
- George Prekas
- Tyson Andre
- Uri Yagelnik
- Michael Grunder
- Huang Zw
- alexronke-channeladvisor
- Andy Pan
- Wu Yunlong
......
Contributor Covenant Code of Conduct
Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others’ private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
this email address: redis@redis.io.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
1. Correction
Community Impact: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
Consequence: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
2. Warning
Community Impact: A violation through a single incident or series
of actions.
Consequence: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
3. Temporary Ban
Community Impact: A serious violation of community standards, including
sustained inappropriate behavior.
Consequence: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
4. Permanent Ban
Community Impact: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
Consequence: A permanent ban from any sort of public interaction within
the community.
Attribution
This Code of Conduct is adapted from the Contributor Covenant,
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by Mozilla’s code of conduct
enforcement ladder.
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
\ No newline at end of file
......@@ -32,5 +32,7 @@ $TCLSH tests/test_helper.tcl \
--single unit/moduleapi/getkeys \
--single unit/moduleapi/test_lazyfree \
--single unit/moduleapi/defrag \
--single unit/moduleapi/hash \
--single unit/moduleapi/zset \
--single unit/moduleapi/stream \
"${@}"
......@@ -814,8 +814,6 @@ void ACLAddAllowedSubcommand(user *u, unsigned long id, const char *sub) {
* invalid (contains non allowed characters).
* ENOENT: The command name or command category provided with + or - is not
* known.
* EBUSY: The subcommand you want to add is about a command that is currently
* fully added.
* EEXIST: You are adding a key pattern after "*" was already added. This is
* almost surely an error on the user side.
* EISDIR: You are adding a channel pattern after "*" was already added. This is
......@@ -976,22 +974,12 @@ int ACLSetUser(user *u, const char *op, ssize_t oplen) {
return C_ERR;
}
/* The command should not be set right now in the command
* bitmap, because adding a subcommand of a fully added
* command is probably an error on the user side. */
unsigned long id = ACLGetCommandID(copy);
if (ACLGetUserCommandBit(u,id) == 1) {
zfree(copy);
errno = EBUSY;
return C_ERR;
/* Add the subcommand to the list of valid ones, if the command is not set. */
if (ACLGetUserCommandBit(u,id) == 0) {
ACLAddAllowedSubcommand(u,id,sub);
}
/* Add the subcommand to the list of valid ones. */
ACLAddAllowedSubcommand(u,id,sub);
/* We have to clear the command bit so that we force the
* subcommand check. */
ACLSetUserCommandBit(u,id,0);
zfree(copy);
}
} else if (op[0] == '-' && op[1] != '@') {
......@@ -1030,10 +1018,6 @@ const char *ACLSetUserStringError(void) {
errmsg = "Unknown command or category name in ACL";
else if (errno == EINVAL)
errmsg = "Syntax error";
else if (errno == EBUSY)
errmsg = "Adding a subcommand of a command already fully "
"added is not allowed. Remove the command to start. "
"Example: -DEBUG +DEBUG|DIGEST";
else if (errno == EEXIST)
errmsg = "Adding a pattern after the * pattern (or the "
"'allkeys' flag) is not valid and does not have any "
......@@ -2253,7 +2237,7 @@ void authCommand(client *c) {
return;
}
username = createStringObject("default",7);
username = shared.default_username;
password = c->argv[1];
} else {
username = c->argv[1];
......@@ -2265,9 +2249,5 @@ void authCommand(client *c) {
} else {
addReplyError(c,"-WRONGPASS invalid username-password pair or user is disabled.");
}
/* Free the "default" string object we created for the two
* arguments form. */
if (c->argc == 2) decrRefCount(username);
}
......@@ -588,11 +588,10 @@ sds catAppendOnlyExpireAtCommand(sds buf, struct redisCommand *cmd, robj *key, r
}
decrRefCount(seconds);
argv[0] = createStringObject("PEXPIREAT",9);
argv[0] = shared.pexpireat;
argv[1] = key;
argv[2] = createStringObjectFromLongLong(when);
buf = catAppendOnlyGenericCommand(buf, 3, argv);
decrRefCount(argv[0]);
decrRefCount(argv[2]);
return buf;
}
......@@ -1441,7 +1440,7 @@ int rewriteAppendOnlyFileRio(rio *aof) {
size_t processed = 0;
int j;
long key_count = 0;
long long cow_updated_time = 0;
long long updated_time = 0;
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
......@@ -1502,18 +1501,16 @@ int rewriteAppendOnlyFileRio(rio *aof) {
aofReadDiffFromParent();
}
/* Update COW info every 1 second (approximately).
/* Update info every 1 second (approximately).
* in order to avoid calling mstime() on each iteration, we will
* check the diff every 1024 keys */
if ((key_count & 1023) == 0) {
key_count = 0;
if ((key_count++ & 1023) == 0) {
long long now = mstime();
if (now - cow_updated_time >= 1000) {
sendChildCOWInfo(CHILD_TYPE_AOF, 0, "AOF rewrite");
cow_updated_time = now;
if (now - updated_time >= 1000) {
sendChildInfo(CHILD_INFO_TYPE_CURRENT_INFO, key_count, "AOF rewrite");
updated_time = now;
}
}
key_count++;
}
dictReleaseIterator(di);
di = NULL;
......@@ -1614,7 +1611,7 @@ int rewriteAppendOnlyFile(char *filename) {
size_t bytes_to_write = sdslen(server.aof_child_diff);
const char *buf = server.aof_child_diff;
long long cow_updated_time = mstime();
long long key_count = dbTotalServerKeyCount();
while (bytes_to_write) {
/* We write the AOF buffer in chunk of 8MB so that we can check the time in between them */
size_t chunk_size = bytes_to_write < (8<<20) ? bytes_to_write : (8<<20);
......@@ -1628,7 +1625,7 @@ int rewriteAppendOnlyFile(char *filename) {
/* Update COW info */
long long now = mstime();
if (now - cow_updated_time >= 1000) {
sendChildCOWInfo(CHILD_TYPE_AOF, 0, "AOF rewrite");
sendChildInfo(CHILD_INFO_TYPE_CURRENT_INFO, key_count, "AOF rewrite");
cow_updated_time = now;
}
}
......@@ -1762,7 +1759,7 @@ int rewriteAppendOnlyFileBackground(void) {
redisSetCpuAffinity(server.aof_rewrite_cpulist);
snprintf(tmpfile,256,"temp-rewriteaof-bg-%d.aof", (int) getpid());
if (rewriteAppendOnlyFile(tmpfile) == C_OK) {
sendChildCOWInfo(CHILD_TYPE_AOF, 1, "AOF rewrite");
sendChildCowInfo(CHILD_INFO_TYPE_AOF_COW_SIZE, "AOF rewrite");
exitFromChild(0);
} else {
exitFromChild(1);
......
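A note on the rewritten AOF loop above: the child-info update is throttled in two stages, a cheap (key_count++ & 1023) mask that avoids calling mstime() for every key, and a one-second clock comparison that limits the actual pipe write. A minimal standalone sketch of the same idiom, where the printf stands in for sendChildInfo() and mstime_now() is a local helper rather than a Redis function:

    #include <stdio.h>
    #include <sys/time.h>

    static long long mstime_now(void) {
        struct timeval tv;
        gettimeofday(&tv, NULL);
        return ((long long)tv.tv_sec) * 1000 + tv.tv_usec / 1000;
    }

    int main(void) {
        long long updated_time = 0;
        long key_count = 0;

        for (long i = 0; i < 10 * 1000 * 1000; i++) {
            /* ... process one key ... */

            /* Cheap check: only look at the clock once every 1024 keys. */
            if ((key_count++ & 1023) == 0) {
                long long now = mstime_now();
                /* Expensive report (a pipe write in Redis) at most once per second. */
                if (now - updated_time >= 1000) {
                    printf("progress: %ld keys processed\n", key_count);
                    updated_time = now;
                }
            }
        }
        return 0;
    }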
......@@ -31,9 +31,10 @@
#include <unistd.h>
typedef struct {
int process_type; /* AOF or RDB child? */
int on_exit; /* COW size of active or exited child */
size_t cow_size; /* Copy on write size. */
size_t keys;
size_t cow;
double progress;
childInfoType information_type; /* Type of information */
} child_info_data;
/* Open a child-parent channel used in order to move information about the
......@@ -64,39 +65,49 @@ void closeChildInfoPipe(void) {
}
}
/* Send COW data to parent. */
void sendChildInfo(int process_type, int on_exit, size_t cow_size) {
/* Send save data to parent. */
void sendChildInfoGeneric(childInfoType info_type, size_t keys, double progress, char *pname) {
if (server.child_info_pipe[1] == -1) return;
child_info_data buffer = {.process_type = process_type, .on_exit = on_exit, .cow_size = cow_size};
ssize_t wlen = sizeof(buffer);
child_info_data data = {0}; /* zero everything, including padding to satisfy valgrind */
data.information_type = info_type;
data.keys = keys;
data.cow = zmalloc_get_private_dirty(-1);
data.progress = progress;
if (write(server.child_info_pipe[1],&buffer,wlen) != wlen) {
/* Nothing to do on error, this will be detected by the other side. */
if (data.cow) {
serverLog((info_type == CHILD_INFO_TYPE_CURRENT_INFO) ? LL_VERBOSE : LL_NOTICE,
"%s: %zu MB of memory used by copy-on-write",
pname, data.cow/(1024*1024));
}
}
/* Update COW data. */
void updateChildInfo(int process_type, int on_exit, size_t cow_size) {
if (!on_exit) {
server.stat_current_cow_bytes = cow_size;
return;
ssize_t wlen = sizeof(data);
if (write(server.child_info_pipe[1], &data, wlen) != wlen) {
/* Nothing to do on error, this will be detected by the other side. */
}
}
if (process_type == CHILD_TYPE_RDB) {
server.stat_rdb_cow_bytes = cow_size;
} else if (process_type == CHILD_TYPE_AOF) {
server.stat_aof_cow_bytes = cow_size;
} else if (process_type == CHILD_TYPE_MODULE) {
server.stat_module_cow_bytes = cow_size;
/* Update Child info. */
void updateChildInfo(childInfoType information_type, size_t cow, size_t keys, double progress) {
if (information_type == CHILD_INFO_TYPE_CURRENT_INFO) {
server.stat_current_cow_bytes = cow;
server.stat_current_save_keys_processed = keys;
if (progress != -1) server.stat_module_progress = progress;
} else if (information_type == CHILD_INFO_TYPE_AOF_COW_SIZE) {
server.stat_aof_cow_bytes = cow;
} else if (information_type == CHILD_INFO_TYPE_RDB_COW_SIZE) {
server.stat_rdb_cow_bytes = cow;
} else if (information_type == CHILD_INFO_TYPE_MODULE_COW_SIZE) {
server.stat_module_cow_bytes = cow;
}
}
/* Read COW info data from the pipe.
* if complete data read into the buffer, process type, copy-on-write type and copy-on-write size
* are stored into *process_type, *on_exit and *cow_size respectively and returns 1.
/* Read child info data from the pipe.
* If a complete record has been read into the buffer, it is stored into the
* output arguments and 1 is returned;
* otherwise, the partial data is left in the buffer, waiting for the next read, and 0 is returned. */
int readChildInfo(int *process_type, int *on_exit, size_t *cow_size) {
int readChildInfo(childInfoType *information_type, size_t *cow, size_t *keys, double* progress) {
/* We are using here a static buffer in combination with the server.child_info_nread to handle short reads */
static child_info_data buffer;
ssize_t wlen = sizeof(buffer);
......@@ -111,25 +122,27 @@ int readChildInfo(int *process_type, int *on_exit, size_t *cow_size) {
/* We have complete child info */
if (server.child_info_nread == wlen) {
*process_type = buffer.process_type;
*on_exit = buffer.on_exit;
*cow_size = buffer.cow_size;
*information_type = buffer.information_type;
*cow = buffer.cow;
*keys = buffer.keys;
*progress = buffer.progress;
return 1;
} else {
return 0;
}
}
/* Receive COW data from child. */
/* Receive info data from child. */
void receiveChildInfo(void) {
if (server.child_info_pipe[0] == -1) return;
int process_type;
int on_exit;
size_t cow_size;
size_t cow;
size_t keys;
double progress;
childInfoType information_type;
/* Drain the pipe and update child info so that we get the final message. */
while (readChildInfo(&process_type, &on_exit, &cow_size)) {
updateChildInfo(process_type, on_exit, cow_size);
while (readChildInfo(&information_type, &cow, &keys, &progress)) {
updateChildInfo(information_type, cow, keys, progress);
}
}
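The child-info channel rewritten above sends one fixed-size struct per update over a pipe, and the parent accumulates bytes (server.child_info_nread) until a whole record has arrived, so short reads are harmless. A simplified, self-contained sketch of that pattern, using hypothetical names (progress_record, read_record) rather than the real child_info_data API:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    typedef struct {
        size_t keys;
        size_t cow;
        double progress;
    } progress_record;                 /* stand-in for child_info_data */

    /* Accumulate bytes until a whole record is available; returns 1 when complete. */
    static int read_record(int fd, progress_record *out) {
        static char buf[sizeof(progress_record)];
        static size_t nread = 0;       /* stand-in for server.child_info_nread */

        ssize_t n = read(fd, buf + nread, sizeof(buf) - nread);
        if (n > 0) nread += (size_t)n;
        if (nread == sizeof(buf)) {
            memcpy(out, buf, sizeof(buf));
            nread = 0;
            return 1;
        }
        return 0;
    }

    int main(void) {
        int fds[2];
        if (pipe(fds) != 0) return 1;

        /* Zero the struct (including padding) before writing, as the diff above
         * does, so tools like valgrind don't flag uninitialized bytes. */
        progress_record rec = {0};
        rec.keys = 1024;
        rec.cow = 4096;
        rec.progress = 0.5;
        if (write(fds[1], &rec, sizeof(rec)) != (ssize_t)sizeof(rec)) return 1;

        progress_record got;
        if (read_record(fds[0], &got))
            printf("keys=%zu cow=%zu progress=%.2f\n", got.keys, got.cow, got.progress);
        return 0;
    }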
......@@ -54,7 +54,7 @@ int cliSecureConnection(redisContext *c, cliSSLconfig config, const char **err)
goto error;
}
SSL_CTX_set_options(ssl_ctx, SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3);
SSL_CTX_set_verify(ssl_ctx, SSL_VERIFY_PEER, NULL);
SSL_CTX_set_verify(ssl_ctx, config.skip_cert_verify ? SSL_VERIFY_NONE : SSL_VERIFY_PEER, NULL);
if (config.cacert || config.cacertdir) {
if (!SSL_CTX_load_verify_locations(ssl_ctx, config.cacert, config.cacertdir)) {
......
......@@ -10,6 +10,8 @@ typedef struct cliSSLconfig {
char *cacert;
/* Directory where trusted CA certificates are stored, or NULL */
char *cacertdir;
/* Skip server certificate verification. */
int skip_cert_verify;
/* Client certificate to authenticate with, or NULL */
char *cert;
/* Private key file to authenticate with, or NULL */
......
......@@ -2126,7 +2126,7 @@ int clusterProcessPacket(clusterLink *link) {
/* Don't bother creating useless objects if there are no
* Pub/Sub subscribers. */
if (dictSize(server.pubsub_channels) ||
listLength(server.pubsub_patterns))
dictSize(server.pubsub_patterns))
{
channel_len = ntohl(hdr->data.publish.msg.channel_len);
message_len = ntohl(hdr->data.publish.msg.message_len);
......@@ -2815,7 +2815,7 @@ void clusterPropagatePublish(robj *channel, robj *message) {
* SLAVE node specific functions
* -------------------------------------------------------------------------- */
/* This function sends a FAILOVE_AUTH_REQUEST message to every node in order to
/* This function sends a FAILOVER_AUTH_REQUEST message to every node in order to
* see if there is the quorum for this slave instance to failover its failing
* master.
*
......
......@@ -2517,7 +2517,7 @@ standardConfig configs[] = {
createLongLongConfig("cluster-node-timeout", NULL, MODIFIABLE_CONFIG, 0, LLONG_MAX, server.cluster_node_timeout, 15000, INTEGER_CONFIG, NULL, NULL),
createLongLongConfig("slowlog-log-slower-than", NULL, MODIFIABLE_CONFIG, -1, LLONG_MAX, server.slowlog_log_slower_than, 10000, INTEGER_CONFIG, NULL, NULL),
createLongLongConfig("latency-monitor-threshold", NULL, MODIFIABLE_CONFIG, 0, LLONG_MAX, server.latency_monitor_threshold, 0, INTEGER_CONFIG, NULL, NULL),
createLongLongConfig("proto-max-bulk-len", NULL, MODIFIABLE_CONFIG, 1024*1024, LLONG_MAX, server.proto_max_bulk_len, 512ll*1024*1024, MEMORY_CONFIG, NULL, NULL), /* Bulk request max size */
createLongLongConfig("proto-max-bulk-len", NULL, MODIFIABLE_CONFIG, 1024*1024, LONG_MAX, server.proto_max_bulk_len, 512ll*1024*1024, MEMORY_CONFIG, NULL, NULL), /* Bulk request max size */
createLongLongConfig("stream-node-max-entries", NULL, MODIFIABLE_CONFIG, 0, LLONG_MAX, server.stream_node_max_entries, 100, INTEGER_CONFIG, NULL, NULL),
createLongLongConfig("repl-backlog-size", NULL, MODIFIABLE_CONFIG, 1, LLONG_MAX, server.repl_backlog_size, 1024*1024, MEMORY_CONFIG, NULL, updateReplBacklogSize), /* Default: 1mb */
......
......@@ -106,9 +106,8 @@ robj *lookupKeyReadWithFlags(redisDb *db, robj *key, int flags) {
robj *val;
if (expireIfNeeded(db,key) == 1) {
/* Key expired. If we are in the context of a master, expireIfNeeded()
* returns 0 only when the key does not exist at all, so it's safe
* to return NULL ASAP. */
/* If we are in the context of a master, expireIfNeeded() returns 1
* when the key is no longer valid, so we can return NULL ASAP. */
if (server.masterhost == NULL)
goto keymiss;
......
......@@ -638,7 +638,7 @@ int defragRaxNode(raxNode **noderef) {
}
/* returns 0 if no more work needs to be done, and 1 if time is up and more work is needed. */
int scanLaterStraemListpacks(robj *ob, unsigned long *cursor, long long endtime, long long *defragged) {
int scanLaterStreamListpacks(robj *ob, unsigned long *cursor, long long endtime, long long *defragged) {
static unsigned char last[sizeof(streamID)];
raxIterator ri;
long iterations = 0;
......@@ -958,7 +958,7 @@ int defragLaterItem(dictEntry *de, unsigned long *cursor, long long endtime) {
} else if (ob->type == OBJ_HASH) {
server.stat_active_defrag_hits += scanLaterHash(ob, cursor);
} else if (ob->type == OBJ_STREAM) {
return scanLaterStraemListpacks(ob, cursor, endtime, &server.stat_active_defrag_hits);
return scanLaterStreamListpacks(ob, cursor, endtime, &server.stat_active_defrag_hits);
} else if (ob->type == OBJ_MODULE) {
return moduleLateDefrag(dictGetKey(de), ob, cursor, endtime, &server.stat_active_defrag_hits);
} else {
......
......@@ -126,7 +126,7 @@ int _dictInit(dict *d, dictType *type,
d->type = type;
d->privdata = privDataPtr;
d->rehashidx = -1;
d->iterators = 0;
d->pauserehash = 0;
return DICT_OK;
}
......@@ -264,7 +264,7 @@ long long timeInMilliseconds(void) {
* than 0, and is smaller than 1 in most cases. The exact upper bound
* depends on the running time of dictRehash(d,100).*/
int dictRehashMilliseconds(dict *d, int ms) {
if (d->iterators > 0) return 0;
if (d->pauserehash > 0) return 0;
long long start = timeInMilliseconds();
int rehashes = 0;
......@@ -276,8 +276,8 @@ int dictRehashMilliseconds(dict *d, int ms) {
return rehashes;
}
/* This function performs just a step of rehashing, and only if there are
* no safe iterators bound to our hash table. When we have iterators in the
/* This function performs just a step of rehashing, and only if hashing has
* not been paused for our hash table. When we have iterators in the
* middle of a rehashing we can't mess with the two hash tables otherwise
* some element can be missed or duplicated.
*
......@@ -285,7 +285,7 @@ int dictRehashMilliseconds(dict *d, int ms) {
* dictionary so that the hash table automatically migrates from H1 to H2
* while it is actively used. */
static void _dictRehashStep(dict *d) {
if (d->iterators == 0) dictRehash(d,1);
if (d->pauserehash == 0) dictRehash(d,1);
}
/* Add an element to the target hash table */
......@@ -301,7 +301,7 @@ int dictAdd(dict *d, void *key, void *val)
/* Low level add or find:
* This function adds the entry but instead of setting a value returns the
* dictEntry structure to the user, that will make sure to fill the value
* field as he wishes.
* field as they wish.
*
* This function is also directly exposed to the user API to be called
* mainly in order to store non-pointers inside the hash value, example:
......@@ -593,7 +593,7 @@ dictEntry *dictNext(dictIterator *iter)
dictht *ht = &iter->d->ht[iter->table];
if (iter->index == -1 && iter->table == 0) {
if (iter->safe)
iter->d->iterators++;
dictPauseRehashing(iter->d);
else
iter->fingerprint = dictFingerprint(iter->d);
}
......@@ -625,7 +625,7 @@ void dictReleaseIterator(dictIterator *iter)
{
if (!(iter->index == -1 && iter->table == 0)) {
if (iter->safe)
iter->d->iterators--;
dictResumeRehashing(iter->d);
else
assert(iter->fingerprint == dictFingerprint(iter->d));
}
......@@ -896,9 +896,8 @@ unsigned long dictScan(dict *d,
if (dictSize(d) == 0) return 0;
/* Having a safe iterator means no rehashing can happen, see _dictRehashStep.
* This is needed in case the scan callback tries to do dictFind or alike. */
d->iterators++;
/* This is needed in case the scan callback tries to do dictFind or alike. */
dictPauseRehashing(d);
if (!dictIsRehashing(d)) {
t0 = &(d->ht[0]);
......@@ -966,8 +965,7 @@ unsigned long dictScan(dict *d,
} while (v & (m0 ^ m1));
}
/* undo the ++ at the top */
d->iterators--;
dictResumeRehashing(d);
return v;
}
......@@ -1056,7 +1054,7 @@ void dictEmpty(dict *d, void(callback)(void*)) {
_dictClear(d,&d->ht[0],callback);
_dictClear(d,&d->ht[1],callback);
d->rehashidx = -1;
d->iterators = 0;
d->pauserehash = 0;
}
void dictEnableResize(void) {
......
......@@ -82,7 +82,7 @@ typedef struct dict {
void *privdata;
dictht ht[2];
long rehashidx; /* rehashing not in progress if rehashidx == -1 */
unsigned long iterators; /* number of iterators currently running */
int16_t pauserehash; /* If >0 rehashing is paused (<0 indicates coding error) */
} dict;
/* If safe is set to 1 this is a safe iterator, that means, you can call
......@@ -150,6 +150,8 @@ typedef void (dictScanBucketFunction)(void *privdata, dictEntry **bucketref);
#define dictSlots(d) ((d)->ht[0].size+(d)->ht[1].size)
#define dictSize(d) ((d)->ht[0].used+(d)->ht[1].used)
#define dictIsRehashing(d) ((d)->rehashidx != -1)
#define dictPauseRehashing(d) (d)->pauserehash++
#define dictResumeRehashing(d) (d)->pauserehash--
/* If our unsigned long type can store a 64 bit number, use a 64 bit PRNG. */
#if ULONG_MAX >= 0xffffffffffffffff
......
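The dict change above replaces the old safe-iterator counter with a general pauserehash counter: anything that needs the two hash tables to stay put (a safe iterator, dictScan, the defragger) brackets its work with dictPauseRehashing()/dictResumeRehashing(), and a rehash step only runs while the counter is zero. A standalone toy model of that contract, where mini_dict and rehash_step are simplified stand-ins rather than the real dict implementation:

    #include <stdio.h>
    #include <stdint.h>

    typedef struct {
        int16_t pauserehash;   /* >0: rehashing paused; <0 would be a coding error */
        long rehashidx;
    } mini_dict;

    #define dictPauseRehashing(d)  ((d)->pauserehash++)
    #define dictResumeRehashing(d) ((d)->pauserehash--)

    static void rehash_step(mini_dict *d) {
        /* Mirrors _dictRehashStep(): only advance when nothing holds a pause. */
        if (d->pauserehash == 0) {
            d->rehashidx++;
            printf("rehashed one bucket (rehashidx=%ld)\n", d->rehashidx);
        } else {
            printf("rehash paused (%d holders)\n", d->pauserehash);
        }
    }

    int main(void) {
        mini_dict d = { .pauserehash = 0, .rehashidx = 0 };

        rehash_step(&d);            /* advances */

        dictPauseRehashing(&d);     /* e.g. a safe iterator starts */
        dictPauseRehashing(&d);     /* pauses nest: a second holder (e.g. a SCAN) */
        rehash_step(&d);            /* does not advance */

        dictResumeRehashing(&d);
        dictResumeRehashing(&d);
        rehash_step(&d);            /* advances again */
        return 0;
    }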
......@@ -135,7 +135,7 @@ void evictionPoolAlloc(void) {
/* This is a helper function for performEvictions(); it is used in order
* to populate the evictionPool with a few entries every time we want to
* expire a key. Keys with idle time smaller than one of the current
* expire a key. Keys with idle time bigger than one of the current
* keys are added. Keys are always added if there are free entries.
*
* We insert keys in place in ascending order, so keys with the smaller
......
......@@ -83,9 +83,8 @@ int activeExpireCycleTryExpire(redisDb *db, dictEntry *de, long long now) {
* keys that can be removed from the keyspace.
*
* Every expire cycle tests multiple databases: the next call will start
* again from the next db, with the exception of exists for time limit: in that
* case we restart again from the last database we were processing. Anyway
* no more than CRON_DBS_PER_CALL databases are tested at every iteration.
* again from the next db. No more than CRON_DBS_PER_CALL databases are
* tested at every iteration.
*
* The function can perform more or less work, depending on the "type"
* argument. It can execute a "fast cycle" or a "slow cycle". The slow
......@@ -141,7 +140,7 @@ void activeExpireCycle(int type) {
/* This function has some global state in order to continue the work
* incrementally across calls. */
static unsigned int current_db = 0; /* Last DB tested. */
static unsigned int current_db = 0; /* Next DB to test. */
static int timelimit_exit = 0; /* Time limit hit in previous call? */
static long long last_fast_cycle = 0; /* When last fast cycle ran. */
......@@ -507,10 +506,15 @@ void expireGenericCommand(client *c, long long basetime, int unit) {
if (getLongLongFromObjectOrReply(c, param, &when, NULL) != C_OK)
return;
int negative_when = when < 0;
if (unit == UNIT_SECONDS) when *= 1000;
when += basetime;
if (((when < 0) && !negative_when) || ((when-basetime > 0) && negative_when)) {
/* EXPIRE allows negative numbers, but we can at least detect an
* overflow by either unit conversion or basetime addition. */
addReplyErrorFormat(c, "invalid expire time in %s", c->cmd->name);
return;
}
/* No key, return zero. */
if (lookupKeyWrite(c->db,key) == NULL) {
addReply(c,shared.czero);
......
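The EXPIRE guard above detects overflow after the fact by checking whether the sign of `when` flipped across the unit conversion and the basetime addition. For reference, the sketch below expresses an equivalent guard that checks before doing the arithmetic, so it does not rely on wraparound behavior; the function name and sample values are illustrative only, not Redis code:

    #include <stdio.h>
    #include <limits.h>

    /* Returns 1 if converting to milliseconds and adding basetime would overflow. */
    static int would_overflow(long long when, long long basetime, int unit_is_seconds) {
        if (unit_is_seconds) {
            if (when > LLONG_MAX / 1000 || when < LLONG_MIN / 1000) return 1;
            when *= 1000;
        }
        if (when > 0 && basetime > LLONG_MAX - when) return 1;
        if (when < 0 && basetime < LLONG_MIN - when) return 1;
        return 0;
    }

    int main(void) {
        long long now_ms = 1613999999000LL;                          /* pretend mstime() */
        printf("%d\n", would_overflow(100, now_ms, 1));              /* 0: fine */
        printf("%d\n", would_overflow(LLONG_MAX / 1000, now_ms, 1)); /* 1: overflows after adding basetime */
        printf("%d\n", would_overflow(LLONG_MIN, now_ms, 0));        /* 0: negative but representable */
        return 0;
    }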
......@@ -31,6 +31,7 @@
#include "geo.h"
#include "geohash_helper.h"
#include "debugmacro.h"
#include "pqsort.h"
/* Things exported from t_zset.c only for geo.c, since it is the only other
* part of Redis that requires close zset introspection. */
......@@ -174,10 +175,10 @@ int extractDistanceOrReply(client *c, robj **argv,
* that should be in the form: <number> <number> <unit>, and returns C_OK on success or C_ERR on failure
* *conversions is populated with the coefficient to use in order to convert meters to the unit.*/
int extractBoxOrReply(client *c, robj **argv, double *conversion,
double *height, double *width) {
double *width, double *height) {
double h, w;
if ((getDoubleFromObjectOrReply(c, argv[0], &h, "need numeric height") != C_OK) ||
(getDoubleFromObjectOrReply(c, argv[1], &w, "need numeric width") != C_OK)) {
if ((getDoubleFromObjectOrReply(c, argv[0], &w, "need numeric width") != C_OK) ||
(getDoubleFromObjectOrReply(c, argv[1], &h, "need numeric height") != C_OK)) {
return C_ERR;
}
......@@ -223,8 +224,10 @@ int geoAppendIfWithinShape(geoArray *ga, GeoShape *shape, double score, sds memb
if (!geohashGetDistanceIfInRadiusWGS84(shape->xy[0], shape->xy[1], xy[0], xy[1],
shape->t.radius*shape->conversion, &distance)) return C_ERR;
} else if (shape->type == RECTANGLE_TYPE) {
if (!geohashGetDistanceIfInRectangle(shape->bounds, shape->xy[0], shape->xy[1],
xy[0], xy[1], &distance)) return C_ERR;
if (!geohashGetDistanceIfInRectangle(shape->t.r.width * shape->conversion,
shape->t.r.height * shape->conversion,
shape->xy[0], shape->xy[1], xy[0], xy[1], &distance))
return C_ERR;
}
/* Append the new element. */
......@@ -634,8 +637,8 @@ void georadiusGeneric(client *c, int srcKeyIndex, int flags) {
flags & GEOSEARCH &&
!byradius)
{
if (extractBoxOrReply(c, c->argv+base_args+i+1, &shape.conversion, &shape.t.r.height,
&shape.t.r.width) != C_OK) return;
if (extractBoxOrReply(c, c->argv+base_args+i+1, &shape.conversion, &shape.t.r.width,
&shape.t.r.height) != C_OK) return;
shape.type = RECTANGLE_TYPE;
bybox = 1;
i += 3;
......@@ -699,10 +702,20 @@ void georadiusGeneric(client *c, int srcKeyIndex, int flags) {
long option_length = 0;
/* Process [optional] requested sorting */
if (sort == SORT_ASC) {
qsort(ga->array, result_length, sizeof(geoPoint), sort_gp_asc);
} else if (sort == SORT_DESC) {
qsort(ga->array, result_length, sizeof(geoPoint), sort_gp_desc);
if (sort != SORT_NONE) {
int (*sort_gp_callback)(const void *a, const void *b) = NULL;
if (sort == SORT_ASC) {
sort_gp_callback = sort_gp_asc;
} else if (sort == SORT_DESC) {
sort_gp_callback = sort_gp_desc;
}
if (returned_items == result_length) {
qsort(ga->array, result_length, sizeof(geoPoint), sort_gp_callback);
} else {
pqsort(ga->array, result_length, sizeof(geoPoint), sort_gp_callback,
0, (returned_items - 1));
}
}
if (storekey == NULL) {
......
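The GEOSEARCH/GEORADIUS change above switches to pqsort() (Redis's bundled partial quicksort) when only COUNT items will be returned, since fully ordering elements past index returned_items-1 is wasted work. A rough standalone illustration of the same decision, using a simple O(n*k) selection in place of pqsort() and a hypothetical geo_point type instead of the real geoPoint:

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { double dist; const char *member; } geo_point;  /* stand-in for geoPoint */

    static int cmp_asc(const void *a, const void *b) {
        double da = ((const geo_point *)a)->dist, db = ((const geo_point *)b)->dist;
        return (da > db) - (da < db);
    }

    /* Order only the first k elements (what pqsort(..., 0, k-1) achieves in Redis);
     * a simple O(n*k) selection is used here purely for illustration. */
    static void partial_sort(geo_point *arr, size_t n, size_t k,
                             int (*cmp)(const void *, const void *)) {
        for (size_t i = 0; i < k && i < n; i++) {
            size_t best = i;
            for (size_t j = i + 1; j < n; j++)
                if (cmp(&arr[j], &arr[best]) < 0) best = j;
            geo_point tmp = arr[i]; arr[i] = arr[best]; arr[best] = tmp;
        }
    }

    int main(void) {
        geo_point pts[] = {{5.2,"e"},{1.1,"a"},{9.9,"x"},{3.3,"c"},{2.2,"b"}};
        size_t n = sizeof(pts)/sizeof(pts[0]), count = 2;  /* e.g. GEOSEARCH ... COUNT 2 ASC */

        if (count == n) qsort(pts, n, sizeof(geo_point), cmp_asc); /* full sort */
        else            partial_sort(pts, n, count, cmp_asc);      /* only the returned prefix */

        for (size_t i = 0; i < count; i++) printf("%s %.1f\n", pts[i].member, pts[i].dist);
        return 0;
    }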
......@@ -85,20 +85,16 @@ uint8_t geohashEstimateStepsByRadius(double range_meters, double lat) {
/* Return the bounding box of the search area by shape (see geohash.h GeoShape)
* bounds[0] - bounds[2] is the minimum and maximum longitude
* while bounds[1] - bounds[3] is the minimum and maximum latitude.
* since the higher the latitude, the shorter the arc length, the box shape is as follows
* (left and right edges are actually bent), as shown in the following diagram:
*
* This function does not behave correctly with very large radius values, for
* instance for the coordinates 81.634948934258375 30.561509253718668 and a
* radius of 7083 kilometers, it reports as bounding boxes:
*
* min_lon 7.680495, min_lat -33.119473, max_lon 155.589402, max_lat 94.242491
*
* However, for instance, a min_lon of 7.680495 is not correct, because the
* point -1.27579540014266968 61.33421815228281559 is at less than 7000
* kilometers away.
*
* Since this function is currently only used as an optimization, the
* optimization is not used for very big radiuses, however the function
* should be fixed. */
* \-----------------/ -------- \-----------------/
* \ / / \ \ /
* \ (long,lat) / / (long,lat) \ \ (long,lat) /
* \ / / \ / \
* --------- /----------------\ /--------------\
* Northern Hemisphere Southern Hemisphere Around the equator
*/
int geohashBoundingBox(GeoShape *shape, double *bounds) {
if (!bounds) return 0;
double longitude = shape->xy[0];
......@@ -106,10 +102,14 @@ int geohashBoundingBox(GeoShape *shape, double *bounds) {
double height = shape->conversion * (shape->type == CIRCULAR_TYPE ? shape->t.radius : shape->t.r.height/2);
double width = shape->conversion * (shape->type == CIRCULAR_TYPE ? shape->t.radius : shape->t.r.width/2);
const double long_delta = rad_deg(width/EARTH_RADIUS_IN_METERS/cos(deg_rad(latitude)));
const double lat_delta = rad_deg(height/EARTH_RADIUS_IN_METERS);
bounds[0] = longitude - long_delta;
bounds[2] = longitude + long_delta;
const double long_delta_top = rad_deg(width/EARTH_RADIUS_IN_METERS/cos(deg_rad(latitude+lat_delta)));
const double long_delta_bottom = rad_deg(width/EARTH_RADIUS_IN_METERS/cos(deg_rad(latitude-lat_delta)));
/* The directions of the northern and southern hemispheres
* are opposite, so we choose different points as min/max long/lat
int southern_hemisphere = latitude < 0 ? 1 : 0;
bounds[0] = southern_hemisphere ? longitude-long_delta_bottom : longitude-long_delta_top;
bounds[2] = southern_hemisphere ? longitude+long_delta_bottom : longitude+long_delta_top;
bounds[1] = latitude - lat_delta;
bounds[3] = latitude + lat_delta;
return 1;
......@@ -137,12 +137,10 @@ GeoHashRadius geohashCalculateAreasByShapeWGS84(GeoShape *shape) {
double latitude = shape->xy[1];
/* radius_meters is calculated differently in different search types:
* 1) CIRCULAR_TYPE, just use radius.
* 2) RECTANGLE_TYPE, in order to calculate accurately, we should use
* sqrt((width/2)^2 + (height/2)^2), so that the box is bound by a circle,
* But the current code a simpler approach resulting in a smaller circle,
* which is safe because we search the 8 nearby boxes anyway. */
* 2) RECTANGLE_TYPE, we use sqrt((width/2)^2 + (height/2)^2) to
* calculate the distance from the center point to the corner */
double radius_meters = shape->type == CIRCULAR_TYPE ? shape->t.radius :
shape->t.r.width > shape->t.r.height ? shape->t.r.width/2 : shape->t.r.height/2;
sqrt((shape->t.r.width/2)*(shape->t.r.width/2) + (shape->t.r.height/2)*(shape->t.r.height/2));
radius_meters *= shape->conversion;
steps = geohashEstimateStepsByRadius(radius_meters,latitude);
......@@ -245,14 +243,21 @@ int geohashGetDistanceIfInRadiusWGS84(double x1, double y1, double x2,
return geohashGetDistanceIfInRadius(x1, y1, x2, y2, radius, distance);
}
/* Judge whether a point is in the axis-aligned rectangle.
* bounds : see geohash.h GeoShape::bounds
/* Judge whether a point is in the axis-aligned rectangle: the point is inside
* when its distance from the center is at most width/2 along the longitude
* axis and at most height/2 along the latitude axis.
*
* width_m, height_m: the rectangle
* x1, y1 : the center of the box
* x2, y2 : the point to be searched
*/
int geohashGetDistanceIfInRectangle(double *bounds, double x1, double y1,
int geohashGetDistanceIfInRectangle(double width_m, double height_m, double x1, double y1,
double x2, double y2, double *distance) {
if (x2 < bounds[0] || x2 > bounds[2] || y2 < bounds[1] || y2 > bounds[3]) return 0;
double lon_distance = geohashGetDistance(x2, y2, x1, y2);
double lat_distance = geohashGetDistance(x2, y2, x2, y1);
if (lon_distance > width_m/2 || lat_distance > height_m/2) {
return 0;
}
*distance = geohashGetDistance(x1, y1, x2, y2);
return 1;
}
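The new bounding-box code above computes separate longitude deltas at the box's top and bottom edges and keeps the pole-ward one, because a fixed width in meters spans more degrees of longitude the closer the edge is to the pole. A quick standalone check of those formulas with arbitrary example inputs (the EARTH_RADIUS_IN_METERS constant is assumed to match the one in geohash_helper.c):

    #include <stdio.h>
    #include <math.h>

    #ifndef M_PI
    #define M_PI 3.14159265358979323846
    #endif

    #define EARTH_RADIUS_IN_METERS 6372797.560856
    #define deg_rad(d) ((d) * M_PI / 180.0)
    #define rad_deg(r) ((r) * 180.0 / M_PI)

    int main(void) {
        /* A 400km x 200km box centered in the northern hemisphere. */
        double lon = 15.0, lat = 37.0, width = 400000.0, height = 200000.0;

        double lat_delta = rad_deg((height / 2) / EARTH_RADIUS_IN_METERS);
        /* One degree of longitude spans fewer meters at higher latitude, so the box
         * is widest (in degrees) at its edge closest to the pole. */
        double long_delta_top    = rad_deg((width / 2) / EARTH_RADIUS_IN_METERS / cos(deg_rad(lat + lat_delta)));
        double long_delta_bottom = rad_deg((width / 2) / EARTH_RADIUS_IN_METERS / cos(deg_rad(lat - lat_delta)));

        /* North of the equator the top edge needs the larger delta; south of it,
         * the bottom edge does, hence the hemisphere check in the diff above. */
        double long_delta = (lat < 0) ? long_delta_bottom : long_delta_top;

        printf("lat range [%f, %f]\n", lat - lat_delta, lat + lat_delta);
        printf("lon range [%f, %f]\n", lon - long_delta, lon + long_delta);
        return 0;
    }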