Unverified Commit 33fc0fbf authored by Moti Cohen, committed by GitHub

HFE to support AOF and replicas (#13285)

* For the replicas' sake, rewrite the `H*EXPIRE*`, `HSETF` and `HGETF`
  commands to carry an absolute unix time in msec (see the sketch below).
* On active expiration of a field, propagate an HDEL to the replica
  (`propagateHashFieldDeletion()`).
* On lazy expiration, propagate an HDEL to the replica (`hashTypeGetValue()`
  now calls `hashTypeDelete()` and also takes care to call
  `propagateHashFieldDeletion()`).
* Fix the `H*EXPIRE*` commands so that, when given the `LT` flag, a field
  without any expiration is considered to satisfy the condition.
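
The command rewrite in the first item means a relative TTL observed on the
master is propagated as an absolute timestamp, so replicas and the AOF replay
it deterministically. A minimal sketch of that idea, with a hypothetical call
site and helper name (the actual logic lives in the `H*EXPIRE*` command
implementations):

```c
/* Hypothetical sketch, not the exact code of this commit: before propagating
 * HPEXPIRE, replace the relative TTL (argv[2]) with an absolute unix time in
 * msec and rename the command to HPEXPIREAT (shared.hpexpireat is the shared
 * object added by this commit). */
static void rewriteHpexpireForPropagation(client *c, long long ttl_ms) {
    robj *abs_ms = createStringObjectFromLongLong(commandTimeSnapshot() + ttl_ms);
    rewriteClientCommandArgument(c, 0, shared.hpexpireat); /* HPEXPIRE -> HPEXPIREAT */
    rewriteClientCommandArgument(c, 2, abs_ms);            /* relative -> absolute */
    decrRefCount(abs_ms);
}
```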

Note that replicas do not perform any active expiration and should also avoid
lazy expiration: on a replica, `hashTypeGetValue()` does not check expiration
(as long as the master did not request to delete the field, it is considered
valid).
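
A sketch of that replica-side rule, with an assumed helper shape (the real
check sits inside `hashTypeGetValue()`): a replica treats the field as live
until the master's HDEL arrives.

```c
/* Minimal sketch, assumed shape only: lazy expiration is a master-only
 * decision; replicas keep the field until they receive HDEL. */
static int fieldShouldLazyExpire(uint64_t expireAt) {
    if (expireAt == EB_EXPIRE_TIME_INVALID) return 0;  /* field has no TTL */
    if (server.masterhost != NULL) return 0;           /* replica: wait for HDEL */
    return (mstime_t)expireAt < commandTimeSnapshot(); /* master: field expired? */
}
```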

TODO:
* Attach `dbid` to HASH metadata. See
  [here](https://github.com/redis/redis/pull/13209#discussion_r1593385850).

---------
Co-authored-by: debing.sun <debing.sun@redis.com>
parent 6a11d458
......@@ -1939,7 +1939,7 @@ int rewriteSortedSetObject(rio *r, robj *key, robj *o) {
*
* The function returns 0 on error, non-zero on success. */
static int rioWriteHashIteratorCursor(rio *r, hashTypeIterator *hi, int what) {
if (hi->encoding == OBJ_ENCODING_LISTPACK) {
if ((hi->encoding == OBJ_ENCODING_LISTPACK) || (hi->encoding == OBJ_ENCODING_LISTPACK_EX)) {
unsigned char *vstr = NULL;
unsigned int vlen = UINT_MAX;
long long vll = LLONG_MAX;
......@@ -1963,37 +1963,60 @@ static int rioWriteHashIteratorCursor(rio *r, hashTypeIterator *hi, int what) {
/* Emit the commands needed to rebuild a hash object.
* The function returns 0 on error, 1 on success. */
int rewriteHashObject(rio *r, robj *key, robj *o) {
int res = 0; /*fail*/
hashTypeIterator *hi;
long long count = 0, items = hashTypeLength(o, 0);
int isHFE = hashTypeGetMinExpire(o) != EB_EXPIRE_TIME_INVALID;
hi = hashTypeInitIterator(o);
while (hashTypeNext(hi, 0) != C_ERR) {
if (count == 0) {
int cmd_items = (items > AOF_REWRITE_ITEMS_PER_CMD) ?
AOF_REWRITE_ITEMS_PER_CMD : items;
if (!rioWriteBulkCount(r,'*',2+cmd_items*2) ||
!rioWriteBulkString(r,"HMSET",5) ||
!rioWriteBulkObject(r,key))
{
hashTypeReleaseIterator(hi);
return 0;
if (!isHFE) {
while (hashTypeNext(hi, 0) != C_ERR) {
if (count == 0) {
int cmd_items = (items > AOF_REWRITE_ITEMS_PER_CMD) ?
AOF_REWRITE_ITEMS_PER_CMD : items;
if (!rioWriteBulkCount(r, '*', 2 + cmd_items * 2) ||
!rioWriteBulkString(r, "HMSET", 5) ||
!rioWriteBulkObject(r, key))
goto reHashEnd;
}
}
if (!rioWriteHashIteratorCursor(r, hi, OBJ_HASH_KEY) ||
!rioWriteHashIteratorCursor(r, hi, OBJ_HASH_VALUE))
{
hashTypeReleaseIterator(hi);
return 0;
if (!rioWriteHashIteratorCursor(r, hi, OBJ_HASH_KEY) ||
!rioWriteHashIteratorCursor(r, hi, OBJ_HASH_VALUE))
goto reHashEnd;
if (++count == AOF_REWRITE_ITEMS_PER_CMD) count = 0;
items--;
}
} else {
while (hashTypeNext(hi, 0) != C_ERR) {
char hmsetCmd[] = "*4\r\n$5\r\nHMSET\r\n";
if ( (!rioWrite(r, hmsetCmd, sizeof(hmsetCmd) - 1)) ||
(!rioWriteBulkObject(r, key)) ||
(!rioWriteHashIteratorCursor(r, hi, OBJ_HASH_KEY)) ||
(!rioWriteHashIteratorCursor(r, hi, OBJ_HASH_VALUE)) )
goto reHashEnd;
if (hi->expire_time != EB_EXPIRE_TIME_INVALID) {
char cmd[] = "*6\r\n$10\r\nHPEXPIREAT\r\n";
if ( (!rioWrite(r, cmd, sizeof(cmd) - 1)) ||
(!rioWriteBulkObject(r, key)) ||
(!rioWriteBulkLongLong(r, hi->expire_time)) ||
(!rioWriteBulkString(r, "FIELDS", 6)) ||
(!rioWriteBulkString(r, "1", 1)) ||
(!rioWriteHashIteratorCursor(r, hi, OBJ_HASH_KEY)) )
goto reHashEnd;
}
}
if (++count == AOF_REWRITE_ITEMS_PER_CMD) count = 0;
items--;
}
hashTypeReleaseIterator(hi);
res = 1; /* success */
return 1;
reHashEnd:
hashTypeReleaseIterator(hi);
return res;
}
/* Helper for rewriteStreamObject() that generates a bulk string into the
......
......@@ -174,6 +174,7 @@ void dumpCommand(client *c) {
/* RESTORE key ttl serialized-value [REPLACE] [ABSTTL] [IDLETIME seconds] [FREQ frequency] */
void restoreCommand(client *c) {
uint64_t minExpiredField = EB_EXPIRE_TIME_INVALID;
long long ttl, lfu_freq = -1, lru_idle = -1, lru_clock = -1;
rio payload;
int j, type, replace = 0, absttl = 0;
......@@ -237,7 +238,7 @@ void restoreCommand(client *c) {
rioInitWithBuffer(&payload,c->argv[3]->ptr);
if (((type = rdbLoadObjectType(&payload)) == -1) ||
((obj = rdbLoadObject(type,&payload,key->ptr,c->db,0,NULL)) == NULL))
((obj = rdbLoadObject(type,&payload,key->ptr,c->db,NULL, &minExpiredField)) == NULL))
{
addReplyError(c,"Bad data format");
return;
......@@ -263,7 +264,13 @@ void restoreCommand(client *c) {
}
/* Create the key and set the TTL if any */
dbAdd(c->db,key,obj);
dictEntry *de = dbAdd(c->db,key,obj);
/* If minExpiredField was set, then the object is hash with expiration
* on fields and need to register it in global HFE DS */
if (minExpiredField != EB_EXPIRE_TIME_INVALID)
hashTypeAddToExpires(c->db, dictGetKey(de), obj, minExpiredField);
if (ttl) {
setExpire(c,c->db,key,ttl);
if (!absttl) {
......
......@@ -276,6 +276,11 @@ static void dbSetValue(redisDb *db, robj *key, robj *val, int overwrite, dictEnt
old = dictGetVal(de);
}
kvstoreDictSetVal(db->keys, slot, de, val);
/* if hash with HFEs, take care to remove from global HFE DS */
if (old->type == OBJ_HASH)
hashTypeRemoveFromExpires(&db->hexpires, old);
if (server.lazyfree_lazy_server_del) {
freeObjAsync(key,old,db->id);
} else {
......@@ -1632,7 +1637,8 @@ void copyCommand(client *c) {
if (expire != -1)
setExpire(c, dst, newkey, expire);
/* If hash with expiration on fields then add it to 'dst' global HFE DS */
/* If minExpiredField was set, then the object is hash with expiration
* on fields and need to register it in global HFE DS */
if (minHashExpire != EB_EXPIRE_TIME_INVALID)
hashTypeAddToExpires(dst, dictGetKey(deCopy), newobj, minHashExpire);
......@@ -1768,11 +1774,13 @@ void swapMainDbWithTempDb(redisDb *tempDb) {
* remain in the same DB they were. */
activedb->keys = newdb->keys;
activedb->expires = newdb->expires;
activedb->hexpires = newdb->hexpires;
activedb->avg_ttl = newdb->avg_ttl;
activedb->expires_cursor = newdb->expires_cursor;
newdb->keys = aux.keys;
newdb->expires = aux.expires;
newdb->hexpires = aux.hexpires;
newdb->avg_ttl = aux.avg_ttl;
newdb->expires_cursor = aux.expires_cursor;
......
......@@ -204,13 +204,18 @@ void xorObjectDigest(redisDb *db, robj *keyobj, unsigned char *digest, robj *o)
unsigned char eledigest[20];
sds sdsele;
/* field */
memset(eledigest,0,20);
sdsele = hashTypeCurrentObjectNewSds(hi,OBJ_HASH_KEY);
mixDigest(eledigest,sdsele,sdslen(sdsele));
sdsfree(sdsele);
/* val */
sdsele = hashTypeCurrentObjectNewSds(hi,OBJ_HASH_VALUE);
mixDigest(eledigest,sdsele,sdslen(sdsele));
sdsfree(sdsele);
/* hash-field expiration (HFE) */
if (hi->expire_time != EB_EXPIRE_TIME_INVALID)
xorDigest(eledigest,"!!hexpire!!",11);
xorDigest(digest,eledigest,20);
}
hashTypeReleaseIterator(hi);
......
......@@ -23,6 +23,12 @@
* #define EB_VALIDATE_DEBUG 1
*/
#if (REDIS_TEST || EB_VALIDATE_DEBUG) && !defined(EB_TEST_BENCHMARK)
#define EB_VALIDATE_STRUCTURE(eb, type) ebValidate(eb, type)
#else
#define EB_VALIDATE_STRUCTURE(eb, type) // Do nothing
#endif
/*** BENCHMARK
*
* To benchmark ebuckets creation and active-expire with 10 million items, apply
......@@ -190,7 +196,7 @@ static inline uint64_t raxKey2BucketKey(unsigned char *raxKey) {
* Before: [segHdr] -> {item1,..,item16} -> [..]
* After: [segHdr] -> {newItem} -> [nextSegHdr] -> {item1,..,item16} -> [..]
*
* Take care to persist `segHdr` to be the same instance after the change.
* Taken care to persist `segHdr` to be the same instance after the change.
* This is important because the rax tree is pointing to it. */
static int ebSegAddExtended(EbucketsType *type, FirstSegHdr *firstSegHdr, eItem newItem) {
/* Allocate nextSegHdr and let it take the items of first segment header */
......@@ -1390,9 +1396,8 @@ int ebRemove(ebuckets *eb, EbucketsType *type, eItem item) {
if (res)
type->getExpireMeta(item)->trash = 1;
#if (REDIS_TEST || EB_VALIDATE_DEBUG) && !defined(EB_TEST_BENCHMARK)
ebValidate(*eb, type);
#endif
EB_VALIDATE_STRUCTURE(*eb, type);
return res;
}
......@@ -1435,9 +1440,9 @@ int ebAdd(ebuckets *eb, EbucketsType *type, eItem item, uint64_t expireTime) {
/* Add item to rax */
res = ebAddToRax(eb, type, item, EB_BUCKET_KEY(expireTime));
}
#if (REDIS_TEST || EB_VALIDATE_DEBUG) && !defined(EB_TEST_BENCHMARK)
ebValidate(*eb, type);
#endif
EB_VALIDATE_STRUCTURE(*eb, type);
return res;
}
......@@ -1521,9 +1526,9 @@ END_ACTEXP:
ebAdd(eb, type, updateList, ebGetMetaExpTime(mItem));
updateList = next;
}
#if (REDIS_TEST || EB_VALIDATE_DEBUG) && !defined(EB_TEST_BENCHMARK)
ebValidate(*eb, type);
#endif
EB_VALIDATE_STRUCTURE(*eb, type);
return;
}
......
......@@ -139,7 +139,7 @@
* The idea of it is to trim the rax tree depth, avoid having too many branches,
* and reduce frequent modifications of the tree to the minimum.
*/
#define EB_BUCKET_KEY_PRECISION 0 /* 1024msec */
#define EB_BUCKET_KEY_PRECISION 0 /* TBD: modify to 10 */
/* From expiration time to bucket-key */
#define EB_BUCKET_KEY(exptime) ((exptime) >> EB_BUCKET_KEY_PRECISION)
......
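
As context for the `EB_BUCKET_KEY` macro above, a worked example assuming the
precision is eventually raised to 10 as the TBD comment suggests: expiration
times that fall inside the same 1024 ms window collapse to one bucket key
(the timestamps below are made up).

```c
#include <stdio.h>
#include <stdint.h>

#define EB_BUCKET_KEY_PRECISION 10                       /* hypothetical future value */
#define EB_BUCKET_KEY(exptime) ((exptime) >> EB_BUCKET_KEY_PRECISION)

int main(void) {
    uint64_t a = 1700000000100ULL, b = 1700000000900ULL; /* 800 msec apart */
    /* Both shift down to 1660156250, so one rax key covers the whole window. */
    printf("%llu %llu\n", (unsigned long long)EB_BUCKET_KEY(a),
                          (unsigned long long)EB_BUCKET_KEY(b));
    return 0;
}
```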
......@@ -5271,7 +5271,10 @@ int RM_HashSet(RedisModuleKey *key, int flags, ...) {
 
/* Handle XX and NX */
if (flags & (REDISMODULE_HASH_XX|REDISMODULE_HASH_NX)) {
int exists = hashTypeExists(key->value, field->ptr);
int isHashDeleted;
int exists = hashTypeExists(key->db, key->value, field->ptr, &isHashDeleted);
/* hash-field-expiration is not exposed to modules */
serverAssert(isHashDeleted == 0);
if (((flags & REDISMODULE_HASH_XX) && !exists) ||
((flags & REDISMODULE_HASH_NX) && exists))
{
......@@ -5282,7 +5285,7 @@ int RM_HashSet(RedisModuleKey *key, int flags, ...) {
 
/* Handle deletion if value is REDISMODULE_HASH_DELETE. */
if (value == REDISMODULE_HASH_DELETE) {
count += hashTypeDelete(key->value, field->ptr);
count += hashTypeDelete(key->value, field->ptr, 1);
if (flags & REDISMODULE_HASH_CFIELDS) decrRefCount(field);
continue;
}
......@@ -5374,14 +5377,22 @@ int RM_HashGet(RedisModuleKey *key, int flags, ...) {
/* Query the hash for existence or value object. */
if (flags & REDISMODULE_HASH_EXISTS) {
existsptr = va_arg(ap,int*);
if (key->value)
*existsptr = hashTypeExists(key->value,field->ptr);
else
if (key->value) {
int isHashDeleted;
*existsptr = hashTypeExists(key->db, key->value, field->ptr, &isHashDeleted);
/* hash-field-expiration is not exposed to modules */
serverAssert(isHashDeleted == 0);
} else {
*existsptr = 0;
}
} else {
int isHashDeleted;
valueptr = va_arg(ap,RedisModuleString**);
if (key->value) {
*valueptr = hashTypeGetValueObject(key->value,field->ptr);
*valueptr = hashTypeGetValueObject(key->db,key->value,field->ptr, &isHashDeleted);
/* Currently hash-field-expiration is not exposed to modules */
serverAssert(isHashDeleted == 0);
if (*valueptr) {
robj *decoded = getDecodedObject(*valueptr);
decrRefCount(*valueptr);
......
......@@ -209,6 +209,9 @@ static inline int mstrIsMetaAttached(mstr s) { return s[-1] & MSTR_META_MASK; }
/* return whether if a specific flag-index is set */
static inline int mstrGetFlag(mstr s, int flagIdx) { return *mstrFlagsRef(s) & (1 << flagIdx); }
/* DEBUG */
void mstrPrint(mstr s, struct mstrKind *kind, int verbose);
/* See comment above about MSTR-ALIGNMENT(2) */
static_assert(sizeof(struct mstrhdr5 ) % 2 == 1, "must be odd");
static_assert(sizeof(struct mstrhdr8 ) % 2 == 1, "must be odd");
......
......@@ -3757,7 +3757,9 @@ void replaceClientCommandVector(client *c, int argc, robj **argv) {
* 1. Make sure there are no "holes" and all the arguments are set.
* 2. If the original argument vector was longer than the one we
* want to end with, it's up to the caller to set c->argc and
* free the no longer used objects on c->argv. */
* free the no longer used objects on c->argv.
* 3. To remove argument at i'th index, pass NULL as new value
*/
void rewriteClientCommandArgument(client *c, int i, robj *newval) {
robj *oldval;
retainOriginalCommandVector(c);
......@@ -3775,9 +3777,18 @@ void rewriteClientCommandArgument(client *c, int i, robj *newval) {
}
oldval = c->argv[i];
if (oldval) c->argv_len_sum -= getStringObjectLen(oldval);
if (newval) c->argv_len_sum += getStringObjectLen(newval);
c->argv[i] = newval;
incrRefCount(newval);
if (newval) {
c->argv[i] = newval;
incrRefCount(newval);
c->argv_len_sum += getStringObjectLen(newval);
} else {
/* move the remaining arguments one step left */
for (int j = i+1; j < c->argc; j++) {
c->argv[j-1] = c->argv[j];
}
c->argv[--c->argc] = NULL;
}
if (oldval) decrRefCount(oldval);
/* If this is the command name make sure to fix c->cmd. */
......
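
A short usage note on the NULL-removal path added above, with a hypothetical
call site (argument index and command shape are illustrative, not taken from
this commit):

```c
/* Hypothetical example: for a client currently executing
 *   HPEXPIRE key 100 NX FIELDS 1 f
 * passing NULL removes the argument at index 3 ("NX"); the remaining
 * arguments are shifted left and c->argc is decremented. */
static void dropConditionFlag(client *c) {
    rewriteClientCommandArgument(c, 3, NULL);
}
```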
......@@ -103,11 +103,12 @@
#define RDB_MODULE_OPCODE_STRING 5 /* String. */
/* rdbLoad...() functions flags. */
#define RDB_LOAD_NONE 0
#define RDB_LOAD_ENC (1<<0)
#define RDB_LOAD_PLAIN (1<<1)
#define RDB_LOAD_SDS (1<<2)
#define RDB_LOAD_HFLD (1<<3)
#define RDB_LOAD_NONE 0
#define RDB_LOAD_ENC (1<<0)
#define RDB_LOAD_PLAIN (1<<1)
#define RDB_LOAD_SDS (1<<2)
#define RDB_LOAD_HFLD (1<<3)
#define RDB_LOAD_HFLD_TTL (1<<4)
/* flags on the purpose of rdb save or load */
#define RDBFLAGS_NONE 0 /* No special RDB loading or saving. */
......@@ -119,8 +120,9 @@
/* When rdbLoadObject() returns NULL, the err flag is
* set to hold the type of error that occurred */
#define RDB_LOAD_ERR_EMPTY_KEY 1 /* Error of empty key */
#define RDB_LOAD_ERR_OTHER 2 /* Any other errors */
#define RDB_LOAD_ERR_EMPTY_KEY 1 /* Error of empty key */
#define RDB_LOAD_ERR_EXPIRED_HASH 2 /* Expired hash since all its fields are expired */
#define RDB_LOAD_ERR_OTHER 3 /* Any other errors */
ssize_t rdbWriteRaw(rio *rdb, void *p, size_t len);
int rdbSaveType(rio *rdb, unsigned char type);
......@@ -141,7 +143,7 @@ int rdbSaveToFile(const char *filename);
int rdbSave(int req, char *filename, rdbSaveInfo *rsi, int rdbflags);
ssize_t rdbSaveObject(rio *rdb, robj *o, robj *key, int dbid);
size_t rdbSavedObjectLen(robj *o, robj *key, int dbid);
robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, redisDb *db, int rdbflags, int *error);
robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, redisDb *db, int *error, uint64_t *minExpiredField);
void backgroundSaveDoneHandler(int exitcode, int bysignal);
int rdbSaveKeyValuePair(rio *rdb, robj *key, robj *val, long long expiretime,int dbid);
ssize_t rdbSaveSingleModuleAux(rio *rdb, int when, moduleType *mt);
......
......@@ -331,7 +331,8 @@ int redis_check_rdb(char *rdbfilename, FILE *fp) {
rdbstate.keys++;
/* Read value */
rdbstate.doing = RDB_CHECK_DOING_READ_OBJECT_VALUE;
if ((val = rdbLoadObject(type,&rdb,key->ptr,NULL,0,NULL)) == NULL) goto eoferr;
if ((val = rdbLoadObject(type,&rdb,key->ptr,NULL,NULL,NULL)) == NULL)
goto eoferr;
/* Check if the key already expired. */
if (expiretime != -1 && expiretime < now)
rdbstate.already_expired++;
......
......@@ -1955,6 +1955,8 @@ void createSharedObjects(void) {
shared.persist = createStringObject("PERSIST",7);
shared.set = createStringObject("SET",3);
shared.eval = createStringObject("EVAL",4);
shared.hpexpireat = createStringObject("HPEXPIREAT",10);
shared.hdel = createStringObject("HDEL",4);
/* Shared command argument */
shared.left = createStringObject("left",4);
......@@ -2707,7 +2709,6 @@ void initServer(void) {
server.rdb_save_time_start = -1;
server.rdb_last_load_keys_expired = 0;
server.rdb_last_load_keys_loaded = 0;
server.rdb_last_load_hash_fields_expired = 0;
server.dirty = 0;
resetServerStats();
/* A few stats we don't want to reset: server startup time, and peak mem. */
......@@ -5771,7 +5772,6 @@ sds genRedisInfoString(dict *section_dict, int all_sections, int everything) {
"rdb_last_cow_size:%zu\r\n", server.stat_rdb_cow_bytes,
"rdb_last_load_keys_expired:%lld\r\n", server.rdb_last_load_keys_expired,
"rdb_last_load_keys_loaded:%lld\r\n", server.rdb_last_load_keys_loaded,
"rdb_last_load_hash_fields_expired:%lld\r\n", server.rdb_last_load_hash_fields_expired,
"aof_enabled:%d\r\n", server.aof_state != AOF_OFF,
"aof_rewrite_in_progress:%d\r\n", server.child_type == CHILD_TYPE_AOF,
"aof_rewrite_scheduled:%d\r\n", server.aof_rewrite_scheduled,
......
......@@ -1317,7 +1317,8 @@ struct sharedObjectsStruct {
*unsubscribebulk, *psubscribebulk, *punsubscribebulk, *del, *unlink,
*rpop, *lpop, *lpush, *rpoplpush, *lmove, *blmove, *zpopmin, *zpopmax,
*emptyscan, *multi, *exec, *left, *right, *hset, *srem, *xgroup, *xclaim,
*script, *replconf, *eval, *persist, *set, *pexpireat, *pexpire,
*script, *replconf, *eval, *persist, *set, *pexpireat, *pexpire,
*hdel, *hpexpireat,
*time, *pxat, *absttl, *retrycount, *force, *justid, *entriesread,
*lastid, *ping, *setid, *keepttl, *load, *createconsumer,
*getack, *special_asterick, *special_equals, *default_username, *redacted,
......@@ -1802,7 +1803,6 @@ struct redisServer {
long long dirty_before_bgsave; /* Used to restore dirty on failed BGSAVE */
long long rdb_last_load_keys_expired; /* number of expired keys when loading RDB */
long long rdb_last_load_keys_loaded; /* number of loaded keys when loading RDB */
long long rdb_last_load_hash_fields_expired; /* number of expired hash fields when loading RDB */
struct saveparam *saveparams; /* Save points array for RDB */
int saveparamslen; /* Number of saving points */
char *rdb_filename; /* Name of RDB file */
......@@ -3166,6 +3166,20 @@ typedef struct listpackEx {
are ordered by ttl. */
} listpackEx;
/* Each dict of hash object that has fields with time-Expiration will have the
* following metadata attached to dict header */
typedef struct dictExpireMetadata {
ExpireMeta expireMeta; /* embedded ExpireMeta in dict.
To be used in order to register the hash in the
global ebuckets (i.e db->hexpires) with next,
minimum, hash-field to expire */
ebuckets hfe; /* DS of Hash Fields Expiration, associated to each hash */
sds key; /* reference to the key, same one that stored in
db->dict. Will be used from active-expiration flow
for notification and deletion of the object, if
needed. */
} dictExpireMetadata;
/* Hash data type */
#define HASH_SET_TAKE_FIELD (1<<0)
#define HASH_SET_TAKE_VALUE (1<<1)
......@@ -3173,8 +3187,8 @@ typedef struct listpackEx {
void hashTypeConvert(robj *o, int enc, ebuckets *hexpires);
void hashTypeTryConversion(redisDb *db, robj *subject, robj **argv, int start, int end);
int hashTypeExists(robj *o, sds key);
int hashTypeDelete(robj *o, sds key);
int hashTypeExists(redisDb *db, robj *o, sds key, int *isHashDeleted);
int hashTypeDelete(robj *o, void *key, int isSdsField);
unsigned long hashTypeLength(const robj *o, int subtractExpiredFields);
hashTypeIterator *hashTypeInitIterator(robj *subject);
void hashTypeReleaseIterator(hashTypeIterator *hi);
......@@ -3190,24 +3204,24 @@ void hashTypeCurrentObject(hashTypeIterator *hi, int what, unsigned char **vstr,
unsigned int *vlen, long long *vll, uint64_t *expireTime);
sds hashTypeCurrentObjectNewSds(hashTypeIterator *hi, int what);
hfield hashTypeCurrentObjectNewHfield(hashTypeIterator *hi);
robj *hashTypeGetValueObject(robj *o, sds field);
robj *hashTypeGetValueObject(redisDb *db, robj *o, sds field, int *isHashDeleted);
int hashTypeSet(redisDb *db, robj *o, sds field, sds value, int flags);
robj *hashTypeDup(robj *o, sds newkey, uint64_t *minHashExpire);
uint64_t hashTypeRemoveFromExpires(ebuckets *hexpires, robj *o);
void hashTypeAddToExpires(redisDb *db, sds key, robj *hashObj, uint64_t expireTime);
void hashTypeFree(robj *o);
int hashTypeIsExpired(const robj *o, uint64_t expireAt);
uint64_t hashTypeGetMinExpire(robj *o);
unsigned char *hashTypeListpackGetLp(robj *o);
uint64_t hashTypeGetMinExpire(robj *o);
void hashTypeUpdateKeyRef(robj *o, sds newkey);
ebuckets *hashTypeGetDictMetaHFE(dict *d);
void listpackExExpire(robj *o, ExpireInfo *info);
int hashTypeSetExRdb(redisDb *db, robj *o, sds field, sds value, uint64_t expire_at);
uint64_t hashTypeGetMinExpire(robj *keyObj);
uint64_t hashTypeGetNextTimeToExpire(robj *o);
void initDictExpireMetadata(sds key, robj *o);
struct listpackEx *listpackExCreate(void);
void listpackExAddNew(robj *o, sds field, sds value, uint64_t expireAt);
void listpackExAddNew(robj *o, char *field, size_t flen,
char *value, size_t vlen, uint64_t expireAt);
/* Hash-Field data type (of t_hash.c) */
hfield hfieldNew(const void *field, size_t fieldlen, int withExpireMeta);
......
......@@ -94,7 +94,12 @@ robj *lookupKeyByPattern(redisDb *db, robj *pattern, robj *subst) {
/* Retrieve value from hash by the field name. The returned object
* is a new object with refcount already incremented. */
o = hashTypeGetValueObject(o, fieldobj->ptr);
int isHashDeleted;
o = hashTypeGetValueObject(db, o, fieldobj->ptr, &isHashDeleted);
if (isHashDeleted)
goto noobj;
} else {
if (o->type != OBJ_STRING) goto noobj;
......
......@@ -179,6 +179,17 @@ start_server {} {
$master set $j somevalue px 10
}
##### hash-field-expiration
# Hashes of type OBJ_ENCODING_LISTPACK_EX won't be discarded during
# RDB load, even if they are expired.
$master hset myhash1 f1 v1 f2 v2 f3 v3
$master hpexpire myhash1 10 FIELDS 3 f1 f2 f3
# Hashes of type RDB_TYPE_HASH_METADATA will be discarded during RDB load.
$master config set hash-max-listpack-entries 0
$master hset myhash2 f1 v1 f2 v2
$master hpexpire myhash2 10 FIELDS 2 f1 f2
$master config set hash-max-listpack-entries 1
after 20
wait_for_condition 500 100 {
......
......@@ -421,14 +421,20 @@ set server_path [tmpdir "server.partial-hfield-exp-test"]
# verifies writing and reading hash key with expiring and persistent fields
start_server [list overrides [list "dir" $server_path]] {
foreach {type lp_entries} {listpack 512 dict 0} {
test "hash field expiration save and load rdb one expired field, ($type)" {
test "HFE - save and load expired fields, expired soon after, or long after ($type)" {
r config set hash-max-listpack-entries $lp_entries
r FLUSHALL
r HMSET key a 1 b 2 c 3 d 4
r HEXPIREAT key 2524600800 FIELDS 2 a b
r HPEXPIRE key 100 FIELDS 1 d
r HMSET key a 1 b 2 c 3 d 4 e 5
# expected to be expired long after restart
r HEXPIREAT key 2524600800 FIELDS 1 a
# expected long TTL value (6 bytes) is saved and loaded correctly
r HPEXPIREAT key 188900976391764 FIELDS 1 b
# expected to be already expired after restart
r HPEXPIRE key 80 FIELDS 1 d
# expected to be expired soon after restart
r HPEXPIRE key 200 FIELDS 1 e
r save
# sleep 101 ms to make sure d will expire after restart
......@@ -437,14 +443,14 @@ start_server [list overrides [list "dir" $server_path]] {
wait_done_loading r
assert_equal [lsort [r hgetall key]] "1 2 3 a b c"
assert_equal [r hexpiretime key FIELDS 3 a b c] {2524600800 2524600800 -1}
assert_equal [r hpexpiretime key FIELDS 3 a b c] {2524600800000 188900976391764 -1}
assert_equal [s rdb_last_load_keys_loaded] 1
# hash keys saved in listpack encoding are loaded as a blob,
# so individual field expiry is not verified on load
if {$type eq "dict"} {
assert_equal [s rdb_last_load_hash_fields_expired] 1
# wait until expired_hash_fields equals 2
wait_for_condition 10 100 {
[s expired_hash_fields] == 2
} else {
assert_equal [s rdb_last_load_hash_fields_expired] 0
fail "Value of expired_hash_fields is not as expected"
}
}
}
......@@ -455,7 +461,7 @@ set server_path [tmpdir "server.all-hfield-exp-test"]
# verifies writing hash with several expired keys, and active-expiring it on load
start_server [list overrides [list "dir" $server_path]] {
foreach {type lp_entries} {listpack 512 dict 0} {
test "hash field expiration save and load rdb all fields expired, ($type)" {
test "HFE - save and load rdb all fields expired, ($type)" {
r config set hash-max-listpack-entries $lp_entries
r FLUSHALL
......@@ -470,53 +476,15 @@ start_server [list overrides [list "dir" $server_path]] {
restart_server 0 true false
wait_done_loading r
# hash keys saved as listpack-encoded are saved and loaded as a blob
# so individual field validation is not checked during load.
# Therefore, if the key was saved as dict it is expected that
# all 4 fields were expired during load, and thus the key was
# "declared" an empty key.
# On the other hand, if the key was saved as listpack, it is
# expected that no field was expired on load and the key was loaded,
# even though all its fields are actually expired.
if {$type eq "dict"} {
assert_equal [s rdb_last_load_keys_loaded] 0
assert_equal [s rdb_last_load_hash_fields_expired] 4
} else {
assert_equal [s rdb_last_load_keys_loaded] 1
assert_equal [s rdb_last_load_hash_fields_expired] 0
}
# it is expected that no field was expired on load and the key was
# loaded, even though all its fields are actually expired.
assert_equal [s rdb_last_load_keys_loaded] 1
# in listpack encoding, the fields (and key) will be expired by
# lazy expiry
assert_equal [r hgetall key] {}
}
}
}
set server_path [tmpdir "server.long-ttl-test"]
# verifies a long TTL value (6 bytes) is saved and loaded correctly
start_server [list overrides [list "dir" $server_path]] {
foreach {type lp_entries} {listpack 512 dict 0} {
test "hash field expiration save and load rdb long TTL, ($type)" {
r config set hash-max-listpack-entries $lp_entries
r FLUSHALL
r HSET key a 1
# set expiry to 0xabcdef987654 (6 bytes)
r HPEXPIREAT key 188900976391764 FIELDS 1 a
r save
restart_server 0 true false
wait_done_loading r
assert_equal [r hget key a ] 1
assert_equal [r hpexpiretime key FIELDS 1 a] {188900976391764}
}
}
}
set server_path [tmpdir "server.listpack-to-dict-test"]
test "save listpack, load dict" {
......@@ -540,7 +508,6 @@ test "save listpack, load dict" {
# first verify d was not expired during load (no expiry when loading
# a hash that was saved listpack-encoded)
assert_equal [s rdb_last_load_keys_loaded] 1
assert_equal [s rdb_last_load_hash_fields_expired] 0
# d should be lazy expired in hgetall
assert_equal [lsort [r hgetall key]] "1 2 3 a b c"
......@@ -570,7 +537,6 @@ test "save dict, load listpack" {
# verify d was expired during load
assert_equal [s rdb_last_load_keys_loaded] 1
assert_equal [s rdb_last_load_hash_fields_expired] 1
assert_equal [lsort [r hgetall key]] "1 2 3 a b c"
assert_match "*encoding:listpack*" [r debug object key]
......@@ -602,7 +568,6 @@ foreach {type lp_entries} {listpack 512 dict 0} {
}
assert_equal [s rdb_last_load_keys_loaded] 1
assert_equal [s rdb_last_load_hash_fields_expired] 0
# hgetall might lazy expire fields, so it's only called after the stat asserts
assert_equal [lsort [r hgetall key]] "1 2 5 6 a b e f"
......@@ -632,7 +597,6 @@ foreach {type lp_entries} {listpack 512 dict 0} {
after 500
assert_equal [s rdb_last_load_keys_loaded] 1
assert_equal [s rdb_last_load_hash_fields_expired] 0
assert_equal [s expired_hash_fields] 0
# hgetall will lazy expire fields, so it's only called after the stat asserts
......
......@@ -293,6 +293,9 @@ proc findKeyWithType {r type} {
proc createComplexDataset {r ops {opt {}}} {
set useexpire [expr {[lsearch -exact $opt useexpire] != -1}]
# TODO: Remove usehexpire on next commit, when RDB will support replication
set usehexpire [expr {[lsearch -exact $opt usehexpire] != -1}]
if {[lsearch -exact $opt usetag] != -1} {
set tag "{t}"
} else {
......@@ -386,6 +389,10 @@ proc createComplexDataset {r ops {opt {}}} {
{hash} {
randpath {{*}$r hset $k $f $v} \
{{*}$r hdel $k $f}
if { [{*}$r hexists $k $f] && $usehexpire && rand() < 0.5} {
{*}$r hexpire $k 1000 FIELDS 1 $f
}
}
}
}
......
......@@ -124,7 +124,8 @@ start_server {tags {"other"}} {
if {$::accurate} {set numops 10000} else {set numops 1000}
test {Check consistency of different data types after a reload} {
r flushdb
createComplexDataset r $numops usetag
# TODO: integrate usehexpire following next commit that will support replication
createComplexDataset r $numops {usetag usehexpire}
if {$::ignoredigest} {
set _ 1
} else {
......