Commit c18ff056 authored by Moti Cohen, committed by GitHub

Hash Field Expiration - Basic support

- Add ebuckets & mstr data structures
- Integrate active & lazy expiration
- Add most of the commands 
- Add support for dict (listpack is missing)
TODOs:  RDB, notification, listpack, HSET, HGETF, defrag, aof
parent 4581d432
@@ -745,7 +745,7 @@ int moduleDelKeyIfEmpty(RedisModuleKey *key) {
case OBJ_LIST: isempty = listTypeLength(o) == 0; break;
case OBJ_SET: isempty = setTypeSize(o) == 0; break;
case OBJ_ZSET: isempty = zsetLength(o) == 0; break;
- case OBJ_HASH: isempty = hashTypeLength(o) == 0; break;
+ case OBJ_HASH: isempty = hashTypeLength(o, 0) == 0; break;
case OBJ_STREAM: isempty = streamLength(o) == 0; break;
default: isempty = 0;
}
@@ -4168,7 +4168,7 @@ size_t RM_ValueLength(RedisModuleKey *key) {
case OBJ_LIST: return listTypeLength(key->value);
case OBJ_SET: return setTypeSize(key->value);
case OBJ_ZSET: return zsetLength(key->value);
- case OBJ_HASH: return hashTypeLength(key->value);
+ case OBJ_HASH: return hashTypeLength(key->value, 0); /* OPEN: To subtract expired fields? */
case OBJ_STREAM: return streamLength(key->value);
default: return 0;
}
@@ -5296,7 +5296,7 @@ int RM_HashSet(RedisModuleKey *key, int flags, ...) {

robj *argv[2] = {field,value};
hashTypeTryConversion(key->value,argv,0,1);
- int updated = hashTypeSet(key->value, field->ptr, value->ptr, low_flags);
+ int updated = hashTypeSet(key->db, key->value, field->ptr, value->ptr, low_flags);
count += (flags & REDISMODULE_HASH_COUNT_ALL) ? 1 : updated;

/* If CFIELDS is active, SDS string ownership is now of hashTypeSet(),
@@ -11071,18 +11071,22 @@ static void moduleScanKeyCallback(void *privdata, const dictEntry *de) {
ScanKeyCBData *data = privdata;
sds key = dictGetKey(de);
robj *o = data->key->value;
- robj *field = createStringObject(key, sdslen(key));
+ robj *field = NULL;
robj *value = NULL;
if (o->type == OBJ_SET) {
value = NULL;
} else if (o->type == OBJ_HASH) {
sds val = dictGetVal(de);
+ field = createStringObject(key, hfieldlen(key));
value = createStringObject(val, sdslen(val));
} else if (o->type == OBJ_ZSET) {
double *val = (double*)dictGetVal(de);
value = createStringObjectFromLongDouble(*val, 0);
}

+ /* if type is OBJ_HASH then key is of type hfield. Otherwise sds. */
+ if (!field) field = createStringObject(key, sdslen(key));
data->fn(data->key, field, value, data->user_data);
decrRefCount(field);
if (value) decrRefCount(value);
...
/*
* Copyright Redis Ltd. 2024 - present
*
* Licensed under your choice of the Redis Source Available License 2.0 (RSALv2)
* or the Server Side Public License v1 (SSPLv1).
*/
#include <string.h>
#include <assert.h>
#include "sdsalloc.h"
#include "mstr.h"
#include "stdio.h"
#define NULL_SIZE 1
static inline char mstrReqType(size_t string_size);
static inline int mstrHdrSize(char type);
static inline int mstrSumMetaLen(mstrKind *k, mstrFlags flags);
static inline size_t mstrAllocLen(const mstr s, struct mstrKind *kind);
/*** mstr API ***/
/* Create mstr without any metadata attached, based on string 'initStr'.
* - If initStr is NULL, then only the allocation is made (the string data is left uninitialized).
* - The mstr string is always null-terminated.
*/
mstr mstrNew(const char *initStr, size_t lenStr, int trymalloc) {
unsigned char *pInfo; /* pointer to mstr info field */
void *sh;
mstr s;
char type = mstrReqType(lenStr);
int mstrHdr = mstrHdrSize(type);
assert(lenStr + mstrHdr + 1 > lenStr); /* Catch size_t overflow */
size_t len = mstrHdr + lenStr + NULL_SIZE;
sh = trymalloc? s_trymalloc(len) : s_malloc(len);
if (sh == NULL) return NULL;
s = (char*)sh + mstrHdr;
pInfo = ((unsigned char*)s) - 1;
switch(type) {
case MSTR_TYPE_5: {
*pInfo = CREATE_MSTR_INFO(lenStr, 0 /*ismeta*/, type);
break;
}
case MSTR_TYPE_8: {
MSTR_HDR_VAR(8,s);
*pInfo = CREATE_MSTR_INFO(0 /*unused*/, 0 /*ismeta*/, type);
sh->len = lenStr;
break;
}
case MSTR_TYPE_16: {
MSTR_HDR_VAR(16,s);
*pInfo = CREATE_MSTR_INFO(0 /*unused*/, 0 /*ismeta*/, type);
sh->len = lenStr;
break;
}
case MSTR_TYPE_64: {
MSTR_HDR_VAR(64,s);
*pInfo = CREATE_MSTR_INFO(0 /*unused*/, 0 /*ismeta*/, type);
sh->len = lenStr;
break;
}
}
if (initStr && lenStr)
memcpy(s, initStr, lenStr);
s[lenStr] = '\0';
return s;
}
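/* Usage sketch (illustrative only, not part of the mstr API of this commit):
 * create a plain mstr with no metadata, query its length and release it. The
 * MSTR_USAGE_EXAMPLE guard and the function below are assumptions added purely
 * for documentation; they are not compiled into normal builds. */
#ifdef MSTR_USAGE_EXAMPLE
static void mstrBasicUsageExample(void) {
    mstr s = mstrNew("hello", 5, 0);      /* plain mstr, no metadata attached */
    if (s == NULL) return;                /* allocation failed */
    assert(mstrlen(s) == 5);              /* length excludes the null terminator */
    assert(mstrIsMetaAttached(s) == 0);   /* no metadata flags set */
    mstrFree(NULL, s);                    /* kind may be NULL when no metadata is attached */
}
#endif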
/* Create mstr with the given string and reserve space for metadata.
*
* Note: mstrNew(s,l) and mstrNewWithMeta(s,l,0) are not the same. The first allocates
* just the string. The second allocates the string along with a metadata-flags field
* (yet without any metadata structures allocated).
*/
mstr mstrNewWithMeta(struct mstrKind *kind, const char *initStr, size_t lenStr, mstrFlags metaFlags, int trymalloc) {
unsigned char *pInfo; /* pointer to mstr info field */
char *allocMstr;
mstr mstrPtr;
char type = mstrReqType(lenStr);
int mstrHdr = mstrHdrSize(type);
int sumMetaLen = mstrSumMetaLen(kind, metaFlags);
/* mstrSumMetaLen() + sizeof(mstrFlags) + sizeof(mstrhdrX) + lenStr */
size_t allocLen = sumMetaLen + sizeof(mstrFlags) + mstrHdr + lenStr + NULL_SIZE;
allocMstr = trymalloc? s_trymalloc(allocLen) : s_malloc(allocLen);
if (allocMstr == NULL) return NULL;
/* metadata is located at the beginning of the allocation, then meta-flags and lastly the string */
mstrFlags *pMetaFlags = (mstrFlags *) (allocMstr + sumMetaLen) ;
mstrPtr = ((char*) pMetaFlags) + sizeof(mstrFlags) + mstrHdr;
pInfo = ((unsigned char*)mstrPtr) - 1;
switch(type) {
case MSTR_TYPE_5: {
*pInfo = CREATE_MSTR_INFO(lenStr, 1 /*ismeta*/, type);
break;
}
case MSTR_TYPE_8: {
MSTR_HDR_VAR(8, mstrPtr);
sh->len = lenStr;
*pInfo = CREATE_MSTR_INFO(0 /*unused*/, 1 /*ismeta*/, type);
break;
}
case MSTR_TYPE_16: {
MSTR_HDR_VAR(16, mstrPtr);
sh->len = lenStr;
*pInfo = CREATE_MSTR_INFO(0 /*unused*/, 1 /*ismeta*/, type);
break;
}
case MSTR_TYPE_64: {
MSTR_HDR_VAR(64, mstrPtr);
sh->len = lenStr;
*pInfo = CREATE_MSTR_INFO(0 /*unused*/, 1 /*ismeta*/, type);
break;
}
}
*pMetaFlags = metaFlags;
if (initStr != NULL) memcpy(mstrPtr, initStr, lenStr);
mstrPtr[lenStr] = '\0';
return mstrPtr;
}
/* Create a copy of an mstr. The flags of the copy may differ from the source. For
* each metadata flag that is set on both, the corresponding metadata is copied as well. */
mstr mstrNewCopy(struct mstrKind *kind, mstr src, mstrFlags newFlags) {
mstr dst;
/* if no flags are set, then just copy the string */
if (newFlags == 0) return mstrNew(src, mstrlen(src), 0);
dst = mstrNewWithMeta(kind, src, mstrlen(src), newFlags, 0);
memcpy(dst, src, mstrlen(src) + 1);
/* if metadata is attached to src, then selectively copy metadata */
if (mstrIsMetaAttached(src)) {
mstrFlags *pFlags1 = mstrFlagsRef(src),
*pFlags2 = mstrFlagsRef(dst);
mstrFlags flags1Shift = *pFlags1,
flags2Shift = *pFlags2;
unsigned char *at1 = ((unsigned char *) pFlags1),
*at2 = ((unsigned char *) pFlags2);
/* if the flag is set on both, then copy the metadata */
for (int i = 0; flags1Shift != 0; ++i) {
int isFlag1Set = flags1Shift & 0x1;
int isFlag2Set = flags2Shift & 0x1;
if (isFlag1Set) at1 -= kind->metaSize[i];
if (isFlag2Set) at2 -= kind->metaSize[i];
if (isFlag1Set && isFlag2Set)
memcpy(at2, at1, kind->metaSize[i]);
flags1Shift >>= 1;
flags2Shift >>= 1;
}
}
return dst;
}
/* Free an mstring. Note: mstrKind is required to evaluate the metadata sizes and find
* the start of the allocation, but if mstrIsMetaAttached(s) is false, NULL can be
* passed as well.
*/
void mstrFree(struct mstrKind *kind, mstr s) {
if (s != NULL)
s_free(mstrGetAllocPtr(kind, s));
}
/* Return a reference to the metadata flags. Useful to directly modify flags
* that don't carry any metadata payload */
mstrFlags *mstrFlagsRef(mstr s) {
switch(s[-1]&MSTR_TYPE_MASK) {
case MSTR_TYPE_5:
return ((mstrFlags *) (s - sizeof(struct mstrhdr5))) - 1;
case MSTR_TYPE_8:
return ((mstrFlags *) (s - sizeof(struct mstrhdr8))) - 1;
case MSTR_TYPE_16:
return ((mstrFlags *) (s - sizeof(struct mstrhdr16))) - 1;
default: /* MSTR_TYPE_64: */
return ((mstrFlags *) (s - sizeof(struct mstrhdr64))) - 1;
}
}
/* Return a reference to the metadata corresponding to the specified metadata-flag
* index (flagIdx). If that metadata is not attached, a reference is still returned,
* pointing to the location where it would have been written among the other metadatas.
* To verify whether the metadata of `flagIdx` is attached, use `mstrGetFlag(s, flagIdx)`.
*/
void *mstrMetaRef(mstr s, struct mstrKind *kind, int flagIdx) {
int metaOffset = 0;
/* start iterating from flags backward */
mstrFlags *pFlags = mstrFlagsRef(s);
mstrFlags tmp = *pFlags;
for (int i = 0 ; i <= flagIdx ; ++i) {
if (tmp & 0x1) metaOffset += kind->metaSize[i];
tmp >>= 1;
}
return ((char *)pFlags) - metaOffset;
}
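/* Usage sketch (illustrative only): attach a 4-byte TTL metadata to an mstr of a
 * hypothetical kind and access it through mstrMetaRef(). The kind, the flag index
 * and the MSTR_USAGE_EXAMPLE guard are examples, not definitions that exist in
 * this commit. */
#ifdef MSTR_USAGE_EXAMPLE
#define EXAMPLE_META_TTL 0  /* flag index 0 -> 4 bytes of metadata */
static void mstrMetaUsageExample(void) {
    struct mstrKind exampleKind = {
        .name = "example",
        .metaSize[EXAMPLE_META_TTL] = 4,
    };
    mstr s = mstrNewWithMeta(&exampleKind, "foo", 3, 1 << EXAMPLE_META_TTL, 0);
    if (s == NULL) return;
    /* Metadata lives in front of the string; write the TTL through its reference */
    *(uint32_t *) mstrMetaRef(s, &exampleKind, EXAMPLE_META_TTL) = 1234;
    assert(mstrGetFlag(s, EXAMPLE_META_TTL));
    mstrFree(&exampleKind, s);  /* kind is required to locate the allocation start */
}
#endif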
/* mstr layout: [meta-data#N]...[meta-data#0][mstrFlags][mstrhdr][string][null] */
void *mstrGetAllocPtr(struct mstrKind *kind, mstr str) {
if (!mstrIsMetaAttached(str))
return (char*)str - mstrHdrSize(str[-1]);
int totalMetaLen = mstrSumMetaLen(kind, *mstrFlagsRef(str));
return (char*)str - mstrHdrSize(str[-1]) - sizeof(mstrFlags) - totalMetaLen;
}
/* Prints in the following fashion:
* [0x7f8bd8816017] my_mstr: foo (strLen=3, mstrLen=11, isMeta=1, metaFlags=0x1)
* [0x7f8bd8816010] >> meta[0]: 0x78 0x56 0x34 0x12 (metaLen=4)
*/
void mstrPrint(mstr s, struct mstrKind *kind, int verbose) {
mstrFlags mflags, tmp;
int isMeta = mstrIsMetaAttached(s);
tmp = mflags = (isMeta) ? *mstrFlagsRef(s) : 0;
if (!isMeta) {
printf("[%p] %s: %s (strLen=%zu, mstrLen=%zu, isMeta=0)\n",
(void *)s, kind->name, s, mstrlen(s), mstrAllocLen(s, kind));
return;
}
printf("[%p] %s: %s (strLen=%zu, mstrLen=%zu, isMeta=1, metaFlags=0x%x)\n",
(void *)s, kind->name, s, mstrlen(s), mstrAllocLen(s, kind), mflags);
if (verbose) {
for (unsigned int i = 0 ; i < NUM_MSTR_FLAGS ; ++i) {
if (tmp & 0x1) {
int mSize = kind->metaSize[i];
void *mRef = mstrMetaRef(s, kind, i);
printf("[%p] >> meta[%d]:", mRef, i);
for (int j = 0 ; j < mSize ; ++j) {
printf(" 0x%02x", ((unsigned char *) mRef)[j]);
}
printf(" (metaLen=%d)\n", mSize);
}
tmp >>= 1;
}
}
}
/* Return the length of the string (ignoring any attached metadata) */
size_t mstrlen(const mstr s) {
unsigned char info = s[-1];
switch(info & MSTR_TYPE_MASK) {
case MSTR_TYPE_5:
return MSTR_TYPE_5_LEN(info);
case MSTR_TYPE_8:
return MSTR_HDR(8,s)->len;
case MSTR_TYPE_16:
return MSTR_HDR(16,s)->len;
default: /* MSTR_TYPE_64: */
return MSTR_HDR(64,s)->len;
}
}
/*** mstr internals ***/
static inline int mstrSumMetaLen(mstrKind *k, mstrFlags flags) {
int total = 0;
int i = 0 ;
while (flags) {
total += (flags & 0x1) ? k->metaSize[i] : 0;
flags >>= 1;
++i;
}
return total;
}
/* mstrSumMetaLen() + sizeof(mstrFlags) + sizeof(mstrhdrX) + strlen + '\0' */
static inline size_t mstrAllocLen(const mstr s, struct mstrKind *kind) {
int hdrlen;
mstrFlags *pMetaFlags;
size_t strlen = 0;
int isMeta = mstrIsMetaAttached(s);
unsigned char info = s[-1];
switch(info & MSTR_TYPE_MASK) {
case MSTR_TYPE_5:
strlen = MSTR_TYPE_5_LEN(info);
hdrlen = sizeof(struct mstrhdr5);
pMetaFlags = ((mstrFlags *) MSTR_HDR(5, s)) - 1;
break;
case MSTR_TYPE_8:
strlen = MSTR_HDR(8,s)->len;
hdrlen = sizeof(struct mstrhdr8);
pMetaFlags = ((mstrFlags *) MSTR_HDR(8, s)) - 1;
break;
case MSTR_TYPE_16:
strlen = MSTR_HDR(16,s)->len;
hdrlen = sizeof(struct mstrhdr16);
pMetaFlags = ((mstrFlags *) MSTR_HDR(16, s)) - 1;
break;
default: /* MSTR_TYPE_64: */
strlen = MSTR_HDR(64,s)->len;
hdrlen = sizeof(struct mstrhdr64);
pMetaFlags = ((mstrFlags *) MSTR_HDR(64, s)) - 1;
break;
}
return hdrlen + strlen + NULL_SIZE + ((isMeta) ? (mstrSumMetaLen(kind, *pMetaFlags) + sizeof(mstrFlags)) : 0);
}
/* Return a pointer to the beginning of the malloc() allocation of the mstr */
void *mstrGetStartAlloc(mstr s, struct mstrKind *kind) {
int hdrlen;
mstrFlags *pMetaFlags;
int isMeta = mstrIsMetaAttached(s);
switch(s[-1]&MSTR_TYPE_MASK) {
case MSTR_TYPE_5:
hdrlen = sizeof(struct mstrhdr5);
pMetaFlags = ((mstrFlags *) MSTR_HDR(5, s)) - 1;
break;
case MSTR_TYPE_8:
hdrlen = sizeof(struct mstrhdr8);
pMetaFlags = ((mstrFlags *) MSTR_HDR(8, s)) - 1;
break;
case MSTR_TYPE_16:
hdrlen = sizeof(struct mstrhdr16);
pMetaFlags = ((mstrFlags *) MSTR_HDR(16, s)) - 1;
break;
default: /* MSTR_TYPE_64: */
hdrlen = sizeof(struct mstrhdr64);
pMetaFlags = ((mstrFlags *) MSTR_HDR(64, s)) - 1;
break;
}
return (char *) s - hdrlen - ((isMeta) ? (mstrSumMetaLen(kind, *pMetaFlags) + sizeof(mstrFlags)) : 0);
}
static inline int mstrHdrSize(char type) {
switch(type&MSTR_TYPE_MASK) {
case MSTR_TYPE_5:
return sizeof(struct mstrhdr5);
case MSTR_TYPE_8:
return sizeof(struct mstrhdr8);
case MSTR_TYPE_16:
return sizeof(struct mstrhdr16);
case MSTR_TYPE_64:
return sizeof(struct mstrhdr64);
}
return 0;
}
static inline char mstrReqType(size_t string_size) {
if (string_size < 1<<5)
return MSTR_TYPE_5;
if (string_size < 1<<8)
return MSTR_TYPE_8;
if (string_size < 1<<16)
return MSTR_TYPE_16;
return MSTR_TYPE_64;
}
#ifdef REDIS_TEST
#include <stdlib.h>
#include <assert.h>
#include "testhelp.h"
#include "limits.h"
#ifndef UNUSED
#define UNUSED(x) (void)(x)
#endif
/* Exercise mstr with metadata layouts interesting enough to cover the cases of hfield, hkey and more */
#define B(idx) (1<<(idx))
#define META_IDX_MYMSTR_TTL4 0
#define META_IDX_MYMSTR_TTL8 1
#define META_IDX_MYMSTR_TYPE_ENC_LRU 2 // 4-bit type, 4-bit encoding, 24-bit LRU
#define META_IDX_MYMSTR_VALUE_PTR 3
#define META_IDX_MYMSTR_FLAG_NO_META 4
#define TEST_CONTEXT(context) printf("\nContext: %s \n", context);
int mstrTest(int argc, char **argv, int flags) {
UNUSED(argc);
UNUSED(argv);
UNUSED(flags);
struct mstrKind kind_mymstr = {
.name = "my_mstr",
.metaSize[META_IDX_MYMSTR_TTL4] = 4,
.metaSize[META_IDX_MYMSTR_TTL8] = 8,
.metaSize[META_IDX_MYMSTR_TYPE_ENC_LRU] = 4,
.metaSize[META_IDX_MYMSTR_VALUE_PTR] = 8,
.metaSize[META_IDX_MYMSTR_FLAG_NO_META] = 0,
};
TEST_CONTEXT("Create simple short mstr")
{
char *str = "foo";
mstr s = mstrNew(str, strlen(str), 0);
size_t expStrLen = strlen(str);
test_cond("Verify str length and alloc length",
mstrAllocLen(s, NULL) == (1 + expStrLen + 1) && /* mstrhdr5 + str + null */
mstrlen(s) == expStrLen && /* expected strlen(str) */
memcmp(s, str, expStrLen + 1) == 0);
mstrFree(&kind_mymstr, s);
}
TEST_CONTEXT("Create simple 40 bytes mstr")
{
char *str = "0123456789012345678901234567890123456789"; // 40 bytes
mstr s = mstrNew(str, strlen(str), 0);
test_cond("Verify str length and alloc length",
mstrAllocLen(s, NULL) == (3 + 40 + 1) && /* mstrhdr8 + str + null */
mstrlen(s) == 40 &&
memcmp(s,str,40) == 0);
mstrFree(&kind_mymstr, s);
}
TEST_CONTEXT("Create mstr with random characters")
{
long unsigned int i;
char str[66000];
for (i = 0 ; i < sizeof(str) ; ++i) str[i] = rand() % 256;
size_t len[] = { 31, 32, 33, 255, 256, 257, 65535, 65536, 65537, 66000};
for (i = 0 ; i < sizeof(len) / sizeof(len[0]) ; ++i) {
char title[100];
mstr s = mstrNew(str, len[i], 0);
size_t mstrhdrSize = (len[i] < 1<<5) ? sizeof(struct mstrhdr5) :
(len[i] < 1<<8) ? sizeof(struct mstrhdr8) :
(len[i] < 1<<16) ? sizeof(struct mstrhdr16) :
sizeof(struct mstrhdr64);
snprintf(title, sizeof(title), "Verify string of length %zu", len[i]);
test_cond(title,
mstrAllocLen(s, NULL) == (mstrhdrSize + len[i] + 1) && /* mstrhdrX + str + null */
mstrlen(s) == len[i] &&
memcmp(s,str,len[i]) == 0);
mstrFree(&kind_mymstr, s);
}
}
TEST_CONTEXT("Create short mstr with TTL4")
{
uint32_t *ttl;
mstr s = mstrNewWithMeta(&kind_mymstr,
"foo",
strlen("foo"),
B(META_IDX_MYMSTR_TTL4), /* allocate with TTL4 metadata */
0);
ttl = mstrMetaRef(s, &kind_mymstr, META_IDX_MYMSTR_TTL4);
*ttl = 0x12345678;
test_cond("Verify memory-allocation and string lengths",
mstrAllocLen(s, &kind_mymstr) == (1 + 3 + 2 + 1 + 4) && /* mstrhdr5 + str + null + mstrFlags + TTL4 */
mstrlen(s) == 3);
unsigned char expMem[] = {0xFF, 0xFF, 0xFF, 0xFF, 0x01, 0x00, 0x1c, 'f', 'o', 'o', '\0' };
uint32_t value = 0x12345678;
memcpy(expMem, &value, sizeof(uint32_t));
test_cond("Verify string and TTL4 payload", memcmp(
mstrMetaRef(s, &kind_mymstr, 0) , expMem, sizeof(expMem)) == 0);
test_cond("Verify mstrIsMetaAttached() function works", mstrIsMetaAttached(s) != 0);
mstrFree(&kind_mymstr, s);
}
TEST_CONTEXT("Create short mstr with TTL4 and value ptr ")
{
mstr s = mstrNewWithMeta(&kind_mymstr, "foo", strlen("foo"),
B(META_IDX_MYMSTR_TTL4) | B(META_IDX_MYMSTR_VALUE_PTR), 0);
*((uint32_t *) (mstrMetaRef(s, &kind_mymstr,
META_IDX_MYMSTR_TTL4))) = 0x12345678;
test_cond("Verify length and alloc length",
mstrAllocLen(s, &kind_mymstr) == (1 + 3 + 1 + 2 + 4 + 8) && /* mstrhdr5 + str + null + mstrFlags + TTL4 + PTR */
mstrlen(s) == 3);
mstrFree(&kind_mymstr, s);
}
TEST_CONTEXT("Copy mstr and add it TTL4")
{
mstr s1 = mstrNew("foo", strlen("foo"), 0);
mstr s2 = mstrNewCopy(&kind_mymstr, s1, B(META_IDX_MYMSTR_TTL4));
*((uint32_t *) (mstrMetaRef(s2, &kind_mymstr, META_IDX_MYMSTR_TTL4))) = 0x12345678;
test_cond("Verify new mstr includes TTL4",
mstrAllocLen(s2, &kind_mymstr) == (1 + 3 + 1 + 2 + 4) && /* mstrhdr5 + str + null + mstrFlags + TTL4 */
mstrlen(s2) == 3 && /* 'foo' = 3bytes */
memcmp(s2, "foo\0", 4) == 0);
mstr s3 = mstrNewCopy(&kind_mymstr, s2, B(META_IDX_MYMSTR_TTL4));
unsigned char expMem[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0x1, 0x0, 0x1c, 'f', 'o', 'o', '\0' };
uint32_t value = 0x12345678;
memcpy(expMem, &value, sizeof(uint32_t));
char *ppp = mstrGetStartAlloc(s3, &kind_mymstr);
test_cond("Verify string and TTL4 payload",
memcmp(ppp, expMem, sizeof(expMem)) == 0);
mstrPrint(s3, &kind_mymstr, 1);
mstrFree(&kind_mymstr, s1);
mstrFree(&kind_mymstr, s2);
mstrFree(&kind_mymstr, s3);
}
return 0;
}
#endif
/*
* Copyright Redis Ltd. 2024 - present
*
* Licensed under your choice of the Redis Source Available License 2.0 (RSALv2)
* or the Server Side Public License v1 (SSPLv1).
*
*
* WHAT IS MSTR (M-STRING)?
* ------------------------
* mstr stands for immutable string with optional metadata attached.
*
* The sds string is widely used across the system and serves as a general-purpose
* container to hold data. The need to optimize memory, aggregate strings along
* with metadata, and store them into Redis data structures as a single bulk keeps
* recurring. One thought might be: why not extend sds to support metadata? The
* answer is that sds is a mutable string by nature, with a wide API (split, join,
* etc.). Pushing metadata logic into sds would make it fragile and complex to
* maintain.
*
* Another idea involved using a simple struct with flags and a dynamic buf[] at the
* end. While this could be viable, it introduces considerable complexity and would
* need maintenance across different contexts.
*
* As an alternative, we introduce a new implementation of immutable strings,
* with a limited API, and with the option to attach metadata. In its basic form,
* without any metadata, the representation of the string resembles SDS, but there
* is no API to manipulate the string itself, only to attach metadata to it. The
* following diagram shows the memory layout of an mstring (mstrhdr8) when no
* metadata is attached:
*
* +----------------------------------------------+
* | mstrhdr8 | c-string | |
* +--------------------------------+-------------+
* |8b |2b |1b |5b |?bytes |8b|
* | Len | Type |m-bit=0 | Unused | String |\0|
* +----------------------------------------------+
* ^
* |
* mstrNew() returns pointer to here --+
*
* If the metadata flag is set (depicted as the m-bit in the diagram above),
* then the header is preceded by an additional 16 bits of metadata flags, such
* that if the i'th bit is set, the i'th metadata structure is attached to the
* mstring. The metadata layout and sizes are defined by the mstrKind structure
* (more below).
*
* The following diagram shows the memory layout of mstr (mstrhdr8) when 3 bits in mFlags
* are set to indicate that 3 fields of metadata are attached to the mstring at the
* beginning.
*
* +-------------------------------------------------------------------------------+
* | METADATA FIELDS | mflags | mstrhdr8 | c-string | |
* +-----------------------+--------+--------------------------------+-------------+
* |?bytes |?bytes |?bytes |16b |8b |2b |1b |5b |?bytes |8b|
* | Meta3 | Meta2 | Meta0 | 0x1101 | Len | Type |m-bit=1 | Unused | String |\0|
* +-------------------------------------------------------------------------------+
* ^
* |
* mstrNewWithMeta() returns pointer to here --+
*
* mstr allows defining different kinds (groups) of mstrings, each with its own
* unique metadata layout. For example, in the case of hash fields, every instance
* can optionally have TTL metadata attached to it. This is achieved by first
* prototyping a single mstrKind structure that defines the metadata layout and sizes
* of this specific kind. Each hash-field instance still has the freedom to attach
* or not attach the metadata, and the metadata flags (mFlags) of the instance
* reflect this decision.
*
* In the future, the keys of the Redis keyspace can be another kind of mstring that
* has TTL, LRU or even dictEntry metadata embedded into it. Unlike a vptr in C++,
* the mstrKind struct won't be attached to the mstring but will be passed as yet
* another argument to the API, to save memory. In addition, each instance of a given
* mstrKind can hold any subset of the metadata, and the metadata-flags of the
* instance will reflect that.
*
* The following example shows how to define an mstrKind for a possible future keyspace
* that aggregates several keyspace-related metadata into one compact, singly
* allocated mstring.
*
* typedef enum HkeyMetaFlags {
* HKEY_META_VAL_REF_COUNT = 0, // refcount
* HKEY_META_VAL_REF = 1, // Val referenced
* HKEY_META_EXPIRE = 2, // TTL and more
* HKEY_META_TYPE_ENC_LRU = 3, // TYPE + LRU + ENC
* HKEY_META_DICT_ENT_NEXT = 4, // Next dict entry
* // Following two must be together and in this order
* HKEY_META_VAL_EMBED8 = 5, // Val embedded, max 7 bytes
* HKEY_META_VAL_EMBED16 = 6, // Val embedded, max 15 bytes (23 with EMBED8)
* } HkeyMetaFlags;
*
* mstrKind hkeyKind = {
* .name = "hkey",
* .metaSize[HKEY_META_VAL_REF_COUNT] = 4,
* .metaSize[HKEY_META_VAL_REF] = 8,
* .metaSize[HKEY_META_EXPIRE] = sizeof(ExpireMeta),
* .metaSize[HKEY_META_TYPE_ENC_LRU] = 8,
* .metaSize[HKEY_META_DICT_ENT_NEXT] = 8,
* .metaSize[HKEY_META_VAL_EMBED8] = 8,
* .metaSize[HKEY_META_VAL_EMBED16] = 16,
* };
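*
* For illustration only, a keyspace key of this kind, carrying just expiration
* metadata, could then be created and accessed roughly along these lines
* (hkeyKind and HKEY_META_EXPIRE are the hypothetical definitions above, and
* ExpireMeta is assumed to be defined elsewhere):
*
*    mstr key = mstrNewWithMeta(&hkeyKind, "mykey", 5,
*                               1 << HKEY_META_EXPIRE, 0);
*    ExpireMeta *em = mstrMetaRef(key, &hkeyKind, HKEY_META_EXPIRE);
*    ... populate *em ...
*    mstrFree(&hkeyKind, key);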
*
* MSTR-ALIGNMENT
* --------------
* There are two types of alignments to take into consideration:
* 1. Alignment of the metadata.
* 2. Alignment of returned mstr pointer
*
* 1) As the metadata layout is reversed relative to the enumeration, it is
* recommended to put metadata with "better" alignment first in the memory layout
* (enumerated last), while the worst, or those that simply don't require any
* alignment, come last in the memory layout (enumerated first). This is similar
* to the considerations applied when defining a new struct in C. Note also that
* each metadata might either be attached to the mstr or not, which complicates
* the design phase of a new mstrKind a little.
*
* In the example above, HKEY_META_VAL_REF_COUNT, with the worst alignment (4
* bytes), is enumerated first and therefore will be last in the memory layout.
*
* 2) A few optimizations in Redis rely on the fact that an sds address is always an
* odd pointer. We can achieve the same with a little effort. It was already taken
* care of that all headers of type mstrhdrX have an odd size. With that in mind, if
* a new kind of mstr is required to be limited to odd addresses, then we must make
* sure that all related metadatas defined in the mstrKind are even in size.
*/
#ifndef __MSTR_H
#define __MSTR_H
#include <sys/types.h>
#include <stdarg.h>
#include <stdint.h>
/* Selective copy of ifndef from server.h instead of including it */
#ifndef static_assert
#define static_assert(expr, lit) extern char __static_assert_failure[(expr) ? 1:-1]
#endif
#define MSTR_TYPE_5 0
#define MSTR_TYPE_8 1
#define MSTR_TYPE_16 2
#define MSTR_TYPE_64 3
#define MSTR_TYPE_MASK 3
#define MSTR_TYPE_BITS 2
#define MSTR_META_MASK 4
#define MSTR_HDR(T,s) ((struct mstrhdr##T *)((s)-(sizeof(struct mstrhdr##T))))
#define MSTR_HDR_VAR(T,s) struct mstrhdr##T *sh = (void*)((s)-(sizeof(struct mstrhdr##T)));
#define MSTR_META_BITS 1 /* is metadata attached? */
#define MSTR_TYPE_5_LEN(f) ((f) >> (MSTR_TYPE_BITS + MSTR_META_BITS))
#define CREATE_MSTR_INFO(len, ismeta, type) ( (((len<<MSTR_META_BITS) + ismeta) << (MSTR_TYPE_BITS)) | type )
/* mimic plain c-string */
typedef char *mstr;
/* Flags that can be set on an mstring to indicate which metadata is attached to
* it. */
typedef uint16_t mstrFlags;
struct __attribute__ ((__packed__)) mstrhdr5 {
unsigned char info; /* 2 lsb of type, 1 metadata, and 5 msb of string length */
char buf[];
};
struct __attribute__ ((__packed__)) mstrhdr8 {
uint8_t unused; /* To achieve odd size header (See comment above) */
uint8_t len;
unsigned char info; /* 2 lsb of type, 1 metadata bit, 5 unused bits */
char buf[];
};
struct __attribute__ ((__packed__)) mstrhdr16 {
uint16_t len;
unsigned char info; /* 2 lsb of type, 1 metadata bit, 5 unused bits */
char buf[];
};
struct __attribute__ ((__packed__)) mstrhdr64 {
uint64_t len;
unsigned char info; /* 2 lsb of type, 1 metadata bit, 5 unused bits */
char buf[];
};
#define NUM_MSTR_FLAGS (sizeof(mstrFlags)*8)
/* mstrKind is used to define a kind (a group) of mstring with its own metadata layout */
typedef struct mstrKind {
const char *name;
int metaSize[NUM_MSTR_FLAGS];
} mstrKind;
mstr mstrNew(const char *initStr, size_t lenStr, int trymalloc);
mstr mstrNewWithMeta(struct mstrKind *kind, const char *initStr, size_t lenStr, mstrFlags flags, int trymalloc);
mstr mstrNewCopy(struct mstrKind *kind, mstr src, mstrFlags newFlags);
void *mstrGetAllocPtr(struct mstrKind *kind, mstr str);
void mstrFree(struct mstrKind *kind, mstr s);
mstrFlags *mstrFlagsRef(mstr s);
void *mstrMetaRef(mstr s, struct mstrKind *kind, int flagIdx);
size_t mstrlen(const mstr s);
/* return non-zero if metadata is attached to mstring */
static inline int mstrIsMetaAttached(mstr s) { return s[-1] & MSTR_META_MASK; }
/* return whether a specific flag-index is set */
static inline int mstrGetFlag(mstr s, int flagIdx) { return *mstrFlagsRef(s) & (1 << flagIdx); }
/* See comment above about MSTR-ALIGNMENT(2) */
static_assert(sizeof(struct mstrhdr5 ) % 2 == 1, "must be odd");
static_assert(sizeof(struct mstrhdr8 ) % 2 == 1, "must be odd");
static_assert(sizeof(struct mstrhdr16 ) % 2 == 1, "must be odd");
static_assert(sizeof(struct mstrhdr64 ) % 2 == 1, "must be odd");
static_assert(sizeof(mstrFlags ) % 2 == 0, "must be even to keep mstr pointer odd");
#ifdef REDIS_TEST
int mstrTest(int argc, char *argv[], int flags);
#endif
#endif
@@ -31,6 +31,14 @@ size_t sdsZmallocSize(sds s) {
return zmalloc_size(sh);
}
+ /* Return the size consumed from the allocator, for the specified hfield with
+ * metadata (mstr), including internal fragmentation. This function is used in
+ * order to compute the client output buffer size. */
+ size_t hfieldZmallocSize(hfield s) {
+ void *sh = hfieldGetAllocPtr(s);
+ return zmalloc_size(sh);
+ }
/* Return the amount of memory used by the sds string at object->ptr
* for a string object. This includes internal fragmentation. */
size_t getStringObjectSdsUsedMemory(robj *o) {
...
@@ -80,7 +80,7 @@ sds keyspaceEventsFlagsToString(int flags) {
* 'event' is a C string representing the event name.
* 'key' is a Redis object representing the key name.
* 'dbid' is the database ID where the key lives. */
- void notifyKeyspaceEvent(int type, char *event, robj *key, int dbid) {
+ void notifyKeyspaceEvent(int type, const char *event, robj *key, int dbid) {
sds chan;
robj *chanobj, *eventobj;
int len = -1;
...
@@ -979,7 +979,6 @@ size_t streamRadixTreeMemoryUsage(rax *rax) {
* are checked and averaged to estimate the total size. */
#define OBJ_COMPUTE_SIZE_DEF_SAMPLES 5 /* Default sample size. */
size_t objectComputeSize(robj *key, robj *o, size_t sample_size, int dbid) {
- sds ele, ele2;
dict *d;
dictIterator *di;
struct dictEntry *de;
@@ -1016,7 +1015,7 @@ size_t objectComputeSize(robj *key, robj *o, size_t sample_size, int dbid) {
di = dictGetIterator(d);
asize = sizeof(*o)+sizeof(dict)+(sizeof(struct dictEntry*)*dictBuckets(d));
while((de = dictNext(di)) != NULL && samples < sample_size) {
- ele = dictGetKey(de);
+ sds ele = dictGetKey(de);
elesize += dictEntryMemUsage() + sdsZmallocSize(ele);
samples++;
}
@@ -1057,9 +1056,9 @@ size_t objectComputeSize(robj *key, robj *o, size_t sample_size, int dbid) {
di = dictGetIterator(d);
asize = sizeof(*o)+sizeof(dict)+(sizeof(struct dictEntry*)*dictBuckets(d));
while((de = dictNext(di)) != NULL && samples < sample_size) {
- ele = dictGetKey(de);
- ele2 = dictGetVal(de);
- elesize += sdsZmallocSize(ele) + sdsZmallocSize(ele2);
+ hfield ele = dictGetKey(de);
+ sds ele2 = dictGetVal(de);
+ elesize += hfieldZmallocSize(ele) + sdsZmallocSize(ele2);
elesize += dictEntryMemUsage();
samples++;
}
...
@@ -173,11 +173,16 @@ raxNode *raxNewNode(size_t children, int datafield) {
/* Allocate a new rax and return its pointer. On out of memory the function
* returns NULL. */
rax *raxNew(void) {
- rax *rax = rax_malloc(sizeof(*rax));
+ return raxNewWithMetadata(0);
+ }
+ /* Allocate a new rax with metadata */
+ rax *raxNewWithMetadata(int metaSize) {
+ rax *rax = rax_malloc(sizeof(*rax) + metaSize);
if (rax == NULL) return NULL;
rax->numele = 0;
rax->numnodes = 1;
- rax->head = raxNewNode(0,0);
+ rax->head = raxNewNode(0, 0);
if (rax->head == NULL) {
rax_free(rax);
return NULL;
@@ -1210,6 +1215,25 @@ void raxRecursiveFree(rax *rax, raxNode *n, void (*free_callback)(void*)) {
rax->numnodes--;
}
+ /* Same as raxRecursiveFree() with context argument */
+ void raxRecursiveFreeWithCtx(rax *rax, raxNode *n,
+ void (*free_callback)(void *item, void *ctx), void *ctx) {
+ debugnode("free traversing",n);
+ int numchildren = n->iscompr ? 1 : n->size;
+ raxNode **cp = raxNodeLastChildPtr(n);
+ while(numchildren--) {
+ raxNode *child;
+ memcpy(&child,cp,sizeof(child));
+ raxRecursiveFreeWithCtx(rax,child,free_callback, ctx);
+ cp--;
+ }
+ debugnode("free depth-first",n);
+ if (free_callback && n->iskey && !n->isnull)
+ free_callback(raxGetData(n), ctx);
+ rax_free(n);
+ rax->numnodes--;
+ }
/* Free a whole radix tree, calling the specified callback in order to
* free the auxiliary data. */
void raxFreeWithCallback(rax *rax, void (*free_callback)(void*)) {
@@ -1218,6 +1242,15 @@ void raxFreeWithCallback(rax *rax, void (*free_callback)(void*)) {
rax_free(rax);
}
+ /* Free a whole radix tree, calling the specified callback in order to
+ * free the auxiliary data. */
+ void raxFreeWithCbAndContext(rax *rax,
+ void (*free_callback)(void *item, void *ctx), void *ctx) {
+ raxRecursiveFreeWithCtx(rax,rax->head,free_callback,ctx);
+ assert(rax->numnodes == 0);
+ rax_free(rax);
+ }
/* Free a whole radix tree. */
void raxFree(rax *rax) {
raxFreeWithCallback(rax,NULL);
...
@@ -113,6 +113,7 @@ typedef struct rax {
raxNode *head;
uint64_t numele;
uint64_t numnodes;
+ void *metadata[];
} rax;
/* Stack data structure used by raxLowWalk() in order to, optionally, return
@@ -166,12 +167,16 @@ typedef struct raxIterator {
/* Exported API. */
rax *raxNew(void);
+ rax *raxNewWithMetadata(int metaSize);
int raxInsert(rax *rax, unsigned char *s, size_t len, void *data, void **old);
int raxTryInsert(rax *rax, unsigned char *s, size_t len, void *data, void **old);
int raxRemove(rax *rax, unsigned char *s, size_t len, void **old);
int raxFind(rax *rax, unsigned char *s, size_t len, void **value);
void raxFree(rax *rax);
void raxFreeWithCallback(rax *rax, void (*free_callback)(void*));
+ void raxFreeWithCbAndContext(rax *rax,
+ void (*free_callback)(void *item, void *ctx),
+ void *ctx);
void raxStart(raxIterator *it, rax *rt);
int raxSeek(raxIterator *it, const char *op, unsigned char *ele, size_t len);
int raxNext(raxIterator *it);
...
@@ -268,8 +268,9 @@ int rdbEncodeInteger(long long value, unsigned char *enc) {
* The returned value changes according to the flags, see
* rdbGenericLoadStringObject() for more info. */
void *rdbLoadIntegerObject(rio *rdb, int enctype, int flags, size_t *lenptr) {
- int plain = flags & RDB_LOAD_PLAIN;
- int sds = flags & RDB_LOAD_SDS;
+ int plainFlag = flags & RDB_LOAD_PLAIN;
+ int sdsFlag = flags & RDB_LOAD_SDS;
+ int hfldFlag = flags & RDB_LOAD_HFLD;
int encode = flags & RDB_LOAD_ENC;
unsigned char enc[4];
long long val;
@@ -295,11 +296,17 @@ void *rdbLoadIntegerObject(rio *rdb, int enctype, int flags, size_t *lenptr) {
rdbReportCorruptRDB("Unknown RDB integer encoding type %d",enctype);
return NULL; /* Never reached. */
}
- if (plain || sds) {
+ if (plainFlag || sdsFlag || hfldFlag) {
char buf[LONG_STR_SIZE], *p;
int len = ll2string(buf,sizeof(buf),val);
if (lenptr) *lenptr = len;
- p = plain ? zmalloc(len) : sdsnewlen(SDS_NOINIT,len);
+ if (plainFlag) {
+ p = zmalloc(len);
+ } else if (sdsFlag) {
+ p = sdsnewlen(SDS_NOINIT,len);
+ } else { /* hfldFlag */
+ p = hfieldNew(NULL, len, 0);
+ }
memcpy(p,buf,len);
return p;
} else if (encode) {
@@ -368,8 +375,11 @@ ssize_t rdbSaveLzfStringObject(rio *rdb, unsigned char *s, size_t len) {
* changes according to 'flags'. For more info check the
* rdbGenericLoadStringObject() function. */
void *rdbLoadLzfStringObject(rio *rdb, int flags, size_t *lenptr) {
- int plain = flags & RDB_LOAD_PLAIN;
- int sds = flags & RDB_LOAD_SDS;
+ int plainFlag = flags & RDB_LOAD_PLAIN;
+ int sdsFlag = flags & RDB_LOAD_SDS;
+ int hfldFlag = flags & RDB_LOAD_HFLD;
+ int robjFlag = (!(plainFlag || sdsFlag || hfldFlag)); /* not plain/sds/hfld */
uint64_t len, clen;
unsigned char *c = NULL;
char *val = NULL;
@@ -382,11 +392,14 @@ void *rdbLoadLzfStringObject(rio *rdb, int flags, size_t *lenptr) {
}
/* Allocate our target according to the uncompressed size. */
- if (plain) {
+ if (plainFlag) {
val = ztrymalloc(len);
- } else {
+ } else if (sdsFlag || robjFlag) {
val = sdstrynewlen(SDS_NOINIT,len);
+ } else { /* hfldFlag */
+ val = hfieldTryNew(NULL, len, 0);
}
if (!val) {
serverLog(isRestoreContext()? LL_VERBOSE: LL_WARNING, "rdbLoadLzfStringObject failed allocating %llu bytes", (unsigned long long)len);
goto err;
@@ -402,17 +415,17 @@ }
}
zfree(c);
- if (plain || sds) {
- return val;
- } else {
- return createObject(OBJ_STRING,val);
- }
+ return (robjFlag) ? createObject(OBJ_STRING,val) : (void *) val;
err:
zfree(c);
- if (plain)
+ if (plainFlag) {
zfree(val);
- else
+ } else if (sdsFlag || robjFlag) {
sdsfree(val);
+ } else { /* hfldFlag*/
+ hfieldFree(val);
+ }
return NULL;
}
@@ -495,8 +508,12 @@ ssize_t rdbSaveStringObject(rio *rdb, robj *obj) {
* On I/O error NULL is returned.
*/
void *rdbGenericLoadStringObject(rio *rdb, int flags, size_t *lenptr) {
- int plain = flags & RDB_LOAD_PLAIN;
- int sds = flags & RDB_LOAD_SDS;
+ void *buf;
+ int plainFlag = flags & RDB_LOAD_PLAIN;
+ int sdsFlag = flags & RDB_LOAD_SDS;
+ int hfldFlag = flags & RDB_LOAD_HFLD;
+ int robjFlag = (!(plainFlag || sdsFlag || hfldFlag)); /* not plain/sds/hfld */
int isencoded;
unsigned long long len;
@@ -517,22 +534,8 @@ void *rdbGenericLoadStringObject(rio *rdb, int flags, size_t *lenptr) {
}
}
- if (plain || sds) {
- void *buf = plain ? ztrymalloc(len) : sdstrynewlen(SDS_NOINIT,len);
- if (!buf) {
- serverLog(isRestoreContext()? LL_VERBOSE: LL_WARNING, "rdbGenericLoadStringObject failed allocating %llu bytes", len);
- return NULL;
- }
- if (lenptr) *lenptr = len;
- if (len && rioRead(rdb,buf,len) == 0) {
- if (plain)
- zfree(buf);
- else
- sdsfree(buf);
- return NULL;
- }
- return buf;
- } else {
+ /* return robj */
+ if (robjFlag) {
robj *o = tryCreateStringObject(SDS_NOINIT,len);
if (!o) {
serverLog(isRestoreContext()? LL_VERBOSE: LL_WARNING, "rdbGenericLoadStringObject failed allocating %llu bytes", len);
@@ -544,6 +547,32 @@ void *rdbGenericLoadStringObject(rio *rdb, int flags, size_t *lenptr) {
}
return o;
}
+ /* plain/sds/hfld */
+ if (plainFlag) {
+ buf = ztrymalloc(len);
+ } else if (sdsFlag) {
+ buf = sdstrynewlen(SDS_NOINIT,len);
+ } else { /* hfldFlag */
+ buf = hfieldTryNew(NULL, len, 0);
+ }
+ if (!buf) {
+ serverLog(isRestoreContext()? LL_VERBOSE: LL_WARNING, "rdbGenericLoadStringObject failed allocating %llu bytes", len);
+ return NULL;
+ }
+ if (lenptr) *lenptr = len;
+ if (len && rioRead(rdb,buf,len) == 0) {
+ if (plainFlag)
+ zfree(buf);
+ else if (sdsFlag) {
+ sdsfree(buf);
+ } else { /* hfldFlag */
+ hfieldFree(buf);
+ }
+ return NULL;
+ }
+ return buf;
}
robj *rdbLoadStringObject(rio *rdb) {
@@ -924,11 +953,11 @@ ssize_t rdbSaveObject(rio *rdb, robj *o, robj *key, int dbid) {
nwritten += n;
while((de = dictNext(di)) != NULL) {
- sds field = dictGetKey(de);
+ hfield field = dictGetKey(de);
sds value = dictGetVal(de);
if ((n = rdbSaveRawString(rdb,(unsigned char*)field,
- sdslen(field))) == -1)
+ hfieldlen(field))) == -1)
{
dictReleaseIterator(di);
return -1;
@@ -1403,7 +1432,7 @@ werr:
return C_ERR;
}
/* This helper function is only used for diskless replication.
* This is just a wrapper to rdbSaveRio() that additionally adds a prefix
* and a suffix to the generated RDB dump. The prefix is:
*
@@ -1856,7 +1885,7 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error) {
decrRefCount(ele);
}
- listTypeTryConversion(o,LIST_CONV_AUTO,NULL,NULL);
+ listTypeTryConversion(o, LIST_CONV_AUTO, NULL, NULL);
} else if (rdbtype == RDB_TYPE_SET) {
/* Read Set value */
if ((len = rdbLoadLen(rdb,NULL)) == RDB_LENERR) return NULL;
@@ -1869,7 +1898,7 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error) {
o = createSetObject();
/* It's faster to expand the dict to the right size asap in order
* to avoid rehashing */
- if (len > DICT_HT_INITIAL_SIZE && dictTryExpand(o->ptr,len) != DICT_OK) {
+ if (len > DICT_HT_INITIAL_SIZE && dictTryExpand(o->ptr, len) != DICT_OK) {
rdbReportCorruptRDB("OOM in dictTryExpand %llu", (unsigned long long)len);
decrRefCount(o);
return NULL;
@@ -1896,7 +1925,7 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error) {
/* Fetch integer value from element. */
if (isSdsRepresentableAsLongLong(sdsele,&llval) == C_OK) {
uint8_t success;
- o->ptr = intsetAdd(o->ptr,llval,&success);
+ o->ptr = intsetAdd(o->ptr, llval, &success);
if (!success) {
rdbReportCorruptRDB("Duplicate set members detected");
decrRefCount(o);
@@ -1946,7 +1975,7 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error) {
/* This will also be called when the set was just converted
* to a regular hash table encoded set. */
if (o->encoding == OBJ_ENCODING_HT) {
- if (dictAdd((dict*)o->ptr,sdsele,NULL) != DICT_OK) {
+ if (dictAdd((dict*)o->ptr, sdsele, NULL) != DICT_OK) {
rdbReportCorruptRDB("Duplicate set members detected");
decrRefCount(o);
sdsfree(sdsele);
@@ -2024,12 +2053,13 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error) {
maxelelen <= server.zset_max_listpack_value &&
lpSafeToAdd(NULL, totelelen))
{
- zsetConvert(o,OBJ_ENCODING_LISTPACK);
+ zsetConvert(o, OBJ_ENCODING_LISTPACK);
}
} else if (rdbtype == RDB_TYPE_HASH) {
uint64_t len;
int ret;
- sds field, value;
+ sds value;
+ hfield field;
dict *dupSearchDict = NULL;
len = rdbLoadLen(rdb, NULL);
@@ -2054,43 +2084,46 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error) {
while (o->encoding == OBJ_ENCODING_LISTPACK && len > 0) {
len--;
/* Load raw strings */
- if ((field = rdbGenericLoadStringObject(rdb,RDB_LOAD_SDS,NULL)) == NULL) {
+ if ((field = rdbGenericLoadStringObject(rdb,RDB_LOAD_HFLD,NULL)) == NULL) {
decrRefCount(o);
if (dupSearchDict) dictRelease(dupSearchDict);
return NULL;
}
if ((value = rdbGenericLoadStringObject(rdb,RDB_LOAD_SDS,NULL)) == NULL) {
- sdsfree(field);
+ hfieldFree(field);
decrRefCount(o);
if (dupSearchDict) dictRelease(dupSearchDict);
return NULL;
}
if (dupSearchDict) {
- sds field_dup = sdsdup(field);
+ sds field_dup = sdsnewlen(field, hfieldlen(field));
if (dictAdd(dupSearchDict, field_dup, NULL) != DICT_OK) {
rdbReportCorruptRDB("Hash with dup elements");
dictRelease(dupSearchDict);
decrRefCount(o);
sdsfree(field_dup);
- sdsfree(field);
+ hfieldFree(field);
sdsfree(value);
return NULL;
}
}
/* Convert to hash table if size threshold is exceeded */
- if (sdslen(field) > server.hash_max_listpack_value ||
+ if (hfieldlen(field) > server.hash_max_listpack_value ||
sdslen(value) > server.hash_max_listpack_value ||
- !lpSafeToAdd(o->ptr, sdslen(field)+sdslen(value)))
+ !lpSafeToAdd(o->ptr, hfieldlen(field) + sdslen(value)))
{
hashTypeConvert(o, OBJ_ENCODING_HT);
+ dictUseStoredKeyApi((dict *)o->ptr, 1);
ret = dictAdd((dict*)o->ptr, field, value);
+ dictUseStoredKeyApi((dict *)o->ptr, 0);
if (ret == DICT_ERR) {
rdbReportCorruptRDB("Duplicate hash fields detected");
if (dupSearchDict) dictRelease(dupSearchDict);
sdsfree(value);
- sdsfree(field);
+ hfieldFree(field);
decrRefCount(o);
return NULL;
}
@@ -2098,10 +2131,10 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error) {
}
/* Add pair to listpack */
- o->ptr = lpAppend(o->ptr, (unsigned char*)field, sdslen(field));
+ o->ptr = lpAppend(o->ptr, (unsigned char*)field, hfieldlen(field));
o->ptr = lpAppend(o->ptr, (unsigned char*)value, sdslen(value));
- sdsfree(field);
+ hfieldFree(field);
sdsfree(value);
}
@@ -2113,7 +2146,7 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error) {
}
if (o->encoding == OBJ_ENCODING_HT && len > DICT_HT_INITIAL_SIZE) {
- if (dictTryExpand(o->ptr,len) != DICT_OK) {
+ if (dictTryExpand(o->ptr, len) != DICT_OK) {
rdbReportCorruptRDB("OOM in dictTryExpand %llu", (unsigned long long)len);
decrRefCount(o);
return NULL;
@@ -2124,22 +2157,25 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error) {
while (o->encoding == OBJ_ENCODING_HT && len > 0) {
len--;
/* Load encoded strings */
- if ((field = rdbGenericLoadStringObject(rdb,RDB_LOAD_SDS,NULL)) == NULL) {
+ if ((field = rdbGenericLoadStringObject(rdb,RDB_LOAD_HFLD,NULL)) == NULL) {
decrRefCount(o);
return NULL;
}
if ((value = rdbGenericLoadStringObject(rdb,RDB_LOAD_SDS,NULL)) == NULL) {
- sdsfree(field);
+ hfieldFree(field);
decrRefCount(o);
return NULL;
}
/* Add pair to hash table */
- ret = dictAdd((dict*)o->ptr, field, value);
+ dict *d = o->ptr;
+ dictUseStoredKeyApi(d, 1);
+ ret = dictAdd(d, field, value);
+ dictUseStoredKeyApi(d, 0);
if (ret == DICT_ERR) {
rdbReportCorruptRDB("Duplicate hash fields detected");
sdsfree(value);
- sdsfree(field);
+ hfieldFree(field);
decrRefCount(o);
return NULL;
}
@@ -2221,7 +2257,7 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error) {
goto emptykey;
}
- listTypeTryConversion(o,LIST_CONV_AUTO,NULL,NULL);
+ listTypeTryConversion(o, LIST_CONV_AUTO, NULL, NULL);
} else if (rdbtype == RDB_TYPE_HASH_ZIPMAP ||
rdbtype == RDB_TYPE_LIST_ZIPLIST ||
rdbtype == RDB_TYPE_SET_INTSET ||
@@ -2236,7 +2272,7 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error) {
rdbGenericLoadStringObject(rdb,RDB_LOAD_PLAIN,&encoded_len);
if (encoded == NULL) return NULL;
- o = createObject(OBJ_STRING,encoded); /* Obj type fixed below. */
+ o = createObject(OBJ_STRING, encoded); /* Obj type fixed below. */
/* Fix the object encoding, and make sure to convert the encoded
* data type into the base type if accordingly to the current
@@ -2292,14 +2328,14 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error) {
o->type = OBJ_HASH;
o->encoding = OBJ_ENCODING_LISTPACK;
- if (hashTypeLength(o) > server.hash_max_listpack_entries ||
+ if (hashTypeLength(o, 0) > server.hash_max_listpack_entries ||
maxlen > server.hash_max_listpack_value)
{
hashTypeConvert(o, OBJ_ENCODING_HT);
}
}
break;
case RDB_TYPE_LIST_ZIPLIST:
{
quicklist *ql = quicklistNew(server.list_max_listpack_size,
server.list_compress_depth);
@@ -2341,7 +2377,7 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error) {
o->type = OBJ_SET;
o->encoding = OBJ_ENCODING_INTSET;
if (intsetLen(o->ptr) > server.set_max_intset_entries)
- setTypeConvert(o,OBJ_ENCODING_HT);
+ setTypeConvert(o, OBJ_ENCODING_HT);
break;
case RDB_TYPE_SET_LISTPACK:
if (deep_integrity_validation) server.stat_dump_payload_sanitizations++;
@@ -2386,7 +2422,7 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error) {
}
if (zsetLength(o) > server.zset_max_listpack_entries)
- zsetConvert(o,OBJ_ENCODING_SKIPLIST);
+ zsetConvert(o, OBJ_ENCODING_SKIPLIST);
else
o->ptr = lpShrinkToFit(o->ptr);
break;
@@ -2408,7 +2444,7 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error) {
}
if (zsetLength(o) > server.zset_max_listpack_entries)
- zsetConvert(o,OBJ_ENCODING_SKIPLIST);
+ zsetConvert(o, OBJ_ENCODING_SKIPLIST);
break;
case RDB_TYPE_HASH_ZIPLIST:
{
@@ -2426,12 +2462,12 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error) {
o->ptr = lp;
o->type = OBJ_HASH;
o->encoding = OBJ_ENCODING_LISTPACK;
- if (hashTypeLength(o) == 0) {
+ if (hashTypeLength(o, 0) == 0) {
decrRefCount(o);
goto emptykey;
}
- if (hashTypeLength(o) > server.hash_max_listpack_entries)
+ if (hashTypeLength(o, 0) > server.hash_max_listpack_entries)
hashTypeConvert(o, OBJ_ENCODING_HT);
else
o->ptr = lpShrinkToFit(o->ptr);
@@ -2448,12 +2484,12 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error) {
}
o->type = OBJ_HASH;
o->encoding = OBJ_ENCODING_LISTPACK;
- if (hashTypeLength(o) == 0) {
+ if (hashTypeLength(o, 0) == 0) {
decrRefCount(o);
goto emptykey;
}
- if (hashTypeLength(o) > server.hash_max_listpack_entries)
+ if (hashTypeLength(o, 0) > server.hash_max_listpack_entries)
hashTypeConvert(o, OBJ_ENCODING_HT);
break;
default:
...@@ -2540,7 +2576,7 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error) { ...@@ -2540,7 +2576,7 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error) {
/* Load the last entry ID. */ /* Load the last entry ID. */
s->last_id.ms = rdbLoadLen(rdb,NULL); s->last_id.ms = rdbLoadLen(rdb,NULL);
s->last_id.seq = rdbLoadLen(rdb,NULL); s->last_id.seq = rdbLoadLen(rdb,NULL);
if (rdbtype >= RDB_TYPE_STREAM_LISTPACKS_2) { if (rdbtype >= RDB_TYPE_STREAM_LISTPACKS_2) {
/* Load the first entry ID. */ /* Load the first entry ID. */
s->first_id.ms = rdbLoadLen(rdb,NULL); s->first_id.ms = rdbLoadLen(rdb,NULL);
...@@ -2559,9 +2595,9 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error) { ...@@ -2559,9 +2595,9 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error) {
s->max_deleted_entry_id.ms = 0; s->max_deleted_entry_id.ms = 0;
s->max_deleted_entry_id.seq = 0; s->max_deleted_entry_id.seq = 0;
s->entries_added = s->length; s->entries_added = s->length;
/* Since the rax is already loaded, we can find the first entry's /* Since the rax is already loaded, we can find the first entry's
* ID. */ * ID. */
streamGetEdgeID(s,1,1,&s->first_id); streamGetEdgeID(s,1,1,&s->first_id);
} }
...@@ -2807,7 +2843,7 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error) { ...@@ -2807,7 +2843,7 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error) {
uint64_t eof = rdbLoadLen(rdb,NULL); uint64_t eof = rdbLoadLen(rdb,NULL);
if (eof == RDB_LENERR) { if (eof == RDB_LENERR) {
if (ptr) { if (ptr) {
o = createModuleObject(mt,ptr); /* creating just in order to easily destroy */ o = createModuleObject(mt, ptr); /* creating just in order to easily destroy */
decrRefCount(o); decrRefCount(o);
} }
return NULL; return NULL;
...@@ -2816,7 +2852,7 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error) { ...@@ -2816,7 +2852,7 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error) {
rdbReportCorruptRDB("The RDB file contains module data for the module '%s' that is not terminated by " rdbReportCorruptRDB("The RDB file contains module data for the module '%s' that is not terminated by "
"the proper module value EOF marker", moduleTypeModuleName(mt)); "the proper module value EOF marker", moduleTypeModuleName(mt));
if (ptr) { if (ptr) {
o = createModuleObject(mt,ptr); /* creating just in order to easily destroy */ o = createModuleObject(mt, ptr); /* creating just in order to easily destroy */
decrRefCount(o); decrRefCount(o);
} }
return NULL; return NULL;
...@@ -2828,7 +2864,7 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error) { ...@@ -2828,7 +2864,7 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error) {
moduleTypeModuleName(mt)); moduleTypeModuleName(mt));
return NULL; return NULL;
} }
o = createModuleObject(mt,ptr); o = createModuleObject(mt, ptr);
} else { } else {
rdbReportReadError("Unknown RDB encoding type %d",rdbtype); rdbReportReadError("Unknown RDB encoding type %d",rdbtype);
return NULL; return NULL;
...@@ -3256,8 +3292,8 @@ int rdbLoadRioWithLoadingCtx(rio *rdb, int rdbflags, rdbSaveInfo *rsi, rdbLoadin ...@@ -3256,8 +3292,8 @@ int rdbLoadRioWithLoadingCtx(rio *rdb, int rdbflags, rdbSaveInfo *rsi, rdbLoadin
* received from the master. In the latter case, the master is * received from the master. In the latter case, the master is
* responsible for key expiry. If we would expire keys here, the * responsible for key expiry. If we would expire keys here, the
* snapshot taken by the master may not be reflected on the slave. * snapshot taken by the master may not be reflected on the slave.
* Similarly, if the base AOF is RDB format, we want to load all * Similarly, if the base AOF is RDB format, we want to load all
* the keys as they are, since the log of operations in the incr AOF * the keys as they are, since the log of operations in the incr AOF
* is assumed to work in the exact keyspace state. */ * is assumed to work in the exact keyspace state. */
if (val == NULL) { if (val == NULL) {
/* Since we used to have bug that could lead to empty keys /* Since we used to have bug that could lead to empty keys
......
...@@ -105,6 +105,7 @@ ...@@ -105,6 +105,7 @@
#define RDB_LOAD_ENC (1<<0) #define RDB_LOAD_ENC (1<<0)
#define RDB_LOAD_PLAIN (1<<1) #define RDB_LOAD_PLAIN (1<<1)
#define RDB_LOAD_SDS (1<<2) #define RDB_LOAD_SDS (1<<2)
#define RDB_LOAD_HFLD (1<<3)
/* flags on the purpose of rdb save or load */ /* flags on the purpose of rdb save or load */
#define RDBFLAGS_NONE 0 /* No special RDB loading or saving. */ #define RDBFLAGS_NONE 0 /* No special RDB loading or saving. */
......
...@@ -19,6 +19,8 @@ ...@@ -19,6 +19,8 @@
#include "syscheck.h" #include "syscheck.h"
#include "threads_mngr.h" #include "threads_mngr.h"
#include "fmtargs.h" #include "fmtargs.h"
#include "mstr.h"
#include "ebuckets.h"
#include <time.h> #include <time.h>
#include <signal.h> #include <signal.h>
...@@ -281,6 +283,18 @@ int dictSdsKeyCompare(dict *d, const void *key1, ...@@ -281,6 +283,18 @@ int dictSdsKeyCompare(dict *d, const void *key1,
return memcmp(key1, key2, l1) == 0; return memcmp(key1, key2, l1) == 0;
} }
int dictSdsMstrKeyCompare(dict *d, const void *sdsLookup, const void *mstrStored)
{
int l1,l2;
UNUSED(d);
l1 = sdslen((sds)sdsLookup);
l2 = hfieldlen((hfield)mstrStored);
if (l1 != l2) return 0;
return memcmp(sdsLookup, mstrStored, l1) == 0;
}
/* A case insensitive version used for the command lookup table and other /* A case insensitive version used for the command lookup table and other
* places where case insensitive non binary-safe comparison is needed. */ * places where case insensitive non binary-safe comparison is needed. */
int dictSdsKeyCaseCompare(dict *d, const void *key1, int dictSdsKeyCaseCompare(dict *d, const void *key1,
...@@ -2500,6 +2514,7 @@ void resetServerStats(void) { ...@@ -2500,6 +2514,7 @@ void resetServerStats(void) {
server.stat_numcommands = 0; server.stat_numcommands = 0;
server.stat_numconnections = 0; server.stat_numconnections = 0;
server.stat_expiredkeys = 0; server.stat_expiredkeys = 0;
server.stat_expired_hash_fields = 0;
server.stat_expired_stale_perc = 0; server.stat_expired_stale_perc = 0;
server.stat_expired_time_cap_reached_count = 0; server.stat_expired_time_cap_reached_count = 0;
server.stat_expire_cycle_time_used = 0; server.stat_expire_cycle_time_used = 0;
...@@ -2648,6 +2663,7 @@ void initServer(void) { ...@@ -2648,6 +2663,7 @@ void initServer(void) {
for (j = 0; j < server.dbnum; j++) { for (j = 0; j < server.dbnum; j++) {
server.db[j].keys = kvstoreCreate(&dbDictType, slot_count_bits, flags); server.db[j].keys = kvstoreCreate(&dbDictType, slot_count_bits, flags);
server.db[j].expires = kvstoreCreate(&dbExpiresDictType, slot_count_bits, flags); server.db[j].expires = kvstoreCreate(&dbExpiresDictType, slot_count_bits, flags);
server.db[j].hexpires = ebCreate();
server.db[j].expires_cursor = 0; server.db[j].expires_cursor = 0;
server.db[j].blocking_keys = dictCreate(&keylistDictType); server.db[j].blocking_keys = dictCreate(&keylistDictType);
server.db[j].blocking_keys_unblock_on_nokey = dictCreate(&objectKeyPointerValueDictType); server.db[j].blocking_keys_unblock_on_nokey = dictCreate(&objectKeyPointerValueDictType);
...@@ -5849,6 +5865,7 @@ sds genRedisInfoString(dict *section_dict, int all_sections, int everything) { ...@@ -5849,6 +5865,7 @@ sds genRedisInfoString(dict *section_dict, int all_sections, int everything) {
"sync_full:%lld\r\n", server.stat_sync_full, "sync_full:%lld\r\n", server.stat_sync_full,
"sync_partial_ok:%lld\r\n", server.stat_sync_partial_ok, "sync_partial_ok:%lld\r\n", server.stat_sync_partial_ok,
"sync_partial_err:%lld\r\n", server.stat_sync_partial_err, "sync_partial_err:%lld\r\n", server.stat_sync_partial_err,
"expired_hash_fields:%lld\r\n", server.stat_expired_hash_fields,
"expired_keys:%lld\r\n", server.stat_expiredkeys, "expired_keys:%lld\r\n", server.stat_expiredkeys,
"expired_stale_perc:%.2f\r\n", server.stat_expired_stale_perc*100, "expired_stale_perc:%.2f\r\n", server.stat_expired_stale_perc*100,
"expired_time_cap_reached_count:%lld\r\n", server.stat_expired_time_cap_reached_count, "expired_time_cap_reached_count:%lld\r\n", server.stat_expired_time_cap_reached_count,
...@@ -6862,9 +6879,11 @@ struct redisTest { ...@@ -6862,9 +6879,11 @@ struct redisTest {
{"crc64", crc64Test}, {"crc64", crc64Test},
{"zmalloc", zmalloc_test}, {"zmalloc", zmalloc_test},
{"sds", sdsTest}, {"sds", sdsTest},
{"mstr", mstrTest},
{"dict", dictTest}, {"dict", dictTest},
{"listpack", listpackTest}, {"listpack", listpackTest},
{"kvstore", kvstoreTest}, {"kvstore", kvstoreTest},
{"ebuckets", ebucketsTest},
}; };
redisTestProc *getTestProcByName(const char *name) { redisTestProc *getTestProcByName(const char *name) {
int numtests = sizeof(redisTests)/sizeof(struct redisTest); int numtests = sizeof(redisTests)/sizeof(struct redisTest);
...@@ -6891,6 +6910,7 @@ int main(int argc, char **argv) { ...@@ -6891,6 +6910,7 @@ int main(int argc, char **argv) {
if (!strcasecmp(arg, "--accurate")) flags |= REDIS_TEST_ACCURATE; if (!strcasecmp(arg, "--accurate")) flags |= REDIS_TEST_ACCURATE;
else if (!strcasecmp(arg, "--large-memory")) flags |= REDIS_TEST_LARGE_MEMORY; else if (!strcasecmp(arg, "--large-memory")) flags |= REDIS_TEST_LARGE_MEMORY;
else if (!strcasecmp(arg, "--valgrind")) flags |= REDIS_TEST_VALGRIND; else if (!strcasecmp(arg, "--valgrind")) flags |= REDIS_TEST_VALGRIND;
else if (!strcasecmp(arg, "--verbose")) flags |= REDIS_TEST_VERBOSE;
} }
if (!strcasecmp(argv[2], "all")) { if (!strcasecmp(argv[2], "all")) {
......
...@@ -45,6 +45,8 @@ typedef long long ustime_t; /* microsecond time type. */ ...@@ -45,6 +45,8 @@ typedef long long ustime_t; /* microsecond time type. */
#include "ae.h" /* Event driven programming library */ #include "ae.h" /* Event driven programming library */
#include "sds.h" /* Dynamic safe strings */ #include "sds.h" /* Dynamic safe strings */
#include "mstr.h" /* Immutable strings with optional metadata attached */
#include "ebuckets.h" /* expiry data structure */
#include "dict.h" /* Hash tables */ #include "dict.h" /* Hash tables */
#include "kvstore.h" /* Slot-based hash table */ #include "kvstore.h" /* Slot-based hash table */
#include "adlist.h" /* Linked lists */ #include "adlist.h" /* Linked lists */
...@@ -960,6 +962,7 @@ typedef struct replBufBlock { ...@@ -960,6 +962,7 @@ typedef struct replBufBlock {
typedef struct redisDb { typedef struct redisDb {
kvstore *keys; /* The keyspace for this DB */ kvstore *keys; /* The keyspace for this DB */
kvstore *expires; /* Timeout of keys with a timeout set */ kvstore *expires; /* Timeout of keys with a timeout set */
ebuckets hexpires; /* Hash expiration DS. Single TTL per hash (of next min field to expire) */
dict *blocking_keys; /* Keys with clients waiting for data (BLPOP)*/ dict *blocking_keys; /* Keys with clients waiting for data (BLPOP)*/
dict *blocking_keys_unblock_on_nokey; /* Keys with clients waiting for dict *blocking_keys_unblock_on_nokey; /* Keys with clients waiting for
* data, and should be unblocked if key is deleted (XREADEDGROUP). * data, and should be unblocked if key is deleted (XREADEDGROUP).
...@@ -1642,6 +1645,7 @@ struct redisServer { ...@@ -1642,6 +1645,7 @@ struct redisServer {
long long stat_numcommands; /* Number of processed commands */ long long stat_numcommands; /* Number of processed commands */
long long stat_numconnections; /* Number of connections received */ long long stat_numconnections; /* Number of connections received */
long long stat_expiredkeys; /* Number of expired keys */ long long stat_expiredkeys; /* Number of expired keys */
long long stat_expired_hash_fields; /* Number of expired hash-fields */
double stat_expired_stale_perc; /* Percentage of keys probably expired */ double stat_expired_stale_perc; /* Percentage of keys probably expired */
long long stat_expired_time_cap_reached_count; /* Early expire cycle stops.*/ long long stat_expired_time_cap_reached_count; /* Early expire cycle stops.*/
long long stat_expire_cycle_time_used; /* Cumulative microseconds used. */ long long stat_expire_cycle_time_used; /* Cumulative microseconds used. */
...@@ -2444,6 +2448,10 @@ typedef struct { ...@@ -2444,6 +2448,10 @@ typedef struct {
#define IO_THREADS_OP_WRITE 2 #define IO_THREADS_OP_WRITE 2
extern int io_threads_op; extern int io_threads_op;
/* Hash-field data type (of t_hash.c) */
typedef mstr hfield;
extern mstrKind mstrFieldKind;
/*----------------------------------------------------------------------------- /*-----------------------------------------------------------------------------
* Extern declarations * Extern declarations
*----------------------------------------------------------------------------*/ *----------------------------------------------------------------------------*/
...@@ -2458,6 +2466,8 @@ extern dictType zsetDictType; ...@@ -2458,6 +2466,8 @@ extern dictType zsetDictType;
extern dictType dbDictType; extern dictType dbDictType;
extern double R_Zero, R_PosInf, R_NegInf, R_Nan; extern double R_Zero, R_PosInf, R_NegInf, R_Nan;
extern dictType hashDictType; extern dictType hashDictType;
extern dictType mstrHashDictType;
extern dictType mstrHashDictTypeWithHFE;
extern dictType stringSetDictType; extern dictType stringSetDictType;
extern dictType externalStringType; extern dictType externalStringType;
extern dictType sdsHashDictType; extern dictType sdsHashDictType;
...@@ -2469,6 +2479,9 @@ extern dictType sdsReplyDictType; ...@@ -2469,6 +2479,9 @@ extern dictType sdsReplyDictType;
extern dictType keylistDictType; extern dictType keylistDictType;
extern dict *modules; extern dict *modules;
extern EbucketsType hashExpireBucketsType; /* global expires */
extern EbucketsType hashFieldExpireBucketsType; /* local per hash */
/*----------------------------------------------------------------------------- /*-----------------------------------------------------------------------------
* Functions prototypes * Functions prototypes
*----------------------------------------------------------------------------*/ *----------------------------------------------------------------------------*/
...@@ -2611,6 +2624,7 @@ void copyReplicaOutputBuffer(client *dst, client *src); ...@@ -2611,6 +2624,7 @@ void copyReplicaOutputBuffer(client *dst, client *src);
void addListRangeReply(client *c, robj *o, long start, long end, int reverse); void addListRangeReply(client *c, robj *o, long start, long end, int reverse);
void deferredAfterErrorReply(client *c, list *errors); void deferredAfterErrorReply(client *c, list *errors);
size_t sdsZmallocSize(sds s); size_t sdsZmallocSize(sds s);
size_t hfieldZmallocSize(hfield s);
size_t getStringObjectSdsUsedMemory(robj *o); size_t getStringObjectSdsUsedMemory(robj *o);
void freeClientReplyValue(void *o); void freeClientReplyValue(void *o);
void *dupClientReplyValue(void *o); void *dupClientReplyValue(void *o);
...@@ -3144,21 +3158,35 @@ void hashTypeConvert(robj *o, int enc); ...@@ -3144,21 +3158,35 @@ void hashTypeConvert(robj *o, int enc);
void hashTypeTryConversion(robj *subject, robj **argv, int start, int end); void hashTypeTryConversion(robj *subject, robj **argv, int start, int end);
int hashTypeExists(robj *o, sds key); int hashTypeExists(robj *o, sds key);
int hashTypeDelete(robj *o, sds key); int hashTypeDelete(robj *o, sds key);
unsigned long hashTypeLength(const robj *o); unsigned long hashTypeLength(const robj *o, int subtractExpiredFields);
hashTypeIterator *hashTypeInitIterator(robj *subject); hashTypeIterator *hashTypeInitIterator(robj *subject);
void hashTypeReleaseIterator(hashTypeIterator *hi); void hashTypeReleaseIterator(hashTypeIterator *hi);
int hashTypeNext(hashTypeIterator *hi); int hashTypeNext(hashTypeIterator *hi, int skipExpiredFields);
void hashTypeCurrentFromListpack(hashTypeIterator *hi, int what, void hashTypeCurrentFromListpack(hashTypeIterator *hi, int what,
unsigned char **vstr, unsigned char **vstr,
unsigned int *vlen, unsigned int *vlen,
long long *vll); long long *vll);
sds hashTypeCurrentFromHashTable(hashTypeIterator *hi, int what); void hashTypeCurrentFromHashTable(hashTypeIterator *hi, int what, char **str,
void hashTypeCurrentObject(hashTypeIterator *hi, int what, unsigned char **vstr, unsigned int *vlen, long long *vll); size_t *len, uint64_t *expireTime);
void hashTypeCurrentObject(hashTypeIterator *hi, int what, unsigned char **vstr,
unsigned int *vlen, long long *vll, uint64_t *expireTime);
sds hashTypeCurrentObjectNewSds(hashTypeIterator *hi, int what); sds hashTypeCurrentObjectNewSds(hashTypeIterator *hi, int what);
robj *hashTypeLookupWriteOrCreate(client *c, robj *key); hfield hashTypeCurrentObjectNewHfield(hashTypeIterator *hi);
robj *hashTypeGetValueObject(robj *o, sds field); robj *hashTypeGetValueObject(robj *o, sds field);
int hashTypeSet(robj *o, sds field, sds value, int flags); int hashTypeSet(redisDb *db, robj *o, sds field, sds value, int flags);
robj *hashTypeDup(robj *o); robj *hashTypeDup(robj *o, sds newkey, uint64_t *minHashExpire);
uint64_t hashTypeRemoveFromExpires(ebuckets *hexpires, robj *o);
void hashTypeAddToExpires(redisDb *db, sds key, robj *hashObj, uint64_t expireTime);
int64_t hashTypeGetMinExpire(robj *keyObj);
/* Hash-Field data type (of t_hash.c) */
hfield hfieldNew(const void *field, size_t fieldlen, int withExpireMeta);
hfield hfieldTryNew(const void *field, size_t fieldlen, int withExpireMeta);
int hfieldIsExpireAttached(hfield field);
int hfieldIsExpired(hfield field);
static inline void hfieldFree(hfield field) { mstrFree(&mstrFieldKind, field); }
static inline void *hfieldGetAllocPtr(hfield field) { return mstrGetAllocPtr(&mstrFieldKind, field); }
static inline size_t hfieldlen(hfield field) { return mstrlen(field); }
/* Pub / Sub */ /* Pub / Sub */
int pubsubUnsubscribeAllChannels(client *c, int notify); int pubsubUnsubscribeAllChannels(client *c, int notify);
...@@ -3177,7 +3205,7 @@ dict *getClientPubSubChannels(client *c); ...@@ -3177,7 +3205,7 @@ dict *getClientPubSubChannels(client *c);
dict *getClientPubSubShardChannels(client *c); dict *getClientPubSubShardChannels(client *c);
/* Keyspace events notification */ /* Keyspace events notification */
void notifyKeyspaceEvent(int type, char *event, robj *key, int dbid); void notifyKeyspaceEvent(int type, const char *event, robj *key, int dbid);
int keyspaceEventsStringToFlags(char *classes); int keyspaceEventsStringToFlags(char *classes);
sds keyspaceEventsFlagsToString(int flags); sds keyspaceEventsFlagsToString(int flags);
...@@ -3261,6 +3289,7 @@ int keyIsExpired(redisDb *db, robj *key); ...@@ -3261,6 +3289,7 @@ int keyIsExpired(redisDb *db, robj *key);
long long getExpire(redisDb *db, robj *key); long long getExpire(redisDb *db, robj *key);
void setExpire(client *c, redisDb *db, robj *key, long long when); void setExpire(client *c, redisDb *db, robj *key, long long when);
int checkAlreadyExpired(long long when); int checkAlreadyExpired(long long when);
int parseExtendedExpireArgumentsOrReply(client *c, int *flags);
robj *lookupKeyRead(redisDb *db, robj *key); robj *lookupKeyRead(redisDb *db, robj *key);
robj *lookupKeyWrite(redisDb *db, robj *key); robj *lookupKeyWrite(redisDb *db, robj *key);
robj *lookupKeyReadOrReply(client *c, robj *key, robj *reply); robj *lookupKeyReadOrReply(client *c, robj *key, robj *reply);
...@@ -3279,7 +3308,7 @@ int objectSetLRUOrLFU(robj *val, long long lfu_freq, long long lru_idle, ...@@ -3279,7 +3308,7 @@ int objectSetLRUOrLFU(robj *val, long long lfu_freq, long long lru_idle,
#define LOOKUP_NOEXPIRE (1<<4) /* Avoid deleting lazy expired keys. */ #define LOOKUP_NOEXPIRE (1<<4) /* Avoid deleting lazy expired keys. */
#define LOOKUP_NOEFFECTS (LOOKUP_NONOTIFY | LOOKUP_NOSTATS | LOOKUP_NOTOUCH | LOOKUP_NOEXPIRE) /* Avoid any effects from fetching the key */ #define LOOKUP_NOEFFECTS (LOOKUP_NONOTIFY | LOOKUP_NOSTATS | LOOKUP_NOTOUCH | LOOKUP_NOEXPIRE) /* Avoid any effects from fetching the key */
void dbAdd(redisDb *db, robj *key, robj *val); dictEntry *dbAdd(redisDb *db, robj *key, robj *val);
int dbAddRDBLoad(redisDb *db, sds key, robj *val); int dbAddRDBLoad(redisDb *db, sds key, robj *val);
void dbReplaceValue(redisDb *db, robj *key, robj *val); void dbReplaceValue(redisDb *db, robj *key, robj *val);
...@@ -3434,6 +3463,7 @@ void expireSlaveKeys(void); ...@@ -3434,6 +3463,7 @@ void expireSlaveKeys(void);
void rememberSlaveKeyWithExpire(redisDb *db, robj *key); void rememberSlaveKeyWithExpire(redisDb *db, robj *key);
void flushSlaveKeysWithExpireList(void); void flushSlaveKeysWithExpireList(void);
size_t getSlaveKeyWithExpireCount(void); size_t getSlaveKeyWithExpireCount(void);
uint64_t hashTypeDbActiveExpire(redisDb *db, uint32_t maxFieldsToExpire);
/* evict.c -- maxmemory handling and LRU eviction. */ /* evict.c -- maxmemory handling and LRU eviction. */
void evictionPoolAlloc(void); void evictionPoolAlloc(void);
...@@ -3451,6 +3481,7 @@ void startEvictionTimeProc(void); ...@@ -3451,6 +3481,7 @@ void startEvictionTimeProc(void);
uint64_t dictSdsHash(const void *key); uint64_t dictSdsHash(const void *key);
uint64_t dictSdsCaseHash(const void *key); uint64_t dictSdsCaseHash(const void *key);
int dictSdsKeyCompare(dict *d, const void *key1, const void *key2); int dictSdsKeyCompare(dict *d, const void *key1, const void *key2);
int dictSdsMstrKeyCompare(dict *d, const void *sdsLookup, const void *mstrStored);
int dictSdsKeyCaseCompare(dict *d, const void *key1, const void *key2); int dictSdsKeyCaseCompare(dict *d, const void *key1, const void *key2);
void dictSdsDestructor(dict *d, void *val); void dictSdsDestructor(dict *d, void *val);
void dictListDestructor(dict *d, void *val); void dictListDestructor(dict *d, void *val);
...@@ -3606,6 +3637,15 @@ void strlenCommand(client *c); ...@@ -3606,6 +3637,15 @@ void strlenCommand(client *c);
void zrankCommand(client *c); void zrankCommand(client *c);
void zrevrankCommand(client *c); void zrevrankCommand(client *c);
void hsetCommand(client *c); void hsetCommand(client *c);
void hpexpireCommand(client *c);
void hexpireCommand(client *c);
void hpexpireatCommand(client *c);
void hexpireatCommand(client *c);
void httlCommand(client *c);
void hpttlCommand(client *c);
void hexpiretimeCommand(client *c);
void hpexpiretimeCommand(client *c);
void hpersistCommand(client *c);
void hsetnxCommand(client *c); void hsetnxCommand(client *c);
void hgetCommand(client *c); void hgetCommand(client *c);
void hmgetCommand(client *c); void hmgetCommand(client *c);
......
...@@ -7,8 +7,208 @@ ...@@ -7,8 +7,208 @@
*/ */
#include "server.h" #include "server.h"
#include "ebuckets.h"
#include <math.h> #include <math.h>
/* Threshold used by HEXPIRE and HPERSIST to decide whether it is worth
 * updating the expiration time of the hash object in the global HFE DS. */
#define HASH_NEW_EXPIRE_DIFF_THRESHOLD max(4000, 1<<EB_BUCKET_KEY_PRECISION)
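/* A minimal sketch of how such a threshold could gate re-registration of a
 * hash in the global HFE DS (db->hexpires). The helper name and the exact
 * gating rule are illustrative assumptions, not taken from this commit. */
static void maybeUpdateGlobalHFE(redisDb *db, sds key, robj *hashObj,
                                 uint64_t newMinExpire) {
    uint64_t registered = hashTypeGetMinExpire(hashObj);

    /* (Re-)register only if the hash isn't registered yet, or if the new
     * minimum is earlier than the registered one by more than the threshold;
     * otherwise the ebRemove()+ebAdd() round-trip is not considered worth it. */
    if (registered == EB_EXPIRE_TIME_INVALID ||
        newMinExpire + HASH_NEW_EXPIRE_DIFF_THRESHOLD < registered)
    {
        hashTypeRemoveFromExpires(&db->hexpires, hashObj);
        hashTypeAddToExpires(db, key, hashObj, newMinExpire);
    }
}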
/* hash field expiration (HFE) funcs */
static ExpireAction onFieldExpire(eItem item, void *ctx);
static ExpireMeta* hfieldGetExpireMeta(const eItem field);
static ExpireMeta *hashGetExpireMeta(const eItem item);
static void hexpireGenericCommand(client *c, const char *cmd, long long basetime, int unit);
static ExpireAction hashTypeActiveExpire(eItem hashObj, void *ctx);
static void hfieldPersist(redisDb *db, robj *hashObj, hfield field);
static uint64_t hfieldGetExpireTime(hfield field);
/* hash dictType funcs */
static int dictHfieldKeyCompare(dict *d, const void *key1, const void *key2);
static uint64_t dictMstrHash(const void *key);
static void dictHfieldDestructor(dict *d, void *field);
static size_t hashDictWithExpireMetadataBytes(dict *d);
static void hashDictWithExpireOnRelease(dict *d);
static robj* hashTypeLookupWriteOrCreate(client *c, robj *key);
/*-----------------------------------------------------------------------------
* Define dictType of hash
*
* - Stores fields as mstr strings with optional metadata to attach TTL
* - Note that small hashes are represented with listpacks
* - Once expiration is set for a field, the dict instance and corresponding
* dictType are replaced with a dict containing metadata for Hash Field
* Expiration (HFE) and using dictType `mstrHashDictTypeWithHFE`
*----------------------------------------------------------------------------*/
dictType mstrHashDictType = {
dictSdsHash, /* lookup hash function */
NULL, /* key dup */
NULL, /* val dup */
dictSdsMstrKeyCompare, /* lookup key compare */
dictHfieldDestructor, /* key destructor */
dictSdsDestructor, /* val destructor */
.storedHashFunction = dictMstrHash, /* stored hash function */
.storedKeyCompare = dictHfieldKeyCompare, /* stored key compare */
};
/* Define alternative dictType of hash with hash-field expiration (HFE) support */
dictType mstrHashDictTypeWithHFE = {
dictSdsHash, /* lookup hash function */
NULL, /* key dup */
NULL, /* val dup */
dictSdsMstrKeyCompare, /* lookup key compare */
dictHfieldDestructor, /* key destructor */
dictSdsDestructor, /* val destructor */
.storedHashFunction = dictMstrHash, /* stored hash function */
.storedKeyCompare = dictHfieldKeyCompare, /* stored key compare */
.dictMetadataBytes = hashDictWithExpireMetadataBytes,
.onDictRelease = hashDictWithExpireOnRelease,
};
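/* Sketch (helper name is hypothetical, not part of this commit): lookups keep
 * using plain sds keys even though the stored keys are hfield (mstr). The
 * dictSdsHash / dictSdsMstrKeyCompare pair serves the lookup side, while the
 * .storedHashFunction / .storedKeyCompare pair serves rehashing and other
 * stored-key operations. */
static sds hashDictFetch(dict *d, sds field) {
    dictEntry *de = dictFind(d, field);      /* lookup key is an sds   */
    if (de == NULL) return NULL;
    hfield storedKey = dictGetKey(de);       /* stored key is an mstr  */
    serverAssert(hfieldlen(storedKey) == sdslen(field));
    return dictGetVal(de);                   /* values remain plain sds */
}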
/*-----------------------------------------------------------------------------
* Hash Field Expiration (HFE) Feature
*
* Each hash instance maintains its own set of hash-field expirations within its
* private ebuckets DS. In order to support the HFE active-expire cycle across
* hash instances, hashes with associated HFE are also registered in a global
* ebuckets DS, with an expiration-time value that reflects their next (minimum)
* time to expire. The global HFE active expiration is triggered from the
* activeExpireCycle() function and invokes a "local" HFE active expiration
* for each hash instance that has expired fields.
*
* hashExpireBucketsType - ebuckets-type used at the global level
* (db->hexpires) to register hashes that have one or more fields with a
* time-based expiration. Each hash is registered with the expiration time of
* its earliest field to expire.
*----------------------------------------------------------------------------*/
EbucketsType hashExpireBucketsType = {
.onDeleteItem = NULL,
.getExpireMeta = hashGetExpireMeta, /* get ExpireMeta attached to each hash */
.itemsAddrAreOdd = 0, /* Addresses of dict are even */
};
/* hashFieldExpireBucketsType - ebuckets-type for hash fields with a time-based
 * expiration. An ebuckets instance will be attached to each hash that has at
 * least one field with an expiry time. */
EbucketsType hashFieldExpireBucketsType = {
.onDeleteItem = NULL,
.getExpireMeta = hfieldGetExpireMeta, /* get ExpireMeta attached to each field */
.itemsAddrAreOdd = 1, /* Addresses of hfield (mstr) are odd!! */
};
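/* Sketch of the two-level registration described above (helper name is
 * hypothetical, not code from this commit): the hash object itself is
 * registered in db->hexpires, keyed by the expiration time of its earliest
 * field to expire. */
static void registerHashMinExpire(redisDb *db, sds key, robj *hashObj,
                                  uint64_t fieldExpireAt) {
    /* Drop any previous registration and keep the earliest expiration time. */
    uint64_t prev = hashTypeRemoveFromExpires(&db->hexpires, hashObj);
    if (prev != EB_EXPIRE_TIME_INVALID && prev < fieldExpireAt)
        fieldExpireAt = prev;
    hashTypeAddToExpires(db, key, hashObj, fieldExpireAt);
}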
/* Each dict of a hash object that has fields with a time-based expiration will
 * have the following metadata attached to its dict header */
typedef struct dictExpireMetadata {
ExpireMeta expireMeta; /* embedded ExpireMeta in dict.
                          Used to register the hash in the global ebuckets
                          (i.e. db->hexpires) with the next (minimum)
                          hash-field to expire */
ebuckets hfe; /* DS of Hash-Field Expirations, associated with each hash */
sds key; /* reference to the key, the same instance that is stored
            in db->dict. Used by the active-expiration flow for
            notification and deletion of the object, if needed. */
} dictExpireMetadata;
/* ActiveExpireCtx passed to hashTypeActiveExpire() */
typedef struct ActiveExpireCtx {
uint32_t fieldsToExpireQuota;
redisDb *db;
} ActiveExpireCtx;
/* The dict implementation of hashes was modified from storing fields as sds
 * strings to storing them as "mstr" (immutable string with metadata) in order
 * to be able to attach a TTL (ExpireMeta) to the hash field. This use of mstr
 * also opens up the opportunity for future features to attach additional
 * metadata to fields as needed.
 *
 * The following defines the new hfield kind of mstr */
typedef enum HfieldMetaFlags {
HFIELD_META_EXPIRE = 0,
} HfieldMetaFlags;
mstrKind mstrFieldKind = {
.name = "hField",
/* Taking care that all metaSize[*] values are even ensures that all
* addresses of hfield instances will be odd. */
.metaSize[HFIELD_META_EXPIRE] = sizeof(ExpireMeta),
};
static_assert(sizeof(struct ExpireMeta ) % 2 == 0, "must be even!");
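/* Sketch (hypothetical helper, assuming the hash dict was already created with
 * mstrHashDictTypeWithHFE and the field is not present yet): allocate the field
 * as an mstr with room for ExpireMeta and register it in the hash's private
 * ebuckets. */
static hfield addFieldWithTTL(dict *d, const char *name, size_t namelen,
                              sds value, uint64_t expireAt) {
    serverAssert(d->type == &mstrHashDictTypeWithHFE);
    hfield f = hfieldNew(name, namelen, 1);  /* 1 = reserve ExpireMeta */

    dictUseStoredKeyApi(d, 1);               /* stored key is an mstr  */
    int ret = dictAdd(d, f, value);
    dictUseStoredKeyApi(d, 0);
    serverAssert(ret == DICT_OK);

    dictExpireMetadata *m = (dictExpireMetadata *) dictMetadata(d);
    ebAdd(&m->hfe, &hashFieldExpireBucketsType, f, expireAt);
    return f;
}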
/* Used by hpersistCommand() */
typedef enum SetPersistRes {
HFE_PERSIST_NO_FIELD = -2, /* No such hash-field */
HFE_PERSIST_NO_TTL = -1, /* No TTL attached to the field */
HFE_PERSIST_OK = 1
} SetPersistRes;
/* Used by hashTypeSetExpire() */
typedef enum SetExpireTimeRes {
HFE_SET_NO_FIELD = -2, /* No such hash-field */
HFE_SET_NO_CONDITION_MET = 0, /* Specified NX | XX | GT | LT condition not met */
HFE_SET_OK = 1, /* Expiration time set/updated as expected */
HFE_SET_DELETED = 2 /* Field deleted because the specified time is in the past */
} SetExpireTimeRes;
/* Used by httlGenericCommand() */
typedef enum GetExpireTimeRes {
HFE_GET_NO_FIELD = -2, /* No such hash-field */
HFE_GET_NO_TTL = -1, /* No TTL attached to the field */
} GetExpireTimeRes;
#define HFE_NX (1<<0)
#define HFE_XX (1<<1)
#define HFE_GT (1<<2)
#define HFE_LT (1<<3)
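/* Sketch of how the NX/XX/GT/LT flags could be evaluated, assuming semantics
 * analogous to EXPIRE (a field without a TTL is treated as having no expiry
 * for NX/XX and as an infinite expiry for GT/LT). The helper name and logic
 * are illustrative assumptions, not code from this commit. */
static int hfeConditionMet(int flags, uint64_t curExpire, uint64_t newExpire) {
    int hasTTL = (curExpire != EB_EXPIRE_TIME_INVALID);
    if ((flags & HFE_NX) && hasTTL) return 0;                /* needs no TTL */
    if ((flags & HFE_XX) && !hasTTL) return 0;               /* needs a TTL  */
    if ((flags & HFE_GT) && (!hasTTL || newExpire <= curExpire)) return 0;
    if ((flags & HFE_LT) && hasTTL && newExpire >= curExpire) return 0;
    return 1;
}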
static inline int isDictWithMetaHFE(dict *d) {
return d->type == &mstrHashDictTypeWithHFE;
}
/*-----------------------------------------------------------------------------
* Accessor functions for dictType of hash
*----------------------------------------------------------------------------*/
static int dictHfieldKeyCompare(dict *d, const void *key1, const void *key2)
{
int l1,l2;
UNUSED(d);
l1 = hfieldlen((hfield)key1);
l2 = hfieldlen((hfield)key2);
if (l1 != l2) return 0;
return memcmp(key1, key2, l1) == 0;
}
static uint64_t dictMstrHash(const void *key) {
return dictGenHashFunction((unsigned char*)key, mstrlen((char*)key));
}
static void dictHfieldDestructor(dict *d, void *field) {
/* If a TTL is attached to the field, remove it from the hash's private ebuckets. */
if (hfieldGetExpireTime(field) != EB_EXPIRE_TIME_INVALID) {
dictExpireMetadata *dictExpireMeta = (dictExpireMetadata *) dictMetadata(d);
ebRemove(&dictExpireMeta->hfe, &hashFieldExpireBucketsType, field);
// TODO: Check if the field is the minimum in the hash and update the global HFE DS
}
hfieldFree(field);
}
static size_t hashDictWithExpireMetadataBytes(dict *d) {
UNUSED(d);
/* expireMeta of the hash, ref to ebuckets and pointer to hash's key */
return sizeof(dictExpireMetadata);
}
static void hashDictWithExpireOnRelease(dict *d) {
/* Surely allocated with metadata; otherwise this callback wouldn't have been registered */
dictExpireMetadata *dictExpireMeta = (dictExpireMetadata *) dictMetadata(d);
ebDestroy(&dictExpireMeta->hfe, &hashFieldExpireBucketsType, NULL);
}
/*----------------------------------------------------------------------------- /*-----------------------------------------------------------------------------
* Hash type API * Hash type API
*----------------------------------------------------------------------------*/ *----------------------------------------------------------------------------*/
...@@ -85,7 +285,12 @@ sds hashTypeGetFromHashTable(robj *o, sds field) { ...@@ -85,7 +285,12 @@ sds hashTypeGetFromHashTable(robj *o, sds field) {
serverAssert(o->encoding == OBJ_ENCODING_HT); serverAssert(o->encoding == OBJ_ENCODING_HT);
de = dictFind(o->ptr, field); de = dictFind(o->ptr, field);
if (de == NULL) return NULL; if (de == NULL) return NULL;
/* Check if the field is expired */
if (hfieldIsExpired(dictGetKey(de))) return NULL;
return dictGetVal(de); return dictGetVal(de);
} }
...@@ -176,7 +381,7 @@ int hashTypeExists(robj *o, sds field) { ...@@ -176,7 +381,7 @@ int hashTypeExists(robj *o, sds field) {
#define HASH_SET_TAKE_FIELD (1<<0) #define HASH_SET_TAKE_FIELD (1<<0)
#define HASH_SET_TAKE_VALUE (1<<1) #define HASH_SET_TAKE_VALUE (1<<1)
#define HASH_SET_COPY 0 #define HASH_SET_COPY 0
int hashTypeSet(robj *o, sds field, sds value, int flags) { int hashTypeSet(redisDb *db, robj *o, sds field, sds value, int flags) {
int update = 0; int update = 0;
/* Check if the field is too long for listpack, and convert before adding the item. /* Check if the field is too long for listpack, and convert before adding the item.
...@@ -186,7 +391,7 @@ int hashTypeSet(robj *o, sds field, sds value, int flags) { ...@@ -186,7 +391,7 @@ int hashTypeSet(robj *o, sds field, sds value, int flags) {
if (sdslen(field) > server.hash_max_listpack_value || sdslen(value) > server.hash_max_listpack_value) if (sdslen(field) > server.hash_max_listpack_value || sdslen(value) > server.hash_max_listpack_value)
hashTypeConvert(o, OBJ_ENCODING_HT); hashTypeConvert(o, OBJ_ENCODING_HT);
} }
if (o->encoding == OBJ_ENCODING_LISTPACK) { if (o->encoding == OBJ_ENCODING_LISTPACK) {
unsigned char *zl, *fptr, *vptr; unsigned char *zl, *fptr, *vptr;
...@@ -213,30 +418,36 @@ int hashTypeSet(robj *o, sds field, sds value, int flags) { ...@@ -213,30 +418,36 @@ int hashTypeSet(robj *o, sds field, sds value, int flags) {
o->ptr = zl; o->ptr = zl;
/* Check if the listpack needs to be converted to a hash table */ /* Check if the listpack needs to be converted to a hash table */
if (hashTypeLength(o) > server.hash_max_listpack_entries) if (hashTypeLength(o, 0) > server.hash_max_listpack_entries)
hashTypeConvert(o, OBJ_ENCODING_HT); hashTypeConvert(o, OBJ_ENCODING_HT);
} else if (o->encoding == OBJ_ENCODING_HT) { } else if (o->encoding == OBJ_ENCODING_HT) {
dict *ht = o->ptr; dict *ht = o->ptr;
dictEntry *de, *existing; dictEntry *de, *existingEntry;
sds v; sds storedValue;
if (flags & HASH_SET_TAKE_VALUE) { if (flags & HASH_SET_TAKE_VALUE) {
v = value; storedValue = value;
value = NULL; value = NULL;
} else { } else {
v = sdsdup(value); storedValue = sdsdup(value);
} }
de = dictAddRaw(ht, field, &existing); /* Cannot leverage HASH_SET_TAKE_FIELD since hfield is not of type sds */
hfield newField = hfieldNew(field, sdslen(field), 0);
/* stored key is different than lookup key */
dictUseStoredKeyApi(ht, 1);
de = dictAddRaw(ht, newField, &existingEntry);
dictUseStoredKeyApi(ht, 0);
if (de) { if (de) {
dictSetVal(ht, de, v); dictSetVal(ht, de, storedValue);
if (flags & HASH_SET_TAKE_FIELD) {
field = NULL;
} else {
dictSetKey(ht, de, sdsdup(field));
}
} else { } else {
sdsfree(dictGetVal(existing)); /* If a TTL is attached to the old field, remove it from the hash's private ebuckets */
dictSetVal(ht, existing, v); hfield oldField = dictGetKey(existingEntry);
hfieldPersist(db, o, oldField);
sdsfree(dictGetVal(existingEntry));
dictSetVal(ht, existingEntry, storedValue);
update = 1; update = 1;
hfieldFree(newField);
} }
} else { } else {
serverPanic("Unknown hash encoding"); serverPanic("Unknown hash encoding");
...@@ -269,6 +480,7 @@ int hashTypeDelete(robj *o, sds field) { ...@@ -269,6 +480,7 @@ int hashTypeDelete(robj *o, sds field) {
} }
} }
} else if (o->encoding == OBJ_ENCODING_HT) { } else if (o->encoding == OBJ_ENCODING_HT) {
/* dictDelete() will call dictHfieldDestructor() */
if (dictDelete((dict*)o->ptr, field) == C_OK) { if (dictDelete((dict*)o->ptr, field) == C_OK) {
deleted = 1; deleted = 1;
} }
...@@ -279,14 +491,27 @@ int hashTypeDelete(robj *o, sds field) { ...@@ -279,14 +491,27 @@ int hashTypeDelete(robj *o, sds field) {
return deleted; return deleted;
} }
/* Return the number of elements in a hash. */ /* Return the number of elements in a hash.
unsigned long hashTypeLength(const robj *o) { *
* Note: Might be costly when there are many HFEs
*/
unsigned long hashTypeLength(const robj *o, int subtractExpiredFields) {
unsigned long length = ULONG_MAX; unsigned long length = ULONG_MAX;
if (o->encoding == OBJ_ENCODING_LISTPACK) { if (o->encoding == OBJ_ENCODING_LISTPACK) {
length = lpLength(o->ptr) / 2; length = lpLength(o->ptr) / 2;
} else if (o->encoding == OBJ_ENCODING_HT) { } else if (o->encoding == OBJ_ENCODING_HT) {
length = dictSize((const dict*)o->ptr); uint64_t expiredItems = 0;
dict *d = (dict*)o->ptr;
if (subtractExpiredFields && isDictWithMetaHFE(d)) {
dictExpireMetadata *meta = (dictExpireMetadata *) dictMetadata(d);
/* If dict registered in global HFE DS */
if (meta->expireMeta.trash == 0)
expiredItems = ebExpireDryRun(meta->hfe,
&hashFieldExpireBucketsType,
commandTimeSnapshot());
}
length = dictSize(d) - expiredItems;
} else { } else {
serverPanic("Unknown hash encoding"); serverPanic("Unknown hash encoding");
} }
...@@ -317,7 +542,7 @@ void hashTypeReleaseIterator(hashTypeIterator *hi) { ...@@ -317,7 +542,7 @@ void hashTypeReleaseIterator(hashTypeIterator *hi) {
/* Move to the next entry in the hash. Return C_OK when the next entry /* Move to the next entry in the hash. Return C_OK when the next entry
* could be found and C_ERR when the iterator reaches the end. */ * could be found and C_ERR when the iterator reaches the end. */
int hashTypeNext(hashTypeIterator *hi) { int hashTypeNext(hashTypeIterator *hi, int skipExpiredFields) {
if (hi->encoding == OBJ_ENCODING_LISTPACK) { if (hi->encoding == OBJ_ENCODING_LISTPACK) {
unsigned char *zl; unsigned char *zl;
unsigned char *fptr, *vptr; unsigned char *fptr, *vptr;
...@@ -326,6 +551,8 @@ int hashTypeNext(hashTypeIterator *hi) { ...@@ -326,6 +551,8 @@ int hashTypeNext(hashTypeIterator *hi) {
fptr = hi->fptr; fptr = hi->fptr;
vptr = hi->vptr; vptr = hi->vptr;
/* TODO-HFE: Handle skipExpiredFields for listpack */
if (fptr == NULL) { if (fptr == NULL) {
/* Initialize cursor */ /* Initialize cursor */
serverAssert(vptr == NULL); serverAssert(vptr == NULL);
...@@ -345,7 +572,12 @@ int hashTypeNext(hashTypeIterator *hi) { ...@@ -345,7 +572,12 @@ int hashTypeNext(hashTypeIterator *hi) {
hi->fptr = fptr; hi->fptr = fptr;
hi->vptr = vptr; hi->vptr = vptr;
} else if (hi->encoding == OBJ_ENCODING_HT) { } else if (hi->encoding == OBJ_ENCODING_HT) {
if ((hi->de = dictNext(hi->di)) == NULL) return C_ERR; while ((hi->de = dictNext(hi->di)) != NULL) {
if (skipExpiredFields && hfieldIsExpired(dictGetKey(hi->de)))
continue;
return C_OK;
}
return C_ERR;
} else { } else {
serverPanic("Unknown hash encoding"); serverPanic("Unknown hash encoding");
} }
...@@ -370,15 +602,30 @@ void hashTypeCurrentFromListpack(hashTypeIterator *hi, int what, ...@@ -370,15 +602,30 @@ void hashTypeCurrentFromListpack(hashTypeIterator *hi, int what,
/* Get the field or value at iterator cursor, for an iterator on a hash value /* Get the field or value at iterator cursor, for an iterator on a hash value
* encoded as a hash table. Prototype is similar to * encoded as a hash table. Prototype is similar to
* `hashTypeGetFromHashTable`. */ * `hashTypeGetFromHashTable`.
sds hashTypeCurrentFromHashTable(hashTypeIterator *hi, int what) { *
* expireTime - If not NULL, the field's expiration time is returned through it.
*              If no expiry is set, EB_EXPIRE_TIME_INVALID is returned.
*/
void hashTypeCurrentFromHashTable(hashTypeIterator *hi, int what, char **str, size_t *len, uint64_t *expireTime) {
serverAssert(hi->encoding == OBJ_ENCODING_HT); serverAssert(hi->encoding == OBJ_ENCODING_HT);
hfield key = NULL;
if (what & OBJ_HASH_KEY) { if (what & OBJ_HASH_KEY) {
return dictGetKey(hi->de); key = dictGetKey(hi->de);
*str = key;
*len = hfieldlen(key);
} else { } else {
return dictGetVal(hi->de); sds val = dictGetVal(hi->de);
*str = val;
*len = sdslen(val);
} }
if (expireTime) {
if (!key) key = dictGetKey(hi->de);
*expireTime = hfieldGetExpireTime(key);
}
} }
/* Higher level function of hashTypeCurrent*() that returns the hash value /* Higher level function of hashTypeCurrent*() that returns the hash value
...@@ -391,14 +638,23 @@ sds hashTypeCurrentFromHashTable(hashTypeIterator *hi, int what) { ...@@ -391,14 +638,23 @@ sds hashTypeCurrentFromHashTable(hashTypeIterator *hi, int what) {
* If *vll is populated *vstr is set to NULL, so the caller * If *vll is populated *vstr is set to NULL, so the caller
* can always check the function return by checking the return value * can always check the function return by checking the return value
* type checking if vstr == NULL. */ * type checking if vstr == NULL. */
void hashTypeCurrentObject(hashTypeIterator *hi, int what, unsigned char **vstr, unsigned int *vlen, long long *vll) { void hashTypeCurrentObject(hashTypeIterator *hi,
int what,
unsigned char **vstr,
unsigned int *vlen,
long long *vll,
uint64_t *expireTime)
{
if (hi->encoding == OBJ_ENCODING_LISTPACK) { if (hi->encoding == OBJ_ENCODING_LISTPACK) {
*vstr = NULL; *vstr = NULL;
hashTypeCurrentFromListpack(hi, what, vstr, vlen, vll); hashTypeCurrentFromListpack(hi, what, vstr, vlen, vll);
/* TODO-HFE: Handle expireTime */
} else if (hi->encoding == OBJ_ENCODING_HT) { } else if (hi->encoding == OBJ_ENCODING_HT) {
sds ele = hashTypeCurrentFromHashTable(hi, what); char *ele;
size_t eleLen;
hashTypeCurrentFromHashTable(hi, what, &ele, &eleLen, expireTime);
*vstr = (unsigned char*) ele; *vstr = (unsigned char*) ele;
*vlen = sdslen(ele); *vlen = eleLen;
} else { } else {
serverPanic("Unknown hash encoding"); serverPanic("Unknown hash encoding");
} }
...@@ -411,12 +667,31 @@ sds hashTypeCurrentObjectNewSds(hashTypeIterator *hi, int what) { ...@@ -411,12 +667,31 @@ sds hashTypeCurrentObjectNewSds(hashTypeIterator *hi, int what) {
unsigned int vlen; unsigned int vlen;
long long vll; long long vll;
hashTypeCurrentObject(hi,what,&vstr,&vlen,&vll); hashTypeCurrentObject(hi,what,&vstr,&vlen,&vll, NULL);
if (vstr) return sdsnewlen(vstr,vlen); if (vstr) return sdsnewlen(vstr,vlen);
return sdsfromlonglong(vll); return sdsfromlonglong(vll);
} }
robj *hashTypeLookupWriteOrCreate(client *c, robj *key) { /* Return the key at the current iterator position as a new hfield string. */
hfield hashTypeCurrentObjectNewHfield(hashTypeIterator *hi) {
char buf[LONG_STR_SIZE];
unsigned char *vstr;
unsigned int vlen;
long long vll;
hfield hf;
hashTypeCurrentObject(hi,OBJ_HASH_KEY,&vstr,&vlen,&vll, NULL);
if (!vstr) {
vlen = ll2string(buf, sizeof(buf), vll);
vstr = (unsigned char *) buf;
}
hf = hfieldNew(vstr,vlen, 0);
return hf;
}
static robj *hashTypeLookupWriteOrCreate(client *c, robj *key) {
robj *o = lookupKeyWrite(c->db,key); robj *o = lookupKeyWrite(c->db,key);
if (checkType(c,o,OBJ_HASH)) return NULL; if (checkType(c,o,OBJ_HASH)) return NULL;
...@@ -440,19 +715,21 @@ void hashTypeConvertListpack(robj *o, int enc) { ...@@ -440,19 +715,21 @@ void hashTypeConvertListpack(robj *o, int enc) {
int ret; int ret;
hi = hashTypeInitIterator(o); hi = hashTypeInitIterator(o);
dict = dictCreate(&hashDictType); dict = dictCreate(&mstrHashDictType);
/* Presize the dict to avoid rehashing */ /* Presize the dict to avoid rehashing */
dictExpand(dict,hashTypeLength(o)); /* TODO: activeExpire listpack. Should be small */
dictExpand(dict,hashTypeLength(o, 0));
while (hashTypeNext(hi) != C_ERR) { while (hashTypeNext(hi, 0) != C_ERR) {
sds key, value;
key = hashTypeCurrentObjectNewSds(hi,OBJ_HASH_KEY); hfield key = hashTypeCurrentObjectNewHfield(hi);
value = hashTypeCurrentObjectNewSds(hi,OBJ_HASH_VALUE); sds value = hashTypeCurrentObjectNewSds(hi,OBJ_HASH_VALUE);
dictUseStoredKeyApi(dict, 1);
ret = dictAdd(dict, key, value); ret = dictAdd(dict, key, value);
dictUseStoredKeyApi(dict, 0);
if (ret != DICT_OK) { if (ret != DICT_OK) {
sdsfree(key); sdsfree(value); /* Needed for gcc ASAN */ hfieldFree(key); sdsfree(value); /* Needed for gcc ASAN */
hashTypeReleaseIterator(hi); /* Needed for gcc ASAN */ hashTypeReleaseIterator(hi); /* Needed for gcc ASAN */
serverLogHexDump(LL_WARNING,"listpack with dup elements dump", serverLogHexDump(LL_WARNING,"listpack with dup elements dump",
o->ptr,lpBytes(o->ptr)); o->ptr,lpBytes(o->ptr));
...@@ -483,7 +760,7 @@ void hashTypeConvert(robj *o, int enc) { ...@@ -483,7 +760,7 @@ void hashTypeConvert(robj *o, int enc) {
* has the same encoding as the original one. * has the same encoding as the original one.
* *
* The resulting object always has refcount set to 1 */ * The resulting object always has refcount set to 1 */
robj *hashTypeDup(robj *o) { robj *hashTypeDup(robj *o, sds newkey, uint64_t *minHashExpire) {
robj *hobj; robj *hobj;
hashTypeIterator *hi; hashTypeIterator *hi;
...@@ -496,22 +773,51 @@ robj *hashTypeDup(robj *o) { ...@@ -496,22 +773,51 @@ robj *hashTypeDup(robj *o) {
memcpy(new_zl, zl, sz); memcpy(new_zl, zl, sz);
hobj = createObject(OBJ_HASH, new_zl); hobj = createObject(OBJ_HASH, new_zl);
hobj->encoding = OBJ_ENCODING_LISTPACK; hobj->encoding = OBJ_ENCODING_LISTPACK;
} else if(o->encoding == OBJ_ENCODING_HT){ } else if(o->encoding == OBJ_ENCODING_HT) {
dict *d = dictCreate(&hashDictType); dictExpireMetadata *dictExpireMetaSrc, *dictExpireMetaDst = NULL;
dict *d;
/* If dict doesn't have HFE metadata, then create a new dict without it */
if (!isDictWithMetaHFE(o->ptr)) {
d = dictCreate(&mstrHashDictType);
} else {
/* Create a new dict with HFE metadata */
d = dictCreate(&mstrHashDictTypeWithHFE);
dictExpireMetaSrc = (dictExpireMetadata *) dictMetadata((dict *) o->ptr);
dictExpireMetaDst = (dictExpireMetadata *) dictMetadata(d);
dictExpireMetaDst->key = newkey; /* reference key in keyspace */
dictExpireMetaDst->hfe = ebCreate(); /* Allocate HFE DS */
dictExpireMetaDst->expireMeta.trash = 1; /* mark as trash (as long as it wasn't added with ebAdd()) */
/* Extract the minimum expire time of the source hash (will be used by the caller
 * to register the new hash in the global ebuckets, i.e. db->hexpires) */
if (dictExpireMetaSrc->expireMeta.trash == 0)
*minHashExpire = ebGetMetaExpTime(&dictExpireMetaSrc->expireMeta);
}
dictExpand(d, dictSize((const dict*)o->ptr)); dictExpand(d, dictSize((const dict*)o->ptr));
hi = hashTypeInitIterator(o); hi = hashTypeInitIterator(o);
while (hashTypeNext(hi) != C_ERR) { while (hashTypeNext(hi, 0) != C_ERR) {
sds field, value; uint64_t expireTime;
sds newfield, newvalue; sds newfield, newvalue;
/* Extract a field-value pair from an original hash object.*/ /* Extract a field-value pair from an original hash object.*/
field = hashTypeCurrentFromHashTable(hi, OBJ_HASH_KEY); char *field, *value;
value = hashTypeCurrentFromHashTable(hi, OBJ_HASH_VALUE); size_t fieldLen, valueLen;
newfield = sdsdup(field); hashTypeCurrentFromHashTable(hi, OBJ_HASH_KEY, &field, &fieldLen, &expireTime);
newvalue = sdsdup(value); if (expireTime == EB_EXPIRE_TIME_INVALID) {
newfield = hfieldNew(field, fieldLen, 0);
} else {
newfield = hfieldNew(field, fieldLen, 1);
ebAdd(&dictExpireMetaDst->hfe, &hashFieldExpireBucketsType, newfield, expireTime);
}
hashTypeCurrentFromHashTable(hi, OBJ_HASH_VALUE, &value, &valueLen, NULL);
newvalue = sdsnewlen(value, valueLen);
/* Add a field-value pair to a new hash object. */ /* Add a field-value pair to a new hash object. */
dictUseStoredKeyApi(d, 1);
dictAdd(d,newfield,newvalue); dictAdd(d,newfield,newvalue);
dictUseStoredKeyApi(d, 0);
} }
hashTypeReleaseIterator(hi); hashTypeReleaseIterator(hi);
...@@ -543,9 +849,9 @@ void hashReplyFromListpackEntry(client *c, listpackEntry *e) { ...@@ -543,9 +849,9 @@ void hashReplyFromListpackEntry(client *c, listpackEntry *e) {
void hashTypeRandomElement(robj *hashobj, unsigned long hashsize, listpackEntry *key, listpackEntry *val) { void hashTypeRandomElement(robj *hashobj, unsigned long hashsize, listpackEntry *key, listpackEntry *val) {
if (hashobj->encoding == OBJ_ENCODING_HT) { if (hashobj->encoding == OBJ_ENCODING_HT) {
dictEntry *de = dictGetFairRandomKey(hashobj->ptr); dictEntry *de = dictGetFairRandomKey(hashobj->ptr);
sds s = dictGetKey(de); hfield field = dictGetKey(de);
key->sval = (unsigned char*)s; key->sval = (unsigned char*)field;
key->slen = sdslen(s); key->slen = hfieldlen(field);
if (val) { if (val) {
sds s = dictGetVal(de); sds s = dictGetVal(de);
val->sval = (unsigned char*)s; val->sval = (unsigned char*)s;
...@@ -558,6 +864,161 @@ void hashTypeRandomElement(robj *hashobj, unsigned long hashsize, listpackEntry ...@@ -558,6 +864,161 @@ void hashTypeRandomElement(robj *hashobj, unsigned long hashsize, listpackEntry
} }
} }
/*
* Active expiration of fields in hash
*
* Called by hashTypeDbActiveExpire() for each hash registered in the HFE DB
* (db->hexpires) with an expiration-time less than or equal to the current time.
*
* This callback performs the following actions for each hash:
* - Deletes expired fields by calling ebExpire(hash).
* - If fields with a future expiration remain, the hash is updated in the HFE
*   DB with the next minimum hash-field expiration time by returning
*   ACT_UPDATE_EXP_ITEM.
* - If the hash has no more fields to expire, it is removed from the HFE DB
*   by returning ACT_REMOVE_EXP_ITEM.
* - If the hash is left with no fields at all, it is also removed from the keyspace.
*/
static ExpireAction hashTypeActiveExpire(eItem _hashObj, void *ctx) {
robj *hashObj = (robj *) _hashObj;
ActiveExpireCtx *activeExpireCtx = (ActiveExpireCtx *) ctx;
/* If no more quota left for this callback, stop */
if (activeExpireCtx->fieldsToExpireQuota == 0)
return ACT_STOP_ACTIVE_EXP;
if (hashObj->encoding == OBJ_ENCODING_LISTPACK) {
serverPanic("Listpack encoding not supported yet");
}
serverAssert(hashObj->encoding == OBJ_ENCODING_HT);
dict *d = hashObj->ptr;
dictExpireMetadata *dictExpireMeta = (dictExpireMetadata *) dictMetadata(d);
ExpireInfo info = {
.maxToExpire = activeExpireCtx->fieldsToExpireQuota,
.onExpireItem = onFieldExpire,
.ctx = hashObj,
.now = commandTimeSnapshot(),
.itemsExpired = 0
};
ebExpire(&dictExpireMeta->hfe, &hashFieldExpireBucketsType, &info);
/* Update quota left */
activeExpireCtx->fieldsToExpireQuota -= info.itemsExpired;
/* If hash has no more fields to expire, remove it from HFE DB */
if (info.nextExpireTime == 0) {
if (hashTypeLength(hashObj, 0) == 0) {
robj *key = createStringObject(dictExpireMeta->key, sdslen(dictExpireMeta->key));
dbDelete(activeExpireCtx->db, key);
//notifyKeyspaceEvent(NOTIFY_HASH,"xxxxxxxxx",c->argv[1],c->db->id);
notifyKeyspaceEvent(NOTIFY_GENERIC,"del",key, activeExpireCtx->db->id);
server.dirty++;
signalModifiedKey(NULL, activeExpireCtx->db, key);
decrRefCount(key);
}
return ACT_REMOVE_EXP_ITEM;
} else {
/* Hash has more fields to expire. Keep the hash among the pending items that
 * will be added back to the global HFE DS at the end of ebExpire() */
ExpireMeta *expireMeta = hashGetExpireMeta(hashObj);
ebSetMetaExpTime(expireMeta, info.nextExpireTime);
return ACT_UPDATE_EXP_ITEM;
}
}
/* Return the next/minimum expiry time of the hash-field.
* If not found, return EB_EXPIRE_TIME_INVALID */
int64_t hashTypeGetMinExpire(robj *o) {
if (o->encoding == OBJ_ENCODING_LISTPACK) {
return EB_EXPIRE_TIME_INVALID; /* not supported yet */
}
serverAssert(o->encoding == OBJ_ENCODING_HT);
dict *d = o->ptr;
if (!isDictWithMetaHFE(d))
return EB_EXPIRE_TIME_INVALID;
ExpireMeta *expireMeta = &((dictExpireMetadata *) dictMetadata(d))->expireMeta;
/* Verify the ExpireMeta is not trash, i.e. the hash is actually registered in the global HFE DS */
if (expireMeta->trash == 1)
return EB_EXPIRE_TIME_INVALID;
return ebGetMetaExpTime(expireMeta);
}
uint64_t hashTypeRemoveFromExpires(ebuckets *hexpires, robj *o) {
if (o->encoding == OBJ_ENCODING_LISTPACK)
return EB_EXPIRE_TIME_INVALID; /* not supported yet */
/* If the dict doesn't hold HFE metadata */
if (!isDictWithMetaHFE(o->ptr))
return EB_EXPIRE_TIME_INVALID;
uint64_t expireTime = ebGetExpireTime(&hashExpireBucketsType, o);
/* If registered in global HFE DS then remove it (not trash) */
if (expireTime != EB_EXPIRE_TIME_INVALID)
ebRemove(hexpires, &hashExpireBucketsType, o);
return expireTime;
}
/* Add hash to global HFE DS and update key for notifications.
*
* key - must be the same instance that is stored in db->dict
*/
void hashTypeAddToExpires(redisDb *db, sds key, robj *hashObj, uint64_t expireTime) {
if (expireTime == EB_EXPIRE_TIME_INVALID)
return;
if (hashObj->encoding == OBJ_ENCODING_LISTPACK) {
return; /* TODO */
}
serverAssert(hashObj->encoding == OBJ_ENCODING_HT);
serverAssert(isDictWithMetaHFE(hashObj->ptr));
/* Update hash with key for notifications */
dict *d = hashObj->ptr;
dictExpireMetadata *dictExpireMeta = (dictExpireMetadata *) dictMetadata(d);
dictExpireMeta->key = key;
/* Add hash to global HFE DS */
ebAdd(&db->hexpires, &hashExpireBucketsType, hashObj, expireTime);
}
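/* Usage sketch (an illustrative COPY-like flow; names and wiring are
 * assumptions, not code from this commit): after duplicating a hash with
 * hashTypeDup() and adding the copy to the keyspace, the copy is registered in
 * db->hexpires with the minimum expiration reported by the duplication, using
 * the key instance that is actually stored in db->dict. */
static void copyHashWithHFE(redisDb *db, robj *srcHash, robj *dstKey) {
    uint64_t minExpire = EB_EXPIRE_TIME_INVALID;
    robj *copy = hashTypeDup(srcHash, dstKey->ptr, &minExpire);
    dictEntry *de = dbAdd(db, dstKey, copy);
    if (minExpire != EB_EXPIRE_TIME_INVALID)
        hashTypeAddToExpires(db, dictGetKey(de), copy, minExpire);
}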
/* DB active expire and update hashes with time-expiration on fields.
*
* The callback function hashTypeActiveExpire() is invoked for each hash registered
* in the HFE DB (db->hexpires) with an expiration-time less than or equal to the
* current time. This callback performs the following actions for each hash:
* - If the hash has one or more fields to expire, it will delete those fields.
* - If there are more fields to expire, it will update the hash with the next
* expiration time in HFE DB.
* - If the hash has no more fields to expire, it is removed from the HFE DB.
* - If the hash has no more fields, it is removed from the main DB.
*
* Returns number of fields active-expired.
*/
uint64_t hashTypeDbActiveExpire(redisDb *db, uint32_t maxFieldsToExpire) {
ActiveExpireCtx ctx = { .db = db, .fieldsToExpireQuota = maxFieldsToExpire };
ExpireInfo info = {
.maxToExpire = UINT64_MAX, /* Only maxFieldsToExpire plays a role */
.onExpireItem = hashTypeActiveExpire,
.ctx = &ctx,
.now = commandTimeSnapshot(),
.itemsExpired = 0};
ebExpire(&db->hexpires, &hashExpireBucketsType, &info);
/* Return number of fields active-expired */
return maxFieldsToExpire - ctx.fieldsToExpireQuota;
}
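/* Sketch of a possible caller (the actual activeExpireCycle() wiring is not
 * part of this hunk; the per-DB quota and the place where expired fields are
 * accounted are assumptions): drive hash-field active expiration for every DB. */
static void activeExpireHashFieldsCycle(uint32_t quotaPerDb) {
    for (int j = 0; j < server.dbnum; j++)
        server.stat_expired_hash_fields +=
            hashTypeDbActiveExpire(&server.db[j], quotaPerDb);
}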
/*-----------------------------------------------------------------------------
 * Hash type commands
@@ -571,7 +1032,7 @@ void hsetnxCommand(client *c) {
addReply(c, shared.czero);
} else {
hashTypeTryConversion(o,c->argv,2,3);
-hashTypeSet(o,c->argv[2]->ptr,c->argv[3]->ptr,HASH_SET_COPY);
+hashTypeSet(c->db, o,c->argv[2]->ptr,c->argv[3]->ptr,HASH_SET_COPY);
addReply(c, shared.cone);
signalModifiedKey(c,c->db,c->argv[1]);
notifyKeyspaceEvent(NOTIFY_HASH,"hset",c->argv[1],c->db->id);
@@ -592,7 +1053,7 @@ void hsetCommand(client *c) {
hashTypeTryConversion(o,c->argv,2,c->argc-1);
for (i = 2; i < c->argc; i += 2)
-created += !hashTypeSet(o,c->argv[i]->ptr,c->argv[i+1]->ptr,HASH_SET_COPY);
+created += !hashTypeSet(c->db, o,c->argv[i]->ptr,c->argv[i+1]->ptr,HASH_SET_COPY);
/* HMSET (deprecated) and HSET return value is different. */
char *cmdname = c->argv[0]->ptr;
@@ -636,7 +1097,7 @@ void hincrbyCommand(client *c) {
}
value += incr;
new = sdsfromlonglong(value);
-hashTypeSet(o,c->argv[2]->ptr,new,HASH_SET_TAKE_VALUE);
+hashTypeSet(c->db, o,c->argv[2]->ptr,new,HASH_SET_TAKE_VALUE);
addReplyLongLong(c,value);
signalModifiedKey(c,c->db,c->argv[1]);
notifyKeyspaceEvent(NOTIFY_HASH,"hincrby",c->argv[1],c->db->id);
@@ -679,7 +1140,7 @@ void hincrbyfloatCommand(client *c) {
char buf[MAX_LONG_DOUBLE_CHARS];
int len = ld2string(buf,sizeof(buf),value,LD_STR_HUMAN);
new = sdsnewlen(buf,len);
-hashTypeSet(o,c->argv[2]->ptr,new,HASH_SET_TAKE_VALUE);
+hashTypeSet(c->db, o,c->argv[2]->ptr,new,HASH_SET_TAKE_VALUE);
addReplyBulkCBuffer(c,buf,len);
signalModifiedKey(c,c->db,c->argv[1]);
notifyKeyspaceEvent(NOTIFY_HASH,"hincrbyfloat",c->argv[1],c->db->id);
@@ -750,7 +1211,7 @@ void hdelCommand(client *c) {
for (j = 2; j < c->argc; j++) {
if (hashTypeDelete(o,c->argv[j]->ptr)) {
deleted++;
-if (hashTypeLength(o) == 0) {
+if (hashTypeLength(o, 0) == 0) {
dbDelete(c->db,c->argv[1]);
keyremoved = 1;
break;
@@ -774,7 +1235,7 @@ void hlenCommand(client *c) {
if ((o = lookupKeyReadOrReply(c,c->argv[1],shared.czero)) == NULL ||
checkType(c,o,OBJ_HASH)) return;
-addReplyLongLong(c,hashTypeLength(o));
+addReplyLongLong(c,hashTypeLength(o, 0));
}
void hstrlenCommand(client *c) {
@@ -797,8 +1258,10 @@ static void addHashIteratorCursorToReply(client *c, hashTypeIterator *hi, int wh
else
addReplyBulkLongLong(c, vll);
} else if (hi->encoding == OBJ_ENCODING_HT) {
-sds value = hashTypeCurrentFromHashTable(hi, what);
-addReplyBulkCBuffer(c, value, sdslen(value));
+char *value;
+size_t len;
+hashTypeCurrentFromHashTable(hi, what, &value, &len, NULL);
+addReplyBulkCBuffer(c, value, len);
} else {
serverPanic("Unknown hash encoding");
}
@@ -816,7 +1279,7 @@ void genericHgetallCommand(client *c, int flags) {
/* We return a map if the user requested keys and values, like in the
 * HGETALL case. Otherwise to use a flat array makes more sense. */
-length = hashTypeLength(o);
+length = hashTypeLength(o, 1 /*subtractExpiredFields*/);
if (flags & OBJ_HASH_KEY && flags & OBJ_HASH_VALUE) {
addReplyMapLen(c, length);
} else {
@@ -824,7 +1287,12 @@ void genericHgetallCommand(client *c, int flags) {
}
hi = hashTypeInitIterator(o);
-while (hashTypeNext(hi) != C_ERR) {
+/* Skip expired fields if the hash has an expire time set at global HFE DS. We could
+ * set it to constant 1, but then it will make another lookup for each field expiration */
+int skipExpiredFields = (EB_EXPIRE_TIME_INVALID == hashTypeGetMinExpire(o)) ? 0 : 1;
+while (hashTypeNext(hi, skipExpiredFields) != C_ERR) {
if (flags & OBJ_HASH_KEY) {
addHashIteratorCursorToReply(c, hi, OBJ_HASH_KEY);
count++;
@@ -869,6 +1337,7 @@ void hscanCommand(client *c) {
if (parseScanCursorOrReply(c,c->argv[2],&cursor) == C_ERR) return;
if ((o = lookupKeyReadOrReply(c,c->argv[1],shared.emptyscan)) == NULL ||
checkType(c,o,OBJ_HASH)) return;
scanGenericCommand(c,o,cursor);
}
@@ -906,7 +1375,8 @@ void hrandfieldWithCountCommand(client *c, long l, int withvalues) {
if ((hash = lookupKeyReadOrReply(c,c->argv[1],shared.emptyarray))
== NULL || checkType(c,hash,OBJ_HASH)) return;
-size = hashTypeLength(hash);
+/* TODO: Active-expire */
+size = hashTypeLength(hash, 0);
if(l >= 0) {
count = (unsigned long) l;
@@ -932,14 +1402,13 @@ void hrandfieldWithCountCommand(client *c, long l, int withvalues) {
else
addReplyArrayLen(c, count);
if (hash->encoding == OBJ_ENCODING_HT) {
-sds key, value;
while (count--) {
dictEntry *de = dictGetFairRandomKey(hash->ptr);
-key = dictGetKey(de);
-value = dictGetVal(de);
+hfield field = dictGetKey(de);
+sds value = dictGetVal(de);
if (withvalues && c->resp > 2)
addReplyArrayLen(c,2);
-addReplyBulkCBuffer(c, key, sdslen(key));
+addReplyBulkCBuffer(c, field, hfieldlen(field));
if (withvalues)
addReplyBulkCBuffer(c, value, sdslen(value));
if (c->flags & CLIENT_CLOSE_ASAP)
@@ -979,7 +1448,7 @@ void hrandfieldWithCountCommand(client *c, long l, int withvalues) {
 * elements inside the hash: simply return the whole hash. */
if(count >= size) {
hashTypeIterator *hi = hashTypeInitIterator(hash);
-while (hashTypeNext(hi) != C_ERR) {
+while (hashTypeNext(hi, 0) != C_ERR) {
if (withvalues && c->resp > 2)
addReplyArrayLen(c,2);
addHashIteratorCursorToReply(c, hi, OBJ_HASH_KEY);
@@ -1021,12 +1490,12 @@ void hrandfieldWithCountCommand(client *c, long l, int withvalues) {
 * used into CASE 4 is highly inefficient. */
if (count*HRANDFIELD_SUB_STRATEGY_MUL > size) {
/* Hashtable encoding (generic implementation) */
-dict *d = dictCreate(&sdsReplyDictType);
+dict *d = dictCreate(&sdsReplyDictType); /* without metadata! */
dictExpand(d, size);
hashTypeIterator *hi = hashTypeInitIterator(hash);
/* Add all the elements into the temporary dictionary. */
-while ((hashTypeNext(hi)) != C_ERR) {
+while ((hashTypeNext(hi, 0)) != C_ERR) {
int ret = DICT_ERR;
sds key, value = NULL;
@@ -1044,7 +1513,9 @@ void hrandfieldWithCountCommand(client *c, long l, int withvalues) {
while (size > count) {
dictEntry *de;
de = dictGetFairRandomKey(d);
+dictUseStoredKeyApi(d, 1);
dictUnlink(d,dictGetKey(de));
+dictUseStoredKeyApi(d, 0);
sdsfree(dictGetKey(de));
sdsfree(dictGetVal(de));
dictFreeUnlinkedEntry(d,de);
@@ -1134,6 +1605,510 @@ void hrandfieldCommand(client *c) {
return;
}
-hashTypeRandomElement(hash,hashTypeLength(hash),&ele,NULL);
+hashTypeRandomElement(hash,hashTypeLength(hash, 0),&ele,NULL);
hashReplyFromListpackEntry(c, &ele);
}
/*-----------------------------------------------------------------------------
* Hash Field with optional expiry (based on mstr)
*----------------------------------------------------------------------------*/
static hfield _hfieldNew(const void *field, size_t fieldlen, int withExpireMeta,
int trymalloc)
{
if (!withExpireMeta)
return mstrNew(field, fieldlen, trymalloc);
hfield hf = mstrNewWithMeta(&mstrFieldKind, field, fieldlen,
(mstrFlags) 1 << HFIELD_META_EXPIRE, trymalloc);
ExpireMeta *expireMeta = mstrMetaRef(hf, &mstrFieldKind, HFIELD_META_EXPIRE);
/* as long as it is not inside ebuckets, it is considered trash */
expireMeta->trash = 1;
return hf;
}
/* If withExpireMeta is 0, no expiration metadata is attached to the new field */
hfield hfieldNew(const void *field, size_t fieldlen, int withExpireMeta) {
return _hfieldNew(field, fieldlen, withExpireMeta, 0);
}
hfield hfieldTryNew(const void *field, size_t fieldlen, int withExpireMeta) {
return _hfieldNew(field, fieldlen, withExpireMeta, 1);
}
int hfieldIsExpireAttached(hfield field) {
return mstrIsMetaAttached(field) && mstrGetFlag(field, (int) HFIELD_META_EXPIRE);
}
static ExpireMeta* hfieldGetExpireMeta(const eItem field) {
/* extract the expireMeta from the field of type mstr */
return mstrMetaRef(field, &mstrFieldKind, (int) HFIELD_META_EXPIRE);
}
static uint64_t hfieldGetExpireTime(hfield field) {
if (!hfieldIsExpireAttached(field))
return EB_EXPIRE_TIME_INVALID;
ExpireMeta *expireMeta = mstrMetaRef(field, &mstrFieldKind, (int) HFIELD_META_EXPIRE);
if (expireMeta->trash)
return EB_EXPIRE_TIME_INVALID;
return ebGetMetaExpTime(expireMeta);
}
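/* Editor's illustrative sketch (not part of this patch): reading a field's
 * remaining TTL in milliseconds with the helpers above. Assumes `d` is the
 * dict of an OBJ_ENCODING_HT hash; kept under #if 0 so it is not compiled. */
#if 0
static long long hfieldTtlSketch(dict *d, sds fieldname) {
    dictEntry *de = dictFind(d, fieldname);
    if (de == NULL) return HFE_GET_NO_FIELD;                    /* no such field */
    uint64_t when = hfieldGetExpireTime(dictGetKey(de));
    if (when == EB_EXPIRE_TIME_INVALID) return HFE_GET_NO_TTL;  /* no TTL attached */
    return (long long)when - commandTimeSnapshot();             /* <= 0 if already expired */
}
#endif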
/* Remove TTL from the field. Assumed ExpireMeta is attached and has valid value */
static void hfieldPersist(redisDb *db, robj *hashObj, hfield field) {
uint64_t fieldExpireTime = hfieldGetExpireTime(field);
if (fieldExpireTime == EB_EXPIRE_TIME_INVALID)
return;
serverAssert(isDictWithMetaHFE(hashObj->ptr));
dict *d = hashObj->ptr;
dictExpireMetadata *dictExpireMeta = (dictExpireMetadata *)dictMetadata(d);
/* If field has valid expiry then dict should have valid metadata as well */
serverAssert(dictExpireMeta->expireMeta.trash == 0);
uint64_t minExpire = ebGetMetaExpTime(&dictExpireMeta->expireMeta);
/* Remove field from private HFE DS */
ebRemove(&dictExpireMeta->hfe, &hashFieldExpireBucketsType, field);
/* If the removed field was not the one with the minimal expiration time, then
* there is no need to update the hash in the global HFE DS. To account for
* precision loss when EB_BUCKET_KEY_PRECISION>0, compare via EB_BUCKET_KEY() */
if (EB_BUCKET_KEY(minExpire) != EB_BUCKET_KEY(fieldExpireTime)) return;
uint64_t newMinExpire = ebGetNextTimeToExpire(dictExpireMeta->hfe, &hashFieldExpireBucketsType);
/* Calculate the diff between minExpire and newMinExpire. If it is
* only a few seconds, then there is no need to update the global HFE DS. In the
* worst case, the fields of the hash will be active-expired up to a few seconds
* later.
*
* In any case, the active-expire operation will know to update the global
* HFE DS more efficiently than here for a single item.
*/
uint64_t diff = (minExpire > newMinExpire) ?
(minExpire - newMinExpire) : (newMinExpire - minExpire);
if (diff < HASH_NEW_EXPIRE_DIFF_THRESHOLD) return;
ebRemove(&db->hexpires, &hashExpireBucketsType, hashObj);
/* If it was not last field to expire */
if (newMinExpire != EB_EXPIRE_TIME_INVALID)
ebAdd(&db->hexpires, &hashExpireBucketsType, hashObj, newMinExpire);
}
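/* Worked example for the threshold logic in hfieldPersist() above (numbers are
 * illustrative only): if the hash is registered in the global HFE DS with
 * minExpire=10000ms and, after removing the persisted field, the next field to
 * expire is at newMinExpire=10400ms, then diff=400ms. Assuming
 * HASH_NEW_EXPIRE_DIFF_THRESHOLD is on the order of a second, the global HFE DS
 * is left untouched and active expiration will simply visit this hash slightly
 * earlier than strictly needed. */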
int hfieldIsExpired(hfield field) {
/* Condition remains valid even if hfieldGetExpireTime() returns EB_EXPIRE_TIME_INVALID,
* as the constant is equivalent to (EB_EXPIRE_TIME_MAX + 1). */
return ( (mstime_t)hfieldGetExpireTime(field) < commandTimeSnapshot());
}
/*-----------------------------------------------------------------------------
* Hash Field Expiration (HFE)
*----------------------------------------------------------------------------*/
/* Called during active expiration of hash-fields */
static ExpireAction onFieldExpire(eItem item, void *ctx) {
hfield hf = item;
robj *hashobj = (robj *) ctx;
dictUseStoredKeyApi((dict *)hashobj->ptr, 1);
hashTypeDelete(hashobj, hf);
server.stat_expired_hash_fields++;
dictUseStoredKeyApi((dict *)hashobj->ptr, 0);
return ACT_REMOVE_EXP_ITEM;
}
/* Retrieve the ExpireMeta associated with the hash.
* The caller is responsible for ensuring that it is indeed attached. */
static ExpireMeta *hashGetExpireMeta(const eItem item) {
robj *hashObj = (robj *)item;
dict *d = hashObj->ptr;
dictExpireMetadata *dictExpireMeta = (dictExpireMetadata *) dictMetadata(d);
return &dictExpireMeta->expireMeta;
}
/* Set an expiration time on a hash field */
SetExpireTimeRes hashTypeSetExpire(ebuckets *eb,
robj *hashObj,
sds field,
uint64_t expireAt,
int flag,
uint64_t *minPrevExp)
{
dict *d = hashObj->ptr;
uint64_t prevExpire = EB_EXPIRE_TIME_MAX;
/* First retrieve the field to check if it exists */
dictEntry *de = dictFind(d, field);
if (de == NULL) return HFE_SET_NO_FIELD;
hfield hf = dictGetKey(de);
/* If field doesn't have expiry metadata attached */
if (!hfieldIsExpireAttached(hf)) {
if (flag & (HFE_XX | HFE_LT | HFE_GT))
return HFE_SET_NO_CONDITION_MET;
/* allocate new field with expire metadata */
hfield hfNew = hfieldNew(hf, hfieldlen(hf), 1 /*withExpireMeta*/);
/* Replace the old field with the new one with metadata */
dictSetKey(d, de, hfNew);
hfieldFree(hf);
hf = hfNew;
} else {
/* read previous expire time */
prevExpire = hfieldGetExpireTime(hf);
if (prevExpire != EB_EXPIRE_TIME_INVALID) {
if (((flag == HFE_GT) && (prevExpire >= expireAt)) ||
((flag == HFE_LT) && (prevExpire <= expireAt)) ||
(flag == HFE_NX) )
return HFE_SET_NO_CONDITION_MET;
ebRemove(eb, &hashFieldExpireBucketsType, hf);
if (*minPrevExp > prevExpire)
*minPrevExp = prevExpire;
} else {
if (flag & (HFE_XX | HFE_LT | HFE_GT))
return HFE_SET_NO_CONDITION_MET;
}
}
/* if expiration time is in the past */
if (checkAlreadyExpired(expireAt)) {
hashTypeDelete(hashObj, field);
return HFE_SET_DELETED;
}
ebAdd(eb, &hashFieldExpireBucketsType, hf, expireAt);
// TODO: propagate, rewrite command if needed. See expireGenericCommand() as reference
return HFE_SET_OK;
}
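/* Example of the condition flags above (illustrative): if a field already has a
 * valid expiration prevExpire that is later than the requested expireAt, then
 * HFE_GT returns HFE_SET_NO_CONDITION_MET, while HFE_LT re-registers the field
 * in the ebuckets at the earlier time and returns HFE_SET_OK (unless the new
 * time is already in the past, in which case the field is deleted and
 * HFE_SET_DELETED is returned). HFE_NX fails whenever a valid expiration is
 * already set. */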
static void httlGenericCommand(client *c, const char *cmd, long long basetime, int unit) {
UNUSED(cmd);
robj *hashObj;
long numFields = 0, numFieldsAt = 2;
/* Read the hash object */
if ((hashObj = lookupKeyReadOrReply(c, c->argv[1], shared.null[c->resp])) == NULL ||
checkType(c, hashObj, OBJ_HASH)) return;
/* Listpack encoding is not supported yet */
if (hashObj->encoding == OBJ_ENCODING_LISTPACK) {
addReplyError(c,"Hash field expire for listpack not supported yet.");
return;
}
dict *d = hashObj->ptr;
/* Read number of fields */
if (getRangeLongFromObjectOrReply(c, c->argv[numFieldsAt], 1, LONG_MAX,
&numFields, "Parameter `numFileds` should be greater than 0") != C_OK)
return;
/* Verify `numFields` is consistent with number of arguments */
if (numFields > (c->argc - numFieldsAt - 1)) {
addReplyError(c, "Parameter `numFileds` is more than number of arguments");
return;
}
addReplyArrayLen(c, numFields);
for (int i = 0 ; i < numFields ; i++) {
sds field = c->argv[3+i]->ptr;
dictEntry *de = dictFind(d, field);
if (de == NULL) {
addReplyLongLong(c, HFE_GET_NO_FIELD);
continue;
}
hfield hf = dictGetKey(de);
uint64_t expire = hfieldGetExpireTime(hf);
if (expire == EB_EXPIRE_TIME_INVALID) {
addReplyLongLong(c, HFE_GET_NO_TTL); /* no ttl */
continue;
}
if ( (long long) expire <= commandTimeSnapshot()) {
addReplyLongLong(c, HFE_GET_NO_FIELD);
continue;
}
if (unit == UNIT_SECONDS)
addReplyLongLong(c, (expire + 999 - basetime) / 1000);
else
addReplyLongLong(c, (expire - basetime));
}
}
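/* Reply rounding example for the helper above (illustrative): with
 * basetime=commandTimeSnapshot() and a field expiring in 1500ms, HPTTL reports
 * 1500 while HTTL reports (1500+999)/1000 = 2, i.e. the remaining seconds are
 * rounded up so a still-live field never reports 0. */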
/* This is the generic command implementation for HEXPIRE, HPEXPIRE, HEXPIREAT
* and HPEXPIREAT. Because the command second argument may be relative or absolute
* the "basetime" argument is used to signal what the base time is (either 0
* for *AT variants of the command, or the current time for relative expires).
*
* unit is either UNIT_SECONDS or UNIT_MILLISECONDS, and is only used for
* the argv[2] parameter. The basetime is always specified in milliseconds.
*
* Additional flags are supported and parsed via parseExtendedExpireArguments */
static void hexpireGenericCommand(client *c, const char *cmd, long long basetime, int unit) {
long numFields = 0, numFieldsAt = 3;
long long expire; /* unix time in msec */
int flag = 0;
robj *hashObj, *keyArg = c->argv[1], *expireArg = c->argv[2];
/* Read the hash object */
if ((hashObj = lookupKeyWriteOrReply(c, keyArg, shared.null[c->resp])) == NULL ||
checkType(c, hashObj, OBJ_HASH)) return;
/* Listpack encoding is not supported yet */
if (hashObj->encoding == OBJ_ENCODING_LISTPACK) {
addReplyError(c,"Hash field expire for listpack not supported yet.");
return;
}
dict *d = hashObj->ptr;
/* Read the expiry time from command */
if (getLongLongFromObjectOrReply(c, expireArg, &expire, NULL) != C_OK)
return;
/* Check expire overflow */
if (expire > (long long) EB_EXPIRE_TIME_MAX) {
addReplyErrorExpireTime(c);
return;
}
if (unit == UNIT_SECONDS) {
if (expire > (long long) EB_EXPIRE_TIME_MAX / 1000) {
addReplyErrorExpireTime(c);
return;
}
expire *= 1000;
} else {
if (expire > (long long) EB_EXPIRE_TIME_MAX) {
addReplyErrorExpireTime(c);
return;
}
}
if (expire > (long long) EB_EXPIRE_TIME_MAX - basetime) {
addReplyErrorExpireTime(c);
return;
}
expire += basetime;
/* Read optional flag [NX|XX|GT|LT] */
char *optArg = c->argv[3]->ptr;
if (!strcasecmp(optArg, "nx")) {
flag = HFE_NX; ++numFieldsAt;
} else if (!strcasecmp(optArg, "xx")) {
flag = HFE_XX; ++numFieldsAt;
} else if (!strcasecmp(optArg, "gt")) {
flag = HFE_GT; ++numFieldsAt;
} else if (!strcasecmp(optArg, "lt")) {
flag = HFE_LT; ++numFieldsAt;
}
/* Read number of fields */
if (getRangeLongFromObjectOrReply(c, c->argv[numFieldsAt], 1, LONG_MAX,
&numFields, "Parameter `numFields` should be greater than 0") != C_OK)
return;
/* Verify `numFields` is consistent with number of arguments */
if (numFields > (c->argc - numFieldsAt - 1)) {
addReplyError(c, "Parameter `numFileds` is more than number of arguments");
return;
}
dictExpireMetadata *dictExpireMeta;
uint64_t minExpire = EB_EXPIRE_TIME_INVALID;
/* If dict doesn't have metadata attached */
if (!isDictWithMetaHFE(d)) {
/* Realloc (only header of dict) with metadata for hash-field expiration */
dictTypeAddMeta(&d, &mstrHashDictTypeWithHFE);
dictExpireMeta = (dictExpireMetadata *) dictMetadata(d);
hashObj->ptr = d;
/* Find the key in the keyspace. Need to keep reference to the key for
* notifications or even removal of the hash */
dictEntry *de = dbFind(c->db, keyArg->ptr);
serverAssert(de != NULL);
sds key = dictGetKey(de);
/* Fillup dict HFE metadata */
dictExpireMeta->key = key; /* reference key in keyspace */
dictExpireMeta->hfe = ebCreate(); /* Allocate HFE DS */
dictExpireMeta->expireMeta.trash = 1; /* mark as trash (as long it wasn't ebAdd()) */
} else {
dictExpireMeta = (dictExpireMetadata *) dictMetadata(d);
ExpireMeta *expireMeta = &dictExpireMeta->expireMeta;
/* Keep aside next hash-field expiry before updating HFE DS. Verify it is not trash */
if (expireMeta->trash == 0)
minExpire = ebGetMetaExpTime(&dictExpireMeta->expireMeta);
}
/* Figure out which of the fields provided in the command has the minimum
* expiration time, before the modification (used for the optimization below) */
uint64_t minExpireFields = EB_EXPIRE_TIME_INVALID;
/* For each field in command, update dict HFE DS */
int fieldUpdated=0, fieldDeleted=0;
addReplyArrayLen(c, numFields);
for (int i = 0 ; i < numFields ; i++) {
sds field = c->argv[numFieldsAt+i+1]->ptr;
SetExpireTimeRes res = hashTypeSetExpire(&dictExpireMeta->hfe,
hashObj,
field,
expire,
flag,
&minExpireFields);
addReplyLongLong(c,res);
if (res == HFE_SET_DELETED)
++fieldDeleted;
else if (res == HFE_SET_OK)
++fieldUpdated;
}
/* Notify keyspace event, update dirty count and update global HFE DS */
if (fieldDeleted + fieldUpdated > 0) {
server.dirty += fieldDeleted + fieldUpdated;
signalModifiedKey(c,c->db,keyArg);
notifyKeyspaceEvent(NOTIFY_HASH,cmd,keyArg,c->db->id);
if (fieldDeleted && hashTypeLength(hashObj, 0) == 0) {
dbDelete(c->db,keyArg);
notifyKeyspaceEvent(NOTIFY_GENERIC,"del",keyArg, c->db->id);
} else {
/* If the minimum HFE of the hash is smaller than the previous expiration
* times of the specified fields, and also smaller than or equal to the
* expiration time provided in the command, then the minimum HFE of the
* hash won't change following this command. */
if ((minExpire < minExpireFields) && ((long long)minExpire <= expire) )
return;
/* retrieve new expired time. It might have changed. */
uint64_t newMinExpire = ebGetNextTimeToExpire(dictExpireMeta->hfe,
&hashFieldExpireBucketsType);
/* Calculate the diff between the old minExpire and newMinExpire. If it is
* only a few seconds, then there is no need to update the global HFE DS. In the
* worst case, the fields of the hash will be active-expired up to a few seconds
* later.
*
* In any case, the active-expire operation will know to update the global
* HFE DS more efficiently than here for a single item.
*/
uint64_t diff = (minExpire > newMinExpire) ?
(minExpire - newMinExpire) : (newMinExpire - minExpire);
if (diff < HASH_NEW_EXPIRE_DIFF_THRESHOLD) return;
if (minExpire != EB_EXPIRE_TIME_INVALID)
ebRemove(&c->db->hexpires, &hashExpireBucketsType, hashObj);
if (newMinExpire != EB_EXPIRE_TIME_INVALID)
ebAdd(&c->db->hexpires, &hashExpireBucketsType, hashObj, newMinExpire);
}
}
}
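/* Argument layout example for the parser above (illustrative): for
 * "HPEXPIRE myhash 1000 NX 2 f1 f2", argv[3] is "NX" so numFieldsAt becomes 4,
 * numFields=2 is read from argv[4], and the fields are read from argv[5] and
 * argv[6] (argv[numFieldsAt+i+1]). */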
/* HPEXPIRE key milliseconds [ NX | XX | GT | LT] numfields <field [field ...]> */
void hpexpireCommand(client *c) {
hexpireGenericCommand(c,"hpexpire", commandTimeSnapshot(),UNIT_MILLISECONDS);
}
/* HEXPIRE key seconds [NX | XX | GT | LT] numfields <field [field ...]> */
void hexpireCommand(client *c) {
hexpireGenericCommand(c,"hexpire", commandTimeSnapshot(),UNIT_SECONDS);
}
/* HEXPIREAT key unix-time-seconds [NX | XX | GT | LT] numfields <field [field ...]> */
void hexpireatCommand(client *c) {
hexpireGenericCommand(c,"hexpireat", 0,UNIT_SECONDS);
}
/* HPEXPIREAT key unix-time-milliseconds [NX | XX | GT | LT] numfields <field [field ...]> */
void hpexpireatCommand(client *c) {
hexpireGenericCommand(c,"hpexpireat", 0,UNIT_MILLISECONDS);
}
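/* Basetime/unit conversion example for the wrappers above (values are
 * illustrative): "HEXPIRE key 10 ..." is handled with unit=UNIT_SECONDS and
 * basetime=commandTimeSnapshot(), so the stored expiry becomes now + 10*1000 ms,
 * while "HPEXPIREAT key 1700000000000 ..." uses basetime=0 with
 * UNIT_MILLISECONDS, so the argument is taken directly as an absolute unix time
 * in milliseconds. */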
/* For each specified field, get the remaining time to live in seconds */
/* HTTL key numfields <field [field ...]> */
void httlCommand(client *c) {
httlGenericCommand(c, "httl", commandTimeSnapshot(), UNIT_SECONDS);
}
/* HPTTL key numfields <field [field ...]> */
void hpttlCommand(client *c) {
httlGenericCommand(c, "hpttl", commandTimeSnapshot(), UNIT_MILLISECONDS);
}
/* HEXPIRETIME key numFields <field [field ...]> */
void hexpiretimeCommand(client *c) {
httlGenericCommand(c, "hexpiretime", 0, UNIT_SECONDS);
}
/* HPEXPIRETIME key numFields <field [field ...]> */
void hpexpiretimeCommand(client *c) {
httlGenericCommand(c, "hexpiretime", 0, UNIT_MILLISECONDS);
}
/* HPERSIST key <numfields> <field [field ...]> */
void hpersistCommand(client *c) {
robj *hashObj;
long numFields = 0, numFieldsAt = 2;
/* Read the hash object */
if ((hashObj = lookupKeyReadOrReply(c, c->argv[1], shared.null[c->resp])) == NULL ||
checkType(c, hashObj, OBJ_HASH)) return;
/* Listpack encoding is not supported yet */
if (hashObj->encoding == OBJ_ENCODING_LISTPACK) {
addReplyError(c,"Hash field expire for listpack not supported yet.");
return;
}
dict *d = hashObj->ptr;
/* Read number of fields */
if (getRangeLongFromObjectOrReply(c, c->argv[numFieldsAt], 1, LONG_MAX,
&numFields, "Parameter `numFileds` should be greater than 0") != C_OK)
return;
/* Verify `numFields` is consistent with number of arguments */
if (numFields > (c->argc - numFieldsAt - 1)) {
addReplyError(c, "Parameter `numFileds` is more than number of arguments");
return;
}
addReplyArrayLen(c, numFields);
for (int i = 0 ; i < numFields ; i++) {
sds field = c->argv[3+i]->ptr;
dictEntry *de = dictFind(d, field);
if (de == NULL) {
addReplyLongLong(c, HFE_PERSIST_NO_FIELD);
continue;
}
hfield hf = dictGetKey(de);
uint64_t expire = hfieldGetExpireTime(hf);
if (expire == EB_EXPIRE_TIME_INVALID) {
addReplyLongLong(c, HFE_PERSIST_NO_TTL);
continue;
}
/* Already expired. Pretend there is no such field */
if ( (long long) expire <= commandTimeSnapshot()) {
addReplyLongLong(c, HFE_PERSIST_NO_FIELD);
continue;
}
hfieldPersist(c->db, hashObj, hf);
addReplyLongLong(c, HFE_PERSIST_OK);
}
}
@@ -21,6 +21,8 @@
#define REDIS_TEST_ACCURATE (1<<0)
#define REDIS_TEST_LARGE_MEMORY (1<<1)
#define REDIS_TEST_VALGRIND (1<<2)
+#define REDIS_TEST_VERBOSE (1<<3)
extern int __failed_tests;
extern int __test_num;
@@ -34,6 +34,7 @@ set ::all_tests {
unit/type/set
unit/type/zset
unit/type/hash
+unit/type/hash-field-expire
unit/type/stream
unit/type/stream-cgroups
unit/sort
######## HEXPIRE family commands
# Field does not exists
set E_NO_FIELD -2
# Specified NX | XX | GT | LT condition not met
set E_FAIL 0
# expiration time set/updated
set E_OK 1
# Field deleted because the specified expiration time is in the past
set E_DELETED 2
######## HTTL family commands
set T_NO_FIELD -2
set T_NO_EXPIRY -1
######## HPERSIST
set P_NO_FIELD -2
set P_NO_EXPIRY -1
set P_OK 1
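# Example (illustrative): assuming myhash exists with field f1 that has no TTL yet,
# [r hpexpire myhash 1000 NX 2 f1 no_such_field] replies {1 -2}, i.e. $E_OK for f1
# followed by $E_NO_FIELD for the missing field.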
############################### AUX FUNCS ######################################
proc create_hash {key entries} {
r del $key
foreach entry $entries {
r hset $key [lindex $entry 0] [lindex $entry 1]
}
}
proc get_keys {l} {
set res {}
foreach entry $l {
set key [lindex $entry 0]
lappend res $key
}
return $res
}
proc cmp_hrandfield_result {hash_name expected_result} {
# Accumulate hrandfield results
unset -nocomplain myhash
array set myhash {}
for {set i 0} {$i < 100} {incr i} {
set key [r hrandfield $hash_name]
set myhash($key) 1
}
set res [lsort [array names myhash]]
if {$res eq $expected_result} {
return 1
} else {
return $res
}
}
proc hrandfieldTest {activeExpireConfig} {
r debug set-active-expire $activeExpireConfig
r del myhash
set contents {{field1 1} {field2 2} }
create_hash myhash $contents
set factorValgrind [expr {$::valgrind ? 2 : 1}]
# Set expiration time for field1 and field2 such that field1 expires first
r hpexpire myhash 1 NX 1 field1
r hpexpire myhash 100 NX 1 field2
# Calling the HRANDFIELD command triggers lazy expire, which deletes field1 first
wait_for_condition 8 10 {
[cmp_hrandfield_result myhash "field2"] == 1
} else {
fail "Expected field2 to be returned by HRANDFIELD."
}
# Calling the HRANDFIELD command again lazy-expires field2 as well
wait_for_condition 8 20 {
[cmp_hrandfield_result myhash "{}"] == 1
} else {
fail "Expected {} to be returned by HRANDFIELD."
}
# restore the default value
r debug set-active-expire 1
}
############################### TESTS #########################################
start_server {tags {"external:skip needs:debug"}} {
# Currently listpack doesn't support HFE
r config set hash-max-listpack-entries 0
test {HPEXPIRE - Test 'NX' flag} {
r del myhash
r hset myhash field1 value1 field2 value2 field3 value3
assert_equal [r hpexpire myhash 1000 NX 1 field1] [list $E_OK]
assert_equal [r hpexpire myhash 1000 NX 2 field1 field2] [list $E_FAIL $E_OK]
}
test {HPEXPIRE - Test 'XX' flag} {
r del myhash
r hset myhash field1 value1 field2 value2 field3 value3
assert_equal [r hpexpire myhash 1000 NX 2 field1 field2] [list $E_OK $E_OK]
assert_equal [r hpexpire myhash 1000 XX 2 field1 field3] [list $E_OK $E_FAIL]
}
test {HPEXPIRE - Test 'GT' flag} {
r del myhash
r hset myhash field1 value1 field2 value2
assert_equal [r hpexpire myhash 1000 NX 1 field1] [list $E_OK]
assert_equal [r hpexpire myhash 2000 NX 1 field2] [list $E_OK]
assert_equal [r hpexpire myhash 1500 GT 2 field1 field2] [list $E_OK $E_FAIL]
}
test {HPEXPIRE - Test 'LT' flag} {
r del myhash
r hset myhash field1 value1 field2 value2
assert_equal [r hpexpire myhash 1000 NX 1 field1] [list $E_OK]
assert_equal [r hpexpire myhash 2000 NX 1 field2] [list $E_OK]
assert_equal [r hpexpire myhash 1500 LT 2 field1 field2] [list $E_FAIL $E_OK]
}
test {HEXPIREAT - field does not exist or TTL is in the past} {
r del myhash
r hset myhash f1 v1 f2 v2 f4 v4
r hexpire myhash 1000 NX 1 f4
assert_equal [r hexpireat myhash [expr {[clock seconds] - 1}] NX 4 f1 f2 f3 f4] "$E_DELETED $E_DELETED $E_NO_FIELD $E_FAIL"
assert_equal [r hexists myhash f1] 0
}
test {HPEXPIRE - wrong number of arguments} {
r del myhash
r hset myhash f1 v1
assert_error {*Parameter `numFields` should be greater than 0} {r hpexpire myhash 1000 NX 0 f1 f2 f3}
assert_error {*Parameter `numFields` is more than number of arguments} {r hpexpire myhash 1000 NX 4 f1 f2 f3}
}
test {HPEXPIRE - parameter expire-time near limit of 2^48} {
r del myhash
r hset myhash f1 v1
# below & above
assert_equal [r hpexpire myhash [expr (1<<48) - [clock milliseconds] - 1000 ] 1 f1] [list $E_OK]
assert_error {*invalid expire time*} {r hpexpire myhash [expr (1<<48) - [clock milliseconds] + 100 ] 1 f1}
}
test {Lazy expire - doesn't delete hash even when all its fields got expired} {
r debug set-active-expire 0
r flushall
set hash_sizes {1 15 16 17 31 32 33 40}
foreach h $hash_sizes {
for {set i 1} {$i <= $h} {incr i} {
# random expiration time
r hset hrand$h f$i v$i
r hpexpire hrand$h [expr {50 + int(rand() * 50)}] 1 f$i
assert_equal 1 [r HEXISTS hrand$h f$i]
# same expiration time
r hset same$h f$i v$i
r hpexpire same$h 100 1 f$i
assert_equal 1 [r HEXISTS same$h f$i]
# mix of fields with and without expiration time
r hset mix$h f$i v$i fieldWithoutExpire$i v$i
r hpexpire mix$h 100 1 f$i
assert_equal 1 [r HEXISTS mix$h f$i]
}
}
after 150
# Verify that all fields got expired but the keys weren't lazily deleted
foreach h $hash_sizes {
for {set i 1} {$i <= $h} {incr i} {
assert_equal 0 [r HEXISTS mix$h f$i]
}
assert_equal 1 [r EXISTS hrand$h]
assert_equal 1 [r EXISTS same$h]
assert_equal [expr $h * 2] [r HLEN mix$h]
}
# Restore default
r debug set-active-expire 1
}
test {Active expire - deletes hash when all its fields got expired} {
r flushall
set hash_sizes {1 15 16 17 31 32 33 40}
foreach h $hash_sizes {
for {set i 1} {$i <= $h} {incr i} {
# random expiration time
r hset hrand$h f$i v$i
r hpexpire hrand$h [expr {50 + int(rand() * 50)}] 1 f$i
assert_equal 1 [r HEXISTS hrand$h f$i]
# same expiration time
r hset same$h f$i v$i
r hpexpire same$h 100 1 f$i
assert_equal 1 [r HEXISTS same$h f$i]
# mix of fields with and without expiration time
r hset mix$h f$i v$i fieldWithoutExpire$i v$i
r hpexpire mix$h 100 1 f$i
assert_equal 1 [r HEXISTS mix$h f$i]
}
}
# Wait for active expire
wait_for_condition 50 20 { [r EXISTS same40] == 0 } else { fail "hash `same40` should be expired" }
# Verify that all fields got expired and keys got deleted
foreach h $hash_sizes {
for {set i 1} {$i <= $h} {incr i} {
assert_equal 0 [r HEXISTS mix$h f$i]
}
assert_equal 0 [r EXISTS hrand$h]
assert_equal 0 [r EXISTS same$h]
assert_equal $h [r HLEN mix$h]
}
}
test {HPEXPIRE - Flushall deletes all pending expired fields} {
r del myhash
r hset myhash field1 value1 field2 value2
r hpexpire myhash 10000 NX 1 field1
r hpexpire myhash 10000 NX 1 field2
r flushall
r del myhash
r hset myhash field1 value1 field2 value2
r hpexpire myhash 10000 NX 1 field1
r hpexpire myhash 10000 NX 1 field2
r flushall async
}
test {HTTL/HPTTL - Handle non-existing key, non-existing field and field without expiry} {
r del myhash
r HSET myhash field1 value1 field2 value2
r HPEXPIRE myhash 1000 NX 1 field1
foreach cmd {HTTL HPTTL} {
assert_equal [r $cmd non_exists_key 1 f] {}
assert_equal [r $cmd myhash 2 field2 non_exists_field] "$T_NO_EXPIRY $T_NO_FIELD"
# Set numFields less than actual number of fields. Fine.
assert_equal [r $cmd myhash 1 non_exists_field1 non_exists_field2] "$T_NO_FIELD"
}
}
test {HTTL/HPTTL - returns time to live in seconds/milliseconds} {
r del myhash
r HSET myhash field1 value1 field2 value2
r HPEXPIRE myhash 2000 NX 2 field1 field2
set ttlArray [r HTTL myhash 2 field1 field2]
assert_range [lindex $ttlArray 0] 1 2
set ttl [r HPTTL myhash 1 field1]
assert_range $ttl 1000 2000
}
test {HEXPIRETIME - returns TTL in Unix timestamp} {
r del myhash
r HSET myhash field1 value1
r HPEXPIRE myhash 1000 NX 1 field1
set lo [expr {[clock seconds] + 1}]
set hi [expr {[clock seconds] + 2}]
assert_range [r HEXPIRETIME myhash 1 field1] $lo $hi
assert_range [r HPEXPIRETIME myhash 1 field1] [expr $lo*1000] [expr $hi*1000]
}
test {HTTL/HPTTL - Verify TTL progress until expiration} {
r del myhash
r hset myhash field1 value1 field2 value2
r hpexpire myhash 200 NX 1 field1
assert_range [r HPTTL myhash 1 field1] 100 200
assert_range [r HTTL myhash 1 field1] 0 1
after 100
assert_range [r HPTTL myhash 1 field1] 1 101
after 110
assert_equal [r HPTTL myhash 1 field1] $T_NO_FIELD
assert_equal [r HTTL myhash 1 field1] $T_NO_FIELD
}
test {HPEXPIRE - DEL hash with non expired fields (valgrind test)} {
r del myhash
r hset myhash field1 value1 field2 value2
r hpexpire myhash 10000 NX 1 field1
r del myhash
}
test {HEXPIREAT - Set time in the past} {
r del myhash
r hset myhash field1 value1
assert_equal [r hexpireat myhash [expr {[clock seconds] - 1}] NX 1 field1] $E_DELETED
assert_equal [r hexists myhash field1] 0
}
test {HEXPIREAT - Set time and then get TTL} {
r del myhash
r hset myhash field1 value1
r hexpireat myhash [expr {[clock seconds] + 2}] NX 1 field1
assert_range [r hpttl myhash 1 field1] 1000 2000
assert_range [r httl myhash 1 field1] 1 2
r hexpireat myhash [expr {[clock seconds] + 5}] XX 1 field1
assert_range [r httl myhash 1 field1] 4 5
}
test {Lazy expire - delete hash with expired fields} {
r del myhash
r debug set-active-expire 0
r hset myhash k v
r hpexpire myhash 1 NX 1 k
after 5
r del myhash
r debug set-active-expire 1
}
# OPEN: Decide whether to delete expired fields at the start of HRANDFIELD.
# test {Test HRANDFIELD does not return expired fields} {
# hrandfieldTest 0
# hrandfieldTest 1
# }
test {Test HRANDFIELD can return expired fields} {
r debug set-active-expire 0
r del myhash
r hset myhash f1 v1 f2 v2 f3 v3 f4 v4 f5 v5
r hpexpire myhash 1 NX 4 f1 f2 f3 f4
after 5
set res [cmp_hrandfield_result myhash "f1 f2 f3 f4 f5"]
assert {$res == 1}
r debug set-active-expire 1
}
test {Lazy expire - HLEN does count expired fields} {
# Enforce only lazy expire
r debug set-active-expire 0
r del h1 h4 h18 h20
r hset h1 k1 v1
r hpexpire h1 1 NX 1 k1
r hset h4 k1 v1 k2 v2 k3 v3 k4 v4
r hpexpire h4 1 NX 3 k1 k3 k4
# beyond 16 fields: HFE DS (ebuckets) converts from list to rax
r hset h18 k1 v1 k2 v2 k3 v3 k4 v4 k5 v5 k6 v6 k7 v7 k8 v8 k9 v9 k10 v10 k11 v11 k12 v12 k13 v13 k14 v14 k15 v15 k16 v16 k17 v17 k18 v18
r hpexpire h18 1 NX 18 k1 k2 k3 k4 k5 k6 k7 k8 k9 k10 k11 k12 k13 k14 k15 k16 k17 k18
r hset h20 k1 v1 k2 v2 k3 v3 k4 v4 k5 v5 k6 v6 k7 v7 k8 v8 k9 v9 k10 v10 k11 v11 k12 v12 k13 v13 k14 v14 k15 v15 k16 v16 k17 v17 k18 v18 k19 v19 k20 v20
r hpexpire h20 1 NX 2 k1 k2
after 10
assert_equal [r hlen h1] 1
assert_equal [r hlen h4] 4
assert_equal [r hlen h18] 18
assert_equal [r hlen h20] 20
# Restore to support active expire
r debug set-active-expire 1
}
test {Lazy expire - HSCAN does not report expired fields} {
# Enforce only lazy expire
r debug set-active-expire 0
r del h1 h20 h4 h18 h20
r hset h1 01 01
r hpexpire h1 1 NX 1 01
r hset h4 01 01 02 02 03 03 04 04
r hpexpire h4 1 NX 3 01 03 04
# beyond 16 fields hash-field expiration DS (ebuckets) converts from list to rax
r hset h18 01 01 02 02 03 03 04 04 05 05 06 06 07 07 08 08 09 09 10 10 11 11 12 12 13 13 14 14 15 15 16 16 17 17 18 18
r hpexpire h18 1 NX 18 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15 16 17 18
r hset h20 01 01 02 02 03 03 04 04 05 05 06 06 07 07 08 08 09 09 10 10 11 11 12 12 13 13 14 14 15 15 16 16 17 17 18 18 19 19 20 20
r hpexpire h20 1 NX 2 01 02
after 10
# Verify SCAN does not report expired fields
assert_equal [lsort -unique [lindex [r hscan h1 0 COUNT 10] 1]] ""
assert_equal [lsort -unique [lindex [r hscan h4 0 COUNT 10] 1]] "02"
assert_equal [lsort -unique [lindex [r hscan h18 0 COUNT 10] 1]] ""
assert_equal [lsort -unique [lindex [r hscan h20 0 COUNT 100] 1]] "03 04 05 06 07 08 09 10 11 12 13 14 15 16 17 18 19 20"
# Restore to support active expire
r debug set-active-expire 1
}
test {Test HSCAN with mostly expired fields returns mostly empty results} {
r debug set-active-expire 0
# Create a hash with 1000 fields, 999 of which will be expired
r del myhash
for {set i 1} {$i <= 1000} {incr i} {
r hset myhash field$i value$i
if {$i > 1} {
r hpexpire myhash 1 NX 1 field$i
}
}
after 3
# Verify iterative HSCAN returns either empty result or only the first field
set countEmptyResult 0
set cur 0
while 1 {
set res [r hscan myhash $cur]
set cur [lindex $res 0]
# if the result is not empty, it should contain only the first field
if {[llength [lindex $res 1]] > 0} {
assert_equal [lindex $res 1] "field1 value1"
} else {
incr countEmptyResult
}
if {$cur == 0} break
}
assert {$countEmptyResult > 0}
r debug set-active-expire 1
}
test {Lazy expire - verify various HASH commands handling expired fields} {
# Enforce only lazy expire
r debug set-active-expire 0
r del h1 h2 h3 h4 h5 h18
r hset h1 01 01
r hset h2 01 01 02 02
r hset h3 01 01 02 02 03 03
r hset h4 1 99 2 99 3 99 4 99
r hset h5 1 1 2 22 3 333 4 4444 5 55555
r hset h6 01 01 02 02 03 03 04 04 05 05 06 06
r hset h18 01 01 02 02 03 03 04 04 05 05 06 06 07 07 08 08 09 09 10 10 11 11 12 12 13 13 14 14 15 15 16 16 17 17 18 18
r hpexpire h1 100 NX 1 01
r hpexpire h2 100 NX 1 01
r hpexpire h2 100 NX 1 02
r hpexpire h3 100 NX 1 01
r hpexpire h4 100 NX 1 2
r hpexpire h5 100 NX 1 3
r hpexpire h6 100 NX 1 05
r hpexpire h18 100 NX 17 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15 16 17
after 150
# Verify HDEL does not ignore expired fields. It would be too much overhead to
# check whether the field is expired before deletion.
assert_equal [r HDEL h1 01] "1"
# Verify HGET ignores expired fields
assert_equal [r HGET h2 01] ""
assert_equal [r HGET h2 02] ""
assert_equal [r HGET h3 01] ""
assert_equal [r HGET h3 02] "02"
assert_equal [r HGET h3 03] "03"
# Verify HINCRBY ignores expired fields
assert_equal [r HINCRBY h4 2 1] "1"
assert_equal [r HINCRBY h4 3 1] "100"
# Verify HSTRLEN ignores expired fields
assert_equal [r HSTRLEN h5 3] "0"
assert_equal [r HSTRLEN h5 4] "4"
assert_equal [lsort [r HKEYS h6]] "01 02 03 04 06"
# Verify HEXISTS ignores expired fields
assert_equal [r HEXISTS h18 07] "0"
assert_equal [r HEXISTS h18 18] "1"
# Verify HVALS ignores expired fields
assert_equal [lsort [r HVALS h18]] "18"
# Restore to support active expire
r debug set-active-expire 1
}
test {A field with TTL overridden with another value (TTL discarded)} {
r del myhash
r hset myhash field1 value1
r hpexpire myhash 1 NX 1 field1
r hset myhash field1 value2
after 5
# Expected TTL will be discarded
assert_equal [r hget myhash field1] "value2"
}
test {Modify TTL of a field} {
r del myhash
r hset myhash field1 value1
r hpexpire myhash 200 NX 1 field1
r hpexpire myhash 1000 XX 1 field1
after 15
assert_equal [r hget myhash field1] "value1"
assert_range [r hpttl myhash 1 field1] 900 1000
}
test {Test HGETALL does not return expired fields} {
# Test with small hash
r debug set-active-expire 0
r del myhash myhash1
r hset myhash1 f1 v1 f2 v2 f3 v3 f4 v4 f5 v5
r hpexpire myhash1 1 NX 2 f2 f4
after 10
assert_equal [lsort [r hgetall myhash1]] "f1 f3 f5 v1 v3 v5"
# Test with large hash
r del myhash
for {set i 1} {$i <= 600} {incr i} {
r hset myhash f$i v$i
if {$i > 3} { r hpexpire myhash 1 NX 1 f$i }
}
after 10
assert_equal [lsort [r hgetall myhash]] [lsort "f1 f2 f3 v1 v2 v3"]
r debug set-active-expire 1
}
test {Test RENAME hash with fields to be expired} {
r debug set-active-expire 0
r del myhash
r hset myhash field1 value1
r hpexpire myhash 20 NX 1 field1
r rename myhash myhash2
assert_equal [r exists myhash] 0
assert_range [r hpttl myhash2 1 field1] 1 20
after 25
# Verify the renamed key exists
assert_equal [r exists myhash2] 1
r debug set-active-expire 1
# Only active expire will delete the key
wait_for_condition 30 10 { [r exists myhash2] == 0 } else { fail "`myhash2` should be expired" }
}
test {MOVE to another DB hash with fields to be expired} {
r select 9
r flushall
r hset myhash field1 value1
r hpexpire myhash 100 NX 1 field1
r move myhash 10
assert_equal [r exists myhash] 0
assert_equal [r dbsize] 0
# Verify the key and its field exists in the target DB
r select 10
assert_equal [r hget myhash field1] "value1"
assert_equal [r exists myhash] 1
# Eventually the field will be expired and the key will be deleted
wait_for_condition 40 10 { [r hget myhash field1] == "" } else { fail "`field1` should be expired" }
wait_for_condition 40 10 { [r exists myhash] == 0 } else { fail "db should be empty" }
} {} {singledb:skip}
test {Test COPY hash with fields to be expired} {
r flushall
r hset h1 f1 v1 f2 v2
r hset h2 f1 v1 f2 v2 f3 v3 f4 v4 f5 v5 f6 v6 f7 v7 f8 v8 f9 v9 f10 v10 f11 v11 f12 v12 f13 v13 f14 v14 f15 v15 f16 v16 f17 v17 f18 v18
r hpexpire h1 100 NX 1 f1
r hpexpire h2 100 NX 18 f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13 f14 f15 f16 f17 f18
r COPY h1 h1copy
r COPY h2 h2copy
assert_equal [r hget h1 f1] "v1"
assert_equal [r hget h1copy f1] "v1"
assert_equal [r exists h2] 1
assert_equal [r exists h2copy] 1
after 105
# Verify lazy expire of field in h1 and its copy
assert_equal [r hget h1 f1] ""
assert_equal [r hget h1copy f1] ""
# Verify lazy expire of field in h2 and its copy. Verify the key deleted as well.
wait_for_condition 40 10 { [r exists h2] == 0 } else { fail "`h2` should be expired" }
wait_for_condition 40 10 { [r exists h2copy] == 0 } else { fail "`h2copy` should be expired" }
} {} {singledb:skip}
test {Test SWAPDB hash-fields to be expired} {
r select 9
r flushall
r hset myhash field1 value1
r hpexpire myhash 50 NX 1 field1
r swapdb 9 10
# Verify the key and its field don't exist in the source DB
assert_equal [r exists myhash] 0
assert_equal [r dbsize] 0
# Verify the key and its field exists in the target DB
r select 10
assert_equal [r hget myhash field1] "value1"
assert_equal [r dbsize] 1
# Eventually the field will be expired and the key will be deleted
wait_for_condition 20 10 { [r exists myhash] == 0 } else { fail "'myhash' should be expired" }
} {} {singledb:skip}
test {HPERSIST - input validation} {
# HPERSIST key <num-fields> <field [field ...]>
r del myhash
r hset myhash f1 v1 f2 v2
r hexpire myhash 1000 NX 1 f1
assert_error {*wrong number of arguments*} {r hpersist myhash}
assert_error {*wrong number of arguments*} {r hpersist myhash 1}
assert_equal [r hpersist not-exists-key 1 f1] {}
assert_equal [r hpersist myhash 2 f1 not-exists-field] "$P_OK $P_NO_FIELD"
assert_equal [r hpersist myhash 1 f2] "$P_NO_EXPIRY"
}
test {HPERSIST - verify fields with TTL are persisted} {
r del myhash
r hset myhash f1 v1 f2 v2
r hexpire myhash 20 NX 2 f1 f2
r hpersist myhash 2 f1 f2
after 25
assert_equal [r hget myhash f1] "v1"
assert_equal [r hget myhash f2] "v2"
assert_equal [r HTTL myhash 2 f1 f2] "$T_NO_EXPIRY $T_NO_EXPIRY"
}
r config set hash-max-listpack-entries 1
}