From eca0187370c14aa2c126fe07e5310e44c2780a95 Mon Sep 17 00:00:00 2001 From: James Rouzier Date: Sat, 19 Sep 2015 14:01:10 -0400 Subject: [PATCH 01/59] If the unit of a timeout is seconds treat it a float --- src/blocked.c | 15 +++++++++++---- tests/unit/type/list.tcl | 7 +++++-- 2 files changed, 16 insertions(+), 6 deletions(-) diff --git a/src/blocked.c b/src/blocked.c index d22872548..fc7106395 100644 --- a/src/blocked.c +++ b/src/blocked.c @@ -75,10 +75,18 @@ * is zero. */ int getTimeoutFromObjectOrReply(client *c, robj *object, mstime_t *timeout, int unit) { long long tval; + long double ftval; - if (getLongLongFromObjectOrReply(c,object,&tval, - "timeout is not an integer or out of range") != C_OK) - return C_ERR; + if (unit == UNIT_SECONDS) { + if (getLongDoubleFromObjectOrReply(c,object,&ftval, + "timeout is not an float or out of range") != C_OK) + return C_ERR; + tval = (long long) (ftval * 1000.0); + } else { + if (getLongLongFromObjectOrReply(c,object,&tval, + "timeout is not an integer or out of range") != C_OK) + return C_ERR; + } if (tval < 0) { addReplyError(c,"timeout is negative"); @@ -86,7 +94,6 @@ int getTimeoutFromObjectOrReply(client *c, robj *object, mstime_t *timeout, int } if (tval > 0) { - if (unit == UNIT_SECONDS) tval *= 1000; tval += mstime(); } *timeout = tval; diff --git a/tests/unit/type/list.tcl b/tests/unit/type/list.tcl index e4d568cf1..8daa30a77 100644 --- a/tests/unit/type/list.tcl +++ b/tests/unit/type/list.tcl @@ -436,8 +436,11 @@ start_server { test "$pop: with non-integer timeout" { set rd [redis_deferring_client] - $rd $pop blist1 1.1 - assert_error "ERR*not an integer*" {$rd read} + r del blist1 + $rd $pop blist1 0.1 + r rpush blist1 foo + assert_equal {blist1 foo} [$rd read] + assert_equal 0 [r exists blist1] } test "$pop: with zero timeout should block indefinitely" { From b660fc2fbe545f4a20a907ffa6c8333396435907 Mon Sep 17 00:00:00 2001 From: Guy Benoish Date: Wed, 7 Mar 2018 10:40:37 +0700 Subject: [PATCH 02/59] Fix 
zlexrangespec mem-leak in genericZrangebylexCommand --- src/t_zset.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/t_zset.c b/src/t_zset.c index f7f4c6eb2..fa7793b15 100644 --- a/src/t_zset.c +++ b/src/t_zset.c @@ -2856,7 +2856,10 @@ void genericZrangebylexCommand(client *c, int reverse) { while (remaining) { if (remaining >= 3 && !strcasecmp(c->argv[pos]->ptr,"limit")) { if ((getLongFromObjectOrReply(c, c->argv[pos+1], &offset, NULL) != C_OK) || - (getLongFromObjectOrReply(c, c->argv[pos+2], &limit, NULL) != C_OK)) return; + (getLongFromObjectOrReply(c, c->argv[pos+2], &limit, NULL) != C_OK)) { + zslFreeLexRange(&range); + return; + } pos += 3; remaining -= 3; } else { zslFreeLexRange(&range); From 8c8e85df874c852b5f125209e9d662a70e310f66 Mon Sep 17 00:00:00 2001 From: Guy Benoish Date: Wed, 18 Apr 2018 13:01:53 +0300 Subject: [PATCH 03/59] Use memtoll() in 'CONFIG SET client-output-buffer-limit' --- src/config.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/config.c b/src/config.c index a1122d059..a33981c63 100644 --- a/src/config.c +++ b/src/config.c @@ -983,8 +983,8 @@ void configSetCommand(client *c) { int soft_seconds; class = getClientTypeByName(v[j]); - hard = strtoll(v[j+1],NULL,10); - soft = strtoll(v[j+2],NULL,10); + hard = memtoll(v[j+1],NULL); + soft = memtoll(v[j+2],NULL); soft_seconds = strtoll(v[j+3],NULL,10); server.client_obuf_limits[class].hard_limit_bytes = hard; From ed88f77d6dcd36e0c62faa484491530bd6739d38 Mon Sep 17 00:00:00 2001 From: Guy Benoish Date: Thu, 13 Dec 2018 13:57:38 +0100 Subject: [PATCH 04/59] Check server.verbosity in RM_LogRaw --- src/module.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/module.c b/src/module.c index 20d159d33..e553bc0d8 100644 --- a/src/module.c +++ b/src/module.c @@ -3421,6 +3421,8 @@ void RM_LogRaw(RedisModule *module, const char *levelstr, const char *fmt, va_li else if (!strcasecmp(levelstr,"warning")) level = LL_WARNING; else level = 
LL_VERBOSE; /* Default. */ + if (level < server.verbosity) return; + name_len = snprintf(msg, sizeof(msg),"<%s> ", module->name); vsnprintf(msg + name_len, sizeof(msg) - name_len, fmt, ap); serverLogRaw(level,msg); From ab37289fa6035a774d7438f8a7342d3177fdc1be Mon Sep 17 00:00:00 2001 From: MeirShpilraien Date: Sun, 9 Dec 2018 14:32:55 +0200 Subject: [PATCH 05/59] added module ability to register api to be used by other modules --- src/module.c | 115 ++++++++++++++++++++++++++++++++++++++++++++++ src/redismodule.h | 4 ++ src/server.h | 1 + 3 files changed, 120 insertions(+) diff --git a/src/module.c b/src/module.c index 20d159d33..d177af24a 100644 --- a/src/module.c +++ b/src/module.c @@ -47,7 +47,16 @@ struct RedisModule { int ver; /* Module version. We use just progressive integers. */ int apiver; /* Module API version as requested during initialization.*/ list *types; /* Module data types. */ + list *usedBy; /* list of modules names using this module api. */ + list *using; /* list of modules names that this module is using thier api . */ + list *exportedFunctions; /* list of function names exported by this module. */ }; + +struct ModuleExportedApi { + void* funcPointer; + struct RedisModule* module; +}; + typedef struct RedisModule RedisModule; static dict *modules; /* Hash table of modules. SDS -> RedisModule ptr.*/ @@ -700,6 +709,9 @@ void RM_SetModuleAttribs(RedisModuleCtx *ctx, const char *name, int ver, int api module->ver = ver; module->apiver = apiver; module->types = listCreate(); + module->usedBy = listCreate(); + module->using = listCreate(); + module->exportedFunctions = listCreate(); ctx->module = module; } @@ -4615,6 +4627,59 @@ void RM_GetRandomHexChars(char *dst, size_t len) { getRandomHexChars(dst,len); } +/* Used to register an api to be used by other modules. 
*/ +int RM_RegisterApi(RedisModuleCtx *ctx, const char *funcname, void *funcptr) { + struct ModuleExportedApi* eapi = zmalloc(sizeof(*eapi)); + eapi->module = ctx->module; + eapi->funcPointer = funcptr; + if(dictAdd(server.exportedapi, (char*)funcname, eapi) != DICT_OK){ + zfree(eapi); + return REDISMODULE_ERR; + } + listAddNodeHead(ctx->module->exportedFunctions, (char*)funcname); + return REDISMODULE_OK; +} + +static inline int IsModuleInList(list *l, const char* moduleName){ + listIter *iter = listGetIterator(l, AL_START_HEAD); + listNode *node = NULL; + while((node = listNext(iter))){ + char* name = listNodeValue(node); + if(strcmp(name, moduleName) == 0){ + listReleaseIterator(iter); + return 1; + } + } + listReleaseIterator(iter); + return 0; +} + +static inline void RemoveFromList(list *l, const char* moduleName){ + listIter *iter = listGetIterator(l, AL_START_HEAD); + listNode *node = NULL; + while((node = listNext(iter))){ + char* name = listNodeValue(node); + if(strcmp(name, moduleName) == 0){ + listDelNode(l, node); + return; + } + } + listReleaseIterator(iter); +} + +void* RM_GetExportedApi(RedisModuleCtx *ctx, const char *funcname) { + dictEntry* entry = dictFind(server.exportedapi, funcname); + if(!entry){ + return NULL; + } + struct ModuleExportedApi* eapi = dictGetVal(entry); + if(!IsModuleInList(eapi->module->usedBy, ctx->module->name)){ + listAddNodeHead(eapi->module->usedBy, ctx->module->name); + listAddNodeHead(ctx->module->using, eapi->module->name); + } + return eapi->funcPointer; +} + /* -------------------------------------------------------------------------- * Modules API internals * -------------------------------------------------------------------------- */ @@ -4735,6 +4800,28 @@ void moduleUnregisterCommands(struct RedisModule *module) { dictReleaseIterator(di); } +void moduleUnregisterApi(struct RedisModule *module) { + listIter *iter = listGetIterator(module->exportedFunctions, AL_START_HEAD); + listNode* node = NULL; + while((node = 
listNext(iter))){ + char* functionName = listNodeValue(node); + struct ModuleExportedApi* eapi = dictFetchValue(server.exportedapi, functionName); + serverAssert(eapi); + zfree(eapi); + dictDelete(server.exportedapi, functionName); + } + listReleaseIterator(iter); + iter = listGetIterator(module->using, AL_START_HEAD); + node = NULL; + while((node = listNext(iter))){ + char* moduleName = listNodeValue(node); + struct RedisModule* usingModule = dictFetchValue(modules, moduleName); + serverAssert(usingModule); + RemoveFromList(usingModule->usedBy, module->name); + } + listReleaseIterator(iter); +} + /* Load a module and initialize it. On success C_OK is returned, otherwise * C_ERR is returned. */ int moduleLoad(const char *path, void **module_argv, int module_argc) { @@ -4794,6 +4881,12 @@ int moduleUnload(sds name) { return REDISMODULE_ERR; } + if (listLength(module->usedBy)) { + errno = EPERM; + return REDISMODULE_ERR; + } + + moduleUnregisterApi(module); moduleUnregisterCommands(module); /* Remove any notification subscribers this module might have */ @@ -4826,6 +4919,7 @@ void moduleCommand(client *c) { if (c->argc == 2 && !strcasecmp(subcmd,"help")) { const char *help[] = { "LIST -- Return a list of loaded modules.", +"LISTAPI -- Return a list of exported api.", "LOAD [arg ...] 
-- Load a module library from .", "UNLOAD -- Unload a module.", NULL @@ -4858,6 +4952,9 @@ NULL case EBUSY: errmsg = "the module exports one or more module-side data types, can't unload"; break; + case EPERM: + errmsg = "the module api is used by other modules, please unload them first and try again."; + break; default: errmsg = "operation not possible."; break; @@ -4879,6 +4976,21 @@ NULL addReplyLongLong(c,module->ver); } dictReleaseIterator(di); + } else if (!strcasecmp(subcmd,"listapi") && c->argc == 3) { + char *moduleName = c->argv[2]->ptr; + struct RedisModule* module = dictFetchValue(modules, moduleName); + if(!module){ + addReplyErrorFormat(c,"Error listing module api: no such module %s", moduleName); + return; + } + addReplyMultiBulkLen(c,listLength(module->exportedFunctions)); + listIter *iter = listGetIterator(module->exportedFunctions, AL_START_HEAD); + listNode* node = NULL; + while((node = listNext(iter))){ + char* functionName = listNodeValue(node); + addReplyBulkCString(c,functionName); + } + listReleaseIterator(iter); } else { addReplySubcommandSyntaxError(c); return; @@ -4894,6 +5006,7 @@ size_t moduleCount(void) { * file so that's easy to seek it to add new entries. 
*/ void moduleRegisterCoreAPI(void) { server.moduleapi = dictCreate(&moduleAPIDictType,NULL); + server.exportedapi = dictCreate(&moduleAPIDictType,NULL); REGISTER_API(Alloc); REGISTER_API(Calloc); REGISTER_API(Realloc); @@ -5044,4 +5157,6 @@ void moduleRegisterCoreAPI(void) { REGISTER_API(DictPrev); REGISTER_API(DictCompareC); REGISTER_API(DictCompare); + REGISTER_API(RegisterApi); + REGISTER_API(GetExportedApi); } diff --git a/src/redismodule.h b/src/redismodule.h index d18c38881..3c76fa02b 100644 --- a/src/redismodule.h +++ b/src/redismodule.h @@ -332,6 +332,8 @@ void REDISMODULE_API_FUNC(RedisModule_GetRandomBytes)(unsigned char *dst, size_t void REDISMODULE_API_FUNC(RedisModule_GetRandomHexChars)(char *dst, size_t len); void REDISMODULE_API_FUNC(RedisModule_SetDisconnectCallback)(RedisModuleBlockedClient *bc, RedisModuleDisconnectFunc callback); void REDISMODULE_API_FUNC(RedisModule_SetClusterFlags)(RedisModuleCtx *ctx, uint64_t flags); +int REDISMODULE_API_FUNC(RedisModule_RegisterApi)(RedisModuleCtx *ctx, const char *funcname, void *funcptr); +void* REDISMODULE_API_FUNC(RedisModule_GetExportedApi)(RedisModuleCtx *ctx, const char *funcname); #endif /* This is included inline inside each Redis module. */ @@ -492,6 +494,8 @@ static int RedisModule_Init(RedisModuleCtx *ctx, const char *name, int ver, int REDISMODULE_GET_API(GetRandomBytes); REDISMODULE_GET_API(GetRandomHexChars); REDISMODULE_GET_API(SetClusterFlags); + REDISMODULE_GET_API(RegisterApi); + REDISMODULE_GET_API(GetExportedApi); #endif if (RedisModule_IsModuleNameBusy && RedisModule_IsModuleNameBusy(name)) return REDISMODULE_ERR; diff --git a/src/server.h b/src/server.h index da4c6d45a..379cda058 100644 --- a/src/server.h +++ b/src/server.h @@ -955,6 +955,7 @@ struct redisServer { int always_show_logo; /* Show logo even for non-stdout logging. */ /* Modules */ dict *moduleapi; /* Exported APIs dictionary for modules. */ + dict *exportedapi; /* Api exported by other modules. 
*/ list *loadmodule_queue; /* List of modules to load at startup. */ int module_blocked_pipe[2]; /* Pipe used to awake the event loop if a client blocked on a module command needs From 850b64c1166a1c36e9aa1b12a265b49982d776a0 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 20 Dec 2018 17:56:38 +0100 Subject: [PATCH 06/59] Revert shared APIs to modify the design. --- src/module.c | 115 ---------------------------------------------- src/redismodule.h | 4 -- src/server.h | 1 - 3 files changed, 120 deletions(-) diff --git a/src/module.c b/src/module.c index d177af24a..20d159d33 100644 --- a/src/module.c +++ b/src/module.c @@ -47,16 +47,7 @@ struct RedisModule { int ver; /* Module version. We use just progressive integers. */ int apiver; /* Module API version as requested during initialization.*/ list *types; /* Module data types. */ - list *usedBy; /* list of modules names using this module api. */ - list *using; /* list of modules names that this module is using thier api . */ - list *exportedFunctions; /* list of function names exported by this module. */ }; - -struct ModuleExportedApi { - void* funcPointer; - struct RedisModule* module; -}; - typedef struct RedisModule RedisModule; static dict *modules; /* Hash table of modules. SDS -> RedisModule ptr.*/ @@ -709,9 +700,6 @@ void RM_SetModuleAttribs(RedisModuleCtx *ctx, const char *name, int ver, int api module->ver = ver; module->apiver = apiver; module->types = listCreate(); - module->usedBy = listCreate(); - module->using = listCreate(); - module->exportedFunctions = listCreate(); ctx->module = module; } @@ -4627,59 +4615,6 @@ void RM_GetRandomHexChars(char *dst, size_t len) { getRandomHexChars(dst,len); } -/* Used to register an api to be used by other modules. 
*/ -int RM_RegisterApi(RedisModuleCtx *ctx, const char *funcname, void *funcptr) { - struct ModuleExportedApi* eapi = zmalloc(sizeof(*eapi)); - eapi->module = ctx->module; - eapi->funcPointer = funcptr; - if(dictAdd(server.exportedapi, (char*)funcname, eapi) != DICT_OK){ - zfree(eapi); - return REDISMODULE_ERR; - } - listAddNodeHead(ctx->module->exportedFunctions, (char*)funcname); - return REDISMODULE_OK; -} - -static inline int IsModuleInList(list *l, const char* moduleName){ - listIter *iter = listGetIterator(l, AL_START_HEAD); - listNode *node = NULL; - while((node = listNext(iter))){ - char* name = listNodeValue(node); - if(strcmp(name, moduleName) == 0){ - listReleaseIterator(iter); - return 1; - } - } - listReleaseIterator(iter); - return 0; -} - -static inline void RemoveFromList(list *l, const char* moduleName){ - listIter *iter = listGetIterator(l, AL_START_HEAD); - listNode *node = NULL; - while((node = listNext(iter))){ - char* name = listNodeValue(node); - if(strcmp(name, moduleName) == 0){ - listDelNode(l, node); - return; - } - } - listReleaseIterator(iter); -} - -void* RM_GetExportedApi(RedisModuleCtx *ctx, const char *funcname) { - dictEntry* entry = dictFind(server.exportedapi, funcname); - if(!entry){ - return NULL; - } - struct ModuleExportedApi* eapi = dictGetVal(entry); - if(!IsModuleInList(eapi->module->usedBy, ctx->module->name)){ - listAddNodeHead(eapi->module->usedBy, ctx->module->name); - listAddNodeHead(ctx->module->using, eapi->module->name); - } - return eapi->funcPointer; -} - /* -------------------------------------------------------------------------- * Modules API internals * -------------------------------------------------------------------------- */ @@ -4800,28 +4735,6 @@ void moduleUnregisterCommands(struct RedisModule *module) { dictReleaseIterator(di); } -void moduleUnregisterApi(struct RedisModule *module) { - listIter *iter = listGetIterator(module->exportedFunctions, AL_START_HEAD); - listNode* node = NULL; - while((node = 
listNext(iter))){ - char* functionName = listNodeValue(node); - struct ModuleExportedApi* eapi = dictFetchValue(server.exportedapi, functionName); - serverAssert(eapi); - zfree(eapi); - dictDelete(server.exportedapi, functionName); - } - listReleaseIterator(iter); - iter = listGetIterator(module->using, AL_START_HEAD); - node = NULL; - while((node = listNext(iter))){ - char* moduleName = listNodeValue(node); - struct RedisModule* usingModule = dictFetchValue(modules, moduleName); - serverAssert(usingModule); - RemoveFromList(usingModule->usedBy, module->name); - } - listReleaseIterator(iter); -} - /* Load a module and initialize it. On success C_OK is returned, otherwise * C_ERR is returned. */ int moduleLoad(const char *path, void **module_argv, int module_argc) { @@ -4881,12 +4794,6 @@ int moduleUnload(sds name) { return REDISMODULE_ERR; } - if (listLength(module->usedBy)) { - errno = EPERM; - return REDISMODULE_ERR; - } - - moduleUnregisterApi(module); moduleUnregisterCommands(module); /* Remove any notification subscribers this module might have */ @@ -4919,7 +4826,6 @@ void moduleCommand(client *c) { if (c->argc == 2 && !strcasecmp(subcmd,"help")) { const char *help[] = { "LIST -- Return a list of loaded modules.", -"LISTAPI -- Return a list of exported api.", "LOAD [arg ...] 
-- Load a module library from .", "UNLOAD -- Unload a module.", NULL @@ -4952,9 +4858,6 @@ NULL case EBUSY: errmsg = "the module exports one or more module-side data types, can't unload"; break; - case EPERM: - errmsg = "the module api is used by other modules, please unload them first and try again."; - break; default: errmsg = "operation not possible."; break; @@ -4976,21 +4879,6 @@ NULL addReplyLongLong(c,module->ver); } dictReleaseIterator(di); - } else if (!strcasecmp(subcmd,"listapi") && c->argc == 3) { - char *moduleName = c->argv[2]->ptr; - struct RedisModule* module = dictFetchValue(modules, moduleName); - if(!module){ - addReplyErrorFormat(c,"Error listing module api: no such module %s", moduleName); - return; - } - addReplyMultiBulkLen(c,listLength(module->exportedFunctions)); - listIter *iter = listGetIterator(module->exportedFunctions, AL_START_HEAD); - listNode* node = NULL; - while((node = listNext(iter))){ - char* functionName = listNodeValue(node); - addReplyBulkCString(c,functionName); - } - listReleaseIterator(iter); } else { addReplySubcommandSyntaxError(c); return; @@ -5006,7 +4894,6 @@ size_t moduleCount(void) { * file so that's easy to seek it to add new entries. 
*/ void moduleRegisterCoreAPI(void) { server.moduleapi = dictCreate(&moduleAPIDictType,NULL); - server.exportedapi = dictCreate(&moduleAPIDictType,NULL); REGISTER_API(Alloc); REGISTER_API(Calloc); REGISTER_API(Realloc); @@ -5157,6 +5044,4 @@ void moduleRegisterCoreAPI(void) { REGISTER_API(DictPrev); REGISTER_API(DictCompareC); REGISTER_API(DictCompare); - REGISTER_API(RegisterApi); - REGISTER_API(GetExportedApi); } diff --git a/src/redismodule.h b/src/redismodule.h index 3c76fa02b..d18c38881 100644 --- a/src/redismodule.h +++ b/src/redismodule.h @@ -332,8 +332,6 @@ void REDISMODULE_API_FUNC(RedisModule_GetRandomBytes)(unsigned char *dst, size_t void REDISMODULE_API_FUNC(RedisModule_GetRandomHexChars)(char *dst, size_t len); void REDISMODULE_API_FUNC(RedisModule_SetDisconnectCallback)(RedisModuleBlockedClient *bc, RedisModuleDisconnectFunc callback); void REDISMODULE_API_FUNC(RedisModule_SetClusterFlags)(RedisModuleCtx *ctx, uint64_t flags); -int REDISMODULE_API_FUNC(RedisModule_RegisterApi)(RedisModuleCtx *ctx, const char *funcname, void *funcptr); -void* REDISMODULE_API_FUNC(RedisModule_GetExportedApi)(RedisModuleCtx *ctx, const char *funcname); #endif /* This is included inline inside each Redis module. */ @@ -494,8 +492,6 @@ static int RedisModule_Init(RedisModuleCtx *ctx, const char *name, int ver, int REDISMODULE_GET_API(GetRandomBytes); REDISMODULE_GET_API(GetRandomHexChars); REDISMODULE_GET_API(SetClusterFlags); - REDISMODULE_GET_API(RegisterApi); - REDISMODULE_GET_API(GetExportedApi); #endif if (RedisModule_IsModuleNameBusy && RedisModule_IsModuleNameBusy(name)) return REDISMODULE_ERR; diff --git a/src/server.h b/src/server.h index 379cda058..da4c6d45a 100644 --- a/src/server.h +++ b/src/server.h @@ -955,7 +955,6 @@ struct redisServer { int always_show_logo; /* Show logo even for non-stdout logging. */ /* Modules */ dict *moduleapi; /* Exported APIs dictionary for modules. */ - dict *exportedapi; /* Api exported by other modules. 
*/ list *loadmodule_queue; /* List of modules to load at startup. */ int module_blocked_pipe[2]; /* Pipe used to awake the event loop if a client blocked on a module command needs From 27f6e9bb9b6614bf4e49d9c53087f21de09cdb1a Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 20 Dec 2018 12:06:24 +0100 Subject: [PATCH 07/59] Modules shared API: initial core functions. Based on ideas and code in PR #5560 by @MeirShpilraien. --- src/module.c | 86 ++++++++++++++++++++++++++++++++++++++++++++++++++++ src/server.h | 4 ++- 2 files changed, 89 insertions(+), 1 deletion(-) diff --git a/src/module.c b/src/module.c index 20d159d33..2914b5903 100644 --- a/src/module.c +++ b/src/module.c @@ -47,9 +47,21 @@ struct RedisModule { int ver; /* Module version. We use just progressive integers. */ int apiver; /* Module API version as requested during initialization.*/ list *types; /* Module data types. */ + list *usedby; /* List of modules using APIs from this one. */ + list *using; /* List of modules we use some APIs of. */ }; typedef struct RedisModule RedisModule; +/* This represents a shared API. Shared APIs will be used to populate + * the server.sharedapi dictionary, mapping names of APIs exported by + * modules for other modules to use, to their structure specifying the + * function pointer that can be called. */ +struct RedisModuleSharedAPI { + void *func; + RedisModule *module; +}; +typedef struct RedisModuleSharedAPI RedisModuleSharedAPI; + static dict *modules; /* Hash table of modules. 
SDS -> RedisModule ptr.*/ /* Entries in the context->amqueue array, representing objects to free @@ -700,6 +712,8 @@ void RM_SetModuleAttribs(RedisModuleCtx *ctx, const char *name, int ver, int api module->ver = ver; module->apiver = apiver; module->types = listCreate(); + module->usedby = listCreate(); + module->using = listCreate(); ctx->module = module; } @@ -4615,6 +4629,77 @@ void RM_GetRandomHexChars(char *dst, size_t len) { getRandomHexChars(dst,len); } +/* -------------------------------------------------------------------------- + * Modules API exporting / importing + * -------------------------------------------------------------------------- */ + +/* This function is called by a module in order to export some API with a + * given name. Other modules will be able to use this API by calling the + * symmetrical function RM_GetSharedAPI() and casting the return value to + * the right function pointer. + * + * The function will return REDISMODULE_OK if the name is not already taken, + * otherwise REDISMODULE_ERR will be returned and no operation will be + * performed. + * + * IMPORTANT: the apiname argument should be a string literal with static + * lifetime. The API relies on the fact that it will always be valid in + * the future. */ +int RM_ExportSharedAPI(RedisModuleCtx *ctx, const char *apiname, void *func) { + RedisModuleSharedAPI *sapi = zmalloc(sizeof(*sapi)); + sapi->module = ctx->module; + sapi->func = func; + if (dictAdd(server.sharedapi, (char*)apiname, sapi) != DICT_OK) { + zfree(sapi); + return REDISMODULE_ERR; + } + return REDISMODULE_OK; +} + +/* Request an exported API pointer. The return value is just a void pointer + * that the caller of this function will be required to cast to the right + * function pointer, so this is a private contract between modules. + * + * If the requested API is not available then NULL is returned. 
Because + * modules can be loaded at different times with different order, this + * function calls should be put inside some module generic API registering + * step, that is called every time a module attempts to execute a + * command that requires external APIs: if some API cannot be resolved, the + * command should return an error. + * + * Here is an exmaple: + * + * int ... myCommandImplementation() { + * if (getExternalAPIs() == 0) { + * reply with an error here if we cannot have the APIs + * } + * // Use the API: + * myFunctionPointer(foo); + * } + * + * And the function registerAPI() is: + * + * int getExternalAPIs(void) { + * static int api_loaded = 0; + * if (api_loaded != 0) return 1; // APIs already resolved. + * + * myFunctionPointer = RedisModule_GetOtherModuleAPI("..."); + * if (myFunctionPointer == NULL) return 0; + * + * return 1; + * } + */ +void *RM_GetSharedAPI(RedisModuleCtx *ctx, const char *apiname) { + dictEntry *de = dictFind(server.sharedapi, apiname); + if (de == NULL) return NULL; + RedisModuleSharedAPI *sapi = dictGetVal(de); + if (listSearchKey(sapi->module->usedby,ctx->module) == NULL) { + listAddNodeTail(sapi->module->usedby,ctx->module); + listAddNodeTail(ctx->module->using,sapi->module); + } + return sapi->func; +} + /* -------------------------------------------------------------------------- * Modules API internals * -------------------------------------------------------------------------- */ @@ -4894,6 +4979,7 @@ size_t moduleCount(void) { * file so that's easy to seek it to add new entries. */ void moduleRegisterCoreAPI(void) { server.moduleapi = dictCreate(&moduleAPIDictType,NULL); + server.sharedapi = dictCreate(&moduleAPIDictType,NULL); REGISTER_API(Alloc); REGISTER_API(Calloc); REGISTER_API(Realloc); diff --git a/src/server.h b/src/server.h index da4c6d45a..3c2ecdd23 100644 --- a/src/server.h +++ b/src/server.h @@ -954,7 +954,9 @@ struct redisServer { size_t initial_memory_usage; /* Bytes used after initialization. 
*/ int always_show_logo; /* Show logo even for non-stdout logging. */ /* Modules */ - dict *moduleapi; /* Exported APIs dictionary for modules. */ + dict *moduleapi; /* Exported core APIs dictionary for modules. */ + dict *sharedapi; /* Like moduleapi but containing the APIs that + modules share with each other. */ list *loadmodule_queue; /* List of modules to load at startup. */ int module_blocked_pipe[2]; /* Pipe used to awake the event loop if a client blocked on a module command needs From 6bb8cdaebe74f9c79bb754ccd7a7f05fe8385f81 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 20 Dec 2018 17:16:39 +0100 Subject: [PATCH 08/59] Modules shared API: unregister APIs function. --- src/module.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/src/module.c b/src/module.c index 2914b5903..eafdd81f6 100644 --- a/src/module.c +++ b/src/module.c @@ -4700,6 +4700,29 @@ void *RM_GetSharedAPI(RedisModuleCtx *ctx, const char *apiname) { return sapi->func; } +/* Remove all the APIs registered by the specified module. Usually you + * want this when the module is going to be unloaded. This function + * assumes that's caller responsibility to make sure the APIs are not + * used by other modules. + * + * The number of unregistered APIs is returned. 
*/ +int moduleUnregisterSharedAPI(RedisModule *module) { + int count = 0; + dictIterator *di = dictGetSafeIterator(server.sharedapi); + dictEntry *de; + while ((de = dictNext(di)) != NULL) { + const char *apiname = dictGetKey(de); + RedisModuleSharedAPI *sapi = dictGetVal(de); + if (sapi->module == module) { + dictDelete(server.sharedapi,apiname); + zfree(sapi); + count++; + } + } + dictReleaseIterator(di); + return count; +} + /* -------------------------------------------------------------------------- * Modules API internals * -------------------------------------------------------------------------- */ @@ -4843,6 +4866,7 @@ int moduleLoad(const char *path, void **module_argv, int module_argc) { if (onload((void*)&ctx,module_argv,module_argc) == REDISMODULE_ERR) { if (ctx.module) { moduleUnregisterCommands(ctx.module); + moduleUnregisterSharedAPI(ctx.module); moduleFreeModuleStructure(ctx.module); } dlclose(handle); @@ -4880,6 +4904,7 @@ int moduleUnload(sds name) { } moduleUnregisterCommands(module); + moduleUnregisterSharedAPI(module); /* Remove any notification subscribers this module might have */ moduleUnsubscribeNotifications(module); From 9403b3d7a39ea80f95ae71f386d6949f52284426 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 20 Dec 2018 17:31:55 +0100 Subject: [PATCH 09/59] Modules shared API: prevent unloading of used modules. 
--- src/module.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/src/module.c b/src/module.c index eafdd81f6..4f0e5b126 100644 --- a/src/module.c +++ b/src/module.c @@ -4896,11 +4896,12 @@ int moduleUnload(sds name) { if (module == NULL) { errno = ENOENT; return REDISMODULE_ERR; - } - - if (listLength(module->types)) { + } else if (listLength(module->types)) { errno = EBUSY; return REDISMODULE_ERR; + } else if (listLength(module->usedby)) { + errno = EPERM; + return REDISMODULE_ERR; } moduleUnregisterCommands(module); @@ -4966,7 +4967,12 @@ NULL errmsg = "no such module with that name"; break; case EBUSY: - errmsg = "the module exports one or more module-side data types, can't unload"; + errmsg = "the module exports one or more module-side data " + "types, can't unload"; + break; + case EPERM: + errmsg = "the module exports APIs used by other modules. " + "Please unload them first and try again"; break; default: errmsg = "operation not possible."; From d3eb0028e937fe8c6b435bcb3760f676dcc0920f Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 20 Dec 2018 17:40:55 +0100 Subject: [PATCH 10/59] Modules shared API: also unregister the module as user. --- src/module.c | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/src/module.c b/src/module.c index 4f0e5b126..7bb146954 100644 --- a/src/module.c +++ b/src/module.c @@ -4723,6 +4723,27 @@ int moduleUnregisterSharedAPI(RedisModule *module) { return count; } +/* Remove the specified module as an user of APIs of ever other module. + * This is usually called when a module is unloaded. + * + * Returns the number of modules this module was using APIs from. 
*/ +int moduleUnregisterUsedAPI(RedisModule *module) { + listIter li; + listNode *ln; + int count = 0; + + listRewind(module->using,&li); + while((ln = listNext(&li))) { + RedisModule *used = ln->value; + listNode *ln = listSearchKey(used->usedby,module); + if (ln) { + listDelNode(module->using,ln); + count++; + } + } + return count; +} + /* -------------------------------------------------------------------------- * Modules API internals * -------------------------------------------------------------------------- */ @@ -4867,6 +4888,7 @@ int moduleLoad(const char *path, void **module_argv, int module_argc) { if (ctx.module) { moduleUnregisterCommands(ctx.module); moduleUnregisterSharedAPI(ctx.module); + moduleUnregisterUsedAPI(ctx.module); moduleFreeModuleStructure(ctx.module); } dlclose(handle); @@ -4906,6 +4928,7 @@ int moduleUnload(sds name) { moduleUnregisterCommands(module); moduleUnregisterSharedAPI(module); + moduleUnregisterUsedAPI(module); /* Remove any notification subscribers this module might have */ moduleUnsubscribeNotifications(module); From 8a87de130ff9389273516993f9aaec1f75cbb22a Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 20 Dec 2018 17:44:51 +0100 Subject: [PATCH 11/59] Modules shared API: export new core APIs. 
--- src/module.c | 2 ++ src/redismodule.h | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/src/module.c b/src/module.c index 7bb146954..f2582193c 100644 --- a/src/module.c +++ b/src/module.c @@ -5184,4 +5184,6 @@ void moduleRegisterCoreAPI(void) { REGISTER_API(DictPrev); REGISTER_API(DictCompareC); REGISTER_API(DictCompare); + REGISTER_API(ExportSharedAPI); + REGISTER_API(GetSharedAPI); } diff --git a/src/redismodule.h b/src/redismodule.h index d18c38881..3a7da18fe 100644 --- a/src/redismodule.h +++ b/src/redismodule.h @@ -332,6 +332,8 @@ void REDISMODULE_API_FUNC(RedisModule_GetRandomBytes)(unsigned char *dst, size_t void REDISMODULE_API_FUNC(RedisModule_GetRandomHexChars)(char *dst, size_t len); void REDISMODULE_API_FUNC(RedisModule_SetDisconnectCallback)(RedisModuleBlockedClient *bc, RedisModuleDisconnectFunc callback); void REDISMODULE_API_FUNC(RedisModule_SetClusterFlags)(RedisModuleCtx *ctx, uint64_t flags); +int REDISMODULE_API_FUNC(RedisModule_ExportSharedAPI)(RedisModuleCtx *ctx, const char *apiname, void *func); +void *REDISMODULE_API_FUNC(RedisModule_GetSharedAPI)(RedisModuleCtx *ctx, const char *apiname); #endif /* This is included inline inside each Redis module. */ @@ -492,6 +494,8 @@ static int RedisModule_Init(RedisModuleCtx *ctx, const char *name, int ver, int REDISMODULE_GET_API(GetRandomBytes); REDISMODULE_GET_API(GetRandomHexChars); REDISMODULE_GET_API(SetClusterFlags); + REDISMODULE_GET_API(ExportSharedAPI); + REDISMODULE_GET_API(GetSharedAPI); #endif if (RedisModule_IsModuleNameBusy && RedisModule_IsModuleNameBusy(name)) return REDISMODULE_ERR; From 645d44d545ec958b9f541d4b7473ec021331ab04 Mon Sep 17 00:00:00 2001 From: "zhaozhao.zz" Date: Wed, 16 Jan 2019 19:19:10 +0800 Subject: [PATCH 12/59] Streams: checkType before XGROUP CREATE Fix issue #5785, in case create group on a key is not stream. 
--- src/t_stream.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/t_stream.c b/src/t_stream.c index 1a5acac42..9676e975e 100644 --- a/src/t_stream.c +++ b/src/t_stream.c @@ -1737,14 +1737,17 @@ NULL /* Everything but the "HELP" option requires a key and group name. */ if (c->argc >= 4) { o = lookupKeyWrite(c->db,c->argv[2]); - if (o) s = o->ptr; + if (o) { + if (checkType(c,o,OBJ_STREAM)) return; + s = o->ptr; + } grpname = c->argv[3]->ptr; } /* Check for missing key/group. */ if (c->argc >= 4 && !mkstream) { /* At this point key must exist, or there is an error. */ - if (o == NULL) { + if (s == NULL) { addReplyError(c, "The XGROUP subcommand requires the key to exist. " "Note that for CREATE you may want to use the MKSTREAM " @@ -1752,8 +1755,6 @@ NULL return; } - if (checkType(c,o,OBJ_STREAM)) return; - /* Certain subcommands require the group to exist. */ if ((cg = streamLookupCG(s,grpname)) == NULL && (!strcasecmp(opt,"SETID") || @@ -1781,7 +1782,8 @@ NULL } /* Handle the MKSTREAM option now that the command can no longer fail. */ - if (s == NULL && mkstream) { + if (s == NULL) { + serverAssert(mkstream); o = createStreamObject(); dbAdd(c->db,c->argv[2],o); s = o->ptr; From 25029568358e70ac92c6048af4001f4c379ab788 Mon Sep 17 00:00:00 2001 From: Guy Benoish Date: Wed, 23 Jan 2019 11:11:57 +0200 Subject: [PATCH 13/59] ZPOP should return an empty array if COUNT=0 --- src/t_zset.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/t_zset.c b/src/t_zset.c index 0427ee887..1c3da1a28 100644 --- a/src/t_zset.c +++ b/src/t_zset.c @@ -3140,7 +3140,10 @@ void genericZpopCommand(client *c, robj **keyv, int keyc, int where, int emitkey if (countarg) { if (getLongFromObjectOrReply(c,countarg,&count,NULL) != C_OK) return; - if (count < 0) count = 1; + if (count <= 0) { + addReplyNullArray(c); + return; + } } /* Check type and break on the first error, otherwise identify candidate. 
*/ From b0c8d6c227e172ec93d9b1f1c0f0ac49f8575a8f Mon Sep 17 00:00:00 2001 From: Guy Benoish Date: Mon, 28 Jan 2019 17:58:11 +0200 Subject: [PATCH 14/59] Increase string2ld's buffer size (and fix HINCRBYFLOAT) The string representation of `long double` may take up to ~5000 chars (see PR #3745). Before this fix HINCRBYFLOAT would never overflow (since the string could not exceed 256 chars). Now it can. --- src/t_hash.c | 4 ++++ src/util.c | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/src/t_hash.c b/src/t_hash.c index d8aee6572..bc70e4051 100644 --- a/src/t_hash.c +++ b/src/t_hash.c @@ -615,6 +615,10 @@ void hincrbyfloatCommand(client *c) { } value += incr; + if (isnan(value) || isinf(value)) { + addReplyError(c,"increment would produce NaN or Infinity"); + return; + } char buf[MAX_LONG_DOUBLE_CHARS]; int len = ld2string(buf,sizeof(buf),value,1); diff --git a/src/util.c b/src/util.c index 66d599190..783bcf83b 100644 --- a/src/util.c +++ b/src/util.c @@ -447,7 +447,7 @@ int string2l(const char *s, size_t slen, long *lval) { * a double: no spaces or other characters before or after the string * representing the number are accepted. */ int string2ld(const char *s, size_t slen, long double *dp) { - char buf[256]; + char buf[MAX_LONG_DOUBLE_CHARS]; long double value; char *eptr; From bdd9a8002a6fcc93135eb4125da703b87a1959fa Mon Sep 17 00:00:00 2001 From: Guy Benoish Date: Tue, 12 Feb 2019 14:21:21 +0100 Subject: [PATCH 15/59] Trim SDS free space of retained module strings In some cases processMultibulkBuffer uses sdsMakeRoomFor to expand the querybuf, but later in some cases it uses that query buffer as is for an argv element (see "Optimization"), which means that the sds in argv may have a lot of wasted space, and then in case modules keep that argv RedisString inside their data structure, this space waste will remain for long (until restarted from rdb). 
--- src/module.c | 11 +++++++++++ src/object.c | 17 ++++++++++++----- src/sds.c | 4 ++++ src/server.h | 1 + 4 files changed, 28 insertions(+), 5 deletions(-) diff --git a/src/module.c b/src/module.c index 81982ba76..5d3451916 100644 --- a/src/module.c +++ b/src/module.c @@ -509,6 +509,17 @@ void RedisModuleCommandDispatcher(client *c) { cp->func(&ctx,(void**)c->argv,c->argc); moduleHandlePropagationAfterCommandCallback(&ctx); moduleFreeContext(&ctx); + + /* In some cases processMultibulkBuffer uses sdsMakeRoomFor to + * expand the querybuf, but later in some cases it uses that query + * buffer as is for an argv element (see "Optimization"), which means + * that the sds in argv may have a lot of wasted space, and then in case + * modules keep that argv RedisString inside their data structure, this + * space waste will remain for long (until restarted from rdb). */ + for (int i = 0; i < c->argc; i++) { + if (c->argv[i]->refcount > 1) + trimStringObjectIfNeeded(c->argv[i]); + } } /* This function returns the list of keys, with the same interface as the diff --git a/src/object.c b/src/object.c index ec0bd02ee..42c247b87 100644 --- a/src/object.c +++ b/src/object.c @@ -415,6 +415,17 @@ int isObjectRepresentableAsLongLong(robj *o, long long *llval) { } } +void trimStringObjectIfNeeded(robj *o) { + /* Optimize the SDS string inside the string object to require + * little space, in case there is more than 10% of free space + * at the end of the SDS string. */ + if (o->encoding == OBJ_ENCODING_RAW && + sdsavail(o->ptr) > sdslen(o->ptr)/10) + { + o->ptr = sdsRemoveFreeSpace(o->ptr); + } +} + /* Try to encode a string object in order to save space */ robj *tryObjectEncoding(robj *o) { long value; @@ -484,11 +495,7 @@ robj *tryObjectEncoding(robj *o) { * We do that only for relatively large strings as this branch * is only entered if the length of the string is greater than * OBJ_ENCODING_EMBSTR_SIZE_LIMIT. 
*/ - if (o->encoding == OBJ_ENCODING_RAW && - sdsavail(s) > len/10) - { - o->ptr = sdsRemoveFreeSpace(o->ptr); - } + trimStringObjectIfNeeded(o); /* Return the original object. */ return o; diff --git a/src/sds.c b/src/sds.c index 330c955e8..cd60946bd 100644 --- a/src/sds.c +++ b/src/sds.c @@ -257,8 +257,12 @@ sds sdsRemoveFreeSpace(sds s) { char type, oldtype = s[-1] & SDS_TYPE_MASK; int hdrlen, oldhdrlen = sdsHdrSize(oldtype); size_t len = sdslen(s); + size_t avail = sdsavail(s); sh = (char*)s-oldhdrlen; + /* Return ASAP if there is no space left. */ + if (avail == 0) return s; + /* Check what would be the minimum SDS header that is just good enough to * fit this string. */ type = sdsReqType(len); diff --git a/src/server.h b/src/server.h index a396e1cf7..34b4cd8d5 100644 --- a/src/server.h +++ b/src/server.h @@ -1656,6 +1656,7 @@ int compareStringObjects(robj *a, robj *b); int collateStringObjects(robj *a, robj *b); int equalStringObjects(robj *a, robj *b); unsigned long long estimateObjectIdleTime(robj *o); +void trimStringObjectIfNeeded(robj *o); #define sdsEncodedObject(objptr) (objptr->encoding == OBJ_ENCODING_RAW || objptr->encoding == OBJ_ENCODING_EMBSTR) /* Synchronous I/O with timeout */ From a16da7922895ad31e8b1a13aa5e23b3648e78aea Mon Sep 17 00:00:00 2001 From: chendianqiang Date: Fri, 1 Mar 2019 15:28:21 +0800 Subject: [PATCH 16/59] optimize cluster failover --- src/cluster.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/cluster.c b/src/cluster.c index 1a3a348b5..50a9ae687 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -3031,6 +3031,7 @@ void clusterHandleSlaveFailover(void) { if (server.cluster->mf_end) { server.cluster->failover_auth_time = mstime(); server.cluster->failover_auth_rank = 0; + clusterDoBeforeSleep(CLUSTER_TODO_HANDLE_FAILOVER); } serverLog(LL_WARNING, "Start of election delayed for %lld milliseconds " From cd2743c0e6b7702535b410db4e51f6d5e22a7d4c Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Sun, 3 Mar 2019 23:10:45 
+0200 Subject: [PATCH 17/59] Fixes BZ[REV]POP's arity --- src/server.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/server.c b/src/server.c index 9de579815..712cda1bd 100644 --- a/src/server.c +++ b/src/server.c @@ -480,11 +480,11 @@ struct redisCommand redisCommandTable[] = { "write fast @sortedset", 0,NULL,1,1,1,0,0,0}, - {"bzpopmin",bzpopminCommand,-2, + {"bzpopmin",bzpopminCommand,-3, "write no-script fast @sortedset @blocking", 0,NULL,1,-2,1,0,0,0}, - {"bzpopmax",bzpopmaxCommand,-2, + {"bzpopmax",bzpopmaxCommand,-3, "write no-script fast @sortedset @blocking", 0,NULL,1,-2,1,0,0,0}, From fb81d1b3f891746e886f44cad5df96a29e6b9f51 Mon Sep 17 00:00:00 2001 From: "zhaozhao.zz" Date: Mon, 4 Mar 2019 19:43:00 +0800 Subject: [PATCH 18/59] Fix compile warning when log aux field --- src/rdb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rdb.c b/src/rdb.c index b800ee481..52dddf210 100644 --- a/src/rdb.c +++ b/src/rdb.c @@ -1965,7 +1965,7 @@ int rdbLoadRio(rio *rdb, rdbSaveInfo *rsi, int loading_aof) { } } else if (!strcasecmp(auxkey->ptr,"redis-ver")) { serverLog(LL_NOTICE,"Loading RDB produced by version %s", - auxval->ptr); + (char*)auxval->ptr); } else if (!strcasecmp(auxkey->ptr,"ctime")) { time_t age = time(NULL)-strtol(auxval->ptr,NULL,10); if (age < 0) age = 0; From c33cb4938a6debf999a9af312498a4ab91271c81 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 5 Mar 2019 15:51:37 +0100 Subject: [PATCH 19/59] ACL: GENPASS subcommand. --- src/acl.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/acl.c b/src/acl.c index 3cca50027..d9f431f4f 100644 --- a/src/acl.c +++ b/src/acl.c @@ -1389,6 +1389,8 @@ void ACLLoadUsersAtStartup(void) { * ACL SETUSER ... acl rules ... * ACL DELUSER [...] 
* ACL GETUSER + * ACL GENPASS + * ACL WHOAMI */ void aclCommand(client *c) { char *sub = c->argv[1]->ptr; @@ -1571,6 +1573,10 @@ void aclCommand(client *c) { } dictReleaseIterator(di); setDeferredArrayLen(c,dl,arraylen); + } else if (!strcasecmp(sub,"genpass") && c->argc == 2) { + char pass[32]; /* 128 bits of actual pseudo random data. */ + getRandomHexChars(pass,sizeof(pass)); + addReplyBulkCBuffer(c,pass,sizeof(pass)); } else if (!strcasecmp(sub,"help")) { const char *help[] = { "LOAD -- Reload users from the ACL file.", @@ -1581,6 +1587,7 @@ void aclCommand(client *c) { "DELUSER [...] -- Delete a list of users.", "CAT -- List available categories.", "CAT -- List commands inside category.", +"GENPASS -- Generate a secure user password.", "WHOAMI -- Return the current connection username.", NULL }; From 93e51239ce1d3778cbe068223221365a1ce7ad89 Mon Sep 17 00:00:00 2001 From: artix Date: Wed, 6 Mar 2019 16:38:36 +0100 Subject: [PATCH 20/59] Cluster Manager: add importing/migrating nodes to backup info --- src/redis-cli.c | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/src/redis-cli.c b/src/redis-cli.c index 0e52c16be..5968ee389 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -2732,6 +2732,36 @@ static sds clusterManagerNodeGetJSON(clusterManagerNode *node, json = sdscatprintf(json, ",\n \"cluster_errors\": %lu", error_count); } + if (node->migrating_count > 0 && node->migrating != NULL) { + int i = 0; + sds migrating = sdsempty(); + for (; i < node->migrating_count; i += 2) { + sds slot = node->migrating[i]; + sds dest = node->migrating[i + 1]; + if (slot && dest) { + if (sdslen(migrating) > 0) migrating = sdscat(migrating, ","); + migrating = sdscatfmt(migrating, "\"%S\": \"%S\"", slot, dest); + } + } + if (sdslen(migrating) > 0) + json = sdscatfmt(json, ",\n \"migrating\": {%S}", migrating); + sdsfree(migrating); + } + if (node->importing_count > 0 && node->importing != NULL) { + int i = 0; + sds importing = sdsempty(); + 
for (; i < node->importing_count; i += 2) { + sds slot = node->importing[i]; + sds from = node->importing[i + 1]; + if (slot && from) { + if (sdslen(importing) > 0) importing = sdscat(importing, ","); + importing = sdscatfmt(importing, "\"%S\": \"%S\"", slot, from); + } + } + if (sdslen(importing) > 0) + json = sdscatfmt(json, ",\n \"importing\": {%S}", importing); + sdsfree(importing); + } json = sdscat(json, "\n }"); sdsfree(replicate); sdsfree(slots); From d5b24d31d73d80f27feed5334d9c79eec00afc31 Mon Sep 17 00:00:00 2001 From: Brad Solomon Date: Wed, 6 Mar 2019 21:24:45 -0500 Subject: [PATCH 21/59] Provide an uninstall target in Makefile On `make uninstall`, removes: - /usr/local/bin/redis-benchmark - /usr/local/bin/redis-check-aof - /usr/local/bin/redis-check-rdb - /usr/local/bin/redis-cli - /usr/local/bin/redis-sentinel - /usr/local/bin/redis-server (Only the src/ versions are removed in `make clean`) --- src/Makefile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/Makefile b/src/Makefile index 9da1da8d3..93cfdc28e 100644 --- a/src/Makefile +++ b/src/Makefile @@ -310,3 +310,6 @@ install: all $(REDIS_INSTALL) $(REDIS_CHECK_RDB_NAME) $(INSTALL_BIN) $(REDIS_INSTALL) $(REDIS_CHECK_AOF_NAME) $(INSTALL_BIN) @ln -sf $(REDIS_SERVER_NAME) $(INSTALL_BIN)/$(REDIS_SENTINEL_NAME) + +uninstall: + rm -f $(INSTALL_BIN)/{$(REDIS_SERVER_NAME),$(REDIS_BENCHMARK_NAME),$(REDIS_CLI_NAME),$(REDIS_CHECK_RDB_NAME),$(REDIS_CHECK_AOF_NAME),$(REDIS_SENTINEL_NAME)} From 0e963e068d15e56254987c12e5aba9162e208099 Mon Sep 17 00:00:00 2001 From: artix Date: Thu, 7 Mar 2019 11:14:03 +0100 Subject: [PATCH 22/59] Redis Benchmark: add multithread idle mode Fix issue #5891 --- src/redis-benchmark.c | 55 ++++++++++++++++++++++++++----------------- 1 file changed, 33 insertions(+), 22 deletions(-) diff --git a/src/redis-benchmark.c b/src/redis-benchmark.c index 23a02d548..89245132b 100644 --- a/src/redis-benchmark.c +++ b/src/redis-benchmark.c @@ -817,22 +817,37 @@ static void 
showLatencyReport(void) { } } -static void benchmark(char *title, char *cmd, int len) { +static void initBenchmarkThreads() { + int i; + if (config.threads) freeBenchmarkThreads(); + config.threads = zmalloc(config.num_threads * sizeof(benchmarkThread*)); + for (i = 0; i < config.num_threads; i++) { + benchmarkThread *thread = createBenchmarkThread(i); + config.threads[i] = thread; + } +} + +static void startBenchmarkThreads() { int i; + for (i = 0; i < config.num_threads; i++) { + benchmarkThread *t = config.threads[i]; + if (pthread_create(&(t->thread), NULL, execBenchmarkThread, t)){ + fprintf(stderr, "FATAL: Failed to start thread %d.\n", i); + exit(1); + } + } + for (i = 0; i < config.num_threads; i++) + pthread_join(config.threads[i]->thread, NULL); +} + +static void benchmark(char *title, char *cmd, int len) { client c; config.title = title; config.requests_issued = 0; config.requests_finished = 0; - if (config.num_threads) { - if (config.threads) freeBenchmarkThreads(); - config.threads = zmalloc(config.num_threads * sizeof(benchmarkThread*)); - for (i = 0; i < config.num_threads; i++) { - benchmarkThread *thread = createBenchmarkThread(i); - config.threads[i] = thread; - } - } + if (config.num_threads) initBenchmarkThreads(); int thread_id = config.num_threads > 0 ? 
0 : -1; c = createClient(cmd,len,NULL,thread_id); @@ -840,17 +855,7 @@ static void benchmark(char *title, char *cmd, int len) { config.start = mstime(); if (!config.num_threads) aeMain(config.el); - else { - for (i = 0; i < config.num_threads; i++) { - benchmarkThread *t = config.threads[i]; - if (pthread_create(&(t->thread), NULL, execBenchmarkThread, t)){ - fprintf(stderr, "FATAL: Failed to start thread %d.\n", i); - exit(1); - } - } - for (i = 0; i < config.num_threads; i++) - pthread_join(config.threads[i]->thread, NULL); - } + else startBenchmarkThreads(); config.totlatency = mstime()-config.start; showLatencyReport(); @@ -1546,9 +1551,15 @@ int main(int argc, const char **argv) { if (config.idlemode) { printf("Creating %d idle connections and waiting forever (Ctrl+C when done)\n", config.numclients); - c = createClient("",0,NULL,-1); /* will never receive a reply */ + int thread_id = -1, use_threads = (config.num_threads > 0); + if (use_threads) { + thread_id = 0; + initBenchmarkThreads(); + } + c = createClient("",0,NULL,thread_id); /* will never receive a reply */ createMissingClients(c); - aeMain(config.el); + if (use_threads) startBenchmarkThreads(); + else aeMain(config.el); /* and will wait for every */ } From c389ad0d52d7770f2b1b1e48608bbbd171bc5a3e Mon Sep 17 00:00:00 2001 From: artix Date: Thu, 7 Mar 2019 11:30:09 +0100 Subject: [PATCH 23/59] Redis Benchmark: fix key randomization with zero keyspacelen --- src/redis-benchmark.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/redis-benchmark.c b/src/redis-benchmark.c index 89245132b..83574f26c 100644 --- a/src/redis-benchmark.c +++ b/src/redis-benchmark.c @@ -345,7 +345,9 @@ static void randomizeClientKey(client c) { for (i = 0; i < c->randlen; i++) { char *p = c->randptr[i]+11; - size_t r = random() % config.randomkeys_keyspacelen; + size_t r = 0; + if (config.randomkeys_keyspacelen != 0) + r = random() % config.randomkeys_keyspacelen; size_t j; for (j = 0; j < 12; j++) 
{ @@ -1288,6 +1290,11 @@ int parseOptions(int argc, const char **argv) { if (config.pipeline <= 0) config.pipeline=1; } else if (!strcmp(argv[i],"-r")) { if (lastarg) goto invalid; + const char *next = argv[++i], *p = next; + if (*p == '-') { + p++; + if (*p < '0' || *p > '9') goto invalid; + } config.randomkeys = 1; config.randomkeys_keyspacelen = atoi(argv[++i]); if (config.randomkeys_keyspacelen < 0) From feb4ebff45c5eb9e6aaabe3c1391d4844a480541 Mon Sep 17 00:00:00 2001 From: Yuan Zhou Date: Thu, 7 Mar 2019 18:35:27 +0800 Subject: [PATCH 24/59] server.h: remove dead code hashTypeTryObjectEncoding() is not used now --- src/server.h | 1 - 1 file changed, 1 deletion(-) diff --git a/src/server.h b/src/server.h index 71fef492f..7c635e5fe 100644 --- a/src/server.h +++ b/src/server.h @@ -1887,7 +1887,6 @@ void setTypeConvert(robj *subject, int enc); void hashTypeConvert(robj *o, int enc); void hashTypeTryConversion(robj *subject, robj **argv, int start, int end); -void hashTypeTryObjectEncoding(robj *subject, robj **o1, robj **o2); int hashTypeExists(robj *o, sds key); int hashTypeDelete(robj *o, sds key); unsigned long hashTypeLength(const robj *o); From 0137f1a2e33b6f14d7e365adddcf6299a4014b50 Mon Sep 17 00:00:00 2001 From: "zhaozhao.zz" Date: Thu, 7 Mar 2019 22:08:04 +0800 Subject: [PATCH 25/59] try lazyfree temp set in SUNION & SDIFF --- src/t_set.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/t_set.c b/src/t_set.c index 290a83e6d..cbe55aaa4 100644 --- a/src/t_set.c +++ b/src/t_set.c @@ -1064,7 +1064,8 @@ void sunionDiffGenericCommand(client *c, robj **setkeys, int setnum, sdsfree(ele); } setTypeReleaseIterator(si); - decrRefCount(dstset); + server.lazyfree_lazy_server_del ? 
freeObjAsync(dstset) : + decrRefCount(dstset); } else { /* If we have a target key where to store the resulting set * create this key with the result set inside */ From 8fadebfcca0d514fd6949eaa72599ab5e163bd4c Mon Sep 17 00:00:00 2001 From: artix Date: Fri, 8 Mar 2019 11:05:02 +0100 Subject: [PATCH 26/59] Redis Benchmark: handle CLUSTERDOWN error --- src/redis-benchmark.c | 40 ++++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/src/redis-benchmark.c b/src/redis-benchmark.c index 83574f26c..18d5c1020 100644 --- a/src/redis-benchmark.c +++ b/src/redis-benchmark.c @@ -449,27 +449,27 @@ static void readHandler(aeEventLoop *el, int fd, void *privdata, int mask) { } } - if (config.cluster_mode && is_err && c->cluster_node && - (!strncmp(r->str,"MOVED",5) || - !strcmp(r->str,"ASK"))) - { - /* Try to update slots configuration if the key of the - * command is using the slot hash tag. */ - if (c->staglen && !fetchClusterSlotsConfiguration(c)) - exit(1); - /* - clusterNode *node = c->cluster_node; - assert(node); - if (++node->current_slot_index >= node->slots_count) { - if (config.showerrors) { - fprintf(stderr, "WARN: No more available slots in " - "node %s:%d\n", node->ip, node->port); - } - freeReplyObject(reply); - freeClient(c); - return; + /* Try to update slots configuration if reply error is + * MOVED/ASK/CLUSTERDOWN and the key(s) used by the command + * contain(s) the slot hash tag. */ + if (is_err && c->cluster_node && c->staglen) { + int fetch_slots = 0, do_wait = 0; + if (!strncmp(r->str,"MOVED",5) || !strncmp(r->str,"ASK",3)) + fetch_slots = 1; + else if (!strncmp(r->str,"CLUSTERDOWN",11)) { + /* Usually the cluster is able to recover itself after + * a CLUSTERDOWN error, so try to sleep one second + * before requesting the new configuration. 
*/ + fetch_slots = 1; + do_wait = 1; + printf("Error from server %s:%d: %s\n", + c->cluster_node->ip, + c->cluster_node->port, + r->str); } - */ + if (do_wait) sleep(1); + if (fetch_slots && !fetchClusterSlotsConfiguration(c)) + exit(1); } freeReplyObject(reply); From f1e7df4b7c0dfb8ceaaa2e844b2b29024e8dfbcc Mon Sep 17 00:00:00 2001 From: Steve Webster Date: Fri, 8 Mar 2019 17:09:11 +0000 Subject: [PATCH 27/59] Increment delivery counter on XCLAIM unless RETRYCOUNT specified The XCLAIM docs state the XCLAIM increments the delivery counter for messages. This PR makes the code match the documentation - which seems like the desired behaviour - whilst still allowing RETRYCOUNT to be specified manually. My understanding of the way streamPropagateXCLAIM() works is that this change will safely propagate to replicas since retry count is pulled directly from the streamNACK struct. Fixes #5194 --- src/t_stream.c | 8 ++++++-- tests/unit/type/stream-cgroups.tcl | 29 +++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+), 2 deletions(-) diff --git a/src/t_stream.c b/src/t_stream.c index 1a5acac42..f02b9e99b 100644 --- a/src/t_stream.c +++ b/src/t_stream.c @@ -2279,8 +2279,12 @@ void xclaimCommand(client *c) { /* Update the consumer and idle time. */ nack->consumer = consumer; nack->delivery_time = deliverytime; - /* Set the delivery attempts counter if given. */ - if (retrycount >= 0) nack->delivery_count = retrycount; + /* Set the delivery attempts counter if given, otherwise autoincrement */ + if (retrycount >= 0) { + nack->delivery_count = retrycount; + } else { + nack->delivery_count++; + } /* Add the entry in the new consumer local PEL. */ raxInsert(consumer->pel,buf,sizeof(buf),nack,NULL); /* Send the reply for this entry. 
*/ diff --git a/tests/unit/type/stream-cgroups.tcl b/tests/unit/type/stream-cgroups.tcl index 13981cc22..3a056bfab 100644 --- a/tests/unit/type/stream-cgroups.tcl +++ b/tests/unit/type/stream-cgroups.tcl @@ -195,6 +195,35 @@ start_server { assert_equal "" [lindex $reply 0] } + test {XCLAIM increments delivery count} { + # Add 3 items into the stream, and create a consumer group + r del mystream + set id1 [r XADD mystream * a 1] + set id2 [r XADD mystream * b 2] + set id3 [r XADD mystream * c 3] + r XGROUP CREATE mystream mygroup 0 + + # Client 1 reads item 1 from the stream without acknowledgements. + # Client 2 then claims pending item 1 from the PEL of client 1 + set reply [ + r XREADGROUP GROUP mygroup client1 count 1 STREAMS mystream > + ] + assert {[llength [lindex $reply 0 1 0 1]] == 2} + assert {[lindex $reply 0 1 0 1] eq {a 1}} + r debug sleep 0.2 + set reply [ + r XCLAIM mystream mygroup client2 10 $id1 + ] + assert {[llength [lindex $reply 0 1]] == 2} + assert {[lindex $reply 0 1] eq {a 1}} + + set reply [ + r XPENDING mystream mygroup - + 10 + ] + assert {[llength [lindex $reply 0]] == 4} + assert {[lindex $reply 0 3] == 2} + } + start_server {} { set master [srv -1 client] set master_host [srv -1 host] From 79660e4ff43b55fee1eaa70a27263c4cc3c905a4 Mon Sep 17 00:00:00 2001 From: Brad Solomon Date: Sat, 9 Mar 2019 10:21:15 -0500 Subject: [PATCH 28/59] Note that install_server.sh is not for Mac OSX It will fail pretty quickly since there is no -f readlink flag there. --- README.md | 2 ++ utils/install_server.sh | 3 +++ 2 files changed, 5 insertions(+) diff --git a/README.md b/README.md index 2b4eeb19b..6c9435b53 100644 --- a/README.md +++ b/README.md @@ -166,6 +166,8 @@ for Ubuntu and Debian systems: % cd utils % ./install_server.sh +_Note_: `install_server.sh` will not work on Mac OSX; it is built for Linux only. 
+ The script will ask you a few questions and will setup everything you need to run Redis properly as a background daemon that will start again on system reboots. diff --git a/utils/install_server.sh b/utils/install_server.sh index 7eb341417..8e5753bc6 100755 --- a/utils/install_server.sh +++ b/utils/install_server.sh @@ -43,6 +43,9 @@ # # /!\ This script should be run as root # +# NOTE: This script will not work on Mac OSX. +# It supports Debian and Ubuntu Linux. +# ################################################################################ die () { From 5b52bc738bcf0881d07805dc300aeee9cf555b77 Mon Sep 17 00:00:00 2001 From: John Sully Date: Sat, 9 Mar 2019 11:03:59 -0500 Subject: [PATCH 29/59] Replicas aren't allowed to run the replicaof command --- src/replication.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/replication.c b/src/replication.c index 9175bb420..26313893d 100644 --- a/src/replication.c +++ b/src/replication.c @@ -2038,6 +2038,14 @@ void replicaofCommand(client *c) { } else { long port; + if (c->flags & CLIENT_SLAVE) + { + /* If a client is already a replica they cannot run this command, + * because it involves flushing all replicas (including this client) */ + addReplyError(c, "Command is not valid when client is a replica."); + return; + } + if ((getLongFromObjectOrReply(c, c->argv[2], &port, NULL) != C_OK)) return; From 0298d3ad1833c0085ebc26baeda58dc3a511de25 Mon Sep 17 00:00:00 2001 From: wurongxin Date: Sun, 10 Mar 2019 15:30:32 +0800 Subject: [PATCH 30/59] fix a bufferoverflow bug --- src/redis-cli.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/redis-cli.c b/src/redis-cli.c index 0e52c16be..09d9cc72e 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -2268,7 +2268,7 @@ static clusterManagerNode *clusterManagerNewNode(char *ip, int port) { static sds clusterManagerGetNodeRDBFilename(clusterManagerNode *node) { assert(config.cluster_manager_command.backup_dir); sds filename = 
sdsnew(config.cluster_manager_command.backup_dir); - if (filename[sdslen(filename)] - 1 != '/') + if (filename[sdslen(filename) - 1] != '/') filename = sdscat(filename, "/"); filename = sdscatprintf(filename, "redis-node-%s-%d-%s.rdb", node->ip, node->port, node->name); From 8a46d32be2eaf07b6b2e8c3757e4d9f59cd1ab64 Mon Sep 17 00:00:00 2001 From: antirez Date: Sun, 10 Mar 2019 09:48:06 +0100 Subject: [PATCH 31/59] Make comment in #5911 stay inside 80 cols. --- src/replication.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/replication.c b/src/replication.c index 26313893d..3c30999af 100644 --- a/src/replication.c +++ b/src/replication.c @@ -2041,7 +2041,8 @@ void replicaofCommand(client *c) { if (c->flags & CLIENT_SLAVE) { /* If a client is already a replica they cannot run this command, - * because it involves flushing all replicas (including this client) */ + * because it involves flushing all replicas (including this + * client) */ addReplyError(c, "Command is not valid when client is a replica."); return; } From 468860ae18b11486f734c476bc2930de7860abe1 Mon Sep 17 00:00:00 2001 From: swilly22 Date: Mon, 11 Mar 2019 10:02:19 +0200 Subject: [PATCH 32/59] Extend REDISMODULE_CTX_FLAGS to indicate if command was sent by master --- src/module.c | 3 +++ src/redismodule.h | 3 +++ 2 files changed, 6 insertions(+) diff --git a/src/module.c b/src/module.c index 81982ba76..5ad999751 100644 --- a/src/module.c +++ b/src/module.c @@ -1391,6 +1391,9 @@ int RM_GetContextFlags(RedisModuleCtx *ctx) { flags |= REDISMODULE_CTX_FLAGS_LUA; if (ctx->client->flags & CLIENT_MULTI) flags |= REDISMODULE_CTX_FLAGS_MULTI; + /* Module command recieved from MASTER, is replicated. 
*/ + if (ctx->client->flags & CLIENT_MASTER) + flags |= REDISMODULE_CTX_FLAGS_REPLICATED; } if (server.cluster_enabled) diff --git a/src/redismodule.h b/src/redismodule.h index d18c38881..540f8e3db 100644 --- a/src/redismodule.h +++ b/src/redismodule.h @@ -85,6 +85,9 @@ #define REDISMODULE_CTX_FLAGS_OOM (1<<10) /* Less than 25% of memory available according to maxmemory. */ #define REDISMODULE_CTX_FLAGS_OOM_WARNING (1<<11) +/* The command was sent over the replication link. */ +#define REDISMODULE_CTX_FLAGS_REPLICATED (1<<12) + #define REDISMODULE_NOTIFY_GENERIC (1<<2) /* g */ #define REDISMODULE_NOTIFY_STRING (1<<3) /* $ */ From 3621223fbba8d9b5e9e5ce6348c96513e0186be2 Mon Sep 17 00:00:00 2001 From: chendianqiang Date: Tue, 12 Mar 2019 20:46:40 +0800 Subject: [PATCH 33/59] remove temp-rewriteaof-xxx.aof when interrupt aofrewrite --- src/aof.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/aof.c b/src/aof.c index 46ae58324..cafcf961c 100644 --- a/src/aof.c +++ b/src/aof.c @@ -1611,6 +1611,9 @@ void aofRemoveTempFile(pid_t childpid) { snprintf(tmpfile,256,"temp-rewriteaof-bg-%d.aof", (int) childpid); unlink(tmpfile); + + snprintf(tmpfile,256,"temp-rewriteaof-%d.aof", (int) childpid); + unlink(tmpfile); } /* Update the server.aof_current_size field explicitly using stat(2) From bdc783b472769e9881b42ed8c45583ab98a791e8 Mon Sep 17 00:00:00 2001 From: vattezhang Date: Tue, 12 Mar 2019 22:01:02 +0800 Subject: [PATCH 34/59] fix: fix the if condition in clusterManagerShowClusterInfo --- src/redis-cli.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/redis-cli.c b/src/redis-cli.c index 0e52c16be..c28dfeeee 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -2841,7 +2841,7 @@ static void clusterManagerShowClusterInfo(void) { replicas++; } redisReply *reply = CLUSTER_MANAGER_COMMAND(node, "DBSIZE"); - if (reply != NULL || reply->type == REDIS_REPLY_INTEGER) + if (reply != NULL && reply->type == REDIS_REPLY_INTEGER) dbsize = reply->integer; 
if (dbsize < 0) { char *err = ""; From 5284d67e376eee1a01eece66a6d0e3273051f359 Mon Sep 17 00:00:00 2001 From: artix Date: Tue, 12 Mar 2019 17:07:19 +0100 Subject: [PATCH 35/59] Redis Benchmark: fix possible usage of freed pointer (getRedisConfig) Fixes issue #5912 --- src/redis-benchmark.c | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/src/redis-benchmark.c b/src/redis-benchmark.c index 18d5c1020..12e9f7e41 100644 --- a/src/redis-benchmark.c +++ b/src/redis-benchmark.c @@ -247,11 +247,11 @@ static redisConfig *getRedisConfig(const char *ip, int port, c = redisConnect(ip, port); else c = redisConnectUnix(hostsocket); - if (c->err) { + if (c == NULL || c->err) { fprintf(stderr,"Could not connect to Redis at "); - if (hostsocket == NULL) - fprintf(stderr,"%s:%d: %s\n",ip,port,c->errstr); - else fprintf(stderr,"%s: %s\n",hostsocket,c->errstr); + char *err = (c != NULL ? c->errstr : ""); + if (hostsocket == NULL) fprintf(stderr,"%s:%d: %s\n",ip,port,err); + else fprintf(stderr,"%s: %s\n",hostsocket,err); goto fail; } redisAppendCommand(c, "CONFIG GET %s", "save"); @@ -276,18 +276,16 @@ static redisConfig *getRedisConfig(const char *ip, int port, case 1: cfg->appendonly = sdsnew(value); break; } } - if (reply) freeReplyObject(reply); - if (c) redisFree(c); + freeReplyObject(reply); + redisFree(c); return cfg; fail: - if (reply) freeReplyObject(reply); - if (c) redisFree(c); - zfree(cfg); fprintf(stderr, "ERROR: failed to fetch CONFIG from "); - if (c->connection_type == REDIS_CONN_TCP) - fprintf(stderr, "%s:%d\n", c->tcp.host, c->tcp.port); - else if (c->connection_type == REDIS_CONN_UNIX) - fprintf(stderr, "%s\n", c->unix_sock.path); + if (hostsocket == NULL) fprintf(stderr, "%s:%d\n", ip, port); + else fprintf(stderr, "%s\n", hostsocket); + freeReplyObject(reply); + redisFree(c); + zfree(cfg); return NULL; } static void freeRedisConfig(redisConfig *cfg) { From 7546c7d8b41358e4d2a0b71f41ca08b82fa3f8b8 Mon Sep 17 00:00:00 
2001 From: JohnSully Date: Tue, 12 Mar 2019 14:38:03 -0400 Subject: [PATCH 36/59] Update README.md --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index d3963dc85..49c6373d7 100644 --- a/README.md +++ b/README.md @@ -7,6 +7,8 @@ On the same hardware KeyDB can perform twice as many queries per second as Redis KeyDB has full compatibility with the Redis protocol, modules, and scripts. This includes full support for transactions, and atomic execution of scripts. For more information see our architecture section below. +Try our docker container: https://hub.docker.com/r/eqalpha/keydb + Why fork Redis? --------------- From dfcb227b5066275df18734ea7a6ab19212e57f1f Mon Sep 17 00:00:00 2001 From: Steve Webster Date: Tue, 12 Mar 2019 20:27:53 +0000 Subject: [PATCH 37/59] Only increment delivery count if JUSTID option is omitted --- src/t_stream.c | 5 +++-- tests/unit/type/stream-cgroups.tcl | 16 +++++++++++++++- 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/src/t_stream.c b/src/t_stream.c index f02b9e99b..7816c775c 100644 --- a/src/t_stream.c +++ b/src/t_stream.c @@ -2279,10 +2279,11 @@ void xclaimCommand(client *c) { /* Update the consumer and idle time. */ nack->consumer = consumer; nack->delivery_time = deliverytime; - /* Set the delivery attempts counter if given, otherwise autoincrement */ + /* Set the delivery attempts counter if given, otherwise + * autoincrement unless JUSTID option provided */ if (retrycount >= 0) { nack->delivery_count = retrycount; - } else { + } else if (!justid) { nack->delivery_count++; } /* Add the entry in the new consumer local PEL. 
*/ diff --git a/tests/unit/type/stream-cgroups.tcl b/tests/unit/type/stream-cgroups.tcl index 3a056bfab..34d4061c2 100644 --- a/tests/unit/type/stream-cgroups.tcl +++ b/tests/unit/type/stream-cgroups.tcl @@ -195,7 +195,7 @@ start_server { assert_equal "" [lindex $reply 0] } - test {XCLAIM increments delivery count} { + test {XCLAIM without JUSTID increments delivery count} { # Add 3 items into the stream, and create a consumer group r del mystream set id1 [r XADD mystream * a 1] @@ -222,6 +222,20 @@ start_server { ] assert {[llength [lindex $reply 0]] == 4} assert {[lindex $reply 0 3] == 2} + + # Client 3 then claims pending item 1 from the PEL of client 2 using JUSTID + r debug sleep 0.2 + set reply [ + r XCLAIM mystream mygroup client3 10 $id1 JUSTID + ] + assert {[llength $reply] == 1} + assert {[lindex $reply 0] eq $id1} + + set reply [ + r XPENDING mystream mygroup - + 10 + ] + assert {[llength [lindex $reply 0]] == 4} + assert {[lindex $reply 0 3] == 2} } start_server {} { From 89bf4db4fa41aaedac59c720e837f9e3235d5ad7 Mon Sep 17 00:00:00 2001 From: swilly22 Date: Wed, 13 Mar 2019 08:22:40 +0200 Subject: [PATCH 38/59] document additional flag of RM_GetContextFlags --- src/module.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/module.c b/src/module.c index 5ad999751..d3d122638 100644 --- a/src/module.c +++ b/src/module.c @@ -1359,6 +1359,9 @@ int RM_GetSelectedDb(RedisModuleCtx *ctx) { * * * REDISMODULE_CTX_FLAGS_MULTI: The command is running inside a transaction * + * * REDISMODULE_CTX_FLAGS_REPLICATED: The command was sent over the replication + * link by the MASTER + * * * REDISMODULE_CTX_FLAGS_MASTER: The Redis instance is a master * * * REDISMODULE_CTX_FLAGS_SLAVE: The Redis instance is a slave From 5a3d3d8be414d201abb670313f2801aeadd9ba69 Mon Sep 17 00:00:00 2001 From: "zhaozhao.zz" Date: Wed, 13 Mar 2019 16:54:34 +0800 Subject: [PATCH 39/59] Fix compile warning in redis-cli.c --- src/redis-cli.c | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/src/redis-cli.c b/src/redis-cli.c index 0e52c16be..5414de7ef 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -2726,7 +2726,7 @@ static sds clusterManagerNodeGetJSON(clusterManagerNode *node, slots, node->slots_count, flags, - node->current_epoch + (unsigned long long)node->current_epoch ); if (error_count > 0) { json = sdscatprintf(json, ",\n \"cluster_errors\": %lu", From 66f203270862a53b27b18cea8a03999be2262492 Mon Sep 17 00:00:00 2001 From: 0xflotus <0xflotus@gmail.com> Date: Wed, 13 Mar 2019 15:14:34 +0100 Subject: [PATCH 40/59] fixed guarantees --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 49c6373d7..9d89eaa5b 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ What is KeyDB? -------------- -KeyDB is a high performance fork of Redis focussing on multithreading, memory efficiency, and high throughput. In addition to multithreading KeyDB also has features only available in Redis Enterprise such as FLASH storage support, and some not available at all such as direct backup to AWS S3. +KeyDB is a high performance fork of Redis focusing on multithreading, memory efficiency, and high throughput. In addition to multithreading KeyDB also has features only available in Redis Enterprise such as FLASH storage support, and some not available at all such as direct backup to AWS S3. On the same hardware KeyDB can perform twice as many queries per second as Redis, with 60% lower latency. @@ -191,7 +191,7 @@ You'll be able to stop and start KeyDB using the script named Multithreading Architecture --------------------------- -KeyDB works by running the normal Redis event loop on multiple threads. Network IO, and query parsing are done concurrently. Each connection is assigned a thread on accept(). Access to the core hash table is guarded by spinlock. Because the hashtable access is extremely fast this lock has low contention. 
Transactions hold the lock for the duration of the EXEC command. Modules work in concert with the GIL which is only acquired when all server threads are paused. This maintains the atomicity gurantees modules expect. +KeyDB works by running the normal Redis event loop on multiple threads. Network IO, and query parsing are done concurrently. Each connection is assigned a thread on accept(). Access to the core hash table is guarded by spinlock. Because the hashtable access is extremely fast this lock has low contention. Transactions hold the lock for the duration of the EXEC command. Modules work in concert with the GIL which is only acquired when all server threads are paused. This maintains the atomicity guarantees modules expect. Unlike most databases the core data structure is the fastest part of the system. Most of the query time comes from parsing the REPL protocol and copying data to/from the network. From a45f212693956a6fb1aacf465d88e940bbbfd56f Mon Sep 17 00:00:00 2001 From: John Sully Date: Wed, 13 Mar 2019 16:53:37 -0400 Subject: [PATCH 41/59] Implement load database dumps from S3. We already save. --- src/debug.c | 2 +- src/rdb-s3.cpp | 62 ++++++++++++++++++++++++++++++++++++++++++++- src/rdb.c | 15 ++++++++++- src/rdb.h | 3 ++- src/replication.cpp | 2 +- src/server.cpp | 4 +-- 6 files changed, 81 insertions(+), 7 deletions(-) diff --git a/src/debug.c b/src/debug.c index d24c9ef9c..34e0ab22c 100644 --- a/src/debug.c +++ b/src/debug.c @@ -362,7 +362,7 @@ NULL } emptyDb(-1,EMPTYDB_NO_FLAGS,NULL); protectClient(c); - int ret = rdbLoad(server.rdb_filename,NULL); + int ret = rdbLoad(NULL); unprotectClient(c); if (ret != C_OK) { addReplyError(c,"Error trying to load the RDB dump"); diff --git a/src/rdb-s3.cpp b/src/rdb-s3.cpp index bd00bb2bd..f28bd07d5 100644 --- a/src/rdb-s3.cpp +++ b/src/rdb-s3.cpp @@ -48,4 +48,64 @@ extern "C" int rdbSaveS3(char *s3bucket, rdbSaveInfo *rsi) serverLog(LL_NOTICE,"DB saved on AWS S3"); return (status == EXIT_SUCCESS) ? 
C_OK : C_ERR; -} \ No newline at end of file +} + + +int rdbLoadS3Core(int fd, rdbSaveInfo *rsi) +{ + FILE *fp; + rio rdb; + int retval; + + if ((fp = fdopen(fd, "rb")) == NULL) return C_ERR; + startLoading(fp); + rioInitWithFile(&rdb,fileno(fp)); + retval = rdbLoadRio(&rdb,rsi,0); + fclose(fp); + stopLoading(); + return retval; +} + +int rdbLoadS3(char *s3bucket, rdbSaveInfo *rsi) +{ + int status = EXIT_FAILURE; + int fd[2]; + if (pipe(fd) != 0) + return C_ERR; + + pid_t pid = fork(); + if (pid < 0) + { + close(fd[0]); + close(fd[1]); + return C_ERR; + } + + if (pid == 0) + { + // child process + dup2(fd[1], STDOUT_FILENO); + close(fd[1]); + close(fd[0]); + execlp("aws", "aws", "s3", "cp", s3bucket, "-", nullptr); + exit(EXIT_FAILURE); + } + else + { + close(fd[1]); + if (rdbLoadS3Core(fd[0], rsi) != C_OK) + { + close(fd[0]); + return C_ERR; + } + close(fd[0]); + waitpid(pid, &status, 0); + } + + if (status != EXIT_SUCCESS) + serverLog(LL_WARNING, "Failed to load DB from AWS S3"); + else + serverLog(LL_NOTICE,"DB loaded from AWS S3"); + + return (status == EXIT_SUCCESS) ? C_OK : C_ERR; +} diff --git a/src/rdb.c b/src/rdb.c index 4a504e815..31db9d67f 100644 --- a/src/rdb.c +++ b/src/rdb.c @@ -2097,6 +2097,19 @@ int rdbLoadRio(rio *rdb, rdbSaveInfo *rsi, int loading_aof) { return C_ERR; /* Just to avoid warning */ } +int rdbLoadFile(char *filename, rdbSaveInfo *rsi); +int rdbLoad(rdbSaveInfo *rsi) +{ + int err = C_ERR; + if (server.rdb_filename != NULL) + err = rdbLoadFile(server.rdb_filename, rsi); + + if ((err == C_ERR) && server.rdb_s3bucketpath != NULL) + err = rdbLoadS3(server.rdb_s3bucketpath, rsi); + + return err; +} + /* Like rdbLoadRio() but takes a filename instead of a rio stream. The * filename is open for reading and a rio stream object created in order * to do the actual loading. 
Moreover the ETA displayed in the INFO @@ -2104,7 +2117,7 @@ int rdbLoadRio(rio *rdb, rdbSaveInfo *rsi, int loading_aof) { * * If you pass an 'rsi' structure initialied with RDB_SAVE_OPTION_INIT, the * loading code will fiil the information fields in the structure. */ -int rdbLoad(char *filename, rdbSaveInfo *rsi) { +int rdbLoadFile(char *filename, rdbSaveInfo *rsi) { FILE *fp; rio rdb; int retval; diff --git a/src/rdb.h b/src/rdb.h index 2daa49984..fcd44e742 100644 --- a/src/rdb.h +++ b/src/rdb.h @@ -135,7 +135,7 @@ uint64_t rdbLoadLen(rio *rdb, int *isencoded); int rdbLoadLenByRef(rio *rdb, int *isencoded, uint64_t *lenptr); int rdbSaveObjectType(rio *rdb, robj *o); int rdbLoadObjectType(rio *rdb); -int rdbLoad(char *filename, rdbSaveInfo *rsi); +int rdbLoad(rdbSaveInfo *rsi); int rdbSaveBackground(rdbSaveInfo *rsi); int rdbSaveToSlavesSockets(rdbSaveInfo *rsi); void rdbRemoveTempFile(pid_t childpid); @@ -143,6 +143,7 @@ int rdbSave(rdbSaveInfo *rsi); int rdbSaveFile(char *filename, rdbSaveInfo *rsi); int rdbSaveFd(int fd, rdbSaveInfo *rsi); int rdbSaveS3(char *path, rdbSaveInfo *rsi); +int rdbLoadS3(char *path, rdbSaveInfo *rsi); ssize_t rdbSaveObject(rio *rdb, robj *o); size_t rdbSavedObjectLen(robj *o); robj *rdbLoadObject(int type, rio *rdb); diff --git a/src/replication.cpp b/src/replication.cpp index 899fa3dcf..e86110320 100644 --- a/src/replication.cpp +++ b/src/replication.cpp @@ -1334,7 +1334,7 @@ void readSyncBulkPayload(aeEventLoop *el, int fd, void *privdata, int mask) { aeDeleteFileEvent(el,server.repl_transfer_s,AE_READABLE); serverLog(LL_NOTICE, "MASTER <-> REPLICA sync: Loading DB in memory"); rdbSaveInfo rsi = RDB_SAVE_INFO_INIT; - if (rdbLoad(server.rdb_filename,&rsi) != C_OK) { + if (rdbLoad(&rsi) != C_OK) { serverLog(LL_WARNING,"Failed trying to load the MASTER synchronization DB from disk"); cancelReplicationHandshake(); /* Re-enable the AOF if we disabled it earlier, in order to restore diff --git a/src/server.cpp b/src/server.cpp index 
9fd917866..b8075d20d 100644 --- a/src/server.cpp +++ b/src/server.cpp @@ -4668,9 +4668,9 @@ void loadDataFromDisk(void) { if (server.aof_state == AOF_ON) { if (loadAppendOnlyFile(server.aof_filename) == C_OK) serverLog(LL_NOTICE,"DB loaded from append only file: %.3f seconds",(float)(ustime()-start)/1000000); - } else if (server.rdb_filename != NULL) { + } else if (server.rdb_filename != NULL || server.rdb_s3bucketpath != NULL) { rdbSaveInfo rsi = RDB_SAVE_INFO_INIT; - if (rdbLoad(server.rdb_filename,&rsi) == C_OK) { + if (rdbLoad(&rsi) == C_OK) { serverLog(LL_NOTICE,"DB loaded from disk: %.3f seconds", (float)(ustime()-start)/1000000); From a3da10f4ebbc983fc1e10269a1c72fc2d146cdf0 Mon Sep 17 00:00:00 2001 From: JohnSully Date: Wed, 13 Mar 2019 16:57:40 -0400 Subject: [PATCH 42/59] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 9d89eaa5b..8aadfd126 100644 --- a/README.md +++ b/README.md @@ -41,7 +41,7 @@ If you would like to use the FLASH backed storage this option configures the dir db-s3-object /path/to/bucket -If you would like KeyDB to dump directly to AWS S3 this option specifies the bucket. Using this option with the traditional RDB options will result in KeyDB backing up twice to both locations. This requires the AWS CLI tools to be installed and configured which are used under the hood to transfer the data. +If you would like KeyDB to dump and load directly to AWS S3 this option specifies the bucket. Using this option with the traditional RDB options will result in KeyDB backing up twice to both locations. If both are specified KeyDB will first attempt to load from the local dump file and if that fails load from S3. This requires the AWS CLI tools to be installed and configured which are used under the hood to transfer the data. All other configuration options behave as you'd expect. Your existing configuration files should continue to work unchanged. 
From b2eb48df89d8513a359faa677146d3c36e6266ab Mon Sep 17 00:00:00 2001 From: Guy Benoish Date: Thu, 14 Mar 2019 12:11:16 +0100 Subject: [PATCH 43/59] Fix mismatching keyspace notification classes --- src/geo.c | 2 +- src/t_list.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/geo.c b/src/geo.c index 91a0421f5..826d11ff5 100644 --- a/src/geo.c +++ b/src/geo.c @@ -659,7 +659,7 @@ void georadiusGeneric(client *c, int flags) { zsetConvertToZiplistIfNeeded(zobj,maxelelen); setKey(c->db,storekey,zobj); decrRefCount(zobj); - notifyKeyspaceEvent(NOTIFY_LIST,"georadiusstore",storekey, + notifyKeyspaceEvent(NOTIFY_ZSET,"georadiusstore",storekey, c->db->id); server.dirty += returned_items; } else if (dbDelete(c->db,storekey)) { diff --git a/src/t_list.c b/src/t_list.c index 451ffb4b5..45d2e3317 100644 --- a/src/t_list.c +++ b/src/t_list.c @@ -520,7 +520,7 @@ void lremCommand(client *c) { if (removed) { signalModifiedKey(c->db,c->argv[1]); - notifyKeyspaceEvent(NOTIFY_GENERIC,"lrem",c->argv[1],c->db->id); + notifyKeyspaceEvent(NOTIFY_LIST,"lrem",c->argv[1],c->db->id); } if (listTypeLength(subject) == 0) { From d292a516181293b54bb7b8d25e0647ae74b5ea62 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 14 Mar 2019 12:47:36 +0100 Subject: [PATCH 44/59] Improve comments after merging #5834. 
--- src/module.c | 15 ++++++++++----- src/object.c | 7 ++++--- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/src/module.c b/src/module.c index 28a4d3e64..e69d3dc61 100644 --- a/src/module.c +++ b/src/module.c @@ -523,12 +523,17 @@ void RedisModuleCommandDispatcher(client *c) { moduleFreeContext(&ctx); /* In some cases processMultibulkBuffer uses sdsMakeRoomFor to - * expand the querybuf, but later in some cases it uses that query - * buffer as is for an argv element (see "Optimization"), which means - * that the sds in argv may have a lot of wasted space, and then in case - * modules keep that argv RedisString inside their data structure, this - * space waste will remain for long (until restarted from rdb). */ + * expand the query buffer, and in order to avoid a big object copy + * the query buffer SDS may be used directly as the SDS string backing + * the client argument vectors: sometimes this will result in the SDS + * string having unused space at the end. Later if a module takes ownership + * of the RedisString, such space will be wasted forever. Inside the + * Redis core this is not a problem because tryObjectEncoding() is called + * before storing strings in the key space. Here we need to do it + * for the module. */ for (int i = 0; i < c->argc; i++) { + /* Only do the work if the module took ownership of the object: + * in that case the refcount is no longer 1. */ if (c->argv[i]->refcount > 1) trimStringObjectIfNeeded(c->argv[i]); } diff --git a/src/object.c b/src/object.c index 42c247b87..24e99d191 100644 --- a/src/object.c +++ b/src/object.c @@ -415,10 +415,11 @@ int isObjectRepresentableAsLongLong(robj *o, long long *llval) { } } +/* Optimize the SDS string inside the string object to require little space, + * in case there is more than 10% of free space at the end of the SDS + * string. This happens because SDS strings tend to overallocate to avoid + * wasting too much time in allocations when appending to the string. 
*/ void trimStringObjectIfNeeded(robj *o) { - /* Optimize the SDS string inside the string object to require - * little space, in case there is more than 10% of free space - * at the end of the SDS string. */ if (o->encoding == OBJ_ENCODING_RAW && sdsavail(o->ptr) > sdslen(o->ptr)/10) { From 052e03495f3e6da64d814f80a5dae91721009317 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 14 Mar 2019 17:06:59 +0100 Subject: [PATCH 45/59] Fix objectSetLRUOrLFU() when LFU underflows. --- src/object.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/src/object.c b/src/object.c index 24e99d191..234e11f8a 100644 --- a/src/object.c +++ b/src/object.c @@ -1199,7 +1199,7 @@ sds getMemoryDoctorReport(void) { /* Set the object LRU/LFU depending on server.maxmemory_policy. * The lfu_freq arg is only relevant if policy is MAXMEMORY_FLAG_LFU. - * The lru_idle and lru_clock args are only relevant if policy + * The lru_idle and lru_clock args are only relevant if policy * is MAXMEMORY_FLAG_LRU. * Either or both of them may be <0, in that case, nothing is set. */ void objectSetLRUOrLFU(robj *val, long long lfu_freq, long long lru_idle, @@ -1210,16 +1210,20 @@ void objectSetLRUOrLFU(robj *val, long long lfu_freq, long long lru_idle, val->lru = (LFUGetTimeInMinutes()<<8) | lfu_freq; } } else if (lru_idle >= 0) { - /* Serialized LRU idle time is in seconds. Scale + /* Provided LRU idle time is in seconds. Scale * according to the LRU clock resolution this Redis * instance was compiled with (normally 1000 ms, so the * below statement will expand to lru_idle*1000/1000. */ lru_idle = lru_idle*1000/LRU_CLOCK_RESOLUTION; - val->lru = lru_clock - lru_idle; - /* If the lru field overflows (since LRU it is a wrapping - * clock), the best we can do is to provide the maximum - * representable idle time. */ - if (val->lru < 0) val->lru = lru_clock+1; + long lru_abs = lru_clock - lru_idle; /* Absolute access time. 
*/ + /* If the LRU field underflows (since LRU it is a wrapping + * clock), the best we can do is to provide a large enough LRU + * that is half-way in the circlular LRU clock we use: this way + * the computed idle time for this object will stay high for quite + * some time. */ + if (lru_abs < 0) + lru_abs = (lru_clock+(LRU_CLOCK_MAX/2)) % LRU_CLOCK_MAX; + val->lru = lru_abs; } } From 74d6af8f8094b6d9e2e4bb7ea4eca1941f6412c0 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 14 Mar 2019 17:51:14 +0100 Subject: [PATCH 46/59] Fix ZPOP return type when COUNT=0. Related to #5799. --- src/t_zset.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/t_zset.c b/src/t_zset.c index 0daa6d643..fb7078abd 100644 --- a/src/t_zset.c +++ b/src/t_zset.c @@ -3144,7 +3144,7 @@ void genericZpopCommand(client *c, robj **keyv, int keyc, int where, int emitkey if (getLongFromObjectOrReply(c,countarg,&count,NULL) != C_OK) return; if (count <= 0) { - addReplyNullArray(c); + addReply(c,shared.emptyarray); return; } } From a89ab4d6b0d21e44e8ce3b509d2071bebe58e809 Mon Sep 17 00:00:00 2001 From: John Sully Date: Thu, 14 Mar 2019 14:02:16 -0400 Subject: [PATCH 47/59] Fix hyperloglog corruption --- src/hyperloglog.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/hyperloglog.c b/src/hyperloglog.c index fc21ea006..e993bf26e 100644 --- a/src/hyperloglog.c +++ b/src/hyperloglog.c @@ -614,6 +614,10 @@ int hllSparseToDense(robj *o) { } else { runlen = HLL_SPARSE_VAL_LEN(p); regval = HLL_SPARSE_VAL_VALUE(p); + if ((runlen + idx) > HLL_REGISTERS) { + sdsfree(dense); + return C_ERR; + } while(runlen--) { HLL_DENSE_SET_REGISTER(hdr->registers,idx,regval); idx++; @@ -1088,6 +1092,8 @@ int hllMerge(uint8_t *max, robj *hll) { } else { runlen = HLL_SPARSE_VAL_LEN(p); regval = HLL_SPARSE_VAL_VALUE(p); + if ((runlen + i) > HLL_REGISTERS) + return C_ERR; while(runlen--) { if (regval > max[i]) max[i] = regval; i++; From a9e525c81e62f99e083fdddc5c62d11e5e46a5f4 Mon Sep 17 00:00:00 
2001 From: John Sully Date: Fri, 15 Mar 2019 05:28:20 +0000 Subject: [PATCH 48/59] Hack to prevent build errors on some machines due to missing throw() --- src/server.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/server.h b/src/server.h index 212ba9c81..3fa5f90a3 100644 --- a/src/server.h +++ b/src/server.h @@ -2330,11 +2330,13 @@ void lolwutCommand(client *c); void aclCommand(client *c); #if defined(__GNUC__) +#ifndef __cplusplus void *calloc(size_t count, size_t size) __attribute__ ((deprecated)); void free(void *ptr) __attribute__ ((deprecated)); void *malloc(size_t size) __attribute__ ((deprecated)); void *realloc(void *ptr, size_t size) __attribute__ ((deprecated)); #endif +#endif /* Debugging stuff */ void _serverAssertWithInfo(const client *c, const robj *o, const char *estr, const char *file, int line); From 9f13b2bd4967334b1701c6eccdf53760cb13f79e Mon Sep 17 00:00:00 2001 From: John Sully Date: Thu, 14 Mar 2019 14:02:16 -0400 Subject: [PATCH 49/59] Fix hyperloglog corruption --- src/hyperloglog.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/hyperloglog.c b/src/hyperloglog.c index fc21ea006..e993bf26e 100644 --- a/src/hyperloglog.c +++ b/src/hyperloglog.c @@ -614,6 +614,10 @@ int hllSparseToDense(robj *o) { } else { runlen = HLL_SPARSE_VAL_LEN(p); regval = HLL_SPARSE_VAL_VALUE(p); + if ((runlen + idx) > HLL_REGISTERS) { + sdsfree(dense); + return C_ERR; + } while(runlen--) { HLL_DENSE_SET_REGISTER(hdr->registers,idx,regval); idx++; @@ -1088,6 +1092,8 @@ int hllMerge(uint8_t *max, robj *hll) { } else { runlen = HLL_SPARSE_VAL_LEN(p); regval = HLL_SPARSE_VAL_VALUE(p); + if ((runlen + i) > HLL_REGISTERS) + return C_ERR; while(runlen--) { if (regval > max[i]) max[i] = regval; i++; From 4208666797b5831eefc022ae46ab5747200cd671 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 15 Mar 2019 13:52:29 +0100 Subject: [PATCH 50/59] HyperLogLog: dense/sparse repr parsing fuzz test. 
--- tests/unit/hyperloglog.tcl | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/tests/unit/hyperloglog.tcl b/tests/unit/hyperloglog.tcl index 7d36b7a35..6a9c47b11 100644 --- a/tests/unit/hyperloglog.tcl +++ b/tests/unit/hyperloglog.tcl @@ -115,6 +115,35 @@ start_server {tags {"hll"}} { set e } {*WRONGTYPE*} + test {Fuzzing dense/sparse encoding: Redis should always detect errors} { + for {set j 0} {$j < 10000} {incr j} { + r del hll + set items {} + set numitems [randomInt 3000] + for {set i 0} {$i < $numitems} {incr i} { + lappend items [expr {rand()}] + } + r pfadd hll {*}$items + + # Corrupt it in some random way. + for {set i 0} {$i < 5} {incr i} { + set len [r strlen hll] + set pos [randomInt $len] + set byte [randstring 1 1 binary] + r setrange hll $pos $byte + # Don't modify more bytes 50% of times + if {rand() < 0.5} break + } + + # Use the hyperloglog to check if it crashes + # Redis in some way. + catch { + r pfcount hll + r pfdebug getreg hll + } + } + } + test {PFADD, PFCOUNT, PFMERGE type checking works} { r set foo bar catch {r pfadd foo 1} e From a4b90be9fcd5e1668ac941cabce3b1ab38dbe326 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 15 Mar 2019 17:10:16 +0100 Subject: [PATCH 51/59] HyperLogLog: enlarge reghisto variable for safety. --- src/hyperloglog.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/hyperloglog.c b/src/hyperloglog.c index e993bf26e..526510b43 100644 --- a/src/hyperloglog.c +++ b/src/hyperloglog.c @@ -1017,7 +1017,12 @@ uint64_t hllCount(struct hllhdr *hdr, int *invalid) { double m = HLL_REGISTERS; double E; int j; - int reghisto[HLL_Q+2] = {0}; + /* Note that reghisto could be just HLL_Q+1, becuase this is the + * maximum frequency of the "000...1" sequence the hash function is + * able to return. However it is slow to check for sanity of the + * input: instead we history array at a safe size: overflows will + * just write data to wrong, but correctly allocated, places. 
*/ + int reghisto[64] = {0}; /* Compute register histogram */ if (hdr->encoding == HLL_DENSE) { From dca7358279bb6449f93e01f7d2806639b8e9ec4b Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 15 Mar 2019 17:13:19 +0100 Subject: [PATCH 52/59] HyperLogLog: speedup fuzz test. --- tests/unit/hyperloglog.tcl | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/unit/hyperloglog.tcl b/tests/unit/hyperloglog.tcl index 6a9c47b11..712fcc641 100644 --- a/tests/unit/hyperloglog.tcl +++ b/tests/unit/hyperloglog.tcl @@ -116,7 +116,7 @@ start_server {tags {"hll"}} { } {*WRONGTYPE*} test {Fuzzing dense/sparse encoding: Redis should always detect errors} { - for {set j 0} {$j < 10000} {incr j} { + for {set j 0} {$j < 1000} {incr j} { r del hll set items {} set numitems [randomInt 3000] @@ -139,7 +139,6 @@ start_server {tags {"hll"}} { # Redis in some way. catch { r pfcount hll - r pfdebug getreg hll } } } From e216ceaf0e099536fe3658a29dcb725d812364e0 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 15 Mar 2019 17:16:06 +0100 Subject: [PATCH 53/59] HyperLogLog: handle wrong offset in the base case. --- src/hyperloglog.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/src/hyperloglog.c b/src/hyperloglog.c index 526510b43..1e7ce3dce 100644 --- a/src/hyperloglog.c +++ b/src/hyperloglog.c @@ -614,10 +614,7 @@ int hllSparseToDense(robj *o) { } else { runlen = HLL_SPARSE_VAL_LEN(p); regval = HLL_SPARSE_VAL_VALUE(p); - if ((runlen + idx) > HLL_REGISTERS) { - sdsfree(dense); - return C_ERR; - } + if ((runlen + idx) > HLL_REGISTERS) break; /* Overflow. */ while(runlen--) { HLL_DENSE_SET_REGISTER(hdr->registers,idx,regval); idx++; @@ -1097,8 +1094,7 @@ int hllMerge(uint8_t *max, robj *hll) { } else { runlen = HLL_SPARSE_VAL_LEN(p); regval = HLL_SPARSE_VAL_VALUE(p); - if ((runlen + i) > HLL_REGISTERS) - return C_ERR; + if ((runlen + i) > HLL_REGISTERS) break; /* Overflow. 
*/ while(runlen--) { if (regval > max[i]) max[i] = regval; i++; From 8ea906a3e8f3e125baa9cf54f6027921d3822b02 Mon Sep 17 00:00:00 2001 From: antirez Date: Sat, 16 Mar 2019 09:15:12 +0100 Subject: [PATCH 54/59] HyperLogLog: fix comment in hllCount(). --- src/hyperloglog.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/hyperloglog.c b/src/hyperloglog.c index 1e7ce3dce..e01ea6042 100644 --- a/src/hyperloglog.c +++ b/src/hyperloglog.c @@ -1014,8 +1014,8 @@ uint64_t hllCount(struct hllhdr *hdr, int *invalid) { double m = HLL_REGISTERS; double E; int j; - /* Note that reghisto could be just HLL_Q+1, becuase this is the - * maximum frequency of the "000...1" sequence the hash function is + /* Note that reghisto size could be just HLL_Q+2, becuase HLL_Q+1 is + * the maximum frequency of the "000...1" sequence the hash function is * able to return. However it is slow to check for sanity of the * input: instead we history array at a safe size: overflows will * just write data to wrong, but correctly allocated, places. */ From b78ac354f41e370a4dc21ac01981cb0ccd0a1b7d Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 18 Mar 2019 11:15:39 +0100 Subject: [PATCH 55/59] redis-check-aof: fix potential overflow. Bug signaled by @vattezhang in PR #5940 but fixed differently. --- src/redis-check-aof.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/redis-check-aof.c b/src/redis-check-aof.c index c4d5a225e..54ed85f0d 100644 --- a/src/redis-check-aof.c +++ b/src/redis-check-aof.c @@ -33,8 +33,8 @@ #define ERROR(...) 
{ \ char __buf[1024]; \ - sprintf(__buf, __VA_ARGS__); \ - sprintf(error, "0x%16llx: %s", (long long)epos, __buf); \ + snprintf(__buf, sizeof(__buf), __VA_ARGS__); \ + snprintf(error, sizeof(error), "0x%16llx: %s", (long long)epos, __buf); \ } static char error[1024]; From 14b17c3615108fdbca5e7fe4d2c3f0e8b7454521 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 18 Mar 2019 11:34:40 +0100 Subject: [PATCH 56/59] replicaofCommand() refactoring: stay into 80 cols. --- src/replication.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/replication.c b/src/replication.c index 3c30999af..f2adc7995 100644 --- a/src/replication.c +++ b/src/replication.c @@ -2053,8 +2053,11 @@ void replicaofCommand(client *c) { /* Check if we are already attached to the specified slave */ if (server.masterhost && !strcasecmp(server.masterhost,c->argv[1]->ptr) && server.masterport == port) { - serverLog(LL_NOTICE,"REPLICAOF would result into synchronization with the master we are already connected with. No operation performed."); - addReplySds(c,sdsnew("+OK Already connected to specified master\r\n")); + serverLog(LL_NOTICE,"REPLICAOF would result into synchronization " + "with the master we are already connected " + "with. No operation performed."); + addReplySds(c,sdsnew("+OK Already connected to specified " + "master\r\n")); return; } /* There was no previous master or the user specified a different one, From a5af648fdddaf93e89735a8577b56f12379d1dd2 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 18 Mar 2019 15:38:43 +0100 Subject: [PATCH 57/59] MANIFESTO v2. --- MANIFESTO | 47 +++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 41 insertions(+), 6 deletions(-) diff --git a/MANIFESTO b/MANIFESTO index 2b719057e..d43a58893 100644 --- a/MANIFESTO +++ b/MANIFESTO @@ -34,7 +34,21 @@ Redis Manifesto so that the complexity is obvious and more complex operations can be performed as the sum of the basic operations. 
-4 - Code is like a poem; it's not just something we write to reach some +4 - We believe in code efficiency. Computers get faster and faster, yet we + believe that abusing computing capabilities is not wise: the amount of + operations you can do for a given amount of energy remains anyway a + significant parameter: it allows to do more with less computers and, at + the same time, having a smaller environmental impact. Similarly Redis is + able to "scale down" to smaller devices. It is perfectly usable in a + Raspberry Pi and other small ARM based computers. Faster code having + just the layers of abstractions that are really needed will also result, + often, in more predictable performances. We think likewise about memory + usage, one of the fundamental goals of the Redis project is to + incrementally build more and more memory efficient data structures, so that + problems that were not approachable in RAM in the past will be perfectly + fine to handle in the future. + +5 - Code is like a poem; it's not just something we write to reach some practical result. Sometimes people that are far from the Redis philosophy suggest using other code written by other authors (frequently in other languages) in order to implement something Redis currently lacks. But to us @@ -45,23 +59,44 @@ Redis Manifesto when needed. At the same time, when writing the Redis story we're trying to write smaller stories that will fit in to other code. -5 - We're against complexity. We believe designing systems is a fight against +6 - We're against complexity. We believe designing systems is a fight against complexity. We'll accept to fight the complexity when it's worthwhile but we'll try hard to recognize when a small feature is not worth 1000s of lines of code. Most of the time the best way to fight complexity is by not creating it at all. -6 - Two levels of API. The Redis API has two levels: 1) a subset of the API fits +7 - Threading is not a silver bullet. 
Instead of making Redis threaded we + believe on the idea of an efficient (mostly) single threaded Redis core. + Multiple of such cores, that may run in the same computer or may run + in multiple computers, are abstracted away as a single big system by + higher order protocols and features: Redis Cluster and the upcoming + Redis Proxy are our main goals. A shared nothing approach is not just + much simpler (see the previous point in this document), is also optimal + in NUMA systems. In the specific case of Redis it allows for each instance + to have a more limited amount of data, making the Redis persist-by-fork + approach more sounding. In the future we may explore parallelism only for + I/O, which is the low hanging fruit: minimal complexity could provide an + improved single process experience. + +8 - Two levels of API. The Redis API has two levels: 1) a subset of the API fits naturally into a distributed version of Redis and 2) a more complex API that supports multi-key operations. Both are useful if used judiciously but there's no way to make the more complex multi-keys API distributed in an opaque way without violating our other principles. We don't want to provide the illusion of something that will work magically when actually it can't in all cases. Instead we'll provide commands to quickly migrate keys from one - instance to another to perform multi-key operations and expose the tradeoffs - to the user. + instance to another to perform multi-key operations and expose the + trade-offs to the user. -7 - We optimize for joy. We believe writing code is a lot of hard work, and the +9 - We optimize for joy. We believe writing code is a lot of hard work, and the only way it can be worth is by enjoying it. When there is no longer joy in writing code, the best thing to do is stop. To prevent this, we'll avoid taking paths that will make Redis less of a joy to develop. 
+ +10 - All the above points are put together in what we call opportunistic + programming: trying to get the most for the user with minimal increases + in complexity (hanging fruits). Solve 95% of the problem with 5% of the + code when it is acceptable. Avoid a fixed schedule but follow the flow of + user requests, inspiration, Redis internal readiness for certain features + (sometimes many past changes reach a critical point making a previously + complex feature very easy to obtain). From 3eaa2cdc44a9b0742f0695f44911b92547995836 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 18 Mar 2019 15:49:52 +0100 Subject: [PATCH 58/59] MANIFESTO: simplicity and lock-in. --- MANIFESTO | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/MANIFESTO b/MANIFESTO index d43a58893..372789462 100644 --- a/MANIFESTO +++ b/MANIFESTO @@ -63,7 +63,11 @@ Redis Manifesto complexity. We'll accept to fight the complexity when it's worthwhile but we'll try hard to recognize when a small feature is not worth 1000s of lines of code. Most of the time the best way to fight complexity is by not - creating it at all. + creating it at all. Complexity is also a form of lock-in: code that is + very hard to understand cannot be modified by users in an independent way + regardless of the license. One of the main Redis goals is to remain + understandable, enough for a single programmer to have a clear idea of how + it works in detail just reading the source code for a couple of weeks. 7 - Threading is not a silver bullet. Instead of making Redis threaded we believe on the idea of an efficient (mostly) single threaded Redis core. From d3800745698dc61a3fb6e491dcdba26ab5655932 Mon Sep 17 00:00:00 2001 From: John Sully Date: Tue, 19 Mar 2019 01:36:03 -0400 Subject: [PATCH 59/59] KeyDB has different aims than the Redis project. We believe software is for the user. If a feature requires complicating the codebase but simplifies our user's lives we will do it any time. 
This should not be taken as an outright rejection of the manifesto, as we agree on many points. --- MANIFESTO | 106 ------------------------------------------------------ 1 file changed, 106 deletions(-) delete mode 100644 MANIFESTO diff --git a/MANIFESTO b/MANIFESTO deleted file mode 100644 index 372789462..000000000 --- a/MANIFESTO +++ /dev/null @@ -1,106 +0,0 @@ -[Note: this is the Redis manifesto, for general information about - installing and running Redis read the README file instead.] - -Redis Manifesto -=============== - -1 - A DSL for Abstract Data Types. Redis is a DSL (Domain Specific Language) - that manipulates abstract data types and implemented as a TCP daemon. - Commands manipulate a key space where keys are binary-safe strings and - values are different kinds of abstract data types. Every data type - represents an abstract version of a fundamental data structure. For instance - Redis Lists are an abstract representation of linked lists. In Redis, the - essence of a data type isn't just the kind of operations that the data types - support, but also the space and time complexity of the data type and the - operations performed upon it. - -2 - Memory storage is #1. The Redis data set, composed of defined key-value - pairs, is primarily stored in the computer's memory. The amount of memory in - all kinds of computers, including entry-level servers, is increasing - significantly each year. Memory is fast, and allows Redis to have very - predictable performance. Datasets composed of 10k or 40 millions keys will - perform similarly. Complex data types like Redis Sorted Sets are easy to - implement and manipulate in memory with good performance, making Redis very - simple. Redis will continue to explore alternative options (where data can - be optionally stored on disk, say) but the main goal of the project remains - the development of an in-memory database. - -3 - Fundamental data structures for a fundamental API. 
The Redis API is a direct - consequence of fundamental data structures. APIs can often be arbitrary but - not an API that resembles the nature of fundamental data structures. If we - ever meet intelligent life forms from another part of the universe, they'll - likely know, understand and recognize the same basic data structures we have - in our computer science books. Redis will avoid intermediate layers in API, - so that the complexity is obvious and more complex operations can be - performed as the sum of the basic operations. - -4 - We believe in code efficiency. Computers get faster and faster, yet we - believe that abusing computing capabilities is not wise: the amount of - operations you can do for a given amount of energy remains anyway a - significant parameter: it allows to do more with less computers and, at - the same time, having a smaller environmental impact. Similarly Redis is - able to "scale down" to smaller devices. It is perfectly usable in a - Raspberry Pi and other small ARM based computers. Faster code having - just the layers of abstractions that are really needed will also result, - often, in more predictable performances. We think likewise about memory - usage, one of the fundamental goals of the Redis project is to - incrementally build more and more memory efficient data structures, so that - problems that were not approachable in RAM in the past will be perfectly - fine to handle in the future. - -5 - Code is like a poem; it's not just something we write to reach some - practical result. Sometimes people that are far from the Redis philosophy - suggest using other code written by other authors (frequently in other - languages) in order to implement something Redis currently lacks. But to us - this is like if Shakespeare decided to end Enrico IV using the Paradiso from - the Divina Commedia. Is using any external code a bad idea? Not at all. 
Like - in "One Thousand and One Nights" smaller self contained stories are embedded - in a bigger story, we'll be happy to use beautiful self contained libraries - when needed. At the same time, when writing the Redis story we're trying to - write smaller stories that will fit in to other code. - -6 - We're against complexity. We believe designing systems is a fight against - complexity. We'll accept to fight the complexity when it's worthwhile but - we'll try hard to recognize when a small feature is not worth 1000s of lines - of code. Most of the time the best way to fight complexity is by not - creating it at all. Complexity is also a form of lock-in: code that is - very hard to understand cannot be modified by users in an independent way - regardless of the license. One of the main Redis goals is to remain - understandable, enough for a single programmer to have a clear idea of how - it works in detail just reading the source code for a couple of weeks. - -7 - Threading is not a silver bullet. Instead of making Redis threaded we - believe on the idea of an efficient (mostly) single threaded Redis core. - Multiple of such cores, that may run in the same computer or may run - in multiple computers, are abstracted away as a single big system by - higher order protocols and features: Redis Cluster and the upcoming - Redis Proxy are our main goals. A shared nothing approach is not just - much simpler (see the previous point in this document), is also optimal - in NUMA systems. In the specific case of Redis it allows for each instance - to have a more limited amount of data, making the Redis persist-by-fork - approach more sounding. In the future we may explore parallelism only for - I/O, which is the low hanging fruit: minimal complexity could provide an - improved single process experience. - -8 - Two levels of API. 
The Redis API has two levels: 1) a subset of the API fits
- naturally into a distributed version of Redis and 2) a more complex API that
- supports multi-key operations. Both are useful if used judiciously but
- there's no way to make the more complex multi-keys API distributed in an
- opaque way without violating our other principles. We don't want to provide
- the illusion of something that will work magically when actually it can't in
- all cases. Instead we'll provide commands to quickly migrate keys from one
- instance to another to perform multi-key operations and expose the
- trade-offs to the user.
-
-9 - We optimize for joy. We believe writing code is a lot of hard work, and the
- only way it can be worth it is by enjoying it. When there is no longer joy in
- writing code, the best thing to do is stop. To prevent this, we'll avoid
- taking paths that will make Redis less of a joy to develop.
-
-10 - All the above points are put together in what we call opportunistic
- programming: trying to get the most for the user with minimal increases
- in complexity (low-hanging fruits). Solve 95% of the problem with 5% of the
- code when it is acceptable. Avoid a fixed schedule but follow the flow of
- user requests, inspiration, Redis internal readiness for certain features
- (sometimes many past changes reach a critical point making a previously
- complex feature very easy to obtain).