diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 5388d4d3..d2e3e2b3 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -8,6 +8,8 @@ jobs: check: runs-on: ubuntu-latest steps: + - name: Install aspell + run: sudo apt-get install aspell - uses: ruby/setup-ruby@v1 with: ruby-version: 2.6 diff --git a/README.md b/README.md index cdde215a..9bcfa552 100644 --- a/README.md +++ b/README.md @@ -65,7 +65,10 @@ Returns all keys matching the given pattern. Please use the following formatting rules: -* Wrap lines to 80 characters. +* No need for manual lines wrapping at any specific length, doing so usually + means that adding a word creates a cascade effect and changes other lines. +* Please avoid writing lines that are too long, this makes the diff harder to + review when only one word is changed. * Start every sentence on a new line. Luckily, this repository comes with an automated Markdown formatter. @@ -82,11 +85,12 @@ The formatter has the following dependencies: * Redcarpet * Nokogiri * The `par` tool +* batch Installation of the Ruby gems: ``` -gem install redcarpet nokogiri +gem install redcarpet nokogiri batch ``` Installation of par (OSX): diff --git a/clients.json b/clients.json index 1800d9c0..fe2e41a9 100644 --- a/clients.json +++ b/clients.json @@ -28,14 +28,6 @@ "active": true }, - { - "name": "Erldis", - "language": "Erlang", - "repository": "https://github.com/japerk/erldis", - "description": "A Redis erlang client library.", - "authors": ["dialtone_","japerk"] - }, - { "name": "Eredis", "language": "Erlang", @@ -43,6 +35,14 @@ "description": "Redis client with a focus on performance", "authors": ["wooga"], "recommended": true, + "active": false + }, + { + "name": "Eredis (Nordix fork)", + "language": "Erlang", + "repository": "https://github.com/Nordix/eredis", + "description": "An updated fork of eredis, adding TLS and various corrections and testing", + "recommended": true, 
"active": true }, { @@ -51,22 +51,22 @@ "repository": "https://github.com/adrienmo/eredis_cluster", "description": "Eredis wrapper providing cluster support and connection pooling", "authors": ["adrienmo"], - "active": true + "active": false }, { - "name": "sharded_eredis", + "name": "eredis_cluster (Nordix fork)", "language": "Erlang", - "repository": "https://github.com/jeremyong/sharded_eredis", - "description": "Wrapper around eredis providing process pools and consistent hashing.", - "authors": ["jeremyong"] + "repository": "https://github.com/Nordix/eredis_cluster", + "description": "An updated fork of eredis_cluster (providing cluster support and connection pooling), with added TLS support, ASK redirects, various corrections and testing", + "recommended": true, + "active": true }, - { - "name": "Tideland Erlang/OTP Redis Client", + "name": "ecredis", "language": "Erlang", - "repository": "git://git.tideland.biz/errc", - "description": "A comfortable Redis client for Erlang/OTP support pooling, pub/sub and transactions.", - "authors": ["themue"] + "repository": "https://github.com/HalloAppInc/ecredis", + "description": "Redis Cluster client that allows for connections to multiple clusters. Queries are send directly to eredis clients allowing for large throughput.", + "authors": ["HalloAppInc"] }, { @@ -92,6 +92,7 @@ "repository": "https://github.com/go-redis/redis", "description": "Redis client for Golang supporting Redis Sentinel and Redis Cluster out of the box.", "authors": [], + "recommended": true, "active": true }, @@ -150,6 +151,13 @@ "authors": [] }, + { + "name": "godis", + "language": "Go", + "repository": "https://github.com/piaohao/godis", + "description": "redis client implement by golang, inspired by jedis." 
+ }, + { "name": "gosexy/redis", "language": "Go", @@ -208,30 +216,22 @@ "language": "Haskell", "url": "http://hackage.haskell.org/package/hedis", "repository": "https://github.com/informatikr/hedis", - "description": "Supports the complete command set. Commands are automatically pipelined for high performance.", + "description": "Supports the complete command set and cluster. Commands are automatically pipelined for high performance.", "authors": [], "recommended": true, "active": true }, - { - "name": "haskell-redis", - "language": "Haskell", - "url": "https://bitbucket.org/videlalvaro/redis-haskell/wiki/Home", - "repository": "https://bitbucket.org/videlalvaro/redis-haskell/src", - "description": "Not actively maintained, supports Redis <= 2.0.", - "authors": ["old_sound"] - }, - { "name": "Jedis", "language": "Java", - "repository": "https://github.com/xetorthio/jedis", - "description": "A blazingly small and sane redis java client", - "authors": ["xetorthio"], + "repository": "https://github.com/redis/jedis", + "description": "A blazingly small and sane Redis Java client", + "authors": ["xetorthio", "g_korland"], "recommended": true, "active": true }, + { "name": "java-redis-client", "language": "Java", @@ -240,14 +240,6 @@ "authors": [], "active": true }, - { - "name": "Jedipus", - "language": "Java", - "repository": "https://github.com/jamespedwards42/jedipus", - "description": "Redis Client & Command Executor.", - "authors": ["jamespedwards"], - "active": true - }, { "name": "Redisson", @@ -265,8 +257,7 @@ "url": "https://code.google.com/p/jredis/", "repository": "https://github.com/alphazero/jredis", "description": "", - "authors": ["SunOf27"], - "active": true + "authors": ["SunOf27"] }, { @@ -275,7 +266,7 @@ "url": "https://code.google.com/p/jdbc-redis/", "repository": "https://code.google.com/p/jdbc-redis/source/browse", "description": "", - "authors": ["mavcunha"] + "authors": [] }, { @@ -307,7 +298,8 @@ "language": "Java", "repository": 
"https://github.com/vert-x3/vertx-redis-client", "description": "The Vert.x Redis client provides an asynchronous API to interact with a Redis data-structure server.", - "authors": ["pmlopes"] + "authors": ["pmlopes"], + "active": true }, { @@ -398,7 +390,7 @@ "name": "Redis::ClusterRider", "language": "Perl", "url": "http://search.cpan.org/dist/Redis-ClusterRider/", - "repository": " https://github.com/iph0/Redis-ClusterRider", + "repository": "https://github.com/iph0/Redis-ClusterRider", "description": "Daring Redis Cluster client", "authors": ["iph0"], "active": true @@ -491,7 +483,7 @@ "language": "PHP", "repository": "https://github.com/swoole/redis-async", "description": "Asynchronous redis client library for PHP.", - "authors": ["matyhtf"], + "authors": [], "active": false }, @@ -513,6 +505,15 @@ "recommended": true, "active": true }, + + { + "name": "redis-py-cluster", + "language": "Python", + "repository": "https://github.com/Grokzen/redis-py-cluster", + "description": "Python cluster client for the official redis cluster. 
Redis 3.0+.", + "authors": ["grokzen"], + "active": true + }, { "name": "gxredis", @@ -534,7 +535,7 @@ { "name": "txredisapi", "language": "Python", - "url": "https://github.com/fiorix/txredisapi", + "repository": "https://github.com/fiorix/txredisapi", "description": "Full featured, non-blocking client for Twisted.", "authors": ["fiorix"], "active": true @@ -562,7 +563,7 @@ "language": "Python", "repository": "https://github.com/evilkost/brukva", "description": "Asynchronous Redis client that works within Tornado IO loop", - "authors": ["evilkost"] + "authors": [] }, { @@ -600,7 +601,7 @@ "authors": ["paualarco"], "active": true }, - + { "name": "laserdisc", "language": "Scala", @@ -657,7 +658,7 @@ "language": "Scala", "repository": "https://github.com/pk11/sedis", "description": "a thin scala wrapper for the popular Redis Java client, Jedis", - "authors": ["pk11"] + "authors": [] }, { @@ -697,7 +698,7 @@ { "name": "ServiceStack.Redis", "language": "C#", - "url": "https://github.com/ServiceStack/ServiceStack.Redis", + "repository": "https://github.com/ServiceStack/ServiceStack.Redis", "description": "This is a fork and improvement of the original C# client written by Miguel De Icaza.", "authors": ["demisbellot"], "recommended": true, @@ -707,7 +708,7 @@ { "name": "StackExchange.Redis", "language": "C#", - "url": "https://github.com/StackExchange/StackExchange.Redis", + "repository": "https://github.com/StackExchange/StackExchange.Redis", "description": "This .NET client was developed by Stack Exchange for very high performance needs (replacement to the earlier BookSleeve).", "authors": ["marcgravell"], "recommended": true, @@ -725,7 +726,7 @@ { "name": "Rediska", "language": "C#", - "url": "https://github.com/pepelev/Rediska", + "repository": "https://github.com/pepelev/Rediska", "description": "Rediska is a Redis client for .NET with a focus on flexibility and extensibility.", "authors": [], "recommended": false, @@ -735,7 +736,7 @@ { "name": "DartRedisClient", 
"language": "Dart", - "url": "https://github.com/dartist/redis_client", + "repository": "https://github.com/dartist/redis_client", "description": "A high-performance async/non-blocking Redis client for Dart", "authors": ["demisbellot"], "recommended": true, @@ -783,36 +784,45 @@ "repository": "https://code.google.com/p/credis/source/browse", "description": "", "authors": [], - "active": true + "active": false }, - + { "name": "xredis", "language": "Node.js", "repository": "https://github.com/razaellahi/xredis", "description": "Redis client with redis ACL features", "authors": ["razaellahi531"], - "recommended": true, + "recommended": false, "active": true }, - + { - "name": "node_redis", + "name": "node-redis", "language": "Node.js", - "repository": "https://github.com/NodeRedis/node_redis", + "repository": "https://github.com/NodeRedis/node-redis", "description": "Recommended client for node.", "authors": ["mranney"], "recommended": true, "active": true }, + { + "name": "handy-redis", + "language": "Node.js", + "repository": "https://github.com/mmkal/handy-redis", + "description": "A wrapper around node_redis with Promise and TypeScript support.", + "authors": [], + "active": true + }, + { "name": "thunk-redis", "language": "Node.js", "repository": "https://github.com/thunks/thunk-redis", "description": "A thunk/promise-based redis client with pipelining and cluster.", "authors": ["izensh"], - "active": true + "active": false }, { @@ -820,9 +830,9 @@ "language": "Node.js", "repository": "https://github.com/rootslab/spade", "description": "♠ Spade, a full-featured modular client for node.", - "authors": ["44gtti"], + "authors": [], "recommended": false, - "active": true + "active": false }, { @@ -831,7 +841,7 @@ "repository": "https://github.com/mjackson/then-redis", "description": "A small, promise-based Redis client for node", "authors": ["mjackson"], - "active": true + "active": false }, { @@ -862,6 +872,15 @@ "recommended": true, "active": true }, + + { + 
"name": "@camaro/redis", + "language": "Node.js", + "repository": "https://github.com/camarojs/redis", + "description": "Redis client for node, support resp2/3 and redis6.", + "authors": [], + "active": true + }, { "name": "redis-fast-driver", @@ -878,7 +897,7 @@ "repository": "https://github.com/h0x91b/fast-redis-cluster", "description": "Simple and fast cluster driver with error handling, uses redis-fast-driver as main adapter and node_redis as backup for windows", "authors": ["h0x91b"], - "active": true + "active": false }, { @@ -910,7 +929,7 @@ "language": "C#", "repository": "http://redis.codeplex.com/", "description": "Redis Client is based on redis-sharp for the basic communication functions, but it offers some differences.", - "authors": ["TeamDevPerugia"] + "authors": [] }, { @@ -1062,6 +1081,15 @@ "recommended": true }, + { + "name": "redisclient", + "language": "Rust", + "repository": "https://github.com/ltoddy/redis-rs", + "description": "Redis client for Rust.", + "authors": ["ltoddygen"], + "active": true + }, + { "name": "rust-redis", "language": "Rust", @@ -1111,8 +1139,7 @@ "language": "Java", "repository": "https://github.com/caoxinyu/RedisClient", "description": "redis client GUI tool", - "authors": [], - "active": true + "authors": [] }, { @@ -1170,7 +1197,7 @@ { "name": "redis", "language": "Dart", - "url": "https://github.com/ra1u/redis-dart", + "repository": "https://github.com/ra1u/redis-dart", "description": "Simple and fast client", "authors": [], "active": true @@ -1184,6 +1211,15 @@ "authors": [], "active": true }, + { + "name": "redis-async", + "language": "OCaml", + "repository": "https://github.com/janestreet/redis-async", + "url": "https://github.com/janestreet/redis-async", + "description": "A Redis client for OCaml Async applications with a strongly-typed API and client tracking support.", + "authors": [ "janestreet", "lukepalmer" ], + "active": true + }, { "name": "Nhiredis", "language": "C#", @@ -1360,8 +1396,7 @@ "language": 
"Bash", "repository": "https://github.com/crypt1d/redi.sh", "description": "Simple, Bash-based, Redis client to store your script's variables", - "authors": ["nkrzalic"], - "active": true + "authors": ["nkrzalic"] }, { @@ -1390,6 +1425,15 @@ "authors": ["jkaye2012"], "active": true }, + + { + "name": "Jedis.jl", + "language": "Julia", + "repository": "https://github.com/captchanjack/Jedis.jl", + "description": "A lightweight Redis client, implemented in Julia.", + "authors": ["captchanjack"], + "active": true + }, { "name": "Redis::Cluster", @@ -1458,14 +1502,14 @@ { "name": "RediStack", "language": "Swift", - "repository": "https://gitlab.com/Mordil/RediStack", + "repository": "https://github.com/Mordil/RediStack", "description": "Non-blocking, event-driven Swift client for Redis built with SwiftNIO for all official Swift deployment environments.", "authors": ["mordil"], "active": true, "recommended": true, "url": "https://docs.redistack.info" }, - + { "name": "Rackdis", "language": "Racket", @@ -1591,14 +1635,6 @@ "active": true }, - { - "name": "Hierdis", - "language": "Erlang", - "repository": "https://github.com/funbox/hierdis", - "description": "High-performance Erlang client for the Redis key-value store (NIF wrapping the hiredis C client).", - "authors": ["funbox_team"] - }, - { "name": "yoredis", "language": "Node.js", @@ -1638,8 +1674,17 @@ "name": "hiredis-vip", "language": "C", "repository": "https://github.com/vipshop/hiredis-vip", - "description": "This is the C client for redis cluster. Support for synchronous api, MSET/MGET/DEL, pipelining, asynchronous api.", + "description": "This was the original C client for Redis Cluster. Support for synchronous and asyncronous APIs, MSET/MGET/DEL, pipelining. 
Built around an outdated version of hiredis.", "authors": ["diguo58"], + "recommended": false, + "active": false + }, + + { + "name": "hiredis-cluster", + "language": "C", + "repository": "https://github.com/Nordix/hiredis-cluster", + "description": "This is an updated fork of hiredis-cluster, the C client for Redis Cluster, with added TLS and AUTH support, decoupling hiredis as an external dependency, leak corrections and improved testing.", "recommended": true, "active": true }, @@ -1772,6 +1817,16 @@ "active": true }, + { + "name": "NewLife.Redis", + "language": "C#", + "url": "https://github.com/NewLifeX/NewLife.Redis", + "repository": "https://github.com/NewLifeX/NewLife.Redis", + "description": "The high-performance redis client supports .NETCORE/.NET4.0/.NET4.5, which is specially optimized for big data and message queuing. The average daily call volume of single online application is 10 billion", + "authors": [], + "active": true + }, + { "name": "wiredis", "language": "C++", @@ -1834,8 +1889,7 @@ "language": "Java", "repository": "https://github.com/virendradhankar/viredis", "description": "A simple and small redis client for java.", - "authors": [], - "active": true + "authors": [] }, { @@ -1852,7 +1906,7 @@ "url": "https://github.com/kristoff-it/zig-okredis", "repository": "https://github.com/kristoff-it/zig-okredis", "description": "OkRedis is a zero-allocation client for Redis 6+ ", - "authors": ["kristoff-it"], + "authors": ["croloris"], "recommended": true, "active": true }, @@ -1875,5 +1929,31 @@ "description": "A custom connector for Dell Boomi that utilizes the lettuce.io Java client to add Redis client support to the Dell Boomi iPaaS.", "authors": [], "active": true + }, + + { + "name": "Redis library for SWI-Prolog", + "language": "Prolog", + "repository": "https://github.com/SWI-Prolog/packages-redis", + "description": "Prolog redis client that exploits SWI-Prolog's extensions such as strings for compact replies and threads to deal with 
publish/subscribe.", + "active": true + }, + + { + "name": "FreeRedis", + "language": "C#", + "repository": "https://github.com/2881099/FreeRedis", + "description": "This .NET client supports redis6.0+, cluster, sentinel, pipeline, And simple api.", + "authors": [], + "active": true + }, + + { + "name": "redis-client", + "language": "Bash", + "repository": "https://github.com/SomajitDey/redis-client", + "description": "extensible client library for Bash scripting or command-line + connection pooling + redis-cli", + "active": true } + ] diff --git a/commands.json b/commands.json index b65afe1a..4f402828 100644 --- a/commands.json +++ b/commands.json @@ -133,6 +133,13 @@ "since": "2.0.0", "group": "string" }, + "ASKING": { + "summary": "Sent by cluster clients after an -ASK redirect", + "complexity": "O(1)", + "arguments": [], + "since": "3.0.0", + "group": "cluster" + }, "AUTH": { "summary": "Authenticate to the server", "arguments": [ @@ -190,7 +197,7 @@ } ], "since": "2.6.0", - "group": "string" + "group": "bitmap" }, "BITFIELD": { "summary": "Perform arbitrary bitfield integer operations on strings", @@ -252,7 +259,30 @@ } ], "since": "3.2.0", - "group": "string" + "group": "bitmap" + }, + "BITFIELD_RO": { + "summary": "Perform arbitrary bitfield integer operations on strings. 
Read-only variant of BITFIELD", + "complexity": "O(1) for each subcommand specified", + "arguments": [ + { + "name": "key", + "type": "key" + }, + { + "command": "GET", + "name": [ + "type", + "offset" + ], + "type": [ + "type", + "integer" + ] + } + ], + "since": "6.2.0", + "group": "bitmap" }, "BITOP": { "summary": "Perform bitwise operations between strings", @@ -273,7 +303,7 @@ } ], "since": "2.6.0", - "group": "string" + "group": "bitmap" }, "BITPOS": { "summary": "Find first bit set or clear in a string", @@ -288,22 +318,28 @@ "type": "integer" }, { - "name": "start", - "type": "integer", - "optional": true - }, - { - "name": "end", - "type": "integer", - "optional": true + "name": "index", + "type": "block", + "optional": true, + "block": [ + { + "name": "start", + "type": "integer" + }, + { + "name": "end", + "type": "integer", + "optional": true + } + ] } ], "since": "2.8.7", - "group": "string" + "group": "bitmap" }, "BLPOP": { "summary": "Remove and get the first element in a list, or block until one is available", - "complexity": "O(1)", + "complexity": "O(N) where N is the number of provided keys.", "arguments": [ { "name": "key", @@ -320,7 +356,7 @@ }, "BRPOP": { "summary": "Remove and get the last element in a list, or block until one is available", - "complexity": "O(1)", + "complexity": "O(N) where N is the number of provided keys.", "arguments": [ { "name": "key", @@ -391,6 +427,74 @@ "since": "6.2.0", "group": "list" }, + "LMPOP": { + "summary": "Pop elements from a list", + "complexity": "O(N+M) where N is the number of provided keys and M is the number of elements returned.", + "arguments": [ + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "optional": true, + "multiple": true + }, + { + "name": "where", + "type": "enum", + "enum": [ + "LEFT", + "RIGHT" + ] + }, + { + "command": "COUNT", + "name": "count", + "type": "integer", + "optional": true + } + ], + "since": "7.0.0", + "group": "list" + }, + 
"BLMPOP": { + "summary": "Pop elements from a list, or block until one is available", + "complexity": "O(N+M) where N is the number of provided keys and M is the number of elements returned.", + "arguments": [ + { + "name": "timeout", + "type": "double" + }, + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "optional": true, + "multiple": true + }, + { + "name": "where", + "type": "enum", + "enum": [ + "LEFT", + "RIGHT" + ] + }, + { + "command": "COUNT", + "name": "count", + "type": "integer", + "optional": true + } + ], + "since": "7.0.0", + "group": "list" + }, "BZPOPMIN": { "summary": "Remove and return the member with the lowest score from one or more sorted sets, or block until one is available", "complexity": "O(log(N)) with N being the number of elements in the sorted set.", @@ -447,6 +551,12 @@ "since": "5.0.0", "group": "connection" }, + "CLIENT INFO": { + "summary": "Returns information about the current client connection.", + "complexity": "O(1)", + "since": "6.2.0", + "group": "connection" + }, "CLIENT KILL": { "summary": "Kill the connection of a client", "complexity": "O(N) where N is the number of client connections", @@ -485,6 +595,12 @@ "type": "string", "optional": true }, + { + "command": "LADDR", + "name": "ip:port", + "type": "string", + "optional": true + }, { "command": "SKIPME", "name": "yes/no", @@ -509,6 +625,21 @@ "pubsub" ], "optional": true + }, + { + "name": "id", + "type": "block", + "block": [ + { + "command": "ID" + }, + { + "name": "client-id", + "type": "integer", + "multiple": true + } + ], + "optional": true } ], "since": "2.4.0", @@ -526,6 +657,12 @@ "since": "6.0.0", "group": "connection" }, + "CLIENT UNPAUSE": { + "summary": "Resume processing of clients that were paused", + "complexity": "O(N) Where N is the number of paused clients", + "since": "6.2.0", + "group": "connection" + }, "CLIENT PAUSE": { "summary": "Stop processing commands from clients for some time", "complexity": 
"O(1)", @@ -533,6 +670,15 @@ { "name": "timeout", "type": "integer" + }, + { + "name": "mode", + "type": "enum", + "optional": true, + "enum": [ + "WRITE", + "ALL" + ] } ], "since": "2.9.50", @@ -569,7 +715,7 @@ }, "CLIENT TRACKING": { "summary": "Enable or disable server assisted client side caching support", - "complexity": "O(1)", + "complexity": "O(1). Some options may introduce additional complexity.", "arguments": [ { "name": "status", @@ -628,6 +774,12 @@ "since": "6.0.0", "group": "connection" }, + "CLIENT TRACKINGINFO": { + "summary": "Return information about server assisted client side caching for the current connection", + "complexity": "O(1)", + "since": "6.2.0", + "group": "connection" + }, "CLIENT UNBLOCK": { "summary": "Unblock a client blocked in a blocking command from a different connection", "complexity": "O(log N) where N is the number of client connections", @@ -974,6 +1126,36 @@ "since": "2.0.0", "group": "server" }, + "COPY": { + "summary": "Copy a key", + "complexity": "O(N) worst case for collections, where N is the number of nested items. 
O(1) for string values.", + "since": "6.2.0", + "arguments": [ + { + "name": "source", + "type": "key" + }, + { + "name": "destination", + "type": "key" + }, + { + "command": "DB", + "name": "destination-db", + "type": "integer", + "optional": true + }, + { + "name": "replace", + "type": "enum", + "enum": [ + "REPLACE" + ], + "optional": true + } + ], + "group": "generic" + }, "DBSIZE": { "summary": "Return the number of keys in the selected database", "since": "1.0.0", @@ -1079,17 +1261,45 @@ { "name": "key", "type": "key", + "optional": true, "multiple": true }, { "name": "arg", "type": "string", + "optional": true, "multiple": true } ], "since": "2.6.0", "group": "scripting" }, + "EVAL_RO": { + "summary": "Execute a read-only Lua script server side", + "complexity": "Depends on the script that is executed.", + "arguments": [ + { + "name": "script", + "type": "string" + }, + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "multiple": true + }, + { + "name": "arg", + "type": "string", + "multiple": true + } + ], + "since": "7.0.0", + "group": "scripting" + }, "EVALSHA": { "summary": "Execute a Lua script server side", "complexity": "Depends on the script that is executed.", @@ -1105,17 +1315,45 @@ { "name": "key", "type": "key", + "optional": true, "multiple": true }, { "name": "arg", "type": "string", + "optional": true, "multiple": true } ], "since": "2.6.0", "group": "scripting" }, + "EVALSHA_RO": { + "summary": "Execute a read-only Lua script server side", + "complexity": "Depends on the script that is executed.", + "arguments": [ + { + "name": "sha1", + "type": "string" + }, + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "multiple": true + }, + { + "name": "arg", + "type": "string", + "multiple": true + } + ], + "since": "7.0.0", + "group": "scripting" + }, "EXEC": { "summary": "Execute all commands issued after MULTI", "since": "1.2.0", @@ -1123,7 +1361,7 @@ }, "EXISTS": { 
"summary": "Determine if a key exists", - "complexity": "O(1)", + "complexity": "O(N) where N is the number of keys to check.", "arguments": [ { "name": "key", @@ -1145,6 +1383,17 @@ { "name": "seconds", "type": "integer" + }, + { + "name": "condition", + "type": "enum", + "enum": [ + "NX", + "XX", + "GT", + "LT" + ], + "optional": true } ], "since": "1.0.0", @@ -1161,19 +1410,83 @@ { "name": "timestamp", "type": "posix time" + }, + { + "name": "condition", + "type": "enum", + "enum": [ + "NX", + "XX", + "GT", + "LT" + ], + "optional": true } ], "since": "1.2.0", "group": "generic" }, + "EXPIRETIME": { + "summary": "Get the expiration Unix timestamp for a key", + "complexity": "O(1)", + "arguments": [ + { + "name": "key", + "type": "key" + } + ], + "since": "7.0.0", + "group": "generic" + }, + "FAILOVER": { + "summary": "Start a coordinated failover between this server and one of its replicas.", + "arguments": [ + { + "name": "target", + "type": "block", + "optional": true, + "block": [ + { + "command": "TO" + }, + { + "name": "host", + "type": "string" + }, + { + "name": "port", + "type": "integer" + }, + { + "command": "FORCE", + "optional": true + } + ] + }, + { + "command": "ABORT", + "optional": true + }, + { + "command": "TIMEOUT", + "name": "milliseconds", + "type": "integer", + "optional": true + } + ], + "since": "6.2.0", + "group": "server" + }, "FLUSHALL": { "summary": "Remove all keys from all databases", + "complexity": "O(N) where N is the total number of keys in all databases", "arguments": [ { "name": "async", "type": "enum", "enum": [ - "ASYNC" + "ASYNC", + "SYNC" ], "optional": true } @@ -1183,12 +1496,14 @@ }, "FLUSHDB": { "summary": "Remove all keys from the current database", + "complexity": "O(N) where N is the number of keys in the selected database", "arguments": [ { "name": "async", "type": "enum", "enum": [ - "ASYNC" + "ASYNC", + "SYNC" ], "optional": true } @@ -1204,6 +1519,23 @@ "name": "key", "type": "key" }, + { + "name": "condition", 
+ "type": "enum", + "enum": [ + "NX", + "XX" + ], + "optional": true + }, + { + "name": "change", + "type": "enum", + "enum": [ + "CH" + ], + "optional": true + }, { "name": [ "longitude", @@ -1240,7 +1572,7 @@ }, "GEOPOS": { "summary": "Returns longitude and latitude of members of a geospatial index", - "complexity": "O(log(N)) for each member requested, where N is the number of elements in the sorted set.", + "complexity": "O(N) where N is the number of members requested.", "arguments": [ { "name": "key", @@ -1341,9 +1673,23 @@ "optional": true }, { - "command": "COUNT", + "type": "block", "name": "count", - "type": "integer", + "block": [ + { + "name": "count", + "command": "COUNT", + "type": "integer" + }, + { + "name": "any", + "type": "enum", + "enum": [ + "ANY" + ], + "optional": true + } + ], "optional": true }, { @@ -1380,22 +1726,194 @@ "type": "key" }, { - "name": "member", - "type": "string" + "name": "member", + "type": "string" + }, + { + "name": "radius", + "type": "double" + }, + { + "name": "unit", + "type": "enum", + "enum": [ + "m", + "km", + "ft", + "mi" + ] + }, + { + "name": "withcoord", + "type": "enum", + "enum": [ + "WITHCOORD" + ], + "optional": true + }, + { + "name": "withdist", + "type": "enum", + "enum": [ + "WITHDIST" + ], + "optional": true + }, + { + "name": "withhash", + "type": "enum", + "enum": [ + "WITHHASH" + ], + "optional": true + }, + { + "type": "block", + "name": "count", + "block": [ + { + "name": "count", + "command": "COUNT", + "type": "integer" + }, + { + "name": "any", + "type": "enum", + "enum": [ + "ANY" + ], + "optional": true + } + ], + "optional": true + }, + { + "name": "order", + "type": "enum", + "enum": [ + "ASC", + "DESC" + ], + "optional": true + }, + { + "command": "STORE", + "name": "key", + "type": "key", + "optional": true + }, + { + "command": "STOREDIST", + "name": "key", + "type": "key", + "optional": true + } + ], + "since": "3.2.0", + "group": "geo" + }, + "GEOSEARCH": { + "summary": "Query a 
sorted set representing a geospatial index to fetch members inside an area of a box or a circle.", + "complexity": "O(N+log(M)) where N is the number of elements in the grid-aligned bounding box area around the shape provided as the filter and M is the number of items inside the shape", + "arguments": [ + { + "name": "key", + "type": "key" + }, + { + "command": "FROMMEMBER", + "name": "member", + "type": "string", + "optional": true + }, + { + "command": "FROMLONLAT", + "name": [ + "longitude", + "latitude" + ], + "type": [ + "double", + "double" + ], + "optional": true + }, + { + "type": "block", + "name": "circle", + "block": [ + { + "name": "radius", + "command": "BYRADIUS", + "type": "double" + }, + { + "name": "unit", + "type": "enum", + "enum": [ + "m", + "km", + "ft", + "mi" + ] + } + ], + "optional": true }, { - "name": "radius", - "type": "double" + "type": "block", + "name": "box", + "block": [ + { + "name": "width", + "command": "BYBOX", + "type": "double" + }, + { + "name": "height", + "type": "double" + }, + { + "name": "unit", + "type": "enum", + "enum": [ + "m", + "km", + "ft", + "mi" + ] + } + ], + "optional": true }, { - "name": "unit", + "name": "order", "type": "enum", "enum": [ - "m", - "km", - "ft", - "mi" - ] + "ASC", + "DESC" + ], + "optional": true + }, + { + "type": "block", + "name": "count", + "block": [ + { + "name": "count", + "command": "COUNT", + "type": "integer" + }, + { + "name": "any", + "type": "enum", + "enum": [ + "ANY" + ], + "optional": true + } + ], + "optional": true }, { "name": "withcoord", @@ -1420,11 +1938,87 @@ "WITHHASH" ], "optional": true + } + ], + "since": "6.2", + "group": "geo" + }, + "GEOSEARCHSTORE": { + "summary": "Query a sorted set representing a geospatial index to fetch members inside an area of a box or a circle, and store the result in another key.", + "complexity": "O(N+log(M)) where N is the number of elements in the grid-aligned bounding box area around the shape provided as the filter and M is the 
number of items inside the shape", + "arguments": [ + { + "name": "destination", + "type": "key" + }, + { + "name": "source", + "type": "key" + }, + { + "command": "FROMMEMBER", + "name": "member", + "type": "string", + "optional": true + }, + { + "command": "FROMLONLAT", + "name": [ + "longitude", + "latitude" + ], + "type": [ + "double", + "double" + ], + "optional": true }, { - "command": "COUNT", - "name": "count", - "type": "integer", + "type": "block", + "name": "circle", + "block": [ + { + "name": "radius", + "command": "BYRADIUS", + "type": "double" + }, + { + "name": "unit", + "type": "enum", + "enum": [ + "m", + "km", + "ft", + "mi" + ] + } + ], + "optional": true + }, + { + "type": "block", + "name": "box", + "block": [ + { + "name": "width", + "command": "BYBOX", + "type": "double" + }, + { + "name": "height", + "type": "double" + }, + { + "name": "unit", + "type": "enum", + "enum": [ + "m", + "km", + "ft", + "mi" + ] + } + ], "optional": true }, { @@ -1437,19 +2031,35 @@ "optional": true }, { - "command": "STORE", - "name": "key", - "type": "key", + "type": "block", + "name": "count", + "block": [ + { + "name": "count", + "command": "COUNT", + "type": "integer" + }, + { + "name": "any", + "type": "enum", + "enum": [ + "ANY" + ], + "optional": true + } + ], "optional": true }, { - "command": "STOREDIST", - "name": "key", - "type": "key", + "name": "storedist", + "type": "enum", + "enum": [ + "STOREDIST" + ], "optional": true } ], - "since": "3.2.0", + "since": "6.2", "group": "geo" }, "GET": { @@ -1478,6 +2088,42 @@ } ], "since": "2.2.0", + "group": "bitmap" + }, + "GETDEL": { + "summary":"Get the value of a key and delete the key", + "complexity": "O(1)", + "arguments": [ + { + "name": "key", + "type": "key" + } + ], + "since": "6.2.0", + "group": "string" + }, + "GETEX": { + "summary": "Get the value of a key and optionally set its expiration", + "complexity": "O(1)", + "arguments": [ + { + "name": "key", + "type": "key" + }, + { + "name": 
"expiration", + "type": "enum", + "enum": [ + "EX seconds", + "PX milliseconds", + "EXAT timestamp", + "PXAT milliseconds-timestamp", + "PERSIST" + ], + "optional": true + } + ], + "since": "6.2.0", "group": "string" }, "GETRANGE": { @@ -1534,30 +2180,37 @@ "group": "hash" }, "HELLO": { - "summary": "switch Redis protocol", + "summary": "Handshake with Redis", "complexity": "O(1)", "arguments": [ { - "name": "protover", - "type": "integer" - }, - { - "command": "AUTH", - "name": [ - "username", - "password" - ], - "type": [ - "string", - "string" + "name": "arguments", + "type": "block", + "block": [ + { + "name": "protover", + "type": "integer" + }, + { + "command": "AUTH", + "name": [ + "username", + "password" + ], + "type": [ + "string", + "string" + ], + "optional": true + }, + { + "command": "SETNAME", + "name": "clientname", + "type": "string", + "optional": true + } ], "optional": true - }, - { - "command": "SETNAME", - "name": "clientname", - "type": "string", - "optional": true } ], "since": "6.0.0", @@ -1754,6 +2407,37 @@ "since": "2.0.0", "group": "hash" }, + "HRANDFIELD": { + "summary": "Get one or multiple random fields from a hash", + "complexity": "O(N) where N is the number of fields returned", + "arguments": [ + { + "name": "key", + "type": "key" + }, + { + "name": "options", + "type": "block", + "block": [ + { + "name": "count", + "type": "integer" + }, + { + "name": "withvalues", + "type": "enum", + "enum": [ + "WITHVALUES" + ], + "optional": true + } + ], + "optional": true + } + ], + "since": "6.2.0", + "group": "hash" + }, "HSTRLEN": { "summary": "Get the length of the value of a hash field", "complexity": "O(1)", @@ -1925,12 +2609,17 @@ "group": "list" }, "LPOP": { - "summary": "Remove and get the first element in a list", - "complexity": "O(1)", + "summary": "Remove and get the first elements in a list", + "complexity": "O(N) where N is the number of elements returned", "arguments": [ { "name": "key", "type": "key" + }, + { + "name": 
"count", + "type": "integer", + "optional": true } ], "since": "1.0.0", @@ -2348,6 +3037,17 @@ { "name": "milliseconds", "type": "integer" + }, + { + "name": "condition", + "type": "enum", + "enum": [ + "NX", + "XX", + "GT", + "LT" + ], + "optional": true } ], "since": "2.6.0", @@ -2364,11 +3064,34 @@ { "name": "milliseconds-timestamp", "type": "posix time" + }, + { + "name": "condition", + "type": "enum", + "enum": [ + "NX", + "XX", + "GT", + "LT" + ], + "optional": true } ], "since": "2.6.0", "group": "generic" }, + "PEXPIRETIME": { + "summary": "Get the expiration Unix timestamp for a key in milliseconds", + "complexity": "O(1)", + "arguments": [ + { + "name": "key", + "type": "key" + } + ], + "since": "7.0.0", + "group": "generic" + }, "PFADD": { "summary": "Adds the specified elements to the specified HyperLogLog.", "complexity": "O(1) to add every element.", @@ -2380,6 +3103,7 @@ { "name": "element", "type": "string", + "optional": true, "multiple": true } ], @@ -2580,6 +3304,11 @@ "since": "1.0.0", "group": "generic" }, + "RESET": { + "summary": "Reset the connection", + "since": "6.2", + "group": "connection" + }, "RESTORE": { "summary": "Create a key using the provided serialized value, previously obtained using DUMP.", "complexity": "O(1) to create the new key and additional O(N*M) to reconstruct the serialized value, where N is the number of Redis objects composing the value and M their average size. For small string values the time complexity is thus O(1)+O(1*M) where M is small, so simply O(1). 
However for sorted set values the complexity is O(N*M*log(N)) because inserting values into sorted sets is O(log(N)).", @@ -2634,12 +3363,17 @@ "group": "server" }, "RPOP": { - "summary": "Remove and get the last element in a list", - "complexity": "O(1)", + "summary": "Remove and get the last elements in a list", + "complexity": "O(N) where N is the number of elements returned", "arguments": [ { "name": "key", "type": "key" + }, + { + "name": "count", + "type": "integer", + "optional": true } ], "since": "1.0.0", @@ -2793,6 +3527,17 @@ }, "SCRIPT FLUSH": { "summary": "Remove all the scripts from the script cache.", + "arguments": [ + { + "name": "async", + "type": "enum", + "enum": [ + "ASYNC", + "SYNC" + ], + "optional": true + } + ], "complexity": "O(N) with N being the number of scripts in cache", "since": "2.6.0", "group": "scripting" @@ -2874,6 +3619,8 @@ "enum": [ "EX seconds", "PX milliseconds", + "EXAT timestamp", + "PXAT milliseconds-timestamp", "KEEPTTL" ], "optional": true @@ -2917,7 +3664,7 @@ } ], "since": "2.2.0", - "group": "string" + "group": "bitmap" }, "SETEX": { "summary": "Set the value and expiration of a key", @@ -3004,6 +3751,19 @@ "since": "1.0.0", "group": "set" }, + "SINTERCARD": { + "summary": "Intersect multiple sets and return the cardinality of the result", + "complexity": "O(N*M) worst case where N is the cardinality of the smallest set and M is the number of sets.", + "arguments": [ + { + "name": "key", + "type": "key", + "multiple": true + } + ], + "since": "7.0.0", + "group": "set" + }, "SINTERSTORE": { "summary": "Intersect multiple sets and store the resulting set in a key", "complexity": "O(N*M) worst case where N is the cardinality of the smallest set and M is the number of sets.", @@ -3122,19 +3882,79 @@ }, { "name": "destination", - "type": "key" - }, - { - "name": "member", - "type": "string" + "type": "key" + }, + { + "name": "member", + "type": "string" + } + ], + "since": "1.0.0", + "group": "set" + }, + "SORT": { + 
"summary": "Sort the elements in a list, set or sorted set", + "complexity": "O(N+M*log(M)) where N is the number of elements in the list or set to sort, and M the number of returned elements. When the elements are not sorted, complexity is O(N).", + "arguments": [ + { + "name": "key", + "type": "key" + }, + { + "command": "BY", + "name": "pattern", + "type": "pattern", + "optional": true + }, + { + "command": "LIMIT", + "name": [ + "offset", + "count" + ], + "type": [ + "integer", + "integer" + ], + "optional": true + }, + { + "command": "GET", + "name": "pattern", + "type": "string", + "optional": true, + "multiple": true + }, + { + "name": "order", + "type": "enum", + "enum": [ + "ASC", + "DESC" + ], + "optional": true + }, + { + "name": "sorting", + "type": "enum", + "enum": [ + "ALPHA" + ], + "optional": true + }, + { + "command": "STORE", + "name": "destination", + "type": "key", + "optional": true } ], "since": "1.0.0", - "group": "set" + "group": "generic" }, - "SORT": { - "summary": "Sort the elements in a list, set or sorted set", - "complexity": "O(N+M*log(M)) where N is the number of elements in the list or set to sort, and M the number of returned elements. When the elements are not sorted, complexity is currently O(N) as there is a copy step that will be avoided in next releases.", + "SORT_RO": { + "summary": "Sort the elements in a list, set or sorted set. Read-only variant of SORT.", + "complexity": "O(N+M*log(M)) where N is the number of elements in the list or set to sort, and M the number of returned elements. 
When the elements are not sorted, complexity is O(N).", "arguments": [ { "name": "key", @@ -3181,20 +4001,14 @@ "ALPHA" ], "optional": true - }, - { - "command": "STORE", - "name": "destination", - "type": "key", - "optional": true } ], - "since": "1.0.0", + "since": "7.0.0", "group": "generic" }, "SPOP": { "summary": "Remove and return one or multiple random members from a set", - "complexity": "O(1)", + "complexity": "Without the count argument O(1), otherwise O(N) where N is the value of the passed count.", "arguments": [ { "name": "key", @@ -3320,6 +4134,7 @@ }, "SWAPDB": { "summary": "Swaps two Redis databases", + "complexity": "O(N) where N is the count of clients watching or blocking on keys from both databases.", "arguments": [ { "name": "index1", @@ -3373,7 +4188,7 @@ "group": "generic" }, "TTL": { - "summary": "Get the time to live for a key", + "summary": "Get the time to live for a key in seconds", "complexity": "O(1)", "arguments": [ { @@ -3547,6 +4362,52 @@ "since": "2.0.0", "group": "sorted_set" }, + "ZDIFF": { + "summary": "Subtract multiple sorted sets", + "complexity": "O(L + (N-K)log(N)) worst case where L is the total number of elements in all the sets, N is the size of the first set, and K is the size of the result set.", + "arguments": [ + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "multiple": true + }, + { + "name": "withscores", + "type": "enum", + "enum": [ + "WITHSCORES" + ], + "optional": true + } + ], + "since": "6.2.0", + "group": "sorted_set" + }, + "ZDIFFSTORE": { + "summary": "Subtract multiple sorted sets and store the resulting sorted set in a new key", + "complexity": "O(L + (N-K)log(N)) worst case where L is the total number of elements in all the sets, N is the size of the first set, and K is the size of the result set.", + "arguments": [ + { + "name": "destination", + "type": "key" + }, + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + 
"multiple": true + } + ], + "since": "6.2.0", + "group": "sorted_set" + }, "ZINCRBY": { "summary": "Increment the score of a member in a sorted set", "complexity": "O(log(N)) where N is the number of elements in the sorted set.", @@ -3610,6 +4471,23 @@ "since": "6.2.0", "group": "sorted_set" }, + "ZINTERCARD": { + "summary": "Intersect multiple sorted sets and return the cardinality of the result", + "complexity": "O(N*K) worst case with N being the smallest input sorted set, K being the number of input sorted sets.", + "arguments": [ + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "multiple": true + } + ], + "since": "7.0.0", + "group": "sorted_set" + }, "ZINTERSTORE": { "summary": "Intersect multiple sorted sets and store the resulting sorted set in a new key", "complexity": "O(N*K)+O(M*log(M)) worst case with N being the smallest input sorted set, K being the number of input sorted sets and M being the number of elements in the resulting sorted set.", @@ -3703,8 +4581,92 @@ "since": "5.0.0", "group": "sorted_set" }, + "ZRANDMEMBER": { + "summary": "Get one or multiple random elements from a sorted set", + "complexity": "O(N) where N is the number of elements returned", + "arguments": [ + { + "name": "key", + "type": "key" + }, + { + "name": "options", + "type": "block", + "block": [ + { + "name": "count", + "type": "integer" + }, + { + "name": "withscores", + "type": "enum", + "enum": [ + "WITHSCORES" + ], + "optional": true + } + ], + "optional": true + } + ], + "since": "6.2.0", + "group": "sorted_set" + }, + "ZRANGESTORE": { + "summary": "Store a range of members from sorted set into another key", + "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements stored into the destination key.", + "arguments": [ + { + "name": "dst", + "type": "key" + }, + { + "name": "src", + "type": "key" + }, + { + "name": "min", + "type": "string" + }, + { + "name": "max", + "type": 
"string" + }, + { + "name": "sortby", + "type": "enum", + "enum": [ + "BYSCORE", + "BYLEX" + ], + "optional": true + }, + { + "name": "rev", + "type": "enum", + "enum": [ + "REV" + ], + "optional": true + }, + { + "command": "LIMIT", + "name": [ + "offset", + "count" + ], + "type": [ + "integer", + "integer" + ], + "optional": true + } + ], + "since": "6.2.0", + "group": "sorted_set" + }, "ZRANGE": { - "summary": "Return a range of members in a sorted set, by index", + "summary": "Return a range of members in a sorted set", "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements returned.", "arguments": [ { @@ -3712,12 +4674,41 @@ "type": "key" }, { - "name": "start", - "type": "integer" + "name": "min", + "type": "string" }, { - "name": "stop", - "type": "integer" + "name": "max", + "type": "string" + }, + { + "name": "sortby", + "type": "enum", + "enum": [ + "BYSCORE", + "BYLEX" + ], + "optional": true + }, + { + "name": "rev", + "type": "enum", + "enum": [ + "REV" + ], + "optional": true + }, + { + "command": "LIMIT", + "name": [ + "offset", + "count" + ], + "type": [ + "integer", + "integer" + ], + "optional": true }, { "name": "withscores", @@ -4283,15 +5274,56 @@ }, "XADD": { "summary": "Appends a new entry to a stream", - "complexity": "O(1)", + "complexity": "O(1) when adding a new entry, O(N) when trimming where N being the number of entires evicted.", "arguments": [ { "name": "key", "type": "key" }, { - "name": "ID", - "type": "string" + "command": "NOMKSTREAM", + "optional": true + }, + { + "name": "trim", + "type": "block", + "optional": true, + "block": [ + { + "name": "strategy", + "type": "enum", + "enum": [ + "MAXLEN", + "MINID" + ] + }, + { + "name": "operator", + "type": "enum", + "enum": [ + "=", + "~" + ], + "optional": true + }, + { + "name": "threshold", + "type": "string" + }, + { + "command": "LIMIT", + "name": "count", + "type": "integer", + "optional": true + } + ] + }, + { + "type": 
"enum", + "enum": [ + "*", + "ID" + ] }, { "name": [ @@ -4317,23 +5349,37 @@ "type": "key" }, { - "name": "strategy", - "type": "enum", - "enum": [ - "MAXLEN" + "name": "trim", + "type": "block", + "block": [ + { + "name": "strategy", + "type": "enum", + "enum": [ + "MAXLEN", + "MINID" + ] + }, + { + "name": "operator", + "type": "enum", + "enum": [ + "=", + "~" + ], + "optional": true + }, + { + "name": "threshold", + "type": "string" + }, + { + "command": "LIMIT", + "name": "count", + "type": "integer", + "optional": true + } ] - }, - { - "name": "approx", - "type": "enum", - "enum": [ - "~" - ], - "optional": true - }, - { - "name": "count", - "type": "integer" } ], "since": "5.0.0", @@ -4409,7 +5455,7 @@ "group": "stream" }, "XLEN": { - "summary": "Return the number of entires in a stream", + "summary": "Return the number of entries in a stream", "complexity": "O(1)", "arguments": [ { @@ -4449,7 +5495,7 @@ "multiple": true }, { - "name": "id", + "name": "ID", "type": "string", "multiple": true } @@ -4462,30 +5508,58 @@ "complexity": "O(1) for all the subcommands, with the exception of the DESTROY subcommand which takes an additional O(M) time in order to delete the M entries inside the consumer group pending entries list (PEL).", "arguments": [ { - "command": "CREATE", - "name": [ - "key", - "groupname", - "id-or-$" - ], - "type": [ - "key", - "string", - "string" + "name": "create", + "type": "block", + "block": [ + { + "command": "CREATE", + "name": [ + "key", + "groupname" + ], + "type": [ + "key", + "string" + ] + }, + { + "name": "id", + "type": "enum", + "enum": [ + "ID", + "$" + ] + }, + { + "command": "MKSTREAM", + "optional": true + } ], "optional": true }, { - "command": "SETID", - "name": [ - "key", - "groupname", - "id-or-$" - ], - "type": [ - "key", - "string", - "string" + "name": "setid", + "type": "block", + "block": [ + { + "command": "SETID", + "name": [ + "key", + "groupname" + ], + "type": [ + "key", + "string" + ] + }, + { + "name": "id", + 
"type": "enum", + "enum": [ + "ID", + "$" + ] + } ], "optional": true }, @@ -4671,9 +5745,9 @@ "since": "5.0.0", "group": "stream" }, - "XPENDING": { - "summary": "Return information and entries from a stream consumer group pending entries list, that are messages fetched but never acknowledged.", - "complexity": "O(N) with N being the number of elements returned, so asking for a small fixed number of entries per call is O(1). When the command returns just the summary it runs in O(1) time assuming the list of consumers is small, otherwise there is additional O(N) time needed to iterate every consumer.", + "XAUTOCLAIM": { + "summary": "Changes (or acquires) ownership of messages in a consumer group, as if the messages were delivered to the specified consumer.", + "complexity": "O(1) if COUNT is small.", "arguments": [ { "name": "key", @@ -4684,21 +5758,74 @@ "type": "string" }, { - "name": [ - "start", - "end", - "count" - ], - "type": [ - "string", - "string", - "integer" + "name": "consumer", + "type": "string" + }, + { + "name": "min-idle-time", + "type": "string" + }, + { + "name": "start", + "type": "string" + }, + { + "command": "COUNT", + "name": "count", + "type": "integer", + "optional": true + }, + { + "name": "justid", + "enum": [ + "JUSTID" ], "optional": true + } + ], + "since": "6.2.0", + "group": "stream" + }, + "XPENDING": { + "summary": "Return information and entries from a stream consumer group pending entries list, that are messages fetched but never acknowledged.", + "complexity": "O(N) with N being the number of elements returned, so asking for a small fixed number of entries per call is O(1). O(M), where M is the total number of entries scanned when used with the IDLE filter. 
When the command returns just the summary and the list of consumers is small, it runs in O(1) time; otherwise, an additional O(N) time for iterating every consumer.", + "arguments": [ + { + "name": "key", + "type": "key" }, { - "name": "consumer", - "type": "string", + "name": "group", + "type": "string" + }, + { + "type": "block", + "name": "filters", + "block": [ + { + "command": "IDLE", + "name": "min-idle-time", + "type": "integer", + "optional": true + }, + { + "name": "start", + "type": "string" + }, + { + "name": "end", + "type": "string" + }, + { + "name": "count", + "type": "integer" + }, + { + "name": "consumer", + "type": "string", + "optional": true + } + ], "optional": true } ], diff --git a/commands/acl-getuser.md b/commands/acl-getuser.md index c2e051df..d866d2a2 100644 --- a/commands/acl-getuser.md +++ b/commands/acl-getuser.md @@ -8,6 +8,10 @@ the set of rules used to configure the user, it is still functionally identical. @array-reply: a list of ACL rule definitions for the user. +@history + +* `>= 6.2`: Added Pub/Sub channel patterns. + @examples Here's the default configuration for the default user: @@ -25,4 +29,6 @@ Here's the default configuration for the default user: 6) "+@all" 7) "keys" 8) 1) "*" +9) "channels" +10) 1) "*" ``` diff --git a/commands/acl-list.md b/commands/acl-list.md index 44b6c3c9..e21e7104 100644 --- a/commands/acl-list.md +++ b/commands/acl-list.md @@ -12,6 +12,6 @@ An array of strings. 
``` > ACL LIST -1) "user antirez on #9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08 ~objects:* +@all -@admin -@dangerous" -2) "user default on nopass ~* +@all" +1) "user antirez on #9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08 ~objects:* &* +@all -@admin -@dangerous" +2) "user default on nopass ~* &* +@all" ``` diff --git a/commands/acl-setuser.md b/commands/acl-setuser.md index a71e6ccb..ee9b6435 100644 --- a/commands/acl-setuser.md +++ b/commands/acl-setuser.md @@ -44,9 +44,12 @@ This is a list of all the supported Redis ACL rules: * `on`: set the user as active, it will be possible to authenticate as this user using `AUTH `. * `off`: set user as not active, it will be impossible to log as this user. Please note that if a user gets disabled (set to off) after there are connections already authenticated with such a user, the connections will continue to work as expected. To also kill the old connections you can use `CLIENT KILL` with the user option. An alternative is to delete the user with `ACL DELUSER`, that will result in all the connections authenticated as the deleted user to be disconnected. -* `~`: add the specified key pattern (glob style pattern, like in the `KEYS` command), to the list of key patterns accessible by the user. You can add as many key patterns you want to the same user. Example: `~objects:*` +* `~`: add the specified key pattern (glob style pattern, like in the `KEYS` command), to the list of key patterns accessible by the user. You can add multiple key patterns to the same user. Example: `~objects:*` * `allkeys`: alias for `~*`, it allows the user to access all the keys. -* `resetkey`: removes all the key patterns from the list of key patterns the user can access. +* `resetkeys`: removes all the key patterns from the list of key patterns the user can access. +* `&`: add the specified glob style pattern to the list of Pub/Sub channel patterns accessible by the user. 
You can add multiple channel patterns to the same user. Example: `&chatroom:*` +* `allchannels`: alias for `&*`, it allows the user to access all Pub/Sub channels. +* `resetchannels`: removes all channel patterns from the list of Pub/Sub channel patterns the user can access. * `+`: add this command to the list of the commands the user can call. Example: `+zadd`. * `+@`: add all the commands in the specified category to the list of commands the user is able to execute. Example: `+@string` (adds all the string commands). For a list of categories check the `ACL CAT` command. * `+|`: add the specified command to the list of the commands the user can execute, but only for the specified subcommand. Example: `+config|get`. Generates an error if the specified command is already allowed in its full version for the specified user. Note: there is no symmetrical command to remove subcommands, you need to remove the whole command and re-add the subcommands you want to allow. This is much safer than removing subcommands, in the future Redis may add new dangerous subcommands, so configuring by subtraction is not good. @@ -67,6 +70,10 @@ This is a list of all the supported Redis ACL rules: If the rules contain errors, the error is returned. +@history + +* `>= 6.2`: Added Pub/Sub channel patterns. + @examples ``` diff --git a/commands/asking.md b/commands/asking.md new file mode 100644 index 00000000..d98643c2 --- /dev/null +++ b/commands/asking.md @@ -0,0 +1,10 @@ +When a cluster client receives an `-ASK` redirect, the `ASKING` command is sent to the target node followed by the command which was redirected. +This is normally done automatically by cluster clients. + +If an `-ASK` redirect is received during a transaction, only one ASKING command needs to be sent to the target node before sending the complete transaction to the target node. + +See [ASK redirection in the Redis Cluster Specification](/topics/cluster-spec#ask-redirection) for details. 
+ +@return + +@simple-string-reply: `OK`. diff --git a/commands/bitfield.md b/commands/bitfield.md index 87468707..107fe61f 100644 --- a/commands/bitfield.md +++ b/commands/bitfield.md @@ -10,8 +10,8 @@ For example the following command increments an 5 bit signed integer at bit offs Note that: -1. Addressing with `GET` bits outside the current string length (including the case the key does not exist at all), results in the operation to be performed like the missing part all consists of bits set to 0. -2. Addressing with `SET` or `INCRBY` bits outside the current string length will enlarge the string, zero-padding it, as needed, for the minimal length needed, according to the most far bit touched. +1. Addressing with `!GET` bits outside the current string length (including the case the key does not exist at all), results in the operation to be performed like the missing part all consists of bits set to 0. +2. Addressing with `!SET` or `!INCRBY` bits outside the current string length will enlarge the string, zero-padding it, as needed, for the minimal length needed, according to the most far bit touched. ## Supported subcommands and integer types @@ -22,7 +22,7 @@ The following is the list of supported commands. * **INCRBY** `` `` `` -- Increments or decrements (if a negative increment is given) the specified bit field and returns the new value. There is another subcommand that only changes the behavior of successive -`INCRBY` subcommand calls by setting the overflow behavior: +`!INCRBY` and `!SET` subcommands calls by setting the overflow behavior: * **OVERFLOW** `[WRAP|SAT|FAIL]` @@ -59,8 +59,8 @@ the following behaviors: * **SAT**: uses saturation arithmetic, that is, on underflows the value is set to the minimum integer value, and on overflows to the maximum integer value. For example incrementing an `i8` integer starting from value 120 with an increment of 10, will result into the value 127, and further increments will always keep the value at 127. 
The same happens on underflows, but towards the value is blocked at the most negative value. * **FAIL**: in this mode no operation is performed on overflows or underflows detected. The corresponding return value is set to NULL to signal the condition to the caller. -Note that each `OVERFLOW` statement only affects the `INCRBY` commands -that follow it in the list of subcommands, up to the next `OVERFLOW` +Note that each `OVERFLOW` statement only affects the `!INCRBY` and `!SET` +commands that follow it in the list of subcommands, up to the next `OVERFLOW` statement. By default, **WRAP** is used if not otherwise specified. diff --git a/commands/bitfield_ro.md b/commands/bitfield_ro.md new file mode 100644 index 00000000..94057a11 --- /dev/null +++ b/commands/bitfield_ro.md @@ -0,0 +1,19 @@ +Read-only variant of the `BITFIELD` command. +It is like the original `BITFIELD` but only accepts `!GET` subcommand and can safely be used in read-only replicas. + +Since the original `BITFIELD` has `!SET` and `!INCRBY` options it is technically flagged as a writing command in the Redis command table. +For this reason read-only replicas in a Redis Cluster will redirect it to the master instance even if the connection is in read-only mode (see the `READONLY` command of Redis Cluster). + +Since Redis 6.2, the `BITFIELD_RO` variant was introduced in order to allow `BITFIELD` behavior in read-only replicas without breaking compatibility on command flags. + +See original `BITFIELD` for more details. + +@examples + +``` +BITFIELD_RO hello GET i8 16 +``` + +@return + +@array-reply: An array with each entry being the corresponding result of the subcommand given at the same position. diff --git a/commands/blmove.md b/commands/blmove.md index e1d5be92..463a2dca 100644 --- a/commands/blmove.md +++ b/commands/blmove.md @@ -2,7 +2,7 @@ When `source` contains elements, this command behaves exactly like `LMOVE`. When used inside a `MULTI`/`EXEC` block, this command behaves exactly like `LMOVE`. 
When `source` is empty, Redis will block the connection until another client -pushes to it or until `timeout` is reached. +pushes to it or until `timeout` (a double value specifying the maximum number of seconds to block) is reached. A `timeout` of zero can be used to block indefinitely. This command comes in place of the now deprecated `BRPOPLPUSH`. Doing diff --git a/commands/blmpop.md b/commands/blmpop.md new file mode 100644 index 00000000..fd31eb8a --- /dev/null +++ b/commands/blmpop.md @@ -0,0 +1,30 @@ +`BLMPOP` is the blocking variant of `LMPOP`. + +When any of the lists contains elements, this command behaves exactly like `LMPOP`. +When used inside a `MULTI`/`EXEC` block, this command behaves exactly like `LMPOP`. +When all lists are empty, Redis will block the connection until another client pushes to it or until the `timeout` (a double value specifying the maximum number of seconds to block) elapses. +A `timeout` of zero can be used to block indefinitely. + +See `LMPOP` for more information. + +@return + +@array-reply: specifically: + +* A `nil` when no element could be popped, and timeout is reached. +* A two-element array with the first element being the name of the key from which elements were popped, and the second element is an array of elements. + +@examples + +```cli +DEL mylist mylist2 +LPUSH mylist "one" "two" "three" "four" "five" +BLMPOP 1 1 mylist LEFT COUNT 2 +LRANGE mylist 0 -1 +LPUSH mylist2 "a" "b" "c" "d" "e" +BLMPOP 1 2 mylist mylist2 LEFT COUNT 3 +LRANGE mylist 0 -1 +BLMPOP 1 2 mylist mylist2 RIGHT COUNT 10 +LRANGE mylist2 0 -1 +EXISTS mylist mylist2 +``` diff --git a/commands/brpoplpush.md b/commands/brpoplpush.md index 3547ff49..3e8b0a89 100644 --- a/commands/brpoplpush.md +++ b/commands/brpoplpush.md @@ -5,7 +5,7 @@ When `source` is empty, Redis will block the connection until another client pushes to it or until `timeout` is reached. A `timeout` of zero can be used to block indefinitely. 
-As per Redis 6.2.0, BRPOPLPUSH is considered deprecated. Please use `BLMOVE` in +As per Redis 6.2.0, BRPOPLPUSH is considered deprecated. Please prefer `BLMOVE` in new code. See `RPOPLPUSH` for more information. diff --git a/commands/client-info.md b/commands/client-info.md new file mode 100644 index 00000000..f60592e1 --- /dev/null +++ b/commands/client-info.md @@ -0,0 +1,13 @@ +The command returns information and statistics about the current client connection in a mostly human readable format. + +The reply format is identical to that of `CLIENT LIST`, and the content consists only of information about the current client. + +@examples + +```cli +CLIENT INFO +``` + +@return + +@bulk-string-reply: a unique string, as described at the `CLIENT LIST` page, for the current client. diff --git a/commands/client-kill.md b/commands/client-kill.md index 38af71ca..d0bb27ea 100644 --- a/commands/client-kill.md +++ b/commands/client-kill.md @@ -1,11 +1,10 @@ -The `CLIENT KILL` command closes a given client connection. Up to Redis 2.8.11 it was possible to close a connection only by client address, using the following form: +The `CLIENT KILL` command closes a given client connection. This command supports two formats, the old format: CLIENT KILL addr:port The `ip:port` should match a line returned by the `CLIENT LIST` command (`addr` field). -However starting with Redis 2.8.12 or greater, the command accepts the following -form: +The new format: CLIENT KILL ... ... @@ -13,13 +12,12 @@ With the new form it is possible to kill clients by different attributes instead of killing just by address. The following filters are available: * `CLIENT KILL ADDR ip:port`. This is exactly the same as the old three-arguments behavior. -* `CLIENT KILL ID client-id`. Allows to kill a client by its unique `ID` field, which was introduced in the `CLIENT LIST` command starting from Redis 2.8.12.
-* `CLIENT KILL TYPE type`, where *type* is one of `normal`, `master`, `slave` and `pubsub` (the `master` type is available from v3.2). This closes the connections of **all the clients** in the specified class. Note that clients blocked into the `MONITOR` command are considered to belong to the `normal` class. +* `CLIENT KILL LADDR ip:port`. Kill all clients connected to specified local (bind) address. +* `CLIENT KILL ID client-id`. Allows to kill a client by its unique `ID` field. Client `ID`'s are retrieved using the `CLIENT LIST` command. +* `CLIENT KILL TYPE type`, where *type* is one of `normal`, `master`, `replica` and `pubsub`. This closes the connections of **all the clients** in the specified class. Note that clients blocked into the `MONITOR` command are considered to belong to the `normal` class. * `CLIENT KILL USER username`. Closes all the connections that are authenticated with the specified [ACL](/topics/acl) username, however it returns an error if the username does not map to an existing ACL user. * `CLIENT KILL SKIPME yes/no`. By default this option is set to `yes`, that is, the client calling the command will not get killed, however setting this option to `no` will have the effect of also killing the client calling the command. -**Note: starting with Redis 5 the project is no longer using the slave word. You can use `TYPE replica` instead, however the old form is still supported for backward compatibility.** - It is possible to provide multiple filters at the same time. The command will handle multiple filters via logical AND. For example: CLIENT KILL addr 127.0.0.1:12345 type pubsub @@ -53,3 +51,11 @@ When called with the three arguments format: When called with the filter / value format: @integer-reply: the number of clients killed. + +@history + +* `>= 2.8.12`: Added new filter format. +* `>= 2.8.12`: `ID` option. +* `>= 3.2`: Added `master` type for `TYPE` option. +* `>= 5`: Replaced `slave` `TYPE` with `replica`.
`slave` still supported for backward compatibility. +* `>= 6.2`: `LADDR` option. diff --git a/commands/client-list.md b/commands/client-list.md index 1ed45be6..5a00eb00 100644 --- a/commands/client-list.md +++ b/commands/client-list.md @@ -1,7 +1,9 @@ The `CLIENT LIST` command returns information and statistics about the client connections server in a mostly human readable format. -As of v5.0, the optional `TYPE type` subcommand can be used to filter the list by clients' type, where *type* is one of `normal`, `master`, `replica` and `pubsub`. Note that clients blocked into the `MONITOR` command are considered to belong to the `normal` class. +You can use one of the optional subcommands to filter the list. The `TYPE type` subcommand filters the list by clients' type, where *type* is one of `normal`, `master`, `replica`, and `pubsub`. Note that clients blocked by the `MONITOR` command belong to the `normal` class. + +The `ID` filter only returns entries for clients with IDs matching the `client-id` arguments. @return @@ -13,9 +15,10 @@ As of v5.0, the optional `TYPE type` subcommand can be used to filter the list b Here is the meaning of the fields: -* `id`: an unique 64-bit client ID (introduced in Redis 2.8.12). +* `id`: a unique 64-bit client ID.
* `name`: the name set by the client with `CLIENT SETNAME` * `addr`: address/port of the client +* `laddr`: address/port of local address client connected to (bind address) * `fd`: file descriptor corresponding to the socket * `age`: total duration of the connection in seconds * `idle`: idle time of the connection in seconds @@ -33,6 +36,8 @@ Here is the meaning of the fields: * `cmd`: last command played * `argv-mem`: incomplete arguments for the next command (already extracted from query buffer) * `tot-mem`: total memory consumed by this client in its various buffers +* `redir`: client id of current client tracking redirection +* `user`: the authenticated username of the client The client flags can be a combination of: @@ -53,6 +58,7 @@ U: the client is connected via a Unix domain socket x: the client is in a MULTI/EXEC context t: the client enabled keys tracking in order to perform client side caching R: the client tracking target client is invalid +B: the client enabled broadcast tracking mode ``` The file descriptor events can be: @@ -68,3 +74,9 @@ New fields are regularly added for debugging purpose. Some could be removed in the future. A version safe Redis client using this command should parse the output accordingly (i.e. handling gracefully missing fields, skipping unknown fields). + +@history + +* `>= 2.8.12`: Added unique client `id` field. +* `>= 5.0`: Added optional `TYPE` filter. +* `>= 6.2`: Added `laddr` field and the optional `ID` filter. diff --git a/commands/client-pause.md b/commands/client-pause.md index eb7a2f10..2806072a 100644 --- a/commands/client-pause.md +++ b/commands/client-pause.md @@ -2,10 +2,22 @@ The command performs the following actions: -* It stops processing all the pending commands from normal and pub/sub clients. However interactions with replicas will continue normally. +* It stops processing all the pending commands from normal and pub/sub clients for the given mode. However interactions with replicas will continue normally. 
Note that clients are formally paused when they try to execute a command, so no work is taken on the server side for inactive clients. * However it returns OK to the caller ASAP, so the `CLIENT PAUSE` command execution is not paused by itself. * When the specified amount of time has elapsed, all the clients are unblocked: this will trigger the processing of all the commands accumulated in the query buffer of every client during the pause. +Client pause currently supports two modes: + +* `ALL`: This is the default mode. All client commands are blocked. +* `WRITE`: Clients are only blocked if they attempt to execute a write command. + +For the `WRITE` mode, some commands have special behavior: + +* `EVAL`/`EVALSHA`: Will block client for all scripts. +* `PUBLISH`: Will block client. +* `PFCOUNT`: Will block client. +* `WAIT`: Acknowledgments will be delayed, so this command will appear blocked. + This command is useful as it makes able to switch clients from a Redis instance to another one in a controlled way. For example during an instance upgrade the system administrator could do the following: * Pause the clients using `CLIENT PAUSE` @@ -13,7 +25,11 @@ This command is useful as it makes able to switch clients from a Redis instance * Turn one of the replicas into a master. * Reconfigure clients to connect with the new master. -It is possible to send `CLIENT PAUSE` in a MULTI/EXEC block together with the `INFO replication` command in order to get the current master offset at the time the clients are blocked. This way it is possible to wait for a specific offset in the replica side in order to make sure all the replication stream was processed. +Since Redis 6.2, the recommended mode for client pause is `WRITE`. This mode will stop all replication traffic, can be +aborted with the `CLIENT UNPAUSE` command, and allows reconfiguring the old master without risking accepting writes after the +failover. This is also the mode used during cluster failover. 
+ +For versions before 6.2, it is possible to send `CLIENT PAUSE` in a MULTI/EXEC block together with the `INFO replication` command in order to get the current master offset at the time the clients are blocked. This way it is possible to wait for a specific offset in the replica side in order to make sure all the replication stream was processed. Since Redis 3.2.10 / 4.0.0, this command also prevents keys to be evicted or expired during the time clients are paused. This way the dataset is guaranteed @@ -22,3 +38,8 @@ to be static not just from the point of view of clients not being able to write, @return @simple-string-reply: The command returns OK or an error if the timeout is invalid. + +@history + +* `>= 3.2.10`: Client pause prevents key eviction and expiry as well. +* `>= 6.2`: CLIENT PAUSE WRITE mode added along with the `mode` option. diff --git a/commands/client-tracking.md b/commands/client-tracking.md index 8300b511..12bb2e62 100644 --- a/commands/client-tracking.md +++ b/commands/client-tracking.md @@ -21,9 +21,9 @@ unless tracking is turned on with `CLIENT TRACKING off` at some point. The following are the list of options that modify the behavior of the command when enabling tracking: -* `REDIRECT `: send redirection messages to the connection with the specified ID. The connection must exist, you can get the ID of such connection using `CLIENT ID`. If the connection we are redirecting to is terminated, when in RESP3 mode the connection with tracking enabled will receive `tracking-redir-broken` push messages in order to signal the condition. +* `REDIRECT `: send invalidation messages to the connection with the specified ID. The connection must exist. You can get the ID of a connection using `CLIENT ID`. If the connection we are redirecting to is terminated, when in RESP3 mode the connection with tracking enabled will receive `tracking-redir-broken` push messages in order to signal the condition. * `BCAST`: enable tracking in broadcasting mode. 
In this mode invalidation messages are reported for all the prefixes specified, regardless of the keys requested by the connection. Instead when the broadcasting mode is not enabled, Redis will track which keys are fetched using read-only commands, and will report invalidation messages only for such keys. -* `PREFIX `: for broadcasting, register a given key prefix, so that notifications will be provided only for keys starting with this string. This option can be given multiple times to register multiple prefixes. If broadcasting is enabled without this option, Redis will send notifications for every key. +* `PREFIX `: for broadcasting, register a given key prefix, so that notifications will be provided only for keys starting with this string. This option can be given multiple times to register multiple prefixes. If broadcasting is enabled without this option, Redis will send notifications for every key. You can't delete a single prefix, but you can delete all prefixes by disabling and re-enabling tracking. Using this option adds the additional time complexity of O(N^2), where N is the total number of prefixes tracked. * `OPTIN`: when broadcasting is NOT active, normally don't track keys in read only commands, unless they are called immediately after a `CLIENT CACHING yes` command. * `OPTOUT`: when broadcasting is NOT active, normally track keys in read only commands, unless they are called immediately after a `CLIENT CACHING no` command. * `NOLOOP`: don't send notifications about keys modified by this connection itself. diff --git a/commands/client-trackinginfo.md b/commands/client-trackinginfo.md new file mode 100644 index 00000000..82de43e2 --- /dev/null +++ b/commands/client-trackinginfo.md @@ -0,0 +1,18 @@ +The command returns information about the current client connection's use of the [server assisted client side caching](/topics/client-side-caching) feature. 
+ +@return + +@array-reply: a list of tracking information sections and their respective values, specifically: + +* **flags**: A list of tracking flags used by the connection. The flags and their meanings are as follows: + * `off`: The connection isn't using server assisted client side caching. + * `on`: Server assisted client side caching is enabled for the connection. + * `bcast`: The client uses broadcasting mode. + * `optin`: The client does not cache keys by default. + * `optout`: The client caches keys by default. + * `caching-yes`: The next command will cache keys (exists only together with `optin`). + * `caching-no`: The next command won't cache keys (exists only together with `optout`). + * `noloop`: The client isn't notified about keys modified by itself. + * `broken_redirect`: The client ID used for redirection isn't valid anymore. +* **redirect**: The client ID used for notifications redirection, or -1 when none. +* **prefixes**: A list of key prefixes for which notifications are sent to the client. diff --git a/commands/client-unpause.md b/commands/client-unpause.md new file mode 100644 index 00000000..c4384852 --- /dev/null +++ b/commands/client-unpause.md @@ -0,0 +1,5 @@ +`CLIENT UNPAUSE` is used to resume command processing for all clients that were paused by `CLIENT PAUSE`. + +@return + +@simple-string-reply: The command returns `OK` diff --git a/commands/cluster-addslots.md b/commands/cluster-addslots.md index b93fa16c..06040666 100644 --- a/commands/cluster-addslots.md +++ b/commands/cluster-addslots.md @@ -42,7 +42,7 @@ node advertising the new hash slot, is greater than the node currently listed in the table. This means that this command should be used with care only by applications -orchestrating Redis Cluster, like `redis-trib`, and the command if used +orchestrating Redis Cluster, like `redis-cli`, and the command if used out of the right context can leave the cluster in a wrong state or cause data loss. 
diff --git a/commands/cluster-delslots.md b/commands/cluster-delslots.md index 4e888a3f..8dc12a64 100644 --- a/commands/cluster-delslots.md +++ b/commands/cluster-delslots.md @@ -39,7 +39,7 @@ The following command removes the association for slots 5000 and This command only works in cluster mode and may be useful for debugging and in order to manually orchestrate a cluster configuration -when a new cluster is created. It is currently not used by `redis-trib`, +when a new cluster is created. It is currently not used by `redis-cli`, and mainly exists for API completeness. @return diff --git a/commands/cluster-failover.md b/commands/cluster-failover.md index 45c584ba..911eaea8 100644 --- a/commands/cluster-failover.md +++ b/commands/cluster-failover.md @@ -53,11 +53,14 @@ Because of this the **TAKEOVER** option should be used with care. ## Implementation details and notes -`CLUSTER FAILOVER`, unless the **TAKEOVER** option is specified, does not -execute a failover synchronously, it only *schedules* a manual failover, -bypassing the failure detection stage, so to check if the failover actually -happened, `CLUSTER NODES` or other means should be used in order to verify -that the state of the cluster changes after some time the command was sent. +* `CLUSTER FAILOVER`, unless the **TAKEOVER** option is specified, does not execute a failover synchronously. + It only *schedules* a manual failover, bypassing the failure detection stage. +* An `OK` reply is no guarantee that the failover will succeed. +* A replica can only be promoted to a master if it is known as a replica by a majority of the masters in the cluster. + If the replica is a new node that has just been added to the cluster (for example after upgrading it), it may not yet be known to all the masters in the cluster. 
+ To check that the masters are aware of a new replica, you can send `CLUSTER NODES` or `CLUSTER REPLICAS` to each of the master nodes and check that it appears as a replica, before sending `CLUSTER FAILOVER` to the replica. +* To check that the failover has actually happened you can use `ROLE`, `INFO REPLICATION` (which indicates "role:master" after successful failover), or `CLUSTER NODES` to verify that the state of the cluster has changed sometime after the command was sent. +* To check if the failover has failed, check the replica's log for "Manual failover timed out", which is logged if the replica has given up after a few seconds. @return diff --git a/commands/cluster-flushslots.md b/commands/cluster-flushslots.md index 2279f3b7..b0b3fdfb 100644 --- a/commands/cluster-flushslots.md +++ b/commands/cluster-flushslots.md @@ -2,6 +2,6 @@ Deletes all slots from a node. The `CLUSTER FLUSHSLOTS` deletes all information about slots from the connected node. It can only be called when the database is empty. -@reply +@return @simple-string-reply: `OK` diff --git a/commands/cluster-nodes.md b/commands/cluster-nodes.md index 0b8c9f9a..2ec706c5 100644 --- a/commands/cluster-nodes.md +++ b/commands/cluster-nodes.md @@ -12,7 +12,7 @@ Note that normally clients willing to fetch the map between Cluster hash slots and node addresses should use `CLUSTER SLOTS` instead. `CLUSTER NODES`, that provides more information, should be used for administrative tasks, debugging, and configuration inspections. -It is also used by `redis-trib` in order to manage a cluster. +It is also used by `redis-cli` in order to manage a cluster. ## Serialization format @@ -39,7 +39,7 @@ The meaning of each filed is the following: 1. `id`: The node ID, a 40 characters random string generated when a node is created and never changed again (unless `CLUSTER RESET HARD` is used). 2. `ip:port@cport`: The node address where clients should contact the node to run queries. -3. 
`flags`: A list of comma separated flags: `myself`, `master`, `slave`, `fail?`, `fail`, `handshake`, `noaddr`, `noflags`. Flags are explained in detail in the next section. +3. `flags`: A list of comma separated flags: `myself`, `master`, `slave`, `fail?`, `fail`, `handshake`, `noaddr`, `nofailover`, `noflags`. Flags are explained in detail in the next section. 4. `master`: If the node is a replica, and the master is known, the master node ID, otherwise the "-" character. 5. `ping-sent`: Milliseconds unix time at which the currently active ping was sent, or zero if there are no pending pings. 6. `pong-recv`: Milliseconds unix time the last pong was received. @@ -56,6 +56,7 @@ Meaning of the flags (field number 3): * `fail`: Node is in `FAIL` state. It was not reachable for multiple nodes that promoted the `PFAIL` state to `FAIL`. * `handshake`: Untrusted node, we are handshaking. * `noaddr`: No address known for this node. +* `nofailover`: Replica will not try to failover. * `noflags`: No flags at all. ## Notes on published config epochs diff --git a/commands/cluster-setslot.md b/commands/cluster-setslot.md index f819abc0..d26da8ec 100644 --- a/commands/cluster-setslot.md +++ b/commands/cluster-setslot.md @@ -42,7 +42,7 @@ In this way when a node in migrating state generates an `ASK` redirection, the c ## CLUSTER SETSLOT `` STABLE This subcommand just clears migrating / importing state from the slot. It is -mainly used to fix a cluster stuck in a wrong state by `redis-trib fix`. +mainly used to fix a cluster stuck in a wrong state by `redis-cli --cluster fix`. Normally the two states are cleared automatically at the end of the migration using the `SETSLOT ... NODE ...` subcommand as explained in the next section. 
diff --git a/commands/command.md b/commands/command.md index d6d21526..46ea2d9f 100644 --- a/commands/command.md +++ b/commands/command.md @@ -104,8 +104,12 @@ Cluster client needs to parse commands marked `movablekeys` to locate all releva Complete list of commands currently requiring key location parsing: - `SORT` - optional `STORE` key, optional `BY` weights, optional `GET` keys + - `ZUNION` - keys stop when `WEIGHT` or `AGGREGATE` starts - `ZUNIONSTORE` - keys stop when `WEIGHT` or `AGGREGATE` starts + - `ZINTER` - keys stop when `WEIGHT` or `AGGREGATE` starts - `ZINTERSTORE` - keys stop when `WEIGHT` or `AGGREGATE` starts + - `ZDIFF` - keys stop after `numkeys` count arguments + - `ZDIFFSTORE` - keys stop after `numkeys` count arguments - `EVAL` - keys stop after `numkeys` count arguments - `EVALSHA` - keys stop after `numkeys` count arguments diff --git a/commands/copy.md b/commands/copy.md new file mode 100644 index 00000000..2803d2a0 --- /dev/null +++ b/commands/copy.md @@ -0,0 +1,24 @@ +This command copies the value stored at the `source` key to the `destination` +key. + +By default, the `destination` key is created in the logical database used by the +connection. The `DB` option allows specifying an alternative logical database +index for the destination key. + +The command returns an error when the `destination` key already exists. The +`REPLACE` option removes the `destination` key before copying the value to it. + +@return + +@integer-reply, specifically: + +* `1` if `source` was copied. +* `0` if `source` was not copied. + +@examples + +``` +SET dolly "sheep" +COPY dolly clone +GET clone +``` \ No newline at end of file diff --git a/commands/eval.md b/commands/eval.md index ba02ce7f..8f12b61d 100644 --- a/commands/eval.md +++ b/commands/eval.md @@ -194,6 +194,18 @@ returned in the format specified above (as a Lua table with an `err` field). The script can pass the exact error to the user by returning the error object returned by `redis.pcall()`. 
+## Running Lua under low memory conditions + +When the memory usage in Redis exceeds the `maxmemory` limit, the first write command encountered in the Lua script that uses additional memory will cause the script to abort (unless `redis.pcall` was used). +However, one thing to caution here is that if the first write command does not use additional memory such as DEL, LREM, or SREM, etc, Redis will allow it to run and all subsequent commands in the Lua script will execute to completion for atomicity. +If the subsequent writes in the script generate additional memory, the Redis memory usage can go over `maxmemory`. + +Another possible way for Lua script to cause Redis memory usage to go above `maxmemory` happens when the script execution starts when Redis is slightly below `maxmemory` so the first write command in the script is allowed. +As the script executes, subsequent write commands continue to generate memory and causes the Redis server to go above `maxmemory`. + +In those scenarios, it is recommended to configure the `maxmemory-policy` not to use `noeviction`. +Also Lua scripts should be short so that evictions of items can happen in between Lua scripts. + ## Bandwidth and EVALSHA The `EVAL` command forces you to send the script body again and again. @@ -235,7 +247,7 @@ OK > evalsha 6b1bf486c81ceb7edf3c093f4c48582e38c0e791 0 "bar" > evalsha ffffffffffffffffffffffffffffffffffffffff 0 -(error) `NOSCRIPT` No matching script. Please use `EVAL`. +(error) NOSCRIPT No matching script. Please use EVAL. ``` The client library implementation can always optimistically send `EVALSHA` under @@ -326,18 +338,18 @@ SCRIPT currently accepts three different commands: not violate the scripting engine's guaranteed atomicity). See the next sections for more information about long running scripts. -## Scripts as pure functions +## Scripts with deterministic writes *Note: starting with Redis 5, scripts are always replicated as effects and not sending the script verbatim. 
So the following section is mostly applicable to Redis version 4 or older.* -A very important part of scripting is writing scripts that are pure functions. +A very important part of scripting is writing scripts that only change the database in a deterministic way. Scripts executed in a Redis instance are, by default, propagated to replicas and to the AOF file by sending the script itself -- not the resulting commands. +Since the script will be re-run on the remote host (or when reloading the AOF file), the changes it makes to the database must be reproducible. -The reason is that sending a script to another Redis instance is often much -faster than sending the multiple commands the script generates, so if the -client is sending many scripts to the master, converting the scripts into +The reason for sending the script is that it is often much faster than sending the multiple commands that the script generates. +If the client is sending many scripts to the master, converting the scripts into individual commands for the replica / AOF would result in too much bandwidth for the replication link or the Append Only File (and also too much CPU since dispatching a command received via network is a lot more work for Redis compared @@ -348,12 +360,13 @@ however not in all the cases. So starting with Redis 3.2, the scripting engine is able to, alternatively, replicate the sequence of write commands resulting from the script execution, instead of replication the script itself. See the next section for more information. + In this section we'll assume that scripts are replicated by sending the whole script. Let's call this replication mode **whole scripts replication**. 
The main drawback with the *whole scripts replication* approach is that scripts are required to have the following property: -* The script must always evaluates the same Redis _write_ commands with the +* The script must always execute the same Redis _write_ commands with the same arguments given the same input data set. Operations performed by the script cannot depend on any hidden (non-explicit) information or state that may change as script execution proceeds or between @@ -361,7 +374,7 @@ The main drawback with the *whole scripts replication* approach is that scripts from I/O devices. Things like using the system time, calling Redis random commands like -`RANDOMKEY`, or using Lua random number generator, could result into scripts +`RANDOMKEY`, or using Lua's random number generator, could result in scripts that will not always evaluate in the same way. In order to enforce this behavior in scripts Redis does the following: @@ -389,9 +402,8 @@ In order to enforce this behavior in scripts Redis does the following: assume that certain commands in Lua will be ordered, but instead rely on the documentation of the original command you call to see the properties it provides. -* Lua pseudo random number generation functions `math.random` and - `math.randomseed` are modified in order to always have the same seed every - time a new script is executed. +* Lua's pseudo-random number generation function `math.random` is + modified to always use the same seed every time a new script is executed. This means that calling `math.random` will always generate the same sequence of numbers every time a script is executed if `math.randomseed` is not used. 
@@ -422,7 +434,7 @@ r.del(:mylist) puts r.eval(RandomPushScript,[:mylist],[10,rand(2**32)]) ``` -Every time this script executed the resulting list will have exactly the +Every time this script is executed the resulting list will have exactly the following elements: ``` @@ -439,9 +451,9 @@ following elements: 10) "0.17082803611217" ``` -In order to make it a pure function, but still be sure that every invocation +In order to make it deterministic, but still be sure that every invocation of the script will result in different random elements, we can simply add an -additional argument to the script that will be used in order to seed the Lua +additional argument to the script that will be used to seed the Lua pseudo-random number generator. The new script is as follows: @@ -462,9 +474,8 @@ puts r.eval(RandomPushScript,1,:mylist,10,rand(2**32)) ``` What we are doing here is sending the seed of the PRNG as one of the arguments. -This way the script output will be the same given the same arguments, but we are -changing one of the arguments in every invocation, generating the random seed -client-side. +The script output will always be the same given the same arguments (our requirement) +but we are changing one of the arguments at every invocation, generating the random seed client-side. The seed will be propagated as one of the arguments both in the replication link and in the Append Only File, guaranteeing that the same changes will be generated when the AOF is reloaded or when the replica processes the script. @@ -480,7 +491,7 @@ output. *Note: starting with Redis 5, the replication method described in this section (scripts effects replication) is the default and does not need to be explicitly enabled.* Starting with Redis 3.2, it is possible to select an -alternative replication method. Instead of replication whole scripts, we +alternative replication method. Instead of replicating whole scripts, we can just replicate single write commands generated by the script. 
We call this **script effects replication**. @@ -488,44 +499,41 @@ In this replication mode, while Lua scripts are executed, Redis collects all the commands executed by the Lua scripting engine that actually modify the dataset. When the script execution finishes, the sequence of commands that the script generated are wrapped into a MULTI / EXEC transaction and -are sent to replicas and AOF. +are sent to the replicas and AOF. This is useful in several ways depending on the use case: -* When the script is slow to compute, but the effects can be summarized by -a few write commands, it is a shame to re-compute the script on the replicas -or when reloading the AOF. In this case to replicate just the effect of the -script is much better. -* When script effects replication is enabled, the controls about non -deterministic functions are disabled. You can, for example, use the `TIME` -or `SRANDMEMBER` commands inside your scripts freely at any place. -* The Lua PRNG in this mode is seeded randomly at every call. +* When the script is slow to compute, but the effects can be summarized by a few write commands, it is a shame to re-compute the script on the replicas or when reloading the AOF. + In this case it is much better to replicate just the effects of the script. +* When script effects replication is enabled, the restrictions on non-deterministic functions are removed. + You can, for example, use the `TIME` or `SRANDMEMBER` commands inside your scripts freely at any place. +* The Lua PRNG in this mode is seeded randomly on every call. 
-In order to enable script effects replication, you need to issue the -following Lua command before any write operated by the script: +To enable script effects replication you need to issue the +following Lua command before the script performs a write: redis.replicate_commands() -The function returns true if the script effects replication was enabled, -otherwise if the function was called after the script already called -some write command, it returns false, and normal whole script replication +The function returns true if script effects replication was enabled; +otherwise, if the function was called after the script already called +a write command, it returns false, and normal whole script replication is used. ## Selective replication of commands When script effects replication is selected (see the previous section), it -is possible to have more control in the way commands are replicated to replicas -and AOF. This is a very advanced feature since **a misuse can do damage** by -breaking the contract that the master, replicas, and AOF, all must contain the +is possible to have more control over the way commands are propagated to replicas and the AOF. +This is a very advanced feature since **a misuse can do damage** by breaking the contract that the master, replicas, and AOF must all contain the same logical content. However this is a useful feature since, sometimes, we need to execute certain commands only in the master in order to create, for example, intermediate values. -Think at a Lua script where we perform an intersection between two sets. -Pick five random elements, and create a new set with this five random -elements. Finally we delete the temporary key representing the intersection +Think of a Lua script where we perform an intersection between two sets. +We then pick five random elements from the intersection and create a new set +containing them. +Finally, we delete the temporary key representing the intersection between the two original sets. 
What we want to replicate is only the creation of the new set with the five elements. It's not useful to also replicate the commands creating the temporary key. @@ -537,15 +545,14 @@ an error if called when script effects replication is disabled. The command can be called with four different arguments: - redis.set_repl(redis.REPL_ALL) -- Replicate to AOF and replicas. - redis.set_repl(redis.REPL_AOF) -- Replicate only to AOF. + redis.set_repl(redis.REPL_ALL) -- Replicate to the AOF and replicas. + redis.set_repl(redis.REPL_AOF) -- Replicate only to the AOF. redis.set_repl(redis.REPL_REPLICA) -- Replicate only to replicas (Redis >= 5) redis.set_repl(redis.REPL_SLAVE) -- Used for backward compatibility, the same as REPL_REPLICA. redis.set_repl(redis.REPL_NONE) -- Don't replicate at all. -By default the scripting engine is always set to `REPL_ALL`. By calling -this function the user can switch on/off AOF and or replicas propagation, and -turn them back later at her/his wish. +By default the scripting engine is set to `REPL_ALL`. +By calling this function the user can switch the replication mode on or off at any time. A simple example follows: @@ -556,8 +563,7 @@ A simple example follows: redis.set_repl(redis.REPL_ALL) redis.call('set','C','3') -After running the above script, the result is that only keys A and C -will be created on replicas and AOF. +After running the above script, the result is that only the keys A and C will be created on the replicas and AOF. ## Global variables protection @@ -641,8 +647,12 @@ At this point the new conversions are available, specifically: * Redis true reply -> Lua true boolean value. * Redis false reply -> Lua false boolean value. * Redis double reply -> Lua table with a single `score` field containing a Lua number representing the double value. +* Redis big number reply -> Lua table with a single `big_number` field containing a Lua string representing the big number value. 
+* Redis verbatim string reply -> Lua table with a single `verbatim_string` field containing a Lua table with two fields, `string` and `format`, representing the verbatim string and verbatim format respectively. * All the RESP2 old conversions still apply. +Note: the big number and verbatim replies are only available in Redis 7 or greater. Also, presently RESP3 attributes are not supported in Lua. + **Lua to Redis** conversion table specific for RESP3. * Lua boolean -> Redis boolean true or false. **Note that this is a change compared to the RESP2 mode**, where returning true from Lua returned the number 1 to the Redis client, and returning false used to return NULL. diff --git a/commands/eval_ro.md b/commands/eval_ro.md new file mode 100644 index 00000000..73fcf126 --- /dev/null +++ b/commands/eval_ro.md @@ -0,0 +1,17 @@ +This is a read-only variant of the `EVAL` command that isn't allowed to execute commands that modify data. + +Unlike `EVAL`, scripts executed with this command can always be killed and never affect the replication stream. +Because it can only read data, this command can always be executed on a master or a replica. + +@examples + +``` +> SET mykey "Hello" +OK + +> EVAL_RO "return redis.call('GET', KEYS[1])" 1 mykey +"Hello" + +> EVAL_RO "return redis.call('DEL', KEYS[1])" 1 mykey +(error) ERR Error running script (call to f_359f69785f876b7f3f60597d81534f3d6c403284): @user_script:1: @user_script: 1: Write commands are not allowed from read-only scripts +``` diff --git a/commands/evalsha_ro.md b/commands/evalsha_ro.md new file mode 100644 index 00000000..e07553d4 --- /dev/null +++ b/commands/evalsha_ro.md @@ -0,0 +1,4 @@ +This is a read-only variant of the `EVALSHA` command that isn't allowed to execute commands that modify data. + + Unlike `EVALSHA`, scripts executed with this command can always be killed and never affect the replication stream. + Because it can only read data, this command can always be executed on a master or a replica. 
diff --git a/commands/expire.md b/commands/expire.md index fbd86172..65befa96 100644 --- a/commands/expire.md +++ b/commands/expire.md @@ -31,6 +31,18 @@ will be `del`, not `expired`). [del]: /commands/del [ntf]: /topics/notifications +## Options + +The `EXPIRE` command supports a set of options since Redis 7.0: + +* `NX` -- Set expiry only when the key has no expiry +* `XX` -- Set expiry only when the key has an existing expiry +* `GT` -- Set expiry only when the new expiry is greater than current one +* `LT` -- Set expiry only when the new expiry is less than current one + +A non-volatile key is treated as an infinite TTL for the purpose of `GT` and `LT`. +The `GT`, `LT` and `NX` options are mutually exclusive. + ## Refreshing expires It is possible to call `EXPIRE` using as argument a key that already has an @@ -53,7 +65,7 @@ are now fixed. @integer-reply, specifically: * `1` if the timeout was set. -* `0` if `key` does not exist. +* `0` if the timeout was not set. e.g. key doesn't exist, or operation skipped due to the provided arguments. @examples @@ -63,8 +75,16 @@ EXPIRE mykey 10 TTL mykey SET mykey "Hello World" TTL mykey +EXPIRE mykey 10 XX +TTL mykey +EXPIRE mykey 10 NX +TTL mykey ``` +@history + +* `>= 7.0`: Added options: `NX`, `XX`, `GT` and `LT`. + ## Pattern: Navigation session Imagine you have a web service and you are interested in the latest N pages diff --git a/commands/expireat.md b/commands/expireat.md index a4430bb7..75085594 100644 --- a/commands/expireat.md +++ b/commands/expireat.md @@ -15,12 +15,24 @@ timeouts for the AOF persistence mode. Of course, it can be used directly to specify that a given key should expire at a given time in the future. 
+## Options + +The `EXPIREAT` command supports a set of options since Redis 7.0: + +* `NX` -- Set expiry only when the key has no expiry +* `XX` -- Set expiry only when the key has an existing expiry +* `GT` -- Set expiry only when the new expiry is greater than current one +* `LT` -- Set expiry only when the new expiry is less than current one + +A non-volatile key is treated as an infinite TTL for the purpose of `GT` and `LT`. +The `GT`, `LT` and `NX` options are mutually exclusive. + @return @integer-reply, specifically: * `1` if the timeout was set. -* `0` if `key` does not exist. +* `0` if the timeout was not set. e.g. key doesn't exist, or operation skipped due to the provided arguments. @examples @@ -30,3 +42,7 @@ EXISTS mykey EXPIREAT mykey 1293840000 EXISTS mykey ``` + +@history + +* `>= 7.0`: Added options: `NX`, `XX`, `GT` and `LT`. diff --git a/commands/expiretime.md b/commands/expiretime.md new file mode 100644 index 00000000..3b3a6b21 --- /dev/null +++ b/commands/expiretime.md @@ -0,0 +1,17 @@ +Returns the absolute Unix timestamp (since January 1, 1970) in seconds at which the given key will expire. + +See also the `PEXPIRETIME` command which returns the same information with milliseconds resolution. + +@return + +@integer-reply: Expiration Unix timestamp in seconds, or a negative value in order to signal an error (see the description below). +* The command returns `-1` if the key exists but has no associated expiration time. +* The command returns `-2` if the key does not exist. + +@examples + +```cli +SET mykey "Hello" +EXPIREAT mykey 33177117420 +EXPIRETIME mykey +``` diff --git a/commands/failover.md b/commands/failover.md new file mode 100644 index 00000000..719d1994 --- /dev/null +++ b/commands/failover.md @@ -0,0 +1,48 @@ +This command will start a coordinated failover between the currently-connected-to master and one of its replicas. +The failover is not synchronous, instead a background task will handle coordinating the failover. 
+It is designed to limit data loss and unavailability of the cluster during the failover. +This command is analogous to the `CLUSTER FAILOVER` command for non-clustered Redis and is similar to the failover support provided by sentinel. + +The specific details of the default failover flow are as follows: + +1. The master will internally start a `CLIENT PAUSE WRITE`, which will pause incoming writes and prevent the accumulation of new data in the replication stream. +2. The master will monitor its replicas, waiting for a replica to indicate that it has fully consumed the replication stream. If the master has multiple replicas, it will only wait for the first replica to catch up. +3. The master will then demote itself to a replica. This is done to prevent any dual master scenarios. NOTE: The master will not discard its data, so it will be able to rollback if the replica rejects the failover request in the next step. +4. The previous master will send a special PSYNC request to the target replica, `PSYNC FAILOVER`, instructing the target replica to become a master. +5. Once the previous master receives acknowledgement the `PSYNC FAILOVER` was accepted it will unpause its clients. If the PSYNC request is rejected, the master will abort the failover and return to normal. + +The field `master_failover_state` in `INFO replication` can be used to track the current state of the failover, which has the following values: + +* `no-failover`: There is no ongoing coordinated failover. +* `waiting-for-sync`: The master is waiting for the replica to catch up to its replication offset. +* `failover-in-progress`: The master has demoted itself, and is attempting to hand off ownership to a target replica. + +If the previous master had additional replicas attached to it, they will continue replicating from it as chained replicas. You will need to manually execute a `REPLICAOF` on these replicas to start replicating directly from the new master. 
+ 
+## Optional arguments
+The following optional arguments exist to modify the behavior of the failover flow:
+
+* `TIMEOUT` *milliseconds* -- This option allows specifying a maximum time a master will wait in the `waiting-for-sync` state before aborting the failover attempt and rolling back.
+This is intended to set an upper bound on the write outage the Redis cluster can experience.
+Failovers typically happen in less than a second, but could take longer if there is a large amount of write traffic or the replica is already behind in consuming the replication stream.
+If this value is not specified, the timeout can be considered to be "infinite".
+
+* `TO` *HOST* *PORT* -- This option allows designating a specific replica, by its host and port, to failover to. The master will wait specifically for this replica to catch up to its replication offset, and then failover to it.
+
+* `FORCE` -- If both the `TIMEOUT` and `TO` options are set, the force flag can also be used to designate that once the timeout has elapsed, the master should failover to the target replica instead of rolling back.
+This can be used for a best-effort attempt at a failover without data loss, but limiting write outage.
+
+NOTE: The master will always rollback if the `PSYNC FAILOVER` request is rejected by the target replica.
+
+## Failover abort
+
+The failover command is intended to be safe from data loss and corruption, but can encounter some scenarios it can not automatically remediate from and may get stuck.
+For this purpose, the `FAILOVER ABORT` command exists, which will abort an ongoing failover and return the master to its normal state.
+The command has no side effects if issued in the `waiting-for-sync` state but can introduce multi-master scenarios in the `failover-in-progress` state.
+If a multi-master scenario is encountered, you will need to manually identify which master has the latest data, designate it as the master, and have the other replicas replicate from it.
+ +NOTE: `REPLICAOF` is disabled while a failover is in progress, this is to prevent unintended interactions with the failover that might cause data loss. + +@return + +@simple-string-reply: `OK` if the command was accepted and a coordinated failover is in progress. An error if the operation cannot be executed. diff --git a/commands/flushall.md b/commands/flushall.md index fc7b597e..29fd68d3 100644 --- a/commands/flushall.md +++ b/commands/flushall.md @@ -1,17 +1,21 @@ -Delete all the keys of all the existing databases, not just the currently -selected one. +Delete all the keys of all the existing databases, not just the currently selected one. This command never fails. -The time-complexity for this operation is O(N), N being the number of -keys in all existing databases. +By default, `FLUSHALL` will synchronously flush all the databases. +Starting with Redis 6.2, setting the **lazyfree-lazy-user-flush** configuration directive to "yes" changes the default flush mode to asynchronous. -`FLUSHALL ASYNC` (Redis 4.0.0 or greater) ---- -Redis is now able to delete keys in the background in a different thread without blocking the server. -An `ASYNC` option was added to `FLUSHALL` and `FLUSHDB` in order to let the entire dataset or a single database to be freed asynchronously. +It is possible to use one of the following modifiers to dictate the flushing mode explicitly: -Asynchronous `FLUSHALL` and `FLUSHDB` commands only delete keys that were present at the time the command was invoked. Keys created during an asynchronous flush will be unaffected. +* `ASYNC`: flushes the databases asynchronously +* `!SYNC`: flushes the databases synchronously + +Note: an asynchronous `FLUSHALL` command only deletes keys that were present at the time the command was invoked. Keys created during an asynchronous flush will be unaffected. @return @simple-string-reply + +@history + +* `>= 4.0.0`: Added the `ASYNC` flushing mode modifier. 
+* `>= 6.2.0`: Added the `!SYNC` flushing mode modifier and the **lazyfree-lazy-user-flush** configuration directive. diff --git a/commands/flushdb.md b/commands/flushdb.md index fe5e8731..c733d6b3 100644 --- a/commands/flushdb.md +++ b/commands/flushdb.md @@ -1,12 +1,15 @@ Delete all the keys of the currently selected DB. This command never fails. -The time-complexity for this operation is O(N), N being the number of -keys in the database. +By default, `FLUSHDB` will synchronously flush all keys from the database. +Starting with Redis 6.2, setting the **lazyfree-lazy-user-flush** configuration directive to "yes" changes the default flush mode to asynchronous. -`FLUSHDB ASYNC` (Redis 4.0.0 or greater) ---- -See `FLUSHALL` for documentation. +It is possible to use one of the following modifiers to dictate the flushing mode explicitly: + +* `ASYNC`: flushes the database asynchronously +* `!SYNC`: flushes the database synchronously + +Note: an asynchronous `FLUSHDB` command only deletes keys that were present at the time the command was invoked. Keys created during an asynchronous flush will be unaffected. @return diff --git a/commands/geoadd.md b/commands/geoadd.md index e2a73fdd..128be8a4 100644 --- a/commands/geoadd.md +++ b/commands/geoadd.md @@ -1,47 +1,54 @@ -Adds the specified geospatial items (latitude, longitude, name) to the specified -key. Data is stored into the key as a sorted set, in a way that makes it possible to later retrieve items using a query by radius with the `GEORADIUS` or `GEORADIUSBYMEMBER` commands. +Adds the specified geospatial items (longitude, latitude, name) to the specified key. Data is stored into the key as a sorted set, in a way that makes it possible to query the items with the `GEOSEARCH` command. -The command takes arguments in the standard format x,y so the longitude must -be specified before the latitude. There are limits to the coordinates that -can be indexed: areas very near to the poles are not indexable. 
The exact -limits, as specified by EPSG:900913 / EPSG:3785 / OSGEO:41001 are the following: +The command takes arguments in the standard format x,y so the longitude must be specified before the latitude. There are limits to the coordinates that can be indexed: areas very near to the poles are not indexable. + +The exact limits, as specified by EPSG:900913 / EPSG:3785 / OSGEO:41001 are the following: * Valid longitudes are from -180 to 180 degrees. * Valid latitudes are from -85.05112878 to 85.05112878 degrees. The command will report an error when the user attempts to index coordinates outside the specified ranges. -**Note:** there is no **GEODEL** command because you can use `ZREM` in order to remove elements. The Geo index structure is just a sorted set. +**Note:** there is no **GEODEL** command because you can use `ZREM` to remove elements. The Geo index structure is just a sorted set. + +## GEOADD options + +`GEOADD` also provides the following options: + +* **XX**: Only update elements that already exist. Never add elements. +* **NX**: Don't update already existing elements. Always add new elements. +* **CH**: Modify the return value from the number of new elements added, to the total number of elements changed (CH is an abbreviation of *changed*). Changed elements are **new elements added** and elements already existing for which **the coordinates was updated**. So elements specified in the command line having the same score as they had in the past are not counted. Note: normally, the return value of `GEOADD` only counts the number of new elements added. + +Note: The **XX** and **NX** options are mutually exclusive. How does it work? --- The way the sorted set is populated is using a technique called [Geohash](https://en.wikipedia.org/wiki/Geohash). Latitude and Longitude -bits are interleaved in order to form an unique 52 bit integer. 
We know -that a sorted set double score can represent a 52 bit integer without losing +bits are interleaved to form a unique 52-bit integer. We know +that a sorted set double score can represent a 52-bit integer without losing precision. -This format allows for radius querying by checking the 1+8 areas needed -to cover the whole radius, and discarding elements outside the radius. -The areas are checked by calculating the range of the box covered removing -enough bits from the less significant part of the sorted set score, and -computing the score range to query in the sorted set for each area. +This format allows for bounding box and radius querying by checking the 1+8 areas needed to cover the whole shape and discarding elements outside it. The areas are checked by calculating the range of the box covered, removing enough bits from the less significant part of the sorted set score, and computing the score range to query in the sorted set for each area. What Earth model does it use? --- -It just assumes that the Earth is a sphere, since the used distance formula -is the Haversine formula. This formula is only an approximation when applied to the Earth, which is not a perfect sphere. The introduced errors are not an issue when used in the context of social network sites that need to query by radius -and most other applications. However in the worst case the error may be up to -0.5%, so you may want to consider other systems for error-critical applications. +The model assumes that the Earth is a sphere since it uses the Haversine formula to calculate distance. This formula is only an approximation when applied to the Earth, which is not a perfect sphere. +The introduced errors are not an issue when used, for example, by social networks and similar applications requiring this type of querying. +However, in the worst case, the error may be up to 0.5%, so you may want to consider other systems for error-critical applications. 
@return @integer-reply, specifically: -* The number of elements added to the sorted set, not including elements - already existing for which the score was updated. +* When used without optional arguments, the number of elements added to the sorted set (excluding score updates). +* If the `CH` option is specified, the number of elements that were changed (added or updated). + +@history + +* `>= 6.2`: Added the `CH`, `NX` and `XX` options. @examples diff --git a/commands/georadius.md b/commands/georadius.md index e0500027..e93737c8 100644 --- a/commands/georadius.md +++ b/commands/georadius.md @@ -1,5 +1,7 @@ Return the members of a sorted set populated with geospatial information using `GEOADD`, which are within the borders of the area specified with the center location and the maximum distance from the center (the radius). +As per Redis 6.2.0, GEORADIUS command family are considered deprecated. Please prefer `GEOSEARCH` and `GEOSEARCHSTORE` in new code. + This manual page also covers the `GEORADIUS_RO` and `GEORADIUSBYMEMBER_RO` variants (see the section below for more information). The common use case for this command is to retrieve geospatial items near a specified point not farther than a given amount of meters (or other units). This allows, for example, to suggest mobile users of an application nearby places. @@ -22,7 +24,11 @@ The command default is to return unsorted items. Two different sorting methods c * `ASC`: Sort returned items from the nearest to the farthest, relative to the center. * `DESC`: Sort returned items from the farthest to the nearest, relative to the center. -By default all the matching items are returned. It is possible to limit the results to the first N matching items by using the **COUNT ``** option. 
However note that internally the command needs to perform an effort proportional to the number of items matching the specified area, so to query very large areas with a very small `COUNT` option may be slow even if just a few results are returned. On the other hand `COUNT` can be a very effective way to reduce bandwidth usage if normally just the first results are used. +By default all the matching items are returned. It is possible to limit the results to the first N matching items by using the **COUNT ``** option. +When `ANY` is provided the command will return as soon as enough matches are found, +so the results may not be the ones closest to the specified point, but on the other hand, the effort invested by the server is significantly lower. +When `ANY` is not provided, the command will perform an effort that is proportional to the number of items matching the specified area and sort them, +so to query very large areas with a very small `COUNT` option may be slow even if just a few results are returned. By default the command returns the items to the client. It is possible to store the results with one of these options: @@ -46,14 +52,18 @@ So for example the command `GEORADIUS Sicily 15 37 200 km WITHCOORD WITHDIST` wi ["Palermo","190.4424",["13.361389338970184","38.115556395496299"]] -## Read only variants +## Read-only variants -Since `GEORADIUS` and `GEORADIUSBYMEMBER` have a `STORE` and `STOREDIST` option they are technically flagged as writing commands in the Redis command table. For this reason read-only replicas will flag them, and Redis Cluster replicas will redirect them to the master instance even if the connection is in read only mode (See the `READONLY` command of Redis Cluster). +Since `GEORADIUS` and `GEORADIUSBYMEMBER` have a `STORE` and `STOREDIST` option they are technically flagged as writing commands in the Redis command table. 
For this reason read-only replicas will flag them, and Redis Cluster replicas will redirect them to the master instance even if the connection is in read-only mode (see the `READONLY` command of Redis Cluster). -Breaking the compatibility with the past was considered but rejected, at least for Redis 4.0, so instead two read only variants of the commands were added. They are exactly like the original commands but refuse the `STORE` and `STOREDIST` options. The two variants are called `GEORADIUS_RO` and `GEORADIUSBYMEMBER_RO`, and can safely be used in replicas. +Breaking the compatibility with the past was considered but rejected, at least for Redis 4.0, so instead two read-only variants of the commands were added. They are exactly like the original commands but refuse the `STORE` and `STOREDIST` options. The two variants are called `GEORADIUS_RO` and `GEORADIUSBYMEMBER_RO`, and can safely be used in replicas. Both commands were introduced in Redis 3.2.10 and Redis 4.0.0 respectively. +@history + +* `>= 6.2`: Added the `ANY` option for `COUNT`. + @examples ```cli diff --git a/commands/georadiusbymember.md b/commands/georadiusbymember.md index 5eab55d8..d95990ef 100644 --- a/commands/georadiusbymember.md +++ b/commands/georadiusbymember.md @@ -1,6 +1,8 @@ This command is exactly like `GEORADIUS` with the sole difference that instead of taking, as the center of the area to query, a longitude and latitude value, it takes the name of a member already existing inside the geospatial index represented by the sorted set. +As per Redis 6.2.0, GEORADIUS command family are considered deprecated. Please prefer `GEOSEARCH` and `GEOSEARCHSTORE` in new code. + The position of the specified member is used as the center of the query. Please check the example below and the `GEORADIUS` documentation for more information about the command and its options. 
diff --git a/commands/geosearch.md b/commands/geosearch.md new file mode 100644 index 00000000..972c1c98 --- /dev/null +++ b/commands/geosearch.md @@ -0,0 +1,51 @@ +Return the members of a sorted set populated with geospatial information using `GEOADD`, which are within the borders of the area specified by a given shape. This command extends the `GEORADIUS` command, so in addition to searching within circular areas, it supports searching within rectangular areas. + +This command should be used in place of the deprecated `GEORADIUS` and `GEORADIUSBYMEMBER` commands. + +The query's center point is provided by one of these mandatory options: + +* `FROMMEMBER`: Use the position of the given existing `` in the sorted set. +* `FROMLONLAT`: Use the given `` and `` position. + +The query's shape is provided by one of these mandatory options: + +* `BYRADIUS`: Similar to `GEORADIUS`, search inside circular area according to given ``. +* `BYBOX`: Search inside an axis-aligned rectangle, determined by `` and ``. + +The command optionally returns additional information using the following options: + +* `WITHDIST`: Also return the distance of the returned items from the specified center point. The distance is returned in the same unit as specified for the radius or height and width arguments. +* `WITHCOORD`: Also return the longitude and latitude of the matching items. +* `WITHHASH`: Also return the raw geohash-encoded sorted set score of the item, in the form of a 52 bit unsigned integer. This is only useful for low level hacks or debugging and is otherwise of little interest for the general user. + +Matching items are returned unsorted by default. To sort them, use one of the following two options: + +* `ASC`: Sort returned items from the nearest to the farthest, relative to the center point. +* `DESC`: Sort returned items from the farthest to the nearest, relative to the center point. + +All matching items are returned by default. 
To limit the results to the first N matching items, use the **COUNT ``** option. +When the `ANY` option is used, the command returns as soon as enough matches are found. This means that the results returned may not be the ones closest to the specified point, but the effort invested by the server to generate them is significantly less. +When `ANY` is not provided, the command will perform an effort that is proportional to the number of items matching the specified area and sort them, +so to query very large areas with a very small `COUNT` option may be slow even if just a few results are returned. + +@return + +@array-reply, specifically: + +* Without any `WITH` option specified, the command just returns a linear array like ["New York","Milan","Paris"]. +* If `WITHCOORD`, `WITHDIST` or `WITHHASH` options are specified, the command returns an array of arrays, where each sub-array represents a single item. + +When additional information is returned as an array of arrays for each item, the first item in the sub-array is always the name of the returned item. The other information is returned in the following order as successive elements of the sub-array. + +1. The distance from the center as a floating point number, in the same unit specified in the shape. +2. The geohash integer. +3. The coordinates as a two items x,y array (longitude,latitude). + +@examples + +```cli +GEOADD Sicily 13.361389 38.115556 "Palermo" 15.087269 37.502669 "Catania" +GEOADD Sicily 12.758489 38.788135 "edge1" 17.241510 38.788135 "edge2" +GEOSEARCH Sicily FROMLONLAT 15 37 BYRADIUS 200 km ASC +GEOSEARCH Sicily FROMLONLAT 15 37 BYBOX 400 400 km ASC WITHCOORD WITHDIST +``` diff --git a/commands/geosearchstore.md b/commands/geosearchstore.md new file mode 100644 index 00000000..2a4fc38d --- /dev/null +++ b/commands/geosearchstore.md @@ -0,0 +1,22 @@ +This command is like `GEOSEARCH`, but stores the result in destination key. 
+ +This command comes in place of the now deprecated `GEORADIUS` and `GEORADIUSBYMEMBER`. + +By default, it stores the results in the `destination` sorted set with their geospatial information. + +When using the `STOREDIST` option, the command stores the items in a sorted set populated with their distance from the center of the circle or box, as a floating-point number, in the same unit specified for that shape. + +@return + +@integer-reply: the number of elements in the resulting set. + +@examples + +```cli +GEOADD Sicily 13.361389 38.115556 "Palermo" 15.087269 37.502669 "Catania" +GEOADD Sicily 12.758489 38.788135 "edge1" 17.241510 38.788135 "edge2" +GEOSEARCHSTORE key1 Sicily FROMLONLAT 15 37 BYBOX 400 400 km ASC COUNT 3 +GEOSEARCH key1 FROMLONLAT 15 37 BYBOX 400 400 km ASC WITHCOORD WITHDIST WITHHASH +GEOSEARCHSTORE key2 Sicily FROMLONLAT 15 37 BYBOX 400 400 km ASC COUNT 3 STOREDIST +ZRANGE key2 0 -1 WITHSCORES +``` \ No newline at end of file diff --git a/commands/getdel.md b/commands/getdel.md new file mode 100644 index 00000000..8474e931 --- /dev/null +++ b/commands/getdel.md @@ -0,0 +1,14 @@ +Get the value of `key` and delete the key. +This command is similar to `GET`, except for the fact that it also deletes the key on success (if and only if the key's value type is a string). + +@return + +@bulk-string-reply: the value of `key`, `nil` when `key` does not exist, or an error if the key's value type isn't a string. + +@examples + +```cli +SET mykey "Hello" +GETDEL mykey +GET mykey +``` diff --git a/commands/getex.md b/commands/getex.md new file mode 100644 index 00000000..89ce809d --- /dev/null +++ b/commands/getex.md @@ -0,0 +1,26 @@ +Get the value of `key` and optionally set its expiration. +`GETEX` is similar to `GET`, but is a write command with additional options. + +## Options + +The `GETEX` command supports a set of options that modify its behavior: + +* `EX` *seconds* -- Set the specified expire time, in seconds. 
+* `PX` *milliseconds* -- Set the specified expire time, in milliseconds. +* `EXAT` *timestamp-seconds* -- Set the specified Unix time at which the key will expire, in seconds. +* `PXAT` *timestamp-milliseconds* -- Set the specified Unix time at which the key will expire, in milliseconds. +* `PERSIST` -- Remove the time to live associated with the key. + +@return + +@bulk-string-reply: the value of `key`, or `nil` when `key` does not exist. + +@examples + +```cli +SET mykey "Hello" +GETEX mykey +TTL mykey +GETEX mykey EX 60 +TTL mykey +``` diff --git a/commands/getset.md b/commands/getset.md index b3674f68..336fdd75 100644 --- a/commands/getset.md +++ b/commands/getset.md @@ -1,5 +1,7 @@ Atomically sets `key` to `value` and returns the old value stored at `key`. -Returns an error when `key` exists but does not hold a string value. +Returns an error when `key` exists but does not hold a string value. Any +previous time to live associated with the key is discarded on successful +`SET` operation. ## Design pattern @@ -15,7 +17,7 @@ GETSET mycounter "0" GET mycounter ``` -As per Redis 6.2, GETSET is considered deprecated. Please use `SET` with `GET` parameter in new code. +As per Redis 6.2, GETSET is considered deprecated. Please prefer `SET` with `GET` parameter in new code. @return diff --git a/commands/hello.md b/commands/hello.md index 0d389450..648dbddd 100644 --- a/commands/hello.md +++ b/commands/hello.md @@ -1,16 +1,41 @@ -Switch the connection to a different protocol. Redis version 6 or greater -are able to support two protocols, the old protocol, RESP2, and a new -one introduced with Redis 6, RESP3. RESP3 has certain advantages since when -the connection is in this mode, Redis is able to reply with more semantical -replies: for instance `HGETALL` will return a *map type*, so a client -library implementation no longer requires to know in advance to translate the -array into a hash before returning it to the caller. 
For a full -coverage of RESP3 please [check this repository](https://github.com/antirez/resp3). - -Redis 6 connections starts in RESP2 mode, so clients implementing RESP2 do -not need to change (nor there are short term plans to drop support for -RESP2). Clients that want to handshake the RESP3 mode need to call the -`HELLO` command, using "3" as first argument. +Switch to a different protocol, optionally authenticating and setting the +connection's name, or provide a contextual client report. + +Redis version 6 and above supports two protocols: the old protocol, RESP2, and +a new one introduced with Redis 6, RESP3. RESP3 has certain advantages since +when the connection is in this mode, Redis is able to reply with more semantical +replies: for instance, `HGETALL` will return a *map type*, so a client library +implementation no longer requires to know in advance to translate the array into +a hash before returning it to the caller. For a full coverage of RESP3, please +[check this repository](https://github.com/antirez/resp3). + +In Redis 6 connections start in RESP2 mode, so clients implementing RESP2 do +not need to updated or changed. There are no short term plans to drop support for +RESP2, although future version may default to RESP3. + +`HELLO` always replies with a list of current server and connection properties, +such as: versions, modules loaded, client ID, replication role and so forth. +When called without any arguments in Redis 6.2 and its default use of RESP2 +protocol, the reply looks like this: + + > HELLO + 1) "server" + 2) "redis" + 3) "version" + 4) "255.255.255" + 5) "proto" + 6) (integer) 2 + 7) "id" + 8) (integer) 5 + 9) "mode" + 10) "standalone" + 11) "role" + 12) "master" + 13) "modules" + 14) (empty array) + +Clients that want to handshake using the RESP3 mode need to call the `HELLO` +command and specify the value "3" as the `protover` argument, like so: > HELLO 3 1# "server" => "redis" @@ -21,20 +46,20 @@ RESP2). 
Clients that want to handshake the RESP3 mode need to call the 6# "role" => "master" 7# "modules" => (empty array) -The `HELLO` command has a useful reply that will state a number of facts -about the server: the exact version, the set of modules loaded, the client -ID, the replication role and so forth. Because of that, and given that -the `HELLO` command also works with "2" as argument, both in order to -downgrade the protocol back to version 2, or just to get the reply from -the server without switching the protocol, client library authors may -consider using this command instead of the canonical `PING` when setting -up the connection. +Because `HELLO` replies with useful information, and given that `protover` is +optional or can be set to "2", client library authors may consider using this +command instead of the canonical `PING` when setting up the connection. -This command accepts two non mandatory options: +When called with the optional `protover` argument, this command switches the +protocol to the specified version and also accepts the following options: -* `AUTH `: directly authenticate the connection other than switching to the specified protocol. In this way there is no need to call `AUTH` before `HELLO` when setting up new connections. Note that the username can be set to "default" in order to authenticate against a server that does not use ACLs, but the simpler `requirepass` mechanism of Redis before version 6. -* `SETNAME `: this is equivalent to also call `CLIENT SETNAME`. +* `AUTH `: directly authenticate the connection in addition to switching to the specified protocol version. This makes calling `AUTH` before `HELLO` unnecessary when setting up a new connection. Note that the `username` can be set to "default" to authenticate against a server that does not use ACLs, but rather the simpler `requirepass` mechanism of Redis prior to version 6. +* `SETNAME `: this is the equivalent of calling `CLIENT SETNAME`. 
@return -@array-reply: a list of server properties. The reply is a map instead of an array when RESP3 is selected. The command returns an error if the protocol requested does not exist. +@array-reply: a list of server properties. The reply is a map instead of an array when RESP3 is selected. The command returns an error if the `protover` requested does not exist. + +@history + +* `>= 6.2`: `protover` made optional; when called without arguments the command reports the current connection's context. diff --git a/commands/hmset.md b/commands/hmset.md index b06013e2..b271aa63 100644 --- a/commands/hmset.md +++ b/commands/hmset.md @@ -3,7 +3,7 @@ Sets the specified fields to their respective values in the hash stored at This command overwrites any specified fields already existing in the hash. If `key` does not exist, a new key holding a hash is created. -As per Redis 4.0.0, HMSET is considered deprecated. Please use `HSET` in new code. +As per Redis 4.0.0, HMSET is considered deprecated. Please prefer `HSET` in new code. @return diff --git a/commands/hrandfield.md b/commands/hrandfield.md new file mode 100644 index 00000000..389a1095 --- /dev/null +++ b/commands/hrandfield.md @@ -0,0 +1,39 @@ +When called with just the `key` argument, return a random field from the hash value stored at `key`. + +If the provided `count` argument is positive, return an array of **distinct fields**. +The array's length is either `count` or the hash's number of fields (`HLEN`), whichever is lower. + +If called with a negative `count`, the behavior changes and the command is allowed to return the **same field multiple times**. +In this case, the number of returned fields is the absolute value of the specified `count`. + +The optional `WITHVALUES` modifier changes the reply so it includes the respective values of the randomly selected hash fields. 
+ +@return + +@bulk-string-reply: without the additional `count` argument, the command returns a Bulk Reply with the randomly selected field, or `nil` when `key` does not exist. + +@array-reply: when the additional `count` argument is passed, the command returns an array of fields, or an empty array when `key` does not exist. +If the `WITHVALUES` modifier is used, the reply is a list of fields and their values from the hash. + +@examples + +```cli +HMSET coin heads obverse tails reverse edge null +HRANDFIELD coin +HRANDFIELD coin +HRANDFIELD coin -5 WITHVALUES +``` + +## Specification of the behavior when count is passed + +When the `count` argument is a positive value this command behaves as follows: + +* No repeated fields are returned. +* If `count` is bigger than the number of fields in the hash, the command will only return the whole hash without additional fields. +* The order of fields in the reply is not truly random, so it is up to the client to shuffle them if needed. + +When the `count` is a negative value, the behavior changes as follows: + +* Repeating fields are possible. +* Exactly `count` fields, or an empty array if the hash is empty (non-existing key), are always returned. +* The order of fields in the reply is truly random. 
diff --git a/commands/incr.md b/commands/incr.md index 48431f9a..048ce7ad 100644 --- a/commands/incr.md +++ b/commands/incr.md @@ -69,14 +69,14 @@ The more simple and direct implementation of this pattern is the following: FUNCTION LIMIT_API_CALL(ip) ts = CURRENT_UNIX_TIME() keyname = ip+":"+ts -current = GET(keyname) -IF current != NULL AND current > 10 THEN +MULTI + INCR(keyname) + EXPIRE(keyname,10) +EXEC +current = RESPONSE_OF_INCR_WITHIN_MULTI +IF current > 10 THEN ERROR "too many requests per second" ELSE - MULTI - INCR(keyname,1) - EXPIRE(keyname,10) - EXEC PERFORM_API_CALL() END ``` @@ -125,7 +125,7 @@ script that is send using the `EVAL` command (only available since Redis version ``` local current current = redis.call("incr",KEYS[1]) -if tonumber(current) == 1 then +if current == 1 then redis.call("expire",KEYS[1],1) end ``` diff --git a/commands/info.md b/commands/info.md index 7d18df11..aae7abf0 100644 --- a/commands/info.md +++ b/commands/info.md @@ -15,6 +15,7 @@ The optional parameter can be used to select a specific section of information: * `modules`: Modules section * `keyspace`: Database related statistics * `modules`: Module related sections +* `errorstats`: Redis error statistics It can also take the following values: @@ -58,9 +59,11 @@ Here is the meaning of all fields in the **server** section: * `atomicvar_api`: Atomicvar API used by Redis * `gcc_version`: Version of the GCC compiler used to compile the Redis server * `process_id`: PID of the server process +* `process_supervised`: Supervised system ("upstart", "systemd", "unknown" or "no") * `run_id`: Random value identifying the Redis server (to be used by Sentinel and Cluster) * `tcp_port`: TCP/IP listen port +* `server_time_in_usec`: Epoch-based system time with microsecond precision * `uptime_in_seconds`: Number of seconds since Redis server start * `uptime_in_days`: Same value expressed in days * `hz`: The server's current frequency setting @@ -73,6 +76,11 @@ Here is the meaning of all 
fields in the **clients** section: * `connected_clients`: Number of client connections (excluding connections from replicas) +* `cluster_connections`: An approximation of the number of sockets used by the + cluster's bus +* `maxclients`: The value of the `maxclients` configuration directive. This is + the upper limit for the sum of `connected_clients`, `connected_slaves` and + `cluster_connections`. * `client_longest_output_list`: Longest output list among current client connections * `client_biggest_input_buf`: Biggest input buffer among current client @@ -115,18 +123,29 @@ Here is the meaning of all fields in the **memory** section: * `maxmemory_human`: Human readable representation of previous value * `maxmemory_policy`: The value of the `maxmemory-policy` configuration directive -* `mem_fragmentation_ratio`: Ratio between `used_memory_rss` and `used_memory` -* `mem_allocator`: Memory allocator, chosen at compile time -* `active_defrag_running`: Flag indicating if active defragmentation is active +* `mem_fragmentation_ratio`: Ratio between `used_memory_rss` and `used_memory`. + Note that this doesn't only include fragmentation, but also other process overheads (see the `allocator_*` metrics), and also overheads like code, shared libraries, stack, etc. +* `mem_fragmentation_bytes`: Delta between `used_memory_rss` and `used_memory`. + Note that when the total fragmentation bytes is low (few megabytes), a high ratio (e.g. 1.5 and above) is not an indication of an issue. +* `allocator_frag_ratio`: Ratio between `allocator_active` and `allocator_allocated`. This is the true (external) fragmentation metric (not `mem_fragmentation_ratio`). +* `allocator_frag_bytes`: Delta between `allocator_active` and `allocator_allocated`. See note about `mem_fragmentation_bytes`. +* `allocator_rss_ratio`: Ratio between `allocator_resident` and `allocator_active`. This usually indicates pages that the allocator can and probably will soon release back to the OS. 
+* `allocator_rss_bytes`: Delta between `allocator_resident` and `allocator_active` +* `rss_overhead_ratio`: Ratio between `used_memory_rss` (the process RSS) and `allocator_resident`. This includes RSS overheads that are not allocator or heap related. +* `rss_overhead_bytes`: Delta between `used_memory_rss` (the process RSS) and `allocator_resident` +* `allocator_allocated`: Total bytes allocated from the allocator, including internal-fragmentation. Normally the same as `used_memory`. +* `allocator_active`: Total bytes in the allocator active pages, this includes external-fragmentation. +* `allocator_resident`: Total bytes resident (RSS) in the allocator, this includes pages that can be released to the OS (by `MEMORY PURGE`, or just waiting). +* `mem_allocator`: Memory allocator, chosen at compile time. +* `active_defrag_running`: When `activedefrag` is enabled, this indicates whether defragmentation is currently active, and the CPU percentage it intends to utilize. * `lazyfree_pending_objects`: The number of objects waiting to be freed (as a result of calling `UNLINK`, or `FLUSHDB` and `FLUSHALL` with the **ASYNC** option) Ideally, the `used_memory_rss` value should be only slightly higher than `used_memory`. -When rss >> used, a large difference means there is memory fragmentation -(internal or external), which can be evaluated by checking -`mem_fragmentation_ratio`. +When rss >> used, a large difference may mean there is (external) memory fragmentation, which can be evaluated by checking +`allocator_frag_ratio`, `allocator_frag_bytes`. When used >> rss, it means part of Redis memory has been swapped off by the operating system: expect some significant latencies. @@ -147,6 +166,13 @@ by referring to the `MEMORY STATS` command and the `MEMORY DOCTOR`. 
Here is the meaning of all fields in the **persistence** section: * `loading`: Flag indicating if the load of a dump file is on-going +* `current_cow_peak`: The peak size in bytes of copy-on-write memory + while a child fork is running +* `current_cow_size`: The size in bytes of copy-on-write memory + while a child fork is running +* `current_fork_perc`: The percentage of progress of the current fork process. For AOF and RDB forks it is the percentage of `current_save_keys_processed` out of `current_save_keys_total`. +* `current_save_keys_processed`: Number of keys processed by the current save operation +* `current_save_keys_total`: Number of keys at the beginning of the current save operation * `rdb_changes_since_last_save`: Number of changes since the last dump * `rdb_bgsave_in_progress`: Flag indicating a RDB save is on-going * `rdb_last_save_time`: Epoch-based timestamp of last successful RDB save @@ -155,7 +181,7 @@ Here is the meaning of all fields in the **persistence** section: seconds * `rdb_current_bgsave_time_sec`: Duration of the on-going RDB save operation if any -* `rdb_last_cow_size`: The size in bytes of copy-on-write allocations during +* `rdb_last_cow_size`: The size in bytes of copy-on-write memory during the last RDB save operation * `aof_enabled`: Flag indicating AOF logging is activated * `aof_rewrite_in_progress`: Flag indicating a AOF rewrite operation is @@ -168,10 +194,10 @@ Here is the meaning of all fields in the **persistence** section: operation if any * `aof_last_bgrewrite_status`: Status of the last AOF rewrite operation * `aof_last_write_status`: Status of the last write operation to the AOF -* `aof_last_cow_size`: The size in bytes of copy-on-write allocations during +* `aof_last_cow_size`: The size in bytes of copy-on-write memory during the last AOF rewrite operation * `module_fork_in_progress`: Flag indicating a module fork is on-going -* `module_fork_last_cow_size`: The size in bytes of copy-on-write allocations +* 
`module_fork_last_cow_size`: The size in bytes of copy-on-write memory during the last module fork operation `rdb_changes_since_last_save` refers to the number of operations that produced @@ -195,6 +221,8 @@ If a load operation is on-going, these additional fields will be added: * `loading_start_time`: Epoch-based timestamp of the start of the load operation * `loading_total_bytes`: Total file size +* `loading_rdb_used_mem`: The memory usage of the server that had generated + the RDB file at the time of the file's creation * `loading_loaded_bytes`: Number of bytes already loaded * `loading_loaded_perc`: Same value expressed as a percentage * `loading_eta_seconds`: ETA in seconds for the load to be complete @@ -219,6 +247,8 @@ Here is the meaning of all fields in the **stats** section: * `expired_time_cap_reached_count`: The count of times that active expiry cycles have stopped early * `expire_cycle_cpu_milliseconds`: The cumulative amount of time spend on active expiry cycles * `evicted_keys`: Number of evicted keys due to `maxmemory` limit +* `total_eviction_exceeded_time`: Total time `used_memory` was greater than `maxmemory` since server startup, in milliseconds +* `current_eviction_exceeded_time`: The time passed since `used_memory` last rose above `maxmemory`, in milliseconds * `keyspace_hits`: Number of successful lookup of keys in the main dictionary * `keyspace_misses`: Number of failed lookup of keys in the main dictionary * `pubsub_channels`: Global number of pub/sub channels with client @@ -226,6 +256,7 @@ Here is the meaning of all fields in the **stats** section: * `pubsub_patterns`: Global number of pub/sub pattern with client subscriptions * `latest_fork_usec`: Duration of the latest fork operation in microseconds +* `total_forks`: Total number of fork operations since the server start * `migrate_cached_sockets`: The number of sockets open for `MIGRATE` purposes * `slave_expires_tracked_keys`: The number of keys tracked for expiry purposes 
(applicable only to writable replicas) @@ -236,6 +267,8 @@ Here is the meaning of all fields in the **stats** section: * `active_defrag_key_hits`: Number of keys that were actively defragmented * `active_defrag_key_misses`: Number of keys that were skipped by the active defragmentation process +* `total_active_defrag_time`: Total time memory fragmentation was over the limit, in milliseconds +* `current_active_defrag_time`: The time passed since memory fragmentation last was over the limit, in milliseconds * `tracking_total_keys`: Number of keys being tracked by the server * `tracking_total_items`: Number of items, that is the sum of clients number for each key, that are being tracked @@ -243,6 +276,9 @@ Here is the meaning of all fields in the **stats** section: (only applicable for broadcast mode) * `unexpected_error_replies`: Number of unexpected error replies, that are types of errors from an AOF load or replication +* `total_error_replies`: Total number of issued error replies, that is the sum of + rejected commands (errors prior command execution) and + failed commands (errors within the command execution) * `total_reads_processed`: Total number of read events processed * `total_writes_processed`: Total number of write events processed * `io_threaded_reads_processed`: Number of read events processed by the main and I/O threads @@ -252,6 +288,7 @@ Here is the meaning of all fields in the **replication** section: * `role`: Value is "master" if the instance is replica of no one, or "slave" if the instance is a replica of some master instance. Note that a replica can be master of another replica (chained replication). +* `master_failover_state`: The state of an ongoing failover, if any. * `master_replid`: The replication ID of the Redis server. * `master_replid2`: The secondary replication ID, used for PSYNC after a failover. 
* `master_repl_offset`: The server's current replication offset @@ -277,7 +314,15 @@ If the instance is a replica, these additional fields are provided: If a SYNC operation is on-going, these additional fields are provided: +* `master_sync_total_bytes`: Total number of bytes that need to be + transferred. this may be 0 when the size is unknown (for example, when + the `repl-diskless-sync` configuration directive is used) +* `master_sync_read_bytes`: Number of bytes already transferred * `master_sync_left_bytes`: Number of bytes left before syncing is complete + (may be negative when `master_sync_total_bytes` is 0) +* `master_sync_perc`: The percentage `master_sync_read_bytes` from + `master_sync_total_bytes`, or an approximation that uses + `loading_rdb_used_mem` when `master_sync_total_bytes` is 0 * `master_sync_last_io_seconds_ago`: Number of seconds since last transfer I/O during a SYNC operation @@ -299,18 +344,30 @@ For each replica, the following line is added: Here is the meaning of all fields in the **cpu** section: -* `used_cpu_sys`: System CPU consumed by the Redis server -* `used_cpu_user`:User CPU consumed by the Redis server +* `used_cpu_sys`: System CPU consumed by the Redis server, which is the sum of system CPU consumed by all threads of the server process (main thread and background threads) +* `used_cpu_user`: User CPU consumed by the Redis server, which is the sum of user CPU consumed by all threads of the server process (main thread and background threads) * `used_cpu_sys_children`: System CPU consumed by the background processes * `used_cpu_user_children`: User CPU consumed by the background processes +* `used_cpu_sys_main_thread`: System CPU consumed by the Redis server main thread +* `used_cpu_user_main_thread`: User CPU consumed by the Redis server main thread The **commandstats** section provides statistics based on the command type, -including the number of calls, the total CPU time consumed by these commands, -and the average CPU consumed 
per command execution. + including the number of calls that reached command execution (not rejected), + the total CPU time consumed by these commands, the average CPU consumed + per command execution, the number of rejected calls + (errors prior command execution), and the number of failed calls + (errors within the command execution). For each command type, the following line is added: -* `cmdstat_XXX`: `calls=XXX,usec=XXX,usec_per_call=XXX` +* `cmdstat_XXX`: `calls=XXX,usec=XXX,usec_per_call=XXX,rejected_calls=XXX,failed_calls=XXX` + +The **errorstats** section enables keeping track of the different errors that occurred within Redis, + based upon the reply error prefix ( The first word after the "-", up to the first space. Example: `ERR` ). + +For each error type, the following line is added: + +* `errorstat_XXX`: `count=XXX` The **cluster** section currently only contains a unique field: diff --git a/commands/latency-reset.md b/commands/latency-reset.md index 826cf71a..9762869b 100644 --- a/commands/latency-reset.md +++ b/commands/latency-reset.md @@ -29,6 +29,6 @@ For more information refer to the [Latency Monitoring Framework page][lm]. [lm]: /topics/latency-monitor -@reply +@return @integer-reply: the number of event time series that were reset. diff --git a/commands/lmpop.md b/commands/lmpop.md new file mode 100644 index 00000000..6caa35f3 --- /dev/null +++ b/commands/lmpop.md @@ -0,0 +1,34 @@ +Pops one or more elements from the first non-empty list key from the list of provided key names. + +LMPOP and BLMPOP are similar to the following, more limited, commands: +- `LPOP` or `RPOP` which take only one key, and can return multiple elements. +- `BLPOP` or `BRPOP` which take multiple keys, but return only one element from just one key. + +See `BLMPOP` for the blocking variant of this command. + +Elements are popped from either the left or right of the first non-empty list based on the passed argument. 
+The number of returned elements is limited to the lower between the non-empty list's length, and the count argument (which defaults to 1). + +@return + +@array-reply: specifically: + +* A `nil` when no element could be popped. +* A two-element array with the first element being the name of the key from which elements were popped, and the second element is an array of elements. + +@examples + +```cli +LMPOP 2 non1 non2 LEFT COUNT 10 +LPUSH mylist "one" "two" "three" "four" "five" +LMPOP 1 mylist LEFT +LRANGE mylist 0 -1 +LMPOP 1 mylist RIGHT COUNT 10 +LPUSH mylist "one" "two" "three" "four" "five" +LPUSH mylist2 "a" "b" "c" "d" "e" +LMPOP 2 mylist mylist2 right count 3 +LRANGE mylist 0 -1 +LMPOP 2 mylist mylist2 right count 5 +LMPOP 2 mylist mylist2 right count 10 +EXISTS mylist mylist2 +``` diff --git a/commands/lpop.md b/commands/lpop.md index b6860a65..ee3b9640 100644 --- a/commands/lpop.md +++ b/commands/lpop.md @@ -1,15 +1,28 @@ -Removes and returns the first element of the list stored at `key`. +Removes and returns the first elements of the list stored at `key`. + +By default, the command pops a single element from the beginning of the list. +When provided with the optional `count` argument, the reply will consist of up +to `count` elements, depending on the list's length. @return +When called without the `count` argument: + @bulk-string-reply: the value of the first element, or `nil` when `key` does not exist. +When called with the `count` argument: + +@array-reply: list of popped elements, or `nil` when `key` does not exist. + +@history + +* `>= 6.2`: Added the `count` argument. 
+ @examples ```cli -RPUSH mylist "one" -RPUSH mylist "two" -RPUSH mylist "three" +RPUSH mylist "one" "two" "three" "four" "five" LPOP mylist +LPOP mylist 2 LRANGE mylist 0 -1 ``` diff --git a/commands/lpos.md b/commands/lpos.md index bb6bc163..93fe5794 100644 --- a/commands/lpos.md +++ b/commands/lpos.md @@ -1,6 +1,6 @@ The command returns the index of matching elements inside a Redis list. By default, when no options are given, it will scan the list from head to tail, -looking for the first match of "element". If the element is found, its index (the zero-based position in the list) is returned. Otherwise, if no match is found, NULL is returned. +looking for the first match of "element". If the element is found, its index (the zero-based position in the list) is returned. Otherwise, if no match is found, `nil` is returned. ``` > RPUSH mylist a b c 1 2 3 c c @@ -51,13 +51,15 @@ When `COUNT` is used, it is possible to specify 0 as the number of matches, as a [2,6,7] ``` -When `COUNT` is used and no match is found, an empty array is returned. However when `COUNT` is not used and there are no matches, the command returns NULL. +When `COUNT` is used and no match is found, an empty array is returned. However when `COUNT` is not used and there are no matches, the command returns `nil`. Finally, the `MAXLEN` option tells the command to compare the provided element only with a given maximum number of list items. So for instance specifying `MAXLEN 1000` will make sure that the command performs only 1000 comparisons, effectively running the algorithm on a subset of the list (the first part or the last part depending on the fact we use a positive or negative rank). This is useful to limit the maximum complexity of the command. It is also useful when we expect the match to be found very early, but want to be sure that in case this is not true, the command does not take too much time to run. 
+When `MAXLEN` is used, it is possible to specify 0 as the maximum number of comparisons, as a way to tell the command we want unlimited comparisons. This is better than giving a very large `MAXLEN` option because it is more general. + @return -The command returns the integer representing the matching element, or null if there is no match. However, if the `COUNT` option is given the command returns an array (empty if there are no matches). +The command returns the integer representing the matching element, or `nil` if there is no match. However, if the `COUNT` option is given the command returns an array (empty if there are no matches). @examples diff --git a/commands/memory-usage.md b/commands/memory-usage.md index 2bdb1d9d..ae5a4bcc 100644 --- a/commands/memory-usage.md +++ b/commands/memory-usage.md @@ -37,4 +37,4 @@ OK @return -@integer-reply: the memory usage in bytes \ No newline at end of file +@integer-reply: the memory usage in bytes, or `nil` when the key does not exist. diff --git a/commands/migrate.md b/commands/migrate.md index 6559b1b4..77e1f824 100644 --- a/commands/migrate.md +++ b/commands/migrate.md @@ -48,9 +48,9 @@ uses pipelining in order to migrate multiple keys between instances without incurring in the round trip time latency and other overheads that there are when moving each key with a single `MIGRATE` call. -In order to enable this form, the `KEYS` option is used, and the normal *key* +In order to enable this form, the `!KEYS` option is used, and the normal *key* argument is set to an empty string. The actual key names will be provided -after the `KEYS` argument itself, like in the following example: +after the `!KEYS` argument itself, like in the following example: MIGRATE 192.168.1.34 6379 "" 0 5000 KEYS key1 key2 key3 @@ -60,17 +60,17 @@ just a single key exists. ## Options -* `COPY` -- Do not remove the key from the local instance. +* `!COPY` -- Do not remove the key from the local instance. 
* `REPLACE` -- Replace existing key on the remote instance. -* `KEYS` -- If the key argument is an empty string, the command will instead migrate all the keys that follow the `KEYS` option (see the above section for more info). -* `AUTH` -- Authenticate with the given password to the remote instance. +* `!KEYS` -- If the key argument is an empty string, the command will instead migrate all the keys that follow the `KEYS` option (see the above section for more info). +* `!AUTH` -- Authenticate with the given password to the remote instance. * `AUTH2` -- Authenticate with the given username and password pair (Redis 6 or greater ACL auth style). @history -* `>= 3.0.0`: Added the `COPY` and `REPLACE` options. -* `>= 3.0.6`: Added the `KEYS` option. -* `>= 4.0.7`: Added the `AUTH` option. +* `>= 3.0.0`: Added the `!COPY` and `REPLACE` options. +* `>= 3.0.6`: Added the `!KEYS` option. +* `>= 4.0.7`: Added the `!AUTH` option. * `>= 6.0.0`: Added the `AUTH2` option. @return diff --git a/commands/monitor.md b/commands/monitor.md index 2cd62f23..6b00ee4e 100644 --- a/commands/monitor.md +++ b/commands/monitor.md @@ -13,8 +13,9 @@ $ redis-cli monitor 1339518087.877697 [0 127.0.0.1:60866] "dbsize" 1339518090.420270 [0 127.0.0.1:60866] "set" "x" "6" 1339518096.506257 [0 127.0.0.1:60866] "get" "x" -1339518099.363765 [0 127.0.0.1:60866] "del" "x" -1339518100.544926 [0 127.0.0.1:60866] "get" "x" +1339518099.363765 [0 127.0.0.1:60866] "eval" "return redis.call('set','x','7')" "0" +1339518100.363799 [0 lua] "set" "x" "7" +1339518100.544926 [0 127.0.0.1:60866] "del" "x" ``` Use `SIGINT` (Ctrl-C) to stop a `MONITOR` stream running via `redis-cli`. @@ -37,20 +38,15 @@ QUIT Connection closed by foreign host. ``` -Manually issue the `QUIT` command to stop a `MONITOR` stream running via -`telnet`. +Manually issue the `QUIT` or `RESET` commands to stop a `MONITOR` stream running +via `telnet`. 
## Commands not logged by MONITOR -Because of security concerns, all administrative commands are not logged -by `MONITOR`'s output. +Because of security concerns, no administrative commands are logged +by `MONITOR`'s output and sensitive data is redacted in the command `AUTH`. -Furthermore, the following commands are also not logged: - - * `AUTH` - * `EXEC` - * `HELLO` - * `QUIT` +Furthermore, the command `QUIT` is also not logged. ## Cost of running MONITOR @@ -91,4 +87,6 @@ flow. @history -* `>=6.0`: `AUTH` excluded from the command's output. +* `>= 6.0`: `AUTH` excluded from the command's output. +* `>= 6.2`: `RESET` can be called to exit monitor mode. +* `>= 6.2.4`: `AUTH`, `HELLO`, `EVAL`, `EVAL_RO`, `EVALSHA` and `EVALSHA_RO` included in the command's output. diff --git a/commands/pexpire.md b/commands/pexpire.md index 33d9f0bc..6fd90c02 100644 --- a/commands/pexpire.md +++ b/commands/pexpire.md @@ -1,12 +1,24 @@ This command works exactly like `EXPIRE` but the time to live of the key is specified in milliseconds instead of seconds. +## Options + +The `PEXPIRE` command supports a set of options since Redis 7.0: + +* `NX` -- Set expiry only when the key has no expiry +* `XX` -- Set expiry only when the key has an existing expiry +* `GT` -- Set expiry only when the new expiry is greater than current one +* `LT` -- Set expiry only when the new expiry is less than current one + +A non-volatile key is treated as an infinite TTL for the purpose of `GT` and `LT`. +The `GT`, `LT` and `NX` options are mutually exclusive. + @return @integer-reply, specifically: * `1` if the timeout was set. -* `0` if `key` does not exist. +* `0` if the timeout was not set. e.g. key doesn't exist, or operation skipped due to the provided arguments. @examples @@ -15,4 +27,12 @@ SET mykey "Hello" PEXPIRE mykey 1500 TTL mykey PTTL mykey +PEXPIRE mykey 1000 XX +TTL mykey +PEXPIRE mykey 1000 NX +TTL mykey ``` + +@history + +* `>= 7.0`: Added options: `NX`, `XX`, `GT` and `LT`. 
diff --git a/commands/pexpireat.md b/commands/pexpireat.md index a15bb0a9..03fe3465 100644 --- a/commands/pexpireat.md +++ b/commands/pexpireat.md @@ -1,12 +1,24 @@ `PEXPIREAT` has the same effect and semantic as `EXPIREAT`, but the Unix time at which the key will expire is specified in milliseconds instead of seconds. +## Options + +The `PEXPIREAT` command supports a set of options since Redis 7.0: + +* `NX` -- Set expiry only when the key has no expiry +* `XX` -- Set expiry only when the key has an existing expiry +* `GT` -- Set expiry only when the new expiry is greater than current one +* `LT` -- Set expiry only when the new expiry is less than current one + +A non-volatile key is treated as an infinite TTL for the purpose of `GT` and `LT`. +The `GT`, `LT` and `NX` options are mutually exclusive. + @return @integer-reply, specifically: * `1` if the timeout was set. -* `0` if `key` does not exist. +* `0` if the timeout was not set. e.g. key doesn't exist, or operation skipped due to the provided arguments. @examples @@ -16,3 +28,7 @@ PEXPIREAT mykey 1555555555005 TTL mykey PTTL mykey ``` + +@history + +* `>= 7.0`: Added options: `NX`, `XX`, `GT` and `LT`. diff --git a/commands/pexpiretime.md b/commands/pexpiretime.md new file mode 100644 index 00000000..8b571d7e --- /dev/null +++ b/commands/pexpiretime.md @@ -0,0 +1,15 @@ +`PEXPIRETIME` has the same semantic as `EXPIRETIME`, but returns the absolute Unix expiration timestamp in milliseconds instead of seconds. + +@return + +@integer-reply: Expiration Unix timestamp in milliseconds, or a negative value in order to signal an error (see the description below). +* The command returns `-1` if the key exists but has no associated expiration time. +* The command returns `-2` if the key does not exist. 
+ +@examples + +```cli +SET mykey "Hello" +PEXPIREAT mykey 33177117420000 +PEXPIRETIME mykey +``` diff --git a/commands/publish.md b/commands/publish.md index e4b338ab..62283f8d 100644 --- a/commands/publish.md +++ b/commands/publish.md @@ -1,5 +1,11 @@ Posts a message to the given channel. +In a Redis Cluster clients can publish to every node. The cluster makes sure +that published messages are forwarded as needed, so clients can subscribe to any +channel by connecting to any one of the nodes. + @return -@integer-reply: the number of clients that received the message. +@integer-reply: the number of clients that received the message. Note that in a +Redis Cluster, only clients that are connected to the same node as the +publishing client are included in the count. diff --git a/commands/pubsub.md b/commands/pubsub.md index 96f3ce04..9d86bc9b 100644 --- a/commands/pubsub.md +++ b/commands/pubsub.md @@ -4,6 +4,12 @@ documented separately. The general form is: PUBSUB ... args ... +Cluster note: in a Redis Cluster clients can subscribe to every node, and can +also publish to every other node. The cluster will make sure that published +messages are forwarded as needed. That said, `PUBSUB`'s replies in a cluster only +report information from the node's Pub/Sub context, rather than the entire +cluster. + # PUBSUB CHANNELS [pattern] Lists the currently *active channels*. An active channel is a Pub/Sub channel @@ -33,9 +39,9 @@ will just return an empty list. # `PUBSUB NUMPAT` -Returns the number of subscriptions to patterns (that are performed using the -`PSUBSCRIBE` command). Note that this is not just the count of clients subscribed -to patterns but the total number of patterns all the clients are subscribed to. +Returns the number of unique patterns that are subscribed to by clients (that are performed using the +`PSUBSCRIBE` command). 
Note that this is not the count of clients subscribed +to patterns but the total number of unique patterns all the clients are subscribed to. @return diff --git a/commands/reset.md b/commands/reset.md new file mode 100644 index 00000000..b3f17d24 --- /dev/null +++ b/commands/reset.md @@ -0,0 +1,23 @@ +This command performs a full reset of the connection's server-side context, +mimicking the effect of disconnecting and reconnecting again. + +When the command is called from a regular client connection, it does the +following: + +* Discards the current `MULTI` transaction block, if one exists. +* Unwatches all keys `WATCH`ed by the connection. +* Disables `CLIENT TRACKING`, if in use. +* Sets the connection to `READWRITE` mode. +* Cancels the connection's `ASKING` mode, if previously set. +* Sets `CLIENT REPLY` to `ON`. +* Sets the protocol version to RESP2. +* `SELECT`s database 0. +* Exits `MONITOR` mode, when applicable. +* Aborts Pub/Sub's subscription state (`SUBSCRIBE` and `PSUBSCRIBE`), when + appropriate. +* Deauthenticates the connection, requiring a call `AUTH` to reauthenticate when + authentication is enabled. + +@return + +@simple-string-reply: always 'RESET'. diff --git a/commands/rpop.md b/commands/rpop.md index ddf40aa0..9c35815f 100644 --- a/commands/rpop.md +++ b/commands/rpop.md @@ -1,15 +1,28 @@ -Removes and returns the last element of the list stored at `key`. +Removes and returns the last elements of the list stored at `key`. + +By default, the command pops a single element from the end of the list. +When provided with the optional `count` argument, the reply will consist of up +to `count` elements, depending on the list's length. @return +When called without the `count` argument: + @bulk-string-reply: the value of the last element, or `nil` when `key` does not exist. +When called with the `count` argument: + +@array-reply: list of popped elements, or `nil` when `key` does not exist. + +@history + +* `>= 6.2`: Added the `count` argument. 
+ @examples ```cli -RPUSH mylist "one" -RPUSH mylist "two" -RPUSH mylist "three" +RPUSH mylist "one" "two" "three" "four" "five" RPOP mylist +RPOP mylist 2 LRANGE mylist 0 -1 ``` diff --git a/commands/rpoplpush.md b/commands/rpoplpush.md index 121c0509..37019aca 100644 --- a/commands/rpoplpush.md +++ b/commands/rpoplpush.md @@ -13,7 +13,7 @@ If `source` and `destination` are the same, the operation is equivalent to removing the last element from the list and pushing it as first element of the list, so it can be considered as a list rotation command. -As per Redis 6.2.0, RPOPLPUSH is considered deprecated. Please use `LMOVE` in +As per Redis 6.2.0, RPOPLPUSH is considered deprecated. Please prefer `LMOVE` in new code. @return @@ -42,7 +42,7 @@ operation. However in this context the obtained queue is not _reliable_ as messages can be lost, for example in the case there is a network problem or if the consumer -crashes just after the message is received but it is still to process. +crashes just after the message is received but before it can be processed. `RPOPLPUSH` (or `BRPOPLPUSH` for the blocking variant) offers a way to avoid this problem: the consumer fetches the message and at the same time pushes it @@ -51,7 +51,7 @@ It will use the `LREM` command in order to remove the message from the _processing_ list once the message has been processed. An additional client may monitor the _processing_ list for items that remain -there for too much time, and will push those timed out items into the queue +there for too much time, pushing timed out items into the queue again if needed. ## Pattern: Circular list @@ -61,12 +61,12 @@ all the elements of an N-elements list, one after the other, in O(N) without transferring the full list from the server to the client using a single `LRANGE` operation. 
-The above pattern works even if the following two conditions: +The above pattern works even if one or both of the following conditions occur: * There are multiple clients rotating the list: they'll fetch different elements, until all the elements of the list are visited, and the process restarts. -* Even if other clients are actively pushing new items at the end of the list. +* Other clients are actively pushing new items at the end of the list. The above makes it very simple to implement a system where a set of items must be processed by N workers continuously as fast as possible. diff --git a/commands/sadd.md b/commands/sadd.md index 63b3c094..ff672ed3 100644 --- a/commands/sadd.md +++ b/commands/sadd.md @@ -8,7 +8,7 @@ An error is returned when the value stored at `key` is not a set. @return @integer-reply: the number of elements that were added to the set, not including -all the elements already present into the set. +all the elements already present in the set. @history diff --git a/commands/scan.md b/commands/scan.md index 5238a9a9..0ea5cd1d 100644 --- a/commands/scan.md +++ b/commands/scan.md @@ -165,7 +165,7 @@ It is important to note that the **TYPE** filter is also applied after elements ## Multiple parallel iterations -It is possible for an infinite number of clients to iterate the same collection at the same time, as the full state of the iterator is in the cursor, that is obtained and returned to the client at every call. Server side no state is taken at all. +It is possible for an infinite number of clients to iterate the same collection at the same time, as the full state of the iterator is in the cursor, that is obtained and returned to the client at every call. No server side state is taken at all. 
## Terminating iterations in the middle diff --git a/commands/script-debug.md b/commands/script-debug.md index 1b52217a..50a35d6e 100644 --- a/commands/script-debug.md +++ b/commands/script-debug.md @@ -16,7 +16,7 @@ alternative synchronous debug mode blocks the server while the debugging session is active and retains all changes to the data set once it ends. * `YES`. Enable non-blocking asynchronous debugging of Lua scripts (changes are discarded). -* `SYNC`. Enable blocking synchronous debugging of Lua scripts (saves changes to data). +* `!SYNC`. Enable blocking synchronous debugging of Lua scripts (saves changes to data). * `NO`. Disables scripts debug mode. @return diff --git a/commands/script-flush.md b/commands/script-flush.md index 833732d8..2dd449cc 100644 --- a/commands/script-flush.md +++ b/commands/script-flush.md @@ -1,8 +1,19 @@ Flush the Lua scripts cache. -Please refer to the `EVAL` documentation for detailed information about Redis -Lua scripting. +Please refer to the `EVAL` documentation for detailed information about Redis Lua scripting. + +By default, `SCRIPT FLUSH` will synchronously flush the cache. +Starting with Redis 6.2, setting the **lazyfree-lazy-user-flush** configuration directive to "yes" changes the default flush mode to asynchronous. + +It is possible to use one of the following modifiers to dictate the flushing mode explicitly: + +* `ASYNC`: flushes the cache asynchronously +* `!SYNC`: flushes the cache synchronously @return @simple-string-reply + +@history + +* `>= 6.2.0`: Added the `ASYNC` and `!SYNC` flushing mode modifiers, as well as the **lazyfree-lazy-user-flush** configuration directive. diff --git a/commands/set.md b/commands/set.md index 4697f336..280811ae 100644 --- a/commands/set.md +++ b/commands/set.md @@ -8,24 +8,34 @@ The `SET` command supports a set of options that modify its behavior: * `EX` *seconds* -- Set the specified expire time, in seconds. 
* `PX` *milliseconds* -- Set the specified expire time, in milliseconds. +* `EXAT` *timestamp-seconds* -- Set the specified Unix time at which the key will expire, in seconds. +* `PXAT` *timestamp-milliseconds* -- Set the specified Unix time at which the key will expire, in milliseconds. * `NX` -- Only set the key if it does not already exist. * `XX` -- Only set the key if it already exist. * `KEEPTTL` -- Retain the time to live associated with the key. -* `GET` -- Return the old value stored at key, or nil when key did not exist. +* `!GET` -- Return the old string stored at key, or nil if key did not exist. An error is returned and `SET` aborted if the value stored at key is not a string. -Note: Since the `SET` command options can replace `SETNX`, `SETEX`, `PSETEX`, `GETSET`, it is possible that in future versions of Redis these three commands will be deprecated and finally removed. +Note: Since the `SET` command options can replace `SETNX`, `SETEX`, `PSETEX`, `GETSET`, it is possible that in future versions of Redis these commands will be deprecated and finally removed. @return @simple-string-reply: `OK` if `SET` was executed correctly. -@bulk-string-reply: when `GET` option is set, the old value stored at key, or nil when key did not exist. -@nil-reply: a Null Bulk Reply is returned if the `SET` operation was not performed because the user specified the `NX` or `XX` option but the condition was not met or if user specified the `NX` and `GET` options that do not met. + +@nil-reply: `(nil)` if the `SET` operation was not performed because the user specified the `NX` or `XX` option but the condition was not met. + +If the command is issued with the `!GET` option, the above does not apply. It will instead reply as follows, regardless if the `SET` was actually performed: + +@bulk-string-reply: the old string value stored at key. + +@nil-reply: `(nil)` if the key did not exist. + @history * `>= 2.6.12`: Added the `EX`, `PX`, `NX` and `XX` options. 
* `>= 6.0`: Added the `KEEPTTL` option. -* `>= 6.2`: Added the `GET` option. +* `>= 6.2`: Added the `!GET`, `EXAT` and `PXAT` options. +* `>= 7.0`: Allowed the `NX` and `!GET` options to be used together. @examples diff --git a/commands/sintercard.md b/commands/sintercard.md new file mode 100644 index 00000000..4b06982d --- /dev/null +++ b/commands/sintercard.md @@ -0,0 +1,21 @@ +Returns the cardinality of the set which would result from the intersection of all the given sets. + +Keys that do not exist are considered to be empty sets. +With one of the keys being an empty set, the resulting set is also empty (since set intersection with an empty set always results in an empty set). + +@return + +@integer-reply: the number of elements in the resulting intersection. + +@examples + +```cli +SADD key1 "a" +SADD key1 "b" +SADD key1 "c" +SADD key2 "c" +SADD key2 "d" +SADD key2 "e" +SINTER key1 key2 +SINTERCARD key1 key2 +``` diff --git a/commands/slowlog.md b/commands/slowlog.md index 7872c7a1..59d9a0af 100644 --- a/commands/slowlog.md +++ b/commands/slowlog.md @@ -35,6 +35,7 @@ To read the slow log the **SLOWLOG GET** command is used, that returns every entry in the slow log. It is possible to return only the N most recent entries passing an additional argument to the command (for instance **SLOWLOG GET 10**). +The default requested length is 10 (when the argument is omitted). It's possible to pass -1 to get the entire slowlog. Note that you need a recent version of redis-cli in order to read the slow log output, since it uses some features of the protocol that were not formerly diff --git a/commands/sort.md b/commands/sort.md index 28e8bc68..d8994648 100644 --- a/commands/sort.md +++ b/commands/sort.md @@ -1,5 +1,8 @@ Returns or stores the elements contained in the [list][tdtl], [set][tdts] or [sorted set][tdtss] at `key`. + +Since Redis 7.0.0, there is also the `SORT_RO` read-only variant of this command. 
+ By default, sorting is numeric and elements are compared by their value interpreted as double precision floating point number. This is `SORT` in its simplest form: diff --git a/commands/sort_ro.md b/commands/sort_ro.md new file mode 100644 index 00000000..d15303b8 --- /dev/null +++ b/commands/sort_ro.md @@ -0,0 +1,17 @@ +Read-only variant of the `SORT` command. It is exactly like the original `SORT` but refuses the `STORE` option and can safely be used in read-only replicas. + +Since the original `SORT` has a `STORE` option it is technically flagged as a writing command in the Redis command table. For this reason read-only replicas in a Redis Cluster will redirect it to the master instance even if the connection is in read-only mode (see the `READONLY` command of Redis Cluster). + +Since Redis 7.0.0, the `SORT_RO` variant was introduced in order to allow `SORT` behavior in read-only replicas without breaking compatibility on command flags. + +See original `SORT` for more details. + +@examples + +``` +SORT_RO mylist BY weight_*->fieldname GET object_*->fieldname +``` + +@return + +@array-reply: a list of sorted elements. diff --git a/commands/spop.md b/commands/spop.md index 062cbc18..c41d2659 100644 --- a/commands/spop.md +++ b/commands/spop.md @@ -1,12 +1,24 @@ -Removes and returns one or more random elements from the set value store at `key`. +Removes and returns one or more random members from the set value stored at `key`. This operation is similar to `SRANDMEMBER`, that returns one or more random elements from a set but does not remove it. -The `count` argument is available since version 3.2. +By default, the command pops a single member from the set. When provided with +the optional `count` argument, the reply will consist of up to `count` members, +depending on the set's cardinality. @return -@bulk-string-reply: the removed element, or `nil` when `key` does not exist. 
+When called without the `count` argument: + +@bulk-string-reply: the removed member, or `nil` when `key` does not exist. + +When called with the `count` argument: + +@array-reply: the removed members, or an empty array when `key` does not exist. + +@history + +* `>= 3.2`: Added the `count` argument. @examples @@ -21,15 +33,6 @@ SADD myset "five" SPOP myset 3 SMEMBERS myset ``` - -## Specification of the behavior when count is passed - -If count is bigger than the number of elements inside the Set, the command will only return the whole set without additional elements. - ## Distribution of returned elements -Note that this command is not suitable when you need a guaranteed uniform distribution of the returned elements. For more information about the algorithms used for SPOP, look up both the Knuth sampling and Floyd sampling algorithms. - -## Count argument extension - -Redis 3.2 introduced an optional `count` argument that can be passed to `SPOP` in order to retrieve multiple elements in a single call. +Note that this command is not suitable when you need a guaranteed uniform distribution of the returned elements. For more information about the algorithms used for `SPOP`, look up both the Knuth sampling and Floyd sampling algorithms. diff --git a/commands/srandmember.md b/commands/srandmember.md index 1ef408fd..4b5aa63b 100644 --- a/commands/srandmember.md +++ b/commands/srandmember.md @@ -1,13 +1,16 @@ When called with just the `key` argument, return a random element from the set value stored at `key`. -Starting from Redis version 2.6, when called with the additional `count` argument, return an array of `count` **distinct elements** if `count` is positive. If called with a negative `count` the behavior changes and the command is allowed to return the **same element multiple times**. In this case the number of returned elements is the absolute value of the specified `count`. +If the provided `count` argument is positive, return an array of **distinct elements**. 
+The array's length is either `count` or the set's cardinality (`SCARD`), whichever is lower. -When called with just the key argument, the operation is similar to `SPOP`, however while `SPOP` also removes the randomly selected element from the set, `SRANDMEMBER` will just return a random element without altering the original set in any way. +If called with a negative `count`, the behavior changes and the command is allowed to return the **same element multiple times**. +In this case, the number of returned elements is the absolute value of the specified `count`. @return -@bulk-string-reply: without the additional `count` argument the command returns a Bulk Reply with the randomly selected element, or `nil` when `key` does not exist. -@array-reply: when the additional `count` argument is passed the command returns an array of elements, or an empty array when `key` does not exist. +@bulk-string-reply: without the additional `count` argument, the command returns a Bulk Reply with the randomly selected element, or `nil` when `key` does not exist. + +@array-reply: when the additional `count` argument is passed, the command returns an array of elements, or an empty array when `key` does not exist. @examples @@ -18,20 +21,28 @@ SRANDMEMBER myset 2 SRANDMEMBER myset -5 ``` +@history + +* `>= 2.6.0`: Added the optional `count` argument. + ## Specification of the behavior when count is passed -When a count argument is passed and is positive, the elements are returned -as if every selected element is removed from the set (like the extraction -of numbers in the game of Bingo). However elements are **not removed** from -the Set. So basically: +When the `count` argument is a positive value this command behaves as follows: * No repeated elements are returned. -* If count is bigger than the number of elements inside the Set, the command will only return the whole set without additional elements. 
+* If `count` is bigger than the set's cardinality, the command will only return the whole set without additional elements. +* The order of elements in the reply is not truly random, so it is up to the client to shuffle them if needed. -When instead the count is negative, the behavior changes and the extraction happens as if you put the extracted element inside the bag again after every extraction, so repeated elements are possible, and the number of elements requested is always returned as we can repeat the same elements again and again, with the exception of an empty Set (non existing key) that will always produce an empty array as a result. +When the `count` is a negative value, the behavior changes as follows: + +* Repeating elements are possible. +* Exactly `count` elements, or an empty array if the set is empty (non-existing key), are always returned. +* The order of elements in the reply is truly random. ## Distribution of returned elements +Note: this section is relevant only for Redis 5 or below, as Redis 6 implements a fairer algorithm. + The distribution of the returned elements is far from perfect when the number of elements in the set is small, this is due to the fact that we used an approximated random element function that does not really guarantees good distribution. The algorithm used, that is implemented inside dict.c, samples the hash table buckets to find a non-empty one. Once a non empty bucket is found, since we use chaining in our hash table implementation, the number of elements inside the bucket is checked and a random element is selected. diff --git a/commands/stralgo.md b/commands/stralgo.md index 05df3a4b..8231d6be 100644 --- a/commands/stralgo.md +++ b/commands/stralgo.md @@ -11,7 +11,7 @@ the argument must be "LCS", since this is the only implemented one. ## LCS algorithm ``` -STRALGO LCS [KEYS ...] [STRINGS ...] 
[LEN] [IDX] [MINMATCHLEN ] [WITHMATCHLEN] +STRALGO LCS STRINGS | KEYS [LEN] [IDX] [MINMATCHLEN ] [WITHMATCHLEN] ``` The LCS subcommand implements the longest common subsequence algorithm. Note that this is different than the longest common string algorithm, since matching characters in the string does not need to be contiguous. diff --git a/commands/subscribe.md b/commands/subscribe.md index 997670cf..4e252756 100644 --- a/commands/subscribe.md +++ b/commands/subscribe.md @@ -2,4 +2,8 @@ Subscribes the client to the specified channels. Once the client enters the subscribed state it is not supposed to issue any other commands, except for additional `SUBSCRIBE`, `PSUBSCRIBE`, `UNSUBSCRIBE`, -`PUNSUBSCRIBE`, `PING` and `QUIT` commands. +`PUNSUBSCRIBE`, `PING`, `RESET` and `QUIT` commands. + +@history + +* `>= 6.2`: `RESET` can be called to exit subscribed state. diff --git a/commands/xack.md b/commands/xack.md index 0c110ceb..aae2db55 100644 --- a/commands/xack.md +++ b/commands/xack.md @@ -1,5 +1,5 @@ The `XACK` command removes one or multiple messages from the -*pending entries list* (PEL) of a stream consumer group. A message is pending, +*Pending Entries List* (PEL) of a stream consumer group. A message is pending, and as such stored inside the PEL, when it was delivered to some consumer, normally as a side effect of calling `XREADGROUP`, or when a consumer took ownership of a message calling `XCLAIM`. The pending message was delivered to @@ -20,9 +20,12 @@ Redis server. The command returns the number of messages successfully acknowledged. Certain message IDs may no longer be part of the PEL (for example because -they have been already acknowledge), and XACK will not count them as +they have already been acknowledged), and XACK will not count them as successfully acknowledged. 
-```cli -XACK mystream mygroup 1526569495631-0 +@examples + +``` +redis> XACK mystream mygroup 1526569495631-0 +(integer) 1 ``` diff --git a/commands/xadd.md b/commands/xadd.md index 960670a7..a3fc182d 100644 --- a/commands/xadd.md +++ b/commands/xadd.md @@ -1,6 +1,7 @@ Appends the specified stream entry to the stream at the specified key. If the key does not exist, as a side effect of running this command the -key is created with a stream value. +key is created with a stream value. The creation of stream's key can be +disabled with the `NOMKSTREAM` option. An entry is composed of a set of field-value pairs, it is basically a small dictionary. The field-value pairs are stored in the same order @@ -15,6 +16,7 @@ remove data from a stream. ## Specifying a Stream ID as an argument A stream entry ID identifies a given entry inside a stream. + The `XADD` command will auto-generate a unique ID for you if the ID argument specified is the `*` character (asterisk ASCII character). However, while useful only in very rare cases, it is possible to specify a well-formed ID, so @@ -40,30 +42,22 @@ a different absolute time. When a user specified an explicit ID to `XADD`, the minimum valid ID is `0-1`, and the user *must* specify an ID which is greater than any other -ID currently inside the stream, otherwise the command will fail. Usually +ID currently inside the stream, otherwise the command will fail and return an error. Usually resorting to specific IDs is useful only if you have another system generating unique IDs (for instance an SQL table) and you really want the Redis stream IDs to match the one of this other system. ## Capped streams -It is possible to limit the size of the stream to a maximum number of -elements using the **MAXLEN** option. +`XADD` incorporates the same semantics as the `XTRIM` command - refer to its documentation page for more information. 
+This allows adding new entries and keeping the stream's size in check with a single call to `XADD`, effectively capping the stream with an arbitrary threshold. +Although exact trimming is possible and is the default, due to the internal representation of streams it is more efficient to add an entry and trim stream with `XADD` using **almost exact** trimming (the `~` argument). -Trimming with **MAXLEN** can be expensive compared to just adding entries with -`XADD`: streams are represented by macro nodes into a radix tree, in order to -be very memory efficient. Altering the single macro node, consisting of a few -tens of elements, is not optimal. So it is possible to give the command in the -following special form: +For example, calling `XADD` in the following form: XADD mystream MAXLEN ~ 1000 * ... entry fields here ... - -The `~` argument between the **MAXLEN** option and the actual count means that -the user is not really requesting that the stream length is exactly 1000 items, -but instead it could be a few tens of entries more, but never less than 1000 -items. When this option modifier is used, the trimming is performed only when -Redis is able to remove a whole macro node. This makes it much more efficient, -and it is usually what you want. + +Will add a new entry but will also evict old entries so that the stream will contain only 1000 entries, or at most a few tens more. ## Additional information about streams @@ -78,6 +72,13 @@ The command returns the ID of the added entry. The ID is the one auto-generated if `*` is passed as ID argument, otherwise the command just returns the same ID specified by the user during insertion. +The command returns a @nil-reply when used with the `NOMKSTREAM` option and the +key doesn't exist. + +@history + +* `>= 6.2`: Added the `NOMKSTREAM` option, `MINID` trimming strategy and the `LIMIT` option. 
+ @examples ```cli diff --git a/commands/xautoclaim.md b/commands/xautoclaim.md new file mode 100644 index 00000000..434671a1 --- /dev/null +++ b/commands/xautoclaim.md @@ -0,0 +1,46 @@ +This command transfers ownership of pending stream entries that match the specified criteria. Conceptually, `XAUTOCLAIM` is equivalent to calling `XPENDING` and then `XCLAIM`,
but provides a more straightforward way to deal with message delivery failures via `SCAN`-like semantics. + +Like `XCLAIM`, the command operates on the stream entries at `<key>` and in the context of the provided `<group>`. +It transfers ownership to `<consumer>` of messages pending for more than `<min-idle-time>` milliseconds and having an equal or greater ID than `<start>`. + +The optional `<count>` argument, which defaults to 100, is the upper limit of the number of entries that the command attempts to claim. +Internally, the command begins scanning the consumer group's Pending Entries List (PEL) from `<start>` and filters out entries having an idle time less than or equal to `<min-idle-time>`. +The maximum number of pending entries that the command scans is the product of multiplying `<count>`'s value by 10 (hard-coded). +It is possible, therefore, that the number of entries claimed will be less than the specified value. + +The optional `JUSTID` argument changes the reply to return just an array of IDs of messages successfully claimed, without returning the actual message. +Using this option means the retry counter is not incremented. + +The command returns the claimed entries as an array. It also returns a stream ID intended for cursor-like use as the `<start>` argument for its subsequent call. +When there are no remaining PEL entries, the command returns the special `0-0` ID to signal completion. +However, note that you may want to continue calling `XAUTOCLAIM` even after the scan is complete with the `0-0` as `<start>` ID, because enough time passed, so older pending entries may now be eligible for claiming. 
+ +Note that only messages that are idle longer than `<min-idle-time>` are claimed, and claiming a message resets its idle time. +This ensures that only a single consumer can successfully claim a given pending message at a specific instant of time and trivially reduces the probability of processing the same message multiple times. + +Lastly, claiming a message with `XAUTOCLAIM` also increments the attempted deliveries count for that message, unless the `JUSTID` option has been specified (which only delivers the message ID, not the message itself). +Messages that cannot be processed for some reason - for example, because consumers systematically crash when processing them - will exhibit high attempted delivery counts that can be detected by monitoring. + +@return + +@array-reply, specifically: + +An array with two elements: + +1. The first element is a stream ID to be used as the `<start>` argument for the next call to `XAUTOCLAIM` +2. The second element is an array containing all the successfully claimed messages in the same format as `XRANGE`. + +@examples + +``` +> XAUTOCLAIM mystream mygroup Alice 3600000 0-0 COUNT 25 +1) "0-0" +2) 1) 1) "1609338752495-0" + 2) 1) "field" + 2) "value" +``` + +In the above example, we attempt to claim up to 25 entries that are pending and idle (not having been acknowledged or claimed) for at least an hour, starting at the stream's beginning. +The consumer "Alice" from the "mygroup" group acquires ownership of these messages. +Note that the stream ID returned in the example is `0-0`, indicating that the entire stream was scanned. diff --git a/commands/xclaim.md b/commands/xclaim.md index a8ca2b11..480ecb6d 100644 --- a/commands/xclaim.md +++ b/commands/xclaim.md @@ -4,9 +4,9 @@ command argument. Normally this is what happens: 1. There is a stream with an associated consumer group. 2. Some consumer A reads a message via `XREADGROUP` from a stream, in the context of that consumer group. -3. 
As a side effect a pending message entry is created in the pending entries list (PEL) of the consumer group: it means the message was delivered to a given consumer, but it was not yet acknowledged via `XACK`. +3. As a side effect a pending message entry is created in the Pending Entries List (PEL) of the consumer group: it means the message was delivered to a given consumer, but it was not yet acknowledged via `XACK`. 4. Then suddenly that consumer fails forever. -5. Other consumers may inspect the list of pending messages, that are stale for quite some time, using the `XPENDING` command. In order to continue processing such messages, they use `XCLAIM` to acquire the ownership of the message and continue. +5. Other consumers may inspect the list of pending messages, that are stale for quite some time, using the `XPENDING` command. In order to continue processing such messages, they use `XCLAIM` to acquire the ownership of the message and continue. As of Redis 6.2, consumers can use the `XAUTOCLAIM` command to automatically scan and claim stale pending messages. This dynamic is clearly explained in the [Stream intro documentation](/topics/streams-intro). @@ -35,7 +35,7 @@ The command returns all the messages successfully claimed, in the same format as `XRANGE`. However if the `JUSTID` option was specified, only the message IDs are reported, without including the actual message. -Example: +@examples ``` > XCLAIM mystream mygroup Alice 3600000 1526569498055-0 diff --git a/commands/xgroup.md b/commands/xgroup.md index 95dc9fb5..07f42a8e 100644 --- a/commands/xgroup.md +++ b/commands/xgroup.md @@ -22,8 +22,8 @@ zero as the starting ID for the consumer group: Of course it is also possible to use any other valid ID. If the specified consumer group already exists, the command returns a `-BUSYGROUP` error. -Otherwise the operation is performed and OK is returned. There are no hard -limits to the number of consumer groups you can associate to a given stream. 
+Otherwise, the operation is performed and a @simple-string-reply `OK` is returned. +There are no hard limits to the number of consumer groups you can associate with a given stream. If the specified stream doesn't exist when creating a group, an error will be returned. You can use the optional `MKSTREAM` subcommand as the last argument @@ -39,6 +39,7 @@ A consumer group can be destroyed completely by using the following form: The consumer group will be destroyed even if there are active consumers and pending messages, so make sure to call this command only when really needed. +This form returns an @integer-reply with the number of destroyed consumer groups (0 or 1). Consumers in a consumer group are auto-created every time a new consumer name is mentioned by some command. They can also be explicitly created @@ -46,14 +47,16 @@ by using the following form: XGROUP CREATECONSUMER mystream consumer-group-name myconsumer123 +This form returns an @integer-reply with the number of created consumers (0 or 1). + To just remove a given consumer from a consumer group, the following form is used: XGROUP DELCONSUMER mystream consumer-group-name myconsumer123 Sometimes it may be useful to remove old consumers since they are no longer -used. This form returns the number of pending messages that the consumer -had before it was deleted. +used. +This form returns an @integer-reply with the number of pending messages that the consumer had before it was deleted. Finally it possible to set the next message to deliver using the `SETID` subcommand. Normally the next ID is set when the consumer is @@ -65,6 +68,8 @@ ID to 0: XGROUP SETID mystream consumer-group-name 0 +This form returns a @simple-string-reply `OK` or an error. 
+ Finally to get some help if you don't remember the syntax, use the HELP subcommand: diff --git a/commands/xinfo.md b/commands/xinfo.md index 7176ea94..2e27fcba 100644 --- a/commands/xinfo.md +++ b/commands/xinfo.md @@ -39,7 +39,8 @@ sense about what is the stream content. * `XINFO STREAM FULL [COUNT ]` In this form the command returns the entire state of the stream, including -entries, groups, consumers and PELs. This form is available since Redis 6.0. +entries, groups, consumers and Pending Entries Lists (PELs). +This form is available since Redis 6.0. ``` > XADD mystream * foo bar diff --git a/commands/xpending.md b/commands/xpending.md index 7fe4fdad..9e74cf6f 100644 --- a/commands/xpending.md +++ b/commands/xpending.md @@ -2,7 +2,7 @@ Fetching data from a stream via a consumer group, and not acknowledging such data, has the effect of creating *pending entries*. This is well explained in the `XREADGROUP` command, and even better in our [introduction to Redis Streams](/topics/streams-intro). The `XACK` command -will immediately remove the pending entry from the Pending Entry List (PEL) +will immediately remove the pending entry from the Pending Entries List (PEL) since once a message is successfully processed, there is no longer need for the consumer group to track it and to remember the current owner of the message. @@ -60,9 +60,11 @@ consumer group, which is one, followed by the smallest and greatest ID among the pending messages, and then list every consumer in the consumer group with at least one pending message, and the number of pending messages it has. -This is a good overview, but sometimes we are interested in the details. -In order to see all the pending messages with more associated information -we need to also pass a range of IDs, in a similar way we do it with +## Extended form of XPENDING + +The summary provides a good overview, but sometimes we are interested in the +details. 
In order to see all the pending messages with more associated +information we need to also pass a range of IDs, in a similar way we do it with `XRANGE`, and a non optional *count* argument, to limit the number of messages returned per call: @@ -75,7 +77,7 @@ of messages returned per call: ``` In the extended form we no longer see the summary information, instead there -are detailed information for each message in the pending entries list. For +is detailed information for each message in the pending entries list. For each message four attributes are returned: 1. The ID of the message. @@ -88,7 +90,7 @@ when some other consumer *claims* the message with `XCLAIM`, or when the message is delivered again via `XREADGROUP`, when accessing the history of a consumer in a consumer group (see the `XREADGROUP` page for more info). -Finally it is possible to pass an additional argument to the command, in order +It is possible to pass an additional argument to the command, in order to see the messages having a specific owner: ``` @@ -103,6 +105,29 @@ we have a pending entries list data structure both globally, and for every consumer, so we can very efficiently show just messages pending for a single consumer. +## Idle time filter + +Since version 6.2 it is possible to filter entries by their idle-time, +given in milliseconds (useful for `XCLAIM`ing entries that have not been +processed for some time): + +``` +> XPENDING mystream group55 IDLE 9000 - + 10 +> XPENDING mystream group55 IDLE 9000 - + 10 consumer-123 +``` + +The first case will return the first 10 (or less) PEL entries of the entire group +that are idle for over 9 seconds, whereas in the second case only those of +`consumer-123`. + +## Exclusive ranges and iterating the PEL + +The `XPENDING` command allows iterating over the pending entries just like +`XRANGE` and `XREVRANGE` allow for the stream's entries. 
You can do this by +prefixing the ID of the last-read pending entry with the `(` character that +denotes an open (exclusive) range, and proving it to the subsequent call to the +command. + @return @array-reply, specifically: @@ -110,3 +135,7 @@ a single consumer. The command returns data in different format depending on the way it is called, as previously explained in this page. However the reply is always an array of items. + +@history + +* `>= 6.2.0`: Added the `IDLE` option and exclusive range intervals. diff --git a/commands/xrange.md b/commands/xrange.md index 34a831eb..100024d6 100644 --- a/commands/xrange.md +++ b/commands/xrange.md @@ -67,6 +67,13 @@ Used in this way `XRANGE` works as a range query command to obtain entries in a specified time. This is very handy in order to access the history of past events in a stream. +## Exclusive ranges + +The range is close (inclusive) by default, meaning that the reply can include +entries with IDs matching the query's start and end intervals. It is possible +to specify an open interval (exclusive) by prefixing the ID with the +character `(`. This is useful for iterating the stream, as explained below. + ## Returning a maximum number of entries Using the **COUNT** option it is possible to reduce the number of entries @@ -111,13 +118,13 @@ elements, which is trivial: Then instead of starting the iteration again from `-`, as the start of the range we use the entry ID of the *last* entry returned by the -previous `XRANGE` call, adding the sequence part of the ID by one. +previous `XRANGE` call as an exclusive interval. 
-The ID of the last entry is `1526985685298-0`, so we just add 1 to the -sequence to obtain `1526985685298-1`, and continue our iteration: +The ID of the last entry is `1526985685298-0`, so we just prefix it +with a '(', and continue our iteration: ``` -> XRANGE writers 1526985685298-1 + COUNT 2 +> XRANGE writers (1526985685298-0 + COUNT 2 1) 1) 1526985691746-0 2) 1) "name" 2) "Toni" @@ -140,6 +147,37 @@ The command `XREAD` is also able to iterate the stream. The command `XREVRANGE` can iterate the stream reverse, from higher IDs (or times) to lower IDs (or times). +### Iterating with earlier versions of Redis + +While exclusive range intervals are only available from Redis 6.2, it is still +possible to use a similar stream iteration pattern with earlier versions. You +start fetching from the stream the same way as described above to obtain the +first entries. + +For the subsequent calls, you'll need to programmatically advance the last +entry's ID returned. Most Redis client should abstract this detail, but the +implementation can also be in the application if needed. In the example above, +this means incrementing the sequence of `1526985685298-0` by one, from 0 to 1. +The second call would, therefore, be: + +``` +> XRANGE writers 1526985685298-1 + COUNT 2 +1) 1) 1526985691746-0 + 2) 1) "name" + 2) "Toni" +... +``` + +Also, note that once the sequence part of the last ID equals +18446744073709551615, you'll need to increment the timestamp and reset the +sequence part to 0. For example, incrementing the ID +`1526985685298-18446744073709551615` should result in `1526985685299-0`. + +A symmetrical pattern applies to iterating the stream with `XREVRANGE`. The +only difference is that the client needs to decrement the ID for the subsequent +calls. When decrementing an ID with a sequence part of 0, the timestamp needs +to be decremented by 1 and the sequence set to 18446744073709551615. 
+ ## Fetching single items If you look for an `XGET` command you'll be disappointed because `XRANGE` @@ -172,6 +210,10 @@ The returned entries are complete, that means that the ID and all the fields they are composed are returned. Moreover, the entries are returned with their fields and values in the exact same order as `XADD` added them. +@history + +* `>= 6.2` Added exclusive ranges. + @examples ```cli diff --git a/commands/xreadgroup.md b/commands/xreadgroup.md index 23764685..7c4dc096 100644 --- a/commands/xreadgroup.md +++ b/commands/xreadgroup.md @@ -62,7 +62,7 @@ are no differences in this regard. Two things: -1. If the message was never delivered to anyone, that is, if we are talking about a new message, then a PEL (Pending Entry List) is created. +1. If the message was never delivered to anyone, that is, if we are talking about a new message, then a PEL (Pending Entries List) is created. 2. If instead the message was already delivered to this consumer, and it is just re-fetching the same message again, then the *last delivery counter* is updated to the current time, and the *number of deliveries* is incremented by one. You can access those message properties using the `XPENDING` command. ## Usage example @@ -101,3 +101,20 @@ can start to use `>` as ID, in order to get the new messages and rejoin the consumers that are processing new things. To see how the command actually replies, please check the `XREAD` command page. + +@return + +@array-reply, specifically: + +The command returns an array of results: each element of the returned +array is an array composed of a two element containing the key name and +the entries reported for that key. The entries reported are full stream +entries, having IDs and the list of all the fields and values. Field and +values are guaranteed to be reported in the same order they were added +by `XADD`. + +When **BLOCK** is used, on timeout a null reply is returned. 
+ +Reading the [Redis Streams introduction](/topics/streams-intro) is highly +suggested in order to understand more about the streams overall behavior +and semantics. diff --git a/commands/xrevrange.md b/commands/xrevrange.md index 2518c433..eec3d33b 100644 --- a/commands/xrevrange.md +++ b/commands/xrevrange.md @@ -14,54 +14,6 @@ enough to send: XREVRANGE somestream + - COUNT 1 -## Iterating with XREVRANGE - -Like `XRANGE` this command can be used in order to iterate the whole -stream content, however note that in this case, the next command calls -should use the ID of the last entry, with the sequence number decremented -by one. However if the sequence number is already 0, the time part of the -ID should be decremented by 1, and the sequence part should be set to -the maximum possible sequence number, that is, 18446744073709551615, or -could be omitted at all, and the command will automatically assume it to -be such a number (see `XRANGE` for more info about incomplete IDs). - -Example: - -``` -> XREVRANGE writers + - COUNT 2 -1) 1) 1526985723355-0 - 2) 1) "name" - 2) "Ngozi" - 3) "surname" - 4) "Adichie" -2) 1) 1526985712947-0 - 2) 1) "name" - 2) "Agatha" - 3) "surname" - 4) "Christie" -``` - -The last ID returned is `1526985712947-0`, since the sequence number is -already zero, the next ID I'll use instead of the `+` special ID will -be `1526985712946-18446744073709551615`, or just `18446744073709551615`: - -``` -> XREVRANGE writers 1526985712946-18446744073709551615 - COUNT 2 -1) 1) 1526985691746-0 - 2) 1) "name" - 2) "Toni" - 3) "surname" - 4) "Morrison" -2) 1) 1526985685298-0 - 2) 1) "name" - 2) "Jane" - 3) "surname" - 4) "Austen" -``` - -And so for until the iteration is complete and no result is returned. -See the `XRANGE` page about iterating for more information. - @return @array-reply, specifically: @@ -72,6 +24,10 @@ The returned entries are complete, that means that the ID and all the fields they are composed are returned. 
Moreover the entries are returned with their fields and values in the exact same order as `XADD` added them. +@history + +* `>= 6.2` Added exclusive ranges. + @examples ```cli diff --git a/commands/xtrim.md b/commands/xtrim.md index 1754fa68..ca9c55e4 100644 --- a/commands/xtrim.md +++ b/commands/xtrim.md @@ -1,34 +1,58 @@ -`XTRIM` trims the stream to a given number of items, evicting older items -(items with lower IDs) if needed. The command is conceived to accept multiple -trimming strategies, however currently only a single one is implemented, -which is `MAXLEN`, and works exactly as the `MAXLEN` option in `XADD`. +`XTRIM` trims the stream by evicting older entries (entries with lower IDs) if needed. -For example the following command will trim the stream to exactly -the latest 1000 items: +Trimming the stream can be done using one of these strategies: + +* `MAXLEN`: Evicts entries as long as the stream's length exceeds the specified `threshold`, where `threshold` is a positive integer. +* `MINID`: Evicts entries with IDs lower than `threshold`, where `threshold` is a stream ID. + +For example, this will trim the stream to exactly the latest 1000 items: ``` XTRIM mystream MAXLEN 1000 ``` -It is possible to give the command in the following special form in -order to make it more efficient: +Whereas in this example, all entries that have an ID lower than 649085820-0 will be evicted: + +``` +XTRIM mystream MINID 649085820 +``` + +By default, or when provided with the optional `=` argument, the command performs exact trimming. + +Depending on the strategy, exact trimming means: + +* `MAXLEN`: the trimmed stream's length will be exactly the minimum between its original length and the specified `threshold`. +* `MINID`: the oldest ID in the stream will be exactly the minimum between its original oldest ID and the specified `threshold`. 
+ +Nearly exact trimming +--- + +Because exact trimming may require additional effort from the Redis server, the optional `~` argument can be provided to make it more efficient. + +For example: ``` XTRIM mystream MAXLEN ~ 1000 ``` -The `~` argument between the **MAXLEN** option and the actual count means that -the user is not really requesting that the stream length is exactly 1000 items, -but instead it could be a few tens of entries more, but never less than 1000 -items. When this option modifier is used, the trimming is performed only when -Redis is able to remove a whole macro node. This makes it much more efficient, -and it is usually what you want. +The `~` argument between the `MAXLEN` strategy and the `threshold` means that the user is requesting to trim the stream so its length is **at least** the `threshold`, but possibly slightly more. +In this case, Redis will stop trimming early when performance can be gained (for example, when a whole macro node in the data structure can't be removed). +This makes trimming much more efficient, and it is usually what you want, although after trimming, the stream may have few tens of additional entries over the `threshold`. + +Another way to control the amount of work done by the command when using the `~`, is the `LIMIT` clause. +When used, it specifies the maximal `count` of entries that will be evicted. +When `LIMIT` and `count` aren't specified, the default value of 100 * the number of entries in a macro node will be implicitly used as the `count`. +Specifying the value 0 as `count` disables the limiting mechanism entirely. @return -@integer-reply, specifically: +@integer-reply: The number of entries deleted from the stream. + +@history + +* `>= 6.2`: Added the `MINID` trimming strategy and the `LIMIT` option. -The command returns the number of entries deleted from the stream. 
+@examples ```cli XADD mystream * field1 A field2 B field3 C field4 D diff --git a/commands/zadd.md b/commands/zadd.md index 8656ce9c..a554a155 100644 --- a/commands/zadd.md +++ b/commands/zadd.md @@ -16,8 +16,8 @@ ZADD options ZADD supports a list of options, specified after the name of the key and before the first score argument. Options are: -* **XX**: Only update elements that already exist. Never add elements. -* **NX**: Don't update already existing elements. Always add new elements. +* **XX**: Only update elements that already exist. Don't add new elements. +* **NX**: Only add new elements. Don't update already existing elements. * **LT**: Only update existing elements if the new score is **less than** the current score. This flag doesn't prevent adding new elements. * **GT**: Only update existing elements if the new score is **greater than** the current score. This flag doesn't prevent adding new elements. * **CH**: Modify the return value from the number of new elements added, to the total number of elements changed (CH is an abbreviation of *changed*). Changed elements are **new elements added** and elements already existing for which **the score was updated**. So elements specified in the command line having the same score as they had in the past are not counted. Note: normally the return value of `ZADD` only counts the number of new elements added. @@ -62,8 +62,8 @@ If the user inserts all the elements in a sorted set with the same score (for ex @integer-reply, specifically: -* The number of elements added to the sorted set, not including elements - already existing for which the score was updated. +* When used without optional arguments, the number of elements added to the sorted set (excluding score updates). +* If the `CH` option is specified, the number of elements that were changed (added or updated). 
If the `INCR` option is specified, the return value will be @bulk-string-reply: @@ -75,7 +75,7 @@ If the `INCR` option is specified, the return value will be @bulk-string-reply: In Redis versions older than 2.4 it was possible to add or update a single member per call. * `>= 3.0.2`: Added the `XX`, `NX`, `CH` and `INCR` options. -* `>=6.2`: Added the `GT` and `LT` options. +* `>= 6.2`: Added the `GT` and `LT` options. @examples diff --git a/commands/zdiff.md b/commands/zdiff.md new file mode 100644 index 00000000..d9449b7e --- /dev/null +++ b/commands/zdiff.md @@ -0,0 +1,19 @@ +This command is similar to `ZDIFFSTORE`, but instead of storing the resulting +sorted set, it is returned to the client. + +@return + +@array-reply: the result of the difference (optionally with their scores, in case +the `WITHSCORES` option is given). + +@examples + +```cli +ZADD zset1 1 "one" +ZADD zset1 2 "two" +ZADD zset1 3 "three" +ZADD zset2 1 "one" +ZADD zset2 2 "two" +ZDIFF 2 zset1 zset2 +ZDIFF 2 zset1 zset2 WITHSCORES +``` diff --git a/commands/zdiffstore.md b/commands/zdiffstore.md new file mode 100644 index 00000000..abe3ba7e --- /dev/null +++ b/commands/zdiffstore.md @@ -0,0 +1,24 @@ +Computes the difference between the first and all successive input sorted sets +and stores the result in `destination`. The total number of input keys is +specified by `numkeys`. + +Keys that do not exist are considered to be empty sets. + +If `destination` already exists, it is overwritten. + +@return + +@integer-reply: the number of elements in the resulting sorted set at +`destination`. 
+ +@examples + +```cli +ZADD zset1 1 "one" +ZADD zset1 2 "two" +ZADD zset1 3 "three" +ZADD zset2 1 "one" +ZADD zset2 2 "two" +ZDIFFSTORE out 2 zset1 zset2 +ZRANGE out 0 -1 WITHSCORES +``` diff --git a/commands/zintercard.md b/commands/zintercard.md new file mode 100644 index 00000000..84abe27f --- /dev/null +++ b/commands/zintercard.md @@ -0,0 +1,20 @@ +This command is similar to `ZINTER`, but instead of returning the result set, it returns just the cardinality of the result. + +Keys that do not exist are considered to be empty sets. +With one of the keys being an empty set, the resulting set is also empty (since set intersection with an empty set always results in an empty set). + +@return + +@integer-reply: the number of elements in the resulting intersection. + +@examples + +```cli +ZADD zset1 1 "one" +ZADD zset1 2 "two" +ZADD zset2 1 "one" +ZADD zset2 2 "two" +ZADD zset2 3 "three" +ZINTER 2 zset1 zset2 +ZINTERCARD 2 zset1 zset2 +``` diff --git a/commands/zrandmember.md b/commands/zrandmember.md new file mode 100644 index 00000000..aae0b254 --- /dev/null +++ b/commands/zrandmember.md @@ -0,0 +1,39 @@ +When called with just the `key` argument, return a random element from the sorted set value stored at `key`. + +If the provided `count` argument is positive, return an array of **distinct elements**. +The array's length is either `count` or the sorted set's cardinality (`ZCARD`), whichever is lower. + +If called with a negative `count`, the behavior changes and the command is allowed to return the **same element multiple times**. +In this case, the number of returned elements is the absolute value of the specified `count`. + +The optional `WITHSCORES` modifier changes the reply so it includes the respective scores of the randomly selected elements from the sorted set. + +@return + +@bulk-string-reply: without the additional `count` argument, the command returns a Bulk Reply with the randomly selected element, or `nil` when `key` does not exist. 
+ +@array-reply: when the additional `count` argument is passed, the command returns an array of elements, or an empty array when `key` does not exist. +If the `WITHSCORES` modifier is used, the reply is a list elements and their scores from the sorted set. + +@examples + +```cli +ZADD dadi 1 uno 2 due 3 tre 4 quattro 5 cinque 6 sei +ZRANDMEMBER dadi +ZRANDMEMBER dadi +ZRANDMEMBER dadi -5 WITHSCORES +``` + +## Specification of the behavior when count is passed + +When the `count` argument is a positive value this command behaves as follows: + +* No repeated elements are returned. +* If `count` is bigger than the cardinality of the sorted set, the command will only return the whole sorted set without additional elements. +* The order of elements in the reply is not truly random, so it is up to the client to shuffle them if needed. + +When the `count` is a negative value, the behavior changes as follows: + +* Repeating elements are possible. +* Exactly `count` elements, or an empty array if the sorted set is empty (non-existing key), are always returned. +* The order of elements in the reply is truly random. diff --git a/commands/zrange.md b/commands/zrange.md index 663de5b2..a05c6f1a 100644 --- a/commands/zrange.md +++ b/commands/zrange.md @@ -1,37 +1,91 @@ -Returns the specified range of elements in the sorted set stored at `key`. -The elements are considered to be ordered from the lowest to the highest score. -Lexicographical order is used for elements with equal score. - -See `ZREVRANGE` when you need the elements ordered from highest to lowest score -(and descending lexicographical order for elements with equal score). - -Both `start` and `stop` are zero-based indexes, where `0` is the first element, -`1` is the next element and so on. -They can also be negative numbers indicating offsets from the end of the sorted -set, with `-1` being the last element of the sorted set, `-2` the penultimate -element and so on. 
- -`start` and `stop` are **inclusive ranges**, so for example `ZRANGE myzset 0 1` -will return both the first and the second element of the sorted set. - -Out of range indexes will not produce an error. -If `start` is larger than the largest index in the sorted set, or `start > -stop`, an empty list is returned. -If `stop` is larger than the end of the sorted set Redis will treat it like it -is the last element of the sorted set. - -It is possible to pass the `WITHSCORES` option in order to return the scores of -the elements together with the elements. -The returned list will contain `value1,score1,...,valueN,scoreN` instead of -`value1,...,valueN`. -Client libraries are free to return a more appropriate data type (suggestion: an -array with (value, score) arrays/tuples). +Returns the specified range of elements in the sorted set stored at ``. + +`ZRANGE` can perform different types of range queries: by index (rank), by the score, or by lexicographical order. + +Starting with Redis 6.2.0, this command can replace the following commands: `ZREVRANGE`, `ZRANGEBYSCORE`, `ZREVRANGEBYSCORE`, `ZRANGEBYLEX` and `ZREVRANGEBYLEX`. + +## Common behavior and options + +The order of elements is from the lowest to the highest score. Elements with the same score are ordered lexicographically. + +The optional `REV` argument reverses the ordering, so elements are ordered from highest to lowest score, and score ties are resolved by reverse lexicographical ordering. + +The optional `LIMIT` argument can be used to obtain a sub-range from the matching elements (similar to _SELECT LIMIT offset, count_ in SQL). +A negative `` returns all elements from the ``. Keep in mind that if `` is large, the sorted set needs to be traversed for `` elements before getting to the elements to return, which can add up to O(N) time complexity. + +The optional `WITHSCORES` argument supplements the command's reply with the scores of elements returned. 
The returned list contains `value1,score1,...,valueN,scoreN` instead of `value1,...,valueN`. Client libraries are free to return a more appropriate data type (suggestion: an array with (value, score) arrays/tuples). + +## Index ranges + +By default, the command performs an index range query. The `` and `` arguments represent zero-based indexes, where `0` is the first element, `1` is the next element, and so on. These arguments specify an **inclusive range**, so for example, `ZRANGE myzset 0 1` will return both the first and the second element of the sorted set. + +The indexes can also be negative numbers indicating offsets from the end of the sorted set, with `-1` being the last element of the sorted set, `-2` the penultimate element, and so on. + +Out of range indexes do not produce an error. + +If `` is greater than either the end index of the sorted set or ``, an empty list is returned. + +If `` is greater than the end index of the sorted set, Redis will use the last element of the sorted set. + +## Score ranges + +When the `BYSCORE` option is provided, the command behaves like `ZRANGEBYSCORE` and returns the range of elements from the sorted set having scores equal or between `` and ``. + +`` and `` can be `-inf` and `+inf`, denoting the negative and positive infinities, respectively. This means that you are not required to know the highest or lowest score in the sorted set to get all elements from or up to a certain score. + +By default, the score intervals specified by `` and `` are closed (inclusive). +It is possible to specify an open interval (exclusive) by prefixing the score +with the character `(`. + +For example: + +``` +ZRANGE zset (1 5 BYSCORE +``` + +Will return all elements with `1 < score <= 5` while: + +``` +ZRANGE zset (5 (10 BYSCORE +``` + +Will return all the elements with `5 < score < 10` (5 and 10 excluded). 
+ +## Lexicographical ranges + +When the `BYLEX` option is used, the command behaves like `ZRANGEBYLEX` and returns the range of elements from the sorted set between the `` and `` lexicographical closed range intervals. + +Note that lexicographical ordering relies on all elements having the same score. The reply is unspecified when the elements have different scores. + +Valid `` and `` must start with `(` or `[`, in order to specify +whether the range interval is exclusive or inclusive, respectively. + +The special values of `+` or `-` `` and `` mean positive and negative infinite strings, respectively, so for instance the command **ZRANGEBYLEX myzset - +** is guaranteed to return all the elements in the sorted set, providing that all the elements have the same score. + +### Lexicographical comparison of strings + +Strings are compared as a binary array of bytes. Because of how the ASCII character set is specified, this means that usually this also have the effect of comparing normal ASCII characters in an obvious dictionary way. However, this is not true if non-plain ASCII strings are used (for example, utf8 strings). + +However, the user can apply a transformation to the encoded string so that the first part of the element inserted in the sorted set will compare as the user requires for the specific application. For example, if I want to +add strings that will be compared in a case-insensitive way, but I still +want to retrieve the real case when querying, I can add strings in the +following way: + + ZADD autocomplete 0 foo:Foo 0 bar:BAR 0 zap:zap + +Because of the first *normalized* part in every element (before the colon character), we are forcing a given comparison. However, after the range is queried using `ZRANGE ... BYLEX`, the application can display to the user the second part of the string, after the colon. 
+ +The binary nature of the comparison allows to use sorted sets as a general purpose index, for example, the first part of the element can be a 64-bit big-endian number. Since big-endian numbers have the most significant bytes in the initial positions, the binary comparison will match the numerical comparison of the numbers. This can be used in order to implement range queries on 64-bit values. As in the example below, after the first 8 bytes, we can store the value of the element we are indexing. @return @array-reply: list of elements in the specified range (optionally with their scores, in case the `WITHSCORES` option is given). +@history + +* `>= 6.2`: Added the `REV`, `BYSCORE`, `BYLEX` and `LIMIT` options. + @examples ```cli @@ -48,3 +102,9 @@ The following example using `WITHSCORES` shows how the command returns always an ```cli ZRANGE myzset 0 1 WITHSCORES ``` + +This example shows how to query the sorted set by score, excluding the value `1` and up to infinity, returning only the second element of the result: + +```cli +ZRANGE myzset (1 +inf BYSCORE LIMIT 1 1 +``` \ No newline at end of file diff --git a/commands/zrangebylex.md b/commands/zrangebylex.md index 4eefffc0..a87b64f2 100644 --- a/commands/zrangebylex.md +++ b/commands/zrangebylex.md @@ -4,6 +4,8 @@ If the elements in the sorted set have different scores, the returned elements a The elements are considered to be ordered from lower to higher strings as compared byte-by-byte using the `memcmp()` C function. Longer strings are considered greater than shorter strings if the common part is identical. +As per Redis 6.2.0, this command is considered deprecated. Please prefer using the `ZRANGE` command with the `BYLEX` argument in new code. + The optional `LIMIT` argument can be used to only get a range of the matching elements (similar to _SELECT LIMIT offset, count_ in SQL). A negative `count` returns all elements from the `offset`. 
diff --git a/commands/zrangebyscore.md b/commands/zrangebyscore.md index bc817085..2f0f3917 100644 --- a/commands/zrangebyscore.md +++ b/commands/zrangebyscore.md @@ -6,6 +6,8 @@ The elements having the same score are returned in lexicographical order (this follows from a property of the sorted set implementation in Redis and does not involve further computation). +As per Redis 6.2.0, this command is considered deprecated. Please prefer using the `ZRANGE` command with the `BYSCORE` argument in new code. + The optional `LIMIT` argument can be used to only get a range of the matching elements (similar to _SELECT LIMIT offset, count_ in SQL). A negative `count` returns all elements from the `offset`. diff --git a/commands/zrangestore.md b/commands/zrangestore.md new file mode 100644 index 00000000..8dc744c3 --- /dev/null +++ b/commands/zrangestore.md @@ -0,0 +1,13 @@ +This command is like `ZRANGE`, but stores the result in the `` destination key. + +@return + +@integer-reply: the number of elements in the resulting sorted set. + +@examples + +```cli +ZADD srczset 1 "one" 2 "two" 3 "three" 4 "four" +ZRANGESTORE dstzset srczset 2 -1 +ZRANGE dstzset 0 -1 +``` diff --git a/commands/zremrangebylex.md b/commands/zremrangebylex.md index 4098f1e4..4264f1b0 100644 --- a/commands/zremrangebylex.md +++ b/commands/zremrangebylex.md @@ -1,6 +1,6 @@ When all the elements in a sorted set are inserted with the same score, in order to force lexicographical ordering, this command removes all elements in the sorted set stored at `key` between the lexicographical range specified by `min` and `max`. -The meaning of `min` and `max` are the same of the `ZRANGEBYLEX` command. Similarly, this command actually returns the same elements that `ZRANGEBYLEX` would return if called with the same `min` and `max` arguments. +The meaning of `min` and `max` are the same of the `ZRANGEBYLEX` command. 
Similarly, this command actually removes the same elements that `ZRANGEBYLEX` would return if called with the same `min` and `max` arguments. @return diff --git a/commands/zrevrange.md b/commands/zrevrange.md index 3a19810c..8632c1ba 100644 --- a/commands/zrevrange.md +++ b/commands/zrevrange.md @@ -4,6 +4,8 @@ Descending lexicographical order is used for elements with equal score. Apart from the reversed ordering, `ZREVRANGE` is similar to `ZRANGE`. +As per Redis 6.2.0, this command is considered deprecated. Please prefer using the `ZRANGE` command with the `REV` argument in new code. + @return @array-reply: list of elements in the specified range (optionally with diff --git a/commands/zrevrangebylex.md b/commands/zrevrangebylex.md index c6772c91..3772b7e5 100644 --- a/commands/zrevrangebylex.md +++ b/commands/zrevrangebylex.md @@ -2,6 +2,8 @@ When all the elements in a sorted set are inserted with the same score, in order Apart from the reversed ordering, `ZREVRANGEBYLEX` is similar to `ZRANGEBYLEX`. +As per Redis 6.2.0, this command is considered deprecated. Please prefer using the `ZRANGE` command with the `BYLEX` and `REV` arguments in new code. + @return @array-reply: list of elements in the specified score range. diff --git a/commands/zrevrangebyscore.md b/commands/zrevrangebyscore.md index e95d771b..d2167176 100644 --- a/commands/zrevrangebyscore.md +++ b/commands/zrevrangebyscore.md @@ -9,6 +9,8 @@ order. Apart from the reversed ordering, `ZREVRANGEBYSCORE` is similar to `ZRANGEBYSCORE`. +As per Redis 6.2.0, this command is considered deprecated. Please prefer using the `ZRANGE` command with the `BYSCORE` and `REV` arguments in new code. 
+ @return @array-reply: list of elements in the specified score range (optionally diff --git a/modules.json b/modules.json index 61d1d4b3..a25032c1 100644 --- a/modules.json +++ b/modules.json @@ -18,7 +18,7 @@ "MeirShpilraien", "RedisLabs" ], - "stars": 122 + "stars": 185 }, { "name": "redis-roaring", @@ -49,7 +49,7 @@ "swilly22", "RedisLabs" ], - "stars": 1144 + "stars": 1409 }, { "name": "redis-tdigest", @@ -70,7 +70,7 @@ "itamarhaber", "RedisLabs" ], - "stars": 1119 + "stars": 1358 }, { "name": "RediSearch", @@ -81,7 +81,7 @@ "dvirsky", "RedisLabs" ], - "stars": 2616 + "stars": 3051 }, { "name": "RedisBloom", @@ -92,7 +92,7 @@ "mnunberg", "RedisLabs" ], - "stars": 691 + "stars": 960 }, { "name": "neural-redis", @@ -113,7 +113,7 @@ "danni-m", "RedisLabs" ], - "stars": 455 + "stars": 593 }, { "name": "RedisAI", @@ -124,7 +124,7 @@ "lantiga", "RedisLabs" ], - "stars": 435 + "stars": 604 }, { "name": "ReDe", @@ -187,6 +187,26 @@ ], "stars": 1125 }, + { + "name": "TairHash", + "license": "Apache-2.0", + "repository": "https://github.com/alibaba/TairHash", + "description": "A redis module, similar to redis hash, and you can set expire and version for the field", + "authors": [ + "Alibaba" + ], + "stars": 37 + }, + { + "name": "TairString", + "license": "Apache-2.0", + "repository": "https://github.com/alibaba/TairString", + "description": "A redis module, similar to redis string, and support CAS/CAD operations", + "authors": [ + "Alibaba" + ], + "stars": 28 + }, { "name": "lqrm", "license": "BSD", @@ -267,6 +287,16 @@ ], "stars": 30 }, + { + "name": "redis-tree", + "license": "MIT", + "repository": "https://github.com/OhBonsai/RedisTree", + "description": "Implements Polytree as a native data type. 
It allows creating,locating,pushing and detaching tree from Redis keys.", + "authors": [ + "ohbonsai" + ], + "stars": 18 + }, { "name": "Reventis", "license": "Redis Source Available License", @@ -317,7 +347,7 @@ ], "stars":0 }, - { + { "name":"Redis-ImageScout", "license": "pHash Redis Source Available License", "repository": "https://github.com/starkdg/Redis-ImageScout.git", @@ -327,7 +357,7 @@ ], "stars":2 }, - { + { "name":"redex", "license": "AGPL-3.0", "repository": "https://github.com/RedisLabsModules/redex.git", @@ -336,5 +366,45 @@ "itamarhaber" ], "stars":52 + }, + { + "name":"Redis Interval Sets", + "license": "BSD-3-Clause", + "repository": "https://github.com/danitseitlin/redis-interval-sets", + "description": "A Redis module for creating interval sets", + "authors": [ + "danitseitlin" + ], + "stars":3 + }, + { + "name":"redicrypt", + "license": "MIT", + "repository": "https://github.com/chayim/redicrypt", + "description": "Redis module for string encryption and decryption", + "authors": [ + "chayim" + ], + "stars":0 + }, + { + "name": "redis-interval-module", + "license": "MIT", + "repository": "https://github.com/ogama/redis-interval-module", + "description": "Redis module for creation and manipulation of interval set.", + "authors": [ + "ogama" + ], + "stars": 3 + }, + { + "name": "redisims", + "license": "MIT", + "repository": "https://github.com/Clement-Jean/RedisIMS", + "description": "A lightweight Redis module following the If Modified Since (IMS) pattern for caching", + "authors": [ + "Clement-Jean" + ], + "stars": 0 } ] diff --git a/tools.json b/tools.json index c5c47a0b..2c307ad8 100644 --- a/tools.json +++ b/tools.json @@ -77,13 +77,6 @@ "description": "Parse Redis dump.rdb files, Analyze Memory, and Export Data to JSON.", "authors": ["srithedabbler"] }, - { - "name": "Redily", - "language": "Javascript", - "url": "https://www.redily.app", - "description": "An intuitive, cross-platform Redis GUI Client built in Electron.", - "authors": 
["stefano_arnone"] - }, { "name": "AnotherRedisDesktopManager", "language": "Javascript", @@ -115,7 +108,8 @@ { "name": "Meerkat", "language": "Ruby", - "repository": "http://carlhoerberg.github.io/meerkat/", + "url": "https://carlhoerberg.github.io/meerkat/", + "repository": "https://github.com/carlhoerberg/meerkat", "description": "Rack middleware for Server Sent Events with multiple backends.", "authors": ["carlhoerberg"] }, @@ -186,14 +180,15 @@ { "name": "Sidekiq", "language": "Ruby", - "repository": "http://sidekiq.org/", + "url": "http://sidekiq.org/", + "repository": "https://github.com/mperham/sidekiq/", "description": "Simple, efficient message processing for your Rails 3 application.", - "authors": ["mperham"] + "authors": ["getajobmike"] }, { "name": "Omhiredis", "language": "C", - "repository": "http://www.rsyslog.com/doc/build_from_repo.html", + "repository": "https://github.com/rsyslog/rsyslog", "description": "Redis output plugin for rsyslog (rsyslog dev, and rsyslog head).", "authors": ["taotetek"] }, @@ -258,12 +253,12 @@ "language": "Python", "repository": "https://github.com/bbangert/retools", "description": "Caching and locking helper library.", - "authors": ["benbangert"] + "authors": [] }, { "name": "Redback", "language": "Javascript", - "repository": "http://github.com/chriso/redback", + "repository": "https://github.com/chriso/redback", "description": "Higher-level Redis constructs - social graph, full text search, rate limiting, key pairs.", "authors": [] }, @@ -337,18 +332,10 @@ "name": "Nydus", "language": "Python", "url": "https://pypi.python.org/pypi/nydus", - "repository": "https://pypi.python.org/pypi/nydus", + "repository": "https://github.com/disqus/nydus", "description": "Connection clustering and routing for Redis and Python.", "authors": ["zeeg"] }, - { - "name": "redis-mount", - "language": "Go", - "url": "https://github.com/poying/redis-mount", - "repository": "https://github.com/poying/redis-mount", - "description": 
"redis-mount lets you use Redis as a filesystem.", - "authors": ["poying"] - }, { "name": "RPQueue", "language": "Python", @@ -371,7 +358,7 @@ "url": "https://github.com/adriano-di-giovanni/node-redis-keychain", "repository": "https://github.com/adriano-di-giovanni/node-redis-keychain", "description": "A Node.js library for streamlining the configuration and maintenance of your Redis namespace", - "authors": ["codecreativity"] + "authors": [] }, { "name": "phpRedExpert", @@ -394,7 +381,7 @@ "language": "Python", "repository" : "https://github.com/binarydud/pyres", "description" : "Python library inspired by Resque for creating background jobs and workers", - "authors" : ["binarydud"] + "authors" : [] }, { "name": "Redis-RdbParser", @@ -488,13 +475,6 @@ "description": "Redis GUI tool for windows platform.", "authors": ["cinience"] }, - { - "name": "Keylord", - "language": "Java", - "url": "http://protonail.com/products/keylord", - "description": "Cross-platform administration and development GUI application for key-value databases like Redis, LevelDB, etc.", - "authors": ["protonail"] - }, { "name": "redispapa", "language": "Python", @@ -516,7 +496,7 @@ "language": "C++", "url": "https://github.com/zhengshuxin/acl/tree/master/app/redis_tools/redis_builder", "repository": "https://github.com/zhengshuxin/acl/tree/master/app/redis_tools/redis_builder", - "description": "A C++ Redis tool to create and manage a Redis cluster, basing on acl Redis lib in https://github.com/zhengshuxin/acl/tree/master/lib_acl_cpp/samples/redis", + "description": "A C++ Redis tool to create and manage a Redis cluster", "authors": ["zhengshuxin"] }, { @@ -524,14 +504,14 @@ "language": "Python", "repository": "https://github.com/coleifer/huey", "description": "Simple multi-threaded Python task queue. 
Supports Redis.", - "authors": ["coleifer"] + "authors": [] }, { "name": "walrus", "language": "Python", "repository": "https://github.com/coleifer/walrus", "description": "A collection of lightweight utilities for working with Redis in Python. Includes ORM, autocompletion, full-text search, cache, locks, and more.", - "authors": ["coleifer"] + "authors": [] }, { "name": "RedisPlatform", @@ -620,13 +600,6 @@ "description": "Iodine is an HTTP / Websocket server with native pub/sub support. Iodine includes an integrated Redis client that provides Pub/Sub scaling beyond machine boundaries.", "authors": ["bowildmusic"] }, - { - "name": "Regis", - "language": "Swift", - "url": "https://www.harfangapps.com/regis/", - "description": "Full-featured Redis client for the Mac, available on the Mac App Store.", - "authors": ["harfangapps"] - }, { "name": "miniredis", "language": "Go", @@ -729,7 +702,7 @@ "language": "C#", "repository": "https://github.com/Oriflame/RedisMessaging.ReliableDelivery", "description": "This library provides reliability to delivering messages via Redis. By design Redis pub/sub message delivery is not reliable so it can happen that some messages can be lost due to network issues or they can be delivered more than once in case of Redis replication failure.", - "authors": ["PetrKozelek" , "OriflameSoftware"] + "authors": ["PetrKozelek"] }, { @@ -739,11 +712,26 @@ "description": "A structured streaming framework built atop Redis Streams with built-in support for persistence and indefinitely long streams.", "authors": [] }, + { + "name": "Red", + "language": "Swift", + "url": "https://echodot.com/red/", + "description": "♥️ A beautiful Redis UI for the Mac with tabbed-views, pub/sub, real-time updates, interactive console, LUA scripting and much more. 
Available on the Mac App Store.", + "authors": ["echodot"] + }, { "name": "Runnel", "language": "Python", "repository": "https://github.com/mjwestcott/runnel", "description": "Distributed event processing for Python based on Redis Streams", "authors": ["mjwestcott"] + }, + { + "name": "Trino (formerly Presto SQL) Redis Connector", + "language": "SQL", + "url": "https://trino.io/docs/current/connector/redis.html", + "repository": "https://github.com/trinodb/trino", + "description": "Trino Redis connector allows querying Redis data with ANSI SQL, with queries spanning Redis and other services such as Hive, relational databases, Cassandra, Kafka, cloud object storage, or leveraging multiple Redis instances at once", + "authors": [] } ] diff --git a/topics/acl.md b/topics/acl.md index da0adae9..1a62b512 100644 --- a/topics/acl.md +++ b/topics/acl.md @@ -63,7 +63,7 @@ and verify what the configuration of a freshly started, defaults-configured Redis instance is: > ACL LIST - 1) "user default on nopass ~* +@all" + 1) "user default on nopass ~* &* +@all" The command above reports the list of users in the same format that is used in the Redis configuration files, by translating the current ACLs set @@ -73,8 +73,8 @@ The first two words in each line are "user" followed by the username. The next words are ACL rules that describe different things. We'll show in details how the rules work, but for now it is enough to say that the default user is configured to be active (on), to require no password (nopass), to -access every possible key (`~*`) and be able to call every possible command -(+@all). +access every possible key (`~*`) and Pub/Sub channel (`&*`), and be able to +call every possible command (`+@all`). 
Also, in the special case of the default user, having the *nopass* rule means that new connections are automatically authenticated with the default user @@ -105,10 +105,16 @@ Allow and disallow commands: Allow and disallow certain keys: -* `~`: Add a pattern of keys that can be mentioned as part of commands. For instance `~*` allows all the keys. The pattern is a glob-style pattern like the one of KEYS. It is possible to specify multiple patterns. +* `~`: Add a pattern of keys that can be mentioned as part of commands. For instance `~*` allows all the keys. The pattern is a glob-style pattern like the one of `KEYS`. It is possible to specify multiple patterns. * `allkeys`: Alias for `~*`. * `resetkeys`: Flush the list of allowed keys patterns. For instance the ACL `~foo:* ~bar:* resetkeys ~objects:*`, will result in the client only be able to access keys matching the pattern `objects:*`. +Allow and disallow Pub/Sub channels: + +* `&`: Add a glob style pattern of Pub/Sub channels that can be accessed by the user. It is possible to specify multiple channel patterns. Note that pattern matching is done only for channels mentioned by `PUBLISH` and `SUBSCRIBE`, whereas `PSUBSCRIBE` requires a literal match between its channel patterns and those allowed for user. +* `allchannels`: Alias for `&*` that allows the user to access all Pub/Sub channels. +* `resetchannels`: Flush the list of allowed channel patterns and disconnect the user's Pub/Sub clients if these are no longer able to access their respective channels and/or channel patterns. + Configure valid passwords for the user: * `>`: Add this password to the list of valid passwords for the user. For example `>mypass` will add "mypass" to the list of valid passwords. This directive clears the *nopass* flag (see later). Every user can have any number of passwords. @@ -122,7 +128,7 @@ Configure valid passwords for the user: Reset the user: -* `reset` Performs the following actions: resetpass, resetkeys, off, -@all. 
The user returns to the same state it has immediately after its creation. +* `reset` Performs the following actions: resetpass, resetkeys, resetchannels, off, -@all. The user returns to the same state it has immediately after its creation. ## Creating and editing users ACLs with the ACL SETUSER command @@ -143,22 +149,24 @@ To start let's try the simplest `ACL SETUSER` command call: The `SETUSER` command takes the username and a list of ACL rules to apply to the user. However in the above example I did not specify any rule at all. -This will just create the user if it did not exist, using the default -attributes of a just creates uses. If the user already exist, the command -above will do nothing at all. +This will just create the user if it did not exist, using the defaults for new +users. If the user already exists, the command above will do nothing at all. Let's check what is the default user status: > ACL LIST - 1) "user alice off -@all" - 2) "user default on nopass ~* +@all" + 1) "user alice off &* -@all" + 2) "user default on nopass ~* &* +@all" The just created user "alice" is: * In off status, that is, it's disabled. AUTH will not work. -* Cannot access any command. Note that the user is created by default without the ability to access any command, so the `-@all` in the output above could be omitted, however `ACL LIST` attempts to be explicit rather than implicit. -* Finally there are no key patterns that the user can access. * The user also has no passwords set. +* Cannot access any command. Note that the user is created by default without the ability to access any command, so the `-@all` in the output above could be omitted, however `ACL LIST` attempts to be explicit rather than implicit. +* There are no key patterns that the user can access. +* The user can access all Pub/Sub channels. + +New users are created with restrictive permissions by default. Starting with Redis 6.2, ACL provides Pub/Sub channels access management as well. 
To ensure backwards compatibility with version 6.0 when upgrading to Redis 6.2, new users are granted the 'allchannels' permission by default. The default can be set to `resetchannels` via the `acl-pubsub-default` configuration directive. Such user is completely useless. Let's try to define the user so that it is active, has a password, and can access with only the `GET` command @@ -186,20 +194,25 @@ computers to read, while `ACL LIST` is more biased towards humans. > ACL GETUSER alice 1) "flags" 2) 1) "on" + 2) "allchannels" 3) "passwords" 4) 1) "2d9c75..." 5) "commands" 6) "-@all +get" 7) "keys" 8) 1) "cached:*" + 9) "channels" + 10) 1) "*" The `ACL GETUSER` returns a field-value array describing the user in more parsable terms. The output includes the set of flags, a list of key patterns, passwords and so forth. The output is probably more readable if we use RESP3, so that it is returned as as map reply: > ACL GETUSER alice 1# "flags" => 1~ "on" - 2# "passwords" => 1) "2d9c75..." + 2~ "allchannels" + 2# "passwords" => 1) "2d9c75273d72b32df726fb545c8a4edc719f0a95a6fd993950b10c474ad9c927" 3# "commands" => "-@all +get" 4# "keys" => 1) "cached:*" + 5# "channels" => 1) "*" *Note: from now on we'll continue using the Redis default protocol, version 2, because it will take some time for the community to switch to the new one.* @@ -208,8 +221,8 @@ Using another `ACL SETUSER` command (from a different user, because alice cannot > ACL SETUSER alice ~objects:* ~items:* ~public:* OK > ACL LIST - 1) "user alice on >2d9c75... ~cached:* ~objects:* ~items:* ~public:* -@all +get" - 2) "user default on nopass ~* +@all" + 1) "user alice on >2d9c75... ~cached:* ~objects:* ~items:* ~public:* &* -@all +get" + 2) "user default on nopass ~* &* +@all" The user representation in memory is now as we expect it to be. 
@@ -233,8 +246,8 @@ the following sequence: Will result in myuser being able to call both `GET` and `SET`: > ACL LIST - 1) "user default on nopass ~* +@all" - 2) "user myuser off -@all +set +get" + 1) "user default on nopass ~* &* +@all" + 2) "user myuser off &* -@all +set +get" ## Playings with command categories @@ -255,9 +268,42 @@ the case of an ACL that is just additive, that is, in the form of `+@all -...` You should be absolutely sure that you'll never include what you did not mean to. -However to remember that categories are defined, and what commands each -category exactly includes, is impossible and would be super boring, so the -Redis `ACL` command exports the `CAT` subcommand that can be used in two forms: +The following is a list of command categories and their meanings: + +* keyspace - Writing or reading from keys, databases, or their metadata + in a type agnostic way. Includes `DEL`, `RESTORE`, `DUMP`, `RENAME`, `EXISTS`, `DBSIZE`, + `KEYS`, `EXPIRE`, `TTL`, `FLUSHALL`, etc. Commands that may modify the keyspace, + key or metadata will also have `write` category. Commands that only read + the keyspace, key or metadata will have the `read` category. +* read - Reading from keys (values or metadata). Note that commands that don't + interact with keys, will not have either `read` or `write`. +* write - Writing to keys (values or metadata). +* admin - Administrative commands. Normal applications will never need to use + these. Includes `REPLICAOF`, `CONFIG`, `DEBUG`, `SAVE`, `MONITOR`, `ACL`, `SHUTDOWN`, etc. +* dangerous - Potentially dangerous commands (each should be considered with care for + various reasons). This includes `FLUSHALL`, `MIGRATE`, `RESTORE`, `SORT`, `KEYS`, + `CLIENT`, `DEBUG`, `INFO`, `CONFIG`, `SAVE`, `REPLICAOF`, etc. +* connection - Commands affecting the connection or other connections. + This includes `AUTH`, `SELECT`, `COMMAND`, `CLIENT`, `ECHO`, `PING`, etc. 
+* blocking - Potentially blocking the connection until released by another + command. +* fast - Fast O(1) commands. May loop on the number of arguments, but not the + number of elements in the key. +* slow - All commands that are not `fast`. +* pubsub - PubSub-related commands. +* transaction - `WATCH` / `MULTI` / `EXEC` related commands. +* scripting - Scripting related. +* set - Data type: sets related. +* sortedset - Data type: sorted sets related. +* list - Data type: lists related. +* hash - Data type: hashes related. +* string - Data type: strings related. +* bitmap - Data type: bitmaps related. +* hyperloglog - Data type: hyperloglog related. +* geo - Data type: geospatial indexes related. +* stream - Data type: streams related. + +Redis can also show you a list of all categories, and the exact commands each category includes using the redis `ACL` command's `CAT` subcommand that can be used in two forms: ACL CAT -- Will just list all the categories available ACL CAT -- Will list all the commands inside the category @@ -313,7 +359,7 @@ dangerous and non dangerous operations. Many deployments may not be happy to provide the ability to execute `CLIENT KILL` to non admin-level users, but may still want them to be able to run `CLIENT SETNAME`. 
-_Note: the new RESP3 `HELLO` command will probably provide a SETNAME option soon, but this is still a good example anyway._ +_Note: the new RESP3 `HELLO` handshake command provides a `SETNAME` option, but this is still a good example for subcommand control._ In such case I could alter the ACL of a user in the following way: @@ -354,14 +400,17 @@ examples, for the sake of brevity, the long hex string was trimmed: 2) 1) "on" 2) "allkeys" 3) "allcommands" + 4) "allchannels" 3) "passwords" 4) 1) "2d9c75273d72b32df726fb545c8a4edc719f0a95a6fd993950b10c474ad9c927" 5) "commands" 6) "+@all" 7) "keys" 8) 1) "*" + 9) "channels" + 10) 1) "*" -Also the old command `CONFIG GET requirepass` will, starting with Redis 6, +Also, starting with Redis 6, the old command `CONFIG GET requirepass` will no longer return the clear text password, but instead the hashed password. Using SHA256 provides the ability to avoid storing the password in clear text @@ -441,9 +490,9 @@ For Sentinel, allow the user to access the following commands both in the master * AUTH, CLIENT, SUBSCRIBE, SCRIPT, PUBLISH, PING, INFO, MULTI, SLAVEOF, CONFIG, CLIENT, EXEC. 
-Sentinel does not need to access any key in the database, so the ACL rule would be the following (note: AUTH is not needed since is always allowed): +Sentinel does not need to access any key in the database but does use Pub/Sub, so the ACL rule would be the following (note: AUTH is not needed since it is always allowed): - ACL setuser sentinel-user >somepassword +client +subscribe +publish +ping +info +multi +slaveof +config +client +exec on + ACL SETUSER sentinel-user on >somepassword allchannels +multi +slaveof +ping +exec +subscribe +config|rewrite +role +publish +info +client|setname +client|kill +script|kill Redis replicas require the following commands to be whitelisted on the master instance: @@ -451,7 +500,7 @@ Redis replicas require the following commands to be whitelisted on the master in No keys need to be accessed, so this translates to the following rules: - ACL setuser replica-user >somepassword +psync +replconf +ping on + ACL SETUSER replica-user on >somepassword +psync +replconf +ping Note that you don't need to configure the replicas to allow the master to be able to execute any set of commands: the master is always authenticated as the root user from the point of view of replicas. diff --git a/topics/admin.md b/topics/admin.md index 85cec9d9..958a854a 100644 --- a/topics/admin.md +++ b/topics/admin.md @@ -9,10 +9,9 @@ Redis setup hints + We suggest deploying Redis using the **Linux operating system**. Redis is also tested heavily on OS X, and tested from time to time on FreeBSD and OpenBSD systems. However Linux is where we do all the major stress testing, and where most production deployments are running. + Make sure to set the Linux kernel **overcommit memory setting to 1**. Add `vm.overcommit_memory = 1` to `/etc/sysctl.conf` and then reboot or run the command `sysctl vm.overcommit_memory=1` for this to take effect immediately. 
-* Make sure to disable Linux kernel feature *transparent huge pages*, it will affect greatly both memory usage and latency in a negative way. This is accomplished with the following command: `echo never > /sys/kernel/mm/transparent_hugepage/enabled`. -+ Make sure to **setup some swap** in your system (we suggest as much as swap as memory). If Linux does not have swap and your Redis instance accidentally consumes too much memory, either Redis will crash for out of memory or the Linux kernel OOM killer will kill the Redis process. When swapping is enabled Redis will work in a bad way, but you'll likely notice the latency spikes and do something before it's too late. -+ Set an explicit `maxmemory` option limit in your instance in order to make sure that the instance will report errors instead of failing when the system memory limit is near to be reached. Note that maxmemory should be set calculating the overhead that Redis has, other than data, and the fragmentation overhead. So if you think you have 10 GB of free memory, set it to 8 or 9. -+ If you are using Redis in a very write-heavy application, while saving an RDB file on disk or rewriting the AOF log **Redis may use up to 2 times the memory normally used**. The additional memory used is proportional to the number of memory pages modified by writes during the saving process, so it is often proportional to the number of keys (or aggregate types items) touched during this time. Make sure to size your memory accordingly. ++ Make sure Redis won't be affected by Linux kernel feature *transparent huge pages*, otherwise it will impact greatly both memory usage and latency in a negative way. This is accomplished with the following command: `echo madvise > /sys/kernel/mm/transparent_hugepage/enabled`. ++ Make sure to **setup some swap** in your system (we suggest as much as swap as memory). 
If Linux does not have swap and your Redis instance accidentally consumes too much memory, either Redis will crash when it is out of memory or the Linux kernel OOM killer will kill the Redis process. When swapping is enabled Redis will work in a bad way, but you'll likely notice the latency spikes and do something before it's too late. ++ Set an explicit `maxmemory` option limit in your instance in order to make sure that the instance will report errors instead of failing when the system memory limit is near to be reached. Note that `maxmemory` should be set calculating the overhead that Redis has, other than data, and the fragmentation overhead. So if you think you have 10 GB of free memory, set it to 8 or 9.+ If you are using Redis in a very write-heavy application, while saving an RDB file on disk or rewriting the AOF log **Redis may use up to 2 times the memory normally used**. The additional memory used is proportional to the number of memory pages modified by writes during the saving process, so it is often proportional to the number of keys (or aggregate types items) touched during this time. Make sure to size your memory accordingly. + Use `daemonize no` when running under daemontools. + Make sure to setup some non trivial replication backlog, which must be set in proportion to the amount of memory Redis is using. In a 20 GB instance it does not make sense to have just 1 MB of backlog. The backlog will allow replicas to resynchronize with the master instance much easily. + Even if you have persistence disabled, Redis will need to perform RDB saves if you use replication, unless you use the new diskless replication feature. If you have no disk usage on the master, make sure to enable diskless replication. 
diff --git a/topics/client-side-caching.md b/topics/client-side-caching.md index 1e00343c..57d486a9 100644 --- a/topics/client-side-caching.md +++ b/topics/client-side-caching.md @@ -42,7 +42,7 @@ Moreover there are many datasets where items change very infrequently. For instance most user posts in a social network are either immutable or rarely edited by the user. Adding this to the fact that usually a small percentage of the posts are very popular, either because a small set of users -have a lot of follower and/or because recent posts have a lot more +have a lot of followers and/or because recent posts have a lot more visibility, it is clear why such pattern can be very useful. Usually the two key advantages of client side caching are: @@ -244,6 +244,7 @@ In this mode we have the following main behaviors: * Clients enable client side caching using the `BCAST` option, specifying one or more prefixes using the `PREFIX` option. For instance: `CLIENT TRACKING on REDIRECT 10 BCAST PREFIX object: PREFIX user:`. If no prefix is specified at all, the prefix is assumed to be the empty string, so the client will receive invalidation messages for every key that gets modified. Instead if one or more prefixes are used, only keys matching the one of the specified prefixes will be sent in the invalidation messages. * The server does not store anything in the invalidation table. Instead it only uses a different **Prefixes Table**, where each prefix is associated to a list of clients. +* No two prefixes can track overlapping parts of the keyspace. For instance, having the prefix foo and foob would not be allowed, since they would both trigger an invalidation for the key foobar. However, just using the prefix foo is sufficient. * Every time a key matching any of the prefixes is modified, all the clients subscribed to such prefix, will receive the invalidation message. * The server will consume a CPU proportional to the number of registered prefixes. 
If you have just a few, it is hard to see any difference. With a big number of prefixes the CPU cost can become quite large. * In this mode the server can perform the optimization of creating a single reply for all the clients subscribed to a given prefix, and send the same reply to all. This helps to lower the CPU usage. @@ -322,5 +323,5 @@ keys that were not served recently. ## Limiting the amount of memory used by Redis -Just make sure to configure a suitable value for the maxmimum number of keys remembered by Redis, or alternatively use the BCAST mode that consumes no memory at all in the Redis side. Note that the memory consumed by Redis when BCAST is not used, is proportional both to the number of keys tracked, and the number of clients requesting such keys. +Just make sure to configure a suitable value for the maximum number of keys remembered by Redis, or alternatively use the BCAST mode that consumes no memory at all in the Redis side. Note that the memory consumed by Redis when BCAST is not used, is proportional both to the number of keys tracked, and the number of clients requesting such keys. diff --git a/topics/cluster-spec.md b/topics/cluster-spec.md index aba8e735..9e15f62b 100644 --- a/topics/cluster-spec.md +++ b/topics/cluster-spec.md @@ -470,7 +470,7 @@ is that: * All queries about non-existing keys in A are processed by "B", because "A" will redirect clients to "B". This way we no longer create new keys in "A". -In the meantime, a special program called `redis-trib` used during reshardings +In the meantime, `redis-cli` used during reshardings and Redis Cluster configuration will migrate existing keys in hash slot 8 from A to B. This is performed using the following command: @@ -478,12 +478,12 @@ This is performed using the following command: CLUSTER GETKEYSINSLOT slot count The above command will return `count` keys in the specified hash slot. 
-For every key returned, `redis-trib` sends node "A" a `MIGRATE` command, that -will migrate the specified key from A to B in an atomic way (both instances -are locked for the time (usually very small time) needed to migrate a key so +For keys returned, `redis-cli` sends node "A" a `MIGRATE` command, that +will migrate the specified keys from A to B in an atomic way (both instances +are locked for the time (usually very small time) needed to migrate keys so there are no race conditions). This is how `MIGRATE` works: - MIGRATE target_host target_port key target_database id timeout + MIGRATE target_host target_port "" target_database id timeout KEYS key1 key2 ... `MIGRATE` will connect to the target instance, send a serialized version of the key, and once an OK code is received, the old key from its own dataset @@ -942,7 +942,7 @@ So if we receive a heartbeat from node A claiming to serve hash slots 1 and 2 wi 16383 -> NULL ``` -When a new cluster is created, a system administrator needs to manually assign (using the `CLUSTER ADDSLOTS` command, via the redis-trib command line tool, or by any other means) the slots served by each master node only to the node itself, and the information will rapidly propagate across the cluster. +When a new cluster is created, a system administrator needs to manually assign (using the `CLUSTER ADDSLOTS` command, via the redis-cli command line tool, or by any other means) the slots served by each master node only to the node itself, and the information will rapidly propagate across the cluster. However this rule is not enough. We know that hash slot mapping can change during two events: @@ -954,7 +954,7 @@ For now let's focus on failovers. When a slave fails over its master, it obtains a configuration epoch which is guaranteed to be greater than the one of its master (and more generally greater than any other configuration epoch generated previously). 
For example node B, which is a slave of A, may failover -B with configuration epoch of 4. It will start to send heartbeat packets +A with configuration epoch of 4. It will start to send heartbeat packets (the first time mass-broadcasting cluster-wide) and because of the following second rule, receivers will update their hash slot tables: @@ -1153,7 +1153,7 @@ If there are any set of nodes with the same `configEpoch`, all the nodes but the This mechanism also guarantees that after a fresh cluster is created, all nodes start with a different `configEpoch` (even if this is not actually -used) since `redis-trib` makes sure to use `CONFIG SET-CONFIG-EPOCH` at startup. +used) since `redis-cli` makes sure to use `CONFIG SET-CONFIG-EPOCH` at startup. However if for some reason a node is left misconfigured, it will update its configuration to a different configuration epoch automatically. diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index d004f60c..b824fe9d 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -2,7 +2,7 @@ Redis cluster tutorial === This document is a gentle introduction to Redis Cluster, that does not use -difficult to understand concepts of distributed systems . It provides +difficult to understand concepts of distributed systems. It provides instructions about how to setup a cluster, test, and operate it, without going into the details that are covered in the [Redis Cluster specification](/topics/cluster-spec) but just describing @@ -399,7 +399,7 @@ OK redis 127.0.0.1:7000> get foo -> Redirected to slot [12182] located at 127.0.0.1:7002 "bar" -redis 127.0.0.1:7000> get hello +redis 127.0.0.1:7002> get hello -> Redirected to slot [866] located at 127.0.0.1:7000 "world" ``` @@ -618,6 +618,11 @@ rebalance the cluster checking the distribution of keys across the cluster nodes and intelligently moving slots as needed. This feature will be added in the future. 
+The `--cluster-yes` option instructs the cluster manager to automatically answer
+"yes" to the command's prompts, allowing it to run in a non-interactive mode.
+Note that this option can also be activated by setting the
+`REDISCLI_CLUSTER_YES` environment variable.
+
 A more interesting example application
 ---
 
@@ -799,6 +804,12 @@ the failover starts, and the old master is informed about the configuration
 switch. When the clients are unblocked on the old master, they are redirected
 to the new master.
 
+Note:
+
+* To promote a replica to master, it must first be known as a replica by a majority of the masters in the cluster.
+  Otherwise, it cannot win the failover election.
+  If the replica has just been added to the cluster (see [Adding a new node as a replica](#adding-a-new-node-as-a-replica) below), you may need to wait a while before sending the `CLUSTER FAILOVER` command, to make sure the masters in the cluster are aware of the new replica.
+
 Adding a new node
 ---
 
@@ -986,7 +997,8 @@ one is not available.
 
 Upgrading masters is a bit more complex, and the suggested procedure is:
 
-1. Use CLUSTER FAILOVER to trigger a manual failover of the master to one of its slaves (see the "Manual failover" section of this documentation).
+1. Use `CLUSTER FAILOVER` to trigger a manual failover of the master to one of its replicas.
+   (See the [Manual failover](#manual-failover) section in this document.)
 2. Wait for the master to turn into a slave.
 3. Finally upgrade the node as you do for slaves.
 4. If you want the master to be the node you just upgraded, trigger a new manual failover in order to turn back the upgraded node into a master.
diff --git a/topics/config.md b/topics/config.md
index 65d190dc..a40c290e 100644
--- a/topics/config.md
+++ b/topics/config.md
@@ -18,10 +18,14 @@ This is an example of configuration directive:
 
     slaveof 127.0.0.1 6380
 
 It is possible to provide strings containing spaces as arguments using
-quotes, as in the following example:
+(double or single) quotes, as in the following example:
 
     requirepass "hello world"
 
+Single-quoted strings can contain characters escaped by backslashes, and
+double-quoted strings can additionally include any ASCII symbols encoded using
+backslashed hexadecimal notation "\\xff".
+
 The list of configuration directives, and their meaning and intended usage
 is available in the self documented example redis.conf shipped into the
 Redis distribution.
diff --git a/topics/distlock.md b/topics/distlock.md
index 96979470..139a738a 100644
--- a/topics/distlock.md
+++ b/topics/distlock.md
@@ -25,10 +25,12 @@ already available that can be used for reference.
 
 * [Redlock-rb](https://github.com/antirez/redlock-rb) (Ruby implementation). There is also a [fork of Redlock-rb](https://github.com/leandromoreira/redlock-rb) that adds a gem for easy distribution and perhaps more.
 * [Redlock-py](https://github.com/SPSCommerce/redlock-py) (Python implementation).
+* [Pottery](https://github.com/brainix/pottery#redlock) (Python implementation).
 * [Aioredlock](https://github.com/joanvila/aioredlock) (Asyncio Python implementation).
 * [Redlock-php](https://github.com/ronnylt/redlock-php) (PHP implementation).
 * [PHPRedisMutex](https://github.com/malkusch/lock#phpredismutex) (further PHP implementation)
 * [cheprasov/php-redis-lock](https://github.com/cheprasov/php-redis-lock) (PHP library for locks)
+* [rtckit/react-redlock](https://github.com/rtckit/reactphp-redlock) (Async PHP implementation)
 * [Redsync](https://github.com/go-redsync/redsync) (Go implementation).
 * [Redisson](https://github.com/mrniko/redisson) (Java implementation).
* [Redis::DistLock](https://github.com/sbertrang/redis-distlock) (Perl implementation). @@ -78,7 +80,7 @@ To acquire the lock, the way to go is the following: SET resource_name my_random_value NX PX 30000 The command will set the key only if it does not already exist (NX option), with an expire of 30000 milliseconds (PX option). -The key is set to a value “my_random_value”. This value must be unique across all clients and all lock requests. +The key is set to a value “my\_random\_value”. This value must be unique across all clients and all lock requests. Basically the random value is used in order to release the lock in a safe way, with a script that tells Redis: remove the key only if it exists and the value stored at the key is exactly the one I expect to be. This is accomplished by the following Lua script: diff --git a/topics/indexes.md b/topics/indexes.md index 1460022e..312914b4 100644 --- a/topics/indexes.md +++ b/topics/indexes.md @@ -262,7 +262,7 @@ We also need logic in order to increment the index if the search term already exists in the index, so what we'll actually do is something like that: - ZRANGEBYLEX myindex "[banana:" + LIMIT 1 1 + ZRANGEBYLEX myindex "[banana:" + LIMIT 0 1 1) "banana:1" This will return the single entry of `banana` if it exists. Then we @@ -284,13 +284,13 @@ There is more: our goal is to just have items searched very frequently. So we need some form of purging. When we actually query the index in order to complete the user input, we may see something like that: - ZRANGEBYLEX myindex "[banana:" + LIMIT 1 10 + ZRANGEBYLEX myindex "[banana:" + LIMIT 0 10 1) "banana:123" - 2) "banahhh:1" + 2) "banaooo:1" 3) "banned user:49" 4) "banning:89" -Apparently nobody searches for "banahhh", for example, but the query was +Apparently nobody searches for "banaooo", for example, but the query was performed a single time, so we end presenting it to the user. This is what we can do. 
Out of the returned items, we pick a random one, @@ -348,7 +348,7 @@ we just store the entry as `key:value`: And search for the key with: - ZRANGEBYLEX myindex [mykey: + LIMIT 1 1 + ZRANGEBYLEX myindex [mykey: + LIMIT 0 1 1) "mykey:myvalue" Then we extract the part after the colon to retrieve the value. diff --git a/topics/introduction.md b/topics/introduction.md index 19d8b9f4..897623dd 100644 --- a/topics/introduction.md +++ b/topics/introduction.md @@ -1,8 +1,8 @@ Introduction to Redis === -Redis is an open source (BSD licensed), in-memory **data structure store**, used as a database, cache and message broker. It supports data structures such as -[strings](/topics/data-types-intro#strings), [hashes](/topics/data-types-intro#hashes), [lists](/topics/data-types-intro#lists), [sets](/topics/data-types-intro#sets), [sorted sets](/topics/data-types-intro#sorted-sets) with range queries, [bitmaps](/topics/data-types-intro#bitmaps), [hyperloglogs](/topics/data-types-intro#hyperloglogs), [geospatial indexes](/commands/geoadd) with radius queries and [streams](/topics/streams-intro). Redis has built-in [replication](/topics/replication), [Lua scripting](/commands/eval), [LRU eviction](/topics/lru-cache), [transactions](/topics/transactions) and different levels of [on-disk persistence](/topics/persistence), and provides high availability via [Redis Sentinel](/topics/sentinel) and automatic partitioning with [Redis Cluster](/topics/cluster-tutorial). +Redis is an open source (BSD licensed), in-memory **data structure store**, used as a database, cache, and message broker. 
Redis provides data structures such as +[strings](/topics/data-types-intro#strings), [hashes](/topics/data-types-intro#hashes), [lists](/topics/data-types-intro#lists), [sets](/topics/data-types-intro#sets), [sorted sets](/topics/data-types-intro#sorted-sets) with range queries, [bitmaps](/topics/data-types-intro#bitmaps), [hyperloglogs](/topics/data-types-intro#hyperloglogs), [geospatial indexes](/commands/geoadd), and [streams](/topics/streams-intro). Redis has built-in [replication](/topics/replication), [Lua scripting](/commands/eval), [LRU eviction](/topics/lru-cache), [transactions](/topics/transactions), and different levels of [on-disk persistence](/topics/persistence), and provides high availability via [Redis Sentinel](/topics/sentinel) and automatic partitioning with [Redis Cluster](/topics/cluster-tutorial). You can run **atomic operations** on these types, like [appending to a string](/commands/append); @@ -12,14 +12,13 @@ list](/commands/lpush); [computing set intersection](/commands/sinter), or [getting the member with highest ranking in a sorted set](/commands/zrangebyscore). -In order to achieve its outstanding performance, Redis works with an -**in-memory dataset**. Depending on your use case, you can persist it either -by [dumping the dataset to disk](/topics/persistence#snapshotting) -every once in a while, or by [appending each command to a -log](/topics/persistence#append-only-file). Persistence can be optionally -disabled, if you just need a feature-rich, networked, in-memory cache. +To achieve top performance, Redis works with an +**in-memory dataset**. Depending on your use case, you can persist your data either +by periodically [dumping the dataset to disk](/topics/persistence#snapshotting) + or by [appending each command to a +disk-based log](/topics/persistence#append-only-file). You can also disable persistence if you just need a feature-rich, networked, in-memory cache. 
-Redis also supports trivial-to-setup [master-slave asynchronous replication](/topics/replication), with very fast non-blocking first synchronization, auto-reconnection with partial resynchronization on net split. +Redis also supports [asynchronous replication](/topics/replication), with very fast non-blocking first synchronization, auto-reconnection with partial resynchronization on net split. Other features include: @@ -30,8 +29,8 @@ Other features include: * [LRU eviction of keys](/topics/lru-cache) * [Automatic failover](/topics/sentinel) -You can use Redis from [most programming languages](/clients) out there. +You can use Redis from [most programming languages](/clients). Redis is written in **ANSI C** and works in most POSIX systems like Linux, -\*BSD, OS X without external dependencies. Linux and OS X are the two operating systems where Redis is developed and tested the most, and we **recommend using Linux for deploying**. Redis may work in Solaris-derived systems like SmartOS, but the support is *best effort*. +\*BSD, and OS X, without external dependencies. Linux and OS X are the two operating systems where Redis is developed and tested the most, and we **recommend using Linux for deployment**. Redis may work in Solaris-derived systems like SmartOS, but the support is *best effort*. There is no official support for Windows builds. diff --git a/topics/latency.md b/topics/latency.md index 42021b74..b1eb7eca 100644 --- a/topics/latency.md +++ b/topics/latency.md @@ -88,7 +88,7 @@ intensive and will likely saturate a single core in your system. Max latency so far: 83 microseconds. Max latency so far: 115 microseconds. -Note: redis-cli in this special case needs to **run in the server** where you run or plan to run Redis, not in the client. In this special mode redis-cli does no connect to a Redis server at all: it will just try to measure the largest time the kernel does not provide CPU time to run to the redis-cli process itself. 
+Note: redis-cli in this special case needs to **run in the server** where you run or plan to run Redis, not in the client. In this special mode redis-cli does not connect to a Redis server at all: it will just try to measure the largest time the kernel does not provide CPU time to run to the redis-cli process itself. In the above example, the intrinsic latency of the system is just 0.115 milliseconds (or 115 microseconds), which is a good news, however keep in mind diff --git a/topics/ldb.md b/topics/ldb.md index 0d454228..07231ed0 100644 --- a/topics/ldb.md +++ b/topics/ldb.md @@ -215,7 +215,7 @@ LDB uses the client-server model where the Redis server acts as a debugging serv 2. The client provides an interface for sending arbitrary commands over RESP. 3. The client allows sending raw messages to the Redis server. -For example, the [Redis plugin](https://redislabs.com/blog/zerobrane-studio-plugin-for-redis-lua-scripts) for [ZeroBrane Studio](http://studio.zerobrane.com/) integrates with LDB using [redis-lua](https://github.com/nrk/redis-lua). The following Lua code is a simplified example of how the plugin achieves that: +For example, the [Redis plugin](https://redis.com/blog/zerobrane-studio-plugin-for-redis-lua-scripts) for [ZeroBrane Studio](http://studio.zerobrane.com/) integrates with LDB using [redis-lua](https://github.com/nrk/redis-lua). 
The following Lua code is a simplified example of how the plugin achieves that: ```Lua local redis = require 'redis' diff --git a/topics/memory-optimization.md b/topics/memory-optimization.md index 3b09a9e0..ff252e20 100644 --- a/topics/memory-optimization.md +++ b/topics/memory-optimization.md @@ -117,41 +117,41 @@ I used the following Ruby program to test how this works: require 'rubygems' require 'redis' - UseOptimization = true + USE_OPTIMIZATION = true def hash_get_key_field(key) - s = key.split(":") - if s[1].length > 2 - {:key => s[0]+":"+s[1][0..-3], :field => s[1][-2..-1]} - else - {:key => s[0]+":", :field => s[1]} - end + s = key.split(':') + if s[1].length > 2 + { key: s[0] + ':' + s[1][0..-3], field: s[1][-2..-1] } + else + { key: s[0] + ':', field: s[1] } + end end - def hash_set(r,key,value) - kf = hash_get_key_field(key) - r.hset(kf[:key],kf[:field],value) + def hash_set(r, key, value) + kf = hash_get_key_field(key) + r.hset(kf[:key], kf[:field], value) end - def hash_get(r,key,value) - kf = hash_get_key_field(key) - r.hget(kf[:key],kf[:field],value) + def hash_get(r, key, value) + kf = hash_get_key_field(key) + r.hget(kf[:key], kf[:field], value) end r = Redis.new - (0..100000).each{|id| - key = "object:#{id}" - if UseOptimization - hash_set(r,key,"val") - else - r.set(key,"val") - end - } + (0..100_000).each do |id| + key = "object:#{id}" + if USE_OPTIMIZATION + hash_set(r, key, 'val') + else + r.set(key, 'val') + end + end This is the result against a 64 bit instance of Redis 2.2: - * UseOptimization set to true: 1.7 MB of used memory - * UseOptimization set to false; 11 MB of used memory + * USE_OPTIMIZATION set to true: 1.7 MB of used memory + * USE_OPTIMIZATION set to false; 11 MB of used memory This is an order of magnitude, I think this makes Redis more or less the most memory efficient plain key value store out there. 
diff --git a/topics/modules-api-ref.md b/topics/modules-api-ref.md index 9ed02e24..552a3479 100644 --- a/topics/modules-api-ref.md +++ b/topics/modules-api-ref.md @@ -1,44 +1,107 @@ # Modules API reference -## `RedisModule_Alloc` + + +## Sections + +* [Heap allocation raw functions](#section-heap-allocation-raw-functions) +* [Commands API](#section-commands-api) +* [Module information and time measurement](#section-module-information-and-time-measurement) +* [Automatic memory management for modules](#section-automatic-memory-management-for-modules) +* [String objects APIs](#section-string-objects-apis) +* [Reply APIs](#section-reply-apis) +* [Commands replication API](#section-commands-replication-api) +* [DB and Key APIs – Generic API](#section-db-and-key-apis-generic-api) +* [Key API for String type](#section-key-api-for-string-type) +* [Key API for List type](#section-key-api-for-list-type) +* [Key API for Sorted Set type](#section-key-api-for-sorted-set-type) +* [Key API for Sorted Set iterator](#section-key-api-for-sorted-set-iterator) +* [Key API for Hash type](#section-key-api-for-hash-type) +* [Key API for Stream type](#section-key-api-for-stream-type) +* [Calling Redis commands from modules](#section-calling-redis-commands-from-modules) +* [Modules data types](#section-modules-data-types) +* [RDB loading and saving functions](#section-rdb-loading-and-saving-functions) +* [Key digest API (DEBUG DIGEST interface for modules types)](#section-key-digest-api-debug-digest-interface-for-modules-types) +* [AOF API for modules data types](#section-aof-api-for-modules-data-types) +* [IO context handling](#section-io-context-handling) +* [Logging](#section-logging) +* [Blocking clients from modules](#section-blocking-clients-from-modules) +* [Thread Safe Contexts](#section-thread-safe-contexts) +* [Module Keyspace Notifications API](#section-module-keyspace-notifications-api) +* [Modules Cluster API](#section-modules-cluster-api) +* [Modules Timers 
API](#section-modules-timers-api) +* [Modules ACL API](#section-modules-acl-api) +* [Modules Dictionary API](#section-modules-dictionary-api) +* [Modules Info fields](#section-modules-info-fields) +* [Modules utility APIs](#section-modules-utility-apis) +* [Modules API exporting / importing](#section-modules-api-exporting-importing) +* [Module Command Filter API](#section-module-command-filter-api) +* [Scanning keyspace and hashes](#section-scanning-keyspace-and-hashes) +* [Module fork API](#section-module-fork-api) +* [Server hooks implementation](#section-server-hooks-implementation) +* [Key eviction API](#section-key-eviction-api) +* [Miscellaneous APIs](#section-miscellaneous-apis) +* [Defrag API](#section-defrag-api) +* [Function index](#section-function-index) + + + +## Heap allocation raw functions + +Memory allocated with these functions are taken into account by Redis key +eviction algorithms and are reported in Redis memory usage information. + + + +### `RedisModule_Alloc` void *RedisModule_Alloc(size_t bytes); -Use like malloc(). Memory allocated with this function is reported in +Use like `malloc()`. Memory allocated with this function is reported in Redis INFO memory, used for keys eviction according to maxmemory settings and in general is taken into account as memory allocated by Redis. -You should avoid using malloc(). +You should avoid using `malloc()`. -## `RedisModule_Calloc` + + +### `RedisModule_Calloc` void *RedisModule_Calloc(size_t nmemb, size_t size); -Use like calloc(). Memory allocated with this function is reported in +Use like `calloc()`. Memory allocated with this function is reported in Redis INFO memory, used for keys eviction according to maxmemory settings and in general is taken into account as memory allocated by Redis. -You should avoid using calloc() directly. +You should avoid using `calloc()` directly. 
+ + -## `RedisModule_Realloc` +### `RedisModule_Realloc` void* RedisModule_Realloc(void *ptr, size_t bytes); -Use like realloc() for memory obtained with `RedisModule_Alloc()`. +Use like `realloc()` for memory obtained with [`RedisModule_Alloc()`](#RedisModule_Alloc). -## `RedisModule_Free` + + +### `RedisModule_Free` void RedisModule_Free(void *ptr); -Use like free() for memory obtained by `RedisModule_Alloc()` and -`RedisModule_Realloc()`. However you should never try to free with -`RedisModule_Free()` memory allocated with malloc() inside your module. +Use like `free()` for memory obtained by [`RedisModule_Alloc()`](#RedisModule_Alloc) and +[`RedisModule_Realloc()`](#RedisModule_Realloc). However you should never try to free with +[`RedisModule_Free()`](#RedisModule_Free) memory allocated with `malloc()` inside your module. + + -## `RedisModule_Strdup` +### `RedisModule_Strdup` char *RedisModule_Strdup(const char *str); -Like strdup() but returns memory allocated with `RedisModule_Alloc()`. +Like `strdup()` but returns memory allocated with [`RedisModule_Alloc()`](#RedisModule_Alloc). -## `RedisModule_PoolAlloc` + + +### `RedisModule_PoolAlloc` void *RedisModule_PoolAlloc(RedisModuleCtx *ctx, size_t bytes); @@ -55,18 +118,17 @@ pool allocator is not a good idea. The function returns NULL if `bytes` is 0. -## `RedisModule_GetApi` + + +## Commands API - int RedisModule_GetApi(const char *funcname, void **targetPtrPtr); +These functions are used to implement custom Redis commands. -Lookup the requested module API and store the function pointer into the -target pointer. The function returns `REDISMODULE_ERR` if there is no such -named API, otherwise `REDISMODULE_OK`. +For examples, see [https://redis.io/topics/modules-intro](https://redis.io/topics/modules-intro). -This function is not meant to be used by modules developer, it is only -used implicitly by including redismodule.h. 
+ -## `RedisModule_IsKeysPositionRequest` +### `RedisModule_IsKeysPositionRequest` int RedisModule_IsKeysPositionRequest(RedisModuleCtx *ctx); @@ -74,14 +136,16 @@ Return non-zero if a module command, that was declared with the flag "getkeys-api", is called in a special way to get the keys positions and not to get executed. Otherwise zero is returned. -## `RedisModule_KeyAtPos` + + +### `RedisModule_KeyAtPos` void RedisModule_KeyAtPos(RedisModuleCtx *ctx, int pos); When a module command is called in order to obtain the position of keys, since it was flagged as "getkeys-api" during the registration, the command implementation checks for this special call using the -`RedisModule_IsKeysPositionRequest()` API and uses this function in +[`RedisModule_IsKeysPositionRequest()`](#RedisModule_IsKeysPositionRequest) API and uses this function in order to report keys, like in the following example: if (RedisModule_IsKeysPositionRequest(ctx)) { @@ -93,9 +157,17 @@ order to report keys, like in the following example: keys are at fixed positions. This interface is only used for commands with a more complex structure. -## `RedisModule_CreateCommand` + + +### `RedisModule_CreateCommand` - int RedisModule_CreateCommand(RedisModuleCtx *ctx, const char *name, RedisModuleCmdFunc cmdfunc, const char *strflags, int firstkey, int lastkey, int keystep); + int RedisModule_CreateCommand(RedisModuleCtx *ctx, + const char *name, + RedisModuleCmdFunc cmdfunc, + const char *strflags, + int firstkey, + int lastkey, + int keystep); Register a new command in the Redis server, that will be handled by calling the function pointer 'func' using the RedisModule calling @@ -153,31 +225,59 @@ example "write deny-oom". The set of flags are: other reason. * **"no-auth"**: This command can be run by an un-authenticated client. Normally this is used by a command that is used - to authenticate a client. + to authenticate a client. 
+* **"may-replicate"**: This command may generate replication traffic, even + though it's not a write command. -## `RedisModule_SetModuleAttribs` + - void RedisModule_SetModuleAttribs(RedisModuleCtx *ctx, const char *name, int ver, int apiver); +## Module information and time measurement -Called by `RM_Init()` to setup the `ctx->module` structure. + -This is an internal function, Redis modules developers don't need -to use it. - -## `RedisModule_IsModuleNameBusy` +### `RedisModule_IsModuleNameBusy` int RedisModule_IsModuleNameBusy(const char *name); Return non-zero if the module name is busy. Otherwise zero is returned. -## `RedisModule_Milliseconds` + + +### `RedisModule_Milliseconds` long long RedisModule_Milliseconds(void); Return the current UNIX time in milliseconds. -## `RedisModule_SetModuleOptions` + + +### `RedisModule_BlockedClientMeasureTimeStart` + + int RedisModule_BlockedClientMeasureTimeStart(RedisModuleBlockedClient *bc); + +Mark a point in time that will be used as the start time to calculate +the elapsed execution time when [`RedisModule_BlockedClientMeasureTimeEnd()`](#RedisModule_BlockedClientMeasureTimeEnd) is called. +Within the same command, you can call multiple times +[`RedisModule_BlockedClientMeasureTimeStart()`](#RedisModule_BlockedClientMeasureTimeStart) and [`RedisModule_BlockedClientMeasureTimeEnd()`](#RedisModule_BlockedClientMeasureTimeEnd) +to accummulate indepedent time intervals to the background duration. +This method always return `REDISMODULE_OK`. + + + +### `RedisModule_BlockedClientMeasureTimeEnd` + + int RedisModule_BlockedClientMeasureTimeEnd(RedisModuleBlockedClient *bc); + +Mark a point in time that will be used as the end time +to calculate the elapsed execution time. +On success `REDISMODULE_OK` is returned. +This method only returns `REDISMODULE_ERR` if no start time was +previously defined ( meaning [`RedisModule_BlockedClientMeasureTimeStart`](#RedisModule_BlockedClientMeasureTimeStart) was not called ). 
+ + + +### `RedisModule_SetModuleOptions` void RedisModule_SetModuleOptions(RedisModuleCtx *ctx, int options); @@ -187,32 +287,67 @@ Set flags defining capabilities or behavior bit flags. Generally, modules don't need to bother with this, as the process will just terminate if a read error happens, however, setting this flag would allow repl-diskless-load to work if enabled. -The module should use `RedisModule_IsIOError` after reads, before using the +The module should use [`RedisModule_IsIOError`](#RedisModule_IsIOError) after reads, before using the data that was read, and in case of error, propagate it upwards, and also be able to release the partially populated value and all it's allocations. -## `RedisModule_SignalModifiedKey` +`REDISMODULE_OPTION_NO_IMPLICIT_SIGNAL_MODIFIED`: +See [`RedisModule_SignalModifiedKey()`](#RedisModule_SignalModifiedKey). + + - int RedisModule_SignalModifiedKey(RedisModuleCtx *ctx, RedisModuleString *keyname); +### `RedisModule_SignalModifiedKey` + + int RedisModule_SignalModifiedKey(RedisModuleCtx *ctx, + RedisModuleString *keyname); Signals that the key is modified from user's perspective (i.e. invalidate WATCH and client side caching). -## `RedisModule_AutoMemory` +This is done automatically when a key opened for writing is closed, unless +the option `REDISMODULE_OPTION_NO_IMPLICIT_SIGNAL_MODIFIED` has been set using +[`RedisModule_SetModuleOptions()`](#RedisModule_SetModuleOptions). + + + +## Automatic memory management for modules + + + +### `RedisModule_AutoMemory` void RedisModule_AutoMemory(RedisModuleCtx *ctx); -Enable automatic memory management. See API.md for more information. +Enable automatic memory management. The function must be called as the first function of a command implementation that wants to use automatic memory. -## `RedisModule_CreateString` +When enabled, automatic memory management tracks and automatically frees +keys, call replies and Redis string objects once the command returns. 
In most +cases this eliminates the need of calling the following functions: + +1. [`RedisModule_CloseKey()`](#RedisModule_CloseKey) +2. [`RedisModule_FreeCallReply()`](#RedisModule_FreeCallReply) +3. [`RedisModule_FreeString()`](#RedisModule_FreeString) + +These functions can still be used with automatic memory management enabled, +to optimize loops that make numerous allocations for example. + + - RedisModuleString *RedisModule_CreateString(RedisModuleCtx *ctx, const char *ptr, size_t len); +## String objects APIs + + + +### `RedisModule_CreateString` + + RedisModuleString *RedisModule_CreateString(RedisModuleCtx *ctx, + const char *ptr, + size_t len); Create a new module string object. The returned string must be freed -with `RedisModule_FreeString()`, unless automatic memory is enabled. +with [`RedisModule_FreeString()`](#RedisModule_FreeString), unless automatic memory is enabled. The string is created by copying the `len` bytes starting at `ptr`. No reference is retained to the passed buffer. @@ -222,69 +357,101 @@ a string out of the context scope. However in that case, the automatic memory management will not be available, and the string memory must be managed manually. -## `RedisModule_CreateStringPrintf` + + +### `RedisModule_CreateStringPrintf` - RedisModuleString *RedisModule_CreateStringPrintf(RedisModuleCtx *ctx, const char *fmt, ...); + RedisModuleString *RedisModule_CreateStringPrintf(RedisModuleCtx *ctx, + const char *fmt, + ...); Create a new module string object from a printf format and arguments. -The returned string must be freed with `RedisModule_FreeString()`, unless +The returned string must be freed with [`RedisModule_FreeString()`](#RedisModule_FreeString), unless automatic memory is enabled. -The string is created using the sds formatter function sdscatvprintf(). +The string is created using the sds formatter function `sdscatvprintf()`. 
The passed context 'ctx' may be NULL if necessary, see the -`RedisModule_CreateString()` documentation for more info. +[`RedisModule_CreateString()`](#RedisModule_CreateString) documentation for more info. -## `RedisModule_CreateStringFromLongLong` + - RedisModuleString *RedisModule_CreateStringFromLongLong(RedisModuleCtx *ctx, long long ll); +### `RedisModule_CreateStringFromLongLong` + + RedisModuleString *RedisModule_CreateStringFromLongLong(RedisModuleCtx *ctx, + long long ll); Like `RedisModule_CreatString()`, but creates a string starting from a long long integer instead of taking a buffer and its length. -The returned string must be released with `RedisModule_FreeString()` or by +The returned string must be released with [`RedisModule_FreeString()`](#RedisModule_FreeString) or by enabling automatic memory management. The passed context 'ctx' may be NULL if necessary, see the -`RedisModule_CreateString()` documentation for more info. +[`RedisModule_CreateString()`](#RedisModule_CreateString) documentation for more info. + + -## `RedisModule_CreateStringFromDouble` +### `RedisModule_CreateStringFromDouble` - RedisModuleString *RedisModule_CreateStringFromDouble(RedisModuleCtx *ctx, double d); + RedisModuleString *RedisModule_CreateStringFromDouble(RedisModuleCtx *ctx, + double d); Like `RedisModule_CreatString()`, but creates a string starting from a double -integer instead of taking a buffer and its length. +instead of taking a buffer and its length. -The returned string must be released with `RedisModule_FreeString()` or by +The returned string must be released with [`RedisModule_FreeString()`](#RedisModule_FreeString) or by enabling automatic memory management. 
-## `RedisModule_CreateStringFromLongDouble` + + +### `RedisModule_CreateStringFromLongDouble` - RedisModuleString *RedisModule_CreateStringFromLongDouble(RedisModuleCtx *ctx, long double ld, int humanfriendly); + RedisModuleString *RedisModule_CreateStringFromLongDouble(RedisModuleCtx *ctx, + long double ld, + int humanfriendly); Like `RedisModule_CreatString()`, but creates a string starting from a long double. -The returned string must be released with `RedisModule_FreeString()` or by +The returned string must be released with [`RedisModule_FreeString()`](#RedisModule_FreeString) or by enabling automatic memory management. The passed context 'ctx' may be NULL if necessary, see the -`RedisModule_CreateString()` documentation for more info. +[`RedisModule_CreateString()`](#RedisModule_CreateString) documentation for more info. -## `RedisModule_CreateStringFromString` + - RedisModuleString *RedisModule_CreateStringFromString(RedisModuleCtx *ctx, const RedisModuleString *str); +### `RedisModule_CreateStringFromString` + + RedisModuleString *RedisModule_CreateStringFromString(RedisModuleCtx *ctx, + const RedisModuleString *str); Like `RedisModule_CreatString()`, but creates a string starting from another -RedisModuleString. +`RedisModuleString`. -The returned string must be released with `RedisModule_FreeString()` or by +The returned string must be released with [`RedisModule_FreeString()`](#RedisModule_FreeString) or by enabling automatic memory management. The passed context 'ctx' may be NULL if necessary, see the -`RedisModule_CreateString()` documentation for more info. +[`RedisModule_CreateString()`](#RedisModule_CreateString) documentation for more info. + + + +### `RedisModule_CreateStringFromStreamID` + + RedisModuleString *RedisModule_CreateStringFromStreamID(RedisModuleCtx *ctx, + const RedisModuleStreamID *id); -## `RedisModule_FreeString` +Creates a string from a stream ID. 
The returned string must be released with +[`RedisModule_FreeString()`](#RedisModule_FreeString), unless automatic memory is enabled. + +The passed context `ctx` may be NULL if necessary. See the +[`RedisModule_CreateString()`](#RedisModule_CreateString) documentation for more info. + + + +### `RedisModule_FreeString` void RedisModule_FreeString(RedisModuleCtx *ctx, RedisModuleString *str); @@ -301,22 +468,24 @@ create any issue). Strings created with a context should be freed also passing the context, so if you want to free a string out of context later, make sure to create it using a NULL context. -## `RedisModule_RetainString` + + +### `RedisModule_RetainString` void RedisModule_RetainString(RedisModuleCtx *ctx, RedisModuleString *str); Every call to this function, will make the string 'str' requiring -an additional call to `RedisModule_FreeString()` in order to really +an additional call to [`RedisModule_FreeString()`](#RedisModule_FreeString) in order to really free the string. Note that the automatic freeing of the string obtained enabling modules automatic memory management counts for one -`RedisModule_FreeString()` call (it is just executed automatically). +[`RedisModule_FreeString()`](#RedisModule_FreeString) call (it is just executed automatically). Normally you want to call this function when, at the same time the following conditions are true: -1) You have automatic memory management enabled. -2) You want to create string objects. -3) Those string objects you create need to live *after* the callback +1. You have automatic memory management enabled. +2. You want to create string objects. +3. Those string objects you create need to live *after* the callback function(for example a command implementation) creating them returns. Usually you want this in order to store the created string object @@ -330,40 +499,47 @@ no FreeString() call is performed. It is possible to call this function with a NULL context. 
-## `RedisModule_HoldString` - - RedisModuleString* RedisModule_HoldString(RedisModuleCtx *ctx, RedisModuleString *str); - -/** -* This function can be used instead of `RedisModule_RetainString()`. -* The main difference between the two is that this function will always -* succeed, whereas `RedisModule_RetainString()` may fail because of an -* assertion. -* -* The function returns a pointer to RedisModuleString, which is owned -* by the caller. It requires a call to `RedisModule_FreeString()` to free -* the string when automatic memory management is disabled for the context. -* When automatic memory management is enabled, you can either call -* `RedisModule_FreeString()` or let the automation free it. -* -* This function is more efficient than `RedisModule_CreateStringFromString()` -* because whenever possible, it avoids copying the underlying -* RedisModuleString. The disadvantage of using this function is that it -* might not be possible to use `RedisModule_StringAppendBuffer()` on the -* returned RedisModuleString. -* -* It is possible to call this function with a NULL context. -  - -## `RedisModule_StringPtrLen` - - const char *RedisModule_StringPtrLen(const RedisModuleString *str, size_t *len); + + +### `RedisModule_HoldString` + + RedisModuleString* RedisModule_HoldString(RedisModuleCtx *ctx, + RedisModuleString *str); + + +This function can be used instead of [`RedisModule_RetainString()`](#RedisModule_RetainString). +The main difference between the two is that this function will always +succeed, whereas [`RedisModule_RetainString()`](#RedisModule_RetainString) may fail because of an +assertion. + +The function returns a pointer to `RedisModuleString`, which is owned +by the caller. It requires a call to [`RedisModule_FreeString()`](#RedisModule_FreeString) to free +the string when automatic memory management is disabled for the context. 
+When automatic memory management is enabled, you can either call +[`RedisModule_FreeString()`](#RedisModule_FreeString) or let the automation free it. + +This function is more efficient than [`RedisModule_CreateStringFromString()`](#RedisModule_CreateStringFromString) +because whenever possible, it avoids copying the underlying +`RedisModuleString`. The disadvantage of using this function is that it +might not be possible to use [`RedisModule_StringAppendBuffer()`](#RedisModule_StringAppendBuffer) on the +returned `RedisModuleString`. + +It is possible to call this function with a NULL context. + + + +### `RedisModule_StringPtrLen` + + const char *RedisModule_StringPtrLen(const RedisModuleString *str, + size_t *len); Given a string module object, this function returns the string pointer and length of the string. The returned pointer and length should only be used for read only accesses and never modified. -## `RedisModule_StringToLongLong` + + +### `RedisModule_StringToLongLong` int RedisModule_StringToLongLong(const RedisModuleString *str, long long *ll); @@ -372,7 +548,9 @@ Returns `REDISMODULE_OK` on success. If the string can't be parsed as a valid, strict long long (no spaces before/after), `REDISMODULE_ERR` is returned. -## `RedisModule_StringToDouble` + + +### `RedisModule_StringToDouble` int RedisModule_StringToDouble(const RedisModuleString *str, double *d); @@ -380,15 +558,32 @@ Convert the string into a double, storing it at `*d`. Returns `REDISMODULE_OK` on success or `REDISMODULE_ERR` if the string is not a valid string representation of a double value. -## `RedisModule_StringToLongDouble` + + +### `RedisModule_StringToLongDouble` - int RedisModule_StringToLongDouble(const RedisModuleString *str, long double *ld); + int RedisModule_StringToLongDouble(const RedisModuleString *str, + long double *ld); Convert the string into a long double, storing it at `*ld`. 
Returns `REDISMODULE_OK` on success or `REDISMODULE_ERR` if the string is not a valid string representation of a double value. -## `RedisModule_StringCompare` + + +### `RedisModule_StringToStreamID` + + int RedisModule_StringToStreamID(const RedisModuleString *str, + RedisModuleStreamID *id); + +Convert the string into a stream ID, storing it at `*id`. +Returns `REDISMODULE_OK` on success and returns `REDISMODULE_ERR` if the string +is not a valid string representation of a stream ID. The special IDs "+" and +"-" are allowed. + + + +### `RedisModule_StringCompare` int RedisModule_StringCompare(RedisModuleString *a, RedisModuleString *b); @@ -396,33 +591,56 @@ Compare two string objects, returning -1, 0 or 1 respectively if a < b, a == b, a > b. Strings are compared byte by byte as two binary blobs without any encoding care / collation attempt. -## `RedisModule_StringAppendBuffer` + + +### `RedisModule_StringAppendBuffer` - int RedisModule_StringAppendBuffer(RedisModuleCtx *ctx, RedisModuleString *str, const char *buf, size_t len); + int RedisModule_StringAppendBuffer(RedisModuleCtx *ctx, + RedisModuleString *str, + const char *buf, + size_t len); Append the specified buffer to the string 'str'. The string must be a string created by the user that is referenced only a single time, otherwise `REDISMODULE_ERR` is returned and the operation is not performed. -## `RedisModule_WrongArity` + + +## Reply APIs + +These functions are used for sending replies to the client. + +Most functions always return `REDISMODULE_OK` so you can use it with +'return' in order to return from the command implementation with: + + if (... some condition ...) + return RedisModule_ReplyWithLongLong(ctx,mycount); + + + +### `RedisModule_WrongArity` int RedisModule_WrongArity(RedisModuleCtx *ctx); Send an error about the number of arguments given to the command, -citing the command name in the error message. +citing the command name in the error message. Returns `REDISMODULE_OK`. 
Example: if (argc != 3) return RedisModule_WrongArity(ctx); -## `RedisModule_ReplyWithLongLong` + + +### `RedisModule_ReplyWithLongLong` int RedisModule_ReplyWithLongLong(RedisModuleCtx *ctx, long long ll); Send an integer reply to the client, with the specified long long value. The function always returns `REDISMODULE_OK`. -## `RedisModule_ReplyWithError` + + +### `RedisModule_ReplyWithError` int RedisModule_ReplyWithError(RedisModuleCtx *ctx, const char *err); @@ -440,17 +658,21 @@ and not just: The function always returns `REDISMODULE_OK`. -## `RedisModule_ReplyWithSimpleString` + + +### `RedisModule_ReplyWithSimpleString` int RedisModule_ReplyWithSimpleString(RedisModuleCtx *ctx, const char *msg); -Reply with a simple string (+... \r\n in RESP protocol). This replies +Reply with a simple string (`+... \r\n` in RESP protocol). This replies are suitable only when sending a small non-binary string with small overhead, like "OK" or similar replies. The function always returns `REDISMODULE_OK`. -## `RedisModule_ReplyWithArray` + + +### `RedisModule_ReplyWithArray` int RedisModule_ReplyWithArray(RedisModuleCtx *ctx, long len); @@ -461,12 +683,14 @@ of the array. When producing arrays with a number of element that is not known beforehand the function can be called with the special count `REDISMODULE_POSTPONED_ARRAY_LEN`, and the actual number of elements can be -later set with `RedisModule_ReplySetArrayLength()` (which will set the +later set with [`RedisModule_ReplySetArrayLength()`](#RedisModule_ReplySetArrayLength) (which will set the latest "open" count if there are multiple ones). The function always returns `REDISMODULE_OK`. -## `RedisModule_ReplyWithNullArray` + + +### `RedisModule_ReplyWithNullArray` int RedisModule_ReplyWithNullArray(RedisModuleCtx *ctx); @@ -475,7 +699,9 @@ null array in RESP2. The function always returns `REDISMODULE_OK`. 
-## `RedisModule_ReplyWithEmptyArray` + + +### `RedisModule_ReplyWithEmptyArray` int RedisModule_ReplyWithEmptyArray(RedisModuleCtx *ctx); @@ -483,11 +709,13 @@ Reply to the client with an empty array. The function always returns `REDISMODULE_OK`. -## `RedisModule_ReplySetArrayLength` + + +### `RedisModule_ReplySetArrayLength` void RedisModule_ReplySetArrayLength(RedisModuleCtx *ctx, long len); -When `RedisModule_ReplyWithArray()` is used with the argument +When [`RedisModule_ReplyWithArray()`](#RedisModule_ReplyWithArray) is used with the argument `REDISMODULE_POSTPONED_ARRAY_LEN`, because we don't know beforehand the number of items we are going to output as elements of the array, this function will take care to set the array length. @@ -513,15 +741,21 @@ length, since we produce a fixed number of elements, but in the practice the code may use an iterator or other ways of creating the output so that is not easy to calculate in advance the number of elements. -## `RedisModule_ReplyWithStringBuffer` + + +### `RedisModule_ReplyWithStringBuffer` - int RedisModule_ReplyWithStringBuffer(RedisModuleCtx *ctx, const char *buf, size_t len); + int RedisModule_ReplyWithStringBuffer(RedisModuleCtx *ctx, + const char *buf, + size_t len); Reply with a bulk string, taking in input a C buffer pointer and length. The function always returns `REDISMODULE_OK`. -## `RedisModule_ReplyWithCString` + + +### `RedisModule_ReplyWithCString` int RedisModule_ReplyWithCString(RedisModuleCtx *ctx, const char *buf); @@ -530,15 +764,19 @@ assumed to be null-terminated. The function always returns `REDISMODULE_OK`. -## `RedisModule_ReplyWithString` + + +### `RedisModule_ReplyWithString` int RedisModule_ReplyWithString(RedisModuleCtx *ctx, RedisModuleString *str); -Reply with a bulk string, taking in input a RedisModuleString object. +Reply with a bulk string, taking in input a `RedisModuleString` object. The function always returns `REDISMODULE_OK`. 
-## `RedisModule_ReplyWithEmptyString` + + +### `RedisModule_ReplyWithEmptyString` int RedisModule_ReplyWithEmptyString(RedisModuleCtx *ctx); @@ -546,16 +784,22 @@ Reply with an empty string. The function always returns `REDISMODULE_OK`. -## `RedisModule_ReplyWithVerbatimString` + + +### `RedisModule_ReplyWithVerbatimString` - int RedisModule_ReplyWithVerbatimString(RedisModuleCtx *ctx, const char *buf, size_t len); + int RedisModule_ReplyWithVerbatimString(RedisModuleCtx *ctx, + const char *buf, + size_t len); Reply with a binary safe string, which should not be escaped or filtered taking in input a C buffer pointer and length. The function always returns `REDISMODULE_OK`. -## `RedisModule_ReplyWithNull` + + +### `RedisModule_ReplyWithNull` int RedisModule_ReplyWithNull(RedisModuleCtx *ctx); @@ -563,67 +807,83 @@ Reply to the client with a NULL. The function always returns `REDISMODULE_OK`. -## `RedisModule_ReplyWithCallReply` + + +### `RedisModule_ReplyWithCallReply` - int RedisModule_ReplyWithCallReply(RedisModuleCtx *ctx, RedisModuleCallReply *reply); + int RedisModule_ReplyWithCallReply(RedisModuleCtx *ctx, + RedisModuleCallReply *reply); -Reply exactly what a Redis command returned us with `RedisModule_Call()`. -This function is useful when we use `RedisModule_Call()` in order to +Reply exactly what a Redis command returned us with [`RedisModule_Call()`](#RedisModule_Call). +This function is useful when we use [`RedisModule_Call()`](#RedisModule_Call) in order to execute some command, as we want to reply to the client exactly the same reply we obtained by the command. The function always returns `REDISMODULE_OK`. -## `RedisModule_ReplyWithDouble` + + +### `RedisModule_ReplyWithDouble` int RedisModule_ReplyWithDouble(RedisModuleCtx *ctx, double d); Send a string reply obtained converting the double 'd' into a bulk string. 
This function is basically equivalent to converting a double into a string into a C buffer, and then calling the function -`RedisModule_ReplyWithStringBuffer()` with the buffer and length. +[`RedisModule_ReplyWithStringBuffer()`](#RedisModule_ReplyWithStringBuffer) with the buffer and length. The function always returns `REDISMODULE_OK`. -## `RedisModule_ReplyWithLongDouble` + + +### `RedisModule_ReplyWithLongDouble` int RedisModule_ReplyWithLongDouble(RedisModuleCtx *ctx, long double ld); Send a string reply obtained converting the long double 'ld' into a bulk string. This function is basically equivalent to converting a long double into a string into a C buffer, and then calling the function -`RedisModule_ReplyWithStringBuffer()` with the buffer and length. +[`RedisModule_ReplyWithStringBuffer()`](#RedisModule_ReplyWithStringBuffer) with the buffer and length. The double string uses human readable formatting (see `addReplyHumanLongDouble` in networking.c). The function always returns `REDISMODULE_OK`. -## `RedisModule_Replicate` + + +## Commands replication API + + - int RedisModule_Replicate(RedisModuleCtx *ctx, const char *cmdname, const char *fmt, ...); +### `RedisModule_Replicate` + + int RedisModule_Replicate(RedisModuleCtx *ctx, + const char *cmdname, + const char *fmt, + ...); Replicate the specified command and arguments to slaves and AOF, as effect of execution of the calling command implementation. The replicated commands are always wrapped into the MULTI/EXEC that contains all the commands replicated in a given module command -execution. However the commands replicated with `RedisModule_Call()` -are the first items, the ones replicated with `RedisModule_Replicate()` +execution. However the commands replicated with [`RedisModule_Call()`](#RedisModule_Call) +are the first items, the ones replicated with [`RedisModule_Replicate()`](#RedisModule_Replicate) will all follow before the EXEC. Modules should try to use one interface or the other. 
-This command follows exactly the same interface of `RedisModule_Call()`, +This command follows exactly the same interface of [`RedisModule_Call()`](#RedisModule_Call), so a set of format specifiers must be passed, followed by arguments matching the provided format specifiers. -Please refer to `RedisModule_Call()` for more information. +Please refer to [`RedisModule_Call()`](#RedisModule_Call) for more information. Using the special "A" and "R" modifiers, the caller can exclude either the AOF or the replicas from the propagation of the specified command. Otherwise, by default, the command will be propagated in both channels. -## Note about calling this function from a thread safe context: +#### Note about calling this function from a thread safe context: Normally when you call this function from the callback implementing a module command, or any other callback provided by the Redis Module API, @@ -635,12 +895,14 @@ at will, the behavior is different: MULTI/EXEC wrapper is not emitted and the command specified is inserted in the AOF and replication stream immediately. -## Return value +#### Return value The command returns `REDISMODULE_ERR` if the format specifiers are invalid or the command name does not belong to a known command. -## `RedisModule_ReplicateVerbatim` + + +### `RedisModule_ReplicateVerbatim` int RedisModule_ReplicateVerbatim(RedisModuleCtx *ctx); @@ -656,7 +918,13 @@ new state starting from the old one. The function always returns `REDISMODULE_OK`. -## `RedisModule_GetClientId` + + +## DB and Key APIs – Generic API + + + +### `RedisModule_GetClientId` unsigned long long RedisModule_GetClientId(RedisModuleCtx *ctx); @@ -669,7 +937,7 @@ command. The returned ID has a few guarantees: 2. The ID increases monotonically. Clients connecting to the server later are guaranteed to get IDs greater than any past ID previously seen. -Valid IDs are from 1 to 2^64-1. If 0 is returned it means there is no way +Valid IDs are from 1 to 2^64 - 1. 
If 0 is returned it means there is no way to fetch the ID in the context the function was currently called. After obtaining the ID, it is possible to check if the command execution @@ -679,17 +947,31 @@ is actually happening in the context of AOF loading, using this macro: // Handle it differently. } -## `RedisModule_GetClientInfoById` + + +### `RedisModule_GetClientUserNameById` + + RedisModuleString *RedisModule_GetClientUserNameById(RedisModuleCtx *ctx, + uint64_t id); + +Return the ACL user name used by the client with the specified client ID. +Client ID can be obtained with [`RedisModule_GetClientId()`](#RedisModule_GetClientId) API. If the client does not +exist, NULL is returned and errno is set to ENOENT. If the client isn't +using an ACL user, NULL is returned and errno is set to ENOTSUP + + + +### `RedisModule_GetClientInfoById` int RedisModule_GetClientInfoById(void *ci, uint64_t id); Return information about the client with the specified ID (that was -previously obtained via the `RedisModule_GetClientId()` API). If the +previously obtained via the [`RedisModule_GetClientId()`](#RedisModule_GetClientId) API). If the client exists, `REDISMODULE_OK` is returned, otherwise `REDISMODULE_ERR` is returned. When the client exist and the `ci` pointer is not NULL, but points to -a structure of type RedisModuleClientInfo, previously initialized with +a structure of type `RedisModuleClientInfo`, previously initialized with the correct `REDISMODULE_CLIENTINFO_INITIALIZER`, the structure is populated with the following fields: @@ -725,19 +1007,27 @@ returned: printf("Address: %s\n", ci.addr); } -## `RedisModule_PublishMessage` + - int RedisModule_PublishMessage(RedisModuleCtx *ctx, RedisModuleString *channel, RedisModuleString *message); +### `RedisModule_PublishMessage` + + int RedisModule_PublishMessage(RedisModuleCtx *ctx, + RedisModuleString *channel, + RedisModuleString *message); Publish a message to subscribers (see PUBLISH command). 
-## `RedisModule_GetSelectedDb` + + +### `RedisModule_GetSelectedDb` int RedisModule_GetSelectedDb(RedisModuleCtx *ctx); Return the currently selected DB. -## `RedisModule_GetContextFlags` + + +### `RedisModule_GetContextFlags` int RedisModule_GetContextFlags(RedisModuleCtx *ctx); @@ -752,54 +1042,62 @@ in this case the following flags will not be reported: Available flags and their meaning: - * REDISMODULE_CTX_FLAGS_LUA: The command is running in a Lua script + * `REDISMODULE_CTX_FLAGS_LUA`: The command is running in a Lua script - * REDISMODULE_CTX_FLAGS_MULTI: The command is running inside a transaction + * `REDISMODULE_CTX_FLAGS_MULTI`: The command is running inside a transaction - * REDISMODULE_CTX_FLAGS_REPLICATED: The command was sent over the replication + * `REDISMODULE_CTX_FLAGS_REPLICATED`: The command was sent over the replication link by the MASTER - * REDISMODULE_CTX_FLAGS_MASTER: The Redis instance is a master + * `REDISMODULE_CTX_FLAGS_MASTER`: The Redis instance is a master - * REDISMODULE_CTX_FLAGS_SLAVE: The Redis instance is a slave + * `REDISMODULE_CTX_FLAGS_SLAVE`: The Redis instance is a slave - * REDISMODULE_CTX_FLAGS_READONLY: The Redis instance is read-only + * `REDISMODULE_CTX_FLAGS_READONLY`: The Redis instance is read-only - * REDISMODULE_CTX_FLAGS_CLUSTER: The Redis instance is in cluster mode + * `REDISMODULE_CTX_FLAGS_CLUSTER`: The Redis instance is in cluster mode - * REDISMODULE_CTX_FLAGS_AOF: The Redis instance has AOF enabled + * `REDISMODULE_CTX_FLAGS_AOF`: The Redis instance has AOF enabled - * REDISMODULE_CTX_FLAGS_RDB: The instance has RDB enabled + * `REDISMODULE_CTX_FLAGS_RDB`: The instance has RDB enabled - * REDISMODULE_CTX_FLAGS_MAXMEMORY: The instance has Maxmemory set + * `REDISMODULE_CTX_FLAGS_MAXMEMORY`: The instance has Maxmemory set - * REDISMODULE_CTX_FLAGS_EVICT: Maxmemory is set and has an eviction + * `REDISMODULE_CTX_FLAGS_EVICT`: Maxmemory is set and has an eviction policy that may delete keys - * 
REDISMODULE_CTX_FLAGS_OOM: Redis is out of memory according to the + * `REDISMODULE_CTX_FLAGS_OOM`: Redis is out of memory according to the maxmemory setting. - * REDISMODULE_CTX_FLAGS_OOM_WARNING: Less than 25% of memory remains before + * `REDISMODULE_CTX_FLAGS_OOM_WARNING`: Less than 25% of memory remains before reaching the maxmemory level. - * REDISMODULE_CTX_FLAGS_LOADING: Server is loading RDB/AOF + * `REDISMODULE_CTX_FLAGS_LOADING`: Server is loading RDB/AOF - * REDISMODULE_CTX_FLAGS_REPLICA_IS_STALE: No active link with the master. + * `REDISMODULE_CTX_FLAGS_REPLICA_IS_STALE`: No active link with the master. - * REDISMODULE_CTX_FLAGS_REPLICA_IS_CONNECTING: The replica is trying to + * `REDISMODULE_CTX_FLAGS_REPLICA_IS_CONNECTING`: The replica is trying to connect with the master. - * REDISMODULE_CTX_FLAGS_REPLICA_IS_TRANSFERRING: Master -> Replica RDB + * `REDISMODULE_CTX_FLAGS_REPLICA_IS_TRANSFERRING`: Master -> Replica RDB transfer is in progress. - * REDISMODULE_CTX_FLAGS_REPLICA_IS_ONLINE: The replica has an active link + * `REDISMODULE_CTX_FLAGS_REPLICA_IS_ONLINE`: The replica has an active link with its master. This is the contrary of STALE state. - * REDISMODULE_CTX_FLAGS_ACTIVE_CHILD: There is currently some background + * `REDISMODULE_CTX_FLAGS_ACTIVE_CHILD`: There is currently some background process active (RDB, AUX or module). -## `RedisModule_AvoidReplicaTraffic` + * `REDISMODULE_CTX_FLAGS_MULTI_DIRTY`: The next EXEC will fail due to dirty + CAS (touched keys). + + * `REDISMODULE_CTX_FLAGS_IS_CHILD`: Redis is currently running inside + background child process. + + + +### `RedisModule_AvoidReplicaTraffic` int RedisModule_AvoidReplicaTraffic(); @@ -810,8 +1108,8 @@ without adding further data to the replication channel, that the replicas replication offset, match the one of the master. When this happens, it is safe to failover the master without data loss. -However modules may generate traffic by calling `RedisModule_Call()` with -the "!" 
flag, or by calling `RedisModule_Replicate()`, in a context outside +However modules may generate traffic by calling [`RedisModule_Call()`](#RedisModule_Call) with +the "!" flag, or by calling [`RedisModule_Replicate()`](#RedisModule_Replicate), in a context outside commands execution, for instance in timeout callbacks, threads safe contexts, and so forth. When modules will generate too much traffic, it will be hard for the master and replicas offset to match, because there @@ -823,7 +1121,9 @@ returns true. This is mostly useful for modules that have background garbage collection tasks, or that do writes and replicate such writes periodically in timer callbacks or other periodic callbacks. -## `RedisModule_SelectDb` + + +### `RedisModule_SelectDb` int RedisModule_SelectDb(RedisModuleCtx *ctx, int newid); @@ -835,10 +1135,12 @@ the Redis command implemented by the module calling this function returns. If the module command wishes to change something in a different DB and -returns back to the original one, it should call `RedisModule_GetSelectedDb()` +returns back to the original one, it should call [`RedisModule_GetSelectedDb()`](#RedisModule_GetSelectedDb) before in order to restore the old DB number before returning. -## `RedisModule_OpenKey` + + +### `RedisModule_OpenKey` void *RedisModule_OpenKey(RedisModuleCtx *ctx, robj *keyname, int mode); @@ -847,30 +1149,36 @@ to call other APIs with the key handle as argument to perform operations on the key. The return value is the handle representing the key, that must be -closed with `RM_CloseKey()`. +closed with [`RedisModule_CloseKey()`](#RedisModule_CloseKey). If the key does not exist and WRITE mode is requested, the handle is still returned, since it is possible to perform operations on a yet not existing key (that will be created, for example, after a list push operation). If the mode is just READ instead, and the key does not exist, NULL is returned. 
However it is still safe to -call `RedisModule_CloseKey()` and `RedisModule_KeyType()` on a NULL +call [`RedisModule_CloseKey()`](#RedisModule_CloseKey) and [`RedisModule_KeyType()`](#RedisModule_KeyType) on a NULL value. -## `RedisModule_CloseKey` + + +### `RedisModule_CloseKey` void RedisModule_CloseKey(RedisModuleKey *key); Close a key handle. -## `RedisModule_KeyType` + + +### `RedisModule_KeyType` int RedisModule_KeyType(RedisModuleKey *key); Return the type of the key. If the key pointer is NULL then `REDISMODULE_KEYTYPE_EMPTY` is returned. -## `RedisModule_ValueLength` + + +### `RedisModule_ValueLength` size_t RedisModule_ValueLength(RedisModuleKey *key); @@ -880,7 +1188,9 @@ is the number of elements (just counting keys for hashes). If the key pointer is NULL or the key is empty, zero is returned. -## `RedisModule_DeleteKey` + + +### `RedisModule_DeleteKey` int RedisModule_DeleteKey(RedisModuleKey *key); @@ -889,7 +1199,9 @@ accept new writes as an empty key (that will be created on demand). On success `REDISMODULE_OK` is returned. If the key is not open for writing `REDISMODULE_ERR` is returned. -## `RedisModule_UnlinkKey` + + +### `RedisModule_UnlinkKey` int RedisModule_UnlinkKey(RedisModuleKey *key); @@ -899,7 +1211,9 @@ accept new writes as an empty key (that will be created on demand). On success `REDISMODULE_OK` is returned. If the key is not open for writing `REDISMODULE_ERR` is returned. -## `RedisModule_GetExpire` + + +### `RedisModule_GetExpire` mstime_t RedisModule_GetExpire(RedisModuleKey *key); @@ -907,7 +1221,9 @@ Return the key expire value, as milliseconds of remaining TTL. If no TTL is associated with the key or if the key is empty, `REDISMODULE_NO_EXPIRE` is returned. -## `RedisModule_SetExpire` + + +### `RedisModule_SetExpire` int RedisModule_SetExpire(RedisModuleKey *key, mstime_t expire); @@ -921,28 +1237,68 @@ the number of milliseconds of TTL the key should have. 
The function returns `REDISMODULE_OK` on success or `REDISMODULE_ERR` if the key was not open for writing or is an empty key. -## `RedisModule_ResetDataset` + + +### `RedisModule_GetAbsExpire` + + mstime_t RedisModule_GetAbsExpire(RedisModuleKey *key); + +Return the key expire value, as absolute Unix timestamp. +If no TTL is associated with the key or if the key is empty, +`REDISMODULE_NO_EXPIRE` is returned. + + + +### `RedisModule_SetAbsExpire` + + int RedisModule_SetAbsExpire(RedisModuleKey *key, mstime_t expire); + +Set a new expire for the key. If the special expire +`REDISMODULE_NO_EXPIRE` is set, the expire is cancelled if there was +one (the same as the PERSIST command). + +Note that the expire must be provided as a positive integer representing +the absolute Unix timestamp the key should have. + +The function returns `REDISMODULE_OK` on success or `REDISMODULE_ERR` if +the key was not open for writing or is an empty key. + + + +### `RedisModule_ResetDataset` void RedisModule_ResetDataset(int restart_aof, int async); Performs similar operation to FLUSHALL, and optionally start a new AOF file (if enabled) -If restart_aof is true, you must make sure the command that triggered this call is not +If `restart_aof` is true, you must make sure the command that triggered this call is not propagated to the AOF file. When async is set to true, db contents will be freed by a background thread. -## `RedisModule_DbSize` + + +### `RedisModule_DbSize` unsigned long long RedisModule_DbSize(RedisModuleCtx *ctx); Returns the number of keys in the current db. -## `RedisModule_RandomKey` + + +### `RedisModule_RandomKey` RedisModuleString *RedisModule_RandomKey(RedisModuleCtx *ctx); Returns a name of a random key, or NULL if current db is empty. -## `RedisModule_StringSet` + + +## Key API for String type + +See also [`RedisModule_ValueLength()`](#RedisModule_ValueLength), which returns the length of a string. 
+ + + +### `RedisModule_StringSet` int RedisModule_StringSet(RedisModuleKey *key, RedisModuleString *str); @@ -951,7 +1307,9 @@ value of the key, deleting the old value if any. On success `REDISMODULE_OK` is returned. If the key is not open for writing or there is an active iterator, `REDISMODULE_ERR` is returned. -## `RedisModule_StringDMA` + + +### `RedisModule_StringDMA` char *RedisModule_StringDMA(RedisModuleKey *key, size_t *len, int mode); @@ -975,23 +1333,25 @@ DMA access rules: the pointer is obtained, for all the time we want to use DMA access to read or modify the string. -2. Each time `RM_StringTruncate()` is called, to continue with the DMA -access, `RM_StringDMA()` should be called again to re-obtain +2. Each time [`RedisModule_StringTruncate()`](#RedisModule_StringTruncate) is called, to continue with the DMA +access, [`RedisModule_StringDMA()`](#RedisModule_StringDMA) should be called again to re-obtain a new pointer and length. 3. If the returned pointer is not NULL, but the length is zero, no byte can be touched (the string is empty, or the key itself is empty) -so a `RM_StringTruncate()` call should be used if there is to enlarge +so a [`RedisModule_StringTruncate()`](#RedisModule_StringTruncate) call should be used if there is to enlarge the string, and later call StringDMA() again to get the pointer. -## `RedisModule_StringTruncate` + + +### `RedisModule_StringTruncate` int RedisModule_StringTruncate(RedisModuleKey *key, size_t newlen); If the string is open for writing and is of string type, resize it, padding with zero bytes if the new length is greater than the old one. -After this call, `RM_StringDMA()` must be called again to continue +After this call, [`RedisModule_StringDMA()`](#RedisModule_StringDMA) must be called again to continue DMA access with the new pointer. The function returns `REDISMODULE_OK` on success, and `REDISMODULE_ERR` on @@ -1001,43 +1361,54 @@ or resizing for more than 512 MB is requested. 
If the key is empty, a string key is created with the new string value unless the new length value requested is zero. -## `RedisModule_ListPush` + + +## Key API for List type + +See also [`RedisModule_ValueLength()`](#RedisModule_ValueLength), which returns the length of a list. + + + +### `RedisModule_ListPush` - int RedisModule_ListPush(RedisModuleKey *key, int where, RedisModuleString *ele); + int RedisModule_ListPush(RedisModuleKey *key, + int where, + RedisModuleString *ele); Push an element into a list, on head or tail depending on 'where' argument. If the key pointer is about an empty key opened for writing, the key is created. On error (key opened for read-only operations or of the wrong type) `REDISMODULE_ERR` is returned, otherwise `REDISMODULE_OK` is returned. -## `RedisModule_ListPop` + + +### `RedisModule_ListPop` RedisModuleString *RedisModule_ListPop(RedisModuleKey *key, int where); Pop an element from the list, and returns it as a module string object -that the user should be free with `RM_FreeString()` or by enabling +that the user should be free with [`RedisModule_FreeString()`](#RedisModule_FreeString) or by enabling automatic memory. 'where' specifies if the element should be popped from head or tail. The command returns NULL if: -1) The list is empty. -2) The key was not open for writing. -3) The key is not a list. - -## `RedisModule_ZsetAddFlagsToCoreFlags` - int RedisModule_ZsetAddFlagsToCoreFlags(int flags); +1. The list is empty. +2. The key was not open for writing. +3. The key is not a list. -Conversion from/to public flags of the Modules API and our private flags, -so that we have everything decoupled. + -## `RedisModule_ZsetAddFlagsFromCoreFlags` +## Key API for Sorted Set type - int RedisModule_ZsetAddFlagsFromCoreFlags(int flags); +See also [`RedisModule_ValueLength()`](#RedisModule_ValueLength), which returns the length of a sorted set. -See previous function comment. 
+ -## `RedisModule_ZsetAdd` +### `RedisModule_ZsetAdd` - int RedisModule_ZsetAdd(RedisModuleKey *key, double score, RedisModuleString *ele, int *flagsptr); + int RedisModule_ZsetAdd(RedisModuleKey *key, + double score, + RedisModuleString *ele, + int *flagsptr); Add a new element into a sorted set, with the specified 'score'. If the element already exists, the score is updated. @@ -1053,8 +1424,10 @@ The input flags are: REDISMODULE_ZADD_XX: Element must already exist. Do nothing otherwise. REDISMODULE_ZADD_NX: Element must not exist. Do nothing otherwise. - REDISMODULE_ZADD_LT: For existing element, only update if the new score is less than the current score. - REDISMODULE_ZADD_GT: For existing element, only update if the new score is greater than the current score. + REDISMODULE_ZADD_GT: If element exists, new score must be greater than the current score. + Do nothing otherwise. Can optionally be combined with XX. + REDISMODULE_ZADD_LT: If element exists, new score must be less than the current score. + Do nothing otherwise. Can optionally be combined with XX. The output flags are: @@ -1069,11 +1442,17 @@ On success the function returns `REDISMODULE_OK`. On the following errors * The key is of the wrong type. * 'score' double value is not a number (NaN). -## `RedisModule_ZsetIncrby` + - int RedisModule_ZsetIncrby(RedisModuleKey *key, double score, RedisModuleString *ele, int *flagsptr, double *newscore); +### `RedisModule_ZsetIncrby` -This function works exactly like `RM_ZsetAdd()`, but instead of setting + int RedisModule_ZsetIncrby(RedisModuleKey *key, + double score, + RedisModuleString *ele, + int *flagsptr, + double *newscore); + +This function works exactly like [`RedisModule_ZsetAdd()`](#RedisModule_ZsetAdd), but instead of setting a new score, the score of the existing element is incremented, or if the element does not already exist, it is added assuming the old score was zero. 
@@ -1087,9 +1466,13 @@ This function has an additional field 'newscore', if not NULL is filled with the new score of the element after the increment, if no error is returned. -## `RedisModule_ZsetRem` + + +### `RedisModule_ZsetRem` - int RedisModule_ZsetRem(RedisModuleKey *key, RedisModuleString *ele, int *deleted); + int RedisModule_ZsetRem(RedisModuleKey *key, + RedisModuleString *ele, + int *deleted); Remove the specified element from the sorted set. The function returns `REDISMODULE_OK` on success, and `REDISMODULE_ERR` @@ -1110,9 +1493,13 @@ to know if the element was really removed. Empty keys will be handled correctly by doing nothing. -## `RedisModule_ZsetScore` + - int RedisModule_ZsetScore(RedisModuleKey *key, RedisModuleString *ele, double *score); +### `RedisModule_ZsetScore` + + int RedisModule_ZsetScore(RedisModuleKey *key, + RedisModuleString *ele, + double *score); On success retrieve the double score associated at the sorted set element 'ele' and returns `REDISMODULE_OK`. Otherwise `REDISMODULE_ERR` is returned @@ -1122,21 +1509,35 @@ to signal one of the following conditions: * The key is not a sorted set. * The key is an open empty key. -## `RedisModule_ZsetRangeStop` + + +## Key API for Sorted Set iterator + + + +### `RedisModule_ZsetRangeStop` void RedisModule_ZsetRangeStop(RedisModuleKey *key); Stop a sorted set iteration. -## `RedisModule_ZsetRangeEndReached` + + +### `RedisModule_ZsetRangeEndReached` int RedisModule_ZsetRangeEndReached(RedisModuleKey *key); Return the "End of range" flag value to signal the end of the iteration. -## `RedisModule_ZsetFirstInScoreRange` + + +### `RedisModule_ZsetFirstInScoreRange` - int RedisModule_ZsetFirstInScoreRange(RedisModuleKey *key, double min, double max, int minex, int maxex); + int RedisModule_ZsetFirstInScoreRange(RedisModuleKey *key, + double min, + double max, + int minex, + int maxex); Setup a sorted set iterator seeking the first element in the specified range. 
Returns `REDISMODULE_OK` if the iterator was correctly initialized @@ -1147,23 +1548,33 @@ otherwise `REDISMODULE_ERR` is returned in the following conditions: The range is specified according to the two double values 'min' and 'max'. Both can be infinite using the following two macros: -`REDISMODULE_POSITIVE_INFINITE` for positive infinite value -`REDISMODULE_NEGATIVE_INFINITE` for negative infinite value +* `REDISMODULE_POSITIVE_INFINITE` for positive infinite value +* `REDISMODULE_NEGATIVE_INFINITE` for negative infinite value 'minex' and 'maxex' parameters, if true, respectively setup a range where the min and max value are exclusive (not included) instead of inclusive. -## `RedisModule_ZsetLastInScoreRange` + - int RedisModule_ZsetLastInScoreRange(RedisModuleKey *key, double min, double max, int minex, int maxex); +### `RedisModule_ZsetLastInScoreRange` -Exactly like `RedisModule_ZsetFirstInScoreRange()` but the last element of + int RedisModule_ZsetLastInScoreRange(RedisModuleKey *key, + double min, + double max, + int minex, + int maxex); + +Exactly like [`RedisModule_ZsetFirstInScoreRange()`](#RedisModule_ZsetFirstInScoreRange) but the last element of the range is selected for the start of the iteration instead. -## `RedisModule_ZsetFirstInLexRange` + + +### `RedisModule_ZsetFirstInLexRange` - int RedisModule_ZsetFirstInLexRange(RedisModuleKey *key, RedisModuleString *min, RedisModuleString *max); + int RedisModule_ZsetFirstInLexRange(RedisModuleKey *key, + RedisModuleString *min, + RedisModuleString *max); Setup a sorted set iterator seeking the first element in the specified lexicographical range. Returns `REDISMODULE_OK` if the iterator was correctly @@ -1173,27 +1584,36 @@ following conditions: 1. The value stored at key is not a sorted set or the key is empty. 2. The lexicographical range 'min' and 'max' format is invalid. 
-'min' and 'max' should be provided as two RedisModuleString objects +'min' and 'max' should be provided as two `RedisModuleString` objects in the same format as the parameters passed to the ZRANGEBYLEX command. The function does not take ownership of the objects, so they can be released ASAP after the iterator is setup. -## `RedisModule_ZsetLastInLexRange` + - int RedisModule_ZsetLastInLexRange(RedisModuleKey *key, RedisModuleString *min, RedisModuleString *max); +### `RedisModule_ZsetLastInLexRange` -Exactly like `RedisModule_ZsetFirstInLexRange()` but the last element of + int RedisModule_ZsetLastInLexRange(RedisModuleKey *key, + RedisModuleString *min, + RedisModuleString *max); + +Exactly like [`RedisModule_ZsetFirstInLexRange()`](#RedisModule_ZsetFirstInLexRange) but the last element of the range is selected for the start of the iteration instead. -## `RedisModule_ZsetRangeCurrentElement` + + +### `RedisModule_ZsetRangeCurrentElement` - RedisModuleString *RedisModule_ZsetRangeCurrentElement(RedisModuleKey *key, double *score); + RedisModuleString *RedisModule_ZsetRangeCurrentElement(RedisModuleKey *key, + double *score); Return the current sorted set element of an active sorted set iterator or NULL if the range specified in the iterator does not include any element. -## `RedisModule_ZsetRangeNext` + + +### `RedisModule_ZsetRangeNext` int RedisModule_ZsetRangeNext(RedisModuleKey *key); @@ -1201,7 +1621,9 @@ Go to the next element of the sorted set iterator. Returns 1 if there was a next element, 0 if we are already at the latest element or the range does not include any item at all. -## `RedisModule_ZsetRangePrev` + + +### `RedisModule_ZsetRangePrev` int RedisModule_ZsetRangePrev(RedisModuleKey *key); @@ -1209,7 +1631,15 @@ Go to the previous element of the sorted set iterator. Returns 1 if there was a previous element, 0 if we are already at the first element or the range does not include any item at all. 
-## `RedisModule_HashSet`
+
+
+## Key API for Hash type
+
+See also [`RedisModule_ValueLength()`](#RedisModule_ValueLength), which returns the number of fields in a hash.
+
+
+
+### `RedisModule_HashSet`
 
     int RedisModule_HashSet(RedisModuleKey *key, int flags, ...);
 
@@ -1218,7 +1648,7 @@ If the key is an empty key open for writing, it is created with an empty
 hash value, in order to set the specified field.
 
 The function is variadic and the user must specify pairs of field
-names and values, both as RedisModuleString pointers (unless the
+names and values, both as `RedisModuleString` pointers (unless the
 CFIELD option is set, see later). At the end of the field/value-ptr
 pairs, NULL must be specified as last argument to signal the end of the
 arguments in the variadic function.
@@ -1244,6 +1674,10 @@ set to `REDISMODULE_HASH_NONE` if no special behavior is needed.
                      are created.
     REDISMODULE_HASH_CFIELDS: The field names passed are null terminated C
                               strings instead of RedisModuleString objects.
+    REDISMODULE_HASH_COUNT_ALL: Include the number of inserted fields in the
+                                returned number, in addition to the number of
+                                updated and deleted fields. (Added in Redis
+                                6.2.)
 
 Unless NX is specified, the command overwrites the old field value with
 the new one.
@@ -1257,22 +1691,35 @@ code can be used:
 
 Return value:
 
-The number of fields updated (that may be less than the number of fields
-specified because of the XX or NX options).
+The number of fields existing in the hash prior to the call, which have been
+updated (its old value has been replaced by a new value) or deleted. If the
+flag `REDISMODULE_HASH_COUNT_ALL` is set, inserted fields not previously
+existing in the hash are also counted.
 
-In the following case the return value is always zero:
+If the return value is zero, `errno` is set (since Redis 6.2) as follows:
 
-* The key was not open for writing.
-* The key was associated with a non Hash value.
+- ENOTSUP if the key is associated with a non Hash value. +- EBADF if the key was not opened for writing. +- ENOENT if no fields were counted as described under Return value above. + This is not actually an error. The return value can be zero if all fields + were just created and the `COUNT_ALL` flag was unset, or if changes were held + back due to the NX and XX flags. -## `RedisModule_HashGet` +NOTICE: The return value semantics of this function are very different +between Redis 6.2 and older versions. Modules that use it should determine +the Redis version and handle it accordingly. + + + +### `RedisModule_HashGet` int RedisModule_HashGet(RedisModuleKey *key, int flags, ...); Get fields from an hash value. This function is called using a variable -number of arguments, alternating a field name (as a StringRedisModule -pointer) with a pointer to a StringRedisModule pointer, that is set to the -value of the field if the field exist, or NULL if the field did not exist. +number of arguments, alternating a field name (as a `RedisModuleString` +pointer) with a pointer to a `RedisModuleString` pointer, that is set to the +value of the field if the field exists, or NULL if the field does not exist. At the end of the field/value-ptr pairs, NULL must be specified as last argument to signal the end of the arguments in the variadic function. @@ -1280,351 +1727,778 @@ This is an example usage: RedisModuleString *first, *second; RedisModule_HashGet(mykey,REDISMODULE_HASH_NONE,argv[1],&first, - argv[2],&second,NULL); + argv[2],&second,NULL); -As with `RedisModule_HashSet()` the behavior of the command can be specified +As with [`RedisModule_HashSet()`](#RedisModule_HashSet) the behavior of the command can be specified passing flags different than `REDISMODULE_HASH_NONE`: -`REDISMODULE_HASH_CFIELD`: field names as null terminated C strings. +`REDISMODULE_HASH_CFIELDS`: field names as null terminated C strings. 
`REDISMODULE_HASH_EXISTS`: instead of setting the value of the field -expecting a RedisModuleString pointer to pointer, the function just +expecting a `RedisModuleString` pointer to pointer, the function just reports if the field exists or not and expects an integer pointer as the second element of each pair. -Example of `REDISMODULE_HASH_CFIELD`: +Example of `REDISMODULE_HASH_CFIELDS`: RedisModuleString *username, *hashedpass; - RedisModule_HashGet(mykey,"username",&username,"hp",&hashedpass, NULL); + RedisModule_HashGet(mykey,REDISMODULE_HASH_CFIELDS,"username",&username,"hp",&hashedpass, NULL); Example of `REDISMODULE_HASH_EXISTS`: int exists; - RedisModule_HashGet(mykey,argv[1],&exists,NULL); + RedisModule_HashGet(mykey,REDISMODULE_HASH_EXISTS,argv[1],&exists,NULL); The function returns `REDISMODULE_OK` on success and `REDISMODULE_ERR` if the key is not an hash value. Memory management: -The returned RedisModuleString objects should be released with -`RedisModule_FreeString()`, or by enabling automatic memory management. +The returned `RedisModuleString` objects should be released with +[`RedisModule_FreeString()`](#RedisModule_FreeString), or by enabling automatic memory management. -## `RedisModule_FreeCallReply_Rec` + - void RedisModule_FreeCallReply_Rec(RedisModuleCallReply *reply, int freenested); - -Free a Call reply and all the nested replies it contains if it's an -array. +## Key API for Stream type -## `RedisModule_FreeCallReply` +For an introduction to streams, see [https://redis.io/topics/streams-intro](https://redis.io/topics/streams-intro). - void RedisModule_FreeCallReply(RedisModuleCallReply *reply); +The type `RedisModuleStreamID`, which is used in stream functions, is a struct +with two 64-bit fields and is defined as -Wrapper for the recursive free reply function. This is needed in order -to have the first level function to return on nested replies, but only -if called by the module API. 
+ typedef struct RedisModuleStreamID { + uint64_t ms; + uint64_t seq; + } RedisModuleStreamID; -## `RedisModule_CallReplyType` +See also [`RedisModule_ValueLength()`](#RedisModule_ValueLength), which returns the length of a stream, and the +conversion functions [`RedisModule_StringToStreamID()`](#RedisModule_StringToStreamID) and [`RedisModule_CreateStringFromStreamID()`](#RedisModule_CreateStringFromStreamID). - int RedisModule_CallReplyType(RedisModuleCallReply *reply); + -Return the reply type. +### `RedisModule_StreamAdd` -## `RedisModule_CallReplyLength` + int RedisModule_StreamAdd(RedisModuleKey *key, + int flags, + RedisModuleStreamID *id, + RedisModuleString **argv, + long numfields); - size_t RedisModule_CallReplyLength(RedisModuleCallReply *reply); +Adds an entry to a stream. Like XADD without trimming. -Return the reply type length, where applicable. +- `key`: The key where the stream is (or will be) stored +- `flags`: A bit field of + - `REDISMODULE_STREAM_ADD_AUTOID`: Assign a stream ID automatically, like + `*` in the XADD command. +- `id`: If the `AUTOID` flag is set, this is where the assigned ID is + returned. Can be NULL if `AUTOID` is set, if you don't care to receive the + ID. If `AUTOID` is not set, this is the requested ID. +- `argv`: A pointer to an array of size `numfields * 2` containing the + fields and values. +- `numfields`: The number of field-value pairs in `argv`. -## `RedisModule_CallReplyArrayElement` +Returns `REDISMODULE_OK` if an entry has been added. 
On failure, +`REDISMODULE_ERR` is returned and `errno` is set as follows: - RedisModuleCallReply *RedisModule_CallReplyArrayElement(RedisModuleCallReply *reply, size_t idx); +- EINVAL if called with invalid arguments +- ENOTSUP if the key refers to a value of a type other than stream +- EBADF if the key was not opened for writing +- EDOM if the given ID was 0-0 or not greater than all other IDs in the + stream (only if the AUTOID flag is unset) +- EFBIG if the stream has reached the last possible ID -Return the 'idx'-th nested call reply element of an array reply, or NULL -if the reply type is wrong or the index is out of range. + -## `RedisModule_CallReplyInteger` +### `RedisModule_StreamDelete` - long long RedisModule_CallReplyInteger(RedisModuleCallReply *reply); + int RedisModule_StreamDelete(RedisModuleKey *key, RedisModuleStreamID *id); -Return the long long of an integer reply. +Deletes an entry from a stream. -## `RedisModule_CallReplyStringPtr` +- `key`: A key opened for writing, with no stream iterator started. +- `id`: The stream ID of the entry to delete. - const char *RedisModule_CallReplyStringPtr(RedisModuleCallReply *reply, size_t *len); +Returns `REDISMODULE_OK` on success. On failure, `REDISMODULE_ERR` is returned +and `errno` is set as follows: -Return the pointer and length of a string or error reply. +- EINVAL if called with invalid arguments +- ENOTSUP if the key refers to a value of a type other than stream or if the + key is empty +- EBADF if the key was not opened for writing or if a stream iterator is + associated with the key +- ENOENT if no entry with the given stream ID exists -## `RedisModule_CreateStringFromCallReply` +See also [`RedisModule_StreamIteratorDelete()`](#RedisModule_StreamIteratorDelete) for deleting the current entry while +iterating using a stream iterator. 
- RedisModuleString *RedisModule_CreateStringFromCallReply(RedisModuleCallReply *reply); + -Return a new string object from a call reply of type string, error or -integer. Otherwise (wrong reply type) return NULL. +### `RedisModule_StreamIteratorStart` -## `RedisModule_Call` + int RedisModule_StreamIteratorStart(RedisModuleKey *key, + int flags, + RedisModuleStreamID *start, + RedisModuleStreamID *end); - RedisModuleCallReply *RedisModule_Call(RedisModuleCtx *ctx, const char *cmdname, const char *fmt, ...); +Sets up a stream iterator. -Exported API to call any Redis command from modules. -On success a RedisModuleCallReply object is returned, otherwise -NULL is returned and errno is set to the following values: +- `key`: The stream key opened for reading using [`RedisModule_OpenKey()`](#RedisModule_OpenKey). +- `flags`: + - `REDISMODULE_STREAM_ITERATOR_EXCLUSIVE`: Don't include `start` and `end` + in the iterated range. + - `REDISMODULE_STREAM_ITERATOR_REVERSE`: Iterate in reverse order, starting + from the `end` of the range. +- `start`: The lower bound of the range. Use NULL for the beginning of the + stream. +- `end`: The upper bound of the range. Use NULL for the end of the stream. -EBADF: wrong format specifier. -EINVAL: wrong command arity. -ENOENT: command does not exist. -EPERM: operation in Cluster instance with key in non local slot. -EROFS: operation in Cluster instance when a write command is sent - in a readonly state. -ENETDOWN: operation in Cluster instance when cluster is down. +Returns `REDISMODULE_OK` on success. 
On failure, `REDISMODULE_ERR` is returned +and `errno` is set as follows: + +- EINVAL if called with invalid arguments +- ENOTSUP if the key refers to a value of a type other than stream or if the + key is empty +- EBADF if the key was not opened for writing or if a stream iterator is + already associated with the key +- EDOM if `start` or `end` is outside the valid range + +Returns `REDISMODULE_OK` on success and `REDISMODULE_ERR` if the key doesn't +refer to a stream or if invalid arguments were given. + +The stream IDs are retrieved using [`RedisModule_StreamIteratorNextID()`](#RedisModule_StreamIteratorNextID) and +for each stream ID, the fields and values are retrieved using +[`RedisModule_StreamIteratorNextField()`](#RedisModule_StreamIteratorNextField). The iterator is freed by calling +[`RedisModule_StreamIteratorStop()`](#RedisModule_StreamIteratorStop). + +Example (error handling omitted): + + RedisModule_StreamIteratorStart(key, 0, startid_ptr, endid_ptr); + RedisModuleStreamID id; + long numfields; + while (RedisModule_StreamIteratorNextID(key, &id, &numfields) == + REDISMODULE_OK) { + RedisModuleString *field, *value; + while (RedisModule_StreamIteratorNextField(key, &field, &value) == + REDISMODULE_OK) { + // + // ... Do stuff ... + // + RedisModule_FreeString(ctx, field); + RedisModule_FreeString(ctx, value); + } + } + RedisModule_StreamIteratorStop(key); -This API is documented here: https://redis.io/topics/modules-intro + -## `RedisModule_CallReplyProto` +### `RedisModule_StreamIteratorStop` - const char *RedisModule_CallReplyProto(RedisModuleCallReply *reply, size_t *len); + int RedisModule_StreamIteratorStop(RedisModuleKey *key); -Return a pointer, and a length, to the protocol returned by the command -that returned the reply object. +Stops a stream iterator created using [`RedisModule_StreamIteratorStart()`](#RedisModule_StreamIteratorStart) and +reclaims its memory. -## `RedisModule_CreateDataType` +Returns `REDISMODULE_OK` on success. 
On failure, `REDISMODULE_ERR` is returned +and `errno` is set as follows: - moduleType *RedisModule_CreateDataType(RedisModuleCtx *ctx, const char *name, int encver, void *typemethods_ptr); +- EINVAL if called with a NULL key +- ENOTSUP if the key refers to a value of a type other than stream or if the + key is empty +- EBADF if the key was not opened for writing or if no stream iterator is + associated with the key -Register a new data type exported by the module. The parameters are the -following. Please for in depth documentation check the modules API -documentation, especially the TYPES.md file. + -* **name**: A 9 characters data type name that MUST be unique in the Redis - Modules ecosystem. Be creative... and there will be no collisions. Use - the charset A-Z a-z 9-0, plus the two "-_" characters. A good - idea is to use, for example `-`. For example - "tree-AntZ" may mean "Tree data structure by @antirez". To use both - lower case and upper case letters helps in order to prevent collisions. -* **encver**: Encoding version, which is, the version of the serialization - that a module used in order to persist data. As long as the "name" - matches, the RDB loading will be dispatched to the type callbacks - whatever 'encver' is used, however the module can understand if - the encoding it must load are of an older version of the module. - For example the module "tree-AntZ" initially used encver=0. Later - after an upgrade, it started to serialize data in a different format - and to register the type with encver=1. However this module may - still load old data produced by an older version if the rdb_load - callback is able to check the encver value and act accordingly. - The encver must be a positive value between 0 and 1023. 
-* **typemethods_ptr** is a pointer to a RedisModuleTypeMethods structure - that should be populated with the methods callbacks and structure - version, like in the following example: +### `RedisModule_StreamIteratorNextID` - RedisModuleTypeMethods tm = { - .version = REDISMODULE_TYPE_METHOD_VERSION, - .rdb_load = myType_RDBLoadCallBack, - .rdb_save = myType_RDBSaveCallBack, - .aof_rewrite = myType_AOFRewriteCallBack, - .free = myType_FreeCallBack, - - // Optional fields - .digest = myType_DigestCallBack, - .mem_usage = myType_MemUsageCallBack, - .aux_load = myType_AuxRDBLoadCallBack, - .aux_save = myType_AuxRDBSaveCallBack, - } + int RedisModule_StreamIteratorNextID(RedisModuleKey *key, + RedisModuleStreamID *id, + long *numfields); -* **rdb_load**: A callback function pointer that loads data from RDB files. -* **rdb_save**: A callback function pointer that saves data to RDB files. -* **aof_rewrite**: A callback function pointer that rewrites data as commands. -* **digest**: A callback function pointer that is used for `DEBUG DIGEST`. -* **free**: A callback function pointer that can free a type value. -* **aux_save**: A callback function pointer that saves out of keyspace data to RDB files. - 'when' argument is either REDISMODULE_AUX_BEFORE_RDB or REDISMODULE_AUX_AFTER_RDB. -* **aux_load**: A callback function pointer that loads out of keyspace data from RDB files. - Similar to aux_save, returns REDISMODULE_OK on success, and ERR otherwise. +Finds the next stream entry and returns its stream ID and the number of +fields. -The **digest* and **mem_usage** methods should currently be omitted since -they are not yet implemented inside the Redis modules core. +- `key`: Key for which a stream iterator has been started using + [`RedisModule_StreamIteratorStart()`](#RedisModule_StreamIteratorStart). +- `id`: The stream ID returned. NULL if you don't care. +- `numfields`: The number of fields in the found stream entry. NULL if you + don't care. 
-Note: the module name "AAAAAAAAA" is reserved and produces an error, it -happens to be pretty lame as well. +Returns `REDISMODULE_OK` and sets `*id` and `*numfields` if an entry was found. +On failure, `REDISMODULE_ERR` is returned and `errno` is set as follows: -If there is already a module registering a type with the same name, -and if the module name or encver is invalid, NULL is returned. -Otherwise the new type is registered into Redis, and a reference of -type RedisModuleType is returned: the caller of the function should store -this reference into a gobal variable to make future use of it in the -modules type API, since a single module may register multiple types. -Example code fragment: +- EINVAL if called with a NULL key +- ENOTSUP if the key refers to a value of a type other than stream or if the + key is empty +- EBADF if no stream iterator is associated with the key +- ENOENT if there are no more entries in the range of the iterator - static RedisModuleType *BalancedTreeType; +In practice, if [`RedisModule_StreamIteratorNextID()`](#RedisModule_StreamIteratorNextID) is called after a successful call +to [`RedisModule_StreamIteratorStart()`](#RedisModule_StreamIteratorStart) and with the same key, it is safe to assume that +an `REDISMODULE_ERR` return value means that there are no more entries. - int RedisModule_OnLoad(RedisModuleCtx *ctx) { - // some code here ... - BalancedTreeType = RM_CreateDataType(...); - } +Use [`RedisModule_StreamIteratorNextField()`](#RedisModule_StreamIteratorNextField) to retrieve the fields and values. +See the example at [`RedisModule_StreamIteratorStart()`](#RedisModule_StreamIteratorStart). -## `RedisModule_ModuleTypeSetValue` + - int RedisModule_ModuleTypeSetValue(RedisModuleKey *key, moduleType *mt, void *value); +### `RedisModule_StreamIteratorNextField` -If the key is open for writing, set the specified module type object -as the value of the key, deleting the old value if any. -On success `REDISMODULE_OK` is returned. 
If the key is not open for -writing or there is an active iterator, `REDISMODULE_ERR` is returned. + int RedisModule_StreamIteratorNextField(RedisModuleKey *key, + RedisModuleString **field_ptr, + RedisModuleString **value_ptr); -## `RedisModule_ModuleTypeGetType` +Retrieves the next field of the current stream ID and its corresponding value +in a stream iteration. This function should be called repeatedly after calling +[`RedisModule_StreamIteratorNextID()`](#RedisModule_StreamIteratorNextID) to fetch each field-value pair. - moduleType *RedisModule_ModuleTypeGetType(RedisModuleKey *key); +- `key`: Key where a stream iterator has been started. +- `field_ptr`: This is where the field is returned. +- `value_ptr`: This is where the value is returned. -Assuming `RedisModule_KeyType()` returned `REDISMODULE_KEYTYPE_MODULE` on -the key, returns the module type pointer of the value stored at key. +Returns `REDISMODULE_OK` and points `*field_ptr` and `*value_ptr` to freshly +allocated `RedisModuleString` objects. The string objects are freed +automatically when the callback finishes if automatic memory is enabled. On +failure, `REDISMODULE_ERR` is returned and `errno` is set as follows: -If the key is NULL, is not associated with a module type, or is empty, -then NULL is returned instead. +- EINVAL if called with a NULL key +- ENOTSUP if the key refers to a value of a type other than stream or if the + key is empty +- EBADF if no stream iterator is associated with the key +- ENOENT if there are no more fields in the current stream entry -## `RedisModule_ModuleTypeGetValue` +In practice, if [`RedisModule_StreamIteratorNextField()`](#RedisModule_StreamIteratorNextField) is called after a successful +call to [`RedisModule_StreamIteratorNextID()`](#RedisModule_StreamIteratorNextID) and with the same key, it is safe to assume +that an `REDISMODULE_ERR` return value means that there are no more fields. 
- void *RedisModule_ModuleTypeGetValue(RedisModuleKey *key); +See the example at [`RedisModule_StreamIteratorStart()`](#RedisModule_StreamIteratorStart). -Assuming `RedisModule_KeyType()` returned `REDISMODULE_KEYTYPE_MODULE` on -the key, returns the module type low-level value stored at key, as -it was set by the user via `RedisModule_ModuleTypeSet()`. + -If the key is NULL, is not associated with a module type, or is empty, -then NULL is returned instead. +### `RedisModule_StreamIteratorDelete` -## `RedisModule_IsIOError` + int RedisModule_StreamIteratorDelete(RedisModuleKey *key); - int RedisModule_IsIOError(RedisModuleIO *io); +Deletes the current stream entry while iterating. -Returns true if any previous IO API failed. -for Load* APIs the `REDISMODULE_OPTIONS_HANDLE_IO_ERRORS` flag must be set with -RediModule_SetModuleOptions first. +This function can be called after [`RedisModule_StreamIteratorNextID()`](#RedisModule_StreamIteratorNextID) or after any +calls to [`RedisModule_StreamIteratorNextField()`](#RedisModule_StreamIteratorNextField). -## `RedisModule_SaveUnsigned` +Returns `REDISMODULE_OK` on success. On failure, `REDISMODULE_ERR` is returned +and `errno` is set as follows: - void RedisModule_SaveUnsigned(RedisModuleIO *io, uint64_t value); +- EINVAL if key is NULL +- ENOTSUP if the key is empty or is of another type than stream +- EBADF if the key is not opened for writing, if no iterator has been started +- ENOENT if the iterator has no current stream entry -Save an unsigned 64 bit value into the RDB file. This function should only -be called in the context of the rdb_save method of modules implementing new -data types. + -## `RedisModule_LoadUnsigned` +### `RedisModule_StreamTrimByLength` - uint64_t RedisModule_LoadUnsigned(RedisModuleIO *io); + long long RedisModule_StreamTrimByLength(RedisModuleKey *key, + int flags, + long long length); -Load an unsigned 64 bit value from the RDB file. 
This function should only -be called in the context of the rdb_load method of modules implementing -new data types. +Trim a stream by length, similar to XTRIM with MAXLEN. -## `RedisModule_SaveSigned` +- `key`: Key opened for writing. +- `flags`: A bitfield of + - `REDISMODULE_STREAM_TRIM_APPROX`: Trim less if it improves performance, + like XTRIM with `~`. +- `length`: The number of stream entries to keep after trimming. - void RedisModule_SaveSigned(RedisModuleIO *io, int64_t value); +Returns the number of entries deleted. On failure, a negative value is +returned and `errno` is set as follows: -Like `RedisModule_SaveUnsigned()` but for signed 64 bit values. +- EINVAL if called with invalid arguments +- ENOTSUP if the key is empty or of a type other than stream +- EBADF if the key is not opened for writing -## `RedisModule_LoadSigned` + - int64_t RedisModule_LoadSigned(RedisModuleIO *io); +### `RedisModule_StreamTrimByID` -Like `RedisModule_LoadUnsigned()` but for signed 64 bit values. + long long RedisModule_StreamTrimByID(RedisModuleKey *key, + int flags, + RedisModuleStreamID *id); -## `RedisModule_SaveString` +Trim a stream by ID, similar to XTRIM with MINID. - void RedisModule_SaveString(RedisModuleIO *io, RedisModuleString *s); +- `key`: Key opened for writing. +- `flags`: A bitfield of + - `REDISMODULE_STREAM_TRIM_APPROX`: Trim less if it improves performance, + like XTRIM with `~`. +- `id`: The smallest stream ID to keep after trimming. -In the context of the rdb_save method of a module type, saves a -string into the RDB file taking as input a RedisModuleString. +Returns the number of entries deleted. On failure, a negative value is +returned and `errno` is set as follows: -The string can be later loaded with `RedisModule_LoadString()` or -other Load family functions expecting a serialized string inside -the RDB file. 
+- EINVAL if called with invalid arguments +- ENOTSUP if the key is empty or of a type other than stream +- EBADF if the key is not opened for writing -## `RedisModule_SaveStringBuffer` + - void RedisModule_SaveStringBuffer(RedisModuleIO *io, const char *str, size_t len); +## Calling Redis commands from modules -Like `RedisModule_SaveString()` but takes a raw C pointer and length -as input. +[`RedisModule_Call()`](#RedisModule_Call) sends a command to Redis. The remaining functions handle the reply. -## `RedisModule_LoadString` + - RedisModuleString *RedisModule_LoadString(RedisModuleIO *io); +### `RedisModule_FreeCallReply` -In the context of the rdb_load method of a module data type, loads a string -from the RDB file, that was previously saved with `RedisModule_SaveString()` -functions family. + void RedisModule_FreeCallReply(RedisModuleCallReply *reply); -The returned string is a newly allocated RedisModuleString object, and -the user should at some point free it with a call to `RedisModule_FreeString()`. +Free a Call reply and all the nested replies it contains if it's an +array. + + + +### `RedisModule_CallReplyType` + + int RedisModule_CallReplyType(RedisModuleCallReply *reply); + +Return the reply type. + + + +### `RedisModule_CallReplyLength` + + size_t RedisModule_CallReplyLength(RedisModuleCallReply *reply); + +Return the reply type length, where applicable. + + + +### `RedisModule_CallReplyArrayElement` + + RedisModuleCallReply *RedisModule_CallReplyArrayElement(RedisModuleCallReply *reply, + size_t idx); + +Return the 'idx'-th nested call reply element of an array reply, or NULL +if the reply type is wrong or the index is out of range. + + + +### `RedisModule_CallReplyInteger` + + long long RedisModule_CallReplyInteger(RedisModuleCallReply *reply); + +Return the long long of an integer reply. 
+
+
+
+### `RedisModule_CallReplyStringPtr`
+
+    const char *RedisModule_CallReplyStringPtr(RedisModuleCallReply *reply,
+                                               size_t *len);
+
+Return the pointer and length of a string or error reply.
+
+
+
+### `RedisModule_CreateStringFromCallReply`
+
+    RedisModuleString *RedisModule_CreateStringFromCallReply(RedisModuleCallReply *reply);
+
+Return a new string object from a call reply of type string, error or
+integer. Otherwise (wrong reply type) return NULL.
+
+
+
+### `RedisModule_Call`
+
+    RedisModuleCallReply *RedisModule_Call(RedisModuleCtx *ctx,
+                                           const char *cmdname,
+                                           const char *fmt,
+                                           ...);
+
+Exported API to call any Redis command from modules.
+
+* **cmdname**: The Redis command to call.
+* **fmt**: A format specifier string for the command's arguments. Each
+  of the arguments should be specified by a valid type specification. The
+  format specifier can also contain the modifiers `!`, `A` and `R` which
+  don't have a corresponding argument.
+
+  * `b` -- The argument is a buffer and is immediately followed by another
+    argument that is the buffer's length.
+  * `c` -- The argument is a pointer to a plain C string (null-terminated).
+  * `l` -- The argument is a long long integer.
+  * `s` -- The argument is a RedisModuleString.
+  * `v` -- The argument(s) is a vector of RedisModuleString.
+  * `!` -- Sends the Redis command and its arguments to replicas and AOF.
+  * `A` -- Suppress AOF propagation, send only to replicas (requires `!`).
+  * `R` -- Suppress replicas propagation, send only to AOF (requires `!`).
+* **...**: The actual arguments to the Redis command.
+
+On success a `RedisModuleCallReply` object is returned, otherwise
+NULL is returned and errno is set to the following values:
+
+* EBADF: wrong format specifier.
+* EINVAL: wrong command arity.
+* ENOENT: command does not exist.
+* EPERM: operation in Cluster instance with key in non local slot.
+* EROFS: operation in Cluster instance when a write command is sent
+  in a readonly state.
+* ENETDOWN: operation in Cluster instance when cluster is down. + +Example code fragment: + + reply = RedisModule_Call(ctx,"INCRBY","sc",argv[1],"10"); + if (RedisModule_CallReplyType(reply) == REDISMODULE_REPLY_INTEGER) { + long long myval = RedisModule_CallReplyInteger(reply); + // Do something with myval. + } + +This API is documented here: [https://redis.io/topics/modules-intro](https://redis.io/topics/modules-intro) + + + +### `RedisModule_CallReplyProto` + + const char *RedisModule_CallReplyProto(RedisModuleCallReply *reply, + size_t *len); + +Return a pointer, and a length, to the protocol returned by the command +that returned the reply object. + + + +## Modules data types + +When String DMA or using existing data structures is not enough, it is +possible to create new data types from scratch and export them to +Redis. The module must provide a set of callbacks for handling the +new values exported (for example in order to provide RDB saving/loading, +AOF rewrite, and so forth). In this section we define this API. + + + +### `RedisModule_CreateDataType` -If the data structure does not store strings as RedisModuleString objects, -the similar function `RedisModule_LoadStringBuffer()` could be used instead. + moduleType *RedisModule_CreateDataType(RedisModuleCtx *ctx, + const char *name, + int encver, + void *typemethods_ptr); -## `RedisModule_LoadStringBuffer` +Register a new data type exported by the module. The parameters are the +following. Please for in depth documentation check the modules API +documentation, especially [https://redis.io/topics/modules-native-types](https://redis.io/topics/modules-native-types). + +* **name**: A 9 characters data type name that MUST be unique in the Redis + Modules ecosystem. Be creative... and there will be no collisions. Use + the charset A-Z a-z 9-0, plus the two "-_" characters. A good + idea is to use, for example `-`. For example + "tree-AntZ" may mean "Tree data structure by @antirez". 
To use both + lower case and upper case letters helps in order to prevent collisions. +* **encver**: Encoding version, which is, the version of the serialization + that a module used in order to persist data. As long as the "name" + matches, the RDB loading will be dispatched to the type callbacks + whatever 'encver' is used, however the module can understand if + the encoding it must load are of an older version of the module. + For example the module "tree-AntZ" initially used encver=0. Later + after an upgrade, it started to serialize data in a different format + and to register the type with encver=1. However this module may + still load old data produced by an older version if the `rdb_load` + callback is able to check the encver value and act accordingly. + The encver must be a positive value between 0 and 1023. + +* **typemethods_ptr** is a pointer to a `RedisModuleTypeMethods` structure + that should be populated with the methods callbacks and structure + version, like in the following example: + + RedisModuleTypeMethods tm = { + .version = REDISMODULE_TYPE_METHOD_VERSION, + .rdb_load = myType_RDBLoadCallBack, + .rdb_save = myType_RDBSaveCallBack, + .aof_rewrite = myType_AOFRewriteCallBack, + .free = myType_FreeCallBack, + + // Optional fields + .digest = myType_DigestCallBack, + .mem_usage = myType_MemUsageCallBack, + .aux_load = myType_AuxRDBLoadCallBack, + .aux_save = myType_AuxRDBSaveCallBack, + .free_effort = myType_FreeEffortCallBack, + .unlink = myType_UnlinkCallBack, + .copy = myType_CopyCallback, + .defrag = myType_DefragCallback + } + +* **rdb_load**: A callback function pointer that loads data from RDB files. +* **rdb_save**: A callback function pointer that saves data to RDB files. +* **aof_rewrite**: A callback function pointer that rewrites data as commands. +* **digest**: A callback function pointer that is used for `DEBUG DIGEST`. +* **free**: A callback function pointer that can free a type value. 
+* **aux_save**: A callback function pointer that saves out of keyspace data to RDB files.
+  'when' argument is either `REDISMODULE_AUX_BEFORE_RDB` or `REDISMODULE_AUX_AFTER_RDB`.
+* **aux_load**: A callback function pointer that loads out of keyspace data from RDB files.
+  Similar to `aux_save`, returns `REDISMODULE_OK` on success, and ERR otherwise.
+* **free_effort**: A callback function pointer that is used to determine whether the module's
+  memory needs to be lazily reclaimed. The module should return the complexity involved by
+  freeing the value. For example: how many pointers are going to be freed. Note that if it
+  returns 0, we'll always do an async free.
+* **unlink**: A callback function pointer that is used to notify the module that the key has
+  been removed from the DB by Redis, and may soon be freed by a background thread. Note that
+  it won't be called on FLUSHALL/FLUSHDB (both sync and async), and the module can use the
+  `RedisModuleEvent_FlushDB` to hook into that.
+* **copy**: A callback function pointer that is used to make a copy of the specified key.
+  The module is expected to perform a deep copy of the specified value and return it.
+  In addition, hints about the names of the source and destination keys are provided.
+  A NULL return value is considered an error and the copy operation fails.
+  Note: if the target key exists and is being overwritten, the copy callback will be
+  called first, followed by a free callback to the value that is being replaced.
+
+* **defrag**: A callback function pointer that is used to request the module to defrag
+  a key. The module should then iterate pointers and call the relevant `RedisModule_Defrag*()`
+  functions to defragment pointers or complex types. The module should continue
+  iterating as long as [`RedisModule_DefragShouldStop()`](#RedisModule_DefragShouldStop) returns a zero value, and return a
+  zero value if finished or non-zero value if more work is left to be done.
If more work + needs to be done, [`RedisModule_DefragCursorSet()`](#RedisModule_DefragCursorSet) and [`RedisModule_DefragCursorGet()`](#RedisModule_DefragCursorGet) can be used to track + this work across different calls. + Normally, the defrag mechanism invokes the callback without a time limit, so + [`RedisModule_DefragShouldStop()`](#RedisModule_DefragShouldStop) always returns zero. The "late defrag" mechanism which has + a time limit and provides cursor support is used only for keys that are determined + to have significant internal complexity. To determine this, the defrag mechanism + uses the `free_effort` callback and the 'active-defrag-max-scan-fields' config directive. + NOTE: The value is passed as a `void**` and the function is expected to update the + pointer if the top-level value pointer is defragmented and consequentially changes. + +Note: the module name "AAAAAAAAA" is reserved and produces an error, it +happens to be pretty lame as well. + +If there is already a module registering a type with the same name, +and if the module name or encver is invalid, NULL is returned. +Otherwise the new type is registered into Redis, and a reference of +type `RedisModuleType` is returned: the caller of the function should store +this reference into a global variable to make future use of it in the +modules type API, since a single module may register multiple types. +Example code fragment: + + static RedisModuleType *BalancedTreeType; + + int RedisModule_OnLoad(RedisModuleCtx *ctx) { + // some code here ... + BalancedTreeType = RM_CreateDataType(...); + } + + + +### `RedisModule_ModuleTypeSetValue` + + int RedisModule_ModuleTypeSetValue(RedisModuleKey *key, + moduleType *mt, + void *value); + +If the key is open for writing, set the specified module type object +as the value of the key, deleting the old value if any. +On success `REDISMODULE_OK` is returned. If the key is not open for +writing or there is an active iterator, `REDISMODULE_ERR` is returned. 
+ + + +### `RedisModule_ModuleTypeGetType` + + moduleType *RedisModule_ModuleTypeGetType(RedisModuleKey *key); + +Assuming [`RedisModule_KeyType()`](#RedisModule_KeyType) returned `REDISMODULE_KEYTYPE_MODULE` on +the key, returns the module type pointer of the value stored at key. + +If the key is NULL, is not associated with a module type, or is empty, +then NULL is returned instead. + + + +### `RedisModule_ModuleTypeGetValue` + + void *RedisModule_ModuleTypeGetValue(RedisModuleKey *key); + +Assuming [`RedisModule_KeyType()`](#RedisModule_KeyType) returned `REDISMODULE_KEYTYPE_MODULE` on +the key, returns the module type low-level value stored at key, as +it was set by the user via [`RedisModule_ModuleTypeSetValue()`](#RedisModule_ModuleTypeSetValue). + +If the key is NULL, is not associated with a module type, or is empty, +then NULL is returned instead. + + + +## RDB loading and saving functions + + + +### `RedisModule_IsIOError` + + int RedisModule_IsIOError(RedisModuleIO *io); + +Returns true if any previous IO API failed. +for `Load*` APIs the `REDISMODULE_OPTIONS_HANDLE_IO_ERRORS` flag must be set with +[`RedisModule_SetModuleOptions`](#RedisModule_SetModuleOptions) first. + + + +### `RedisModule_SaveUnsigned` + + void RedisModule_SaveUnsigned(RedisModuleIO *io, uint64_t value); + +Save an unsigned 64 bit value into the RDB file. This function should only +be called in the context of the `rdb_save` method of modules implementing new +data types. + + + +### `RedisModule_LoadUnsigned` + + uint64_t RedisModule_LoadUnsigned(RedisModuleIO *io); + +Load an unsigned 64 bit value from the RDB file. This function should only +be called in the context of the `rdb_load` method of modules implementing +new data types. + + + +### `RedisModule_SaveSigned` + + void RedisModule_SaveSigned(RedisModuleIO *io, int64_t value); + +Like [`RedisModule_SaveUnsigned()`](#RedisModule_SaveUnsigned) but for signed 64 bit values. 
+ + + +### `RedisModule_LoadSigned` + + int64_t RedisModule_LoadSigned(RedisModuleIO *io); + +Like [`RedisModule_LoadUnsigned()`](#RedisModule_LoadUnsigned) but for signed 64 bit values. + + + +### `RedisModule_SaveString` + + void RedisModule_SaveString(RedisModuleIO *io, RedisModuleString *s); + +In the context of the `rdb_save` method of a module type, saves a +string into the RDB file taking as input a `RedisModuleString`. + +The string can be later loaded with [`RedisModule_LoadString()`](#RedisModule_LoadString) or +other Load family functions expecting a serialized string inside +the RDB file. + + + +### `RedisModule_SaveStringBuffer` + + void RedisModule_SaveStringBuffer(RedisModuleIO *io, + const char *str, + size_t len); + +Like [`RedisModule_SaveString()`](#RedisModule_SaveString) but takes a raw C pointer and length +as input. + + + +### `RedisModule_LoadString` + + RedisModuleString *RedisModule_LoadString(RedisModuleIO *io); + +In the context of the `rdb_load` method of a module data type, loads a string +from the RDB file, that was previously saved with [`RedisModule_SaveString()`](#RedisModule_SaveString) +functions family. + +The returned string is a newly allocated `RedisModuleString` object, and +the user should at some point free it with a call to [`RedisModule_FreeString()`](#RedisModule_FreeString). + +If the data structure does not store strings as `RedisModuleString` objects, +the similar function [`RedisModule_LoadStringBuffer()`](#RedisModule_LoadStringBuffer) could be used instead. + + + +### `RedisModule_LoadStringBuffer` char *RedisModule_LoadStringBuffer(RedisModuleIO *io, size_t *lenptr); -Like `RedisModule_LoadString()` but returns an heap allocated string that -was allocated with `RedisModule_Alloc()`, and can be resized or freed with -`RedisModule_Realloc()` or `RedisModule_Free()`. 
+Like [`RedisModule_LoadString()`](#RedisModule_LoadString) but returns an heap allocated string that +was allocated with [`RedisModule_Alloc()`](#RedisModule_Alloc), and can be resized or freed with +[`RedisModule_Realloc()`](#RedisModule_Realloc) or [`RedisModule_Free()`](#RedisModule_Free). The size of the string is stored at '*lenptr' if not NULL. The returned string is not automatically NULL terminated, it is loaded -exactly as it was stored inisde the RDB file. +exactly as it was stored inside the RDB file. -## `RedisModule_SaveDouble` + + +### `RedisModule_SaveDouble` void RedisModule_SaveDouble(RedisModuleIO *io, double value); -In the context of the rdb_save method of a module data type, saves a double +In the context of the `rdb_save` method of a module data type, saves a double value to the RDB file. The double can be a valid number, a NaN or infinity. -It is possible to load back the value with `RedisModule_LoadDouble()`. +It is possible to load back the value with [`RedisModule_LoadDouble()`](#RedisModule_LoadDouble). + + -## `RedisModule_LoadDouble` +### `RedisModule_LoadDouble` double RedisModule_LoadDouble(RedisModuleIO *io); -In the context of the rdb_save method of a module data type, loads back the -double value saved by `RedisModule_SaveDouble()`. +In the context of the `rdb_save` method of a module data type, loads back the +double value saved by [`RedisModule_SaveDouble()`](#RedisModule_SaveDouble). -## `RedisModule_SaveFloat` + + +### `RedisModule_SaveFloat` void RedisModule_SaveFloat(RedisModuleIO *io, float value); -In the context of the rdb_save method of a module data type, saves a float +In the context of the `rdb_save` method of a module data type, saves a float value to the RDB file. The float can be a valid number, a NaN or infinity. -It is possible to load back the value with `RedisModule_LoadFloat()`. +It is possible to load back the value with [`RedisModule_LoadFloat()`](#RedisModule_LoadFloat). 
+ + -## `RedisModule_LoadFloat` +### `RedisModule_LoadFloat` float RedisModule_LoadFloat(RedisModuleIO *io); -In the context of the rdb_save method of a module data type, loads back the -float value saved by `RedisModule_SaveFloat()`. +In the context of the `rdb_save` method of a module data type, loads back the +float value saved by [`RedisModule_SaveFloat()`](#RedisModule_SaveFloat). -## `RedisModule_SaveLongDouble` + + +### `RedisModule_SaveLongDouble` void RedisModule_SaveLongDouble(RedisModuleIO *io, long double value); -In the context of the rdb_save method of a module data type, saves a long double +In the context of the `rdb_save` method of a module data type, saves a long double value to the RDB file. The double can be a valid number, a NaN or infinity. -It is possible to load back the value with `RedisModule_LoadLongDouble()`. +It is possible to load back the value with [`RedisModule_LoadLongDouble()`](#RedisModule_LoadLongDouble). + + -## `RedisModule_LoadLongDouble` +### `RedisModule_LoadLongDouble` long double RedisModule_LoadLongDouble(RedisModuleIO *io); -In the context of the rdb_save method of a module data type, loads back the -long double value saved by `RedisModule_SaveLongDouble()`. +In the context of the `rdb_save` method of a module data type, loads back the +long double value saved by [`RedisModule_SaveLongDouble()`](#RedisModule_SaveLongDouble). -## `RedisModule_DigestAddStringBuffer` + - void RedisModule_DigestAddStringBuffer(RedisModuleDigest *md, unsigned char *ele, size_t len); +## Key digest API (DEBUG DIGEST interface for modules types) + + + +### `RedisModule_DigestAddStringBuffer` + + void RedisModule_DigestAddStringBuffer(RedisModuleDigest *md, + unsigned char *ele, + size_t len); Add a new element to the digest. This function can be called multiple times one element after the other, for all the elements that constitute a given data structure. 
The function call must be followed by the call to -``RedisModule_DigestEndSequence`` eventually, when all the elements that are +[`RedisModule_DigestEndSequence`](#RedisModule_DigestEndSequence) eventually, when all the elements that are always in a given order are added. See the Redis Modules data types documentation for more info. However this is a quick example that uses Redis data types as an example. @@ -1658,64 +2532,121 @@ A list of ordered elements would be implemented with: } EndSequence(); -## `RedisModule_DigestAddLongLong` + + +### `RedisModule_DigestAddLongLong` void RedisModule_DigestAddLongLong(RedisModuleDigest *md, long long ll); -Like ``RedisModule_DigestAddStringBuffer()`` but takes a long long as input +Like [`RedisModule_DigestAddStringBuffer()`](#RedisModule_DigestAddStringBuffer) but takes a long long as input that gets converted into a string before adding it to the digest. -## `RedisModule_DigestEndSequence` + + +### `RedisModule_DigestEndSequence` void RedisModule_DigestEndSequence(RedisModuleDigest *md); -See the documentation for ``RedisModule_DigestAddElement()``. +See the documentation for `RedisModule_DigestAddElement()`. + + + +### `RedisModule_LoadDataTypeFromString` + + void *RedisModule_LoadDataTypeFromString(const RedisModuleString *str, + const moduleType *mt); + +Decode a serialized representation of a module data type 'mt' from string +'str' and return a newly allocated value, or NULL if decoding failed. + +This call basically reuses the '`rdb_load`' callback which module data types +implement in order to allow a module to arbitrarily serialize/de-serialize +keys, similar to how the Redis 'DUMP' and 'RESTORE' commands are implemented. + +Modules should generally use the `REDISMODULE_OPTIONS_HANDLE_IO_ERRORS` flag and +make sure the de-serialization code properly checks and handles IO errors +(freeing allocated buffers and returning a NULL). 
+ +If this is NOT done, Redis will handle corrupted (or just truncated) serialized +data by producing an error message and terminating the process. + + + +### `RedisModule_SaveDataTypeToString` + + RedisModuleString *RedisModule_SaveDataTypeToString(RedisModuleCtx *ctx, + void *data, + const moduleType *mt); + +Encode a module data type 'mt' value 'data' into serialized form, and return it +as a newly allocated `RedisModuleString`. + +This call basically reuses the '`rdb_save`' callback which module data types +implement in order to allow a module to arbitrarily serialize/de-serialize +keys, similar to how the Redis 'DUMP' and 'RESTORE' commands are implemented. + + + +## AOF API for modules data types -## `RedisModule_EmitAOF` + - void RedisModule_EmitAOF(RedisModuleIO *io, const char *cmdname, const char *fmt, ...); +### `RedisModule_EmitAOF` + + void RedisModule_EmitAOF(RedisModuleIO *io, + const char *cmdname, + const char *fmt, + ...); Emits a command into the AOF during the AOF rewriting process. This function -is only called in the context of the aof_rewrite method of data types exported -by a module. The command works exactly like `RedisModule_Call()` in the way +is only called in the context of the `aof_rewrite` method of data types exported +by a module. The command works exactly like [`RedisModule_Call()`](#RedisModule_Call) in the way the parameters are passed, but it does not return anything as the error handling is performed by Redis itself. -## `RedisModule_GetKeyNameFromIO` + + +## IO context handling + + + +### `RedisModule_GetKeyNameFromIO` const RedisModuleString *RedisModule_GetKeyNameFromIO(RedisModuleIO *io); -Returns a RedisModuleString with the name of the key currently saving or +Returns a `RedisModuleString` with the name of the key currently saving or loading, when an IO data type callback is called. There is no guarantee that the key name is always available, so this may return NULL. 
-## `RedisModule_GetKeyNameFromModuleKey` + - const RedisModuleString *RedisModule_GetKeyNameFromModuleKey(RedisModuleKey *key); +### `RedisModule_GetKeyNameFromModuleKey` -Returns a RedisModuleString with the name of the key from RedisModuleKey + const RedisModuleString *RedisModule_GetKeyNameFromModuleKey(RedisModuleKey *key); -## `RedisModule_LogRaw` +Returns a `RedisModuleString` with the name of the key from `RedisModuleKey`. - void RedisModule_LogRaw(RedisModule *module, const char *levelstr, const char *fmt, va_list ap); + -This is the low level function implementing both: +## Logging - RM_Log() - RM_LogIOError() + -## `RedisModule_Log` +### `RedisModule_Log` - void RedisModule_Log(RedisModuleCtx *ctx, const char *levelstr, const char *fmt, ...); + void RedisModule_Log(RedisModuleCtx *ctx, + const char *levelstr, + const char *fmt, + ...); Produces a log message to the standard Redis log, the format accepts printf-alike specifiers, while level is a string describing the log level to use when emitting the log, and must be one of the following: -* "debug" -* "verbose" -* "notice" -* "warning" +* "debug" (`REDISMODULE_LOGLEVEL_DEBUG`) +* "verbose" (`REDISMODULE_LOGLEVEL_VERBOSE`) +* "notice" (`REDISMODULE_LOGLEVEL_NOTICE`) +* "warning" (`REDISMODULE_LOGLEVEL_WARNING`) If the specified log level is invalid, verbose is used by default. There is a fixed limit to the length of the log line this function is able @@ -1726,9 +2657,14 @@ The ctx argument may be NULL if cannot be provided in the context of the caller for instance threads or callbacks, in which case a generic "module" will be used instead of the module name. -## `RedisModule_LogIOError` + - void RedisModule_LogIOError(RedisModuleIO *io, const char *levelstr, const char *fmt, ...); +### `RedisModule_LogIOError` + + void RedisModule_LogIOError(RedisModuleIO *io, + const char *levelstr, + const char *fmt, + ...); Log errors from RDB / AOF serialization callbacks. 
@@ -1736,16 +2672,23 @@ This function should be used when a callback is returning a critical error to the caller since cannot load or save the data for some critical reason. -## `RedisModule__Assert` + + +### `RedisModule__Assert` void RedisModule__Assert(const char *estr, const char *file, int line); Redis-like assert function. +The macro `RedisModule_Assert(expression)` is recommended, rather than +calling this function directly. + A failed assertion will shut down the server and produce logging information that looks identical to information generated by Redis itself. -## `RedisModule_LatencyAddSample` + + +### `RedisModule_LatencyAddSample` void RedisModule_LatencyAddSample(const char *event, mstime_t latency); @@ -1753,114 +2696,146 @@ Allows adding event to the latency monitor to be observed by the LATENCY command. The call is skipped if the latency is smaller than the configured latency-monitor-threshold. -## `RedisModule_BlockClient` + + +## Blocking clients from modules + +For a guide about blocking commands in modules, see +[https://redis.io/topics/modules-blocking-ops](https://redis.io/topics/modules-blocking-ops). - RedisModuleBlockedClient *RedisModule_BlockClient(RedisModuleCtx *ctx, RedisModuleCmdFunc reply_callback, RedisModuleCmdFunc timeout_callback, void (*free_privdata)(RedisModuleCtx*,void*), long long timeout_ms); + + +### `RedisModule_BlockClient` + + RedisModuleBlockedClient *RedisModule_BlockClient(RedisModuleCtx *ctx, + RedisModuleCmdFunc reply_callback, + RedisModuleCmdFunc timeout_callback, + void (*free_privdata)(RedisModuleCtx*, void*), + long long timeout_ms); Block a client in the context of a blocking command, returning an handle which will be used, later, in order to unblock the client with a call to -`RedisModule_UnblockClient()`. The arguments specify callback functions +[`RedisModule_UnblockClient()`](#RedisModule_UnblockClient). The arguments specify callback functions and a timeout after which the client is unblocked. 
The callbacks are called in the following contexts: - reply_callback: called after a successful RedisModule_UnblockClient() - call in order to reply to the client and unblock it. + reply_callback: called after a successful RedisModule_UnblockClient() + call in order to reply to the client and unblock it. - reply_timeout: called when the timeout is reached in order to send an - error to the client. + timeout_callback: called when the timeout is reached in order to send an + error to the client. - free_privdata: called in order to free the private data that is passed - by RedisModule_UnblockClient() call. + free_privdata: called in order to free the private data that is passed + by RedisModule_UnblockClient() call. -Note: `RedisModule_UnblockClient` should be called for every blocked client, +Note: [`RedisModule_UnblockClient`](#RedisModule_UnblockClient) should be called for every blocked client, even if client was killed, timed-out or disconnected. Failing to do so will result in memory leaks. -There are some cases where `RedisModule_BlockClient()` cannot be used: +There are some cases where [`RedisModule_BlockClient()`](#RedisModule_BlockClient) cannot be used: 1. If the client is a Lua script. 2. If the client is executing a MULTI block. -In these cases, a call to `RedisModule_BlockClient()` will **not** block the +In these cases, a call to [`RedisModule_BlockClient()`](#RedisModule_BlockClient) will **not** block the client, but instead produce a specific error reply. -## `RedisModule_BlockClientOnKeys` +Measuring background time: By default the time spent in the blocked command +is not account for the total command duration. To include such time you should +use [`RedisModule_BlockedClientMeasureTimeStart()`](#RedisModule_BlockedClientMeasureTimeStart) and [`RedisModule_BlockedClientMeasureTimeEnd()`](#RedisModule_BlockedClientMeasureTimeEnd) one, +or multiple times within the blocking command background work. 
+ + - RedisModuleBlockedClient *RedisModule_BlockClientOnKeys(RedisModuleCtx *ctx, RedisModuleCmdFunc reply_callback, RedisModuleCmdFunc timeout_callback, void (*free_privdata)(RedisModuleCtx*,void*), long long timeout_ms, RedisModuleString **keys, int numkeys, void *privdata); +### `RedisModule_BlockClientOnKeys` -This call is similar to `RedisModule_BlockClient()`, however in this case we + RedisModuleBlockedClient *RedisModule_BlockClientOnKeys(RedisModuleCtx *ctx, + RedisModuleCmdFunc reply_callback, + RedisModuleCmdFunc timeout_callback, + void (*free_privdata)(RedisModuleCtx*, void*), + long long timeout_ms, + RedisModuleString **keys, + int numkeys, + void *privdata); + +This call is similar to [`RedisModule_BlockClient()`](#RedisModule_BlockClient), however in this case we don't just block the client, but also ask Redis to unblock it automatically once certain keys become "ready", that is, contain more data. Basically this is similar to what a typical Redis command usually does, -like BLPOP or ZPOPMAX: the client blocks if it cannot be served ASAP, +like BLPOP or BZPOPMAX: the client blocks if it cannot be served ASAP, and later when the key receives new data (a list push for instance), the client is unblocked and served. However in the case of this module API, when the client is unblocked? -1. If you block ok a key of a type that has blocking operations associated, +1. If you block on a key of a type that has blocking operations associated, like a list, a sorted set, a stream, and so forth, the client may be unblocked once the relevant key is targeted by an operation that normally unblocks the native blocking operations for that type. So if we block on a list key, an RPUSH command may unblock our client and so forth. 2. If you are implementing your native data type, or if you want to add new unblocking conditions in addition to "1", you can call the modules API - RedisModule_SignalKeyAsReady(). 
+ [`RedisModule_SignalKeyAsReady()`](#RedisModule_SignalKeyAsReady). Anyway we can't be sure if the client should be unblocked just because the key is signaled as ready: for instance a successive operation may change the key, or a client in queue before this one can be served, modifying the key as well and making it empty again. So when a client is blocked with -`RedisModule_BlockClientOnKeys()` the reply callback is not called after -`RM_UnblockCLient()` is called, but every time a key is signaled as ready: +[`RedisModule_BlockClientOnKeys()`](#RedisModule_BlockClientOnKeys) the reply callback is not called after +[`RedisModule_UnblockClient()`](#RedisModule_UnblockClient) is called, but every time a key is signaled as ready: if the reply callback can serve the client, it returns `REDISMODULE_OK` and the client is unblocked, otherwise it will return `REDISMODULE_ERR` and we'll try again later. The reply callback can access the key that was signaled as ready by -calling the API `RedisModule_GetBlockedClientReadyKey()`, that returns -just the string name of the key as a RedisModuleString object. +calling the API [`RedisModule_GetBlockedClientReadyKey()`](#RedisModule_GetBlockedClientReadyKey), that returns +just the string name of the key as a `RedisModuleString` object. Thanks to this system we can setup complex blocking scenarios, like unblocking a client only if a list contains at least 5 items or other more fancy logics. -Note that another difference with `RedisModule_BlockClient()`, is that here +Note that another difference with [`RedisModule_BlockClient()`](#RedisModule_BlockClient), is that here we pass the private data directly when blocking the client: it will be accessible later in the reply callback. 
Normally when blocking with -`RedisModule_BlockClient()` the private data to reply to the client is -passed when calling `RedisModule_UnblockClient()` but here the unblocking +[`RedisModule_BlockClient()`](#RedisModule_BlockClient) the private data to reply to the client is +passed when calling [`RedisModule_UnblockClient()`](#RedisModule_UnblockClient) but here the unblocking is performed by Redis itself, so we need to have some private data before hand. The private data is used to store any information about the specific unblocking operation that you are implementing. Such information will be -freed using the free_privdata callback provided by the user. +freed using the `free_privdata` callback provided by the user. However the reply callback will be able to access the argument vector of the command, so the private data is often not needed. -Note: Under normal circumstances `RedisModule_UnblockClient` should not be +Note: Under normal circumstances [`RedisModule_UnblockClient`](#RedisModule_UnblockClient) should not be called for clients that are blocked on keys (Either the key will become ready or a timeout will occur). If for some reason you do want to call RedisModule_UnblockClient it is possible: Client will be handled as if it were timed-out (You must implement the timeout callback in that case). -## `RedisModule_SignalKeyAsReady` + + +### `RedisModule_SignalKeyAsReady` void RedisModule_SignalKeyAsReady(RedisModuleCtx *ctx, RedisModuleString *key); This function is used in order to potentially unblock a client blocked -on keys with `RedisModule_BlockClientOnKeys()`. When this function is called, -all the clients blocked for this key will get their reply callback called, -and if the callback returns `REDISMODULE_OK` the client will be unblocked. +on keys with [`RedisModule_BlockClientOnKeys()`](#RedisModule_BlockClientOnKeys). When this function is called, +all the clients blocked for this key will get their `reply_callback` called. 
+ +Note: The function has no effect if the signaled key doesn't exist. + + -## `RedisModule_UnblockClient` +### `RedisModule_UnblockClient` int RedisModule_UnblockClient(RedisModuleBlockedClient *bc, void *privdata); -Unblock a client blocked by ``RedisModule_BlockedClient``. This will trigger +Unblock a client blocked by `RedisModule_BlockedClient`. This will trigger the reply callbacks to be called in order to reply to the client. The 'privdata' argument will be accessible by the reply callback, so the caller of this function can pass any value that is needed in order to @@ -1872,31 +2847,35 @@ to compute reply or some reply obtained via networking. Note 1: this function can be called from threads spawned by the module. -Note 2: when we unblock a client that is blocked for keys using -the API `RedisModule_BlockClientOnKeys()`, the privdata argument here is -not used, and the reply callback is called with the privdata pointer that -was passed when blocking the client. - +Note 2: when we unblock a client that is blocked for keys using the API +[`RedisModule_BlockClientOnKeys()`](#RedisModule_BlockClientOnKeys), the privdata argument here is not used. Unblocking a client that was blocked for keys using this API will still require the client to get some reply, so the function will use the -"timeout" handler in order to do so. +"timeout" handler in order to do so (The privdata provided in +[`RedisModule_BlockClientOnKeys()`](#RedisModule_BlockClientOnKeys) is accessible from the timeout +callback via [`RedisModule_GetBlockedClientPrivateData`](#RedisModule_GetBlockedClientPrivateData)). + + -## `RedisModule_AbortBlock` +### `RedisModule_AbortBlock` int RedisModule_AbortBlock(RedisModuleBlockedClient *bc); Abort a blocked client blocking operation: the client will be unblocked without firing any callback. 
-## `RedisModule_SetDisconnectCallback` + - void RedisModule_SetDisconnectCallback(RedisModuleBlockedClient *bc, RedisModuleDisconnectFunc callback); +### `RedisModule_SetDisconnectCallback` + + void RedisModule_SetDisconnectCallback(RedisModuleBlockedClient *bc, + RedisModuleDisconnectFunc callback); Set a callback that will be called if a blocked client disconnects -before the module has a chance to call `RedisModule_UnblockClient()` +before the module has a chance to call [`RedisModule_UnblockClient()`](#RedisModule_UnblockClient) Usually what you want to do there, is to cleanup your module state -so that you can call `RedisModule_UnblockClient()` safely, otherwise +so that you can call [`RedisModule_UnblockClient()`](#RedisModule_UnblockClient) safely, otherwise the client will remain blocked forever if the timeout is large. Notes: @@ -1908,34 +2887,44 @@ Notes: a timeout. In such a case, the client is unblocked automatically and the timeout callback is called. -## `RedisModule_IsBlockedReplyRequest` + + +### `RedisModule_IsBlockedReplyRequest` int RedisModule_IsBlockedReplyRequest(RedisModuleCtx *ctx); Return non-zero if a module command was called in order to fill the reply for a blocked client. -## `RedisModule_IsBlockedTimeoutRequest` + + +### `RedisModule_IsBlockedTimeoutRequest` int RedisModule_IsBlockedTimeoutRequest(RedisModuleCtx *ctx); Return non-zero if a module command was called in order to fill the reply for a blocked client that timed out. 
-## `RedisModule_GetBlockedClientPrivateData` + + +### `RedisModule_GetBlockedClientPrivateData` void *RedisModule_GetBlockedClientPrivateData(RedisModuleCtx *ctx); -Get the private data set by `RedisModule_UnblockClient()` +Get the private data set by [`RedisModule_UnblockClient()`](#RedisModule_UnblockClient) -## `RedisModule_GetBlockedClientReadyKey` + + +### `RedisModule_GetBlockedClientReadyKey` RedisModuleString *RedisModule_GetBlockedClientReadyKey(RedisModuleCtx *ctx); Get the key that is ready when the reply callback is called in the context -of a client blocked by `RedisModule_BlockClientOnKeys()`. +of a client blocked by [`RedisModule_BlockClientOnKeys()`](#RedisModule_BlockClientOnKeys). + + -## `RedisModule_GetBlockedClientHandle` +### `RedisModule_GetBlockedClientHandle` RedisModuleBlockedClient *RedisModule_GetBlockedClientHandle(RedisModuleCtx *ctx); @@ -1944,7 +2933,9 @@ This is useful in the reply and timeout callbacks of blocked clients, before sometimes the module has the blocked client handle references around, and wants to cleanup it. -## `RedisModule_BlockedClientDisconnected` + + +### `RedisModule_BlockedClientDisconnected` int RedisModule_BlockedClientDisconnected(RedisModuleCtx *ctx); @@ -1952,14 +2943,20 @@ Return true if when the free callback of a blocked client is called, the reason for the client to be unblocked is that it disconnected while it was blocked. -## `RedisModule_GetThreadSafeContext` + + +## Thread Safe Contexts + + + +### `RedisModule_GetThreadSafeContext` RedisModuleCtx *RedisModule_GetThreadSafeContext(RedisModuleBlockedClient *bc); Return a context which can be used inside threads to make Redis context calls with certain modules APIs. If 'bc' is not NULL then the module will be bound to a blocked client, and it will be possible to use the -``RedisModule_Reply`*` family of functions to accumulate a reply for when the +`RedisModule_Reply*` family of functions to accumulate a reply for when the client will be unblocked. 
Otherwise the thread safe context will be detached by a specific client. @@ -1969,61 +2966,102 @@ To call non-reply APIs, the thread safe context must be prepared with: ... make your call here ... RedisModule_ThreadSafeContextUnlock(ctx); -This is not needed when using ``RedisModule_Reply`*` functions, assuming +This is not needed when using `RedisModule_Reply*` functions, assuming that a blocked client was used when the context was created, otherwise no `RedisModule_Reply`* call should be made at all. -TODO: thread safe contexts do not inherit the blocked client -selected database. +NOTE: If you're creating a detached thread safe context (bc is NULL), +consider using `RM_GetDetachedThreadSafeContext` which will also retain +the module ID and thus be more useful for logging. + + + +### `RedisModule_GetDetachedThreadSafeContext` + + RedisModuleCtx *RedisModule_GetDetachedThreadSafeContext(RedisModuleCtx *ctx); -## `RedisModule_FreeThreadSafeContext` +Return a detached thread safe context that is not associated with any +specific blocked client, but is associated with the module's context. + +This is useful for modules that wish to hold a global context over +a long term, for purposes such as logging. + + + +### `RedisModule_FreeThreadSafeContext` void RedisModule_FreeThreadSafeContext(RedisModuleCtx *ctx); Release a thread safe context. -## `RedisModule_ThreadSafeContextLock` + + +### `RedisModule_ThreadSafeContextLock` void RedisModule_ThreadSafeContextLock(RedisModuleCtx *ctx); Acquire the server lock before executing a thread safe API call. -This is not needed for ``RedisModule_Reply`*` calls when there is +This is not needed for `RedisModule_Reply*` calls when there is a blocked client connected to the thread safe context. 
-## `RedisModule_ThreadSafeContextUnlock` + + +### `RedisModule_ThreadSafeContextTryLock` + + int RedisModule_ThreadSafeContextTryLock(RedisModuleCtx *ctx); + +Similar to [`RedisModule_ThreadSafeContextLock`](#RedisModule_ThreadSafeContextLock) but this function +would not block if the server lock is already acquired. + +If successful (lock acquired) `REDISMODULE_OK` is returned, +otherwise `REDISMODULE_ERR` is returned and errno is set +accordingly. + + + +### `RedisModule_ThreadSafeContextUnlock` void RedisModule_ThreadSafeContextUnlock(RedisModuleCtx *ctx); Release the server lock after a thread safe API call was executed. -## `RedisModule_SubscribeToKeyspaceEvents` + + +## Module Keyspace Notifications API + + - int RedisModule_SubscribeToKeyspaceEvents(RedisModuleCtx *ctx, int types, RedisModuleNotificationFunc callback); +### `RedisModule_SubscribeToKeyspaceEvents` + + int RedisModule_SubscribeToKeyspaceEvents(RedisModuleCtx *ctx, + int types, + RedisModuleNotificationFunc callback); Subscribe to keyspace notifications. This is a low-level version of the keyspace-notifications API. A module can register callbacks to be notified -when keyspce events occur. +when keyspace events occur. Notification events are filtered by their type (string events, set events, etc), and the subscriber callback receives only events that match a specific mask of event types. -When subscribing to notifications with `RedisModule_SubscribeToKeyspaceEvents` +When subscribing to notifications with [`RedisModule_SubscribeToKeyspaceEvents`](#RedisModule_SubscribeToKeyspaceEvents) the module must provide an event type-mask, denoting the events the subscriber is interested in. 
This can be an ORed mask of any of the following flags: - - REDISMODULE_NOTIFY_GENERIC: Generic commands like DEL, EXPIRE, RENAME - - REDISMODULE_NOTIFY_STRING: String events - - REDISMODULE_NOTIFY_LIST: List events - - REDISMODULE_NOTIFY_SET: Set events - - REDISMODULE_NOTIFY_HASH: Hash events - - REDISMODULE_NOTIFY_ZSET: Sorted Set events - - REDISMODULE_NOTIFY_EXPIRED: Expiration events - - REDISMODULE_NOTIFY_EVICTED: Eviction events - - REDISMODULE_NOTIFY_STREAM: Stream events - - REDISMODULE_NOTIFY_KEYMISS: Key-miss events - - REDISMODULE_NOTIFY_ALL: All events (Excluding REDISMODULE_NOTIFY_KEYMISS) - - REDISMODULE_NOTIFY_LOADED: A special notification available only for modules, + - `REDISMODULE_NOTIFY_GENERIC`: Generic commands like DEL, EXPIRE, RENAME + - `REDISMODULE_NOTIFY_STRING`: String events + - `REDISMODULE_NOTIFY_LIST`: List events + - `REDISMODULE_NOTIFY_SET`: Set events + - `REDISMODULE_NOTIFY_HASH`: Hash events + - `REDISMODULE_NOTIFY_ZSET`: Sorted Set events + - `REDISMODULE_NOTIFY_EXPIRED`: Expiration events + - `REDISMODULE_NOTIFY_EVICTED`: Eviction events + - `REDISMODULE_NOTIFY_STREAM`: Stream events + - `REDISMODULE_NOTIFY_MODULE`: Module types events + - `REDISMODULE_NOTIFY_KEYMISS`: Key-miss events + - `REDISMODULE_NOTIFY_ALL`: All events (Excluding `REDISMODULE_NOTIFY_KEYMISS`) + - `REDISMODULE_NOTIFY_LOADED`: A special notification available only for modules, indicates that the key was loaded from persistence. Notice, when this event fires, the given key can not be retained, use RM_CreateStringFromString @@ -2034,9 +3072,9 @@ to the module to filter the actions taken based on the key. The subscriber signature is: - int (*RedisModuleNotificationFunc) (RedisModuleCtx *ctx, int type, - const char *event, - RedisModuleString *key); + int (*RedisModuleNotificationFunc) (RedisModuleCtx *ctx, int type, + const char *event, + RedisModuleString *key); `type` is the event type bit, that must match the mask given at registration time. 
The event string is the actual command being executed, and key is the @@ -2053,24 +3091,39 @@ Warning: the notification callbacks are performed in a synchronous manner, so notification callbacks must to be fast, or they would slow Redis down. If you need to take long actions, use threads to offload them. -See https://redis.io/topics/notifications for more information. +See [https://redis.io/topics/notifications](https://redis.io/topics/notifications) for more information. + + -## `RedisModule_GetNotifyKeyspaceEvents` +### `RedisModule_GetNotifyKeyspaceEvents` int RedisModule_GetNotifyKeyspaceEvents(); Get the configured bitmap of notify-keyspace-events (Could be used -for additional filtering in RedisModuleNotificationFunc) +for additional filtering in `RedisModuleNotificationFunc`) -## `RedisModule_NotifyKeyspaceEvent` + - int RedisModule_NotifyKeyspaceEvent(RedisModuleCtx *ctx, int type, const char *event, RedisModuleString *key); +### `RedisModule_NotifyKeyspaceEvent` + + int RedisModule_NotifyKeyspaceEvent(RedisModuleCtx *ctx, + int type, + const char *event, + RedisModuleString *key); Expose notifyKeyspaceEvent to modules -## `RedisModule_RegisterClusterMessageReceiver` + + +## Modules Cluster API - void RedisModule_RegisterClusterMessageReceiver(RedisModuleCtx *ctx, uint8_t type, RedisModuleClusterMessageReceiver callback); + + +### `RedisModule_RegisterClusterMessageReceiver` + + void RedisModule_RegisterClusterMessageReceiver(RedisModuleCtx *ctx, + uint8_t type, + RedisModuleClusterMessageReceiver callback); Register a callback receiver for cluster messages of type 'type'. If there was already a registered callback, this will replace the callback function @@ -2078,9 +3131,15 @@ with the one provided, otherwise if the callback is set to NULL and there is already a callback for this function, the callback is unregistered (so this API call is also used in order to delete the receiver). 
-## `RedisModule_SendClusterMessage` + - int RedisModule_SendClusterMessage(RedisModuleCtx *ctx, char *target_id, uint8_t type, unsigned char *msg, uint32_t len); +### `RedisModule_SendClusterMessage` + + int RedisModule_SendClusterMessage(RedisModuleCtx *ctx, + char *target_id, + uint8_t type, + unsigned char *msg, + uint32_t len); Send a message to all the nodes in the cluster if `target` is NULL, otherwise at the specified target, which is a `REDISMODULE_NODE_ID_LEN` bytes node ID, as @@ -2090,7 +3149,9 @@ The function returns `REDISMODULE_OK` if the message was successfully sent, otherwise if the node is not connected or such node ID does not map to any known cluster node, `REDISMODULE_ERR` is returned. -## `RedisModule_GetClusterNodesList` + + +### `RedisModule_GetClusterNodesList` char **RedisModule_GetClusterNodesList(RedisModuleCtx *ctx, size_t *numnodes); @@ -2100,11 +3161,11 @@ The number of returned node IDs is stored into `*numnodes`. However if this function is called by a module not running an a Redis instance with Redis Cluster enabled, NULL is returned instead. -The IDs returned can be used with `RedisModule_GetClusterNodeInfo()` in order +The IDs returned can be used with [`RedisModule_GetClusterNodeInfo()`](#RedisModule_GetClusterNodeInfo) in order to get more information about single nodes. The array returned by this function must be freed using the function -`RedisModule_FreeClusterNodesList()`. +[`RedisModule_FreeClusterNodesList()`](#RedisModule_FreeClusterNodesList). Example: @@ -2116,20 +3177,26 @@ Example: } RedisModule_FreeClusterNodesList(ids); -## `RedisModule_FreeClusterNodesList` + + +### `RedisModule_FreeClusterNodesList` void RedisModule_FreeClusterNodesList(char **ids); -Free the node list obtained with `RedisModule_GetClusterNodesList`. +Free the node list obtained with [`RedisModule_GetClusterNodesList`](#RedisModule_GetClusterNodesList). 
+ + -## `RedisModule_GetMyClusterID` +### `RedisModule_GetMyClusterID` const char *RedisModule_GetMyClusterID(void); Return this node ID (`REDISMODULE_CLUSTER_ID_LEN` bytes) or NULL if the cluster is disabled. -## `RedisModule_GetClusterSize` + + +### `RedisModule_GetClusterSize` size_t RedisModule_GetClusterSize(void); @@ -2138,7 +3205,39 @@ Return the number of nodes in the cluster, regardless of their state be smaller, but not greater than this number. If the instance is not in cluster mode, zero is returned. -## `RedisModule_SetClusterFlags` + + +### `RedisModule_GetClusterNodeInfo` + + int RedisModule_GetClusterNodeInfo(RedisModuleCtx *ctx, + const char *id, + char *ip, + char *master_id, + int *port, + int *flags); + +Populate the specified info for the node having as ID the specified 'id', +then returns `REDISMODULE_OK`. Otherwise if the node ID does not exist from +the POV of this local node, `REDISMODULE_ERR` is returned. + +The arguments `ip`, `master_id`, `port` and `flags` can be NULL in case we don't +need to populate back certain info. If an `ip` and `master_id` (only populated +if the instance is a slave) are specified, they point to buffers holding +at least `REDISMODULE_NODE_ID_LEN` bytes. The strings written back as `ip` +and `master_id` are not null terminated. + +The list of flags reported is the following: + +* `REDISMODULE_NODE_MYSELF`: This node +* `REDISMODULE_NODE_MASTER`: The node is a master +* `REDISMODULE_NODE_SLAVE`: The node is a replica +* `REDISMODULE_NODE_PFAIL`: We see the node as failing +* `REDISMODULE_NODE_FAIL`: The cluster agrees the node is failing +* `REDISMODULE_NODE_NOFAILOVER`: The slave is configured to never failover + + + +### `RedisModule_SetClusterFlags` void RedisModule_SetClusterFlags(RedisModuleCtx *ctx, uint64_t flags); @@ -2148,39 +3247,81 @@ This is useful for modules that use the Cluster API in order to create a different distributed system, but still want to use the Redis Cluster message bus. 
Flags that can be set: - CLUSTER_MODULE_FLAG_NO_FAILOVER - CLUSTER_MODULE_FLAG_NO_REDIRECTION +* `CLUSTER_MODULE_FLAG_NO_FAILOVER` +* `CLUSTER_MODULE_FLAG_NO_REDIRECTION` With the following effects: - NO_FAILOVER: prevent Redis Cluster slaves to failover a failing master. - Also disables the replica migration feature. +* `NO_FAILOVER`: prevent Redis Cluster slaves to failover a failing master. + Also disables the replica migration feature. + +* `NO_REDIRECTION`: Every node will accept any key, without trying to perform + partitioning according to the user Redis Cluster algorithm. + Slots informations will still be propagated across the + cluster, but without effects. + + + +## Modules Timers API + +Module timers are an high precision "green timers" abstraction where +every module can register even millions of timers without problems, even if +the actual event loop will just have a single timer that is used to awake the +module timers subsystem in order to process the next event. - NO_REDIRECTION: Every node will accept any key, without trying to perform - partitioning according to the user Redis Cluster algorithm. - Slots informations will still be propagated across the - cluster, but without effects. +All the timers are stored into a radix tree, ordered by expire time, when +the main Redis event loop timer callback is called, we try to process all +the timers already expired one after the other. Then we re-enter the event +loop registering a timer that will expire when the next to process module +timer will expire. -## `RedisModule_CreateTimer` +Every time the list of active timers drops to zero, we unregister the +main event loop timer, so that there is no overhead when such feature is +not used. 
- RedisModuleTimerID RedisModule_CreateTimer(RedisModuleCtx *ctx, mstime_t period, RedisModuleTimerProc callback, void *data); + + +### `RedisModule_CreateTimer` + + RedisModuleTimerID RedisModule_CreateTimer(RedisModuleCtx *ctx, + mstime_t period, + RedisModuleTimerProc callback, + void *data); Create a new timer that will fire after `period` milliseconds, and will call the specified function using `data` as argument. The returned timer ID can be used to get information from the timer or to stop it before it fires. +Note that for the common use case of a repeating timer (Re-registration +of the timer inside the `RedisModuleTimerProc` callback) it matters when +this API is called: +If it is called at the beginning of 'callback' it means +the event will triggered every 'period'. +If it is called at the end of 'callback' it means +there will 'period' milliseconds gaps between events. +(If the time it takes to execute 'callback' is negligible the two +statements above mean the same) -## `RedisModule_StopTimer` + - int RedisModule_StopTimer(RedisModuleCtx *ctx, RedisModuleTimerID id, void **data); +### `RedisModule_StopTimer` + + int RedisModule_StopTimer(RedisModuleCtx *ctx, + RedisModuleTimerID id, + void **data); Stop a timer, returns `REDISMODULE_OK` if the timer was found, belonged to the calling module, and was stopped, otherwise `REDISMODULE_ERR` is returned. If not NULL, the data pointer is set to the value of the data argument when the timer was created. -## `RedisModule_GetTimerInfo` + + +### `RedisModule_GetTimerInfo` - int RedisModule_GetTimerInfo(RedisModuleCtx *ctx, RedisModuleTimerID id, uint64_t *remaining, void **data); + int RedisModule_GetTimerInfo(RedisModuleCtx *ctx, + RedisModuleTimerID id, + uint64_t *remaining, + void **data); Obtain information about a timer: its remaining time before firing (in milliseconds), and the private data pointer associated with the timer. 
@@ -2189,13 +3330,21 @@ no information is returned and the function returns `REDISMODULE_ERR`, otherwise `REDISMODULE_OK` is returned. The arguments remaining or data can be NULL if the caller does not need certain information. -## `RedisModule_CreateModuleUser` + + +## Modules ACL API + +Implements a hook into the authentication and authorization within Redis. + + + +### `RedisModule_CreateModuleUser` RedisModuleUser *RedisModule_CreateModuleUser(const char *name); Creates a Redis ACL user that the module can use to authenticate a client. After obtaining the user, the module should set what such user can do -using the `RM_SetUserACL()` function. Once configured, the user +using the `RedisModule_SetUserACL()` function. Once configured, the user can be used in order to authenticate a connection, with the specified ACL rules, using the `RedisModule_AuthClientWithUser()` function. @@ -2208,69 +3357,118 @@ Note that: * The created user can be used to authenticate multiple Redis connections. The caller can later free the user using the function -`RM_FreeModuleUser()`. When this function is called, if there are +[`RedisModule_FreeModuleUser()`](#RedisModule_FreeModuleUser). When this function is called, if there are still clients authenticated with this user, they are disconnected. The function to free the user should only be used when the caller really wants to invalidate the user to define a new one with different capabilities. -## `RedisModule_FreeModuleUser` + + +### `RedisModule_FreeModuleUser` int RedisModule_FreeModuleUser(RedisModuleUser *user); Frees a given user and disconnects all of the clients that have been -authenticated with it. See `RM_CreateModuleUser` for detailed usage. +authenticated with it. See [`RedisModule_CreateModuleUser`](#RedisModule_CreateModuleUser) for detailed usage. 
+ + -## `RedisModule_SetModuleUserACL` +### `RedisModule_SetModuleUserACL` int RedisModule_SetModuleUserACL(RedisModuleUser *user, const char* acl); Sets the permissions of a user created through the redis module interface. The syntax is the same as ACL SETUSER, so refer to the -documentation in acl.c for more information. See `RM_CreateModuleUser` +documentation in acl.c for more information. See [`RedisModule_CreateModuleUser`](#RedisModule_CreateModuleUser) for detailed usage. Returns `REDISMODULE_OK` on success and `REDISMODULE_ERR` on failure and will set an errno describing why the operation failed. -## `RedisModule_AuthenticateClientWithUser` + - int RedisModule_AuthenticateClientWithUser(RedisModuleCtx *ctx, RedisModuleUser *module_user, RedisModuleUserChangedFunc callback, void *privdata, uint64_t *client_id); +### `RedisModule_AuthenticateClientWithUser` + + int RedisModule_AuthenticateClientWithUser(RedisModuleCtx *ctx, + RedisModuleUser *module_user, + RedisModuleUserChangedFunc callback, + void *privdata, + uint64_t *client_id); Authenticate the current context's user with the provided redis acl user. Returns `REDISMODULE_ERR` if the user is disabled. -See authenticateClientWithUser for information about callback, client_id, +See authenticateClientWithUser for information about callback, `client_id`, and general usage for authentication. -## `RedisModule_AuthenticateClientWithACLUser` + + +### `RedisModule_AuthenticateClientWithACLUser` - int RedisModule_AuthenticateClientWithACLUser(RedisModuleCtx *ctx, const char *name, size_t len, RedisModuleUserChangedFunc callback, void *privdata, uint64_t *client_id); + int RedisModule_AuthenticateClientWithACLUser(RedisModuleCtx *ctx, + const char *name, + size_t len, + RedisModuleUserChangedFunc callback, + void *privdata, + uint64_t *client_id); Authenticate the current context's user with the provided redis acl user. Returns `REDISMODULE_ERR` if the user is disabled or the user does not exist. 
-See authenticateClientWithUser for information about callback, client_id, +See authenticateClientWithUser for information about callback, `client_id`, and general usage for authentication. -## `RedisModule_DeauthenticateAndCloseClient` + - int RedisModule_DeauthenticateAndCloseClient(RedisModuleCtx *ctx, uint64_t client_id); +### `RedisModule_DeauthenticateAndCloseClient` + + int RedisModule_DeauthenticateAndCloseClient(RedisModuleCtx *ctx, + uint64_t client_id); Deauthenticate and close the client. The client resources will not be be immediately freed, but will be cleaned up in a background job. This is the recommended way to deauthenicate a client since most clients can't -handle users becomming deauthenticated. Returns `REDISMODULE_ERR` when the +handle users becoming deauthenticated. Returns `REDISMODULE_ERR` when the client doesn't exist and `REDISMODULE_OK` when the operation was successful. -The client ID is returned from the `RM_AuthenticateClientWithUser` and -`RM_AuthenticateClientWithACLUser` APIs, but can be obtained through +The client ID is returned from the [`RedisModule_AuthenticateClientWithUser`](#RedisModule_AuthenticateClientWithUser) and +[`RedisModule_AuthenticateClientWithACLUser`](#RedisModule_AuthenticateClientWithACLUser) APIs, but can be obtained through the CLIENT api or through server events. This function is not thread safe, and must be executed within the context of a command or thread safe context. -## `RedisModule_CreateDict` + + +### `RedisModule_GetClientCertificate` + + RedisModuleString *RedisModule_GetClientCertificate(RedisModuleCtx *ctx, + uint64_t client_id); + +Return the X.509 client-side certificate used by the client to authenticate +this connection. + +The return value is an allocated `RedisModuleString` that is a X.509 certificate +encoded in PEM (Base64) format. It should be freed (or auto-freed) by the caller. 
+ +A NULL value is returned in the following conditions: + +- Connection ID does not exist +- Connection is not a TLS connection +- Connection is a TLS connection but no client ceritifcate was used + + + +## Modules Dictionary API + +Implements a sorted dictionary (actually backed by a radix tree) with +the usual get / set / del / num-items API, together with an iterator +capable of going back and forth. + + + +### `RedisModule_CreateDict` RedisModuleDict *RedisModule_CreateDict(RedisModuleCtx *ctx); @@ -2287,51 +3485,76 @@ or NULL, depending on what you want. Please follow the following rules: reclaim the dictionary memory, as well as the strings returned by the Next / Prev dictionary iterator calls. -## `RedisModule_FreeDict` + + +### `RedisModule_FreeDict` void RedisModule_FreeDict(RedisModuleCtx *ctx, RedisModuleDict *d); -Free a dictionary created with `RM_CreateDict()`. You need to pass the +Free a dictionary created with [`RedisModule_CreateDict()`](#RedisModule_CreateDict). You need to pass the context pointer 'ctx' only if the dictionary was created using the context instead of passing NULL. -## `RedisModule_DictSize` + + +### `RedisModule_DictSize` uint64_t RedisModule_DictSize(RedisModuleDict *d); Return the size of the dictionary (number of keys). -## `RedisModule_DictSetC` + + +### `RedisModule_DictSetC` - int RedisModule_DictSetC(RedisModuleDict *d, void *key, size_t keylen, void *ptr); + int RedisModule_DictSetC(RedisModuleDict *d, + void *key, + size_t keylen, + void *ptr); Store the specified key into the dictionary, setting its value to the pointer 'ptr'. If the key was added with success, since it did not already exist, `REDISMODULE_OK` is returned. Otherwise if the key already exists the function returns `REDISMODULE_ERR`. 
-## `RedisModule_DictReplaceC` + - int RedisModule_DictReplaceC(RedisModuleDict *d, void *key, size_t keylen, void *ptr); +### `RedisModule_DictReplaceC` -Like `RedisModule_DictSetC()` but will replace the key with the new + int RedisModule_DictReplaceC(RedisModuleDict *d, + void *key, + size_t keylen, + void *ptr); + +Like [`RedisModule_DictSetC()`](#RedisModule_DictSetC) but will replace the key with the new value if the key already exists. -## `RedisModule_DictSet` + + +### `RedisModule_DictSet` int RedisModule_DictSet(RedisModuleDict *d, RedisModuleString *key, void *ptr); -Like `RedisModule_DictSetC()` but takes the key as a RedisModuleString. +Like [`RedisModule_DictSetC()`](#RedisModule_DictSetC) but takes the key as a `RedisModuleString`. + + + +### `RedisModule_DictReplace` -## `RedisModule_DictReplace` + int RedisModule_DictReplace(RedisModuleDict *d, + RedisModuleString *key, + void *ptr); - int RedisModule_DictReplace(RedisModuleDict *d, RedisModuleString *key, void *ptr); +Like [`RedisModule_DictReplaceC()`](#RedisModule_DictReplaceC) but takes the key as a `RedisModuleString`. -Like `RedisModule_DictReplaceC()` but takes the key as a RedisModuleString. + -## `RedisModule_DictGetC` +### `RedisModule_DictGetC` - void *RedisModule_DictGetC(RedisModuleDict *d, void *key, size_t keylen, int *nokey); + void *RedisModule_DictGetC(RedisModuleDict *d, + void *key, + size_t keylen, + int *nokey); Return the value stored at the specified key. The function returns NULL both in the case the key does not exist, or if you actually stored @@ -2339,15 +3562,24 @@ NULL at key. So, optionally, if the 'nokey' pointer is not NULL, it will be set by reference to 1 if the key does not exist, or to 0 if the key exists. -## `RedisModule_DictGet` + - void *RedisModule_DictGet(RedisModuleDict *d, RedisModuleString *key, int *nokey); +### `RedisModule_DictGet` -Like `RedisModule_DictGetC()` but takes the key as a RedisModuleString. 
+ void *RedisModule_DictGet(RedisModuleDict *d, + RedisModuleString *key, + int *nokey); -## `RedisModule_DictDelC` +Like [`RedisModule_DictGetC()`](#RedisModule_DictGetC) but takes the key as a `RedisModuleString`. - int RedisModule_DictDelC(RedisModuleDict *d, void *key, size_t keylen, void *oldval); + + +### `RedisModule_DictDelC` + + int RedisModule_DictDelC(RedisModuleDict *d, + void *key, + size_t keylen, + void *oldval); Remove the specified key from the dictionary, returning `REDISMODULE_OK` if the key was found and delted, or `REDISMODULE_ERR` if instead there was @@ -2355,82 +3587,110 @@ no such key in the dictionary. When the operation is successful, if 'oldval' is not NULL, then '*oldval' is set to the value stored at the key before it was deleted. Using this feature it is possible to get a pointer to the value (for instance in order to release it), without -having to call `RedisModule_DictGet()` before deleting the key. +having to call [`RedisModule_DictGet()`](#RedisModule_DictGet) before deleting the key. + + + +### `RedisModule_DictDel` -## `RedisModule_DictDel` + int RedisModule_DictDel(RedisModuleDict *d, + RedisModuleString *key, + void *oldval); - int RedisModule_DictDel(RedisModuleDict *d, RedisModuleString *key, void *oldval); +Like [`RedisModule_DictDelC()`](#RedisModule_DictDelC) but gets the key as a `RedisModuleString`. -Like `RedisModule_DictDelC()` but gets the key as a RedisModuleString. 
+ -## `RedisModule_DictIteratorStartC` +### `RedisModule_DictIteratorStartC` - RedisModuleDictIter *RedisModule_DictIteratorStartC(RedisModuleDict *d, const char *op, void *key, size_t keylen); + RedisModuleDictIter *RedisModule_DictIteratorStartC(RedisModuleDict *d, + const char *op, + void *key, + size_t keylen); -Return an interator, setup in order to start iterating from the specified +Return an iterator, setup in order to start iterating from the specified key by applying the operator 'op', which is just a string specifying the comparison operator to use in order to seek the first element. The -operators avalable are: +operators available are: -"^" -- Seek the first (lexicographically smaller) key. -"$" -- Seek the last (lexicographically biffer) key. -">" -- Seek the first element greter than the specified key. -">=" -- Seek the first element greater or equal than the specified key. -"<" -- Seek the first element smaller than the specified key. -"<=" -- Seek the first element smaller or equal than the specified key. -"==" -- Seek the first element matching exactly the specified key. +* `^` – Seek the first (lexicographically smaller) key. +* `$` – Seek the last (lexicographically biffer) key. +* `>` – Seek the first element greater than the specified key. +* `>=` – Seek the first element greater or equal than the specified key. +* `<` – Seek the first element smaller than the specified key. +* `<=` – Seek the first element smaller or equal than the specified key. +* `==` – Seek the first element matching exactly the specified key. -Note that for "^" and "$" the passed key is not used, and the user may +Note that for `^` and `$` the passed key is not used, and the user may just pass NULL with a length of 0. 
If the element to start the iteration cannot be seeked based on the -key and operator passed, `RedisModule_DictNext()` / Prev() will just return +key and operator passed, [`RedisModule_DictNext()`](#RedisModule_DictNext) / Prev() will just return `REDISMODULE_ERR` at the first call, otherwise they'll produce elements. -## `RedisModule_DictIteratorStart` + - RedisModuleDictIter *RedisModule_DictIteratorStart(RedisModuleDict *d, const char *op, RedisModuleString *key); +### `RedisModule_DictIteratorStart` -Exactly like `RedisModule_DictIteratorStartC`, but the key is passed as a -RedisModuleString. + RedisModuleDictIter *RedisModule_DictIteratorStart(RedisModuleDict *d, + const char *op, + RedisModuleString *key); -## `RedisModule_DictIteratorStop` +Exactly like [`RedisModule_DictIteratorStartC`](#RedisModule_DictIteratorStartC), but the key is passed as a +`RedisModuleString`. + + + +### `RedisModule_DictIteratorStop` void RedisModule_DictIteratorStop(RedisModuleDictIter *di); -Release the iterator created with `RedisModule_DictIteratorStart()`. This call +Release the iterator created with [`RedisModule_DictIteratorStart()`](#RedisModule_DictIteratorStart). This call is mandatory otherwise a memory leak is introduced in the module. -## `RedisModule_DictIteratorReseekC` + - int RedisModule_DictIteratorReseekC(RedisModuleDictIter *di, const char *op, void *key, size_t keylen); +### `RedisModule_DictIteratorReseekC` -After its creation with `RedisModule_DictIteratorStart()`, it is possible to + int RedisModule_DictIteratorReseekC(RedisModuleDictIter *di, + const char *op, + void *key, + size_t keylen); + +After its creation with [`RedisModule_DictIteratorStart()`](#RedisModule_DictIteratorStart), it is possible to change the currently selected element of the iterator by using this API call. 
The result based on the operator and key is exactly like -the function `RedisModule_DictIteratorStart()`, however in this case the +the function [`RedisModule_DictIteratorStart()`](#RedisModule_DictIteratorStart), however in this case the return value is just `REDISMODULE_OK` in case the seeked element was found, or `REDISMODULE_ERR` in case it was not possible to seek the specified element. It is possible to reseek an iterator as many times as you want. -## `RedisModule_DictIteratorReseek` + + +### `RedisModule_DictIteratorReseek` - int RedisModule_DictIteratorReseek(RedisModuleDictIter *di, const char *op, RedisModuleString *key); + int RedisModule_DictIteratorReseek(RedisModuleDictIter *di, + const char *op, + RedisModuleString *key); -Like `RedisModule_DictIteratorReseekC()` but takes the key as as a -RedisModuleString. +Like [`RedisModule_DictIteratorReseekC()`](#RedisModule_DictIteratorReseekC) but takes the key as a +`RedisModuleString`. -## `RedisModule_DictNextC` + - void *RedisModule_DictNextC(RedisModuleDictIter *di, size_t *keylen, void **dataptr); +### `RedisModule_DictNextC` -Return the current item of the dictionary iterator 'di' and steps to the + void *RedisModule_DictNextC(RedisModuleDictIter *di, + size_t *keylen, + void **dataptr); + +Return the current item of the dictionary iterator `di` and steps to the next element. If the iterator already yield the last element and there are no other elements to return, NULL is returned, otherwise a pointer -to a string representing the key is provided, and the '*keylen' length -is set by reference (if keylen is not NULL). The '*dataptr', if not NULL +to a string representing the key is provided, and the `*keylen` length +is set by reference (if keylen is not NULL). The `*dataptr`, if not NULL is set to the value of the pointer stored at the returned key as auxiliary -data (as set by the `RedisModule_DictSet` API). +data (as set by the [`RedisModule_DictSet`](#RedisModule_DictSet) API). 
Usage example: @@ -2442,7 +3702,7 @@ Usage example: } The returned pointer is of type void because sometimes it makes sense -to cast it to a char* sometimes to an unsigned char* depending on the +to cast it to a `char*` sometimes to an unsigned `char*` depending on the fact it contains or not binary data, so this API ends being more comfortable to use. @@ -2450,41 +3710,58 @@ The validity of the returned pointer is until the next call to the next/prev iterator step. Also the pointer is no longer valid once the iterator is released. -## `RedisModule_DictPrevC` + + +### `RedisModule_DictPrevC` - void *RedisModule_DictPrevC(RedisModuleDictIter *di, size_t *keylen, void **dataptr); + void *RedisModule_DictPrevC(RedisModuleDictIter *di, + size_t *keylen, + void **dataptr); -This function is exactly like `RedisModule_DictNext()` but after returning +This function is exactly like [`RedisModule_DictNext()`](#RedisModule_DictNext) but after returning the currently selected element in the iterator, it selects the previous element (laxicographically smaller) instead of the next one. -## `RedisModule_DictNext` + - RedisModuleString *RedisModule_DictNext(RedisModuleCtx *ctx, RedisModuleDictIter *di, void **dataptr); +### `RedisModule_DictNext` -Like RedisModuleNextC(), but instead of returning an internally allocated + RedisModuleString *RedisModule_DictNext(RedisModuleCtx *ctx, + RedisModuleDictIter *di, + void **dataptr); + +Like `RedisModuleNextC()`, but instead of returning an internally allocated buffer and key length, it returns directly a module string object allocated in the specified context 'ctx' (that may be NULL exactly like for the main -API `RedisModule_CreateString)`. +API [`RedisModule_CreateString`](#RedisModule_CreateString)). The returned string object should be deallocated after use, either manually or by using a context that has automatic memory management active. 
-## `RedisModule_DictPrev` + + +### `RedisModule_DictPrev` - RedisModuleString *RedisModule_DictPrev(RedisModuleCtx *ctx, RedisModuleDictIter *di, void **dataptr); + RedisModuleString *RedisModule_DictPrev(RedisModuleCtx *ctx, + RedisModuleDictIter *di, + void **dataptr); -Like `RedisModule_DictNext()` but after returning the currently selected +Like [`RedisModule_DictNext()`](#RedisModule_DictNext) but after returning the currently selected element in the iterator, it selects the previous element (laxicographically smaller) instead of the next one. -## `RedisModule_DictCompareC` + - int RedisModule_DictCompareC(RedisModuleDictIter *di, const char *op, void *key, size_t keylen); +### `RedisModule_DictCompareC` + + int RedisModule_DictCompareC(RedisModuleDictIter *di, + const char *op, + void *key, + size_t keylen); Compare the element currently pointed by the iterator to the specified element given by key/keylen, according to the operator 'op' (the set of -valid operators are the same valid for `RedisModule_DictIteratorStart)`. +valid operators are the same valid for [`RedisModule_DictIteratorStart`](#RedisModule_DictIteratorStart)). If the comparision is successful the command returns `REDISMODULE_OK` otherwise `REDISMODULE_ERR` is returned. @@ -2492,106 +3769,204 @@ This is useful when we want to just emit a lexicographical range, so in the loop, as we iterate elements, we can also check if we are still on range. -The function returne `REDISMODULE_ERR` if the iterator reached the +The function returns `REDISMODULE_ERR` if the iterator reached the end of elements condition as well. -## `RedisModule_DictCompare` + + +### `RedisModule_DictCompare` + + int RedisModule_DictCompare(RedisModuleDictIter *di, + const char *op, + RedisModuleString *key); + +Like [`RedisModule_DictCompareC`](#RedisModule_DictCompareC) but gets the key to compare with the current +iterator key as a `RedisModuleString`. 
- int RedisModule_DictCompare(RedisModuleDictIter *di, const char *op, RedisModuleString *key); + -Like `RedisModule_DictCompareC` but gets the key to compare with the current -iterator key as a RedisModuleString. +## Modules Info fields -## `RedisModule_InfoAddSection` + + +### `RedisModule_InfoAddSection` int RedisModule_InfoAddSection(RedisModuleInfoCtx *ctx, char *name); Used to start a new section, before adding any fields. the section name will -be prefixed by "_" and must only include A-Z,a-z,0-9. -NULL or empty string indicates the default section (only ) is used. +be prefixed by `_` and must only include A-Z,a-z,0-9. +NULL or empty string indicates the default section (only ``) is used. When return value is `REDISMODULE_ERR`, the section should and will be skipped. -## `RedisModule_InfoBeginDictField` + + +### `RedisModule_InfoBeginDictField` int RedisModule_InfoBeginDictField(RedisModuleInfoCtx *ctx, char *name); Starts a dict field, similar to the ones in INFO KEYSPACE. Use normal `RedisModule_InfoAddField`* functions to add the items to this field, and -terminate with `RedisModule_InfoEndDictField`. +terminate with [`RedisModule_InfoEndDictField`](#RedisModule_InfoEndDictField). -## `RedisModule_InfoEndDictField` + + +### `RedisModule_InfoEndDictField` int RedisModule_InfoEndDictField(RedisModuleInfoCtx *ctx); -Ends a dict field, see `RedisModule_InfoBeginDictField` +Ends a dict field, see [`RedisModule_InfoBeginDictField`](#RedisModule_InfoBeginDictField) + + + +### `RedisModule_InfoAddFieldString` + + int RedisModule_InfoAddFieldString(RedisModuleInfoCtx *ctx, + char *field, + RedisModuleString *value); + +Used by `RedisModuleInfoFunc` to add info fields. +Each field will be automatically prefixed by `_`. +Field names or values must not include `\r\n` or `:`. 
-## `RedisModule_InfoAddFieldString` + - int RedisModule_InfoAddFieldString(RedisModuleInfoCtx *ctx, char *field, RedisModuleString *value); +### `RedisModule_InfoAddFieldCString` -Used by RedisModuleInfoFunc to add info fields. -Each field will be automatically prefixed by "_". -Field names or values must not include \r\n of ":" + int RedisModule_InfoAddFieldCString(RedisModuleInfoCtx *ctx, + char *field, + char *value); -## `RedisModule_GetServerInfo` +See [`RedisModule_InfoAddFieldString()`](#RedisModule_InfoAddFieldString). - RedisModuleServerInfoData *RedisModule_GetServerInfo(RedisModuleCtx *ctx, const char *section); + + +### `RedisModule_InfoAddFieldDouble` + + int RedisModule_InfoAddFieldDouble(RedisModuleInfoCtx *ctx, + char *field, + double value); + +See [`RedisModule_InfoAddFieldString()`](#RedisModule_InfoAddFieldString). + + + +### `RedisModule_InfoAddFieldLongLong` + + int RedisModule_InfoAddFieldLongLong(RedisModuleInfoCtx *ctx, + char *field, + long long value); + +See [`RedisModule_InfoAddFieldString()`](#RedisModule_InfoAddFieldString). + + + +### `RedisModule_InfoAddFieldULongLong` + + int RedisModule_InfoAddFieldULongLong(RedisModuleInfoCtx *ctx, + char *field, + unsigned long long value); + +See [`RedisModule_InfoAddFieldString()`](#RedisModule_InfoAddFieldString). + + + +### `RedisModule_RegisterInfoFunc` + + int RedisModule_RegisterInfoFunc(RedisModuleCtx *ctx, RedisModuleInfoFunc cb); + +Registers callback for the INFO command. The callback should add INFO fields +by calling the `RedisModule_InfoAddField*()` functions. + + + +### `RedisModule_GetServerInfo` + + RedisModuleServerInfoData *RedisModule_GetServerInfo(RedisModuleCtx *ctx, + const char *section); Get information about the server similar to the one that returns from the INFO command. This function takes an optional 'section' argument that may be NULL. The return value holds the output and can be used with -`RedisModule_ServerInfoGetField` and alike to get the individual fields. 
-When done, it needs to be freed with `RedisModule_FreeServerInfo` or with the +[`RedisModule_ServerInfoGetField`](#RedisModule_ServerInfoGetField) and alike to get the individual fields. +When done, it needs to be freed with [`RedisModule_FreeServerInfo`](#RedisModule_FreeServerInfo) or with the automatic memory management mechanism if enabled. -## `RedisModule_FreeServerInfo` + + +### `RedisModule_FreeServerInfo` - void RedisModule_FreeServerInfo(RedisModuleCtx *ctx, RedisModuleServerInfoData *data); + void RedisModule_FreeServerInfo(RedisModuleCtx *ctx, + RedisModuleServerInfoData *data); -Free data created with `RM_GetServerInfo()`. You need to pass the +Free data created with [`RedisModule_GetServerInfo()`](#RedisModule_GetServerInfo). You need to pass the context pointer 'ctx' only if the dictionary was created using the context instead of passing NULL. -## `RedisModule_ServerInfoGetField` + - RedisModuleString *RedisModule_ServerInfoGetField(RedisModuleCtx *ctx, RedisModuleServerInfoData *data, const char* field); +### `RedisModule_ServerInfoGetField` -Get the value of a field from data collected with `RM_GetServerInfo()`. You + RedisModuleString *RedisModule_ServerInfoGetField(RedisModuleCtx *ctx, + RedisModuleServerInfoData *data, + const char* field); + +Get the value of a field from data collected with [`RedisModule_GetServerInfo()`](#RedisModule_GetServerInfo). You need to pass the context pointer 'ctx' only if you want to use auto memory mechanism to release the returned string. Return value will be NULL if the field was not found. -## `RedisModule_ServerInfoGetFieldC` + + +### `RedisModule_ServerInfoGetFieldC` - const char *RedisModule_ServerInfoGetFieldC(RedisModuleServerInfoData *data, const char* field); + const char *RedisModule_ServerInfoGetFieldC(RedisModuleServerInfoData *data, + const char* field); -Similar to `RM_ServerInfoGetField`, but returns a char* which should not be freed but the caller. 
+Similar to [`RedisModule_ServerInfoGetField`](#RedisModule_ServerInfoGetField), but returns a char* which should not be freed by the caller. -## `RedisModule_ServerInfoGetFieldSigned` + - long long RedisModule_ServerInfoGetFieldSigned(RedisModuleServerInfoData *data, const char* field, int *out_err); +### `RedisModule_ServerInfoGetFieldSigned` -Get the value of a field from data collected with `RM_GetServerInfo()`. If the + long long RedisModule_ServerInfoGetFieldSigned(RedisModuleServerInfoData *data, + const char* field, + int *out_err); + +Get the value of a field from data collected with [`RedisModule_GetServerInfo()`](#RedisModule_GetServerInfo). If the field is not found, or is not numerical or out of range, return value will be -0, and the optional out_err argument will be set to `REDISMODULE_ERR`. +0, and the optional `out_err` argument will be set to `REDISMODULE_ERR`. + + -## `RedisModule_ServerInfoGetFieldUnsigned` +### `RedisModule_ServerInfoGetFieldUnsigned` - unsigned long long RedisModule_ServerInfoGetFieldUnsigned(RedisModuleServerInfoData *data, const char* field, int *out_err); + unsigned long long RedisModule_ServerInfoGetFieldUnsigned(RedisModuleServerInfoData *data, + const char* field, + int *out_err); -Get the value of a field from data collected with `RM_GetServerInfo()`. If the +Get the value of a field from data collected with [`RedisModule_GetServerInfo()`](#RedisModule_GetServerInfo). If the field is not found, or is not numerical or out of range, return value will be -0, and the optional out_err argument will be set to `REDISMODULE_ERR`. +0, and the optional `out_err` argument will be set to `REDISMODULE_ERR`. -## `RedisModule_ServerInfoGetFieldDouble` + - double RedisModule_ServerInfoGetFieldDouble(RedisModuleServerInfoData *data, const char* field, int *out_err); +### `RedisModule_ServerInfoGetFieldDouble` -Get the value of a field from data collected with `RM_GetServerInfo()`. 
If the + double RedisModule_ServerInfoGetFieldDouble(RedisModuleServerInfoData *data, + const char* field, + int *out_err); + +Get the value of a field from data collected with [`RedisModule_GetServerInfo()`](#RedisModule_GetServerInfo). If the field is not found, or is not a double, return value will be 0, and the -optional out_err argument will be set to `REDISMODULE_ERR`. +optional `out_err` argument will be set to `REDISMODULE_ERR`. + + + +## Modules utility APIs + + -## `RedisModule_GetRandomBytes` +### `RedisModule_GetRandomBytes` void RedisModule_GetRandomBytes(unsigned char *dst, size_t len); @@ -2600,21 +3975,31 @@ initialized seed. This function is fast so can be used to generate many bytes without any effect on the operating system entropy pool. Currently this function is not thread safe. -## `RedisModule_GetRandomHexChars` + + +### `RedisModule_GetRandomHexChars` void RedisModule_GetRandomHexChars(char *dst, size_t len); -Like `RedisModule_GetRandomBytes()` but instead of setting the string to +Like [`RedisModule_GetRandomBytes()`](#RedisModule_GetRandomBytes) but instead of setting the string to random bytes the string is set to random characters in the in the hex charset [0-9a-f]. -## `RedisModule_ExportSharedAPI` + + +## Modules API exporting / importing + + - int RedisModule_ExportSharedAPI(RedisModuleCtx *ctx, const char *apiname, void *func); +### `RedisModule_ExportSharedAPI` + + int RedisModule_ExportSharedAPI(RedisModuleCtx *ctx, + const char *apiname, + void *func); This function is called by a module in order to export some API with a given name. Other modules will be able to use this API by calling the -symmetrical function `RM_GetSharedAPI()` and casting the return value to +symmetrical function [`RedisModule_GetSharedAPI()`](#RedisModule_GetSharedAPI) and casting the return value to the right function pointer. 
The function will return `REDISMODULE_OK` if the name is not already taken, @@ -2625,7 +4010,9 @@ IMPORTANT: the apiname argument should be a string literal with static lifetime. The API relies on the fact that it will always be valid in the future. -## `RedisModule_GetSharedAPI` + + +### `RedisModule_GetSharedAPI` void *RedisModule_GetSharedAPI(RedisModuleCtx *ctx, const char *apiname); @@ -2640,7 +4027,7 @@ step, that is called every time a module attempts to execute a command that requires external APIs: if some API cannot be resolved, the command should return an error. -Here is an exmaple: +Here is an example: int ... myCommandImplementation() { if (getExternalAPIs() == 0) { @@ -2662,102 +4049,163 @@ And the function registerAPI() is: return 1; } -## `RedisModule_UnregisterCommandFilter` + + +## Module Command Filter API + + - int RedisModule_UnregisterCommandFilter(RedisModuleCtx *ctx, RedisModuleCommandFilter *filter); +### `RedisModule_UnregisterCommandFilter` + + int RedisModule_UnregisterCommandFilter(RedisModuleCtx *ctx, + RedisModuleCommandFilter *filter); Unregister a command filter. -## `RedisModule_CommandFilterArgsCount` + + +### `RedisModule_CommandFilterArgsCount` int RedisModule_CommandFilterArgsCount(RedisModuleCommandFilterCtx *fctx); Return the number of arguments a filtered command has. The number of arguments include the command itself. -## `RedisModule_CommandFilterArgGet` + - const RedisModuleString *RedisModule_CommandFilterArgGet(RedisModuleCommandFilterCtx *fctx, int pos); +### `RedisModule_CommandFilterArgGet` + + const RedisModuleString *RedisModule_CommandFilterArgGet(RedisModuleCommandFilterCtx *fctx, + int pos); Return the specified command argument. The first argument (position 0) is the command itself, and the rest are user-provided args. 
-## `RedisModule_CommandFilterArgDelete` + + +### `RedisModule_CommandFilterArgInsert` + + int RedisModule_CommandFilterArgInsert(RedisModuleCommandFilterCtx *fctx, + int pos, + RedisModuleString *arg); + +Modify the filtered command by inserting a new argument at the specified +position. The specified `RedisModuleString` argument may be used by Redis +after the filter context is destroyed, so it must not be auto-memory +allocated, freed or used elsewhere. + + + +### `RedisModule_CommandFilterArgReplace` + + int RedisModule_CommandFilterArgReplace(RedisModuleCommandFilterCtx *fctx, + int pos, + RedisModuleString *arg); + +Modify the filtered command by replacing an existing argument with a new one. +The specified `RedisModuleString` argument may be used by Redis after the +filter context is destroyed, so it must not be auto-memory allocated, freed +or used elsewhere. + + + +### `RedisModule_CommandFilterArgDelete` - int RedisModule_CommandFilterArgDelete(RedisModuleCommandFilterCtx *fctx, int pos); + int RedisModule_CommandFilterArgDelete(RedisModuleCommandFilterCtx *fctx, + int pos); Modify the filtered command by deleting an argument at the specified position. -## `RedisModule_MallocSize` + + +### `RedisModule_MallocSize` size_t RedisModule_MallocSize(void* ptr); -For a given pointer allocated via `RedisModule_Alloc()` or -`RedisModule_Realloc()`, return the amount of memory allocated for it. +For a given pointer allocated via [`RedisModule_Alloc()`](#RedisModule_Alloc) or +[`RedisModule_Realloc()`](#RedisModule_Realloc), return the amount of memory allocated for it. Note that this may be different (larger) than the memory we allocated with the allocation calls, since sometimes the underlying allocator will allocate more memory. 
-## `RedisModule_GetUsedMemoryRatio` + + +### `RedisModule_GetUsedMemoryRatio` float RedisModule_GetUsedMemoryRatio(); Return the a number between 0 to 1 indicating the amount of memory currently used, relative to the Redis "maxmemory" configuration. -0 - No memory limit configured. -Between 0 and 1 - The percentage of the memory used normalized in 0-1 range. -Exactly 1 - Memory limit reached. -Greater 1 - More memory used than the configured limit. +* 0 - No memory limit configured. +* Between 0 and 1 - The percentage of the memory used normalized in 0-1 range. +* Exactly 1 - Memory limit reached. +* Greater 1 - More memory used than the configured limit. + + + +## Scanning keyspace and hashes -## `RedisModule_ScanCursorCreate` + + +### `RedisModule_ScanCursorCreate` RedisModuleScanCursor *RedisModule_ScanCursorCreate(); -Create a new cursor to be used with `RedisModule_Scan` +Create a new cursor to be used with [`RedisModule_Scan`](#RedisModule_Scan) + + -## `RedisModule_ScanCursorRestart` +### `RedisModule_ScanCursorRestart` void RedisModule_ScanCursorRestart(RedisModuleScanCursor *cursor); Restart an existing cursor. The keys will be rescanned. -## `RedisModule_ScanCursorDestroy` + + +### `RedisModule_ScanCursorDestroy` void RedisModule_ScanCursorDestroy(RedisModuleScanCursor *cursor); Destroy the cursor struct. -## `RedisModule_Scan` + + +### `RedisModule_Scan` - int RedisModule_Scan(RedisModuleCtx *ctx, RedisModuleScanCursor *cursor, RedisModuleScanCB fn, void *privdata); + int RedisModule_Scan(RedisModuleCtx *ctx, + RedisModuleScanCursor *cursor, + RedisModuleScanCB fn, + void *privdata); Scan API that allows a module to scan all the keys and value in the selected db. Callback for scan implementation. -void scan_callback(RedisModuleCtx *ctx, RedisModuleString *keyname, - RedisModuleKey *key, void *privdata); -ctx - the redis module context provided to for the scan. -keyname - owned by the caller and need to be retained if used after this -function. 
-key - holds info on the key and value, it is provided as best effort, in -some cases it might be NULL, in which case the user should (can) use -`RedisModule_OpenKey` (and CloseKey too). -when it is provided, it is owned by the caller and will be free when the -callback returns. + void scan_callback(RedisModuleCtx *ctx, RedisModuleString *keyname, + RedisModuleKey *key, void *privdata); -privdata - the user data provided to `RedisModule_Scan`. +- `ctx`: the redis module context provided to for the scan. +- `keyname`: owned by the caller and need to be retained if used after this + function. +- `key`: holds info on the key and value, it is provided as best effort, in + some cases it might be NULL, in which case the user should (can) use + [`RedisModule_OpenKey()`](#RedisModule_OpenKey) (and CloseKey too). + when it is provided, it is owned by the caller and will be freed when the + callback returns. +- `privdata`: the user data provided to [`RedisModule_Scan()`](#RedisModule_Scan). The way it should be used: + RedisModuleCursor *c = RedisModule_ScanCursorCreate(); while(RedisModule_Scan(ctx, c, callback, privateData)); RedisModule_ScanCursorDestroy(c); It is also possible to use this API from another thread while the lock -is acquired durring the actuall call to `RM_Scan`: +is acquired during the actual call to [`RedisModule_Scan`](#RedisModule_Scan):      RedisModuleCursor *c = RedisModule_ScanCursorCreate(); RedisModule_ThreadSafeContextLock(ctx); @@ -2771,7 +4219,7 @@ is acquired durring the actuall call to `RM_Scan`: The function will return 1 if there are more elements to scan and 0 otherwise, possibly setting errno if the call failed. -It is also possible to restart and existing cursor using `RM_CursorRestart`. +It is also possible to restart an existing cursor using [`RedisModule_ScanCursorRestart`](#RedisModule_ScanCursorRestart). IMPORTANT: This API is very similar to the Redis SCAN command from the point of view of the guarantees it provides. 
This means that the API @@ -2785,34 +4233,43 @@ other key. Moreover playing with the Redis keyspace while iterating may have the effect of returning more duplicates. A safe pattern is to store the keys names you want to modify elsewhere, and perform the actions on the keys -later when the iteration is complete. Howerver this can cost a lot of +later when the iteration is complete. However this can cost a lot of memory, so it may make sense to just operate on the current key when possible during the iteration, given that this is safe. -## `RedisModule_ScanKey` + + +### `RedisModule_ScanKey` - int RedisModule_ScanKey(RedisModuleKey *key, RedisModuleScanCursor *cursor, RedisModuleScanKeyCB fn, void *privdata); + int RedisModule_ScanKey(RedisModuleKey *key, + RedisModuleScanCursor *cursor, + RedisModuleScanKeyCB fn, + void *privdata); Scan api that allows a module to scan the elements in a hash, set or sorted set key Callback for scan implementation. -void scan_callback(RedisModuleKey *key, RedisModuleString* field, RedisModuleString* value, void *privdata); + + void scan_callback(RedisModuleKey *key, RedisModuleString* field, RedisModuleString* value, void *privdata); + - key - the redis key context provided to for the scan. - field - field name, owned by the caller and need to be retained if used after this function. - value - value string or NULL for set type, owned by the caller and need to be retained if used after this function. -- privdata - the user data provided to `RedisModule_ScanKey`. +- privdata - the user data provided to [`RedisModule_ScanKey`](#RedisModule_ScanKey). The way it should be used: + RedisModuleCursor *c = RedisModule_ScanCursorCreate(); RedisModuleKey *key = RedisModule_OpenKey(...) 
 while(RedisModule_ScanKey(key, c, callback, privateData)); RedisModule_CloseKey(key); RedisModule_ScanCursorDestroy(c); -It is also possible to use this API from another thread while the lock is acquired durring -the actuall call to `RM_Scan`, and re-opening the key each time: +It is also possible to use this API from another thread while the lock is acquired during +the actual call to [`RedisModule_ScanKey`](#RedisModule_ScanKey), and re-opening the key each time: + RedisModuleCursor *c = RedisModule_ScanCursorCreate(); RedisModule_ThreadSafeContextLock(ctx); RedisModuleKey *key = RedisModule_OpenKey(...) @@ -2828,7 +4285,7 @@ the actuall call to `RM_Scan`, and re-opening the key each time: The function will return 1 if there are more elements to scan and 0 otherwise, possibly setting errno if the call failed. -It is also possible to restart and existing cursor using `RM_CursorRestart`. +It is also possible to restart an existing cursor using [`RedisModule_ScanCursorRestart`](#RedisModule_ScanCursorRestart). NOTE: Certain operations are unsafe while iterating the object. For instance while the API guarantees to return at least one time all the elements that @@ -2838,7 +4295,13 @@ you play with the elements, the more duplicates you may get. In general deleting the current element of the data structure is safe, while removing the key you are iterating is not safe. -## `RedisModule_Fork` + + +## Module fork API + + + +### `RedisModule_Fork` int RedisModule_Fork(RedisModuleForkDoneHandler cb, void *user_data); @@ -2846,30 +4309,53 @@ Create a background child process with the current frozen snaphost of the main process where you can do some processing in the background without affecting / freezing the traffic and no need for threads and GIL locking. Note that Redis allows for only one concurrent fork. -When the child wants to exit, it should call `RedisModule_ExitFromChild`. 
-If the parent wants to kill the child it should call `RedisModule_KillForkChild` +When the child wants to exit, it should call [`RedisModule_ExitFromChild`](#RedisModule_ExitFromChild). +If the parent wants to kill the child it should call [`RedisModule_KillForkChild`](#RedisModule_KillForkChild) The done handler callback will be executed on the parent process when the child existed (but not when killed) Return: -1 on failure, on success the parent process will get a positive PID of the child, and the child process will get 0. -## `RedisModule_ExitFromChild` + + +### `RedisModule_SendChildHeartbeat` + + void RedisModule_SendChildHeartbeat(double progress); + +The module is advised to call this function from the fork child once in a while, +so that it can report progress and COW memory to the parent which will be +reported in INFO. +The `progress` argument should be between 0 and 1, or -1 when not available. + + + +### `RedisModule_ExitFromChild` int RedisModule_ExitFromChild(int retcode); Call from the child process when you want to terminate it. retcode will be provided to the done handler executed on the parent process. -## `RedisModule_KillForkChild` + + +### `RedisModule_KillForkChild` int RedisModule_KillForkChild(int child_pid); Can be used to kill the forked child process from the parent process. -child_pid whould be the return value of `RedisModule_Fork`. +`child_pid` would be the return value of [`RedisModule_Fork`](#RedisModule_Fork). -## `RedisModule_SubscribeToServerEvent` + - int RedisModule_SubscribeToServerEvent(RedisModuleCtx *ctx, RedisModuleEvent event, RedisModuleEventCallback callback); +## Server hooks implementation + + + +### `RedisModule_SubscribeToServerEvent` + + int RedisModule_SubscribeToServerEvent(RedisModuleCtx *ctx, + RedisModuleEvent event, + RedisModuleEventCallback callback); Register to be notified, via a callback, when the specified server event happens. 
The callback is called with the event as argument, and an additional @@ -2883,10 +4369,10 @@ is not null, the old callback will be replaced with the new one. The callback must be of this type: - int (*RedisModuleEventCallback)(RedisModuleCtx *ctx, - RedisModuleEvent eid, - uint64_t subevent, - void *data); + int (*RedisModuleEventCallback)(RedisModuleCtx *ctx, + RedisModuleEvent eid, + uint64_t subevent, + void *data); The 'ctx' is a normal Redis module context that the callback can use in order to call other modules APIs. The 'eid' is the event itself, this @@ -2900,189 +4386,239 @@ more relevant data. Here is a list of events you can use as 'eid' and related sub events: - RedisModuleEvent_ReplicationRoleChanged +* `RedisModuleEvent_ReplicationRoleChanged`: + + This event is called when the instance switches from master + to replica or the other way around, however the event is + also called when the replica remains a replica but starts to + replicate with a different master. + + The following sub events are available: + + * `REDISMODULE_SUBEVENT_REPLROLECHANGED_NOW_MASTER` + * `REDISMODULE_SUBEVENT_REPLROLECHANGED_NOW_REPLICA` + + The 'data' field can be casted by the callback to a + `RedisModuleReplicationInfo` structure with the following fields: - This event is called when the instance switches from master - to replica or the other way around, however the event is - also called when the replica remains a replica but starts to - replicate with a different master. 
+ int master; // true if master, false if replica + char *masterhost; // master instance hostname for NOW_REPLICA + int masterport; // master instance port for NOW_REPLICA + char *replid1; // Main replication ID + char *replid2; // Secondary replication ID + uint64_t repl1_offset; // Main replication offset + uint64_t repl2_offset; // Offset of replid2 validity - The following sub events are available: +* `RedisModuleEvent_Persistence` - REDISMODULE_SUBEVENT_REPLROLECHANGED_NOW_MASTER - REDISMODULE_SUBEVENT_REPLROLECHANGED_NOW_REPLICA + This event is called when RDB saving or AOF rewriting starts + and ends. The following sub events are available: - The 'data' field can be casted by the callback to a - RedisModuleReplicationInfo structure with the following fields: + * `REDISMODULE_SUBEVENT_PERSISTENCE_RDB_START` + * `REDISMODULE_SUBEVENT_PERSISTENCE_AOF_START` + * `REDISMODULE_SUBEVENT_PERSISTENCE_SYNC_RDB_START` + * `REDISMODULE_SUBEVENT_PERSISTENCE_ENDED` + * `REDISMODULE_SUBEVENT_PERSISTENCE_FAILED` - int master; // true if master, false if replica - char *masterhost; // master instance hostname for NOW_REPLICA - int masterport; // master instance port for NOW_REPLICA - char *replid1; // Main replication ID - char *replid2; // Secondary replication ID - uint64_t repl1_offset; // Main replication offset - uint64_t repl2_offset; // Offset of replid2 validity + The above events are triggered not just when the user calls the + relevant commands like BGSAVE, but also when a saving operation + or AOF rewriting occurs because of internal server triggers. + The SYNC_RDB_START sub events are happening in the foreground due to + SAVE command, FLUSHALL, or server shutdown, and the other RDB and + AOF sub events are executed in a background fork child, so any + action the module takes can only affect the generated AOF or RDB, + but will not be reflected in the parent process and affect connected + clients and commands. 
Also note that the AOF_START sub event may end + up saving RDB content in case of an AOF with rdb-preamble. - RedisModuleEvent_Persistence +* `RedisModuleEvent_FlushDB` - This event is called when RDB saving or AOF rewriting starts - and ends. The following sub events are available: + The FLUSHALL, FLUSHDB or an internal flush (for instance + because of replication, after the replica synchronization) + happened. The following sub events are available: - REDISMODULE_SUBEVENT_PERSISTENCE_RDB_START - REDISMODULE_SUBEVENT_PERSISTENCE_AOF_START - REDISMODULE_SUBEVENT_PERSISTENCE_SYNC_RDB_START - REDISMODULE_SUBEVENT_PERSISTENCE_ENDED - REDISMODULE_SUBEVENT_PERSISTENCE_FAILED + * `REDISMODULE_SUBEVENT_FLUSHDB_START` + * `REDISMODULE_SUBEVENT_FLUSHDB_END` - The above events are triggered not just when the user calls the - relevant commands like BGSAVE, but also when a saving operation - or AOF rewriting occurs because of internal server triggers. - The SYNC_RDB_START sub events are happening in the forground due to - SAVE command, FLUSHALL, or server shutdown, and the other RDB and - AOF sub events are executed in a background fork child, so any - action the module takes can only affect the generated AOF or RDB, - but will not be reflected in the parent process and affect connected - clients and commands. Also note that the AOF_START sub event may end - up saving RDB content in case of an AOF with rdb-preamble. + The data pointer can be casted to a RedisModuleFlushInfo + structure with the following fields: - RedisModuleEvent_FlushDB + int32_t async; // True if the flush is done in a thread. + // See for instance FLUSHALL ASYNC. + // In this case the END callback is invoked + // immediately after the database is put + // in the free list of the thread. + int32_t dbnum; // Flushed database number, -1 for all the DBs + // in the case of the FLUSHALL operation. 
- The FLUSHALL, FLUSHDB or an internal flush (for instance - because of replication, after the replica synchronization) - happened. The following sub events are available: + The start event is called *before* the operation is initated, thus + allowing the callback to call DBSIZE or other operation on the + yet-to-free keyspace. - REDISMODULE_SUBEVENT_FLUSHDB_START - REDISMODULE_SUBEVENT_FLUSHDB_END +* `RedisModuleEvent_Loading` - The data pointer can be casted to a RedisModuleFlushInfo - structure with the following fields: + Called on loading operations: at startup when the server is + started, but also after a first synchronization when the + replica is loading the RDB file from the master. + The following sub events are available: - int32_t async; // True if the flush is done in a thread. - See for instance FLUSHALL ASYNC. - In this case the END callback is invoked - immediately after the database is put - in the free list of the thread. - int32_t dbnum; // Flushed database number, -1 for all the DBs - in the case of the FLUSHALL operation. + * `REDISMODULE_SUBEVENT_LOADING_RDB_START` + * `REDISMODULE_SUBEVENT_LOADING_AOF_START` + * `REDISMODULE_SUBEVENT_LOADING_REPL_START` + * `REDISMODULE_SUBEVENT_LOADING_ENDED` + * `REDISMODULE_SUBEVENT_LOADING_FAILED` - The start event is called *before* the operation is initated, thus - allowing the callback to call DBSIZE or other operation on the - yet-to-free keyspace. + Note that AOF loading may start with an RDB data in case of + rdb-preamble, in which case you'll only receive an AOF_START event. - RedisModuleEvent_Loading +* `RedisModuleEvent_ClientChange` - Called on loading operations: at startup when the server is - started, but also after a first synchronization when the - replica is loading the RDB file from the master. - The following sub events are available: + Called when a client connects or disconnects. 
+ The data pointer can be casted to a RedisModuleClientInfo + structure, documented in RedisModule_GetClientInfoById(). + The following sub events are available: - REDISMODULE_SUBEVENT_LOADING_RDB_START - REDISMODULE_SUBEVENT_LOADING_AOF_START - REDISMODULE_SUBEVENT_LOADING_REPL_START - REDISMODULE_SUBEVENT_LOADING_ENDED - REDISMODULE_SUBEVENT_LOADING_FAILED + * `REDISMODULE_SUBEVENT_CLIENT_CHANGE_CONNECTED` + * `REDISMODULE_SUBEVENT_CLIENT_CHANGE_DISCONNECTED` - Note that AOF loading may start with an RDB data in case of - rdb-preamble, in which case you'll only recieve an AOF_START event. +* `RedisModuleEvent_Shutdown` + The server is shutting down. No subevents are available. - RedisModuleEvent_ClientChange +* `RedisModuleEvent_ReplicaChange` - Called when a client connects or disconnects. - The data pointer can be casted to a RedisModuleClientInfo - structure, documented in RedisModule_GetClientInfoById(). - The following sub events are available: + This event is called when the instance (that can be both a + master or a replica) get a new online replica, or lose a + replica since it gets disconnected. + The following sub events are available: - REDISMODULE_SUBEVENT_CLIENT_CHANGE_CONNECTED - REDISMODULE_SUBEVENT_CLIENT_CHANGE_DISCONNECTED + * `REDISMODULE_SUBEVENT_REPLICA_CHANGE_ONLINE` + * `REDISMODULE_SUBEVENT_REPLICA_CHANGE_OFFLINE` - RedisModuleEvent_Shutdown + No additional information is available so far: future versions + of Redis will have an API in order to enumerate the replicas + connected and their state. - The server is shutting down. No subevents are available. +* `RedisModuleEvent_CronLoop` - RedisModuleEvent_ReplicaChange + This event is called every time Redis calls the serverCron() + function in order to do certain bookkeeping. Modules that are + required to do operations from time to time may use this callback. + Normally Redis calls this function 10 times per second, but + this changes depending on the "hz" configuration. 
+ No sub events are available. - This event is called when the instance (that can be both a - master or a replica) get a new online replica, or lose a - replica since it gets disconnected. - The following sub events are availble: + The data pointer can be casted to a RedisModuleCronLoop + structure with the following fields: - REDISMODULE_SUBEVENT_REPLICA_CHANGE_ONLINE - REDISMODULE_SUBEVENT_REPLICA_CHANGE_OFFLINE + int32_t hz; // Approximate number of events per second. - No additional information is available so far: future versions - of Redis will have an API in order to enumerate the replicas - connected and their state. +* `RedisModuleEvent_MasterLinkChange` - RedisModuleEvent_CronLoop + This is called for replicas in order to notify when the + replication link becomes functional (up) with our master, + or when it goes down. Note that the link is not considered + up when we just connected to the master, but only if the + replication is happening correctly. + The following sub events are available: - This event is called every time Redis calls the serverCron() - function in order to do certain bookkeeping. Modules that are - required to do operations from time to time may use this callback. - Normally Redis calls this function 10 times per second, but - this changes depending on the "hz" configuration. - No sub events are available. + * `REDISMODULE_SUBEVENT_MASTER_LINK_UP` + * `REDISMODULE_SUBEVENT_MASTER_LINK_DOWN` - The data pointer can be casted to a RedisModuleCronLoop - structure with the following fields: +* `RedisModuleEvent_ModuleChange` - int32_t hz; // Approximate number of events per second. + This event is called when a new module is loaded or one is unloaded. 
+ The following sub events are available: - RedisModuleEvent_MasterLinkChange + * `REDISMODULE_SUBEVENT_MODULE_LOADED` + * `REDISMODULE_SUBEVENT_MODULE_UNLOADED` - This is called for replicas in order to notify when the - replication link becomes functional (up) with our master, - or when it goes down. Note that the link is not considered - up when we just connected to the master, but only if the - replication is happening correctly. - The following sub events are available: + The data pointer can be casted to a RedisModuleModuleChange + structure with the following fields: - REDISMODULE_SUBEVENT_MASTER_LINK_UP - REDISMODULE_SUBEVENT_MASTER_LINK_DOWN + const char* module_name; // Name of module loaded or unloaded. + int32_t module_version; // Module version. - RedisModuleEvent_ModuleChange +* `RedisModuleEvent_LoadingProgress` - This event is called when a new module is loaded or one is unloaded. - The following sub events are availble: + This event is called repeatedly called while an RDB or AOF file + is being loaded. + The following sub events are availble: - REDISMODULE_SUBEVENT_MODULE_LOADED - REDISMODULE_SUBEVENT_MODULE_UNLOADED + * `REDISMODULE_SUBEVENT_LOADING_PROGRESS_RDB` + * `REDISMODULE_SUBEVENT_LOADING_PROGRESS_AOF` - The data pointer can be casted to a RedisModuleModuleChange - structure with the following fields: + The data pointer can be casted to a RedisModuleLoadingProgress + structure with the following fields: - const char* module_name; // Name of module loaded or unloaded. - int32_t module_version; // Module version. + int32_t hz; // Approximate number of events per second. + int32_t progress; // Approximate progress between 0 and 1024, + // or -1 if unknown. - RedisModuleEvent_LoadingProgress +* `RedisModuleEvent_SwapDB` - This event is called repeatedly called while an RDB or AOF file - is being loaded. - The following sub events are availble: + This event is called when a SWAPDB command has been successfully + Executed. 
+ For this event call currently there is no subevents available. - REDISMODULE_SUBEVENT_LOADING_PROGRESS_RDB - REDISMODULE_SUBEVENT_LOADING_PROGRESS_AOF + The data pointer can be casted to a RedisModuleSwapDbInfo + structure with the following fields: - The data pointer can be casted to a RedisModuleLoadingProgress - structure with the following fields: + int32_t dbnum_first; // Swap Db first dbnum + int32_t dbnum_second; // Swap Db second dbnum - int32_t hz; // Approximate number of events per second. - int32_t progress; // Approximate progress between 0 and 1024, - or -1 if unknown. +* `RedisModuleEvent_ReplBackup` -The function returns `REDISMODULE_OK` if the module was successfully subscrived -for the specified event. If the API is called from a wrong context then -`REDISMODULE_ERR` is returned. + Called when diskless-repl-load config is set to swapdb, + And redis needs to backup the the current database for the + possibility to be restored later. A module with global data and + maybe with aux_load and aux_save callbacks may need to use this + notification to backup / restore / discard its globals. + The following sub events are available: -## `RedisModule_SetLRU` + * `REDISMODULE_SUBEVENT_REPL_BACKUP_CREATE` + * `REDISMODULE_SUBEVENT_REPL_BACKUP_RESTORE` + * `REDISMODULE_SUBEVENT_REPL_BACKUP_DISCARD` + +* `RedisModuleEvent_ForkChild` + + Called when a fork child (AOFRW, RDBSAVE, module fork...) is born/dies + The following sub events are available: + + * `REDISMODULE_SUBEVENT_FORK_CHILD_BORN` + * `REDISMODULE_SUBEVENT_FORK_CHILD_DIED` + +The function returns `REDISMODULE_OK` if the module was successfully subscribed +for the specified event. If the API is called from a wrong context or unsupported event +is given then `REDISMODULE_ERR` is returned. 
+ + + +### `RedisModule_IsSubEventSupported` + + int RedisModule_IsSubEventSupported(RedisModuleEvent event, int64_t subevent); + + +For a given server event and subevent, return zero if the +subevent is not supported and non-zero otherwise. + + + +## Key eviction API + + + +### `RedisModule_SetLRU` int RedisModule_SetLRU(RedisModuleKey *key, mstime_t lru_idle); -Set the key last access time for LRU based eviction. not relevent if the +Set the key last access time for LRU based eviction. not relevant if the servers's maxmemory policy is LFU based. Value is idle time in milliseconds. returns `REDISMODULE_OK` if the LRU was updated, `REDISMODULE_ERR` otherwise. -## `RedisModule_GetLRU` + + +### `RedisModule_GetLRU` int RedisModule_GetLRU(RedisModuleKey *key, mstime_t *lru_idle); @@ -3091,7 +4627,9 @@ Value is idletime in milliseconds or -1 if the server's eviction policy is LFU based. returns `REDISMODULE_OK` if when key is valid. -## `RedisModule_SetLFU` + + +### `RedisModule_SetLFU` int RedisModule_SetLFU(RedisModuleKey *key, long long lfu_freq); @@ -3101,7 +4639,9 @@ The frequency is a logarithmic counter that provides an indication of the access frequencyonly (must be <= 255). returns `REDISMODULE_OK` if the LFU was updated, `REDISMODULE_ERR` otherwise. -## `RedisModule_GetLFU` + + +### `RedisModule_GetLFU` int RedisModule_GetLFU(RedisModuleKey *key, long long *lfu_freq); @@ -3109,16 +4649,84 @@ Gets the key access frequency or -1 if the server's eviction policy is not LFU based. returns `REDISMODULE_OK` if when key is valid. -## `RedisModule_ModuleTypeReplaceValue` + + +## Miscellaneous APIs + + + +### `RedisModule_GetContextFlagsAll` + + int RedisModule_GetContextFlagsAll(); + + +Returns the full ContextFlags mask, using the return value +the module can check if a certain set of flags are supported +by the redis server version in use. 
+Example: + + int supportedFlags = RM_GetContextFlagsAll(); + if (supportedFlags & REDISMODULE_CTX_FLAGS_MULTI) { + // REDISMODULE_CTX_FLAGS_MULTI is supported + } else{ + // REDISMODULE_CTX_FLAGS_MULTI is not supported + } + + + +### `RedisModule_GetKeyspaceNotificationFlagsAll` - int RedisModule_ModuleTypeReplaceValue(RedisModuleKey *key, moduleType *mt, void *new_value, void **old_value); + int RedisModule_GetKeyspaceNotificationFlagsAll(); + + +Returns the full KeyspaceNotification mask, using the return value +the module can check if a certain set of flags are supported +by the redis server version in use. +Example: + + int supportedFlags = RM_GetKeyspaceNotificationFlagsAll(); + if (supportedFlags & REDISMODULE_NOTIFY_LOADED) { + // REDISMODULE_NOTIFY_LOADED is supported + } else{ + // REDISMODULE_NOTIFY_LOADED is not supported + } + + + +### `RedisModule_GetServerVersion` + + int RedisModule_GetServerVersion(); + + +Return the redis version in format of 0x00MMmmpp. +Example for 6.0.7 the return value will be 0x00060007. + + + +### `RedisModule_GetTypeMethodVersion` + + int RedisModule_GetTypeMethodVersion(); + + +Return the current redis-server runtime value of `REDISMODULE_TYPE_METHOD_VERSION`. +You can use that when calling [`RedisModule_CreateDataType`](#RedisModule_CreateDataType) to know which fields of +`RedisModuleTypeMethods` are gonna be supported and which will be ignored. + + + +### `RedisModule_ModuleTypeReplaceValue` + + int RedisModule_ModuleTypeReplaceValue(RedisModuleKey *key, + moduleType *mt, + void *new_value, + void **old_value); Replace the value assigned to a module type. The key must be open for writing, have an existing value, and have a moduleType that matches the one specified by the caller. 
-Unlike `RM_ModuleTypeSetValue()` which will free the old value, this function +Unlike [`RedisModule_ModuleTypeSetValue()`](#RedisModule_ModuleTypeSetValue) which will free the old value, this function simply swaps the old value with the new value. The function returns `REDISMODULE_OK` on success, `REDISMODULE_ERR` on errors @@ -3128,5 +4736,402 @@ such as: 2. Key is not a module data type key. 3. Key is a module datatype other than 'mt'. -If old_value is non-NULL, the old value is returned by reference. +If `old_value` is non-NULL, the old value is returned by reference. + + + +### `RedisModule_GetCommandKeys` + + int *RedisModule_GetCommandKeys(RedisModuleCtx *ctx, + RedisModuleString **argv, + int argc, + int *num_keys); + +For a specified command, parse its arguments and return an array that +contains the indexes of all key name arguments. This function is +essnetially a more efficient way to do COMMAND GETKEYS. + +A NULL return value indicates the specified command has no keys, or +an error condition. Error conditions are indicated by setting errno +as folllows: + +* ENOENT: Specified command does not exist. +* EINVAL: Invalid command arity specified. + +NOTE: The returned array is not a Redis Module object so it does not +get automatically freed even when auto-memory is used. The caller +must explicitly call [`RedisModule_Free()`](#RedisModule_Free) to free it. + + + +## Defrag API + + + +### `RedisModule_RegisterDefragFunc` + + int RedisModule_RegisterDefragFunc(RedisModuleCtx *ctx, + RedisModuleDefragFunc cb); + +Register a defrag callback for global data, i.e. anything that the module +may allocate that is not tied to a specific data type. + + + +### `RedisModule_DefragShouldStop` + + int RedisModule_DefragShouldStop(RedisModuleDefragCtx *ctx); + +When the data type defrag callback iterates complex structures, this +function should be called periodically. A zero (false) return +indicates the callback may continue its work. 
A non-zero value (true) +indicates it should stop. + +When stopped, the callback may use [`RedisModule_DefragCursorSet()`](#RedisModule_DefragCursorSet) to store its +position so it can later use [`RedisModule_DefragCursorGet()`](#RedisModule_DefragCursorGet) to resume defragging. + +When stopped and more work is left to be done, the callback should +return 1. Otherwise, it should return 0. + +NOTE: Modules should consider the frequency in which this function is called, +so it generally makes sense to do small batches of work in between calls. + + + +### `RedisModule_DefragCursorSet` + + int RedisModule_DefragCursorSet(RedisModuleDefragCtx *ctx, + unsigned long cursor); + +Store an arbitrary cursor value for future re-use. + +This should only be called if [`RedisModule_DefragShouldStop()`](#RedisModule_DefragShouldStop) has returned a non-zero +value and the defrag callback is about to exit without fully iterating its +data type. + +This behavior is reserved to cases where late defrag is performed. Late +defrag is selected for keys that implement the `free_effort` callback and +return a `free_effort` value that is larger than the defrag +'active-defrag-max-scan-fields' configuration directive. + +Smaller keys, keys that do not implement `free_effort` or the global +defrag callback are not called in late-defrag mode. In those cases, a +call to this function will return `REDISMODULE_ERR`. + +The cursor may be used by the module to represent some progress into the +module's data type. Modules may also store additional cursor-related +information locally and use the cursor as a flag that indicates when +traversal of a new key begins. This is possible because the API makes +a guarantee that concurrent defragmentation of multiple keys will +not be performed. 
+ + + +### `RedisModule_DefragCursorGet` + + int RedisModule_DefragCursorGet(RedisModuleDefragCtx *ctx, + unsigned long *cursor); + +Fetch a cursor value that has been previously stored using [`RedisModule_DefragCursorSet()`](#RedisModule_DefragCursorSet). + +If not called for a late defrag operation, `REDISMODULE_ERR` will be returned and +the cursor should be ignored. See [`RedisModule_DefragCursorSet()`](#RedisModule_DefragCursorSet) for more details on +defrag cursors. + + + +### `RedisModule_DefragAlloc` + + void *RedisModule_DefragAlloc(RedisModuleDefragCtx *ctx, void *ptr); + +Defrag a memory allocation previously allocated by [`RedisModule_Alloc`](#RedisModule_Alloc), [`RedisModule_Calloc`](#RedisModule_Calloc), etc. +The defragmentation process involves allocating a new memory block and copying +the contents to it, like `realloc()`. + +If defragmentation was not necessary, NULL is returned and the operation has +no other effect. + +If a non-NULL value is returned, the caller should use the new pointer instead +of the old one and update any reference to the old pointer, which must not +be used again. + + + +### `RedisModule_DefragRedisModuleString` + + RedisModuleString *RedisModule_DefragRedisModuleString(RedisModuleDefragCtx *ctx, + RedisModuleString *str); + +Defrag a `RedisModuleString` previously allocated by [`RedisModule_Alloc`](#RedisModule_Alloc), [`RedisModule_Calloc`](#RedisModule_Calloc), etc. +See [`RedisModule_DefragAlloc()`](#RedisModule_DefragAlloc) for more information on how the defragmentation process +works. + +NOTE: It is only possible to defrag strings that have a single reference. +Typically this means strings retained with [`RedisModule_RetainString`](#RedisModule_RetainString) or [`RedisModule_HoldString`](#RedisModule_HoldString) +may not be defragmentable. 
One exception is command argvs which, if retained +by the module, will end up with a single reference (because the reference +on the Redis side is dropped as soon as the command callback returns). + + + +## Function index + +* [`RedisModule_AbortBlock`](#RedisModule_AbortBlock) +* [`RedisModule_Alloc`](#RedisModule_Alloc) +* [`RedisModule_AuthenticateClientWithACLUser`](#RedisModule_AuthenticateClientWithACLUser) +* [`RedisModule_AuthenticateClientWithUser`](#RedisModule_AuthenticateClientWithUser) +* [`RedisModule_AutoMemory`](#RedisModule_AutoMemory) +* [`RedisModule_AvoidReplicaTraffic`](#RedisModule_AvoidReplicaTraffic) +* [`RedisModule_BlockClient`](#RedisModule_BlockClient) +* [`RedisModule_BlockClientOnKeys`](#RedisModule_BlockClientOnKeys) +* [`RedisModule_BlockedClientDisconnected`](#RedisModule_BlockedClientDisconnected) +* [`RedisModule_BlockedClientMeasureTimeEnd`](#RedisModule_BlockedClientMeasureTimeEnd) +* [`RedisModule_BlockedClientMeasureTimeStart`](#RedisModule_BlockedClientMeasureTimeStart) +* [`RedisModule_Call`](#RedisModule_Call) +* [`RedisModule_CallReplyArrayElement`](#RedisModule_CallReplyArrayElement) +* [`RedisModule_CallReplyInteger`](#RedisModule_CallReplyInteger) +* [`RedisModule_CallReplyLength`](#RedisModule_CallReplyLength) +* [`RedisModule_CallReplyProto`](#RedisModule_CallReplyProto) +* [`RedisModule_CallReplyStringPtr`](#RedisModule_CallReplyStringPtr) +* [`RedisModule_CallReplyType`](#RedisModule_CallReplyType) +* [`RedisModule_Calloc`](#RedisModule_Calloc) +* [`RedisModule_CloseKey`](#RedisModule_CloseKey) +* [`RedisModule_CommandFilterArgDelete`](#RedisModule_CommandFilterArgDelete) +* [`RedisModule_CommandFilterArgGet`](#RedisModule_CommandFilterArgGet) +* [`RedisModule_CommandFilterArgInsert`](#RedisModule_CommandFilterArgInsert) +* [`RedisModule_CommandFilterArgReplace`](#RedisModule_CommandFilterArgReplace) +* [`RedisModule_CommandFilterArgsCount`](#RedisModule_CommandFilterArgsCount) +* 
[`RedisModule_CreateCommand`](#RedisModule_CreateCommand) +* [`RedisModule_CreateDataType`](#RedisModule_CreateDataType) +* [`RedisModule_CreateDict`](#RedisModule_CreateDict) +* [`RedisModule_CreateModuleUser`](#RedisModule_CreateModuleUser) +* [`RedisModule_CreateString`](#RedisModule_CreateString) +* [`RedisModule_CreateStringFromCallReply`](#RedisModule_CreateStringFromCallReply) +* [`RedisModule_CreateStringFromDouble`](#RedisModule_CreateStringFromDouble) +* [`RedisModule_CreateStringFromLongDouble`](#RedisModule_CreateStringFromLongDouble) +* [`RedisModule_CreateStringFromLongLong`](#RedisModule_CreateStringFromLongLong) +* [`RedisModule_CreateStringFromStreamID`](#RedisModule_CreateStringFromStreamID) +* [`RedisModule_CreateStringFromString`](#RedisModule_CreateStringFromString) +* [`RedisModule_CreateStringPrintf`](#RedisModule_CreateStringPrintf) +* [`RedisModule_CreateTimer`](#RedisModule_CreateTimer) +* [`RedisModule_DbSize`](#RedisModule_DbSize) +* [`RedisModule_DeauthenticateAndCloseClient`](#RedisModule_DeauthenticateAndCloseClient) +* [`RedisModule_DefragAlloc`](#RedisModule_DefragAlloc) +* [`RedisModule_DefragCursorGet`](#RedisModule_DefragCursorGet) +* [`RedisModule_DefragCursorSet`](#RedisModule_DefragCursorSet) +* [`RedisModule_DefragRedisModuleString`](#RedisModule_DefragRedisModuleString) +* [`RedisModule_DefragShouldStop`](#RedisModule_DefragShouldStop) +* [`RedisModule_DeleteKey`](#RedisModule_DeleteKey) +* [`RedisModule_DictCompare`](#RedisModule_DictCompare) +* [`RedisModule_DictCompareC`](#RedisModule_DictCompareC) +* [`RedisModule_DictDel`](#RedisModule_DictDel) +* [`RedisModule_DictDelC`](#RedisModule_DictDelC) +* [`RedisModule_DictGet`](#RedisModule_DictGet) +* [`RedisModule_DictGetC`](#RedisModule_DictGetC) +* [`RedisModule_DictIteratorReseek`](#RedisModule_DictIteratorReseek) +* [`RedisModule_DictIteratorReseekC`](#RedisModule_DictIteratorReseekC) +* [`RedisModule_DictIteratorStart`](#RedisModule_DictIteratorStart) +* 
[`RedisModule_DictIteratorStartC`](#RedisModule_DictIteratorStartC) +* [`RedisModule_DictIteratorStop`](#RedisModule_DictIteratorStop) +* [`RedisModule_DictNext`](#RedisModule_DictNext) +* [`RedisModule_DictNextC`](#RedisModule_DictNextC) +* [`RedisModule_DictPrev`](#RedisModule_DictPrev) +* [`RedisModule_DictPrevC`](#RedisModule_DictPrevC) +* [`RedisModule_DictReplace`](#RedisModule_DictReplace) +* [`RedisModule_DictReplaceC`](#RedisModule_DictReplaceC) +* [`RedisModule_DictSet`](#RedisModule_DictSet) +* [`RedisModule_DictSetC`](#RedisModule_DictSetC) +* [`RedisModule_DictSize`](#RedisModule_DictSize) +* [`RedisModule_DigestAddLongLong`](#RedisModule_DigestAddLongLong) +* [`RedisModule_DigestAddStringBuffer`](#RedisModule_DigestAddStringBuffer) +* [`RedisModule_DigestEndSequence`](#RedisModule_DigestEndSequence) +* [`RedisModule_EmitAOF`](#RedisModule_EmitAOF) +* [`RedisModule_ExitFromChild`](#RedisModule_ExitFromChild) +* [`RedisModule_ExportSharedAPI`](#RedisModule_ExportSharedAPI) +* [`RedisModule_Fork`](#RedisModule_Fork) +* [`RedisModule_Free`](#RedisModule_Free) +* [`RedisModule_FreeCallReply`](#RedisModule_FreeCallReply) +* [`RedisModule_FreeClusterNodesList`](#RedisModule_FreeClusterNodesList) +* [`RedisModule_FreeDict`](#RedisModule_FreeDict) +* [`RedisModule_FreeModuleUser`](#RedisModule_FreeModuleUser) +* [`RedisModule_FreeServerInfo`](#RedisModule_FreeServerInfo) +* [`RedisModule_FreeString`](#RedisModule_FreeString) +* [`RedisModule_FreeThreadSafeContext`](#RedisModule_FreeThreadSafeContext) +* [`RedisModule_GetAbsExpire`](#RedisModule_GetAbsExpire) +* [`RedisModule_GetBlockedClientHandle`](#RedisModule_GetBlockedClientHandle) +* [`RedisModule_GetBlockedClientPrivateData`](#RedisModule_GetBlockedClientPrivateData) +* [`RedisModule_GetBlockedClientReadyKey`](#RedisModule_GetBlockedClientReadyKey) +* [`RedisModule_GetClientCertificate`](#RedisModule_GetClientCertificate) +* [`RedisModule_GetClientId`](#RedisModule_GetClientId) +* 
[`RedisModule_GetClientInfoById`](#RedisModule_GetClientInfoById) +* [`RedisModule_GetClientUserNameById`](#RedisModule_GetClientUserNameById) +* [`RedisModule_GetClusterNodeInfo`](#RedisModule_GetClusterNodeInfo) +* [`RedisModule_GetClusterNodesList`](#RedisModule_GetClusterNodesList) +* [`RedisModule_GetClusterSize`](#RedisModule_GetClusterSize) +* [`RedisModule_GetCommandKeys`](#RedisModule_GetCommandKeys) +* [`RedisModule_GetContextFlags`](#RedisModule_GetContextFlags) +* [`RedisModule_GetContextFlagsAll`](#RedisModule_GetContextFlagsAll) +* [`RedisModule_GetDetachedThreadSafeContext`](#RedisModule_GetDetachedThreadSafeContext) +* [`RedisModule_GetExpire`](#RedisModule_GetExpire) +* [`RedisModule_GetKeyNameFromIO`](#RedisModule_GetKeyNameFromIO) +* [`RedisModule_GetKeyNameFromModuleKey`](#RedisModule_GetKeyNameFromModuleKey) +* [`RedisModule_GetKeyspaceNotificationFlagsAll`](#RedisModule_GetKeyspaceNotificationFlagsAll) +* [`RedisModule_GetLFU`](#RedisModule_GetLFU) +* [`RedisModule_GetLRU`](#RedisModule_GetLRU) +* [`RedisModule_GetMyClusterID`](#RedisModule_GetMyClusterID) +* [`RedisModule_GetNotifyKeyspaceEvents`](#RedisModule_GetNotifyKeyspaceEvents) +* [`RedisModule_GetRandomBytes`](#RedisModule_GetRandomBytes) +* [`RedisModule_GetRandomHexChars`](#RedisModule_GetRandomHexChars) +* [`RedisModule_GetSelectedDb`](#RedisModule_GetSelectedDb) +* [`RedisModule_GetServerInfo`](#RedisModule_GetServerInfo) +* [`RedisModule_GetServerVersion`](#RedisModule_GetServerVersion) +* [`RedisModule_GetSharedAPI`](#RedisModule_GetSharedAPI) +* [`RedisModule_GetThreadSafeContext`](#RedisModule_GetThreadSafeContext) +* [`RedisModule_GetTimerInfo`](#RedisModule_GetTimerInfo) +* [`RedisModule_GetTypeMethodVersion`](#RedisModule_GetTypeMethodVersion) +* [`RedisModule_GetUsedMemoryRatio`](#RedisModule_GetUsedMemoryRatio) +* [`RedisModule_HashGet`](#RedisModule_HashGet) +* [`RedisModule_HashSet`](#RedisModule_HashSet) +* [`RedisModule_HoldString`](#RedisModule_HoldString) +* 
[`RedisModule_InfoAddFieldCString`](#RedisModule_InfoAddFieldCString) +* [`RedisModule_InfoAddFieldDouble`](#RedisModule_InfoAddFieldDouble) +* [`RedisModule_InfoAddFieldLongLong`](#RedisModule_InfoAddFieldLongLong) +* [`RedisModule_InfoAddFieldString`](#RedisModule_InfoAddFieldString) +* [`RedisModule_InfoAddFieldULongLong`](#RedisModule_InfoAddFieldULongLong) +* [`RedisModule_InfoAddSection`](#RedisModule_InfoAddSection) +* [`RedisModule_InfoBeginDictField`](#RedisModule_InfoBeginDictField) +* [`RedisModule_InfoEndDictField`](#RedisModule_InfoEndDictField) +* [`RedisModule_IsBlockedReplyRequest`](#RedisModule_IsBlockedReplyRequest) +* [`RedisModule_IsBlockedTimeoutRequest`](#RedisModule_IsBlockedTimeoutRequest) +* [`RedisModule_IsIOError`](#RedisModule_IsIOError) +* [`RedisModule_IsKeysPositionRequest`](#RedisModule_IsKeysPositionRequest) +* [`RedisModule_IsModuleNameBusy`](#RedisModule_IsModuleNameBusy) +* [`RedisModule_IsSubEventSupported`](#RedisModule_IsSubEventSupported) +* [`RedisModule_KeyAtPos`](#RedisModule_KeyAtPos) +* [`RedisModule_KeyType`](#RedisModule_KeyType) +* [`RedisModule_KillForkChild`](#RedisModule_KillForkChild) +* [`RedisModule_LatencyAddSample`](#RedisModule_LatencyAddSample) +* [`RedisModule_ListPop`](#RedisModule_ListPop) +* [`RedisModule_ListPush`](#RedisModule_ListPush) +* [`RedisModule_LoadDataTypeFromString`](#RedisModule_LoadDataTypeFromString) +* [`RedisModule_LoadDouble`](#RedisModule_LoadDouble) +* [`RedisModule_LoadFloat`](#RedisModule_LoadFloat) +* [`RedisModule_LoadLongDouble`](#RedisModule_LoadLongDouble) +* [`RedisModule_LoadSigned`](#RedisModule_LoadSigned) +* [`RedisModule_LoadString`](#RedisModule_LoadString) +* [`RedisModule_LoadStringBuffer`](#RedisModule_LoadStringBuffer) +* [`RedisModule_LoadUnsigned`](#RedisModule_LoadUnsigned) +* [`RedisModule_Log`](#RedisModule_Log) +* [`RedisModule_LogIOError`](#RedisModule_LogIOError) +* [`RedisModule_MallocSize`](#RedisModule_MallocSize) +* 
[`RedisModule_Milliseconds`](#RedisModule_Milliseconds) +* [`RedisModule_ModuleTypeGetType`](#RedisModule_ModuleTypeGetType) +* [`RedisModule_ModuleTypeGetValue`](#RedisModule_ModuleTypeGetValue) +* [`RedisModule_ModuleTypeReplaceValue`](#RedisModule_ModuleTypeReplaceValue) +* [`RedisModule_ModuleTypeSetValue`](#RedisModule_ModuleTypeSetValue) +* [`RedisModule_NotifyKeyspaceEvent`](#RedisModule_NotifyKeyspaceEvent) +* [`RedisModule_OpenKey`](#RedisModule_OpenKey) +* [`RedisModule_PoolAlloc`](#RedisModule_PoolAlloc) +* [`RedisModule_PublishMessage`](#RedisModule_PublishMessage) +* [`RedisModule_RandomKey`](#RedisModule_RandomKey) +* [`RedisModule_Realloc`](#RedisModule_Realloc) +* [`RedisModule_RegisterClusterMessageReceiver`](#RedisModule_RegisterClusterMessageReceiver) +* [`RedisModule_RegisterDefragFunc`](#RedisModule_RegisterDefragFunc) +* [`RedisModule_RegisterInfoFunc`](#RedisModule_RegisterInfoFunc) +* [`RedisModule_Replicate`](#RedisModule_Replicate) +* [`RedisModule_ReplicateVerbatim`](#RedisModule_ReplicateVerbatim) +* [`RedisModule_ReplySetArrayLength`](#RedisModule_ReplySetArrayLength) +* [`RedisModule_ReplyWithArray`](#RedisModule_ReplyWithArray) +* [`RedisModule_ReplyWithCString`](#RedisModule_ReplyWithCString) +* [`RedisModule_ReplyWithCallReply`](#RedisModule_ReplyWithCallReply) +* [`RedisModule_ReplyWithDouble`](#RedisModule_ReplyWithDouble) +* [`RedisModule_ReplyWithEmptyArray`](#RedisModule_ReplyWithEmptyArray) +* [`RedisModule_ReplyWithEmptyString`](#RedisModule_ReplyWithEmptyString) +* [`RedisModule_ReplyWithError`](#RedisModule_ReplyWithError) +* [`RedisModule_ReplyWithLongDouble`](#RedisModule_ReplyWithLongDouble) +* [`RedisModule_ReplyWithLongLong`](#RedisModule_ReplyWithLongLong) +* [`RedisModule_ReplyWithNull`](#RedisModule_ReplyWithNull) +* [`RedisModule_ReplyWithNullArray`](#RedisModule_ReplyWithNullArray) +* [`RedisModule_ReplyWithSimpleString`](#RedisModule_ReplyWithSimpleString) +* 
[`RedisModule_ReplyWithString`](#RedisModule_ReplyWithString) +* [`RedisModule_ReplyWithStringBuffer`](#RedisModule_ReplyWithStringBuffer) +* [`RedisModule_ReplyWithVerbatimString`](#RedisModule_ReplyWithVerbatimString) +* [`RedisModule_ResetDataset`](#RedisModule_ResetDataset) +* [`RedisModule_RetainString`](#RedisModule_RetainString) +* [`RedisModule_SaveDataTypeToString`](#RedisModule_SaveDataTypeToString) +* [`RedisModule_SaveDouble`](#RedisModule_SaveDouble) +* [`RedisModule_SaveFloat`](#RedisModule_SaveFloat) +* [`RedisModule_SaveLongDouble`](#RedisModule_SaveLongDouble) +* [`RedisModule_SaveSigned`](#RedisModule_SaveSigned) +* [`RedisModule_SaveString`](#RedisModule_SaveString) +* [`RedisModule_SaveStringBuffer`](#RedisModule_SaveStringBuffer) +* [`RedisModule_SaveUnsigned`](#RedisModule_SaveUnsigned) +* [`RedisModule_Scan`](#RedisModule_Scan) +* [`RedisModule_ScanCursorCreate`](#RedisModule_ScanCursorCreate) +* [`RedisModule_ScanCursorDestroy`](#RedisModule_ScanCursorDestroy) +* [`RedisModule_ScanCursorRestart`](#RedisModule_ScanCursorRestart) +* [`RedisModule_ScanKey`](#RedisModule_ScanKey) +* [`RedisModule_SelectDb`](#RedisModule_SelectDb) +* [`RedisModule_SendChildHeartbeat`](#RedisModule_SendChildHeartbeat) +* [`RedisModule_SendClusterMessage`](#RedisModule_SendClusterMessage) +* [`RedisModule_ServerInfoGetField`](#RedisModule_ServerInfoGetField) +* [`RedisModule_ServerInfoGetFieldC`](#RedisModule_ServerInfoGetFieldC) +* [`RedisModule_ServerInfoGetFieldDouble`](#RedisModule_ServerInfoGetFieldDouble) +* [`RedisModule_ServerInfoGetFieldSigned`](#RedisModule_ServerInfoGetFieldSigned) +* [`RedisModule_ServerInfoGetFieldUnsigned`](#RedisModule_ServerInfoGetFieldUnsigned) +* [`RedisModule_SetAbsExpire`](#RedisModule_SetAbsExpire) +* [`RedisModule_SetClusterFlags`](#RedisModule_SetClusterFlags) +* [`RedisModule_SetDisconnectCallback`](#RedisModule_SetDisconnectCallback) +* [`RedisModule_SetExpire`](#RedisModule_SetExpire) +* 
[`RedisModule_SetLFU`](#RedisModule_SetLFU) +* [`RedisModule_SetLRU`](#RedisModule_SetLRU) +* [`RedisModule_SetModuleOptions`](#RedisModule_SetModuleOptions) +* [`RedisModule_SetModuleUserACL`](#RedisModule_SetModuleUserACL) +* [`RedisModule_SignalKeyAsReady`](#RedisModule_SignalKeyAsReady) +* [`RedisModule_SignalModifiedKey`](#RedisModule_SignalModifiedKey) +* [`RedisModule_StopTimer`](#RedisModule_StopTimer) +* [`RedisModule_Strdup`](#RedisModule_Strdup) +* [`RedisModule_StreamAdd`](#RedisModule_StreamAdd) +* [`RedisModule_StreamDelete`](#RedisModule_StreamDelete) +* [`RedisModule_StreamIteratorDelete`](#RedisModule_StreamIteratorDelete) +* [`RedisModule_StreamIteratorNextField`](#RedisModule_StreamIteratorNextField) +* [`RedisModule_StreamIteratorNextID`](#RedisModule_StreamIteratorNextID) +* [`RedisModule_StreamIteratorStart`](#RedisModule_StreamIteratorStart) +* [`RedisModule_StreamIteratorStop`](#RedisModule_StreamIteratorStop) +* [`RedisModule_StreamTrimByID`](#RedisModule_StreamTrimByID) +* [`RedisModule_StreamTrimByLength`](#RedisModule_StreamTrimByLength) +* [`RedisModule_StringAppendBuffer`](#RedisModule_StringAppendBuffer) +* [`RedisModule_StringCompare`](#RedisModule_StringCompare) +* [`RedisModule_StringDMA`](#RedisModule_StringDMA) +* [`RedisModule_StringPtrLen`](#RedisModule_StringPtrLen) +* [`RedisModule_StringSet`](#RedisModule_StringSet) +* [`RedisModule_StringToDouble`](#RedisModule_StringToDouble) +* [`RedisModule_StringToLongDouble`](#RedisModule_StringToLongDouble) +* [`RedisModule_StringToLongLong`](#RedisModule_StringToLongLong) +* [`RedisModule_StringToStreamID`](#RedisModule_StringToStreamID) +* [`RedisModule_StringTruncate`](#RedisModule_StringTruncate) +* [`RedisModule_SubscribeToKeyspaceEvents`](#RedisModule_SubscribeToKeyspaceEvents) +* [`RedisModule_SubscribeToServerEvent`](#RedisModule_SubscribeToServerEvent) +* [`RedisModule_ThreadSafeContextLock`](#RedisModule_ThreadSafeContextLock) +* 
[`RedisModule_ThreadSafeContextTryLock`](#RedisModule_ThreadSafeContextTryLock) +* [`RedisModule_ThreadSafeContextUnlock`](#RedisModule_ThreadSafeContextUnlock) +* [`RedisModule_UnblockClient`](#RedisModule_UnblockClient) +* [`RedisModule_UnlinkKey`](#RedisModule_UnlinkKey) +* [`RedisModule_UnregisterCommandFilter`](#RedisModule_UnregisterCommandFilter) +* [`RedisModule_ValueLength`](#RedisModule_ValueLength) +* [`RedisModule_WrongArity`](#RedisModule_WrongArity) +* [`RedisModule_ZsetAdd`](#RedisModule_ZsetAdd) +* [`RedisModule_ZsetFirstInLexRange`](#RedisModule_ZsetFirstInLexRange) +* [`RedisModule_ZsetFirstInScoreRange`](#RedisModule_ZsetFirstInScoreRange) +* [`RedisModule_ZsetIncrby`](#RedisModule_ZsetIncrby) +* [`RedisModule_ZsetLastInLexRange`](#RedisModule_ZsetLastInLexRange) +* [`RedisModule_ZsetLastInScoreRange`](#RedisModule_ZsetLastInScoreRange) +* [`RedisModule_ZsetRangeCurrentElement`](#RedisModule_ZsetRangeCurrentElement) +* [`RedisModule_ZsetRangeEndReached`](#RedisModule_ZsetRangeEndReached) +* [`RedisModule_ZsetRangeNext`](#RedisModule_ZsetRangeNext) +* [`RedisModule_ZsetRangePrev`](#RedisModule_ZsetRangePrev) +* [`RedisModule_ZsetRangeStop`](#RedisModule_ZsetRangeStop) +* [`RedisModule_ZsetRem`](#RedisModule_ZsetRem) +* [`RedisModule_ZsetScore`](#RedisModule_ZsetScore) +* [`RedisModule__Assert`](#RedisModule__Assert) diff --git a/topics/modules-intro.md b/topics/modules-intro.md index fdc8586f..564252c0 100644 --- a/topics/modules-intro.md +++ b/topics/modules-intro.md @@ -8,12 +8,12 @@ The modules documentation is composed of the following pages: * [Blocking operations](/topics/modules-blocking-ops) shows how to write blocking commands that will not reply immediately, but will block the client, without blocking the Redis server, and will provide a reply whenever will be possible. * [Redis modules API reference](/topics/modules-api-ref) is generated from module.c top comments of RedisModule functions. 
It is a good reference in order to understand how each function works. -Redis modules make possible to extend Redis functionality using external -modules, implementing new Redis commands at a speed and with features +Redis modules make it possible to extend Redis functionality using external +modules, rapidly implementing new Redis commands with features similar to what can be done inside the core itself. -Redis modules are dynamic libraries, that can be loaded into Redis at -startup or using the `MODULE LOAD` command. Redis exports a C API, in the +Redis modules are dynamic libraries that can be loaded into Redis at +startup, or using the `MODULE LOAD` command. Redis exports a C API, in the form of a single C header file called `redismodule.h`. Modules are meant to be written in C, however it will be possible to use C++ or other languages that have C binding functionalities. diff --git a/topics/notifications.md b/topics/notifications.md index bf2c0f3b..c8a3051b 100644 --- a/topics/notifications.md +++ b/topics/notifications.md @@ -78,10 +78,11 @@ following table: h Hash commands z Sorted set commands t Stream commands + d Module key type events x Expired events (events generated every time a key expires) e Evicted events (events generated when a key is evicted for maxmemory) m Key miss events (events generated when a key that doesn't exist is accessed) - A Alias for "g$lshztxe", so that the "AKE" string means all the events except "m". + A Alias for "g$lshztxed", so that the "AKE" string means all the events except "m". At least `K` or `E` should be present in the string, otherwise no event will be delivered regardless of the rest of the string. @@ -99,6 +100,7 @@ Different commands generate different kind of events according to the following * `DEL` generates a `del` event for every deleted key. * `RENAME` generates two events, a `rename_from` event for the source key, and a `rename_to` event for the destination key. 
* `MOVE` generates two events, a `move_from` event for the source key, and a `move_to` event for the destination key. +* `COPY` generates a `copy_to` event. * `MIGRATE` generates a `del` event if the source key is removed. * `RESTORE` generates a `restore` event for the key. * `EXPIRE` and all its variants (`PEXPIRE`, `EXPIREAT`, `PEXPIREAT`) generate an `expire` event when called with a positive timeout (or a future timestamp). Note that when these commands are called with a negative timeout value or timestamp in the past, the key is deleted and only a `del` event is generated instead. @@ -133,7 +135,7 @@ Different commands generate different kind of events according to the following * `ZREM` generates a single `zrem` event even when multiple elements are deleted. When the resulting sorted set is empty and the key is generated, an additional `del` event is generated. * `ZREMBYSCORE` generates a single `zrembyscore` event. When the resulting sorted set is empty and the key is generated, an additional `del` event is generated. * `ZREMBYRANK` generates a single `zrembyrank` event. When the resulting sorted set is empty and the key is generated, an additional `del` event is generated. -* `ZINTERSTORE` and `ZUNIONSTORE` respectively generate `zinterstore` and `zunionstore` events. In the special case the resulting sorted set is empty, and the key where the result is stored already exists, a `del` event is generated since the key is removed. +* `ZDIFFSTORE`, `ZINTERSTORE` and `ZUNIONSTORE` respectively generate `zdiffstore`, `zinterstore` and `zunionstore` events. In the special case the resulting sorted set is empty, and the key where the result is stored already exists, a `del` event is generated since the key is removed. * `XADD` generates an `xadd` event, possibly followed an `xtrim` event when used with the `MAXLEN` subcommand. * `XDEL` generates a single `xdel` event even when multiple entries are deleted. * `XGROUP CREATE` generates an `xgroup-create` event. 
diff --git a/topics/partitioning.md b/topics/partitioning.md index bba6ac25..d7c626d6 100644 --- a/topics/partitioning.md +++ b/topics/partitioning.md @@ -114,6 +114,6 @@ You can read more about Twemproxy [in this antirez blog post](http://antirez.com Clients supporting consistent hashing --- -An alternative to Twemproxy is to use a client that implements client side partitioning via consistent hashing or other similar algorithms. There are multiple Redis clients with support for consistent hashing, notably [Redis-rb](https://github.com/redis/redis-rb) and [Predis](https://github.com/nrk/predis). +An alternative to Twemproxy is to use a client that implements client side partitioning via consistent hashing or other similar algorithms. There are multiple Redis clients with support for consistent hashing, notably [Redis-rb](https://github.com/redis/redis-rb), [Predis](https://github.com/nrk/predis) and [Jedis](https://github.com/redis/jedis). Please check the [full list of Redis clients](https://redis.io/clients) to check if there is a mature client with consistent hashing implementation for your language. diff --git a/topics/performance-on-cpu.md b/topics/performance-on-cpu.md new file mode 100644 index 00000000..128d6892 --- /dev/null +++ b/topics/performance-on-cpu.md @@ -0,0 +1,226 @@ +Redis performance engineering guide - on-CPU profiling and tracing +=== + + +Filling the performance checklist +------------ + +Redis is developed with a great emphasis on performance: +We do our best with every release to make sure you'll experience a very stable and fast product. + +Nonetheless, whether you're finding room for improving the efficiency of Redis +or pursuing a performance regression investigation, you will need a concise, methodical +way of monitoring and analyzing Redis performance. + + +To do so you can rely on different methodologies (some more suited than others +depending on the class of issues/analysis we intend to make). 
+A curated list of methodologies and their steps are enumerated by Brendan Gregg +on the [following link](http://www.brendangregg.com/methodology.html). + + +We recommend the Utilization Saturation and Errors (USE) Method for answering the question of what is your bottleneck. +Check the following mapping between system resource, metric and tools for a practical deep dive +[[USE method](http://www.brendangregg.com/USEmethod/use-rosetta.html)]. + + +### Ensuring the CPU is your bottleneck + +This guide assumes you've followed one of the above methodologies to perform a +complete check of system health, and identified the bottleneck being the CPU. +**If you've identified that most of the time is spent blocked on I/O, locks, timers, +paging/swapping, etc, this guide is not for you**. + +Further guides on Off-CPU Analysis are in the process of being released. +Help on filling the blanks on the new "guides" or improving this one is most welcome! + + +### Build Prerequisites + +For a proper On-CPU analysis, Redis (and any dynamically loaded library like Redis Modules) +requires stack traces to be available to tracers, which you may need to fix first. + +By default, Redis is compiled with the `-O2` switch (which we intend to keep during profiling). +This means that compiler optimizations are enabled. +Many compilers omit the frame pointer as a runtime optimization (saving a register), thus breaking frame pointer-based stack walking. +This makes the Redis executable faster, but at the same time it makes Redis (like any other program) harder to trace, potentially wrongfully pinpointing on-CPU time to the last available frame pointer of a call stack that can get a lot deeper (but impossible to trace). 
+ +It's important that you ensure: +- debug information is present: compile option `-g` +- frame pointer register is present: `-fno-omit-frame-pointer` +- we still run with optimizations to get an accurate representation of production run times, meaning we will keep: `-O2` + +You can do it as follows within redis main repo + + make REDIS_CFLAGS="-g -fno-omit-frame-pointer" + + +A set of instruments to identify performance regressions and/or potential **on-CPU performance** improvements +------------ + +This document focuses specifically on **on-CPU** resource bottlenecks analysis, meaning we're interested in understanding where threads are spending CPU cycles while running on-CPU and, as importantly, whether those cycles are effectively being used for computation or stalled waiting (not blocked!) for memory I/O, and cache misses, etc... + +For that we will rely on toolkits (perf, bcc tools), and hardware specific PMCs (Performance Monitoring Counters), to proceed with: + +- Hotspot analysis (perf or bcc tools): to profile code execution and determine which functions are consuming the most time and thus are targets for optimization. + We'll present two options to collect, report and visualize hotspots either with perf or bcc/BPF tracing tools. + +- Call counts analysis: to count events including function calls, enabling us to correlate several calls/components at once, relying on bcc/BPF tracing tools. + +- Hardware event sampling: crucial for understanding CPU behavior, including memory I/O, stall cycles, and cache misses. + + +### Tool prerequisites + +The following steps rely on Linux perf_events (aka ["perf"](https://man7.org/linux/man-pages/man1/perf.1.html)), [bcc/BPF tracing tools](https://github.com/iovisor/bcc) and Brendan Gregg’s [FlameGraph repo](https://github.com/brendangregg/FlameGraph). + +We assume beforehand you have: + +- Installed the perf tool on your system. + Most Linux distributions will likely package this as a package related to the kernel. 
+ More information about the perf tool can be found at perf [wiki](https://perf.wiki.kernel.org/). + +- Followed the Install [bcc/BPF](https://github.com/iovisor/bcc/blob/master/INSTALL.md#installing-bcc) instructions to install bcc toolkit on your machine. + +- Cloned Brendan Gregg’s [FlameGraph repo](https://github.com/brendangregg/FlameGraph) and made accessible the `difffolded.pl` and `flamegraph.pl` files, to generate the collapsed stack traces and Flame Graphs. + + +Hotspot analysis with perf or eBPF (stack traces sampling) +------------ + +Profiling CPU usage by sampling stack traces at a timed interval is a fast and easy way to identify performance-critical code sections (hotspots). + +### Sampling stack traces using perf + +To profile both user and kernel-level stacks of redis-server for a specific length of time, for example 60 seconds, at a sampling frequency of 999 samples per second: + + perf record -g --pid $(pgrep redis-server) -F 999 -- sleep 60 + + +#### Displaying the recorded profile information using perf report + + +By default perf record will generate a perf.data file in the current working directory. + +You can then report with a call-graph output (call chain, stack backtrace), with a minimum call graph inclusion threshold of 0.5%, with: + + perf report -g "graph,0.5,caller" + +See the [perf report](https://man7.org/linux/man-pages/man1/perf-report.1.html) documentation for advanced filtering, sorting and aggregation capabilities. + +#### Visualizing the recorded profile information using Flame Graphs + +[Flame graphs](http://www.brendangregg.com/flamegraphs.html) allow for a quick and accurate visualization of frequent code-paths. +They can be generated using Brendan Gregg's open source programs on [github](https://github.com/brendangregg/FlameGraph), which create interactive SVGs from folded stack files. + + +Specifically, for perf we need to convert the generated perf.data into the captured stacks, and fold each of them into single lines. 
+You can then render the on-CPU flame graph with: + + perf script > redis.perf.stacks + stackcollapse-perf.pl redis.perf.stacks > redis.folded.stacks + flamegraph.pl redis.folded.stacks > redis.svg + +By default, perf script will read the perf.data file in the current working directory. +See the [perf script](https://linux.die.net/man/1/perf-script.html) documentation for advanced usage. + +See [FlameGraph usage options](https://github.com/brendangregg/FlameGraph#options) for more advanced stack trace visualizations (like the differential one). + + +#### Archiving and sharing recorded profile information + +So that analysis of the perf.data contents can be possible on another machine than the one in which +collection happened, you need to export along with the perf.data file all object files +with build-ids found in the record data file. This can be easily done with the help of +[perf-archive.sh](https://github.com/torvalds/linux/blob/master/tools/perf/perf-archive.sh) script: + + perf-archive.sh perf.data + + +Now please run: + + tar xvf perf.data.tar.bz2 -C ~/.debug + +on the machine where you need to run `perf report`. + + +### Sampling stack traces using bcc/BPF's profile + +Similarly to perf, as of Linux kernel 4.9, BPF-optimized profiling is now fully available with the promise of lower overhead on CPU (as stack traces are frequency counted in +kernel context) and disk I/O resources during profiling. + +Apart from that, and relying solely on bcc/BPF's profile tool, we've also removed the perf.data and intermediate steps if stack traces analysis is our main goal. 
+You can use bcc's profile tool to output folded format directly, for flame graph generation: + + $ /usr/share/bcc/tools/profile -F 999 -f --pid $(pgrep redis-server) --duration 60 > redis.folded.stacks + +In that manner, we've removed any preprocessing and can render the on-CPU flame graph with a single command: + + flamegraph.pl redis.folded.stacks > redis.svg + + +### Visualizing the recorded profile information using Flame Graphs + +Call counts analysis with bcc/BPF +------------ + +A function may consume significant CPU cycles either because its code is slow or because it's frequently called. +To answer at what rate functions are being called, you can rely upon call counts analysis using BCC's `funccount` tool: + + $ /usr/share/bcc/tools/funccount 'redis-server:(call*|*Read*|*Write*)' --pid $(pgrep redis-server) --duration 60 + Tracing 64 functions for "redis-server:(call*|*Read*|*Write*)"... Hit Ctrl-C to end. + + FUNC COUNT + call 334 + handleClientsWithPendingWrites 388 + clientInstallWriteHandler 388 + postponeClientRead 514 + handleClientsWithPendingReadsUsingThreads 735 + handleClientsWithPendingWritesUsingThreads 735 + prepareClientToWrite 1442 + Detaching... + + +The above output shows that, while tracing, Redis's call() function was called 334 times, handleClientsWithPendingWrites() 388 times, etc. + + +Hardware event counting with Performance Monitoring Counters (PMCs) +------------ + +Many modern processors contain a performance monitoring unit (PMU) exposing Performance Monitoring Counters (PMCs). +PMCs are crucial for understanding CPU behavior, including memory I/O, stall cycles, and cache misses, and provide low-level CPU performance statistics that aren't available anywhere else. + +The design and functionality of a PMU is CPU-specific and you should assess your CPU supported counters and features by using `perf list`. 
+ +To calculate the number of instructions per cycle, the number of micro ops executed, the number of cycles during which no micro ops were dispatched, the number of stalled cycles on memory, including per-memory-type stalls, for the duration of 60s, specifically for the redis process: + + perf stat -e "cpu-clock,cpu-cycles,instructions,uops_executed.core,uops_executed.stall_cycles,cache-references,cache-misses,cycle_activity.stalls_total,cycle_activity.stalls_mem_any,cycle_activity.stalls_l3_miss,cycle_activity.stalls_l2_miss,cycle_activity.stalls_l1d_miss" --pid $(pgrep redis-server) -- sleep 60 + + Performance counter stats for process id '3038': + + 60046.411437 cpu-clock (msec) # 1.001 CPUs utilized + 168991975443 cpu-cycles # 2.814 GHz (36.40%) + 388248178431 instructions # 2.30 insn per cycle (45.50%) + 443134227322 uops_executed.core # 7379.862 M/sec (45.51%) + 30317116399 uops_executed.stall_cycles # 504.895 M/sec (45.51%) + 670821512 cache-references # 11.172 M/sec (45.52%) + 23727619 cache-misses # 3.537 % of all cache refs (45.43%) + 30278479141 cycle_activity.stalls_total # 504.251 M/sec (36.33%) + 19981138777 cycle_activity.stalls_mem_any # 332.762 M/sec (36.33%) + 725708324 cycle_activity.stalls_l3_miss # 12.086 M/sec (36.33%) + 8487905659 cycle_activity.stalls_l2_miss # 141.356 M/sec (36.32%) + 10011909368 cycle_activity.stalls_l1d_miss # 166.736 M/sec (36.31%) + + 60.002765665 seconds time elapsed + + +It's important to know that there are two very different ways in which PMCs can be used (counting and sampling), and we've focused solely on PMCs counting for the sake of this analysis. +Brendan Gregg clearly explains it on the following [link](http://www.brendangregg.com/blog/2017-05-04/the-pmcs-of-ec2.html). + + +Thank you +--------- + +Contributions and corrections are gratefully accepted. +Help filling in the blanks, like static or dynamic data parallelism analysis, is much appreciated. +New "guides" on Off-CPU Analysis are welcome, too! 
diff --git a/topics/persistence.md b/topics/persistence.md index eb5207b9..b9bd0570 100644 --- a/topics/persistence.md +++ b/topics/persistence.md @@ -5,10 +5,10 @@ Redis Persistence Redis provides a different range of persistence options: -* The RDB persistence performs point-in-time snapshots of your dataset at specified intervals. -* The AOF persistence logs every write operation received by the server, that will be played again at server startup, reconstructing the original dataset. Commands are logged using the same format as the Redis protocol itself, in an append-only fashion. Redis is able to rewrite the log in the background when it gets too big. -* If you wish, you can disable persistence completely, if you want your data to just exist as long as the server is running. -* It is possible to combine both AOF and RDB in the same instance. Notice that, in this case, when Redis restarts the AOF file will be used to reconstruct the original dataset since it is guaranteed to be the most complete. +* **RDB** (Redis Database): The RDB persistence performs point-in-time snapshots of your dataset at specified intervals. +* **AOF** (Append Only File): The AOF persistence logs every write operation received by the server, that will be played again at server startup, reconstructing the original dataset. Commands are logged using the same format as the Redis protocol itself, in an append-only fashion. Redis is able to rewrite the log in the background when it gets too big. +* **No persistence**: If you wish, you can disable persistence completely, if you want your data to just exist as long as the server is running. +* **RDB + AOF**: It is possible to combine both AOF and RDB in the same instance. Notice that, in this case, when Redis restarts the AOF file will be used to reconstruct the original dataset since it is guaranteed to be the most complete. The most important thing to understand is the different trade-offs between the RDB and AOF persistence. 
Let's start with RDB: @@ -20,6 +20,7 @@ RDB advantages * RDB is very good for disaster recovery, being a single compact file that can be transferred to far data centers, or onto Amazon S3 (possibly encrypted). * RDB maximizes Redis performances since the only work the Redis parent process needs to do in order to persist is forking a child that will do all the rest. The parent instance will never perform disk I/O or alike. * RDB allows faster restarts with big datasets compared to AOF. +* On replicas, RDB supports [partial resynchronizations after restarts and failovers](https://redis.io/topics/replication#partial-resynchronizations-after-restarts-and-failovers). RDB disadvantages --- @@ -33,7 +34,7 @@ AOF advantages * Using AOF Redis is much more durable: you can have different fsync policies: no fsync at all, fsync every second, fsync at every query. With the default policy of fsync every second write performances are still great (fsync is performed using a background thread and the main thread will try hard to perform writes when no fsync is in progress.) but you can only lose one second worth of writes. * The AOF log is an append only log, so there are no seeks, nor corruption problems if there is a power outage. Even if the log ends with an half-written command for some reason (disk full or other reasons) the redis-check-aof tool is able to fix it easily. * Redis is able to automatically rewrite the AOF in background when it gets too big. The rewrite is completely safe as while Redis continues appending to the old file, a completely new one is produced with the minimal set of operations needed to create the current data set, and once this second file is ready Redis switches the two and starts appending to the new one. -* AOF contains a log of all the operations one after the other in an easy to understand and parse format. You can even easily export an AOF file. 
For instance even if you flushed everything for an error using a `FLUSHALL` command, if no rewrite of the log was performed in the meantime you can still save your data set just stopping the server, removing the latest command, and restarting Redis again. +* AOF contains a log of all the operations one after the other in an easy to understand and parse format. You can even easily export an AOF file. For instance even if you've accidentally flushed everything using the `FLUSHALL` command, as long as no rewrite of the log was performed in the meantime, you can still save your data set just by stopping the server, removing the latest command, and restarting Redis again. AOF disadvantages --- diff --git a/topics/pipelining.md b/topics/pipelining.md index 3554eaf4..66935d35 100644 --- a/topics/pipelining.md +++ b/topics/pipelining.md @@ -22,9 +22,9 @@ So for instance a four commands sequence is something like this: * *Client:* INCR X * *Server:* 4 -Clients and Servers are connected via a networking link. Such a link can be very fast (a loopback interface) or very slow (a connection established over the Internet with many hops between the two hosts). Whatever the network latency is, there is a time for the packets to travel from the client to the server, and back from the server to the client to carry the reply. +Clients and Servers are connected via a network link. Such a link can be very fast (a loopback interface) or very slow (a connection established over the Internet with many hops between the two hosts). Whatever the network latency is, it takes time for the packets to travel from the client to the server, and back from the server to the client to carry the reply. -This time is called RTT (Round Trip Time). It is very easy to see how this can affect the performances when a client needs to perform many requests in a row (for instance adding many elements to the same list, or populating a database with many keys). 
For instance if the RTT time is 250 milliseconds (in the case of a very slow link over the Internet), even if the server is able to process 100k requests per second, we'll be able to process at max four requests per second. +This time is called RTT (Round Trip Time). It is very easy to see how this can affect performance when a client needs to perform many requests in a row (for instance adding many elements to the same list, or populating a database with many keys). For instance if the RTT time is 250 milliseconds (in the case of a very slow link over the Internet), even if the server is able to process 100k requests per second, we'll be able to process at max four requests per second. If the interface used is a loopback interface, the RTT is much shorter (for instance my host reports 0,044 milliseconds pinging 127.0.0.1), but it is still a lot if you need to perform many writes in a row. @@ -33,9 +33,9 @@ Fortunately there is a way to improve this use case. Redis Pipelining --- -A Request/Response server can be implemented so that it is able to process new requests even if the client didn't already read the old responses. This way it is possible to send *multiple commands* to the server without waiting for the replies at all, and finally read the replies in a single step. +A Request/Response server can be implemented so that it is able to process new requests even if the client hasn't already read the old responses. This way it is possible to send *multiple commands* to the server without waiting for the replies at all, and finally read the replies in a single step. -This is called pipelining, and is a technique widely in use since many decades. For instance many POP3 protocol implementations already supported this feature, dramatically speeding up the process of downloading new emails from the server. +This is called pipelining, and is a technique widely in use for many decades. 
For instance many POP3 protocol implementations already support this feature, dramatically speeding up the process of downloading new emails from the server. Redis has supported pipelining since the very early days, so whatever version you are running, you can use pipelining with Redis. This is an example using the raw netcat utility: @@ -44,7 +44,7 @@ Redis has supported pipelining since the very early days, so whatever version yo +PONG +PONG -This time we are not paying the cost of RTT for every call, but just one time for the three commands. +This time we are not paying the cost of RTT for every call, but just once for the three commands. To be very explicit, with pipelining the order of operations of our very first example will be the following: @@ -57,13 +57,13 @@ To be very explicit, with pipelining the order of operations of our very first e * *Server:* 3 * *Server:* 4 -**IMPORTANT NOTE**: While the client sends commands using pipelining, the server will be forced to queue the replies, using memory. So if you need to send a lot of commands with pipelining, it is better to send them as batches having a reasonable number, for instance 10k commands, read the replies, and then send another 10k commands again, and so forth. The speed will be nearly the same, but the additional memory used will be at max the amount needed to queue the replies for these 10k commands. +**IMPORTANT NOTE**: While the client sends commands using pipelining, the server will be forced to queue the replies, using memory. So if you need to send a lot of commands with pipelining, it is better to send them as batches each containing a reasonable number, for instance 10k commands, read the replies, and then send another 10k commands again, and so forth. The speed will be nearly the same, but the additional memory used will be at max the amount needed to queue the replies for these 10k commands. 
It's not just a matter of RTT --- -Pipelining is not just a way in order to reduce the latency cost due to the -round trip time, it actually improves by a huge amount the total operations +Pipelining is not just a way to reduce the latency cost associated with the +round trip time, it actually greatly improves the number of operations you can perform per second in a given Redis server. This is the result of the fact that, without using pipelining, serving each command is very cheap from the point of view of accessing the data structures and producing the reply, @@ -75,12 +75,12 @@ When pipelining is used, many commands are usually read with a single `read()` system call, and multiple replies are delivered with a single `write()` system call. Because of this, the number of total queries performed per second initially increases almost linearly with longer pipelines, and eventually -reaches 10 times the baseline obtained not using pipelining, as you can +reaches 10 times the baseline obtained without pipelining, as you can see from the following graph: ![Pipeline size and IOPs](https://redis.io/images/redisdoc/pipeline_iops.png) -Some real world code example +A real world code example --- In the following benchmark we'll use the Redis Ruby client, supporting pipelining, to test the speed improvement due to pipelining: @@ -89,42 +89,42 @@ In the following benchmark we'll use the Redis Ruby client, supporting pipelinin require 'redis' def bench(descr) - start = Time.now - yield - puts "#{descr} #{Time.now-start} seconds" + start = Time.now + yield + puts "#{descr} #{Time.now - start} seconds" end def without_pipelining - r = Redis.new - 10000.times { - r.ping - } + r = Redis.new + 10_000.times do + r.ping + end end def with_pipelining - r = Redis.new - r.pipelined { - 10000.times { - r.ping - } - } + r = Redis.new + r.pipelined do + 10_000.times do + r.ping + end + end end - bench("without pipelining") { - without_pipelining - } - bench("with pipelining") { - 
with_pipelining - } + bench('without pipelining') do + without_pipelining + end + bench('with pipelining') do + with_pipelining + end -Running the above simple script will provide the following figures in my Mac OS X system, running over the loopback interface, where pipelining will provide the smallest improvement as the RTT is already pretty low: +Running the above simple script yields the following figures on my Mac OS X system, running over the loopback interface, where pipelining will provide the smallest improvement as the RTT is already pretty low: without pipelining 1.185238 seconds with pipelining 0.250783 seconds As you can see, using pipelining, we improved the transfer by a factor of five. -Pipelining VS Scripting +Pipelining vs Scripting --- Using [Redis scripting](/commands/eval) (available in Redis version 2.6 or greater) a number of use cases for pipelining can be addressed more efficiently using scripts that perform a lot of the work needed at the server side. A big advantage of scripting is that it is able to both read and write data with minimal latency, making operations like *read, compute, write* very fast (pipelining can't help in this scenario since the client needs the reply of the read command before it can call the write command). @@ -144,17 +144,17 @@ in the same physical machine: END After all if both the Redis process and the benchmark are running in the same -box, isn't this just messages copied via memory from one place to another without -any actual latency and actual networking involved? +box, isn't it just copying messages in memory from one place to another without +any actual latency or networking involved? The reason is that processes in a system are not always running, actually it is -the kernel scheduler that let the process run, so what happens is that, for -instance, the benchmark is allowed to run, reads the reply from the Redis server +the kernel scheduler that lets the process run. 
So, for +instance, when the benchmark is allowed to run, it reads the reply from the Redis server (related to the last command executed), and writes a new command. The command is now in the loopback interface buffer, but in order to be read by the server, the kernel should schedule the server process (currently blocked in a system call) to run, and so forth. So in practical terms the loopback interface still involves -network-alike latency, because of how the kernel scheduler works. +network-like latency, because of how the kernel scheduler works. Basically a busy loop benchmark is the silliest thing that can be done when metering performances in a networked server. The wise thing is just avoiding diff --git a/topics/rediscli.md b/topics/rediscli.md index 6b6c7d06..65fce1f9 100644 --- a/topics/rediscli.md +++ b/topics/rediscli.md @@ -114,7 +114,7 @@ you can specify a certificate and a corresponding private key using `--cert` and There are two ways you can use `redis-cli` in order to get the input from other commands (from the standard input, basically). One is to use as last argument the payload we read from *stdin*. For example, in order to set a Redis key -to the content of the file `/etc/services` if my computer, I can use the `-x` +to the content of the file `/etc/services` of my computer, I can use the `-x` option: $ redis-cli -x set foo < /etc/services @@ -351,9 +351,10 @@ syntax hints. This behavior can be turned on and off via the CLI preferences. ## Preferences There are two ways to customize the CLI's behavior. The file `.redisclirc` -in your home directory is loaded by the CLI on startup. Preferences can also -be set during a CLI session, in which case they will last only the the -duration of the session. +in your home directory is loaded by the CLI on startup. You can override the +file's default location by setting the `REDISCLI_RCFILE` environment variable to +an alternative path. 
Preferences can also be set during a CLI session, in which +case they will last only the duration of the session. To set preferences, use the special `:set` command. The following preferences can be set, either by typing the command in the CLI or adding it to the diff --git a/topics/releases.md b/topics/releases.md index c709145c..e313c0b2 100644 --- a/topics/releases.md +++ b/topics/releases.md @@ -1,81 +1,111 @@ -Redis release cycle +Redis Release Cycle === -Redis is system software, and a type of system software that holds user -data, so it is among the most critical pieces of a software stack. +Redis is system software and a type of system software that holds user data, so it is among the most critical pieces of a software stack. -For this reason our release cycle tries hard to make sure that a stable -release is only released when it reaches a sufficiently high level of -stability, even at the cost of a slower release cycle. +For this reason, Redis' release cycle is such that it ensures highly-stable releases, even at the cost of slower cycles. + +New releases are published in the [Redis GitHub repository](http://github.com/redis/redis) and are also available for [download](/download). +Announcements are sent to the [Redis mailing list](http://groups.google.com/group/redis-db) and by [@redisfeed on Twitter](https://twitter.com/redisfeed). + +Release Cycle +--- A given version of Redis can be at three different levels of stability: -* unstable -* development -* frozen -* release candidate -* stable +* Unstable +* Release Candidate +* Stable -Unstable tree -=== +### Unstable Tree -The unstable version of Redis is always located in the `unstable` branch in -the [Redis GitHub Repository](http://github.com/redis/redis). +The unstable version of Redis is located in the `unstable` branch in the [Redis GitHub repository](http://github.com/redis/redis). 
-This is the source tree where most of the new features are developed and -is not considered to be production ready: it may contain critical bugs, -not entirely ready features, and may be unstable. +This branch is the source tree where most of the new features are under development. +`unstable` is not considered production-ready: it may contain critical bugs, incomplete features, and is potentially unstable. -However, we try hard to make sure that even the unstable branch is -usable most of the time in a development environment without major -issues. +However, we try hard to make sure that even the unstable branch is usable most of the time in a development environment without significant issues. -Forked, Frozen, Release candidate tree -=== +### Release Candidate -When a new version of Redis starts to be planned, the unstable branch -(or sometimes the currently stable branch) is forked into a new -branch that has the name of the target release. +New minor and major versions of Redis begin as forks of the `unstable` branch. +The forked branch's name is the target release. -For instance, when Redis 2.6 was released as stable, the `unstable` branch -was forked into the `2.8` branch. +For example, when Redis 6.0 was released as a release candidate, the `unstable` branch was forked into the `6.0` branch. The new branch is the release candidate (RC) for that version. -This new branch can be at three different levels of stability: -development, frozen, and release candidate. +Bug fixes and new features that can be stabilized during the release's time frame are committed to the unstable branch and backported to the release candidate branch. +The `unstable` branch may include additional work that is not a part of the release candidate and scheduled for future releases. -* Development: new features and bug fixes are committed into the branch, but not everything going into `unstable` is merged here. Only the features that can become stable in a reasonable time frame are merged. 
-* Frozen: no new feature is added, unless it is almost guaranteed to have zero stability impacts on the source code, and at the same time for some reason it is a very important feature that must be shipped ASAP. Big code changes are only allowed when they are needed in order to fix bugs. -* Release Candidate: only fixes are committed against this release. +The first release candidate, or RC1, is released once it can be used for development purposes and for testing the new version. +At this stage, most of the new features and changes the new version brings are ready for review, and the release's purpose is collecting the public's feedback. -Stable tree -=== +Subsequent release candidates are released every three weeks or so, primarily for fixing bugs. +These may also add new features and introduce changes, but at a decreasing rate and decreasing potential risk towards the final release candidate. + +### Stable Tree + +Once development has ended and the frequency of critical bug reports for the release candidate wanes, it is ready for the final release. +At this point, the release is marked as stable and is released with "0" as its patch-level version. + +Versioning +--- + +Stable releases liberally follow the usual `major.minor.patch` semantic versioning schema. +The primary goal is to provide explicit guarantees regarding backward compatibility. + +### Patch-Level Versions + +Patches primarily consist of bug fixes and very rarely introduce any compatibility issues. + +Upgrading from a previous patch-level version is almost always safe and seamless. -At some point, when a given Redis release is in the Release Candidate state -for enough time, we observe that the frequency at which critical bugs are -signaled starts to decrease, to the point that for a few weeks we don't have -any serious bugs reported. 
+New features and configuration directives may be added, or default values changed, as long as these don’t carry significant impacts or introduce operations-related issues. -When this happens, the release is marked as stable. +### Minor Versions -Version numbers +Minor versions usually deliver maturity and extended functionality. + +Upgrading between minor versions does not introduce any application-level compatibility issues. + +Minor releases may include new commands and data types that introduce operations-related incompatibilities, including changes in data persistence format and replication protocol. + +### Major Versions + +Major versions introduce new capabilities and significant changes. + +Ideally, these don't introduce application-level compatibility issues. + +Release Schedule --- -Stable releases follow the usual `major.minor.patch` versioning schema, with the following special rules: +A new major version is planned for release once a year. + +Generally, every major release is followed by a minor version after six months. -* The minor is even in stable versions of Redis. -* The minor is odd in unstable, development, frozen, release candidates. For instance the unstable version of 2.8.x will have a version number in the form 2.7.x. In general the unstable version of x.y.z will have a version x.(y-1).z. -* As an unstable version of Redis progresses, the patch level is incremented from time to time, so at a given time you may have 2.7.2, and later 2.7.3 and so forth. However when the release candidate state is reached, the patch level starts from 101. So for instance 2.7.101 is the first release candidate for 2.8, 2.7.105 is Release Candidate 5, and so forth. +Patches are released as needed to fix high-urgency issues, or once a stable version accumulates enough fixes to justify it. + +For contacting the core team on sensitive matters and security issues, please email [redis@redis.io](mailto:redis@redis.io). 
Support --- -Older versions are not supported as we try very hard to make the -Redis API mostly backward compatible. Upgrading to newer versions -is usually trivial. +As a rule, older versions are not supported as we try very hard to make the Redis API mostly backward compatible. + +Upgrading to newer versions is the recommended approach and is usually trivial. + +The latest stable release is always fully supported and maintained. + +Two additional versions receive maintenance only, meaning that only fixes for critical bugs and major security issues are committed and released as patches: + +* The previous minor version of the latest stable release. +* The previous stable major release. + +For example, consider the following hypothetical versions: 1.2, 2.0, 2.2, 3.0, 3.2, ... + +When version 2.2 is the latest stable release, both 2.0 and 1.2 are maintained. + +Once version 3.0.0 replaces 2.2 as the latest stable, versions 2.0 and 2.2 are maintained, whereas version 1.x reaches its end of life. -For example, if the current stable release is 2.6.x, we accept bug -reports and provide support for the previous stable release -(2.4.x), but not for older ones such as 2.2.x. +This process repeats with version 3.2.0, after which only versions 2.2 and 3.0 are maintained. -When 2.8 becomes the current stable release, the 2.6.x will be the -oldest supported release. +The above are guidelines rather than rules set in stone and will not replace common sense. diff --git a/topics/replication.md b/topics/replication.md index 0acf8429..306464e3 100644 --- a/topics/replication.md +++ b/topics/replication.md @@ -321,3 +321,18 @@ It is not possible to partially resynchronize a replica that restarted via the AOF file. However the instance may be turned to RDB persistence before shutting down it, than can be restarted, and finally AOF can be enabled again. 
+Maxmemory on replicas +--- + +Replicas don't honor `maxmemory` because by default a replica will ignore this setting (unless it is promoted to master after a failover or manually). +It means that the eviction of keys will just be handled by the master, sending the DEL commands to the replica as keys evict in the master side. + +This behavior ensures that masters and replicas stay consistent, which is usually what you want. +However, if your replica is writable, or you want the replica to have a different memory setting, and you are sure all the writes performed to the replica are idempotent, then you may change this default (but be sure to understand what you are doing). + +Note that since the replica by default does not evict, it may end up using more memory than what is set via `maxmemory` (since there are certain buffers that may be larger on the replica, or data structures may sometimes take more memory and so forth). +So make sure you monitor your replicas and make sure they have enough memory to never hit a real out-of-memory condition before the master hits the configured `maxmemory` setting. + +In order to change this behavior, it is possible to allow a replica to not ignore the maxmemory. 
The configuration directives to use is: + + replica-ignore-maxmemory no diff --git a/topics/security.md b/topics/security.md index ecc6f710..797801af 100644 --- a/topics/security.md +++ b/topics/security.md @@ -178,62 +178,54 @@ GPG key ``` -----BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1.4.13 (Darwin) - -mQINBFJ7ouABEAC5HwiDmE+tRCsWyTaPLBFEGDHcWOLWzph5HdrRtB//UUlSVt9P -tTWZpDvZQvq/ujnS2i2c54V+9NcgVqsCEpA0uJ/U1sUZ3RVBGfGO/l+BIMBnM+B+ -TzK825TxER57ILeT/2ZNSebZ+xHJf2Bgbun45pq3KaXUrRnuS8HWSysC+XyMoXET -nksApwMmFWEPZy62gbeayf1U/4yxP/YbHfwSaldpEILOKmsZaGp8PAtVYMVYHsie -gOUdS/jO0P3silagq39cPQLiTMSsyYouxaagbmtdbwINUX0cjtoeKddd4AK7PIww -7su/lhqHZ58ZJdlApCORhXPaDCVrXp/uxAQfT2HhEGCJDTpctGyKMFXQbLUhSuzf -IilRKJ4jqjcwy+h5lCfDJUvCNYfwyYApsMCs6OWGmHRd7QSFNSs335wAEbVPpO1n -oBJHtOLywZFPF+qAm3LPV4a0OeLyA260c05QZYO59itakjDCBdHwrwv3EU8Z8hPd -6pMNLZ/H1MNK/wWDVeSL8ZzVJabSPTfADXpc1NSwPPWSETS7JYWssdoK+lXMw5vK -q2mSxabL/y91sQ5uscEDzDyJxEPlToApyc5qOUiqQj/thlA6FYBlo1uuuKrpKU1I -e6AA3Gt3fJHXH9TlIcO6DoHvd5fS/o7/RxyFVxqbRqjUoSKQeBzXos3u+QARAQAB -tChTYWx2YXRvcmUgU2FuZmlsaXBwbyA8YW50aXJlekBnbWFpbC5jb20+iQI+BBMB -AgAoBQJSe6LgAhsDBQld/A8ABgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRAx -gTcoDlyI1riPD/oDDvyIVHtgHvdHqB8/GnF2EsaZgbNuwbiNZ+ilmqnjXzZpu5Su -kGPXAAo+v+rJVLSU2rjCUoL5PaoSlhznw5PL1xpBosN9QzfynWLvJE42T4i0uNU/ -a7a1PQCluShnBchm4Xnb3ohNVthFF2MGFRT4OZ5VvK7UcRLYTZoGRlKRGKi9HWea -2xFvyUd9jSuGZG/MMuoslgEPxei09rhDrKxnDNQzQZQpamm/42MITh/1dzEC5ZRx -8hgh1J70/c+zEU7s6kVSGvmYtqbV49/YkqAbhENIeZQ+bCxcTpojEhfk6HoQkXoJ -oK5m21BkMlUEvf1oTX22c0tuOrAX8k0y1M5oismT2e3bqs2OfezNsSfK2gKbeASk -CyYivnbTjmOSPbkvtb27nDqXjb051q6m2A5d59KHfey8BZVuV9j35Ettx4nrS1Ni -S7QrHWRvqceRrIrqXJKopyetzJ6kYDlbP+EVN9NJ2kz/WG6ermltMJQoC0oMhwAG -dfrttG+QJ8PCOlaYiZLD2bjzkDfdfanE74EKYWt+cseenZUf0tsncltRbNdeGTQb -1/GHfwJ+nbA1uKhcHCQ2WrEeGiYpvwKv2/nxBWZ3gwaiAwsz/kI6DQlPZqJoMea9 -8gDK2rQigMgbE88vIli4sNhc0yAtm3AbNgAO28NUhzIitB+av/xYxN/W/LkCDQRS -e6LgARAAtdfwe05ZQ0TZYAoeAQXxx2mil4XLzj6ycNjj2JCnFgpYxA8m6nf1gudr 
-C5V7HDlctp0i9i0wXbf07ubt4Szq4v3ihQCnPQKrZZWfRXxqg0/TOXFfkOdeIoXl -Fl+yC5lUaSTJSg21nxIr8pEq/oPbwpdnWdEGSL9wFanfDUNJExJdzxgyPzD6xubc -OIn2KviV9gbFzQfOIkgkl75V7gn/OA5g2SOLOIPzETLCvQYAGY9ppZrkUz+ji+aT -Tg7HBL6zySt1sCCjyBjFFgNF1RZY4ErtFj5bdBGKCuglyZou4o2ETfA8A5NNpu7x -zkls45UmqRTbmsTD2FU8Id77EaXxDz8nrmjz8f646J0rqn9pGnIg6Lc2PV8j7ACm -/xaTH03taIloOBkTs/Cl01XYeloM0KQwrML43TIm3xSE/AyGF9IGTQo3zmv8SnMO -F+Rv7+55QGlSkfIkXUNCUSm1+dJSBnUhVj/RAjxkekG2di+Jh/y8pkSUxPMDrYEa -OtDoiq2G/roXjVQcbOyOrWA2oB58IVuXO6RzMYi6k6BMpcbmQm0y+TcJqo64tREV -tjogZeIeYDu31eylwijwP67dtbWgiorrFLm2F7+povfXjsDBCQTYhjH4mZgV94ri -hYjP7X2YfLV3tvGyjsMhw3/qLlEyx/f/97gdAaosbpGlVjnhqicAEQEAAYkCJQQY -AQIADwUCUnui4AIbDAUJXfwPAAAKCRAxgTcoDlyI1kAND/sGnXTbMvfHd9AOzv7i -hDX15SSeMDBMWC+8jH/XZASQF/zuHk0jZNTJ01VAdpIxHIVb9dxRrZ3bl56BByyI -8m5DKJiIQWVai+pfjKj6C7p44My3KLodjEeR1oOODXXripGzqJTJNqpW5eCrCxTM -yz1rzO1H1wziJrRNc+ACjVBE3eqcxsZkDZhWN1m8StlX40YgmQmID1CC+kRlV+hg -LUlZLWQIFCGo2UJYoIL/xvUT3Sx4uKD4lpOjyApWzU40mGDaM5+SOsYYrT8rdwvk -nd/efspff64meT9PddX1hi7Cdqbq9woQRu6YhGoCtrHyi/kklGF3EZiw0zWehGAR -2pUeCTD28vsMfJ3ZL1mUGiwlFREUZAcjIlwWDG1RjZDJeZ0NV07KH1N1U8L8aFcu -+CObnlwiavZxOR2yKvwkqmu9c7iXi/R7SVcGQlNao5CWINdzCLHj6/6drPQfGoBS -K/w4JPe7fqmIonMR6O1Gmgkq3Bwl3rz6MWIBN6z+LuUF/b3ODY9rODsJGp21dl2q -xCedf//PAyFnxBNf5NSjyEoPQajKfplfVS3mG8USkS2pafyq6RK9M5wpBR9I1Smm -gon60uMJRIZbxUjQMPLOViGNXbPIilny3FdqbUgMieTBDxrJkE7mtkHfuYw8bERy -vI1sAEeV6ZM/uc4CDI3E2TxEbQ== -``` - -**Key fingerprint** -``` -pub 4096R/0E5C88D6 2013-11-07 [expires: 2063-10-26] - Key fingerprint = E5F3 DA80 35F0 2EC1 47F9 020F 3181 3728 0E5C 88D6 - uid Salvatore Sanfilippo - sub 4096R/3B34D15F 2013-11-07 [expires: 2063-10-26] +mQINBF9FWioBEADfBiOE/iKpj2EF/cJ/KzFX+jSBKa8SKrE/9RE0faVF6OYnqstL +S5ox/o+yT45FdfFiRNDflKenjFbOmCbAdIys9Ta0iq6I9hs4sKfkNfNVlKZWtSVG +W4lI6zO2Zyc2wLZonI+Q32dDiXWNcCEsmajFcddukPevj9vKMTJZtF79P2SylEPq +mUuhMy/jOt7q1ibJCj5srtaureBH9662t4IJMFjsEe+hiZ5v071UiQA6Tp7rxLqZ +O6ZRzuamFP3xfy2Lz5NQ7QwnBH1ROabhJPoBOKCATCbfgFcM1Rj+9AOGfoDCOJKH 
+7yiEezMqr9VbDrEmYSmCO4KheqwC0T06lOLIQC4nnwKopNO/PN21mirCLHvfo01O +H/NUG1LZifOwAURbiFNF8Z3+L0csdhD8JnO+1nphjDHr0Xn9Vff2Vej030pRI/9C +SJ2s5fZUq8jK4n06sKCbqA4pekpbKyhRy3iuITKv7Nxesl4T/uhkc9ccpAvbuD1E +NczN1IH05jiMUMM3lC1A9TSvxSqflqI46TZU3qWLa9yg45kDC8Ryr39TY37LscQk +9x3WwLLkuHeUurnwAk46fSj7+FCKTGTdPVw8v7XbvNOTDf8vJ3o2PxX1uh2P2BHs +9L+E1P96oMkiEy1ug7gu8V+mKu5PAuD3QFzU3XCB93DpDakgtznRRXCkAQARAQAB +tBtSZWRpcyBMYWJzIDxyZWRpc0ByZWRpcy5pbz6JAk4EEwEKADgWIQR5sNCo1OBf +WO913l22qvOUq0evbgUCX0VaKgIbAwULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAK +CRC2qvOUq0evbpZaD/4rN7xesDcAG4ec895Fqzk3w74W1/K9lzRKZDwRsAqI+sAz +ZXvQMtWSxLfF2BITxLnHJXK5P+2Y6XlNgrn1GYwC1MsARyM9e1AzwDJHcXFkHU82 +2aALIMXGtiZs/ejFh9ZSs5cgRlxBSqot/uxXm9AvKEByhmIeHPZse/Rc6e3qa57v +OhCkVZB4ETx5iZrgA+gdmS8N7MXG0cEu5gJLacG57MHi+2WMOCU9Xfj6+Pqhw3qc +E6lBinKcA/LdgUJ1onK0JCnOG1YVHjuFtaisfPXvEmUBGaSGE6lM4J7lass/OWps +Dd+oHCGI+VOGNx6AiBDZG8mZacu0/7goRnOTdljJ93rKkj31I+6+j4xzkAC0IXW8 +LAP9Mmo9TGx0L5CaljykhW6z/RK3qd7dAYE+i7e8J9PuQaGG5pjFzuW4vY45j0V/ +9JUMKDaGbU5choGqsCpAVtAMFfIBj3UQ5LCt5zKyescKCUb9uifOLeeQ1vay3R9o +eRSD52YpRBpor0AyYxcLur/pkHB0sSvXEfRZENQTohpY71rHSaFd3q1Hkk7lZl95 +m24NRlrJnjFmeSPKP22vqUYIwoGNUF/D38UzvqHD8ltTPgkZc+Y+RRbVNqkQYiwW +GH/DigNB8r2sdkt+1EUu+YkYosxtzxpxxpYGKXYXx0uf+EZmRqRt/OSHKnf2GLkC +DQRfRVoqARAApffsrDNo4JWjX3r6wHJJ8IpwnGEJ2IzGkg8f1Ofk2uKrjkII/oIx +sXC3EeauC1Plhs+m9GP/SPY0LXmZ0OzGD/S1yMpmBeBuXJ0gONDo+xCg1pKGshPs +75XzpbggSOtEYR5S8Z46yCu7TGJRXBMGBhDgCfPVFBBNsnG5B0EeHXM4trqqlN6d +PAcwtLnKPz/Z+lloKR6bFXvYGuN5vjRXjcVYZLLCEwdV9iY5/Opqk9sCluasb3t/ +c2gcsLWWFnNz2desvb/Y4ADJzxY+Um848DSR8IcdoArSsqmcCTiYvYC/UU7XPVNk +Jrx/HwgTVYiLGbtMB3u3fUpHW8SabdHc4xG3sx0LeIvl+JwHgx7yVhNYJEyOQfnE +mfS97x6surXgTVLbWVjXKIJhoWnWbLP4NkBc27H4qo8wM/IWH4SSXYNzFLlCDPnw +vQZSel21qxdqAWaSxkKcymfMS4nVDhVj0jhlcTY3aZcHMjqoUB07p5+laJr9CCGv +0Y0j0qT2aUO22A3kbv6H9c1Yjv8EI7eNz07aoH1oYU6ShsiaLfIqPfGYb7LwOFWi +PSl0dCY7WJg2H6UHsV/y2DwRr/3oH0a9hv/cvcMneMi3tpIkRwYFBPXEsIcoD9xr +RI5dp8BBdO/Nt+puoQq9oyialWnQK5+AY7ErW1yxjgie4PQ+XtN+85UAEQEAAYkC 
+NgQYAQoAIBYhBHmw0KjU4F9Y73XeXbaq85SrR69uBQJfRVoqAhsMAAoJELaq85Sr +R69uoV0QAIvlxAHYTjvH1lt5KbpVGs5gwIAnCMPxmaOXcaZ8V0Z1GEU+/IztwV+N +MYCBv1tYa7OppNs1pn75DhzoNAi+XQOVvU0OZgVJutthZe0fNDFGG9B4i/cxRscI +Ld8TPQQNiZPBZ4ubcxbZyBinE9HsYUM49otHjsyFZ0GqTpyne+zBf1GAQoekxlKo +tWSkkmW0x4qW6eiAmyo5lPS1bBjvaSc67i+6Bv5QkZa0UIkRqAzKN4zVvc2FyILz ++7wVLCzWcXrJt8dOeS6Y/Fjbhb6m7dtapUSETAKu6wJvSd9ndDUjFHD33NQIZ/nL +WaPbn01+e/PHtUDmyZ2W2KbcdlIT9nb2uHrruqdCN04sXkID8E2m2gYMA+TjhC0Q +JBJ9WPmdBeKH91R6wWDq6+HwOpgc/9na+BHZXMG+qyEcvNHB5RJdiu2r1Haf6gHi +Fd6rJ6VzaVwnmKmUSKA2wHUuUJ6oxVJ1nFb7Aaschq8F79TAfee0iaGe9cP+xUHL +zBDKwZ9PtyGfdBp1qNOb94sfEasWPftT26rLgKPFcroCSR2QCK5qHsMNCZL+u71w +NnTtq9YZDRaQ2JAc6VDZCcgu+dLiFxVIi1PFcJQ31rVe16+AQ9zsafiNsxkPdZcY +U9XKndQE028dGZv1E3S5BwpnikrUkWdxcYrVZ4fiNIy5I3My2yCe +=J9BD +-----END PGP PUBLIC KEY BLOCK----- ``` diff --git a/topics/sentinel.md b/topics/sentinel.md index 70458a75..5a20315c 100644 --- a/topics/sentinel.md +++ b/topics/sentinel.md @@ -82,7 +82,7 @@ Fundamental things to know about Sentinel before deploying 3. Sentinel + Redis distributed system does not guarantee that acknowledged writes are retained during failures, since Redis uses asynchronous replication. However there are ways to deploy Sentinel that make the window to lose writes limited to certain moments, while there are other less secure ways to deploy it. 4. You need Sentinel support in your clients. Popular client libraries have Sentinel support, but not all. 5. There is no HA setup which is safe if you don't test from time to time in development environments, or even better if you can, in production environments, if they work. You may have a misconfiguration that will become apparent only when it's too late (at 3am when your master stops working). -6. 
**Sentinel, Docker, or other forms of Network Address Translation or Port Mapping should be mixed with care**: Docker performs port remapping, breaking Sentinel auto discovery of other Sentinel processes and the list of replicas for a master. Check the section about Sentinel and Docker later in this document for more information. +6. **Sentinel, Docker, or other forms of Network Address Translation or Port Mapping should be mixed with care**: Docker performs port remapping, breaking Sentinel auto discovery of other Sentinel processes and the list of replicas for a master. Check the [section about _Sentinel and Docker_](#sentinel-docker-nat-and-possible-issues) later in this document for more information. Configuring Sentinel --- @@ -161,7 +161,12 @@ Additional options are described in the rest of this document and documented in the example `sentinel.conf` file shipped with the Redis distribution. -All the configuration parameters can be modified at runtime using the `SENTINEL SET` command. See the **Reconfiguring Sentinel at runtime** section for more information. +Configuration parameters can be modified at runtime: + +* Master-specific configuration parameters are modified using `SENTINEL SET`. +* Global configuration parameters are modified using `SENTINEL CONFIG SET`. + +See the [_Reconfiguring Sentinel at runtime_ section](#reconfiguring-sentinel-at-runtime) for more information. Example Sentinel deployments --- @@ -413,10 +418,34 @@ in order to force Sentinel to announce a specific set of IP and port: Note that Docker has the ability to run in *host networking mode* (check the `--net=host` option for more information). This should create no issues since ports are not remapped in this setup. +IP Addresses and DNS names +--- + +Older versions of Sentinel did not support host names and required IP addresses to be specified everywhere. +Starting with version 6.2, Sentinel has *optional* support for host names. + +**This capability is disabled by default. 
If you're going to enable DNS/hostnames support, please note:** + +1. The name resolution configuration on your Redis and Sentinel nodes must be reliable and be able to resolve addresses quickly. Unexpected delays in address resolution may have a negative impact on Sentinel. +2. You should use hostnames everywhere and avoid mixing hostnames and IP addresses. To do that, use `replica-announce-ip ` and `sentinel announce-ip ` for all Redis and Sentinel instances, respectively. + +Enabling the `resolve-hostnames` global configuration allows Sentinel to accept host names: + +* As part of a `sentinel monitor` command +* As a replica address, if the replica uses a host name value for `replica-announce-ip` + +Sentinel will accept host names as valid inputs and resolve them, but will still refer to IP addresses when announcing an instance, updating configuration files, etc. + +Enabling the `announce-hostnames` global configuration makes Sentinel use host names instead. This affects replies to clients, values written in configuration files, the `REPLICAOF` command issued to replicas, etc. + +This behavior may not be compatible with all Sentinel clients, that may explicitly expect an IP address. + +Using host names may be useful when clients use TLS to connect to instances and require a name rather than an IP address in order to perform certificate ASN matching. + A quick tutorial === -In the next sections of this document, all the details about Sentinel API, +In the next sections of this document, all the details about [_Sentinel API_](#sentinel-api), configuration and semantics will be covered incrementally. However for people that want to play with the system ASAP, this section is a tutorial that shows how to configure and interact with 3 Sentinel instances. 
@@ -449,7 +478,7 @@ Once you start the three Sentinels, you'll see a few messages they log, like: +monitor master mymaster 127.0.0.1 6379 quorum 2 This is a Sentinel event, and you can receive this kind of events via Pub/Sub -if you `SUBSCRIBE` to the event name as specified later. +if you `SUBSCRIBE` to the event name as specified later in [_Pub/Sub Messages_ section](#pubsub-messages). Sentinel generates and logs different events during failure detection and failover. @@ -583,30 +612,52 @@ so forth. Sentinel commands --- -The following is a list of accepted commands, not covering commands used in -order to modify the Sentinel configuration, which are covered later. - +The `SENTINEL` command is the main API for Sentinel. The following is the list of its subcommands (minimal version is noted where applicable): + +* **SENTINEL CONFIG GET ``** (`>= 6.2`) Get the current value of a global Sentinel configuration parameter. The specified name may be a wildcard, similar to the Redis `CONFIG GET` command. +* **SENTINEL CONFIG SET `` ``** (`>= 6.2`) Set the value of a global Sentinel configuration parameter. +* **SENTINEL CKQUORUM ``** Check if the current Sentinel configuration is able to reach the quorum needed to failover a master, and the majority needed to authorize the failover. This command should be used in monitoring systems to check if a Sentinel deployment is ok. +* **SENTINEL FLUSHCONFIG** Force Sentinel to rewrite its configuration on disk, including the current Sentinel state. Normally Sentinel rewrites the configuration every time something changes in its state (in the context of the subset of the state which is persisted on disk across restart). However sometimes it is possible that the configuration file is lost because of operation errors, disk failures, package upgrade scripts or configuration managers. In those cases a way to force Sentinel to rewrite the configuration file is handy. 
This command works even if the previous configuration file is completely missing. +* **SENTINEL FAILOVER ``** Force a failover as if the master was not reachable, and without asking for agreement to other Sentinels (however a new version of the configuration will be published so that the other Sentinels will update their configurations). +* **SENTINEL GET-MASTER-ADDR-BY-NAME ``** Return the ip and port number of the master with that name. If a failover is in progress or terminated successfully for this master it returns the address and port of the promoted replica. +* **SENTINEL INFO-CACHE** (`>= 3.2`) Return cached `INFO` output from masters and replicas. +* **SENTINEL IS-MASTER-DOWN-BY-ADDR ** Check if the master specified by ip:port is down from current Sentinel's point of view. This command is mostly for internal use. +* **SENTINEL MASTER ``** Show the state and info of the specified master. +* **SENTINEL MASTERS** Show a list of monitored masters and their state. +* **SENTINEL MONITOR** Start Sentinel's monitoring. Refer to the [_Reconfiguring Sentinel at Runtime_ section](#reconfiguring-sentinel-at-runtime) for more information. +* **SENTINEL MYID** (`>= 6.2`) Return the ID of the Sentinel instance. +* **SENTINEL PENDING-SCRIPTS** This command returns information about pending scripts. +* **SENTINEL REMOVE** Stop Sentinel's monitoring. Refer to the [_Reconfiguring Sentinel at Runtime_ section](#reconfiguring-sentinel-at-runtime) for more information. +* **SENTINEL REPLICAS ``** (`>= 5.0`) Show a list of replicas for this master, and their state. +* **SENTINEL SENTINELS ``** Show a list of sentinel instances for this master, and their state. +* **SENTINEL SET** Set Sentinel's monitoring configuration. Refer to the [_Reconfiguring Sentinel at Runtime_ section](#reconfiguring-sentinel-at-runtime) for more information. 
+* **SENTINEL SIMULATE-FAILURE (crash-after-election|crash-after-promotion|help)** (`>= 3.2`) This command simulates different Sentinel crash scenarios. +* **SENTINEL RESET ``** This command will reset all the masters with matching name. The pattern argument is a glob-style pattern. The reset process clears any previous state in a master (including a failover in progress), and removes every replica and sentinel already discovered and associated with the master. + +For connection management and administration purposes, Sentinel supports the following subset of Redis' commands: + +* **ACL** (`>= 6.2`) This command manages the Sentinel Access Control List. For more information refer to the [ACL](/topics/acl) documentation page and the [_Sentinel Access Control List authentication_](#sentinel-access-control-list-authentication). +* **AUTH** (`>= 5.0.1`) Authenticate a client connection. For more information refer to the `AUTH` command and the [_Configuring Sentinel instances with authentication_ section](#configuring-sentinel-instances-with-authentication). +* **CLIENT** This command manages client connections. For more information refer to its subcommands' pages. +* **COMMAND** (`>= 6.2`) This command returns information about commands. For more information refer to the `COMMAND` command and its various subcommands. +* **HELLO** (`>= 6.0`) Switch the connection's protocol. For more information refer to the `HELLO` command. +* **INFO** Return information and statistics about the Sentinel server. For more information see the `INFO` command. * **PING** This command simply returns PONG. -* **SENTINEL masters** Show a list of monitored masters and their state. -* **SENTINEL master ``** Show the state and info of the specified master. -* **SENTINEL replicas ``** Show a list of replicas for this master, and their state. -* **SENTINEL sentinels ``** Show a list of sentinel instances for this master, and their state. 
-* **SENTINEL get-master-addr-by-name ``** Return the ip and port number of the master with that name. If a failover is in progress or terminated successfully for this master it returns the address and port of the promoted replica. -* **SENTINEL reset ``** This command will reset all the masters with matching name. The pattern argument is a glob-style pattern. The reset process clears any previous state in a master (including a failover in progress), and removes every replica and sentinel already discovered and associated with the master. -* **SENTINEL failover ``** Force a failover as if the master was not reachable, and without asking for agreement to other Sentinels (however a new version of the configuration will be published so that the other Sentinels will update their configurations). -* **SENTINEL ckquorum ``** Check if the current Sentinel configuration is able to reach the quorum needed to failover a master, and the majority needed to authorize the failover. This command should be used in monitoring systems to check if a Sentinel deployment is ok. -* **SENTINEL flushconfig** Force Sentinel to rewrite its configuration on disk, including the current Sentinel state. Normally Sentinel rewrites the configuration every time something changes in its state (in the context of the subset of the state which is persisted on disk across restart). However sometimes it is possible that the configuration file is lost because of operation errors, disk failures, package upgrade scripts or configuration managers. In those cases a way to to force Sentinel to rewrite the configuration file is handy. This command works even if the previous configuration file is completely missing. +* **ROLE** This command returns the string "sentinel" and a list of monitored masters. For more information refer to the `ROLE` command. +* **SHUTDOWN** Shut down the Sentinel instance. + +Lastly, Sentinel also supports the `SUBSCRIBE`, `UNSUBSCRIBE`, `PSUBSCRIBE` and `PUNSUBSCRIBE` commands. 
Refer to the [_Pub/Sub Messages_ section](#pubsub-messages) for more details. Reconfiguring Sentinel at Runtime --- Starting with Redis version 2.8.4, Sentinel provides an API in order to add, remove, or change the configuration of a given master. Note that if you have multiple sentinels you should apply the changes to all to your instances for Redis Sentinel to work properly. This means that changing the configuration of a single Sentinel does not automatically propagates the changes to the other Sentinels in the network. -The following is a list of `SENTINEL` sub commands used in order to update the configuration of a Sentinel instance. +The following is a list of `SENTINEL` subcommands used in order to update the configuration of a Sentinel instance. * **SENTINEL MONITOR `` `` `` ``** This command tells the Sentinel to start monitoring a new master with the specified name, ip, port, and quorum. It is identical to the `sentinel monitor` configuration directive in `sentinel.conf` configuration file, with the difference that you can't use an hostname in as `ip`, but you need to provide an IPv4 or IPv6 address. * **SENTINEL REMOVE ``** is used in order to remove the specified master: the master will no longer be monitored, and will totally be removed from the internal state of the Sentinel, so it will no longer listed by `SENTINEL masters` and so forth. -* **SENTINEL SET `` `