From f47051a48f5e8e9e28d34abf77c9c224635dff48 Mon Sep 17 00:00:00 2001 From: Water-Melon Date: Sat, 22 Jun 2024 12:13:37 +0000 Subject: [PATCH 01/42] chore(build): remove unrelated patches --- ...210510_03_patch_macro_luajit_version.patch | 14 - .../LuaJIT-2.1-20210510_04_pass_cc_env.patch | 40 -- ...0.8_02-handle-large-string-correctly.patch | 387 ------------------ ...resty-core-0.1.22_05-ngx-worker-pids.patch | 81 ---- ...set-ssl-option-ignore-unexpected-eof.patch | 15 - ...pid-reset-ddos-attack-cve-2023-44487.patch | 53 --- .../patches/nginx-cross-endianness-fix.patch | 79 ---- build/openresty/patches/nginx-cross.patch | 214 ---------- .../ngx_lua-0.10.20_03-ngx-worker-pids.patch | 57 --- ...ffering-with-invalid-if-match-header.patch | 239 ----------- ...stream_lua-0.0.10_02-ngx-worker-pids.patch | 49 --- build/tests/01-base.sh | 2 +- 12 files changed, 1 insertion(+), 1229 deletions(-) delete mode 100644 build/openresty/patches/LuaJIT-2.1-20210510_03_patch_macro_luajit_version.patch delete mode 100644 build/openresty/patches/LuaJIT-2.1-20210510_04_pass_cc_env.patch delete mode 100644 build/openresty/patches/lua-cjson-2.1.0.8_02-handle-large-string-correctly.patch delete mode 100644 build/openresty/patches/lua-resty-core-0.1.22_05-ngx-worker-pids.patch delete mode 100644 build/openresty/patches/nginx-1.19.9_06-set-ssl-option-ignore-unexpected-eof.patch delete mode 100644 build/openresty/patches/nginx-1.19.9_09-http2-rapid-reset-ddos-attack-cve-2023-44487.patch delete mode 100644 build/openresty/patches/nginx-cross-endianness-fix.patch delete mode 100644 build/openresty/patches/nginx-cross.patch delete mode 100644 build/openresty/patches/ngx_lua-0.10.20_03-ngx-worker-pids.patch delete mode 100644 build/openresty/patches/ngx_lua-0.10.20_04-crash-when-buffering-with-invalid-if-match-header.patch delete mode 100644 build/openresty/patches/ngx_stream_lua-0.0.10_02-ngx-worker-pids.patch diff --git a/build/openresty/patches/LuaJIT-2.1-20210510_03_patch_macro_luajit_version.patch b/build/openresty/patches/LuaJIT-2.1-20210510_03_patch_macro_luajit_version.patch deleted file mode 100644 index 5e7869bcf3e..00000000000 --- a/build/openresty/patches/LuaJIT-2.1-20210510_03_patch_macro_luajit_version.patch +++ /dev/null @@ -1,14 +0,0 @@ -diff --git a/bundle/LuaJIT-2.1-20210510/src/luajit.h b/bundle/LuaJIT-2.1-20210510/src/luajit.h -index 1dddaec..651de67 100644 ---- a/bundle/LuaJIT-2.1-20210510/src/luajit.h -+++ b/bundle/LuaJIT-2.1-20210510/src/luajit.h -@@ -32,7 +32,9 @@ - - #define OPENRESTY_LUAJIT - -+#ifndef LUAJIT_VERSION - #define LUAJIT_VERSION "LuaJIT 2.1.0-beta3" -+#endif - #define LUAJIT_VERSION_NUM 20100 /* Version 2.1.0 = 02.01.00. */ - #define LUAJIT_VERSION_SYM luaJIT_version_2_1_0_beta3 - #define LUAJIT_COPYRIGHT "Copyright (C) 2005-2021 Mike Pall" diff --git a/build/openresty/patches/LuaJIT-2.1-20210510_04_pass_cc_env.patch b/build/openresty/patches/LuaJIT-2.1-20210510_04_pass_cc_env.patch deleted file mode 100644 index afe165ab78a..00000000000 --- a/build/openresty/patches/LuaJIT-2.1-20210510_04_pass_cc_env.patch +++ /dev/null @@ -1,40 +0,0 @@ -diff --git a/bundle/LuaJIT-2.1-20210510/src/Makefile b/bundle/LuaJIT-2.1-20210510/src/Makefile -index 47a21c9..c60b94e 100644 ---- a/bundle/LuaJIT-2.1-20210510/src/Makefile -+++ b/bundle/LuaJIT-2.1-20210510/src/Makefile -@@ -27,7 +27,8 @@ NODOTABIVER= 51 - DEFAULT_CC = gcc - # - # LuaJIT builds as a native 32 or 64 bit binary by default. 
--CC= $(DEFAULT_CC) -+CC?= $(DEFAULT_CC) -+AR?= ar - # - # Use this if you want to force a 32 bit build on a 64 bit multilib OS. - #CC= $(DEFAULT_CC) -m32 -@@ -211,7 +212,7 @@ TARGET_CC= $(STATIC_CC) - TARGET_STCC= $(STATIC_CC) - TARGET_DYNCC= $(DYNAMIC_CC) - TARGET_LD= $(CROSS)$(CC) --TARGET_AR= $(CROSS)ar rcus 2>/dev/null -+TARGET_AR= $(CROSS)$(AR) rcus 2>/dev/null - TARGET_STRIP= $(CROSS)strip - - TARGET_LIBPATH= $(or $(PREFIX),/usr/local)/$(or $(MULTILIB),lib) -@@ -291,11 +292,11 @@ TARGET_XCFLAGS+= $(CCOPT_$(TARGET_LJARCH)) - TARGET_ARCH+= $(patsubst %,-DLUAJIT_TARGET=LUAJIT_ARCH_%,$(TARGET_LJARCH)) - - ifneq (,$(PREFIX)) --ifneq (/usr/local,$(PREFIX)) -- TARGET_XCFLAGS+= -DLUA_ROOT=\"$(PREFIX)\" -- ifneq (/usr,$(PREFIX)) -- TARGET_DYNXLDOPTS= -Wl,-rpath,$(TARGET_LIBPATH) -- endif -+ifneq (/usr/local,$(LUA_ROOT)) -+ TARGET_XCFLAGS+= -DLUA_ROOT=\"$(LUA_ROOT)\" -+endif -+ifneq (/usr,$(PREFIX)) -+ TARGET_DYNXLDOPTS= -Wl,-rpath,$(TARGET_LIBPATH) - endif - endif - ifneq (,$(MULTILIB)) \ No newline at end of file diff --git a/build/openresty/patches/lua-cjson-2.1.0.8_02-handle-large-string-correctly.patch b/build/openresty/patches/lua-cjson-2.1.0.8_02-handle-large-string-correctly.patch deleted file mode 100644 index 12a8fd806a4..00000000000 --- a/build/openresty/patches/lua-cjson-2.1.0.8_02-handle-large-string-correctly.patch +++ /dev/null @@ -1,387 +0,0 @@ -diff --git a/bundle/lua-cjson-2.1.0.8/lua_cjson.c b/bundle/lua-cjson-2.1.0.8/lua_cjson.c -index 875bdaf..4fd2c93 100644 ---- a/bundle/lua-cjson-2.1.0.8/lua_cjson.c -+++ b/bundle/lua-cjson-2.1.0.8/lua_cjson.c -@@ -40,6 +40,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -173,13 +174,13 @@ typedef struct { - - typedef struct { - json_token_type_t type; -- int index; -+ size_t index; - union { - const char *string; - double number; - int boolean; - } value; -- int string_len; -+ size_t string_len; - } json_token_t; - - static const char *char2escape[256] = { -@@ -540,6 +541,8 @@ static void json_append_string(lua_State *l, strbuf_t *json, int lindex) - * This buffer is reused constantly for small strings - * If there are any excess pages, they won't be hit anyway. - * This gains ~5% speedup. */ -+ if (len > SIZE_MAX / 6 - 3) -+ abort(); /* Overflow check */ - strbuf_ensure_empty_length(json, len * 6 + 2); - - strbuf_append_char_unsafe(json, '\"'); -@@ -814,7 +817,7 @@ static int json_encode(lua_State *l) - strbuf_t local_encode_buf; - strbuf_t *encode_buf; - char *json; -- int len; -+ size_t len; - - luaL_argcheck(l, lua_gettop(l) == 1, 1, "expected 1 argument"); - -diff --git a/bundle/lua-cjson-2.1.0.8/strbuf.c b/bundle/lua-cjson-2.1.0.8/strbuf.c -index f0f7f4b..2dc30be 100644 ---- a/bundle/lua-cjson-2.1.0.8/strbuf.c -+++ b/bundle/lua-cjson-2.1.0.8/strbuf.c -@@ -26,6 +26,7 @@ - #include - #include - #include -+#include - - #include "strbuf.h" - -@@ -38,22 +39,22 @@ static void die(const char *fmt, ...) 
- va_end(arg); - fprintf(stderr, "\n"); - -- exit(-1); -+ abort(); - } - --void strbuf_init(strbuf_t *s, int len) -+void strbuf_init(strbuf_t *s, size_t len) - { -- int size; -+ size_t size; - -- if (len <= 0) -+ if (!len) - size = STRBUF_DEFAULT_SIZE; - else -- size = len + 1; /* \0 terminator */ -- -+ size = len + 1; -+ if (size < len) -+ die("Overflow, len: %zu", len); - s->buf = NULL; - s->size = size; - s->length = 0; -- s->increment = STRBUF_DEFAULT_INCREMENT; - s->dynamic = 0; - s->reallocs = 0; - s->debug = 0; -@@ -65,7 +66,7 @@ void strbuf_init(strbuf_t *s, int len) - strbuf_ensure_null(s); - } - --strbuf_t *strbuf_new(int len) -+strbuf_t *strbuf_new(size_t len) - { - strbuf_t *s; - -@@ -81,20 +82,10 @@ strbuf_t *strbuf_new(int len) - return s; - } - --void strbuf_set_increment(strbuf_t *s, int increment) --{ -- /* Increment > 0: Linear buffer growth rate -- * Increment < -1: Exponential buffer growth rate */ -- if (increment == 0 || increment == -1) -- die("BUG: Invalid string increment"); -- -- s->increment = increment; --} -- - static inline void debug_stats(strbuf_t *s) - { - if (s->debug) { -- fprintf(stderr, "strbuf(%lx) reallocs: %d, length: %d, size: %d\n", -+ fprintf(stderr, "strbuf(%lx) reallocs: %d, length: %zd, size: %zd\n", - (long)s, s->reallocs, s->length, s->size); - } - } -@@ -113,7 +104,7 @@ void strbuf_free(strbuf_t *s) - free(s); - } - --char *strbuf_free_to_string(strbuf_t *s, int *len) -+char *strbuf_free_to_string(strbuf_t *s, size_t *len) - { - char *buf; - -@@ -131,57 +122,63 @@ char *strbuf_free_to_string(strbuf_t *s, int *len) - return buf; - } - --static int calculate_new_size(strbuf_t *s, int len) -+static size_t calculate_new_size(strbuf_t *s, size_t len) - { -- int reqsize, newsize; -+ size_t reqsize, newsize; - - if (len <= 0) - die("BUG: Invalid strbuf length requested"); - - /* Ensure there is room for optional NULL termination */ - reqsize = len + 1; -+ if (reqsize < len) -+ die("Overflow, len: %zu", len); - - /* If the user has requested to shrink the buffer, do it exactly */ - if (s->size > reqsize) - return reqsize; - - newsize = s->size; -- if (s->increment < 0) { -+ if (reqsize >= SIZE_MAX / 2) { -+ newsize = reqsize; -+ } else { - /* Exponential sizing */ - while (newsize < reqsize) -- newsize *= -s->increment; -- } else { -- /* Linear sizing */ -- newsize = ((newsize + s->increment - 1) / s->increment) * s->increment; -+ newsize *= 2; - } - -+ if (newsize < reqsize) -+ die("BUG: strbuf length would overflow, len: %zu", len); -+ -+ - return newsize; - } - - - /* Ensure strbuf can handle a string length bytes long (ignoring NULL - * optional termination). */ --void strbuf_resize(strbuf_t *s, int len) -+void strbuf_resize(strbuf_t *s, size_t len) - { -- int newsize; -+ size_t newsize; - - newsize = calculate_new_size(s, len); - - if (s->debug > 1) { -- fprintf(stderr, "strbuf(%lx) resize: %d => %d\n", -+ fprintf(stderr, "strbuf(%lx) resize: %zd => %zd\n", - (long)s, s->size, newsize); - } - - s->size = newsize; - s->buf = realloc(s->buf, s->size); - if (!s->buf) -- die("Out of memory"); -+ die("Out of memory, len: %zu", len); - s->reallocs++; - } - - void strbuf_append_string(strbuf_t *s, const char *str) - { -- int space, i; -+ int i; -+ size_t space; - - space = strbuf_empty_length(s); - -@@ -197,55 +194,6 @@ void strbuf_append_string(strbuf_t *s, const char *str) - } - } - --/* strbuf_append_fmt() should only be used when an upper bound -- * is known for the output string. 
*/ --void strbuf_append_fmt(strbuf_t *s, int len, const char *fmt, ...) --{ -- va_list arg; -- int fmt_len; -- -- strbuf_ensure_empty_length(s, len); -- -- va_start(arg, fmt); -- fmt_len = vsnprintf(s->buf + s->length, len, fmt, arg); -- va_end(arg); -- -- if (fmt_len < 0) -- die("BUG: Unable to convert number"); /* This should never happen.. */ -- -- s->length += fmt_len; --} -- --/* strbuf_append_fmt_retry() can be used when the there is no known -- * upper bound for the output string. */ --void strbuf_append_fmt_retry(strbuf_t *s, const char *fmt, ...) --{ -- va_list arg; -- int fmt_len, try; -- int empty_len; -- -- /* If the first attempt to append fails, resize the buffer appropriately -- * and try again */ -- for (try = 0; ; try++) { -- va_start(arg, fmt); -- /* Append the new formatted string */ -- /* fmt_len is the length of the string required, excluding the -- * trailing NULL */ -- empty_len = strbuf_empty_length(s); -- /* Add 1 since there is also space to store the terminating NULL. */ -- fmt_len = vsnprintf(s->buf + s->length, empty_len + 1, fmt, arg); -- va_end(arg); -- -- if (fmt_len <= empty_len) -- break; /* SUCCESS */ -- if (try > 0) -- die("BUG: length of formatted string changed"); -- -- strbuf_resize(s, s->length + fmt_len); -- } -- -- s->length += fmt_len; --} - - /* vi:ai et sw=4 ts=4: - */ -diff --git a/bundle/lua-cjson-2.1.0.8/strbuf.h b/bundle/lua-cjson-2.1.0.8/strbuf.h -index 5df0b7b..d77e0f4 100644 ---- a/bundle/lua-cjson-2.1.0.8/strbuf.h -+++ b/bundle/lua-cjson-2.1.0.8/strbuf.h -@@ -32,15 +32,13 @@ - - /* Size: Total bytes allocated to *buf - * Length: String length, excluding optional NULL terminator. -- * Increment: Allocation increments when resizing the string buffer. - * Dynamic: True if created via strbuf_new() - */ - - typedef struct { - char *buf; -- int size; -- int length; -- int increment; -+ size_t size; -+ size_t length; - int dynamic; - int reallocs; - int debug; -@@ -49,32 +47,27 @@ typedef struct { - #ifndef STRBUF_DEFAULT_SIZE - #define STRBUF_DEFAULT_SIZE 1023 - #endif --#ifndef STRBUF_DEFAULT_INCREMENT --#define STRBUF_DEFAULT_INCREMENT -2 --#endif - - /* Initialise */ --extern strbuf_t *strbuf_new(int len); --extern void strbuf_init(strbuf_t *s, int len); --extern void strbuf_set_increment(strbuf_t *s, int increment); -+extern strbuf_t *strbuf_new(size_t len); -+extern void strbuf_init(strbuf_t *s, size_t len); - - /* Release */ - extern void strbuf_free(strbuf_t *s); --extern char *strbuf_free_to_string(strbuf_t *s, int *len); -+extern char *strbuf_free_to_string(strbuf_t *s, size_t *len); - - /* Management */ --extern void strbuf_resize(strbuf_t *s, int len); --static int strbuf_empty_length(strbuf_t *s); --static int strbuf_length(strbuf_t *s); --static char *strbuf_string(strbuf_t *s, int *len); --static void strbuf_ensure_empty_length(strbuf_t *s, int len); -+extern void strbuf_resize(strbuf_t *s, size_t len); -+static size_t strbuf_empty_length(strbuf_t *s); -+static size_t strbuf_length(strbuf_t *s); -+static char *strbuf_string(strbuf_t *s, size_t *len); -+static void strbuf_ensure_empty_length(strbuf_t *s, size_t len); - static char *strbuf_empty_ptr(strbuf_t *s); --static void strbuf_extend_length(strbuf_t *s, int len); -+static void strbuf_extend_length(strbuf_t *s, size_t len); -+static void strbuf_set_length(strbuf_t *s, int len); - - /* Update */ --extern void strbuf_append_fmt(strbuf_t *s, int len, const char *fmt, ...); --extern void strbuf_append_fmt_retry(strbuf_t *s, const char *format, ...); --static void 
strbuf_append_mem(strbuf_t *s, const char *c, int len); -+static void strbuf_append_mem(strbuf_t *s, const char *c, size_t len); - extern void strbuf_append_string(strbuf_t *s, const char *str); - static void strbuf_append_char(strbuf_t *s, const char c); - static void strbuf_ensure_null(strbuf_t *s); -@@ -92,12 +85,12 @@ static inline int strbuf_allocated(strbuf_t *s) - - /* Return bytes remaining in the string buffer - * Ensure there is space for a NULL terminator. */ --static inline int strbuf_empty_length(strbuf_t *s) -+static inline size_t strbuf_empty_length(strbuf_t *s) - { - return s->size - s->length - 1; - } - --static inline void strbuf_ensure_empty_length(strbuf_t *s, int len) -+static inline void strbuf_ensure_empty_length(strbuf_t *s, size_t len) - { - if (len > strbuf_empty_length(s)) - strbuf_resize(s, s->length + len); -@@ -108,12 +101,17 @@ static inline char *strbuf_empty_ptr(strbuf_t *s) - return s->buf + s->length; - } - --static inline void strbuf_extend_length(strbuf_t *s, int len) -+static inline void strbuf_set_length(strbuf_t *s, int len) -+{ -+ s->length = len; -+} -+ -+static inline void strbuf_extend_length(strbuf_t *s, size_t len) - { - s->length += len; - } - --static inline int strbuf_length(strbuf_t *s) -+static inline size_t strbuf_length(strbuf_t *s) - { - return s->length; - } -@@ -129,14 +127,14 @@ static inline void strbuf_append_char_unsafe(strbuf_t *s, const char c) - s->buf[s->length++] = c; - } - --static inline void strbuf_append_mem(strbuf_t *s, const char *c, int len) -+static inline void strbuf_append_mem(strbuf_t *s, const char *c, size_t len) - { - strbuf_ensure_empty_length(s, len); - memcpy(s->buf + s->length, c, len); - s->length += len; - } - --static inline void strbuf_append_mem_unsafe(strbuf_t *s, const char *c, int len) -+static inline void strbuf_append_mem_unsafe(strbuf_t *s, const char *c, size_t len) - { - memcpy(s->buf + s->length, c, len); - s->length += len; -@@ -147,7 +145,7 @@ static inline void strbuf_ensure_null(strbuf_t *s) - s->buf[s->length] = 0; - } - --static inline char *strbuf_string(strbuf_t *s, int *len) -+static inline char *strbuf_string(strbuf_t *s, size_t *len) - { - if (len) - *len = s->length; diff --git a/build/openresty/patches/lua-resty-core-0.1.22_05-ngx-worker-pids.patch b/build/openresty/patches/lua-resty-core-0.1.22_05-ngx-worker-pids.patch deleted file mode 100644 index 96ef6e9a26b..00000000000 --- a/build/openresty/patches/lua-resty-core-0.1.22_05-ngx-worker-pids.patch +++ /dev/null @@ -1,81 +0,0 @@ -From 79f520183bb5b1a278d8a8be3f53659737232253 Mon Sep 17 00:00:00 2001 -From: attenuation -Date: Sun, 21 Aug 2022 22:17:30 +0800 -Subject: [PATCH] feat: add ngx.worker.pids to get all workers pid map - ---- - -diff --git a/bundle/lua-resty-core-0.1.22/lib/resty/core/worker.lua b/bundle/lua-resty-core-0.1.22/lib/resty/core/worker.lua -index c336debdb..187289786 100644 ---- a/bundle/lua-resty-core-0.1.22/lib/resty/core/worker.lua -+++ b/bundle/lua-resty-core-0.1.22/lib/resty/core/worker.lua -@@ -6,12 +6,14 @@ local base = require "resty.core.base" - - - local C = ffi.C -+local ffi_new = ffi.new - local new_tab = base.new_tab - local subsystem = ngx.config.subsystem - - - local ngx_lua_ffi_worker_id - local ngx_lua_ffi_worker_pid -+local ngx_lua_ffi_worker_pids - local ngx_lua_ffi_worker_count - local ngx_lua_ffi_worker_exiting - -@@ -23,12 +25,14 @@ if subsystem == "http" then - ffi.cdef[[ - int ngx_http_lua_ffi_worker_id(void); - int ngx_http_lua_ffi_worker_pid(void); -+ int 
ngx_http_lua_ffi_worker_pids(int *pids, size_t *pids_len); - int ngx_http_lua_ffi_worker_count(void); - int ngx_http_lua_ffi_worker_exiting(void); - ]] - - ngx_lua_ffi_worker_id = C.ngx_http_lua_ffi_worker_id - ngx_lua_ffi_worker_pid = C.ngx_http_lua_ffi_worker_pid -+ ngx_lua_ffi_worker_pids = C.ngx_http_lua_ffi_worker_pids - ngx_lua_ffi_worker_count = C.ngx_http_lua_ffi_worker_count - ngx_lua_ffi_worker_exiting = C.ngx_http_lua_ffi_worker_exiting - -@@ -36,12 +40,14 @@ elseif subsystem == "stream" then - ffi.cdef[[ - int ngx_stream_lua_ffi_worker_id(void); - int ngx_stream_lua_ffi_worker_pid(void); -+ int ngx_stream_lua_ffi_worker_pids(int *pids, size_t *pids_len); - int ngx_stream_lua_ffi_worker_count(void); - int ngx_stream_lua_ffi_worker_exiting(void); - ]] - - ngx_lua_ffi_worker_id = C.ngx_stream_lua_ffi_worker_id - ngx_lua_ffi_worker_pid = C.ngx_stream_lua_ffi_worker_pid -+ ngx_lua_ffi_worker_pids = C.ngx_stream_lua_ffi_worker_pids - ngx_lua_ffi_worker_count = C.ngx_stream_lua_ffi_worker_count - ngx_lua_ffi_worker_exiting = C.ngx_stream_lua_ffi_worker_exiting - end -@@ -56,6 +62,24 @@ function ngx.worker.pid() - return ngx_lua_ffi_worker_pid() - end - -+local size_ptr = ffi_new("size_t[1]") -+local pids_ptr = ffi_new("int[1024]") -- using NGX_MAX_PROCESSES -+ -+function ngx.worker.pids() -+ if ngx.get_phase() == "init" or ngx.get_phase() == "init_worker" then -+ return nil, "API disabled in the current context" -+ end -+ -+ local res = ngx_lua_ffi_worker_pids(pids_ptr, size_ptr) -+ -+ local pids = {} -+ if res == 0 then -+ for i = 1, tonumber(size_ptr[0]) do -+ pids[i] = pids_ptr[i-1] -+ end -+ end -+ return pids -+end - - function ngx.worker.id() - local id = ngx_lua_ffi_worker_id() \ No newline at end of file diff --git a/build/openresty/patches/nginx-1.19.9_06-set-ssl-option-ignore-unexpected-eof.patch b/build/openresty/patches/nginx-1.19.9_06-set-ssl-option-ignore-unexpected-eof.patch deleted file mode 100644 index d42f8902a4a..00000000000 --- a/build/openresty/patches/nginx-1.19.9_06-set-ssl-option-ignore-unexpected-eof.patch +++ /dev/null @@ -1,15 +0,0 @@ -diff --git a/bundle/nginx-1.19.9/src/event/ngx_event_openssl.c b/bundle/nginx-1.19.9/src/event/ngx_event_openssl.c -index 6361810..54ff114 100644 ---- a/bundle/nginx-1.19.9/src/event/ngx_event_openssl.c -+++ b/bundle/nginx-1.19.9/src/event/ngx_event_openssl.c -@@ -378,6 +378,10 @@ ngx_ssl_create(ngx_ssl_t *ssl, ngx_uint_t protocols, void *data) - SSL_CTX_set_options(ssl->ctx, SSL_OP_NO_CLIENT_RENEGOTIATION); - #endif - -+#ifdef SSL_OP_IGNORE_UNEXPECTED_EOF -+ SSL_CTX_set_options(ssl->ctx, SSL_OP_IGNORE_UNEXPECTED_EOF); -+#endif -+ - #ifdef SSL_MODE_RELEASE_BUFFERS - SSL_CTX_set_mode(ssl->ctx, SSL_MODE_RELEASE_BUFFERS); - #endif diff --git a/build/openresty/patches/nginx-1.19.9_09-http2-rapid-reset-ddos-attack-cve-2023-44487.patch b/build/openresty/patches/nginx-1.19.9_09-http2-rapid-reset-ddos-attack-cve-2023-44487.patch deleted file mode 100644 index 4ff6a377ac5..00000000000 --- a/build/openresty/patches/nginx-1.19.9_09-http2-rapid-reset-ddos-attack-cve-2023-44487.patch +++ /dev/null @@ -1,53 +0,0 @@ -diff --git a/bundle/nginx-1.19.9/src/http/v2/ngx_http_v2.c b/bundle/nginx-1.19.9/src/http/v2/ngx_http_v2.c -index 3afa8b6..228b060 100644 ---- a/bundle/nginx-1.19.9/src/http/v2/ngx_http_v2.c -+++ b/bundle/nginx-1.19.9/src/http/v2/ngx_http_v2.c -@@ -361,6 +361,7 @@ ngx_http_v2_read_handler(ngx_event_t *rev) - ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http2 read handler"); - - h2c->blocked = 1; -+ h2c->new_streams = 0; - - 
if (c->close) { - c->close = 0; -@@ -1321,6 +1322,14 @@ ngx_http_v2_state_headers(ngx_http_v2_connection_t *h2c, u_char *pos, - goto rst_stream; - } - -+ if (h2c->new_streams++ >= 2 * h2scf->concurrent_streams) { -+ ngx_log_error(NGX_LOG_INFO, h2c->connection->log, 0, -+ "client sent too many streams at once"); -+ -+ status = NGX_HTTP_V2_REFUSED_STREAM; -+ goto rst_stream; -+ } -+ - if (!h2c->settings_ack - && !(h2c->state.flags & NGX_HTTP_V2_END_STREAM_FLAG) - && h2scf->preread_size < NGX_HTTP_V2_DEFAULT_WINDOW) -@@ -1386,6 +1395,12 @@ ngx_http_v2_state_headers(ngx_http_v2_connection_t *h2c, u_char *pos, - - rst_stream: - -+ if (h2c->refused_streams++ > ngx_max(h2scf->concurrent_streams, 100)) { -+ ngx_log_error(NGX_LOG_INFO, h2c->connection->log, 0, -+ "client sent too many refused streams"); -+ return ngx_http_v2_connection_error(h2c, NGX_HTTP_V2_NO_ERROR); -+ } -+ - if (ngx_http_v2_send_rst_stream(h2c, h2c->state.sid, status) != NGX_OK) { - return ngx_http_v2_connection_error(h2c, NGX_HTTP_V2_INTERNAL_ERROR); - } -diff --git a/bundle/nginx-1.19.9/src/http/v2/ngx_http_v2.h b/bundle/nginx-1.19.9/src/http/v2/ngx_http_v2.h -index 0eceae3..aef40bb 100644 ---- a/bundle/nginx-1.19.9/src/http/v2/ngx_http_v2.h -+++ b/bundle/nginx-1.19.9/src/http/v2/ngx_http_v2.h -@@ -124,6 +124,8 @@ struct ngx_http_v2_connection_s { - ngx_uint_t processing; - ngx_uint_t frames; - ngx_uint_t idle; -+ ngx_uint_t new_streams; -+ ngx_uint_t refused_streams; - ngx_uint_t priority_limit; - - ngx_uint_t pushing; diff --git a/build/openresty/patches/nginx-cross-endianness-fix.patch b/build/openresty/patches/nginx-cross-endianness-fix.patch deleted file mode 100644 index da3d6745705..00000000000 --- a/build/openresty/patches/nginx-cross-endianness-fix.patch +++ /dev/null @@ -1,79 +0,0 @@ -# http://cgit.openembedded.org/meta-openembedded/tree/meta-webserver/recipes-httpd/nginx/files/0001-Allow-the-overriding-of-the-endianness-via-the-confi.patch -From be9970aa16c5142ef814531d74a07990a8e9eb14 Mon Sep 17 00:00:00 2001 -From: Derek Straka -Date: Fri, 1 Dec 2017 10:32:29 -0500 -Subject: [PATCH] Allow the overriding of the endianness via the configure flag - --with-endian - -The existing configure options contain the --with-endian; however, the command -line flag does not actually function. It does not set the endianness and it -appears to do nothing. - -Upstream-Status: Pending - -Signed-off-by: Derek Straka - -diff --git a/auto/endianness b/auto/endianness -index 1b552b6..be84487 100644 ---- a/bundle/nginx-1.19.9/endianness -+++ b/bundle/nginx-1.19.9/auto/endianness -@@ -13,7 +13,13 @@ checking for system byte ordering - END - - --cat << END > $NGX_AUTOTEST.c -+if [ ".$NGX_WITH_ENDIAN" = ".little" ]; then -+ echo " little endian" -+ have=NGX_HAVE_LITTLE_ENDIAN . auto/have -+elif [ ".$NGX_WITH_ENDIAN" = ".big" ]; then -+ echo " big endian" -+else -+ cat << END > $NGX_AUTOTEST.c - - int main(void) { - int i = 0x11223344; -@@ -26,25 +32,26 @@ int main(void) { - - END - --ngx_test="$CC $CC_TEST_FLAGS $CC_AUX_FLAGS \ -- -o $NGX_AUTOTEST $NGX_AUTOTEST.c $NGX_LD_OPT $ngx_feature_libs" -+ ngx_test="$CC $CC_TEST_FLAGS $CC_AUX_FLAGS \ -+ -o $NGX_AUTOTEST $NGX_AUTOTEST.c $NGX_LD_OPT $ngx_feature_libs" - --eval "$ngx_test >> $NGX_AUTOCONF_ERR 2>&1" -+ eval "$ngx_test >> $NGX_AUTOCONF_ERR 2>&1" - --if [ -x $NGX_AUTOTEST ]; then -- if $NGX_AUTOTEST >/dev/null 2>&1; then -- echo " little endian" -- have=NGX_HAVE_LITTLE_ENDIAN . 
auto/have -- else -- echo " big endian" -- fi -+ if [ -x $NGX_AUTOTEST ]; then -+ if $NGX_AUTOTEST >/dev/null 2>&1; then -+ echo " little endian" -+ have=NGX_HAVE_LITTLE_ENDIAN . auto/have -+ else -+ echo " big endian" -+ fi - -- rm -rf $NGX_AUTOTEST* -+ rm -rf $NGX_AUTOTEST* - --else -- rm -rf $NGX_AUTOTEST* -+ else -+ rm -rf $NGX_AUTOTEST* - -- echo -- echo "$0: error: cannot detect system byte ordering" -- exit 1 -+ echo -+ echo "$0: error: cannot detect system byte ordering" -+ exit 1 -+ fi - fi --- -2.7.4 \ No newline at end of file diff --git a/build/openresty/patches/nginx-cross.patch b/build/openresty/patches/nginx-cross.patch deleted file mode 100644 index f83c19d0526..00000000000 --- a/build/openresty/patches/nginx-cross.patch +++ /dev/null @@ -1,214 +0,0 @@ -Rebased from http://cgit.openembedded.org/meta-openembedded/tree/meta-webserver/recipes-httpd/nginx/files/nginx-cross.patch - - -=================================================================== -diff --git a/bundle/nginx-1.19.9/auto/feature b/bundle/nginx-1.19.9/auto/feature -index 3561f59..d6a2889 100644 ---- a/bundle/nginx-1.19.9/auto/feature -+++ b/bundle/nginx-1.19.9/auto/feature -@@ -49,12 +49,20 @@ eval "/bin/sh -c \"$ngx_test\" >> $NGX_AUTOCONF_ERR 2>&1" - - if [ -x $NGX_AUTOTEST ]; then - -+ if [ ".$NGX_CROSS_COMPILE" = ".yes" ]; then -+ NGX_AUTOTEST_EXEC="true" -+ NGX_FOUND_MSG=" (not tested, cross compiling)" -+ else -+ NGX_AUTOTEST_EXEC="$NGX_AUTOTEST" -+ NGX_FOUND_MSG="" -+ fi -+ - case "$ngx_feature_run" in - - yes) - # /bin/sh is used to intercept "Killed" or "Abort trap" messages -- if /bin/sh -c $NGX_AUTOTEST >> $NGX_AUTOCONF_ERR 2>&1; then -- echo " found" -+ if /bin/sh -c $NGX_AUTOTEST_EXEC >> $NGX_AUTOCONF_ERR 2>&1; then -+ echo " found$NGX_FOUND_MSG" - ngx_found=yes - - if test -n "$ngx_feature_name"; then -@@ -68,17 +76,27 @@ if [ -x $NGX_AUTOTEST ]; then - - value) - # /bin/sh is used to intercept "Killed" or "Abort trap" messages -- if /bin/sh -c $NGX_AUTOTEST >> $NGX_AUTOCONF_ERR 2>&1; then -- echo " found" -+ if /bin/sh -c $NGX_AUTOTEST_EXEC >> $NGX_AUTOCONF_ERR 2>&1; then -+ echo " found$NGX_FOUND_MSG" - ngx_found=yes - -- cat << END >> $NGX_AUTO_CONFIG_H -+ if [ ".$NGX_CROSS_COMPILE" = ".yes" ]; then -+ cat << END >> $NGX_AUTO_CONFIG_H - - #ifndef $ngx_feature_name --#define $ngx_feature_name `$NGX_AUTOTEST` -+#define $ngx_feature_name $(eval "echo \$NGX_WITH_${ngx_feature_name}") - #endif - - END -+ else -+ cat << END >> $NGX_AUTO_CONFIG_H -+ -+#ifndef $ngx_feature_name -+#define $ngx_feature_name `$NGX_AUTOTEST_EXEC` -+#endif -+ -+END -+ fi - else - echo " found but is not working" - fi -@@ -86,7 +104,7 @@ END - - bug) - # /bin/sh is used to intercept "Killed" or "Abort trap" messages -- if /bin/sh -c $NGX_AUTOTEST >> $NGX_AUTOCONF_ERR 2>&1; then -+ if /bin/sh -c $NGX_AUTOTEST_EXEC >> $NGX_AUTOCONF_ERR 2>&1; then - echo " not found" - - else -diff --git a/bundle/nginx-1.19.9/auto/options b/bundle/nginx-1.19.9/auto/options -index 182c799..e9eb7b8 100644 ---- a/bundle/nginx-1.19.9/auto/options -+++ b/bundle/nginx-1.19.9/auto/options -@@ -400,6 +400,18 @@ $0: warning: the \"--with-sha1-asm\" option is deprecated" - --test-build-epoll) NGX_TEST_BUILD_EPOLL=YES ;; - --test-build-solaris-sendfilev) NGX_TEST_BUILD_SOLARIS_SENDFILEV=YES ;; - -+ # cross compile support -+ --with-int=*) NGX_WITH_INT="$value" ;; -+ --with-long=*) NGX_WITH_LONG="$value" ;; -+ --with-long-long=*) NGX_WITH_LONG_LONG="$value" ;; -+ --with-ptr-size=*) NGX_WITH_PTR_SIZE="$value" ;; -+ --with-sig-atomic-t=*) 
NGX_WITH_SIG_ATOMIC_T="$value" ;; -+ --with-size-t=*) NGX_WITH_SIZE_T="$value" ;; -+ --with-off-t=*) NGX_WITH_OFF_T="$value" ;; -+ --with-time-t=*) NGX_WITH_TIME_T="$value" ;; -+ --with-sys-nerr=*) NGX_WITH_NGX_SYS_NERR="$value" ;; -+ --with-endian=*) NGX_WITH_ENDIAN="$value" ;; -+ - *) - echo "$0: error: invalid option \"$option\"" - exit 1 -@@ -590,6 +602,17 @@ cat << END - - --with-debug enable debug logging - -+ --with-int=VALUE force int size -+ --with-long=VALUE force long size -+ --with-long-long=VALUE force long long size -+ --with-ptr-size=VALUE force pointer size -+ --with-sig-atomic-t=VALUE force sig_atomic_t size -+ --with-size-t=VALUE force size_t size -+ --with-off-t=VALUE force off_t size -+ --with-time-t=VALUE force time_t size -+ --with-sys-nerr=VALUE force sys_nerr value -+ --with-endian=VALUE force system endianess -+ - END - - exit 1 -@@ -598,6 +621,8 @@ fi - - if [ ".$NGX_PLATFORM" = ".win32" ]; then - NGX_WINE=$WINE -+elif [ ! -z "$NGX_PLATFORM" ]; then -+ NGX_CROSS_COMPILE="yes" - fi - - -diff --git a/bundle/nginx-1.19.9/auto/types/sizeof b/bundle/nginx-1.19.9/auto/types/sizeof -index 480d8cf..23c5171 100644 ---- a/bundle/nginx-1.19.9/auto/types/sizeof -+++ b/bundle/nginx-1.19.9/auto/types/sizeof -@@ -12,9 +12,12 @@ checking for $ngx_type size - - END - --ngx_size= -+ngx_size=$(eval "echo \$NGX_WITH_${ngx_param}") - --cat << END > $NGX_AUTOTEST.c -+if [ ".$ngx_size" != "." ]; then -+ echo " $ngx_size bytes" -+else -+ cat << END > $NGX_AUTOTEST.c - - #include - #include -@@ -33,15 +36,16 @@ int main(void) { - END - - --ngx_test="$CC $CC_TEST_FLAGS $CC_AUX_FLAGS \ -- -o $NGX_AUTOTEST $NGX_AUTOTEST.c $NGX_LD_OPT $ngx_feature_libs" -+ ngx_test="$CC $CC_TEST_FLAGS $CC_AUX_FLAGS \ -+ -o $NGX_AUTOTEST $NGX_AUTOTEST.c $NGX_LD_OPT $ngx_feature_libs" - --eval "$ngx_test >> $NGX_AUTOCONF_ERR 2>&1" -+ eval "$ngx_test >> $NGX_AUTOCONF_ERR 2>&1" - - --if [ -x $NGX_AUTOTEST ]; then -- ngx_size=`$NGX_AUTOTEST` -- echo " $ngx_size bytes" -+ if [ -x $NGX_AUTOTEST ]; then -+ ngx_size=`$NGX_AUTOTEST` -+ echo " $ngx_size bytes" -+ fi - fi - - -diff --git a/bundle/nginx-1.19.9/auto/unix b/bundle/nginx-1.19.9/auto/unix -index b41c70f..febbf3c 100644 ---- a/bundle/nginx-1.19.9/auto/unix -+++ b/bundle/nginx-1.19.9/auto/unix -@@ -592,13 +592,13 @@ ngx_feature_libs= - - # C types - --ngx_type="int"; . auto/types/sizeof -+ngx_type="int"; ngx_param="INT"; . auto/types/sizeof - --ngx_type="long"; . auto/types/sizeof -+ngx_type="long"; ngx_param="LONG"; . auto/types/sizeof - --ngx_type="long long"; . auto/types/sizeof -+ngx_type="long long"; ngx_param="LONG_LONG"; . auto/types/sizeof - --ngx_type="void *"; . auto/types/sizeof; ngx_ptr_size=$ngx_size -+ngx_type="void *"; ngx_param="PTR_SIZE"; . auto/types/sizeof; ngx_ptr_size=$ngx_size - ngx_param=NGX_PTR_SIZE; ngx_value=$ngx_size; . auto/types/value - - -@@ -609,7 +609,7 @@ NGX_INCLUDE_AUTO_CONFIG_H="#include \"ngx_auto_config.h\"" - ngx_type="uint32_t"; ngx_types="u_int32_t"; . auto/types/typedef - ngx_type="uint64_t"; ngx_types="u_int64_t"; . auto/types/typedef - --ngx_type="sig_atomic_t"; ngx_types="int"; . auto/types/typedef -+ngx_type="sig_atomic_t"; ngx_param="SIG_ATOMIC_T"; ngx_types="int"; . auto/types/typedef - . auto/types/sizeof - ngx_param=NGX_SIG_ATOMIC_T_SIZE; ngx_value=$ngx_size; . auto/types/value - -@@ -625,15 +625,15 @@ ngx_type="rlim_t"; ngx_types="int"; . auto/types/typedef - - . auto/endianness - --ngx_type="size_t"; . auto/types/sizeof -+ngx_type="size_t"; ngx_param="SIZE_T"; . 
auto/types/sizeof - ngx_param=NGX_MAX_SIZE_T_VALUE; ngx_value=$ngx_max_value; . auto/types/value - ngx_param=NGX_SIZE_T_LEN; ngx_value=$ngx_max_len; . auto/types/value - --ngx_type="off_t"; . auto/types/sizeof -+ngx_type="off_t"; ngx_param="OFF_T"; . auto/types/sizeof - ngx_param=NGX_MAX_OFF_T_VALUE; ngx_value=$ngx_max_value; . auto/types/value - ngx_param=NGX_OFF_T_LEN; ngx_value=$ngx_max_len; . auto/types/value - --ngx_type="time_t"; . auto/types/sizeof -+ngx_type="time_t"; ngx_param="TIME_T"; . auto/types/sizeof - ngx_param=NGX_TIME_T_SIZE; ngx_value=$ngx_size; . auto/types/value - ngx_param=NGX_TIME_T_LEN; ngx_value=$ngx_max_len; . auto/types/value - ngx_param=NGX_MAX_TIME_T_VALUE; ngx_value=$ngx_max_value; . auto/types/value diff --git a/build/openresty/patches/ngx_lua-0.10.20_03-ngx-worker-pids.patch b/build/openresty/patches/ngx_lua-0.10.20_03-ngx-worker-pids.patch deleted file mode 100644 index 27b2f253123..00000000000 --- a/build/openresty/patches/ngx_lua-0.10.20_03-ngx-worker-pids.patch +++ /dev/null @@ -1,57 +0,0 @@ -From edfa0f984ec60bd0658b80643c2fd253f3c5ad0b Mon Sep 17 00:00:00 2001 -From: attenuation -Date: Sun, 21 Aug 2022 21:59:28 +0800 -Subject: [PATCH] feat: add ngx_http_lua_ffi_worker_pids to get all workers pid - map - ---- - -diff --git a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_worker.c b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_worker.c -index 0ca2d414e3..52ec34a844 100644 ---- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_worker.c -+++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_worker.c -@@ -8,6 +8,7 @@ - #define DDEBUG 0 - #endif - #include "ddebug.h" -+#include - - - #define NGX_PROCESS_PRIVILEGED_AGENT 99 -@@ -20,6 +21,36 @@ ngx_http_lua_ffi_worker_pid(void) - } - - -+int -+ngx_http_lua_ffi_worker_pids(int *pids, size_t *pids_len) -+{ -+ ngx_int_t i, n; -+ -+ n = 0; -+ for (i = 0; i < NGX_MAX_PROCESSES; i++) { -+ if (i != ngx_process_slot && ngx_processes[i].pid == 0) { -+ break; -+ } -+ -+ if (i == ngx_process_slot && ngx_processes[i].pid == 0) { -+ pids[n++] = ngx_pid; -+ } -+ -+ if (ngx_processes[i].pid > 0) { -+ pids[n++] = ngx_processes[i].pid; -+ } -+ } -+ -+ if (n == 0) { -+ return NGX_ERROR; -+ } -+ -+ *pids_len = n; -+ -+ return NGX_OK; -+} -+ -+ - int - ngx_http_lua_ffi_worker_id(void) - { \ No newline at end of file diff --git a/build/openresty/patches/ngx_lua-0.10.20_04-crash-when-buffering-with-invalid-if-match-header.patch b/build/openresty/patches/ngx_lua-0.10.20_04-crash-when-buffering-with-invalid-if-match-header.patch deleted file mode 100644 index cf4d3ed17e9..00000000000 --- a/build/openresty/patches/ngx_lua-0.10.20_04-crash-when-buffering-with-invalid-if-match-header.patch +++ /dev/null @@ -1,239 +0,0 @@ -diff --git a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_accessby.c b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_accessby.c -index 58c2514..d40eab1 100644 ---- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_accessby.c -+++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_accessby.c -@@ -240,7 +240,7 @@ ngx_http_lua_access_by_chunk(lua_State *L, ngx_http_request_t *r) - ngx_event_t *rev; - ngx_connection_t *c; - ngx_http_lua_ctx_t *ctx; -- ngx_http_cleanup_t *cln; -+ ngx_pool_cleanup_t *cln; - - ngx_http_lua_loc_conf_t *llcf; - -@@ -291,9 +291,9 @@ ngx_http_lua_access_by_chunk(lua_State *L, ngx_http_request_t *r) - - /* }}} */ - -- /* {{{ register request cleanup hooks */ -+ /* {{{ register nginx pool cleanup hooks */ - if (ctx->cleanup == NULL) { -- cln = ngx_http_cleanup_add(r, 0); -+ cln = ngx_pool_cleanup_add(r->pool, 0); - if (cln == NULL) { - return 
NGX_HTTP_INTERNAL_SERVER_ERROR; - } -diff --git a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_bodyfilterby.c b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_bodyfilterby.c -index 7560869..8e308ae 100644 ---- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_bodyfilterby.c -+++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_bodyfilterby.c -@@ -233,7 +233,7 @@ ngx_http_lua_body_filter(ngx_http_request_t *r, ngx_chain_t *in) - ngx_http_lua_ctx_t *ctx; - ngx_int_t rc; - uint16_t old_context; -- ngx_http_cleanup_t *cln; -+ ngx_pool_cleanup_t *cln; - ngx_chain_t *out; - ngx_http_lua_main_conf_t *lmcf; - -@@ -273,7 +273,7 @@ ngx_http_lua_body_filter(ngx_http_request_t *r, ngx_chain_t *in) - } - - if (ctx->cleanup == NULL) { -- cln = ngx_http_cleanup_add(r, 0); -+ cln = ngx_pool_cleanup_add(r->pool, 0); - if (cln == NULL) { - return NGX_ERROR; - } -diff --git a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_common.h b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_common.h -index 7a66cb1..0bfed59 100644 ---- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_common.h -+++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_common.h -@@ -540,7 +540,7 @@ typedef struct ngx_http_lua_ctx_s { - ngx_chain_t *busy_bufs; - ngx_chain_t *free_recv_bufs; - -- ngx_http_cleanup_pt *cleanup; -+ ngx_pool_cleanup_pt *cleanup; - - ngx_http_cleanup_t *free_cleanup; /* free list of cleanup records */ - -diff --git a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_contentby.c b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_contentby.c -index 76e6a07..5e2ae55 100644 ---- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_contentby.c -+++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_contentby.c -@@ -29,7 +29,7 @@ ngx_http_lua_content_by_chunk(lua_State *L, ngx_http_request_t *r) - lua_State *co; - ngx_event_t *rev; - ngx_http_lua_ctx_t *ctx; -- ngx_http_cleanup_t *cln; -+ ngx_pool_cleanup_t *cln; - - ngx_http_lua_loc_conf_t *llcf; - -@@ -83,7 +83,7 @@ ngx_http_lua_content_by_chunk(lua_State *L, ngx_http_request_t *r) - - /* {{{ register request cleanup hooks */ - if (ctx->cleanup == NULL) { -- cln = ngx_http_cleanup_add(r, 0); -+ cln = ngx_pool_cleanup_add(r->pool, 0); - if (cln == NULL) { - return NGX_HTTP_INTERNAL_SERVER_ERROR; - } -diff --git a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_directive.c b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_directive.c -index 1ec641e..e276663 100644 ---- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_directive.c -+++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_directive.c -@@ -1265,7 +1265,7 @@ ngx_http_lua_set_by_lua_init(ngx_http_request_t *r) - { - lua_State *L; - ngx_http_lua_ctx_t *ctx; -- ngx_http_cleanup_t *cln; -+ ngx_pool_cleanup_t *cln; - - ctx = ngx_http_get_module_ctx(r, ngx_http_lua_module); - if (ctx == NULL) { -@@ -1280,7 +1280,7 @@ ngx_http_lua_set_by_lua_init(ngx_http_request_t *r) - } - - if (ctx->cleanup == NULL) { -- cln = ngx_http_cleanup_add(r, 0); -+ cln = ngx_pool_cleanup_add(r->pool, 0); - if (cln == NULL) { - return NGX_ERROR; - } -diff --git a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_headerfilterby.c b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_headerfilterby.c -index 4741c72..9f49a8e 100644 ---- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_headerfilterby.c -+++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_headerfilterby.c -@@ -230,7 +230,7 @@ ngx_http_lua_header_filter(ngx_http_request_t *r) - ngx_http_lua_loc_conf_t *llcf; - ngx_http_lua_ctx_t *ctx; - ngx_int_t rc; -- ngx_http_cleanup_t *cln; -+ ngx_pool_cleanup_t *cln; - uint16_t old_context; - - ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, -@@ -259,7 +259,7 @@ 
ngx_http_lua_header_filter(ngx_http_request_t *r) - } - - if (ctx->cleanup == NULL) { -- cln = ngx_http_cleanup_add(r, 0); -+ cln = ngx_pool_cleanup_add(r->pool, 0); - if (cln == NULL) { - return NGX_ERROR; - } -diff --git a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_rewriteby.c b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_rewriteby.c -index d1eabec..4109f28 100644 ---- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_rewriteby.c -+++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_rewriteby.c -@@ -241,7 +241,7 @@ ngx_http_lua_rewrite_by_chunk(lua_State *L, ngx_http_request_t *r) - ngx_event_t *rev; - ngx_connection_t *c; - ngx_http_lua_ctx_t *ctx; -- ngx_http_cleanup_t *cln; -+ ngx_pool_cleanup_t *cln; - - ngx_http_lua_loc_conf_t *llcf; - -@@ -291,9 +291,9 @@ ngx_http_lua_rewrite_by_chunk(lua_State *L, ngx_http_request_t *r) - - /* }}} */ - -- /* {{{ register request cleanup hooks */ -+ /* {{{ register nginx pool cleanup hooks */ - if (ctx->cleanup == NULL) { -- cln = ngx_http_cleanup_add(r, 0); -+ cln = ngx_pool_cleanup_add(r->pool, 0); - if (cln == NULL) { - return NGX_HTTP_INTERNAL_SERVER_ERROR; - } -diff --git a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_udp.c b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_udp.c -index fd3e074..74fcac1 100644 ---- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_udp.c -+++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_socket_udp.c -@@ -591,7 +591,7 @@ ngx_http_lua_socket_resolve_retval_handler(ngx_http_request_t *r, - ngx_http_lua_ctx_t *ctx; - ngx_http_lua_co_ctx_t *coctx; - ngx_connection_t *c; -- ngx_http_cleanup_t *cln; -+ ngx_pool_cleanup_t *cln; - ngx_http_upstream_resolved_t *ur; - ngx_int_t rc; - ngx_http_lua_udp_connection_t *uc; -@@ -625,7 +625,7 @@ ngx_http_lua_socket_resolve_retval_handler(ngx_http_request_t *r, - } - - if (u->cleanup == NULL) { -- cln = ngx_http_cleanup_add(r, 0); -+ cln = ngx_pool_cleanup_add(r->pool, 0); - if (cln == NULL) { - u->ft_type |= NGX_HTTP_LUA_SOCKET_FT_ERROR; - lua_pushnil(L); -diff --git a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_ssl_certby.c b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_ssl_certby.c -index 6ed2f3f..2ca8ac3 100644 ---- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_ssl_certby.c -+++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_ssl_certby.c -@@ -443,7 +443,7 @@ ngx_http_lua_ssl_cert_by_chunk(lua_State *L, ngx_http_request_t *r) - ngx_int_t rc; - lua_State *co; - ngx_http_lua_ctx_t *ctx; -- ngx_http_cleanup_t *cln; -+ ngx_pool_cleanup_t *cln; - - ctx = ngx_http_get_module_ctx(r, ngx_http_lua_module); - -@@ -497,7 +497,7 @@ ngx_http_lua_ssl_cert_by_chunk(lua_State *L, ngx_http_request_t *r) - - /* register request cleanup hooks */ - if (ctx->cleanup == NULL) { -- cln = ngx_http_cleanup_add(r, 0); -+ cln = ngx_pool_cleanup_add(r->pool, 0); - if (cln == NULL) { - rc = NGX_ERROR; - ngx_http_lua_finalize_request(r, rc); -diff --git a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_ssl_session_fetchby.c b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_ssl_session_fetchby.c -index 8d8c42b..86e6502 100644 ---- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_ssl_session_fetchby.c -+++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_ssl_session_fetchby.c -@@ -468,7 +468,7 @@ ngx_http_lua_ssl_sess_fetch_by_chunk(lua_State *L, ngx_http_request_t *r) - ngx_int_t rc; - lua_State *co; - ngx_http_lua_ctx_t *ctx; -- ngx_http_cleanup_t *cln; -+ ngx_pool_cleanup_t *cln; - - ctx = ngx_http_get_module_ctx(r, ngx_http_lua_module); - -@@ -522,7 +522,7 @@ ngx_http_lua_ssl_sess_fetch_by_chunk(lua_State *L, ngx_http_request_t *r) - - /* register request cleanup hooks */ - if 
(ctx->cleanup == NULL) { -- cln = ngx_http_cleanup_add(r, 0); -+ cln = ngx_pool_cleanup_add(r->pool, 0); - if (cln == NULL) { - rc = NGX_ERROR; - ngx_http_lua_finalize_request(r, rc); -diff --git a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_timer.c b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_timer.c -index 353007d..7d03db1 100644 ---- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_timer.c -+++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_timer.c -@@ -509,7 +509,7 @@ ngx_http_lua_timer_handler(ngx_event_t *ev) - ngx_connection_t *c = NULL; - ngx_http_request_t *r = NULL; - ngx_http_lua_ctx_t *ctx; -- ngx_http_cleanup_t *cln; -+ ngx_pool_cleanup_t *cln; - ngx_pool_cleanup_t *pcln; - - ngx_http_lua_timer_ctx_t tctx; -@@ -608,7 +608,7 @@ ngx_http_lua_timer_handler(ngx_event_t *ev) - - L = ngx_http_lua_get_lua_vm(r, ctx); - -- cln = ngx_http_cleanup_add(r, 0); -+ cln = ngx_pool_cleanup_add(r->pool, 0); - if (cln == NULL) { - errmsg = "could not add request cleanup"; - goto failed; diff --git a/build/openresty/patches/ngx_stream_lua-0.0.10_02-ngx-worker-pids.patch b/build/openresty/patches/ngx_stream_lua-0.0.10_02-ngx-worker-pids.patch deleted file mode 100644 index 96fd7ff5033..00000000000 --- a/build/openresty/patches/ngx_stream_lua-0.0.10_02-ngx-worker-pids.patch +++ /dev/null @@ -1,49 +0,0 @@ -From 9ce0848cff7c3c5eb0a7d5adfe2de22ea98e1e63 Mon Sep 17 00:00:00 2001 -From: attenuation -Date: Sun, 21 Aug 2022 22:12:22 +0800 -Subject: [PATCH] feat: add ngx_stream_lua_ffi_worker_pids to get all workers - pid map - ---- - -diff --git a/bundle/ngx_stream_lua-0.0.10/src/ngx_stream_lua_worker.c b/bundle/ngx_stream_lua-0.0.10/src/ngx_stream_lua_worker.c -index e4a9c915..4fb23314 100644 ---- a/bundle/ngx_stream_lua-0.0.10/src/ngx_stream_lua_worker.c -+++ b/bundle/ngx_stream_lua-0.0.10/src/ngx_stream_lua_worker.c -@@ -28,6 +28,36 @@ ngx_stream_lua_ffi_worker_pid(void) - } - - -+int -+ngx_stream_lua_ffi_worker_pids(int *pids, size_t *pids_len) -+{ -+ ngx_int_t i, n; -+ -+ n = 0; -+ for (i = 0; i < NGX_MAX_PROCESSES; i++) { -+ if (i != ngx_process_slot && ngx_processes[i].pid == 0) { -+ break; -+ } -+ -+ if (i == ngx_process_slot && ngx_processes[i].pid == 0) { -+ pids[n++] = ngx_pid; -+ } -+ -+ if (ngx_processes[i].pid > 0) { -+ pids[n++] = ngx_processes[i].pid; -+ } -+ } -+ -+ if (n == 0) { -+ return NGX_ERROR; -+ } -+ -+ *pids_len = n; -+ -+ return NGX_OK; -+} -+ -+ - int - ngx_stream_lua_ffi_worker_id(void) - { \ No newline at end of file diff --git a/build/tests/01-base.sh b/build/tests/01-base.sh index 42235f323c1..ca5829a957f 100755 --- a/build/tests/01-base.sh +++ b/build/tests/01-base.sh @@ -98,7 +98,7 @@ msg_test 'resty CLI can be run by kong user' assert_exec 0 'kong' "/usr/local/openresty/bin/resty -e 'print(jit.version)'" msg_test 'resty CLI functions and returns valid version of LuaJIT' -assert_exec 0 'root' "/usr/local/openresty/bin/resty -e 'print(jit.version)' | grep -E 'LuaJIT\ ([0-9]\.*){3}\-20[0-9]+'" +assert_exec 0 'root' "/usr/local/openresty/bin/resty -e 'print(jit.version)' | grep -E 'LuaJIT\ ([0-9]\.*){3}\-beta[0-9]+'" ### # From 5d3922cc4d1521b09f65c7396df0ddf78cfe606e Mon Sep 17 00:00:00 2001 From: Water-Melon Date: Sat, 22 Jun 2024 12:13:52 +0000 Subject: [PATCH 02/42] chore(build): fix docker image version --- .devcontainer/Dockerfile | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 92016901500..f407c17ee0a 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,12 +1,9 @@ -FROM 
kong/kong:3.0.0-ubuntu +FROM kong/kong:2.7.0 USER root -RUN apt-get update - -RUN apt-get install -y \ - build-essential \ - unzip \ - git \ - m4 \ - libyaml-dev +RUN apk add --update \ + alpine-sdk \ + build-base \ + bsd-compat-headers \ + m4 From 0b18b0128c4286d4e79bacfbfafa12fec911975b Mon Sep 17 00:00:00 2001 From: Niklaus Schen <8458369+Water-Melon@users.noreply.github.com> Date: Sun, 23 Jun 2024 02:07:49 +0800 Subject: [PATCH 03/42] chore(build): downgrade dependencies versions (#13282) --- .requirements | 7 +- build/luarocks/luarocks_repositories.bzl | 2 +- .../openssl/openssl_repositories.bzl | 2 +- .../LuaJIT-2.1-20210510_04_pass_cc_env.patch | 40 ++++ .../patches/nginx-cross-endianness-fix.patch | 79 +++++++ build/openresty/patches/nginx-cross.patch | 214 ++++++++++++++++++ .../fixtures/alpine-amd64.txt | 13 +- .../fixtures/amazonlinux-2-amd64.txt | 13 +- .../fixtures/amazonlinux-2023-amd64.txt | 13 +- .../fixtures/debian-10-amd64.txt | 13 +- .../fixtures/debian-11-amd64.txt | 13 +- .../explain_manifest/fixtures/el7-amd64.txt | 13 +- .../explain_manifest/fixtures/el8-amd64.txt | 13 +- .../fixtures/ubuntu-20.04-amd64.txt | 13 +- .../fixtures/ubuntu-22.04-amd64.txt | 13 +- 15 files changed, 347 insertions(+), 114 deletions(-) create mode 100644 build/openresty/patches/LuaJIT-2.1-20210510_04_pass_cc_env.patch create mode 100644 build/openresty/patches/nginx-cross-endianness-fix.patch create mode 100644 build/openresty/patches/nginx-cross.patch diff --git a/.requirements b/.requirements index 2bafa6cf7d1..4c2a1b0b423 100644 --- a/.requirements +++ b/.requirements @@ -12,8 +12,7 @@ KONG_BUILD_TOOLS_VERSION=4.40.1 KONG_NGINX_MODULE_BRANCH=0.2.2 PCRE=8.45 -OPENSSL=1.1.1w -LIBEXPAT=2.5.0 +OPENSSL=1.1.1o OPENRESTY=1.19.9.1 -LUAROCKS=3.9.2 -LUA_KONG_NGINX_MODULE=f52a34c17af4543245ff79e3227b9d514b8cfa5c # 0.2.2 +LUAROCKS=3.8.0 +LUA_KONG_NGINX_MODULE=6b2fa308e091e2daed2407dc38d54fbcd8fae768 # 0.2.1-sr1 diff --git a/build/luarocks/luarocks_repositories.bzl b/build/luarocks/luarocks_repositories.bzl index de37ea9ee07..87ad74f8515 100644 --- a/build/luarocks/luarocks_repositories.bzl +++ b/build/luarocks/luarocks_repositories.bzl @@ -12,7 +12,7 @@ def luarocks_repositories(): name = "luarocks", build_file = "//build/luarocks:BUILD.luarocks.bazel", strip_prefix = "luarocks-" + version, - sha256 = "bca6e4ecc02c203e070acdb5f586045d45c078896f6236eb46aa33ccd9b94edb", + sha256 = "56ab9b90f5acbc42eb7a94cf482e6c058a63e8a1effdf572b8b2a6323a06d923", urls = [ "https://luarocks.org/releases/luarocks-" + version + ".tar.gz", ], diff --git a/build/openresty/openssl/openssl_repositories.bzl b/build/openresty/openssl/openssl_repositories.bzl index 2bc32bdce0c..c549b59fb1b 100644 --- a/build/openresty/openssl/openssl_repositories.bzl +++ b/build/openresty/openssl/openssl_repositories.bzl @@ -12,7 +12,7 @@ def openssl_repositories(): http_archive, name = "openssl", build_file = "//build/openresty/openssl:BUILD.bazel", - sha256 = "cf3098950cb4d853ad95c0841f1f9c6d3dc102dccfcacd521d93925208b76ac8", + sha256 = "9384a2b0570dd80358841464677115df785edb941c71211f75076d72fe6b438f", strip_prefix = "openssl-" + version, urls = [ "https://www.openssl.org/source/openssl-" + version + ".tar.gz", diff --git a/build/openresty/patches/LuaJIT-2.1-20210510_04_pass_cc_env.patch b/build/openresty/patches/LuaJIT-2.1-20210510_04_pass_cc_env.patch new file mode 100644 index 00000000000..afe165ab78a --- /dev/null +++ b/build/openresty/patches/LuaJIT-2.1-20210510_04_pass_cc_env.patch @@ -0,0 +1,40 @@ +diff --git 
a/bundle/LuaJIT-2.1-20210510/src/Makefile b/bundle/LuaJIT-2.1-20210510/src/Makefile +index 47a21c9..c60b94e 100644 +--- a/bundle/LuaJIT-2.1-20210510/src/Makefile ++++ b/bundle/LuaJIT-2.1-20210510/src/Makefile +@@ -27,7 +27,8 @@ NODOTABIVER= 51 + DEFAULT_CC = gcc + # + # LuaJIT builds as a native 32 or 64 bit binary by default. +-CC= $(DEFAULT_CC) ++CC?= $(DEFAULT_CC) ++AR?= ar + # + # Use this if you want to force a 32 bit build on a 64 bit multilib OS. + #CC= $(DEFAULT_CC) -m32 +@@ -211,7 +212,7 @@ TARGET_CC= $(STATIC_CC) + TARGET_STCC= $(STATIC_CC) + TARGET_DYNCC= $(DYNAMIC_CC) + TARGET_LD= $(CROSS)$(CC) +-TARGET_AR= $(CROSS)ar rcus 2>/dev/null ++TARGET_AR= $(CROSS)$(AR) rcus 2>/dev/null + TARGET_STRIP= $(CROSS)strip + + TARGET_LIBPATH= $(or $(PREFIX),/usr/local)/$(or $(MULTILIB),lib) +@@ -291,11 +292,11 @@ TARGET_XCFLAGS+= $(CCOPT_$(TARGET_LJARCH)) + TARGET_ARCH+= $(patsubst %,-DLUAJIT_TARGET=LUAJIT_ARCH_%,$(TARGET_LJARCH)) + + ifneq (,$(PREFIX)) +-ifneq (/usr/local,$(PREFIX)) +- TARGET_XCFLAGS+= -DLUA_ROOT=\"$(PREFIX)\" +- ifneq (/usr,$(PREFIX)) +- TARGET_DYNXLDOPTS= -Wl,-rpath,$(TARGET_LIBPATH) +- endif ++ifneq (/usr/local,$(LUA_ROOT)) ++ TARGET_XCFLAGS+= -DLUA_ROOT=\"$(LUA_ROOT)\" ++endif ++ifneq (/usr,$(PREFIX)) ++ TARGET_DYNXLDOPTS= -Wl,-rpath,$(TARGET_LIBPATH) + endif + endif + ifneq (,$(MULTILIB)) \ No newline at end of file diff --git a/build/openresty/patches/nginx-cross-endianness-fix.patch b/build/openresty/patches/nginx-cross-endianness-fix.patch new file mode 100644 index 00000000000..da3d6745705 --- /dev/null +++ b/build/openresty/patches/nginx-cross-endianness-fix.patch @@ -0,0 +1,79 @@ +# http://cgit.openembedded.org/meta-openembedded/tree/meta-webserver/recipes-httpd/nginx/files/0001-Allow-the-overriding-of-the-endianness-via-the-confi.patch +From be9970aa16c5142ef814531d74a07990a8e9eb14 Mon Sep 17 00:00:00 2001 +From: Derek Straka +Date: Fri, 1 Dec 2017 10:32:29 -0500 +Subject: [PATCH] Allow the overriding of the endianness via the configure flag + --with-endian + +The existing configure options contain the --with-endian; however, the command +line flag does not actually function. It does not set the endianness and it +appears to do nothing. + +Upstream-Status: Pending + +Signed-off-by: Derek Straka + +diff --git a/auto/endianness b/auto/endianness +index 1b552b6..be84487 100644 +--- a/bundle/nginx-1.19.9/endianness ++++ b/bundle/nginx-1.19.9/auto/endianness +@@ -13,7 +13,13 @@ checking for system byte ordering + END + + +-cat << END > $NGX_AUTOTEST.c ++if [ ".$NGX_WITH_ENDIAN" = ".little" ]; then ++ echo " little endian" ++ have=NGX_HAVE_LITTLE_ENDIAN . auto/have ++elif [ ".$NGX_WITH_ENDIAN" = ".big" ]; then ++ echo " big endian" ++else ++ cat << END > $NGX_AUTOTEST.c + + int main(void) { + int i = 0x11223344; +@@ -26,25 +32,26 @@ int main(void) { + + END + +-ngx_test="$CC $CC_TEST_FLAGS $CC_AUX_FLAGS \ +- -o $NGX_AUTOTEST $NGX_AUTOTEST.c $NGX_LD_OPT $ngx_feature_libs" ++ ngx_test="$CC $CC_TEST_FLAGS $CC_AUX_FLAGS \ ++ -o $NGX_AUTOTEST $NGX_AUTOTEST.c $NGX_LD_OPT $ngx_feature_libs" + +-eval "$ngx_test >> $NGX_AUTOCONF_ERR 2>&1" ++ eval "$ngx_test >> $NGX_AUTOCONF_ERR 2>&1" + +-if [ -x $NGX_AUTOTEST ]; then +- if $NGX_AUTOTEST >/dev/null 2>&1; then +- echo " little endian" +- have=NGX_HAVE_LITTLE_ENDIAN . auto/have +- else +- echo " big endian" +- fi ++ if [ -x $NGX_AUTOTEST ]; then ++ if $NGX_AUTOTEST >/dev/null 2>&1; then ++ echo " little endian" ++ have=NGX_HAVE_LITTLE_ENDIAN . 
auto/have ++ else ++ echo " big endian" ++ fi + +- rm -rf $NGX_AUTOTEST* ++ rm -rf $NGX_AUTOTEST* + +-else +- rm -rf $NGX_AUTOTEST* ++ else ++ rm -rf $NGX_AUTOTEST* + +- echo +- echo "$0: error: cannot detect system byte ordering" +- exit 1 ++ echo ++ echo "$0: error: cannot detect system byte ordering" ++ exit 1 ++ fi + fi +-- +2.7.4 \ No newline at end of file diff --git a/build/openresty/patches/nginx-cross.patch b/build/openresty/patches/nginx-cross.patch new file mode 100644 index 00000000000..f83c19d0526 --- /dev/null +++ b/build/openresty/patches/nginx-cross.patch @@ -0,0 +1,214 @@ +Rebased from http://cgit.openembedded.org/meta-openembedded/tree/meta-webserver/recipes-httpd/nginx/files/nginx-cross.patch + + +=================================================================== +diff --git a/bundle/nginx-1.19.9/auto/feature b/bundle/nginx-1.19.9/auto/feature +index 3561f59..d6a2889 100644 +--- a/bundle/nginx-1.19.9/auto/feature ++++ b/bundle/nginx-1.19.9/auto/feature +@@ -49,12 +49,20 @@ eval "/bin/sh -c \"$ngx_test\" >> $NGX_AUTOCONF_ERR 2>&1" + + if [ -x $NGX_AUTOTEST ]; then + ++ if [ ".$NGX_CROSS_COMPILE" = ".yes" ]; then ++ NGX_AUTOTEST_EXEC="true" ++ NGX_FOUND_MSG=" (not tested, cross compiling)" ++ else ++ NGX_AUTOTEST_EXEC="$NGX_AUTOTEST" ++ NGX_FOUND_MSG="" ++ fi ++ + case "$ngx_feature_run" in + + yes) + # /bin/sh is used to intercept "Killed" or "Abort trap" messages +- if /bin/sh -c $NGX_AUTOTEST >> $NGX_AUTOCONF_ERR 2>&1; then +- echo " found" ++ if /bin/sh -c $NGX_AUTOTEST_EXEC >> $NGX_AUTOCONF_ERR 2>&1; then ++ echo " found$NGX_FOUND_MSG" + ngx_found=yes + + if test -n "$ngx_feature_name"; then +@@ -68,17 +76,27 @@ if [ -x $NGX_AUTOTEST ]; then + + value) + # /bin/sh is used to intercept "Killed" or "Abort trap" messages +- if /bin/sh -c $NGX_AUTOTEST >> $NGX_AUTOCONF_ERR 2>&1; then +- echo " found" ++ if /bin/sh -c $NGX_AUTOTEST_EXEC >> $NGX_AUTOCONF_ERR 2>&1; then ++ echo " found$NGX_FOUND_MSG" + ngx_found=yes + +- cat << END >> $NGX_AUTO_CONFIG_H ++ if [ ".$NGX_CROSS_COMPILE" = ".yes" ]; then ++ cat << END >> $NGX_AUTO_CONFIG_H + + #ifndef $ngx_feature_name +-#define $ngx_feature_name `$NGX_AUTOTEST` ++#define $ngx_feature_name $(eval "echo \$NGX_WITH_${ngx_feature_name}") + #endif + + END ++ else ++ cat << END >> $NGX_AUTO_CONFIG_H ++ ++#ifndef $ngx_feature_name ++#define $ngx_feature_name `$NGX_AUTOTEST_EXEC` ++#endif ++ ++END ++ fi + else + echo " found but is not working" + fi +@@ -86,7 +104,7 @@ END + + bug) + # /bin/sh is used to intercept "Killed" or "Abort trap" messages +- if /bin/sh -c $NGX_AUTOTEST >> $NGX_AUTOCONF_ERR 2>&1; then ++ if /bin/sh -c $NGX_AUTOTEST_EXEC >> $NGX_AUTOCONF_ERR 2>&1; then + echo " not found" + + else +diff --git a/bundle/nginx-1.19.9/auto/options b/bundle/nginx-1.19.9/auto/options +index 182c799..e9eb7b8 100644 +--- a/bundle/nginx-1.19.9/auto/options ++++ b/bundle/nginx-1.19.9/auto/options +@@ -400,6 +400,18 @@ $0: warning: the \"--with-sha1-asm\" option is deprecated" + --test-build-epoll) NGX_TEST_BUILD_EPOLL=YES ;; + --test-build-solaris-sendfilev) NGX_TEST_BUILD_SOLARIS_SENDFILEV=YES ;; + ++ # cross compile support ++ --with-int=*) NGX_WITH_INT="$value" ;; ++ --with-long=*) NGX_WITH_LONG="$value" ;; ++ --with-long-long=*) NGX_WITH_LONG_LONG="$value" ;; ++ --with-ptr-size=*) NGX_WITH_PTR_SIZE="$value" ;; ++ --with-sig-atomic-t=*) NGX_WITH_SIG_ATOMIC_T="$value" ;; ++ --with-size-t=*) NGX_WITH_SIZE_T="$value" ;; ++ --with-off-t=*) NGX_WITH_OFF_T="$value" ;; ++ --with-time-t=*) NGX_WITH_TIME_T="$value" ;; ++ --with-sys-nerr=*) 
NGX_WITH_NGX_SYS_NERR="$value" ;; ++ --with-endian=*) NGX_WITH_ENDIAN="$value" ;; ++ + *) + echo "$0: error: invalid option \"$option\"" + exit 1 +@@ -590,6 +602,17 @@ cat << END + + --with-debug enable debug logging + ++ --with-int=VALUE force int size ++ --with-long=VALUE force long size ++ --with-long-long=VALUE force long long size ++ --with-ptr-size=VALUE force pointer size ++ --with-sig-atomic-t=VALUE force sig_atomic_t size ++ --with-size-t=VALUE force size_t size ++ --with-off-t=VALUE force off_t size ++ --with-time-t=VALUE force time_t size ++ --with-sys-nerr=VALUE force sys_nerr value ++ --with-endian=VALUE force system endianess ++ + END + + exit 1 +@@ -598,6 +621,8 @@ fi + + if [ ".$NGX_PLATFORM" = ".win32" ]; then + NGX_WINE=$WINE ++elif [ ! -z "$NGX_PLATFORM" ]; then ++ NGX_CROSS_COMPILE="yes" + fi + + +diff --git a/bundle/nginx-1.19.9/auto/types/sizeof b/bundle/nginx-1.19.9/auto/types/sizeof +index 480d8cf..23c5171 100644 +--- a/bundle/nginx-1.19.9/auto/types/sizeof ++++ b/bundle/nginx-1.19.9/auto/types/sizeof +@@ -12,9 +12,12 @@ checking for $ngx_type size + + END + +-ngx_size= ++ngx_size=$(eval "echo \$NGX_WITH_${ngx_param}") + +-cat << END > $NGX_AUTOTEST.c ++if [ ".$ngx_size" != "." ]; then ++ echo " $ngx_size bytes" ++else ++ cat << END > $NGX_AUTOTEST.c + + #include + #include +@@ -33,15 +36,16 @@ int main(void) { + END + + +-ngx_test="$CC $CC_TEST_FLAGS $CC_AUX_FLAGS \ +- -o $NGX_AUTOTEST $NGX_AUTOTEST.c $NGX_LD_OPT $ngx_feature_libs" ++ ngx_test="$CC $CC_TEST_FLAGS $CC_AUX_FLAGS \ ++ -o $NGX_AUTOTEST $NGX_AUTOTEST.c $NGX_LD_OPT $ngx_feature_libs" + +-eval "$ngx_test >> $NGX_AUTOCONF_ERR 2>&1" ++ eval "$ngx_test >> $NGX_AUTOCONF_ERR 2>&1" + + +-if [ -x $NGX_AUTOTEST ]; then +- ngx_size=`$NGX_AUTOTEST` +- echo " $ngx_size bytes" ++ if [ -x $NGX_AUTOTEST ]; then ++ ngx_size=`$NGX_AUTOTEST` ++ echo " $ngx_size bytes" ++ fi + fi + + +diff --git a/bundle/nginx-1.19.9/auto/unix b/bundle/nginx-1.19.9/auto/unix +index b41c70f..febbf3c 100644 +--- a/bundle/nginx-1.19.9/auto/unix ++++ b/bundle/nginx-1.19.9/auto/unix +@@ -592,13 +592,13 @@ ngx_feature_libs= + + # C types + +-ngx_type="int"; . auto/types/sizeof ++ngx_type="int"; ngx_param="INT"; . auto/types/sizeof + +-ngx_type="long"; . auto/types/sizeof ++ngx_type="long"; ngx_param="LONG"; . auto/types/sizeof + +-ngx_type="long long"; . auto/types/sizeof ++ngx_type="long long"; ngx_param="LONG_LONG"; . auto/types/sizeof + +-ngx_type="void *"; . auto/types/sizeof; ngx_ptr_size=$ngx_size ++ngx_type="void *"; ngx_param="PTR_SIZE"; . auto/types/sizeof; ngx_ptr_size=$ngx_size + ngx_param=NGX_PTR_SIZE; ngx_value=$ngx_size; . auto/types/value + + +@@ -609,7 +609,7 @@ NGX_INCLUDE_AUTO_CONFIG_H="#include \"ngx_auto_config.h\"" + ngx_type="uint32_t"; ngx_types="u_int32_t"; . auto/types/typedef + ngx_type="uint64_t"; ngx_types="u_int64_t"; . auto/types/typedef + +-ngx_type="sig_atomic_t"; ngx_types="int"; . auto/types/typedef ++ngx_type="sig_atomic_t"; ngx_param="SIG_ATOMIC_T"; ngx_types="int"; . auto/types/typedef + . auto/types/sizeof + ngx_param=NGX_SIG_ATOMIC_T_SIZE; ngx_value=$ngx_size; . auto/types/value + +@@ -625,15 +625,15 @@ ngx_type="rlim_t"; ngx_types="int"; . auto/types/typedef + + . auto/endianness + +-ngx_type="size_t"; . auto/types/sizeof ++ngx_type="size_t"; ngx_param="SIZE_T"; . auto/types/sizeof + ngx_param=NGX_MAX_SIZE_T_VALUE; ngx_value=$ngx_max_value; . auto/types/value + ngx_param=NGX_SIZE_T_LEN; ngx_value=$ngx_max_len; . auto/types/value + +-ngx_type="off_t"; . 
auto/types/sizeof ++ngx_type="off_t"; ngx_param="OFF_T"; . auto/types/sizeof + ngx_param=NGX_MAX_OFF_T_VALUE; ngx_value=$ngx_max_value; . auto/types/value + ngx_param=NGX_OFF_T_LEN; ngx_value=$ngx_max_len; . auto/types/value + +-ngx_type="time_t"; . auto/types/sizeof ++ngx_type="time_t"; ngx_param="TIME_T"; . auto/types/sizeof + ngx_param=NGX_TIME_T_SIZE; ngx_value=$ngx_size; . auto/types/value + ngx_param=NGX_TIME_T_LEN; ngx_value=$ngx_max_len; . auto/types/value + ngx_param=NGX_MAX_TIME_T_VALUE; ngx_value=$ngx_max_value; . auto/types/value diff --git a/scripts/explain_manifest/fixtures/alpine-amd64.txt b/scripts/explain_manifest/fixtures/alpine-amd64.txt index 52d22d0e4a7..148e352515f 100644 --- a/scripts/explain_manifest/fixtures/alpine-amd64.txt +++ b/scripts/explain_manifest/fixtures/alpine-amd64.txt @@ -35,59 +35,48 @@ - Path : /usr/local/lib/lua/5.1/lfs.so Needed : - libc.so - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/lpeg.so Needed : - libc.so - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/lsyslog.so Needed : - libc.so - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/lua_pack.so Needed : - libc.so - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/lua_system_constants.so Needed : - libc.so - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/mime/core.so Needed : - libc.so - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/pb.so Needed : - libc.so - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/socket/core.so Needed : - libc.so - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/socket/serial.so Needed : - libc.so - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/socket/unix.so Needed : - libc.so - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/ssl.so Needed : - libssl.so.1.1 - libcrypto.so.1.1 - libc.so - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/yaml.so Needed : @@ -121,7 +110,7 @@ Modules : - lua-kong-nginx-module - lua-kong-nginx-module/stream - OpenSSL : OpenSSL 1.1.1w 11 Sep 2023 + OpenSSL : OpenSSL 1.1.1o 3 May 2022 DWARF : True DWARF - ngx_http_request_t related DWARF DIEs: True diff --git a/scripts/explain_manifest/fixtures/amazonlinux-2-amd64.txt b/scripts/explain_manifest/fixtures/amazonlinux-2-amd64.txt index ca282c5e38b..cb5d0045ac2 100644 --- a/scripts/explain_manifest/fixtures/amazonlinux-2-amd64.txt +++ b/scripts/explain_manifest/fixtures/amazonlinux-2-amd64.txt @@ -50,59 +50,48 @@ - Path : /usr/local/lib/lua/5.1/lfs.so Needed : - libc.so.6 - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/lpeg.so Needed : - libc.so.6 - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/lsyslog.so Needed : - libc.so.6 - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/lua_pack.so Needed : - libc.so.6 - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/lua_system_constants.so Needed : - libc.so.6 - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/mime/core.so Needed : - libc.so.6 - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/pb.so Needed : - libc.so.6 - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/socket/core.so Needed : - libc.so.6 - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/socket/serial.so Needed : - libc.so.6 - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/socket/unix.so Needed : - libc.so.6 - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/ssl.so Needed : - libssl.so.1.1 - libcrypto.so.1.1 - libc.so.6 - Rpath : 
/usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/yaml.so Needed : @@ -140,7 +129,7 @@ Modules : - lua-kong-nginx-module - lua-kong-nginx-module/stream - OpenSSL : OpenSSL 1.1.1w 11 Sep 2023 + OpenSSL : OpenSSL 1.1.1o 3 May 2022 DWARF : True DWARF - ngx_http_request_t related DWARF DIEs: True diff --git a/scripts/explain_manifest/fixtures/amazonlinux-2023-amd64.txt b/scripts/explain_manifest/fixtures/amazonlinux-2023-amd64.txt index a4f5e87087a..01328af3a9a 100644 --- a/scripts/explain_manifest/fixtures/amazonlinux-2023-amd64.txt +++ b/scripts/explain_manifest/fixtures/amazonlinux-2023-amd64.txt @@ -45,59 +45,48 @@ - Path : /usr/local/lib/lua/5.1/lfs.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/lpeg.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/lsyslog.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/lua_pack.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/lua_system_constants.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/mime/core.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/pb.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/socket/core.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/socket/serial.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/socket/unix.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/ssl.so Needed : - libssl.so.1.1 - libcrypto.so.1.1 - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/yaml.so Needed : @@ -133,7 +122,7 @@ Modules : - lua-kong-nginx-module - lua-kong-nginx-module/stream - OpenSSL : OpenSSL 1.1.1w 11 Sep 2023 + OpenSSL : OpenSSL 1.1.1o 3 May 2022 DWARF : True DWARF - ngx_http_request_t related DWARF DIEs: True diff --git a/scripts/explain_manifest/fixtures/debian-10-amd64.txt b/scripts/explain_manifest/fixtures/debian-10-amd64.txt index 860a16c4045..174773be772 100644 --- a/scripts/explain_manifest/fixtures/debian-10-amd64.txt +++ b/scripts/explain_manifest/fixtures/debian-10-amd64.txt @@ -50,59 +50,48 @@ - Path : /usr/local/lib/lua/5.1/lfs.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/lpeg.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/lsyslog.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/lua_pack.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/lua_system_constants.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/mime/core.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/pb.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/socket/core.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/socket/serial.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/socket/unix.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/ssl.so Needed : - libssl.so.1.1 - libcrypto.so.1.1 - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/yaml.so Needed : @@ -140,7 +129,7 @@ Modules : - lua-kong-nginx-module - lua-kong-nginx-module/stream - 
OpenSSL : OpenSSL 1.1.1w 11 Sep 2023 + OpenSSL : OpenSSL 1.1.1o 3 May 2022 DWARF : True DWARF - ngx_http_request_t related DWARF DIEs: True diff --git a/scripts/explain_manifest/fixtures/debian-11-amd64.txt b/scripts/explain_manifest/fixtures/debian-11-amd64.txt index e6393f58af3..9b20fbb595b 100644 --- a/scripts/explain_manifest/fixtures/debian-11-amd64.txt +++ b/scripts/explain_manifest/fixtures/debian-11-amd64.txt @@ -50,55 +50,44 @@ - Path : /usr/local/lib/lua/5.1/lfs.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/lpeg.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/lsyslog.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/lua_pack.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/lua_system_constants.so - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/mime/core.so - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/pb.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/socket/core.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/socket/serial.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/socket/unix.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/ssl.so Needed : - libssl.so.1.1 - libcrypto.so.1.1 - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/yaml.so Needed : @@ -131,7 +120,7 @@ Modules : - lua-kong-nginx-module - lua-kong-nginx-module/stream - OpenSSL : OpenSSL 1.1.1w 11 Sep 2023 + OpenSSL : OpenSSL 1.1.1o 3 May 2022 DWARF : True DWARF - ngx_http_request_t related DWARF DIEs: True diff --git a/scripts/explain_manifest/fixtures/el7-amd64.txt b/scripts/explain_manifest/fixtures/el7-amd64.txt index ca282c5e38b..cb5d0045ac2 100644 --- a/scripts/explain_manifest/fixtures/el7-amd64.txt +++ b/scripts/explain_manifest/fixtures/el7-amd64.txt @@ -50,59 +50,48 @@ - Path : /usr/local/lib/lua/5.1/lfs.so Needed : - libc.so.6 - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/lpeg.so Needed : - libc.so.6 - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/lsyslog.so Needed : - libc.so.6 - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/lua_pack.so Needed : - libc.so.6 - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/lua_system_constants.so Needed : - libc.so.6 - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/mime/core.so Needed : - libc.so.6 - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/pb.so Needed : - libc.so.6 - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/socket/core.so Needed : - libc.so.6 - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/socket/serial.so Needed : - libc.so.6 - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/socket/unix.so Needed : - libc.so.6 - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/ssl.so Needed : - libssl.so.1.1 - libcrypto.so.1.1 - libc.so.6 - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/yaml.so Needed : @@ -140,7 +129,7 @@ Modules : - lua-kong-nginx-module - lua-kong-nginx-module/stream - OpenSSL : OpenSSL 1.1.1w 11 Sep 2023 + OpenSSL : OpenSSL 1.1.1o 3 May 2022 DWARF : True DWARF - ngx_http_request_t related DWARF DIEs: True diff --git a/scripts/explain_manifest/fixtures/el8-amd64.txt b/scripts/explain_manifest/fixtures/el8-amd64.txt index 
ca282c5e38b..cb5d0045ac2 100644 --- a/scripts/explain_manifest/fixtures/el8-amd64.txt +++ b/scripts/explain_manifest/fixtures/el8-amd64.txt @@ -50,59 +50,48 @@ - Path : /usr/local/lib/lua/5.1/lfs.so Needed : - libc.so.6 - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/lpeg.so Needed : - libc.so.6 - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/lsyslog.so Needed : - libc.so.6 - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/lua_pack.so Needed : - libc.so.6 - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/lua_system_constants.so Needed : - libc.so.6 - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/mime/core.so Needed : - libc.so.6 - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/pb.so Needed : - libc.so.6 - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/socket/core.so Needed : - libc.so.6 - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/socket/serial.so Needed : - libc.so.6 - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/socket/unix.so Needed : - libc.so.6 - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/ssl.so Needed : - libssl.so.1.1 - libcrypto.so.1.1 - libc.so.6 - Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/yaml.so Needed : @@ -140,7 +129,7 @@ Modules : - lua-kong-nginx-module - lua-kong-nginx-module/stream - OpenSSL : OpenSSL 1.1.1w 11 Sep 2023 + OpenSSL : OpenSSL 1.1.1o 3 May 2022 DWARF : True DWARF - ngx_http_request_t related DWARF DIEs: True diff --git a/scripts/explain_manifest/fixtures/ubuntu-20.04-amd64.txt b/scripts/explain_manifest/fixtures/ubuntu-20.04-amd64.txt index 3f0729d62c9..4c29e30d397 100644 --- a/scripts/explain_manifest/fixtures/ubuntu-20.04-amd64.txt +++ b/scripts/explain_manifest/fixtures/ubuntu-20.04-amd64.txt @@ -50,57 +50,46 @@ - Path : /usr/local/lib/lua/5.1/lfs.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/lpeg.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/lsyslog.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/lua_pack.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/lua_system_constants.so - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/mime/core.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/pb.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/socket/core.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/socket/serial.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/socket/unix.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/ssl.so Needed : - libssl.so.1.1 - libcrypto.so.1.1 - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/yaml.so Needed : @@ -135,7 +124,7 @@ Modules : - lua-kong-nginx-module - lua-kong-nginx-module/stream - OpenSSL : OpenSSL 1.1.1w 11 Sep 2023 + OpenSSL : OpenSSL 1.1.1o 3 May 2022 DWARF : True DWARF - ngx_http_request_t related DWARF DIEs: True diff --git a/scripts/explain_manifest/fixtures/ubuntu-22.04-amd64.txt b/scripts/explain_manifest/fixtures/ubuntu-22.04-amd64.txt index db71f90ad5b..e736498f271 100644 --- a/scripts/explain_manifest/fixtures/ubuntu-22.04-amd64.txt +++ b/scripts/explain_manifest/fixtures/ubuntu-22.04-amd64.txt @@ -45,57 +45,46 @@ - Path : /usr/local/lib/lua/5.1/lfs.so Needed : - 
libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/lpeg.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/lsyslog.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/lua_pack.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/lua_system_constants.so - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/mime/core.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/pb.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/socket/core.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/socket/serial.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/socket/unix.so Needed : - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/ssl.so Needed : - libssl.so.1.1 - libcrypto.so.1.1 - libc.so.6 - Runpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/yaml.so Needed : @@ -128,7 +117,7 @@ Modules : - lua-kong-nginx-module - lua-kong-nginx-module/stream - OpenSSL : OpenSSL 1.1.1w 11 Sep 2023 + OpenSSL : OpenSSL 1.1.1o 3 May 2022 DWARF : True DWARF - ngx_http_request_t related DWARF DIEs: True From b5e01be636df43da7ee01c3c8ff6067630d52e13 Mon Sep 17 00:00:00 2001 From: Vinicius Mignot Date: Sat, 22 Jun 2024 15:54:13 -0300 Subject: [PATCH 04/42] fix(package): add missing zlib1g-dev dep --- build/package/nfpm.yaml | 1 + changelog/unreleased/kong/add_zlib1g-dev.yml | 2 ++ 2 files changed, 3 insertions(+) create mode 100644 changelog/unreleased/kong/add_zlib1g-dev.yml diff --git a/build/package/nfpm.yaml b/build/package/nfpm.yaml index 5b0d3aa902d..7c3940903cb 100644 --- a/build/package/nfpm.yaml +++ b/build/package/nfpm.yaml @@ -48,6 +48,7 @@ overrides: - libpcre3 - perl - libyaml-0-2 + - zlib1g-dev rpm: depends: - ca-certificates diff --git a/changelog/unreleased/kong/add_zlib1g-dev.yml b/changelog/unreleased/kong/add_zlib1g-dev.yml new file mode 100644 index 00000000000..8ca18a69a6c --- /dev/null +++ b/changelog/unreleased/kong/add_zlib1g-dev.yml @@ -0,0 +1,2 @@ +message: Added zlib1g-dev dependency to Ubuntu packages. +type: bugfix From 0e327f87e7f10f15a81aa8b33d4287e55b7407c1 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Sun, 23 Jun 2024 18:53:20 +0800 Subject: [PATCH 05/42] chore(ci): skip uploading centos packages to cloudsmith --- .github/workflows/release.yml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 517eb7e1b03..b59115e9227 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -618,7 +618,12 @@ jobs: CLOUDSMITH_API_KEY: ${{ secrets.CLOUDSMITH_API_KEY }} CLOUDSMITH_DRY_RUN: '' IGNORE_CLOUDSMITH_FAILURES: ${{ vars.IGNORE_CLOUDSMITH_FAILURES }} - USE_CLOUDSMITH: ${{ vars.USE_CLOUDSMITH }} + # skip CentOS package uploads for Cloudsmith + # + # Cloudsmith doesn't support CentOS as a unique distribution + # there is only "el" (enterprise linux) distributions that cover all + # RedHat-family OSs and that distribution is covered by the rhel label + USE_CLOUDSMITH: ${{ ! 
startsWith(matrix.label, 'centos') || '' && vars.USE_CLOUDSMITH }} run: | sha256sum bazel-bin/pkg/* From 61042e362c2a6aa78ac841b4e3e4481a06553a1c Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Sun, 23 Jun 2024 11:38:08 +0800 Subject: [PATCH 06/42] docs(release): genereate 2.8.5 changelog --- changelog/2.8.5/2.8.5.md | 20 +++++++++++++++++++ changelog/2.8.5/kong-manager/.gitkeep | 0 changelog/2.8.5/kong/.gitkeep | 0 .../kong/add_zlib1g-dev.yml | 0 .../{unreleased => 2.8.5}/kong/fix_hash.yml | 0 changelog/unreleased/kong-manager/.gitkeep | 0 changelog/unreleased/kong/.gitkeep | 0 7 files changed, 20 insertions(+) create mode 100644 changelog/2.8.5/2.8.5.md create mode 100644 changelog/2.8.5/kong-manager/.gitkeep create mode 100644 changelog/2.8.5/kong/.gitkeep rename changelog/{unreleased => 2.8.5}/kong/add_zlib1g-dev.yml (100%) rename changelog/{unreleased => 2.8.5}/kong/fix_hash.yml (100%) create mode 100644 changelog/unreleased/kong-manager/.gitkeep create mode 100644 changelog/unreleased/kong/.gitkeep diff --git a/changelog/2.8.5/2.8.5.md b/changelog/2.8.5/2.8.5.md new file mode 100644 index 00000000000..48d4ec4a9e0 --- /dev/null +++ b/changelog/2.8.5/2.8.5.md @@ -0,0 +1,20 @@ +## Kong + + +### Performance +#### Performance + +- Fixed an inefficiency issue in the Luajit hashing algorithm + [#13269](https://github.com/Kong/kong/issues/13269) + [KAG-4786](https://konghq.atlassian.net/browse/KAG-4786) + + + + + +### Fixes +#### Default + +- Added zlib1g-dev dependency to Ubuntu packages. + [#13269](https://github.com/Kong/kong/issues/13269) + [KAG-4786](https://konghq.atlassian.net/browse/KAG-4786) diff --git a/changelog/2.8.5/kong-manager/.gitkeep b/changelog/2.8.5/kong-manager/.gitkeep new file mode 100644 index 00000000000..e69de29bb2d diff --git a/changelog/2.8.5/kong/.gitkeep b/changelog/2.8.5/kong/.gitkeep new file mode 100644 index 00000000000..e69de29bb2d diff --git a/changelog/unreleased/kong/add_zlib1g-dev.yml b/changelog/2.8.5/kong/add_zlib1g-dev.yml similarity index 100% rename from changelog/unreleased/kong/add_zlib1g-dev.yml rename to changelog/2.8.5/kong/add_zlib1g-dev.yml diff --git a/changelog/unreleased/kong/fix_hash.yml b/changelog/2.8.5/kong/fix_hash.yml similarity index 100% rename from changelog/unreleased/kong/fix_hash.yml rename to changelog/2.8.5/kong/fix_hash.yml diff --git a/changelog/unreleased/kong-manager/.gitkeep b/changelog/unreleased/kong-manager/.gitkeep new file mode 100644 index 00000000000..e69de29bb2d diff --git a/changelog/unreleased/kong/.gitkeep b/changelog/unreleased/kong/.gitkeep new file mode 100644 index 00000000000..e69de29bb2d From ab163498ca3684722b01bf29cd8ead3648792664 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 18 Jul 2024 21:08:59 +0800 Subject: [PATCH 07/42] Revert "docs(changelog): move changelog locations (#13236)" This reverts commit b6fb3f974ef94781995c93079ef1e212d611d1b8. 
--- {changelog => CHANGELOG}/unreleased/kong/11480.yaml | 0 {changelog => CHANGELOG}/unreleased/kong/11515.yaml | 0 .../unreleased/kong/optimize_keepalive_parameters.yml | 0 {changelog => CHANGELOG}/unreleased/kong/udp-socket-recycle.yml | 0 4 files changed, 0 insertions(+), 0 deletions(-) rename {changelog => CHANGELOG}/unreleased/kong/11480.yaml (100%) rename {changelog => CHANGELOG}/unreleased/kong/11515.yaml (100%) rename {changelog => CHANGELOG}/unreleased/kong/optimize_keepalive_parameters.yml (100%) rename {changelog => CHANGELOG}/unreleased/kong/udp-socket-recycle.yml (100%) diff --git a/changelog/unreleased/kong/11480.yaml b/CHANGELOG/unreleased/kong/11480.yaml similarity index 100% rename from changelog/unreleased/kong/11480.yaml rename to CHANGELOG/unreleased/kong/11480.yaml diff --git a/changelog/unreleased/kong/11515.yaml b/CHANGELOG/unreleased/kong/11515.yaml similarity index 100% rename from changelog/unreleased/kong/11515.yaml rename to CHANGELOG/unreleased/kong/11515.yaml diff --git a/changelog/unreleased/kong/optimize_keepalive_parameters.yml b/CHANGELOG/unreleased/kong/optimize_keepalive_parameters.yml similarity index 100% rename from changelog/unreleased/kong/optimize_keepalive_parameters.yml rename to CHANGELOG/unreleased/kong/optimize_keepalive_parameters.yml diff --git a/changelog/unreleased/kong/udp-socket-recycle.yml b/CHANGELOG/unreleased/kong/udp-socket-recycle.yml similarity index 100% rename from changelog/unreleased/kong/udp-socket-recycle.yml rename to CHANGELOG/unreleased/kong/udp-socket-recycle.yml From 044d555e7f40e3cdedcd0ab6c0a22ce8be866d0d Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 18 Jul 2024 21:08:59 +0800 Subject: [PATCH 08/42] Revert "fix(dns): fix the UDP socket leaking issue (#12698)" This reverts commit 89ef0a286771003c991d749067ccb2a5203cb0d2. --- .../unreleased/kong/udp-socket-recycle.yml | 3 - ...a-resty-dns-0.22_01-destory_resolver.patch | 46 -------------- kong/resty/dns/client.lua | 5 +- spec/02-integration/05-proxy/05-dns_spec.lua | 61 ------------------- 4 files changed, 1 insertion(+), 114 deletions(-) delete mode 100644 CHANGELOG/unreleased/kong/udp-socket-recycle.yml delete mode 100644 build/openresty/patches/lua-resty-dns-0.22_01-destory_resolver.patch diff --git a/CHANGELOG/unreleased/kong/udp-socket-recycle.yml b/CHANGELOG/unreleased/kong/udp-socket-recycle.yml deleted file mode 100644 index d2b12decc9d..00000000000 --- a/CHANGELOG/unreleased/kong/udp-socket-recycle.yml +++ /dev/null @@ -1,3 +0,0 @@ -message: "Fixed the UDP socket leaking issue of the DNS module." 
-scope: Core -type: bugfix diff --git a/build/openresty/patches/lua-resty-dns-0.22_01-destory_resolver.patch b/build/openresty/patches/lua-resty-dns-0.22_01-destory_resolver.patch deleted file mode 100644 index e52797c4b6a..00000000000 --- a/build/openresty/patches/lua-resty-dns-0.22_01-destory_resolver.patch +++ /dev/null @@ -1,46 +0,0 @@ -diff --git a/bundle/lua-resty-dns-0.22/lib/resty/dns/resolver.lua b/bundle/lua-resty-dns-0.22/lib/resty/dns/resolver.lua -index a67b3c1..0305485 100644 ---- a/bundle/lua-resty-dns-0.22/lib/resty/dns/resolver.lua -+++ b/bundle/lua-resty-dns-0.22/lib/resty/dns/resolver.lua -@@ -99,6 +99,26 @@ for i = 2, 64, 2 do - arpa_tmpl[i] = DOT_CHAR - end - -+local function udp_socks_close(self) -+ if self.socks == nil then -+ return -+ end -+ -+ for _, sock in ipairs(self.socks) do -+ sock:close() -+ end -+ -+ self.socks = nil -+end -+ -+local function tcp_socks_close(self) -+ if self.tcp_sock == nil then -+ return -+ end -+ -+ self.tcp_sock:close() -+ self.tcp_sock = nil -+end - - function _M.new(class, opts) - if not opts then -@@ -161,6 +181,14 @@ function _M.new(class, opts) - }, mt) - end - -+function _M:destroy() -+ udp_socks_close(self) -+ tcp_socks_close(self) -+ self.cur = nil -+ self.servers = nil -+ self.retrans = nil -+ self.no_recurse = nil -+end - - local function pick_sock(self, socks) - local cur = self.cur diff --git a/kong/resty/dns/client.lua b/kong/resty/dns/client.lua index b7727c08dce..913dd3efc81 100644 --- a/kong/resty/dns/client.lua +++ b/kong/resty/dns/client.lua @@ -697,6 +697,7 @@ local function parseAnswer(qname, qtype, answers, try_list) return true end + -- executes 1 individual query. -- This query will not be synchronized, every call will be 1 query. -- @param qname the name to query for @@ -713,10 +714,6 @@ local function individualQuery(qname, r_opts, try_list) local result result, err = r:query(qname, r_opts) - -- Manually destroy the resolver to avoid socket leak - -- resolver:destroy is patched in build phase, more information can be found in - -- build/openresty/patches/lua-resty-dns-0.22_01-destory_resolver.patch - r:destroy() if not result then return result, err, try_list end diff --git a/spec/02-integration/05-proxy/05-dns_spec.lua b/spec/02-integration/05-proxy/05-dns_spec.lua index 46d3ab21230..e7dfb9fe610 100644 --- a/spec/02-integration/05-proxy/05-dns_spec.lua +++ b/spec/02-integration/05-proxy/05-dns_spec.lua @@ -145,66 +145,5 @@ for _, strategy in helpers.each_strategy() do assert.response(r).has.status(503) end) end) - - -- lua-resty-dns is used for DNS query. It will create some UDP sockets - -- during initialization. These sockets should be released after Query finish. - -- The release is done by explicitly calling a destory method that we patch. - -- This test case is to check the UDP sockets are released after the DNS query - -- is done. 
- describe("udp sockets", function() - local domain_name = "www.example.test" - local address = "127.0.0.10" - local proxy_client - - lazy_setup(function() - local bp = helpers.get_db_utils(strategy, { - "routes", - "services", - }) - - local fixtures = { - dns_mock = helpers.dns_mock.new() - } - fixtures.dns_mock:A({ - name = domain_name, - address = address, - }) - - local service = bp.services:insert { - name = "foo", - host = domain_name, - } - - bp.routes:insert { - name = "foo", - paths = { "/foo" }, - service = service, - } - - assert(helpers.start_kong({ database = strategy }, nil, nil, fixtures)) - end) - - lazy_teardown(function() - if proxy_client then - proxy_client:close() - end - assert(helpers.stop_kong()) - end) - - it("release", function() - proxy_client = helpers.proxy_client() - proxy_client:send { - method = "GET", - path = "/foo", - headers = { - host = domain_name - } - } - assert.logfile().has.line("serving '".. domain_name .. "' from mocks", true, 30) - local ok, stderr, stdout = helpers.execute("netstat -n | grep 53 | grep udp | wc -l") - assert.truthy(ok, stderr) - assert.equals(0, assert(tonumber(stdout))) - end) - end) end) end From 187e39a36fd434c21a9303bb8c84d9e5691cdd31 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 18 Jul 2024 21:08:59 +0800 Subject: [PATCH 09/42] Revert "fix(template): fix failure of starting if `proxy_access_log` is `off`" This reverts commit 524df9f0292d44f46236ba4f4521651ba92498ae. --- kong/cmd/utils/prefix_handler.lua | 1 - kong/templates/nginx_kong.lua | 5 ----- spec/01-unit/04-prefix_handler_spec.lua | 10 ---------- 3 files changed, 16 deletions(-) diff --git a/kong/cmd/utils/prefix_handler.lua b/kong/cmd/utils/prefix_handler.lua index efc8413e72e..b26e208b428 100644 --- a/kong/cmd/utils/prefix_handler.lua +++ b/kong/cmd/utils/prefix_handler.lua @@ -211,7 +211,6 @@ local function compile_conf(kong_config, conf_template) -- computed config properties for templating local compile_env = { _escape = ">", - proxy_access_log_enabled = kong_config.proxy_access_log ~= "off", pairs = pairs, ipairs = ipairs, tostring = tostring, diff --git a/kong/templates/nginx_kong.lua b/kong/templates/nginx_kong.lua index 3a416a19bf0..7aa5e5b7246 100644 --- a/kong/templates/nginx_kong.lua +++ b/kong/templates/nginx_kong.lua @@ -130,12 +130,7 @@ server { # https://github.com/Kong/lua-kong-nginx-module#lua_kong_error_log_request_id lua_kong_error_log_request_id $kong_request_id; -> if proxy_access_log_enabled then access_log ${{PROXY_ACCESS_LOG}} kong_log_format; -> else - access_log off; -> end - error_log ${{PROXY_ERROR_LOG}} ${{LOG_LEVEL}}; > if proxy_ssl_enabled then diff --git a/spec/01-unit/04-prefix_handler_spec.lua b/spec/01-unit/04-prefix_handler_spec.lua index 8446ff68b0a..b1a9d5c29ee 100644 --- a/spec/01-unit/04-prefix_handler_spec.lua +++ b/spec/01-unit/04-prefix_handler_spec.lua @@ -477,16 +477,6 @@ describe("NGINX conf compiler", function() local nginx_conf = prefix_handler.compile_kong_stream_conf(conf) assert.matches("access_log%slogs/access.log%sbasic;", nginx_conf) - local conf = assert(conf_loader(nil, { - proxy_access_log = "off", - stream_listen = "0.0.0.0:9100", - nginx_stream_tcp_nodelay = "on", - })) - local nginx_conf = prefix_handler.compile_kong_conf(conf) - assert.matches("access_log%soff;", nginx_conf) - local nginx_conf = prefix_handler.compile_kong_stream_conf(conf) - assert.matches("access_log%slogs/access.log%sbasic;", nginx_conf) - local conf = assert(conf_loader(nil, { proxy_stream_access_log = "/dev/stdout custom", 
stream_listen = "0.0.0.0:9100", From 84b52d6e78afc237ca6231692258395d8cba651d Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 18 Jul 2024 21:08:59 +0800 Subject: [PATCH 10/42] Revert "fix(conf): fix the default value of upstream_keepalive_max_requests (#12645)" This reverts commit adadd53c2c39e04f16a7877caef4b05f0e9e63a4. --- kong.conf.default | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kong.conf.default b/kong.conf.default index 092d0545ff7..c555bc71259 100644 --- a/kong.conf.default +++ b/kong.conf.default @@ -794,7 +794,7 @@ # each upstream request to open a new # connection. -#upstream_keepalive_max_requests = 10000 # Sets the default maximum number of +#upstream_keepalive_max_requests = 1000 # Sets the default maximum number of # requests than can be proxied upstream # through one keepalive connection. # After the maximum number of requests From a5c97b0aa820f5878c6659ced9caefe049447628 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 18 Jul 2024 21:08:59 +0800 Subject: [PATCH 11/42] Revert "perf(proxy): use higher default keepalive request value for Nginx tuning (#12223)" This reverts commit 609874f103b67633ce515cda1c98cb4c00043a50. --- CHANGELOG/unreleased/kong/optimize_keepalive_parameters.yml | 3 --- kong.conf.default | 2 +- kong/templates/kong_defaults.lua | 4 ++-- 3 files changed, 3 insertions(+), 6 deletions(-) delete mode 100644 CHANGELOG/unreleased/kong/optimize_keepalive_parameters.yml diff --git a/CHANGELOG/unreleased/kong/optimize_keepalive_parameters.yml b/CHANGELOG/unreleased/kong/optimize_keepalive_parameters.yml deleted file mode 100644 index 49ec8baf6d4..00000000000 --- a/CHANGELOG/unreleased/kong/optimize_keepalive_parameters.yml +++ /dev/null @@ -1,3 +0,0 @@ -message: Bumped default values of `nginx_http_keepalive_requests` and `upstream_keepalive_max_requests` to `10000`. -type: performance -scope: Configuration diff --git a/kong.conf.default b/kong.conf.default index c555bc71259..0d6f1e7123a 100644 --- a/kong.conf.default +++ b/kong.conf.default @@ -939,7 +939,7 @@ # at worst any regex Kong executes could finish within # roughly 2 seconds. -#nginx_http_keepalive_requests = 10000 # Sets the maximum number of client requests that can be served through one +#nginx_http_keepalive_requests = 1000 # Sets the maximum number of client requests that can be served through one # keep-alive connection. After the maximum number of requests are made, # the connection is closed. 
# Closing connections periodically is necessary to free per-connection diff --git a/kong/templates/kong_defaults.lua b/kong/templates/kong_defaults.lua index 7dba9776c3e..8046bd80bcd 100644 --- a/kong/templates/kong_defaults.lua +++ b/kong/templates/kong_defaults.lua @@ -55,7 +55,7 @@ trusted_ips = NONE error_default_type = text/plain upstream_keepalive = NONE upstream_keepalive_pool_size = 512 -upstream_keepalive_max_requests = 10000 +upstream_keepalive_max_requests = 1000 upstream_keepalive_idle_timeout = 60 nginx_user = kong kong @@ -76,7 +76,7 @@ nginx_http_ssl_dhparam = NONE nginx_http_ssl_session_tickets = NONE nginx_http_ssl_session_timeout = NONE nginx_http_lua_regex_match_limit = 100000 -nginx_http_keepalive_requests = 10000 +nginx_http_keepalive_requests = 1000 nginx_stream_ssl_protocols = NONE nginx_stream_ssl_prefer_server_ciphers = NONE nginx_stream_ssl_dhparam = NONE From 2cfa696d233d9d65ccfbc813d4c4450c86883bc7 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 18 Jul 2024 21:08:59 +0800 Subject: [PATCH 12/42] Revert "perf(template): use more reasonable default value for Nginx tuning (#11515)" This reverts commit 5034d0c2cdecf0a9af19ff798462bdb4d93d5288. --- CHANGELOG/unreleased/kong/11515.yaml | 7 ---- kong.conf.default | 50 ++++++++++--------------- kong/templates/kong_defaults.lua | 7 ++-- spec/01-unit/04-prefix_handler_spec.lua | 2 +- 4 files changed, 24 insertions(+), 42 deletions(-) delete mode 100644 CHANGELOG/unreleased/kong/11515.yaml diff --git a/CHANGELOG/unreleased/kong/11515.yaml b/CHANGELOG/unreleased/kong/11515.yaml deleted file mode 100644 index 28a3209034a..00000000000 --- a/CHANGELOG/unreleased/kong/11515.yaml +++ /dev/null @@ -1,7 +0,0 @@ -message: Bumped the default value of `upstream_keepalive_pool_size` to `512` and `upstream_keepalive_max_requests` to `1000` -type: performance -scope: Configuration -prs: - - 11515 -jiras: - - "FTI-4868" diff --git a/kong.conf.default b/kong.conf.default index 0d6f1e7123a..e1d79288cfa 100644 --- a/kong.conf.default +++ b/kong.conf.default @@ -784,26 +784,26 @@ # `text/html`, `application/json`, and # `application/xml`. -#upstream_keepalive_pool_size = 512 # Sets the default size of the upstream - # keepalive connection pools. - # Upstream keepalive connection pools - # are segmented by the `dst ip/dst - # port/SNI` attributes of a connection. - # A value of `0` will disable upstream - # keepalive connections by default, forcing - # each upstream request to open a new - # connection. - -#upstream_keepalive_max_requests = 1000 # Sets the default maximum number of - # requests than can be proxied upstream - # through one keepalive connection. - # After the maximum number of requests - # is reached, the connection will be - # closed. - # A value of `0` will disable this - # behavior, and a keepalive connection - # can be used to proxy an indefinite - # number of requests. +#upstream_keepalive_pool_size = 60 # Sets the default size of the upstream + # keepalive connection pools. + # Upstream keepalive connection pools + # are segmented by the `dst ip/dst + # port/SNI` attributes of a connection. + # A value of `0` will disable upstream + # keepalive connections by default, forcing + # each upstream request to open a new + # connection. + +#upstream_keepalive_max_requests = 100 # Sets the default maximum number of + # requests than can be proxied upstream + # through one keepalive connection. + # After the maximum number of requests + # is reached, the connection will be + # closed. 
+ # A value of `0` will disable this + # behavior, and a keepalive connection + # can be used to proxy an indefinite + # number of requests. #upstream_keepalive_idle_timeout = 60 # Sets the default timeout (in seconds) # for which an upstream keepalive @@ -939,16 +939,6 @@ # at worst any regex Kong executes could finish within # roughly 2 seconds. -#nginx_http_keepalive_requests = 1000 # Sets the maximum number of client requests that can be served through one - # keep-alive connection. After the maximum number of requests are made, - # the connection is closed. - # Closing connections periodically is necessary to free per-connection - # memory allocations. Therefore, using too high maximum number of requests - # could result in excessive memory usage and not recommended. - # See: https://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_requests - - - #------------------------------------------------------------------------------ # DATASTORE #------------------------------------------------------------------------------ diff --git a/kong/templates/kong_defaults.lua b/kong/templates/kong_defaults.lua index 8046bd80bcd..459318eaae2 100644 --- a/kong/templates/kong_defaults.lua +++ b/kong/templates/kong_defaults.lua @@ -54,8 +54,8 @@ headers_upstream = x-kong-request-id trusted_ips = NONE error_default_type = text/plain upstream_keepalive = NONE -upstream_keepalive_pool_size = 512 -upstream_keepalive_max_requests = 1000 +upstream_keepalive_pool_size = 60 +upstream_keepalive_max_requests = 100 upstream_keepalive_idle_timeout = 60 nginx_user = kong kong @@ -75,8 +75,6 @@ nginx_http_ssl_prefer_server_ciphers = NONE nginx_http_ssl_dhparam = NONE nginx_http_ssl_session_tickets = NONE nginx_http_ssl_session_timeout = NONE -nginx_http_lua_regex_match_limit = 100000 -nginx_http_keepalive_requests = 1000 nginx_stream_ssl_protocols = NONE nginx_stream_ssl_prefer_server_ciphers = NONE nginx_stream_ssl_dhparam = NONE @@ -92,6 +90,7 @@ nginx_upstream_keepalive_timeout = NONE nginx_http_upstream_keepalive = NONE nginx_http_upstream_keepalive_requests = NONE nginx_http_upstream_keepalive_timeout = NONE +nginx_http_lua_regex_match_limit = 100000 client_max_body_size = 0 client_body_buffer_size = 8k diff --git a/spec/01-unit/04-prefix_handler_spec.lua b/spec/01-unit/04-prefix_handler_spec.lua index b1a9d5c29ee..75143b3d20a 100644 --- a/spec/01-unit/04-prefix_handler_spec.lua +++ b/spec/01-unit/04-prefix_handler_spec.lua @@ -1002,7 +1002,7 @@ describe("NGINX conf compiler", function() nil, true) assert.matches("daemon on;", contents, nil, true) assert.matches("listen 0.0.0.0:9000;", contents, nil, true) - assert.not_matches("keepalive%s+%d+", contents) + assert.not_matches("keepalive", contents, nil, true) end) it("'upstream_keepalive = 0' disables keepalive", function() From cf324bc509a58ffeffb1261c8b94a6792e405926 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 18 Jul 2024 21:08:59 +0800 Subject: [PATCH 13/42] Revert "chore(patches): revert the "respect max retries" patch" This reverts commit c6bbc7d3ad194a45e0dd35609ae2fc13180a5dc8. 
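
As a rough sketch of the behavior being restored (assuming the stock lua-resty-core `ngx.balancer` API; the peer list, the retry count, and the `ngx.ctx.balancer_tries` counter below are illustrative placeholders rather than Kong's actual balancer code), a `balancer_by_lua*` handler requests its retry budget with `set_more_tries()`:

    balancer_by_lua_block {
        local balancer = require("ngx.balancer")

        -- hypothetical peers, for this example only
        local peers = { { "127.0.0.1", 8001 }, { "127.0.0.1", 8002 } }

        if not ngx.ctx.balancer_tries then
            ngx.ctx.balancer_tries = 0

            -- allow up to 2 extra attempts beyond the first one
            local ok, err = balancer.set_more_tries(2)
            if not ok then
                ngx.log(ngx.ERR, "failed to set more tries: ", err)
            end
        else
            -- on a retry, report why the previous attempt failed
            local state, code = balancer.get_last_failure()
            ngx.log(ngx.WARN, "retrying after ", state or "unknown",
                    " (status: ", code or 0, ")")
        end

        -- round-robin over the placeholder peers
        ngx.ctx.balancer_tries = ngx.ctx.balancer_tries + 1
        local peer = peers[(ngx.ctx.balancer_tries % #peers) + 1]

        local ok, err = balancer.set_current_peer(peer[1], peer[2])
        if not ok then
            ngx.log(ngx.ERR, "failed to set current peer: ", err)
        end
    }

With the patch re-applied, an error on a cached keepalive connection goes through the new notify hook, which decrements pc->tries again after ngx_http_upstream_next() re-incremented it, so the budget requested above is not silently exceeded (see the retries spec added below).
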
--- ...ua-0.10.20_02-dyn_upstream_keepalive.patch | 667 ++++++++---------- .../kong/balancer_respect_max_retries.yml | 3 + .../05-proxy/10-balancer/08-retries_spec.lua | 124 ++++ 3 files changed, 407 insertions(+), 387 deletions(-) create mode 100644 changelog/unreleased/kong/balancer_respect_max_retries.yml create mode 100644 spec/02-integration/05-proxy/10-balancer/08-retries_spec.lua diff --git a/build/openresty/patches/ngx_lua-0.10.20_02-dyn_upstream_keepalive.patch b/build/openresty/patches/ngx_lua-0.10.20_02-dyn_upstream_keepalive.patch index effdd5b517b..0ee6df76ae8 100644 --- a/build/openresty/patches/ngx_lua-0.10.20_02-dyn_upstream_keepalive.patch +++ b/build/openresty/patches/ngx_lua-0.10.20_02-dyn_upstream_keepalive.patch @@ -5,16 +5,43 @@ Subject: [PATCH 1/3] feature: implemented keepalive pooling in 'balancer_by_lua*'. --- - src/ngx_http_lua_balancer.c | 738 ++++++++++++++++++++++++++++++------ - src/ngx_http_lua_common.h | 4 + - src/ngx_http_lua_module.c | 3 + - 3 files changed, 629 insertions(+), 116 deletions(-) + .../nginx-1.19.9/src/http/ngx_http_upstream.c | 1 + + .../nginx-1.19.9/src/http/ngx_http_upstream.h | 2 + + .../src/ngx_http_lua_balancer.c | 848 ++++++++++++++---- + .../ngx_lua-0.10.20/src/ngx_http_lua_common.h | 11 +- + .../ngx_lua-0.10.20/src/ngx_http_lua_module.c | 3 + + 5 files changed, 689 insertions(+), 176 deletions(-) + +diff --git a/bundle/nginx-1.19.9/src/http/ngx_http_upstream.c b/bundle/nginx-1.19.9/src/http/ngx_http_upstream.c +index 4a6db93..98a8cfc 100644 +--- a/bundle/nginx-1.19.9/src/http/ngx_http_upstream.c ++++ b/bundle/nginx-1.19.9/src/http/ngx_http_upstream.c +@@ -4244,6 +4244,7 @@ ngx_http_upstream_next(ngx_http_request_t *r, ngx_http_upstream_t *u, + if (u->peer.cached && ft_type == NGX_HTTP_UPSTREAM_FT_ERROR) { + /* TODO: inform balancer instead */ + u->peer.tries++; ++ u->peer.notify(&u->peer, u->peer.data, NGX_HTTP_UPSTREAM_NOFITY_CACHED_CONNECTION_ERROR); + } + + switch (ft_type) { +diff --git a/bundle/nginx-1.19.9/src/http/ngx_http_upstream.h b/bundle/nginx-1.19.9/src/http/ngx_http_upstream.h +index 0432617..50a10cb 100644 +--- a/bundle/nginx-1.19.9/src/http/ngx_http_upstream.h ++++ b/bundle/nginx-1.19.9/src/http/ngx_http_upstream.h +@@ -56,6 +56,8 @@ + #define NGX_HTTP_UPSTREAM_IGN_VARY 0x00000200 -diff --git a/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c b/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c -index f71a3e00..0d403716 100644 + ++#define NGX_HTTP_UPSTREAM_NOFITY_CACHED_CONNECTION_ERROR 0x1 ++ + typedef struct { + ngx_uint_t status; + ngx_msec_t response_time; +diff --git a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c +index e4ac57a..44e01cb 100644 --- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c +++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c -@@ -16,46 +16,102 @@ +@@ -16,46 +16,105 @@ #include "ngx_http_lua_directive.h" @@ -48,25 +75,20 @@ index f71a3e00..0d403716 100644 + + ngx_uint_t more_tries; + ngx_uint_t total_tries; - -- ngx_http_lua_srv_conf_t *conf; -- ngx_http_request_t *request; ++ + int last_peer_state; - -- ngx_uint_t more_tries; -- ngx_uint_t total_tries; ++ + uint32_t cpool_crc32; - -- struct sockaddr *sockaddr; -- socklen_t socklen; ++ + void *data; -- ngx_str_t *host; -- in_port_t port; +- ngx_http_lua_srv_conf_t *conf; +- ngx_http_request_t *request; + ngx_event_get_peer_pt original_get_peer; + ngx_event_free_peer_pt original_free_peer; -- int last_peer_state; +- ngx_uint_t more_tries; +- ngx_uint_t total_tries; +#if (NGX_HTTP_SSL) + 
ngx_event_set_peer_session_pt original_set_session; + ngx_event_save_peer_session_pt original_save_session; @@ -75,12 +97,17 @@ index f71a3e00..0d403716 100644 + ngx_http_request_t *request; + ngx_http_lua_srv_conf_t *conf; + ngx_http_lua_balancer_keepalive_pool_t *cpool; -+ + +- struct sockaddr *sockaddr; +- socklen_t socklen; + ngx_str_t *host; -+ + +- ngx_str_t *host; +- in_port_t port; + struct sockaddr *sockaddr; + socklen_t socklen; -+ + +- int last_peer_state; + unsigned keepalive:1; #if !(HAVE_NGX_UPSTREAM_TIMEOUT_FIELDS) @@ -108,6 +135,8 @@ index f71a3e00..0d403716 100644 - ngx_http_request_t *r); static void ngx_http_lua_balancer_free_peer(ngx_peer_connection_t *pc, void *data, ngx_uint_t state); ++static void ngx_http_lua_balancer_notify_peer(ngx_peer_connection_t *pc, ++ void *data, ngx_uint_t type); +static ngx_int_t ngx_http_lua_balancer_create_keepalive_pool(lua_State *L, + ngx_log_t *log, uint32_t cpool_crc32, ngx_uint_t cpool_size, + ngx_http_lua_balancer_keepalive_pool_t **cpool); @@ -133,11 +162,12 @@ index f71a3e00..0d403716 100644 + (bp->sockaddr && bp->socklen) + + -+static char ngx_http_lua_balancer_keepalive_pools_table_key; ++static char ngx_http_lua_balancer_keepalive_pools_table_key; ++static struct sockaddr *ngx_http_lua_balancer_default_server_sockaddr; ngx_int_t -@@ -102,6 +158,61 @@ ngx_http_lua_balancer_handler_inline(ngx_http_request_t *r, +@@ -102,6 +161,61 @@ ngx_http_lua_balancer_handler_inline(ngx_http_request_t *r, } @@ -199,7 +229,7 @@ index f71a3e00..0d403716 100644 char * ngx_http_lua_balancer_by_lua_block(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) -@@ -125,16 +236,16 @@ char * +@@ -125,16 +239,18 @@ char * ngx_http_lua_balancer_by_lua(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) { @@ -211,7 +241,9 @@ index f71a3e00..0d403716 100644 + u_char *cache_key = NULL; + u_char *name; + ngx_str_t *value; ++ ngx_url_t url; ngx_http_upstream_srv_conf_t *uscf; ++ ngx_http_upstream_server_t *us; + ngx_http_lua_srv_conf_t *lscf = conf; dd("enter"); @@ -222,7 +254,7 @@ index f71a3e00..0d403716 100644 if (cmd->post == NULL) { return NGX_CONF_ERROR; } -@@ -178,11 +289,19 @@ ngx_http_lua_balancer_by_lua(ngx_conf_t *cf, ngx_command_t *cmd, +@@ -178,11 +294,42 @@ ngx_http_lua_balancer_by_lua(ngx_conf_t *cf, ngx_command_t *cmd, lscf->balancer.src_key = cache_key; @@ -230,6 +262,29 @@ index f71a3e00..0d403716 100644 + uscf = ngx_http_conf_get_module_srv_conf(cf, ngx_http_upstream_module); ++ if (uscf->servers->nelts == 0) { ++ us = ngx_array_push(uscf->servers); ++ if (us == NULL) { ++ return NGX_CONF_ERROR; ++ } ++ ++ ngx_memzero(us, sizeof(ngx_http_upstream_server_t)); ++ ngx_memzero(&url, sizeof(ngx_url_t)); ++ ++ ngx_str_set(&url.url, "0.0.0.1"); ++ url.default_port = 80; ++ ++ if (ngx_parse_url(cf->pool, &url) != NGX_OK) { ++ return NGX_CONF_ERROR; ++ } ++ ++ us->name = url.url; ++ us->addrs = url.addrs; ++ us->naddrs = url.naddrs; ++ ++ ngx_http_lua_balancer_default_server_sockaddr = us->addrs[0].sockaddr; ++ } ++ if (uscf->peer.init_upstream) { ngx_conf_log_error(NGX_LOG_WARN, cf, 0, "load balancing method redefined"); @@ -242,7 +297,7 @@ index f71a3e00..0d403716 100644 } uscf->peer.init_upstream = ngx_http_lua_balancer_init; -@@ -198,14 +317,18 @@ ngx_http_lua_balancer_by_lua(ngx_conf_t *cf, ngx_command_t *cmd, +@@ -198,14 +345,18 @@ ngx_http_lua_balancer_by_lua(ngx_conf_t *cf, ngx_command_t *cmd, static ngx_int_t @@ -265,7 +320,7 @@ index f71a3e00..0d403716 100644 us->peer.init = ngx_http_lua_balancer_init_peer; return NGX_OK; -@@ -216,33 +339,38 @@ 
static ngx_int_t +@@ -216,33 +367,39 @@ static ngx_int_t ngx_http_lua_balancer_init_peer(ngx_http_request_t *r, ngx_http_upstream_srv_conf_t *us) { @@ -298,6 +353,7 @@ index f71a3e00..0d403716 100644 + r->upstream->peer.data = bp; r->upstream->peer.get = ngx_http_lua_balancer_get_peer; r->upstream->peer.free = ngx_http_lua_balancer_free_peer; ++ r->upstream->peer.notify = ngx_http_lua_balancer_notify_peer; #if (NGX_HTTP_SSL) + bp->original_set_session = r->upstream->peer.set_session; @@ -315,7 +371,7 @@ index f71a3e00..0d403716 100644 return NGX_OK; } -@@ -250,25 +378,26 @@ ngx_http_lua_balancer_init_peer(ngx_http_request_t *r, +@@ -250,25 +407,26 @@ ngx_http_lua_balancer_init_peer(ngx_http_request_t *r, static ngx_int_t ngx_http_lua_balancer_get_peer(ngx_peer_connection_t *pc, void *data) { @@ -333,9 +389,9 @@ index f71a3e00..0d403716 100644 + ngx_http_request_t *r; + ngx_http_lua_ctx_t *ctx; + ngx_http_lua_srv_conf_t *lscf; -+ ngx_http_lua_main_conf_t *lmcf; + ngx_http_lua_balancer_keepalive_item_t *item; + ngx_http_lua_balancer_peer_data_t *bp = data; ++ void *pdata; ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0, - "lua balancer peer, tries: %ui", pc->tries); @@ -353,7 +409,7 @@ index f71a3e00..0d403716 100644 if (ctx == NULL) { ctx = ngx_http_lua_create_ctx(r); if (ctx == NULL) { -@@ -286,9 +415,15 @@ ngx_http_lua_balancer_get_peer(ngx_peer_connection_t *pc, void *data) +@@ -286,21 +444,24 @@ ngx_http_lua_balancer_get_peer(ngx_peer_connection_t *pc, void *data) ctx->context = NGX_HTTP_LUA_CONTEXT_BALANCER; @@ -368,16 +424,24 @@ index f71a3e00..0d403716 100644 + bp->keepalive = 0; bp->total_tries++; - lmcf = ngx_http_get_module_main_conf(r, ngx_http_lua_module); -@@ -300,7 +435,6 @@ ngx_http_lua_balancer_get_peer(ngx_peer_connection_t *pc, void *data) - lmcf->balancer_peer_data = bp; +- lmcf = ngx_http_get_module_main_conf(r, ngx_http_lua_module); +- +- /* balancer_by_lua does not support yielding and +- * there cannot be any conflicts among concurrent requests, +- * thus it is safe to store the peer data in the main conf. 
+- */ +- lmcf->balancer_peer_data = bp; ++ pdata = r->upstream->peer.data; ++ r->upstream->peer.data = bp; rc = lscf->balancer.handler(r, lscf, L); -- + ++ r->upstream->peer.data = pdata; ++ if (rc == NGX_ERROR) { return NGX_ERROR; } -@@ -322,105 +456,414 @@ ngx_http_lua_balancer_get_peer(ngx_peer_connection_t *pc, void *data) +@@ -322,79 +483,87 @@ ngx_http_lua_balancer_get_peer(ngx_peer_connection_t *pc, void *data) } } @@ -397,10 +461,16 @@ index f71a3e00..0d403716 100644 } - dd("tries: %d", (int) r->upstream->peer.tries); +- +- return NGX_OK; +- } +- +- return ngx_http_upstream_get_round_robin_peer(pc, &bp->rrp); +-} + if (ngx_http_lua_balancer_keepalive_is_enabled(bp)) { + ngx_http_lua_balancer_get_keepalive_pool(L, bp->cpool_crc32, + &bp->cpool); -+ + + if (bp->cpool == NULL + && ngx_http_lua_balancer_create_keepalive_pool(L, pc->log, + bp->cpool_crc32, @@ -410,65 +480,105 @@ index f71a3e00..0d403716 100644 + { + return NGX_ERROR; + } -+ + +-static ngx_int_t +-ngx_http_lua_balancer_by_chunk(lua_State *L, ngx_http_request_t *r) +-{ +- u_char *err_msg; +- size_t len; +- ngx_int_t rc; + ngx_http_lua_assert(bp->cpool); -+ + +- /* init nginx context in Lua VM */ +- ngx_http_lua_set_req(L, r); + if (!ngx_queue_empty(&bp->cpool->cache)) { + q = ngx_queue_head(&bp->cpool->cache); -+ + +-#ifndef OPENRESTY_LUAJIT +- ngx_http_lua_create_new_globals_table(L, 0 /* narr */, 1 /* nrec */); + item = ngx_queue_data(q, ngx_http_lua_balancer_keepalive_item_t, + queue); + c = item->connection; -+ + +- /* {{{ make new env inheriting main thread's globals table */ +- lua_createtable(L, 0, 1 /* nrec */); /* the metatable for the new env */ +- ngx_http_lua_get_globals_table(L); +- lua_setfield(L, -2, "__index"); +- lua_setmetatable(L, -2); /* setmetatable({}, {__index = _G}) */ +- /* }}} */ + ngx_queue_remove(q); + ngx_queue_insert_head(&bp->cpool->free, q); -+ + +- lua_setfenv(L, -2); /* set new running env for the code closure */ +-#endif /* OPENRESTY_LUAJIT */ + c->idle = 0; + c->sent = 0; + c->log = pc->log; + c->read->log = pc->log; + c->write->log = pc->log; + c->pool->log = pc->log; -+ + +- lua_pushcfunction(L, ngx_http_lua_traceback); +- lua_insert(L, 1); /* put it under chunk and args */ + if (c->read->timer_set) { + ngx_del_timer(c->read); + } -+ + +- /* protected call user code */ +- rc = lua_pcall(L, 0, 1, 1); + pc->cached = 1; + pc->connection = c; -+ + +- lua_remove(L, 1); /* remove traceback function */ + ngx_log_debug3(NGX_LOG_DEBUG_HTTP, pc->log, 0, + "lua balancer: keepalive reusing connection %p, " + "requests: %ui, cpool: %p", + c, c->requests, bp->cpool); -+ + +- dd("rc == %d", (int) rc); + return NGX_DONE; + } -+ + +- if (rc != 0) { +- /* error occurred when running loaded code */ +- err_msg = (u_char *) lua_tolstring(L, -1, &len); + bp->cpool->connections++; -+ + +- if (err_msg == NULL) { +- err_msg = (u_char *) "unknown reason"; +- len = sizeof("unknown reason") - 1; + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0, + "lua balancer: keepalive no free connection, " + "cpool: %p", bp->cpool); -+ } + } - return NGX_OK; +- ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, +- "failed to run balancer_by_lua*: %*s", len, err_msg); ++ return NGX_OK; ++ } + +- lua_settop(L, 0); /* clear remaining elems on stack */ ++ rc = bp->original_get_peer(pc, bp->data); ++ if (rc == NGX_ERROR) { ++ return rc; ++ } ++ ++ if (pc->sockaddr == ngx_http_lua_balancer_default_server_sockaddr) { ++ ngx_log_error(NGX_LOG_ERR, pc->log, 0, ++ "lua balancer: no peer set"); + + return NGX_ERROR; } -- return 
ngx_http_upstream_get_round_robin_peer(pc, &bp->rrp); -+ return bp->original_get_peer(pc, bp->data); +- lua_settop(L, 0); /* clear remaining elems on stack */ + return rc; } - --static ngx_int_t --ngx_http_lua_balancer_by_chunk(lua_State *L, ngx_http_request_t *r) -+static void -+ngx_http_lua_balancer_free_peer(ngx_peer_connection_t *pc, void *data, -+ ngx_uint_t state) +@@ -403,24 +572,347 @@ static void + ngx_http_lua_balancer_free_peer(ngx_peer_connection_t *pc, void *data, + ngx_uint_t state) { -- u_char *err_msg; -- size_t len; -- ngx_int_t rc; +- ngx_http_lua_balancer_peer_data_t *bp = data; + ngx_queue_t *q; + ngx_connection_t *c; + ngx_http_upstream_t *u; @@ -476,38 +586,24 @@ index f71a3e00..0d403716 100644 + ngx_http_lua_balancer_keepalive_pool_t *cpool; + ngx_http_lua_balancer_peer_data_t *bp = data; -- /* init nginx context in Lua VM */ -- ngx_http_lua_set_req(L, r); -+ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0, + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0, +- "lua balancer free peer, tries: %ui", pc->tries); + "lua balancer: free peer, tries: %ui", pc->tries); - --#ifndef OPENRESTY_LUAJIT -- ngx_http_lua_create_new_globals_table(L, 0 /* narr */, 1 /* nrec */); ++ + u = bp->request->upstream; + c = pc->connection; -- /* {{{ make new env inheriting main thread's globals table */ -- lua_createtable(L, 0, 1 /* nrec */); /* the metatable for the new env */ -- ngx_http_lua_get_globals_table(L); -- lua_setfield(L, -2, "__index"); -- lua_setmetatable(L, -2); /* setmetatable({}, {__index = _G}) */ -- /* }}} */ +- if (bp->sockaddr && bp->socklen) { + if (ngx_http_lua_balancer_peer_set(bp)) { -+ bp->last_peer_state = (int) state; + bp->last_peer_state = (int) state; -- lua_setfenv(L, -2); /* set new running env for the code closure */ --#endif /* OPENRESTY_LUAJIT */ -+ if (pc->tries) { -+ pc->tries--; -+ } + if (pc->tries) { + pc->tries--; + } -- lua_pushcfunction(L, ngx_http_lua_traceback); -- lua_insert(L, 1); /* put it under chunk and args */ + if (ngx_http_lua_balancer_keepalive_is_enabled(bp)) { + cpool = bp->cpool; - -- /* protected call user code */ -- rc = lua_pcall(L, 0, 1, 1); ++ + if (state & NGX_PEER_FAILED + || c == NULL + || c->read->eof @@ -518,29 +614,21 @@ index f71a3e00..0d403716 100644 + { + goto invalid; + } - -- lua_remove(L, 1); /* remove traceback function */ ++ + if (bp->keepalive_requests + && c->requests >= bp->keepalive_requests) + { + goto invalid; + } - -- dd("rc == %d", (int) rc); ++ + if (!u->keepalive) { + goto invalid; + } - -- if (rc != 0) { -- /* error occurred when running loaded code */ -- err_msg = (u_char *) lua_tolstring(L, -1, &len); ++ + if (!u->request_body_sent) { + goto invalid; + } - -- if (err_msg == NULL) { -- err_msg = (u_char *) "unknown reason"; -- len = sizeof("unknown reason") - 1; ++ + if (ngx_terminate || ngx_exiting) { + goto invalid; + } @@ -617,18 +705,25 @@ index f71a3e00..0d403716 100644 + if (cpool->connections == 0) { + ngx_http_lua_balancer_free_keepalive_pool(pc->log, cpool); + } - } - -- ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, -- "failed to run balancer_by_lua*: %*s", len, err_msg); ++ } ++ + return; + } - -- lua_settop(L, 0); /* clear remaining elems on stack */ ++ + bp->original_free_peer(pc, bp->data, state); +} + + ++static void ++ngx_http_lua_balancer_notify_peer(ngx_peer_connection_t *pc, void *data, ++ ngx_uint_t type) ++{ ++ if (type == NGX_HTTP_UPSTREAM_NOFITY_CACHED_CONNECTION_ERROR) { ++ pc->tries--; ++ } ++} ++ ++ +static ngx_int_t +ngx_http_lua_balancer_create_keepalive_pool(lua_State *L, 
ngx_log_t *log, + uint32_t cpool_crc32, ngx_uint_t cpool_size, @@ -648,14 +743,12 @@ index f71a3e00..0d403716 100644 + + size = sizeof(ngx_http_lua_balancer_keepalive_pool_t) + + sizeof(ngx_http_lua_balancer_keepalive_item_t) * cpool_size; - ++ + upool = lua_newuserdata(L, size); /* pools upool */ + if (upool == NULL) { - return NGX_ERROR; - } - -- lua_settop(L, 0); /* clear remaining elems on stack */ -- return rc; ++ return NGX_ERROR; ++ } ++ + ngx_log_debug2(NGX_LOG_DEBUG_HTTP, log, 0, + "lua balancer: keepalive create pool, crc32: %ui, " + "size: %ui", cpool_crc32, cpool_size); @@ -683,16 +776,13 @@ index f71a3e00..0d403716 100644 + *cpool = upool; + + return NGX_OK; - } - - - static void --ngx_http_lua_balancer_free_peer(ngx_peer_connection_t *pc, void *data, -- ngx_uint_t state) ++} ++ ++ ++static void +ngx_http_lua_balancer_get_keepalive_pool(lua_State *L, uint32_t cpool_crc32, + ngx_http_lua_balancer_keepalive_pool_t **cpool) - { -- ngx_http_lua_balancer_peer_data_t *bp = data; ++{ + ngx_http_lua_balancer_keepalive_pool_t *upool; + + /* get upstream connection pools table */ @@ -710,19 +800,13 @@ index f71a3e00..0d403716 100644 + lua_pushvalue(L, -2); /* pools pools_table_key pools */ + lua_rawset(L, LUA_REGISTRYINDEX); /* pools */ + } - -- ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0, -- "lua balancer free peer, tries: %ui", pc->tries); ++ + ngx_http_lua_assert(lua_istable(L, -1)); - -- if (bp->sockaddr && bp->socklen) { -- bp->last_peer_state = (int) state; ++ + lua_rawgeti(L, -1, cpool_crc32); /* pools upool? */ + upool = lua_touserdata(L, -1); + lua_pop(L, 2); /* orig stack */ - -- if (pc->tries) { -- pc->tries--; ++ + *cpool = upool; +} + @@ -820,8 +904,8 @@ index f71a3e00..0d403716 100644 + + if (ngx_handle_read_event(c->read, 0) != NGX_OK) { + goto close; - } - ++ } ++ return; } @@ -830,10 +914,10 @@ index f71a3e00..0d403716 100644 + + item = c->data; + c->log = ev->log; -+ -+ ngx_http_lua_balancer_close(c); - ngx_http_upstream_free_round_robin_peer(pc, data, state); ++ ngx_http_lua_balancer_close(c); ++ + ngx_queue_remove(&item->queue); + ngx_queue_insert_head(&item->cpool->free, &item->queue); + @@ -843,7 +927,7 @@ index f71a3e00..0d403716 100644 } -@@ -431,12 +874,12 @@ ngx_http_lua_balancer_set_session(ngx_peer_connection_t *pc, void *data) +@@ -431,12 +923,12 @@ ngx_http_lua_balancer_set_session(ngx_peer_connection_t *pc, void *data) { ngx_http_lua_balancer_peer_data_t *bp = data; @@ -858,7 +942,7 @@ index f71a3e00..0d403716 100644 } -@@ -445,13 +888,12 @@ ngx_http_lua_balancer_save_session(ngx_peer_connection_t *pc, void *data) +@@ -445,13 +937,12 @@ ngx_http_lua_balancer_save_session(ngx_peer_connection_t *pc, void *data) { ngx_http_lua_balancer_peer_data_t *bp = data; @@ -874,7 +958,7 @@ index f71a3e00..0d403716 100644 } #endif -@@ -459,14 +901,14 @@ ngx_http_lua_balancer_save_session(ngx_peer_connection_t *pc, void *data) +@@ -459,14 +950,13 @@ ngx_http_lua_balancer_save_session(ngx_peer_connection_t *pc, void *data) int ngx_http_lua_ffi_balancer_set_current_peer(ngx_http_request_t *r, @@ -891,12 +975,39 @@ index f71a3e00..0d403716 100644 + ngx_url_t url; + ngx_http_upstream_t *u; + ngx_http_lua_ctx_t *ctx; -+ ngx_http_lua_main_conf_t *lmcf; + ngx_http_lua_balancer_peer_data_t *bp; if (r == NULL) { *err = "no request found"; -@@ -536,6 +978,70 @@ ngx_http_lua_ffi_balancer_set_current_peer(ngx_http_request_t *r, +@@ -491,18 +981,6 @@ ngx_http_lua_ffi_balancer_set_current_peer(ngx_http_request_t *r, + return NGX_ERROR; + } + +- lmcf = ngx_http_get_module_main_conf(r, 
ngx_http_lua_module); +- +- /* we cannot read r->upstream->peer.data here directly because +- * it could be overridden by other modules like +- * ngx_http_upstream_keepalive_module. +- */ +- bp = lmcf->balancer_peer_data; +- if (bp == NULL) { +- *err = "no upstream peer data found"; +- return NGX_ERROR; +- } +- + ngx_memzero(&url, sizeof(ngx_url_t)); + + url.url.data = ngx_palloc(r->pool, addr_len); +@@ -526,6 +1004,8 @@ ngx_http_lua_ffi_balancer_set_current_peer(ngx_http_request_t *r, + return NGX_ERROR; + } + ++ bp = (ngx_http_lua_balancer_peer_data_t *) u->peer.data; ++ + if (url.addrs && url.addrs[0].sockaddr) { + bp->sockaddr = url.addrs[0].sockaddr; + bp->socklen = url.addrs[0].socklen; +@@ -536,6 +1016,59 @@ ngx_http_lua_ffi_balancer_set_current_peer(ngx_http_request_t *r, return NGX_ERROR; } @@ -913,7 +1024,6 @@ index f71a3e00..0d403716 100644 +{ + ngx_http_upstream_t *u; + ngx_http_lua_ctx_t *ctx; -+ ngx_http_lua_main_conf_t *lmcf; + ngx_http_lua_balancer_peer_data_t *bp; + + if (r == NULL) { @@ -939,17 +1049,7 @@ index f71a3e00..0d403716 100644 + return NGX_ERROR; + } + -+ lmcf = ngx_http_get_module_main_conf(r, ngx_http_lua_module); -+ -+ /* we cannot read r->upstream->peer.data here directly because -+ * it could be overridden by other modules like -+ * ngx_http_upstream_keepalive_module. -+ */ -+ bp = lmcf->balancer_peer_data; -+ if (bp == NULL) { -+ *err = "no upstream peer data found"; -+ return NGX_ERROR; -+ } ++ bp = (ngx_http_lua_balancer_peer_data_t *) u->peer.data; + + if (!ngx_http_lua_balancer_peer_set(bp)) { + *err = "no current peer set"; @@ -967,239 +1067,7 @@ index f71a3e00..0d403716 100644 return NGX_OK; } -diff --git a/ngx_lua-0.10.20/src/ngx_http_lua_common.h b/ngx_lua-0.10.20/src/ngx_http_lua_common.h -index 781a2454..9ce6836a 100644 ---- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_common.h -+++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_common.h -@@ -320,6 +320,10 @@ union ngx_http_lua_srv_conf_u { - #endif - - struct { -+ ngx_http_upstream_init_pt original_init_upstream; -+ ngx_http_upstream_init_peer_pt original_init_peer; -+ uintptr_t data; -+ - ngx_http_lua_srv_conf_handler_pt handler; - ngx_str_t src; - u_char *src_key; -diff --git a/ngx_lua-0.10.20/src/ngx_http_lua_module.c b/ngx_lua-0.10.20/src/ngx_http_lua_module.c -index 9816d864..5d7cedfd 100644 ---- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_module.c -+++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_module.c -@@ -1068,6 +1068,9 @@ ngx_http_lua_create_srv_conf(ngx_conf_t *cf) - * lscf->srv.ssl_session_fetch_src = { 0, NULL }; - * lscf->srv.ssl_session_fetch_src_key = NULL; - * -+ * lscf->balancer.original_init_upstream = NULL; -+ * lscf->balancer.original_init_peer = NULL; -+ * lscf->balancer.data = NULL; - * lscf->balancer.handler = NULL; - * lscf->balancer.src = { 0, NULL }; - * lscf->balancer.src_key = NULL; --- -2.26.2 - - -From 4c5cb29a265b2f9524434322adf15d07deec6c7f Mon Sep 17 00:00:00 2001 -From: Thibault Charbonnier -Date: Tue, 17 Sep 2019 11:43:54 -0700 -Subject: [PATCH 2/3] feature: we now avoid the need for 'upstream' blocks to - define a stub 'server' directive when using 'balancer_by_lua*'. 
- ---- - src/ngx_http_lua_balancer.c | 42 +++++++++++++++++++++++++++++++++++-- - 1 file changed, 40 insertions(+), 2 deletions(-) - -diff --git a/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c b/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c -index 0d403716..5c862d22 100644 ---- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c -+++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c -@@ -111,7 +111,8 @@ static void ngx_http_lua_balancer_save_session(ngx_peer_connection_t *pc, - (bp->sockaddr && bp->socklen) - - --static char ngx_http_lua_balancer_keepalive_pools_table_key; -+static char ngx_http_lua_balancer_keepalive_pools_table_key; -+static struct sockaddr *ngx_http_lua_balancer_default_server_sockaddr; - - - ngx_int_t -@@ -239,7 +240,9 @@ ngx_http_lua_balancer_by_lua(ngx_conf_t *cf, ngx_command_t *cmd, - u_char *cache_key = NULL; - u_char *name; - ngx_str_t *value; -+ ngx_url_t url; - ngx_http_upstream_srv_conf_t *uscf; -+ ngx_http_upstream_server_t *us; - ngx_http_lua_srv_conf_t *lscf = conf; - - dd("enter"); -@@ -293,6 +296,29 @@ ngx_http_lua_balancer_by_lua(ngx_conf_t *cf, ngx_command_t *cmd, - - uscf = ngx_http_conf_get_module_srv_conf(cf, ngx_http_upstream_module); - -+ if (uscf->servers->nelts == 0) { -+ us = ngx_array_push(uscf->servers); -+ if (us == NULL) { -+ return NGX_CONF_ERROR; -+ } -+ -+ ngx_memzero(us, sizeof(ngx_http_upstream_server_t)); -+ ngx_memzero(&url, sizeof(ngx_url_t)); -+ -+ ngx_str_set(&url.url, "0.0.0.1"); -+ url.default_port = 80; -+ -+ if (ngx_parse_url(cf->pool, &url) != NGX_OK) { -+ return NGX_CONF_ERROR; -+ } -+ -+ us->name = url.url; -+ us->addrs = url.addrs; -+ us->naddrs = url.naddrs; -+ -+ ngx_http_lua_balancer_default_server_sockaddr = us->addrs[0].sockaddr; -+ } -+ - if (uscf->peer.init_upstream) { - ngx_conf_log_error(NGX_LOG_WARN, cf, 0, - "load balancing method redefined"); -@@ -525,7 +551,19 @@ ngx_http_lua_balancer_get_peer(ngx_peer_connection_t *pc, void *data) - return NGX_OK; - } - -- return bp->original_get_peer(pc, bp->data); -+ rc = bp->original_get_peer(pc, bp->data); -+ if (rc == NGX_ERROR) { -+ return rc; -+ } -+ -+ if (pc->sockaddr == ngx_http_lua_balancer_default_server_sockaddr) { -+ ngx_log_error(NGX_LOG_ERR, pc->log, 0, -+ "lua balancer: no peer set"); -+ -+ return NGX_ERROR; -+ } -+ -+ return rc; - } - - --- -2.26.2 - - -From 941cd893573561574bc6a326d6306f1a30127293 Mon Sep 17 00:00:00 2001 -From: Thibault Charbonnier -Date: Tue, 17 Sep 2019 11:43:58 -0700 -Subject: [PATCH 3/3] refactor: used a simpler way to stash the balancer peer - data. 
- ---- - src/ngx_http_lua_balancer.c | 91 +++++++++---------------------------- - src/ngx_http_lua_common.h | 7 --- - 2 files changed, 22 insertions(+), 76 deletions(-) - -diff --git a/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c b/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c -index 5c862d22..3ea1f067 100644 ---- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c -+++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c -@@ -411,9 +411,9 @@ ngx_http_lua_balancer_get_peer(ngx_peer_connection_t *pc, void *data) - ngx_http_request_t *r; - ngx_http_lua_ctx_t *ctx; - ngx_http_lua_srv_conf_t *lscf; -- ngx_http_lua_main_conf_t *lmcf; - ngx_http_lua_balancer_keepalive_item_t *item; - ngx_http_lua_balancer_peer_data_t *bp = data; -+ void *pdata; - - ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0, - "lua balancer: get peer, tries: %ui", pc->tries); -@@ -452,15 +452,13 @@ ngx_http_lua_balancer_get_peer(ngx_peer_connection_t *pc, void *data) - bp->keepalive = 0; - bp->total_tries++; - -- lmcf = ngx_http_get_module_main_conf(r, ngx_http_lua_module); -- -- /* balancer_by_lua does not support yielding and -- * there cannot be any conflicts among concurrent requests, -- * thus it is safe to store the peer data in the main conf. -- */ -- lmcf->balancer_peer_data = bp; -+ pdata = r->upstream->peer.data; -+ r->upstream->peer.data = bp; - - rc = lscf->balancer.handler(r, lscf, L); -+ -+ r->upstream->peer.data = pdata; -+ - if (rc == NGX_ERROR) { - return NGX_ERROR; - } -@@ -945,7 +943,6 @@ ngx_http_lua_ffi_balancer_set_current_peer(ngx_http_request_t *r, - ngx_url_t url; - ngx_http_upstream_t *u; - ngx_http_lua_ctx_t *ctx; -- ngx_http_lua_main_conf_t *lmcf; - ngx_http_lua_balancer_peer_data_t *bp; - - if (r == NULL) { -@@ -971,18 +968,6 @@ ngx_http_lua_ffi_balancer_set_current_peer(ngx_http_request_t *r, - return NGX_ERROR; - } - -- lmcf = ngx_http_get_module_main_conf(r, ngx_http_lua_module); -- -- /* we cannot read r->upstream->peer.data here directly because -- * it could be overridden by other modules like -- * ngx_http_upstream_keepalive_module. -- */ -- bp = lmcf->balancer_peer_data; -- if (bp == NULL) { -- *err = "no upstream peer data found"; -- return NGX_ERROR; -- } -- - ngx_memzero(&url, sizeof(ngx_url_t)); - - url.url.data = ngx_palloc(r->pool, addr_len); -@@ -1006,6 +991,8 @@ ngx_http_lua_ffi_balancer_set_current_peer(ngx_http_request_t *r, - return NGX_ERROR; - } - -+ bp = (ngx_http_lua_balancer_peer_data_t *) u->peer.data; -+ - if (url.addrs && url.addrs[0].sockaddr) { - bp->sockaddr = url.addrs[0].sockaddr; - bp->socklen = url.addrs[0].socklen; -@@ -1029,7 +1016,6 @@ ngx_http_lua_ffi_balancer_enable_keepalive(ngx_http_request_t *r, - { - ngx_http_upstream_t *u; - ngx_http_lua_ctx_t *ctx; -- ngx_http_lua_main_conf_t *lmcf; - ngx_http_lua_balancer_peer_data_t *bp; - - if (r == NULL) { -@@ -1055,17 +1041,7 @@ ngx_http_lua_ffi_balancer_enable_keepalive(ngx_http_request_t *r, - return NGX_ERROR; - } - -- lmcf = ngx_http_get_module_main_conf(r, ngx_http_lua_module); -- -- /* we cannot read r->upstream->peer.data here directly because -- * it could be overridden by other modules like -- * ngx_http_upstream_keepalive_module. 
-- */ -- bp = lmcf->balancer_peer_data; -- if (bp == NULL) { -- *err = "no upstream peer data found"; -- return NGX_ERROR; -- } -+ bp = (ngx_http_lua_balancer_peer_data_t *) u->peer.data; - - if (!ngx_http_lua_balancer_peer_set(bp)) { - *err = "no current peer set"; -@@ -1089,14 +1065,13 @@ ngx_http_lua_ffi_balancer_set_timeouts(ngx_http_request_t *r, +@@ -545,14 +1078,13 @@ ngx_http_lua_ffi_balancer_set_timeouts(ngx_http_request_t *r, long connect_timeout, long send_timeout, long read_timeout, char **err) { @@ -1217,7 +1085,7 @@ index 5c862d22..3ea1f067 100644 if (r == NULL) { *err = "no request found"; -@@ -1121,15 +1096,9 @@ ngx_http_lua_ffi_balancer_set_timeouts(ngx_http_request_t *r, +@@ -577,15 +1109,9 @@ ngx_http_lua_ffi_balancer_set_timeouts(ngx_http_request_t *r, return NGX_ERROR; } @@ -1235,7 +1103,7 @@ index 5c862d22..3ea1f067 100644 if (!bp->cloned_upstream_conf) { /* we clone the upstream conf for the current request so that * we do not affect other requests at all. */ -@@ -1184,12 +1153,10 @@ ngx_http_lua_ffi_balancer_set_more_tries(ngx_http_request_t *r, +@@ -640,12 +1166,10 @@ ngx_http_lua_ffi_balancer_set_more_tries(ngx_http_request_t *r, int count, char **err) { #if (nginx_version >= 1007005) @@ -1251,7 +1119,7 @@ index 5c862d22..3ea1f067 100644 ngx_http_lua_balancer_peer_data_t *bp; if (r == NULL) { -@@ -1215,13 +1182,7 @@ ngx_http_lua_ffi_balancer_set_more_tries(ngx_http_request_t *r, +@@ -671,13 +1195,7 @@ ngx_http_lua_ffi_balancer_set_more_tries(ngx_http_request_t *r, return NGX_ERROR; } @@ -1266,7 +1134,7 @@ index 5c862d22..3ea1f067 100644 #if (nginx_version >= 1007005) max_tries = r->upstream->conf->next_upstream_tries; -@@ -1247,12 +1208,10 @@ int +@@ -703,12 +1221,10 @@ int ngx_http_lua_ffi_balancer_get_last_failure(ngx_http_request_t *r, int *status, char **err) { @@ -1282,7 +1150,7 @@ index 5c862d22..3ea1f067 100644 if (r == NULL) { *err = "no request found"; -@@ -1277,13 +1236,7 @@ ngx_http_lua_ffi_balancer_get_last_failure(ngx_http_request_t *r, +@@ -733,13 +1249,7 @@ ngx_http_lua_ffi_balancer_get_last_failure(ngx_http_request_t *r, return NGX_ERROR; } @@ -1297,8 +1165,8 @@ index 5c862d22..3ea1f067 100644 if (r->upstream_states && r->upstream_states->nelts > 1) { state = r->upstream_states->elts; -diff --git a/ngx_lua-0.10.20/src/ngx_http_lua_common.h b/ngx_lua-0.10.20/src/ngx_http_lua_common.h -index 9ce6836a..9a4342df 100644 +diff --git a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_common.h b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_common.h +index 0751a11..7a66cb1 100644 --- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_common.h +++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_common.h @@ -239,13 +239,6 @@ struct ngx_http_lua_main_conf_s { @@ -1315,5 +1183,30 @@ index 9ce6836a..9a4342df 100644 ngx_chain_t *body_filter_chain; /* neither yielding nor recursion is possible in * body_filter_by_lua*, so there cannot be any races among +@@ -320,6 +313,10 @@ union ngx_http_lua_srv_conf_u { + #endif + + struct { ++ ngx_http_upstream_init_pt original_init_upstream; ++ ngx_http_upstream_init_peer_pt original_init_peer; ++ uintptr_t data; ++ + ngx_http_lua_srv_conf_handler_pt handler; + ngx_str_t src; + u_char *src_key; +diff --git a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_module.c b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_module.c +index 7358a95..21bf8f1 100644 +--- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_module.c ++++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_module.c +@@ -1068,6 +1068,9 @@ ngx_http_lua_create_srv_conf(ngx_conf_t *cf) + * lscf->srv.ssl_session_fetch_src = { 
0, NULL }; + * lscf->srv.ssl_session_fetch_src_key = NULL; + * ++ * lscf->balancer.original_init_upstream = NULL; ++ * lscf->balancer.original_init_peer = NULL; ++ * lscf->balancer.data = NULL; + * lscf->balancer.handler = NULL; + * lscf->balancer.src = { 0, NULL }; + * lscf->balancer.src_key = NULL; -- -2.26.2 +2.34.1 diff --git a/changelog/unreleased/kong/balancer_respect_max_retries.yml b/changelog/unreleased/kong/balancer_respect_max_retries.yml new file mode 100644 index 00000000000..1884ad1ce9f --- /dev/null +++ b/changelog/unreleased/kong/balancer_respect_max_retries.yml @@ -0,0 +1,3 @@ +message: Fix an issue that the actual number of retry times exceeds the `retries` setting. +type: bugfix +scope: Core diff --git a/spec/02-integration/05-proxy/10-balancer/08-retries_spec.lua b/spec/02-integration/05-proxy/10-balancer/08-retries_spec.lua new file mode 100644 index 00000000000..cfc25919986 --- /dev/null +++ b/spec/02-integration/05-proxy/10-balancer/08-retries_spec.lua @@ -0,0 +1,124 @@ +local helpers = require "spec.helpers" +local cjson = require "cjson" + +local function get_log(typ, n) + local entries + helpers.wait_until(function() + local client = assert(helpers.http_client(helpers.mock_upstream_host, + helpers.mock_upstream_port)) + local res = client:get("/read_log/" .. typ, { + headers = { + Accept = "application/json" + } + }) + local raw = assert.res_status(200, res) + local body = cjson.decode(raw) + + entries = body.entries + return #entries > 0 + end, 10) + if n then + assert(#entries == n, "expected " .. n .. " log entries, but got " .. #entries) + end + return entries +end + +for _, strategy in helpers.each_strategy() do + describe("Balancer: respect max retries [#" .. strategy .. "]", function() + local service + + lazy_setup(function() + local bp = helpers.get_db_utils(strategy, { + "routes", + "services", + "plugins", + }) + + service = bp.services:insert { + name = "retry_service", + host = "127.0.0.1", + port = 62351, + retries = 5, + } + + local route = bp.routes:insert { + service = service, + paths = { "/hello" }, + strip_path = false, + } + + bp.plugins:insert { + route = { id = route.id }, + name = "http-log", + config = { + queue_size = 1, + http_endpoint = "http://" .. helpers.mock_upstream_host + .. ":" + .. helpers.mock_upstream_port + .. 
"/post_log/http" + } + } + + local fixtures = { + http_mock = {} + } + + fixtures.http_mock.my_server_block = [[ + server { + listen 0.0.0.0:62351; + location /hello { + content_by_lua_block { + local request_counter = ngx.shared.request_counter + local first_request = request_counter:get("first_request") + if first_request == nil then + request_counter:set("first_request", "yes") + ngx.say("hello") + else + ngx.exit(ngx.HTTP_CLOSE) + end + } + } + } + ]] + + assert(helpers.start_kong({ + database = strategy, + nginx_conf = "spec/fixtures/custom_nginx.template", + nginx_http_lua_shared_dict = "request_counter 1m", + }, nil, nil, fixtures)) + end) + + lazy_teardown(function() + helpers.stop_kong() + end) + + it("exceeded limit", function() + -- First request should succeed and save connection to upstream in keepalive pool + local proxy_client1 = helpers.proxy_client() + local res = assert(proxy_client1:send { + method = "GET", + path = "/hello", + }) + + assert.res_status(200, res) + + proxy_client1:close() + + -- Second request should failed 1 times and retry 5 times and then return 502 + local proxy_client2 = helpers.proxy_client() + + res = assert(proxy_client2:send { + method = "GET", + path = "/hello", + }) + + assert.res_status(502, res) + + -- wait for the http-log plugin to flush the log + ngx.sleep(1) + + local entries = get_log("http", 2) + assert.equal(#entries[2].tries, 6) + end) + end) +end From a33ea19c644eda735ef9c45b8e0aa02787dbe146 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 18 Jul 2024 21:08:59 +0800 Subject: [PATCH 14/42] Revert "fix(balancer): respect max retries (#12346)" This reverts commit aa7c405be34c5ab52ffccf4e7ddc6c8ab48bc72d. --- ...ua-0.10.20_02-dyn_upstream_keepalive.patch | 667 ++++++++++-------- .../kong/balancer_respect_max_retries.yml | 3 - .../05-proxy/10-balancer/08-retries_spec.lua | 124 ---- 3 files changed, 387 insertions(+), 407 deletions(-) delete mode 100644 changelog/unreleased/kong/balancer_respect_max_retries.yml delete mode 100644 spec/02-integration/05-proxy/10-balancer/08-retries_spec.lua diff --git a/build/openresty/patches/ngx_lua-0.10.20_02-dyn_upstream_keepalive.patch b/build/openresty/patches/ngx_lua-0.10.20_02-dyn_upstream_keepalive.patch index 0ee6df76ae8..effdd5b517b 100644 --- a/build/openresty/patches/ngx_lua-0.10.20_02-dyn_upstream_keepalive.patch +++ b/build/openresty/patches/ngx_lua-0.10.20_02-dyn_upstream_keepalive.patch @@ -5,43 +5,16 @@ Subject: [PATCH 1/3] feature: implemented keepalive pooling in 'balancer_by_lua*'. 
--- - .../nginx-1.19.9/src/http/ngx_http_upstream.c | 1 + - .../nginx-1.19.9/src/http/ngx_http_upstream.h | 2 + - .../src/ngx_http_lua_balancer.c | 848 ++++++++++++++---- - .../ngx_lua-0.10.20/src/ngx_http_lua_common.h | 11 +- - .../ngx_lua-0.10.20/src/ngx_http_lua_module.c | 3 + - 5 files changed, 689 insertions(+), 176 deletions(-) - -diff --git a/bundle/nginx-1.19.9/src/http/ngx_http_upstream.c b/bundle/nginx-1.19.9/src/http/ngx_http_upstream.c -index 4a6db93..98a8cfc 100644 ---- a/bundle/nginx-1.19.9/src/http/ngx_http_upstream.c -+++ b/bundle/nginx-1.19.9/src/http/ngx_http_upstream.c -@@ -4244,6 +4244,7 @@ ngx_http_upstream_next(ngx_http_request_t *r, ngx_http_upstream_t *u, - if (u->peer.cached && ft_type == NGX_HTTP_UPSTREAM_FT_ERROR) { - /* TODO: inform balancer instead */ - u->peer.tries++; -+ u->peer.notify(&u->peer, u->peer.data, NGX_HTTP_UPSTREAM_NOFITY_CACHED_CONNECTION_ERROR); - } - - switch (ft_type) { -diff --git a/bundle/nginx-1.19.9/src/http/ngx_http_upstream.h b/bundle/nginx-1.19.9/src/http/ngx_http_upstream.h -index 0432617..50a10cb 100644 ---- a/bundle/nginx-1.19.9/src/http/ngx_http_upstream.h -+++ b/bundle/nginx-1.19.9/src/http/ngx_http_upstream.h -@@ -56,6 +56,8 @@ - #define NGX_HTTP_UPSTREAM_IGN_VARY 0x00000200 + src/ngx_http_lua_balancer.c | 738 ++++++++++++++++++++++++++++++------ + src/ngx_http_lua_common.h | 4 + + src/ngx_http_lua_module.c | 3 + + 3 files changed, 629 insertions(+), 116 deletions(-) - -+#define NGX_HTTP_UPSTREAM_NOFITY_CACHED_CONNECTION_ERROR 0x1 -+ - typedef struct { - ngx_uint_t status; - ngx_msec_t response_time; -diff --git a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c -index e4ac57a..44e01cb 100644 +diff --git a/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c b/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c +index f71a3e00..0d403716 100644 --- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c +++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c -@@ -16,46 +16,105 @@ +@@ -16,46 +16,102 @@ #include "ngx_http_lua_directive.h" @@ -75,20 +48,25 @@ index e4ac57a..44e01cb 100644 + + ngx_uint_t more_tries; + ngx_uint_t total_tries; -+ + +- ngx_http_lua_srv_conf_t *conf; +- ngx_http_request_t *request; + int last_peer_state; -+ + +- ngx_uint_t more_tries; +- ngx_uint_t total_tries; + uint32_t cpool_crc32; -+ + +- struct sockaddr *sockaddr; +- socklen_t socklen; + void *data; -- ngx_http_lua_srv_conf_t *conf; -- ngx_http_request_t *request; +- ngx_str_t *host; +- in_port_t port; + ngx_event_get_peer_pt original_get_peer; + ngx_event_free_peer_pt original_free_peer; -- ngx_uint_t more_tries; -- ngx_uint_t total_tries; +- int last_peer_state; +#if (NGX_HTTP_SSL) + ngx_event_set_peer_session_pt original_set_session; + ngx_event_save_peer_session_pt original_save_session; @@ -97,17 +75,12 @@ index e4ac57a..44e01cb 100644 + ngx_http_request_t *request; + ngx_http_lua_srv_conf_t *conf; + ngx_http_lua_balancer_keepalive_pool_t *cpool; - -- struct sockaddr *sockaddr; -- socklen_t socklen; ++ + ngx_str_t *host; - -- ngx_str_t *host; -- in_port_t port; ++ + struct sockaddr *sockaddr; + socklen_t socklen; - -- int last_peer_state; ++ + unsigned keepalive:1; #if !(HAVE_NGX_UPSTREAM_TIMEOUT_FIELDS) @@ -135,8 +108,6 @@ index e4ac57a..44e01cb 100644 - ngx_http_request_t *r); static void ngx_http_lua_balancer_free_peer(ngx_peer_connection_t *pc, void *data, ngx_uint_t state); -+static void ngx_http_lua_balancer_notify_peer(ngx_peer_connection_t *pc, -+ void *data, ngx_uint_t type); +static ngx_int_t 
ngx_http_lua_balancer_create_keepalive_pool(lua_State *L, + ngx_log_t *log, uint32_t cpool_crc32, ngx_uint_t cpool_size, + ngx_http_lua_balancer_keepalive_pool_t **cpool); @@ -162,12 +133,11 @@ index e4ac57a..44e01cb 100644 + (bp->sockaddr && bp->socklen) + + -+static char ngx_http_lua_balancer_keepalive_pools_table_key; -+static struct sockaddr *ngx_http_lua_balancer_default_server_sockaddr; ++static char ngx_http_lua_balancer_keepalive_pools_table_key; ngx_int_t -@@ -102,6 +161,61 @@ ngx_http_lua_balancer_handler_inline(ngx_http_request_t *r, +@@ -102,6 +158,61 @@ ngx_http_lua_balancer_handler_inline(ngx_http_request_t *r, } @@ -229,7 +199,7 @@ index e4ac57a..44e01cb 100644 char * ngx_http_lua_balancer_by_lua_block(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) -@@ -125,16 +239,18 @@ char * +@@ -125,16 +236,16 @@ char * ngx_http_lua_balancer_by_lua(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) { @@ -241,9 +211,7 @@ index e4ac57a..44e01cb 100644 + u_char *cache_key = NULL; + u_char *name; + ngx_str_t *value; -+ ngx_url_t url; ngx_http_upstream_srv_conf_t *uscf; -+ ngx_http_upstream_server_t *us; + ngx_http_lua_srv_conf_t *lscf = conf; dd("enter"); @@ -254,7 +222,7 @@ index e4ac57a..44e01cb 100644 if (cmd->post == NULL) { return NGX_CONF_ERROR; } -@@ -178,11 +294,42 @@ ngx_http_lua_balancer_by_lua(ngx_conf_t *cf, ngx_command_t *cmd, +@@ -178,11 +289,19 @@ ngx_http_lua_balancer_by_lua(ngx_conf_t *cf, ngx_command_t *cmd, lscf->balancer.src_key = cache_key; @@ -262,29 +230,6 @@ index e4ac57a..44e01cb 100644 + uscf = ngx_http_conf_get_module_srv_conf(cf, ngx_http_upstream_module); -+ if (uscf->servers->nelts == 0) { -+ us = ngx_array_push(uscf->servers); -+ if (us == NULL) { -+ return NGX_CONF_ERROR; -+ } -+ -+ ngx_memzero(us, sizeof(ngx_http_upstream_server_t)); -+ ngx_memzero(&url, sizeof(ngx_url_t)); -+ -+ ngx_str_set(&url.url, "0.0.0.1"); -+ url.default_port = 80; -+ -+ if (ngx_parse_url(cf->pool, &url) != NGX_OK) { -+ return NGX_CONF_ERROR; -+ } -+ -+ us->name = url.url; -+ us->addrs = url.addrs; -+ us->naddrs = url.naddrs; -+ -+ ngx_http_lua_balancer_default_server_sockaddr = us->addrs[0].sockaddr; -+ } -+ if (uscf->peer.init_upstream) { ngx_conf_log_error(NGX_LOG_WARN, cf, 0, "load balancing method redefined"); @@ -297,7 +242,7 @@ index e4ac57a..44e01cb 100644 } uscf->peer.init_upstream = ngx_http_lua_balancer_init; -@@ -198,14 +345,18 @@ ngx_http_lua_balancer_by_lua(ngx_conf_t *cf, ngx_command_t *cmd, +@@ -198,14 +317,18 @@ ngx_http_lua_balancer_by_lua(ngx_conf_t *cf, ngx_command_t *cmd, static ngx_int_t @@ -320,7 +265,7 @@ index e4ac57a..44e01cb 100644 us->peer.init = ngx_http_lua_balancer_init_peer; return NGX_OK; -@@ -216,33 +367,39 @@ static ngx_int_t +@@ -216,33 +339,38 @@ static ngx_int_t ngx_http_lua_balancer_init_peer(ngx_http_request_t *r, ngx_http_upstream_srv_conf_t *us) { @@ -353,7 +298,6 @@ index e4ac57a..44e01cb 100644 + r->upstream->peer.data = bp; r->upstream->peer.get = ngx_http_lua_balancer_get_peer; r->upstream->peer.free = ngx_http_lua_balancer_free_peer; -+ r->upstream->peer.notify = ngx_http_lua_balancer_notify_peer; #if (NGX_HTTP_SSL) + bp->original_set_session = r->upstream->peer.set_session; @@ -371,7 +315,7 @@ index e4ac57a..44e01cb 100644 return NGX_OK; } -@@ -250,25 +407,26 @@ ngx_http_lua_balancer_init_peer(ngx_http_request_t *r, +@@ -250,25 +378,26 @@ ngx_http_lua_balancer_init_peer(ngx_http_request_t *r, static ngx_int_t ngx_http_lua_balancer_get_peer(ngx_peer_connection_t *pc, void *data) { @@ -389,9 +333,9 @@ index e4ac57a..44e01cb 100644 + 
ngx_http_request_t *r; + ngx_http_lua_ctx_t *ctx; + ngx_http_lua_srv_conf_t *lscf; ++ ngx_http_lua_main_conf_t *lmcf; + ngx_http_lua_balancer_keepalive_item_t *item; + ngx_http_lua_balancer_peer_data_t *bp = data; -+ void *pdata; ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0, - "lua balancer peer, tries: %ui", pc->tries); @@ -409,7 +353,7 @@ index e4ac57a..44e01cb 100644 if (ctx == NULL) { ctx = ngx_http_lua_create_ctx(r); if (ctx == NULL) { -@@ -286,21 +444,24 @@ ngx_http_lua_balancer_get_peer(ngx_peer_connection_t *pc, void *data) +@@ -286,9 +415,15 @@ ngx_http_lua_balancer_get_peer(ngx_peer_connection_t *pc, void *data) ctx->context = NGX_HTTP_LUA_CONTEXT_BALANCER; @@ -424,24 +368,16 @@ index e4ac57a..44e01cb 100644 + bp->keepalive = 0; bp->total_tries++; -- lmcf = ngx_http_get_module_main_conf(r, ngx_http_lua_module); -- -- /* balancer_by_lua does not support yielding and -- * there cannot be any conflicts among concurrent requests, -- * thus it is safe to store the peer data in the main conf. -- */ -- lmcf->balancer_peer_data = bp; -+ pdata = r->upstream->peer.data; -+ r->upstream->peer.data = bp; + lmcf = ngx_http_get_module_main_conf(r, ngx_http_lua_module); +@@ -300,7 +435,6 @@ ngx_http_lua_balancer_get_peer(ngx_peer_connection_t *pc, void *data) + lmcf->balancer_peer_data = bp; rc = lscf->balancer.handler(r, lscf, L); - -+ r->upstream->peer.data = pdata; -+ +- if (rc == NGX_ERROR) { return NGX_ERROR; } -@@ -322,79 +483,87 @@ ngx_http_lua_balancer_get_peer(ngx_peer_connection_t *pc, void *data) +@@ -322,105 +456,414 @@ ngx_http_lua_balancer_get_peer(ngx_peer_connection_t *pc, void *data) } } @@ -461,16 +397,10 @@ index e4ac57a..44e01cb 100644 } - dd("tries: %d", (int) r->upstream->peer.tries); -- -- return NGX_OK; -- } -- -- return ngx_http_upstream_get_round_robin_peer(pc, &bp->rrp); --} + if (ngx_http_lua_balancer_keepalive_is_enabled(bp)) { + ngx_http_lua_balancer_get_keepalive_pool(L, bp->cpool_crc32, + &bp->cpool); - ++ + if (bp->cpool == NULL + && ngx_http_lua_balancer_create_keepalive_pool(L, pc->log, + bp->cpool_crc32, @@ -480,105 +410,65 @@ index e4ac57a..44e01cb 100644 + { + return NGX_ERROR; + } - --static ngx_int_t --ngx_http_lua_balancer_by_chunk(lua_State *L, ngx_http_request_t *r) --{ -- u_char *err_msg; -- size_t len; -- ngx_int_t rc; ++ + ngx_http_lua_assert(bp->cpool); - -- /* init nginx context in Lua VM */ -- ngx_http_lua_set_req(L, r); ++ + if (!ngx_queue_empty(&bp->cpool->cache)) { + q = ngx_queue_head(&bp->cpool->cache); - --#ifndef OPENRESTY_LUAJIT -- ngx_http_lua_create_new_globals_table(L, 0 /* narr */, 1 /* nrec */); ++ + item = ngx_queue_data(q, ngx_http_lua_balancer_keepalive_item_t, + queue); + c = item->connection; - -- /* {{{ make new env inheriting main thread's globals table */ -- lua_createtable(L, 0, 1 /* nrec */); /* the metatable for the new env */ -- ngx_http_lua_get_globals_table(L); -- lua_setfield(L, -2, "__index"); -- lua_setmetatable(L, -2); /* setmetatable({}, {__index = _G}) */ -- /* }}} */ ++ + ngx_queue_remove(q); + ngx_queue_insert_head(&bp->cpool->free, q); - -- lua_setfenv(L, -2); /* set new running env for the code closure */ --#endif /* OPENRESTY_LUAJIT */ ++ + c->idle = 0; + c->sent = 0; + c->log = pc->log; + c->read->log = pc->log; + c->write->log = pc->log; + c->pool->log = pc->log; - -- lua_pushcfunction(L, ngx_http_lua_traceback); -- lua_insert(L, 1); /* put it under chunk and args */ ++ + if (c->read->timer_set) { + ngx_del_timer(c->read); + } - -- /* protected call user code */ -- rc = lua_pcall(L, 0, 1, 1); ++ + 
pc->cached = 1; + pc->connection = c; - -- lua_remove(L, 1); /* remove traceback function */ ++ + ngx_log_debug3(NGX_LOG_DEBUG_HTTP, pc->log, 0, + "lua balancer: keepalive reusing connection %p, " + "requests: %ui, cpool: %p", + c, c->requests, bp->cpool); - -- dd("rc == %d", (int) rc); ++ + return NGX_DONE; + } - -- if (rc != 0) { -- /* error occurred when running loaded code */ -- err_msg = (u_char *) lua_tolstring(L, -1, &len); ++ + bp->cpool->connections++; - -- if (err_msg == NULL) { -- err_msg = (u_char *) "unknown reason"; -- len = sizeof("unknown reason") - 1; ++ + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0, + "lua balancer: keepalive no free connection, " + "cpool: %p", bp->cpool); - } - -- ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, -- "failed to run balancer_by_lua*: %*s", len, err_msg); -+ return NGX_OK; -+ } - -- lua_settop(L, 0); /* clear remaining elems on stack */ -+ rc = bp->original_get_peer(pc, bp->data); -+ if (rc == NGX_ERROR) { -+ return rc; -+ } -+ -+ if (pc->sockaddr == ngx_http_lua_balancer_default_server_sockaddr) { -+ ngx_log_error(NGX_LOG_ERR, pc->log, 0, -+ "lua balancer: no peer set"); ++ } - return NGX_ERROR; + return NGX_OK; } -- lua_settop(L, 0); /* clear remaining elems on stack */ - return rc; +- return ngx_http_upstream_get_round_robin_peer(pc, &bp->rrp); ++ return bp->original_get_peer(pc, bp->data); } -@@ -403,24 +572,347 @@ static void - ngx_http_lua_balancer_free_peer(ngx_peer_connection_t *pc, void *data, - ngx_uint_t state) + +-static ngx_int_t +-ngx_http_lua_balancer_by_chunk(lua_State *L, ngx_http_request_t *r) ++static void ++ngx_http_lua_balancer_free_peer(ngx_peer_connection_t *pc, void *data, ++ ngx_uint_t state) { -- ngx_http_lua_balancer_peer_data_t *bp = data; +- u_char *err_msg; +- size_t len; +- ngx_int_t rc; + ngx_queue_t *q; + ngx_connection_t *c; + ngx_http_upstream_t *u; @@ -586,24 +476,38 @@ index e4ac57a..44e01cb 100644 + ngx_http_lua_balancer_keepalive_pool_t *cpool; + ngx_http_lua_balancer_peer_data_t *bp = data; - ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0, -- "lua balancer free peer, tries: %ui", pc->tries); +- /* init nginx context in Lua VM */ +- ngx_http_lua_set_req(L, r); ++ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0, + "lua balancer: free peer, tries: %ui", pc->tries); -+ + +-#ifndef OPENRESTY_LUAJIT +- ngx_http_lua_create_new_globals_table(L, 0 /* narr */, 1 /* nrec */); + u = bp->request->upstream; + c = pc->connection; -- if (bp->sockaddr && bp->socklen) { +- /* {{{ make new env inheriting main thread's globals table */ +- lua_createtable(L, 0, 1 /* nrec */); /* the metatable for the new env */ +- ngx_http_lua_get_globals_table(L); +- lua_setfield(L, -2, "__index"); +- lua_setmetatable(L, -2); /* setmetatable({}, {__index = _G}) */ +- /* }}} */ + if (ngx_http_lua_balancer_peer_set(bp)) { - bp->last_peer_state = (int) state; ++ bp->last_peer_state = (int) state; - if (pc->tries) { - pc->tries--; - } +- lua_setfenv(L, -2); /* set new running env for the code closure */ +-#endif /* OPENRESTY_LUAJIT */ ++ if (pc->tries) { ++ pc->tries--; ++ } +- lua_pushcfunction(L, ngx_http_lua_traceback); +- lua_insert(L, 1); /* put it under chunk and args */ + if (ngx_http_lua_balancer_keepalive_is_enabled(bp)) { + cpool = bp->cpool; -+ + +- /* protected call user code */ +- rc = lua_pcall(L, 0, 1, 1); + if (state & NGX_PEER_FAILED + || c == NULL + || c->read->eof @@ -614,21 +518,29 @@ index e4ac57a..44e01cb 100644 + { + goto invalid; + } -+ + +- lua_remove(L, 1); /* remove traceback function */ + if 
(bp->keepalive_requests + && c->requests >= bp->keepalive_requests) + { + goto invalid; + } -+ + +- dd("rc == %d", (int) rc); + if (!u->keepalive) { + goto invalid; + } -+ + +- if (rc != 0) { +- /* error occurred when running loaded code */ +- err_msg = (u_char *) lua_tolstring(L, -1, &len); + if (!u->request_body_sent) { + goto invalid; + } -+ + +- if (err_msg == NULL) { +- err_msg = (u_char *) "unknown reason"; +- len = sizeof("unknown reason") - 1; + if (ngx_terminate || ngx_exiting) { + goto invalid; + } @@ -705,25 +617,18 @@ index e4ac57a..44e01cb 100644 + if (cpool->connections == 0) { + ngx_http_lua_balancer_free_keepalive_pool(pc->log, cpool); + } -+ } -+ + } + +- ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, +- "failed to run balancer_by_lua*: %*s", len, err_msg); + return; + } -+ + +- lua_settop(L, 0); /* clear remaining elems on stack */ + bp->original_free_peer(pc, bp->data, state); +} + + -+static void -+ngx_http_lua_balancer_notify_peer(ngx_peer_connection_t *pc, void *data, -+ ngx_uint_t type) -+{ -+ if (type == NGX_HTTP_UPSTREAM_NOFITY_CACHED_CONNECTION_ERROR) { -+ pc->tries--; -+ } -+} -+ -+ +static ngx_int_t +ngx_http_lua_balancer_create_keepalive_pool(lua_State *L, ngx_log_t *log, + uint32_t cpool_crc32, ngx_uint_t cpool_size, @@ -743,12 +648,14 @@ index e4ac57a..44e01cb 100644 + + size = sizeof(ngx_http_lua_balancer_keepalive_pool_t) + + sizeof(ngx_http_lua_balancer_keepalive_item_t) * cpool_size; -+ + + upool = lua_newuserdata(L, size); /* pools upool */ + if (upool == NULL) { -+ return NGX_ERROR; -+ } -+ + return NGX_ERROR; + } + +- lua_settop(L, 0); /* clear remaining elems on stack */ +- return rc; + ngx_log_debug2(NGX_LOG_DEBUG_HTTP, log, 0, + "lua balancer: keepalive create pool, crc32: %ui, " + "size: %ui", cpool_crc32, cpool_size); @@ -776,13 +683,16 @@ index e4ac57a..44e01cb 100644 + *cpool = upool; + + return NGX_OK; -+} -+ -+ -+static void + } + + + static void +-ngx_http_lua_balancer_free_peer(ngx_peer_connection_t *pc, void *data, +- ngx_uint_t state) +ngx_http_lua_balancer_get_keepalive_pool(lua_State *L, uint32_t cpool_crc32, + ngx_http_lua_balancer_keepalive_pool_t **cpool) -+{ + { +- ngx_http_lua_balancer_peer_data_t *bp = data; + ngx_http_lua_balancer_keepalive_pool_t *upool; + + /* get upstream connection pools table */ @@ -800,13 +710,19 @@ index e4ac57a..44e01cb 100644 + lua_pushvalue(L, -2); /* pools pools_table_key pools */ + lua_rawset(L, LUA_REGISTRYINDEX); /* pools */ + } -+ + +- ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0, +- "lua balancer free peer, tries: %ui", pc->tries); + ngx_http_lua_assert(lua_istable(L, -1)); -+ + +- if (bp->sockaddr && bp->socklen) { +- bp->last_peer_state = (int) state; + lua_rawgeti(L, -1, cpool_crc32); /* pools upool? 
*/ + upool = lua_touserdata(L, -1); + lua_pop(L, 2); /* orig stack */ -+ + +- if (pc->tries) { +- pc->tries--; + *cpool = upool; +} + @@ -904,8 +820,8 @@ index e4ac57a..44e01cb 100644 + + if (ngx_handle_read_event(c->read, 0) != NGX_OK) { + goto close; -+ } -+ + } + return; } @@ -914,10 +830,10 @@ index e4ac57a..44e01cb 100644 + + item = c->data; + c->log = ev->log; ++ ++ ngx_http_lua_balancer_close(c); - ngx_http_upstream_free_round_robin_peer(pc, data, state); -+ ngx_http_lua_balancer_close(c); -+ + ngx_queue_remove(&item->queue); + ngx_queue_insert_head(&item->cpool->free, &item->queue); + @@ -927,7 +843,7 @@ index e4ac57a..44e01cb 100644 } -@@ -431,12 +923,12 @@ ngx_http_lua_balancer_set_session(ngx_peer_connection_t *pc, void *data) +@@ -431,12 +874,12 @@ ngx_http_lua_balancer_set_session(ngx_peer_connection_t *pc, void *data) { ngx_http_lua_balancer_peer_data_t *bp = data; @@ -942,7 +858,7 @@ index e4ac57a..44e01cb 100644 } -@@ -445,13 +937,12 @@ ngx_http_lua_balancer_save_session(ngx_peer_connection_t *pc, void *data) +@@ -445,13 +888,12 @@ ngx_http_lua_balancer_save_session(ngx_peer_connection_t *pc, void *data) { ngx_http_lua_balancer_peer_data_t *bp = data; @@ -958,7 +874,7 @@ index e4ac57a..44e01cb 100644 } #endif -@@ -459,14 +950,13 @@ ngx_http_lua_balancer_save_session(ngx_peer_connection_t *pc, void *data) +@@ -459,14 +901,14 @@ ngx_http_lua_balancer_save_session(ngx_peer_connection_t *pc, void *data) int ngx_http_lua_ffi_balancer_set_current_peer(ngx_http_request_t *r, @@ -975,39 +891,12 @@ index e4ac57a..44e01cb 100644 + ngx_url_t url; + ngx_http_upstream_t *u; + ngx_http_lua_ctx_t *ctx; ++ ngx_http_lua_main_conf_t *lmcf; + ngx_http_lua_balancer_peer_data_t *bp; if (r == NULL) { *err = "no request found"; -@@ -491,18 +981,6 @@ ngx_http_lua_ffi_balancer_set_current_peer(ngx_http_request_t *r, - return NGX_ERROR; - } - -- lmcf = ngx_http_get_module_main_conf(r, ngx_http_lua_module); -- -- /* we cannot read r->upstream->peer.data here directly because -- * it could be overridden by other modules like -- * ngx_http_upstream_keepalive_module. -- */ -- bp = lmcf->balancer_peer_data; -- if (bp == NULL) { -- *err = "no upstream peer data found"; -- return NGX_ERROR; -- } -- - ngx_memzero(&url, sizeof(ngx_url_t)); - - url.url.data = ngx_palloc(r->pool, addr_len); -@@ -526,6 +1004,8 @@ ngx_http_lua_ffi_balancer_set_current_peer(ngx_http_request_t *r, - return NGX_ERROR; - } - -+ bp = (ngx_http_lua_balancer_peer_data_t *) u->peer.data; -+ - if (url.addrs && url.addrs[0].sockaddr) { - bp->sockaddr = url.addrs[0].sockaddr; - bp->socklen = url.addrs[0].socklen; -@@ -536,6 +1016,59 @@ ngx_http_lua_ffi_balancer_set_current_peer(ngx_http_request_t *r, +@@ -536,6 +978,70 @@ ngx_http_lua_ffi_balancer_set_current_peer(ngx_http_request_t *r, return NGX_ERROR; } @@ -1024,6 +913,7 @@ index e4ac57a..44e01cb 100644 +{ + ngx_http_upstream_t *u; + ngx_http_lua_ctx_t *ctx; ++ ngx_http_lua_main_conf_t *lmcf; + ngx_http_lua_balancer_peer_data_t *bp; + + if (r == NULL) { @@ -1049,7 +939,17 @@ index e4ac57a..44e01cb 100644 + return NGX_ERROR; + } + -+ bp = (ngx_http_lua_balancer_peer_data_t *) u->peer.data; ++ lmcf = ngx_http_get_module_main_conf(r, ngx_http_lua_module); ++ ++ /* we cannot read r->upstream->peer.data here directly because ++ * it could be overridden by other modules like ++ * ngx_http_upstream_keepalive_module. 
++ */ ++ bp = lmcf->balancer_peer_data; ++ if (bp == NULL) { ++ *err = "no upstream peer data found"; ++ return NGX_ERROR; ++ } + + if (!ngx_http_lua_balancer_peer_set(bp)) { + *err = "no current peer set"; @@ -1067,7 +967,239 @@ index e4ac57a..44e01cb 100644 return NGX_OK; } -@@ -545,14 +1078,13 @@ ngx_http_lua_ffi_balancer_set_timeouts(ngx_http_request_t *r, +diff --git a/ngx_lua-0.10.20/src/ngx_http_lua_common.h b/ngx_lua-0.10.20/src/ngx_http_lua_common.h +index 781a2454..9ce6836a 100644 +--- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_common.h ++++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_common.h +@@ -320,6 +320,10 @@ union ngx_http_lua_srv_conf_u { + #endif + + struct { ++ ngx_http_upstream_init_pt original_init_upstream; ++ ngx_http_upstream_init_peer_pt original_init_peer; ++ uintptr_t data; ++ + ngx_http_lua_srv_conf_handler_pt handler; + ngx_str_t src; + u_char *src_key; +diff --git a/ngx_lua-0.10.20/src/ngx_http_lua_module.c b/ngx_lua-0.10.20/src/ngx_http_lua_module.c +index 9816d864..5d7cedfd 100644 +--- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_module.c ++++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_module.c +@@ -1068,6 +1068,9 @@ ngx_http_lua_create_srv_conf(ngx_conf_t *cf) + * lscf->srv.ssl_session_fetch_src = { 0, NULL }; + * lscf->srv.ssl_session_fetch_src_key = NULL; + * ++ * lscf->balancer.original_init_upstream = NULL; ++ * lscf->balancer.original_init_peer = NULL; ++ * lscf->balancer.data = NULL; + * lscf->balancer.handler = NULL; + * lscf->balancer.src = { 0, NULL }; + * lscf->balancer.src_key = NULL; +-- +2.26.2 + + +From 4c5cb29a265b2f9524434322adf15d07deec6c7f Mon Sep 17 00:00:00 2001 +From: Thibault Charbonnier +Date: Tue, 17 Sep 2019 11:43:54 -0700 +Subject: [PATCH 2/3] feature: we now avoid the need for 'upstream' blocks to + define a stub 'server' directive when using 'balancer_by_lua*'. 
+ +--- + src/ngx_http_lua_balancer.c | 42 +++++++++++++++++++++++++++++++++++-- + 1 file changed, 40 insertions(+), 2 deletions(-) + +diff --git a/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c b/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c +index 0d403716..5c862d22 100644 +--- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c ++++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c +@@ -111,7 +111,8 @@ static void ngx_http_lua_balancer_save_session(ngx_peer_connection_t *pc, + (bp->sockaddr && bp->socklen) + + +-static char ngx_http_lua_balancer_keepalive_pools_table_key; ++static char ngx_http_lua_balancer_keepalive_pools_table_key; ++static struct sockaddr *ngx_http_lua_balancer_default_server_sockaddr; + + + ngx_int_t +@@ -239,7 +240,9 @@ ngx_http_lua_balancer_by_lua(ngx_conf_t *cf, ngx_command_t *cmd, + u_char *cache_key = NULL; + u_char *name; + ngx_str_t *value; ++ ngx_url_t url; + ngx_http_upstream_srv_conf_t *uscf; ++ ngx_http_upstream_server_t *us; + ngx_http_lua_srv_conf_t *lscf = conf; + + dd("enter"); +@@ -293,6 +296,29 @@ ngx_http_lua_balancer_by_lua(ngx_conf_t *cf, ngx_command_t *cmd, + + uscf = ngx_http_conf_get_module_srv_conf(cf, ngx_http_upstream_module); + ++ if (uscf->servers->nelts == 0) { ++ us = ngx_array_push(uscf->servers); ++ if (us == NULL) { ++ return NGX_CONF_ERROR; ++ } ++ ++ ngx_memzero(us, sizeof(ngx_http_upstream_server_t)); ++ ngx_memzero(&url, sizeof(ngx_url_t)); ++ ++ ngx_str_set(&url.url, "0.0.0.1"); ++ url.default_port = 80; ++ ++ if (ngx_parse_url(cf->pool, &url) != NGX_OK) { ++ return NGX_CONF_ERROR; ++ } ++ ++ us->name = url.url; ++ us->addrs = url.addrs; ++ us->naddrs = url.naddrs; ++ ++ ngx_http_lua_balancer_default_server_sockaddr = us->addrs[0].sockaddr; ++ } ++ + if (uscf->peer.init_upstream) { + ngx_conf_log_error(NGX_LOG_WARN, cf, 0, + "load balancing method redefined"); +@@ -525,7 +551,19 @@ ngx_http_lua_balancer_get_peer(ngx_peer_connection_t *pc, void *data) + return NGX_OK; + } + +- return bp->original_get_peer(pc, bp->data); ++ rc = bp->original_get_peer(pc, bp->data); ++ if (rc == NGX_ERROR) { ++ return rc; ++ } ++ ++ if (pc->sockaddr == ngx_http_lua_balancer_default_server_sockaddr) { ++ ngx_log_error(NGX_LOG_ERR, pc->log, 0, ++ "lua balancer: no peer set"); ++ ++ return NGX_ERROR; ++ } ++ ++ return rc; + } + + +-- +2.26.2 + + +From 941cd893573561574bc6a326d6306f1a30127293 Mon Sep 17 00:00:00 2001 +From: Thibault Charbonnier +Date: Tue, 17 Sep 2019 11:43:58 -0700 +Subject: [PATCH 3/3] refactor: used a simpler way to stash the balancer peer + data. 
+ +--- + src/ngx_http_lua_balancer.c | 91 +++++++++---------------------------- + src/ngx_http_lua_common.h | 7 --- + 2 files changed, 22 insertions(+), 76 deletions(-) + +diff --git a/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c b/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c +index 5c862d22..3ea1f067 100644 +--- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c ++++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_balancer.c +@@ -411,9 +411,9 @@ ngx_http_lua_balancer_get_peer(ngx_peer_connection_t *pc, void *data) + ngx_http_request_t *r; + ngx_http_lua_ctx_t *ctx; + ngx_http_lua_srv_conf_t *lscf; +- ngx_http_lua_main_conf_t *lmcf; + ngx_http_lua_balancer_keepalive_item_t *item; + ngx_http_lua_balancer_peer_data_t *bp = data; ++ void *pdata; + + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0, + "lua balancer: get peer, tries: %ui", pc->tries); +@@ -452,15 +452,13 @@ ngx_http_lua_balancer_get_peer(ngx_peer_connection_t *pc, void *data) + bp->keepalive = 0; + bp->total_tries++; + +- lmcf = ngx_http_get_module_main_conf(r, ngx_http_lua_module); +- +- /* balancer_by_lua does not support yielding and +- * there cannot be any conflicts among concurrent requests, +- * thus it is safe to store the peer data in the main conf. +- */ +- lmcf->balancer_peer_data = bp; ++ pdata = r->upstream->peer.data; ++ r->upstream->peer.data = bp; + + rc = lscf->balancer.handler(r, lscf, L); ++ ++ r->upstream->peer.data = pdata; ++ + if (rc == NGX_ERROR) { + return NGX_ERROR; + } +@@ -945,7 +943,6 @@ ngx_http_lua_ffi_balancer_set_current_peer(ngx_http_request_t *r, + ngx_url_t url; + ngx_http_upstream_t *u; + ngx_http_lua_ctx_t *ctx; +- ngx_http_lua_main_conf_t *lmcf; + ngx_http_lua_balancer_peer_data_t *bp; + + if (r == NULL) { +@@ -971,18 +968,6 @@ ngx_http_lua_ffi_balancer_set_current_peer(ngx_http_request_t *r, + return NGX_ERROR; + } + +- lmcf = ngx_http_get_module_main_conf(r, ngx_http_lua_module); +- +- /* we cannot read r->upstream->peer.data here directly because +- * it could be overridden by other modules like +- * ngx_http_upstream_keepalive_module. +- */ +- bp = lmcf->balancer_peer_data; +- if (bp == NULL) { +- *err = "no upstream peer data found"; +- return NGX_ERROR; +- } +- + ngx_memzero(&url, sizeof(ngx_url_t)); + + url.url.data = ngx_palloc(r->pool, addr_len); +@@ -1006,6 +991,8 @@ ngx_http_lua_ffi_balancer_set_current_peer(ngx_http_request_t *r, + return NGX_ERROR; + } + ++ bp = (ngx_http_lua_balancer_peer_data_t *) u->peer.data; ++ + if (url.addrs && url.addrs[0].sockaddr) { + bp->sockaddr = url.addrs[0].sockaddr; + bp->socklen = url.addrs[0].socklen; +@@ -1029,7 +1016,6 @@ ngx_http_lua_ffi_balancer_enable_keepalive(ngx_http_request_t *r, + { + ngx_http_upstream_t *u; + ngx_http_lua_ctx_t *ctx; +- ngx_http_lua_main_conf_t *lmcf; + ngx_http_lua_balancer_peer_data_t *bp; + + if (r == NULL) { +@@ -1055,17 +1041,7 @@ ngx_http_lua_ffi_balancer_enable_keepalive(ngx_http_request_t *r, + return NGX_ERROR; + } + +- lmcf = ngx_http_get_module_main_conf(r, ngx_http_lua_module); +- +- /* we cannot read r->upstream->peer.data here directly because +- * it could be overridden by other modules like +- * ngx_http_upstream_keepalive_module. 
+- */ +- bp = lmcf->balancer_peer_data; +- if (bp == NULL) { +- *err = "no upstream peer data found"; +- return NGX_ERROR; +- } ++ bp = (ngx_http_lua_balancer_peer_data_t *) u->peer.data; + + if (!ngx_http_lua_balancer_peer_set(bp)) { + *err = "no current peer set"; +@@ -1089,14 +1065,13 @@ ngx_http_lua_ffi_balancer_set_timeouts(ngx_http_request_t *r, long connect_timeout, long send_timeout, long read_timeout, char **err) { @@ -1085,7 +1217,7 @@ index e4ac57a..44e01cb 100644 if (r == NULL) { *err = "no request found"; -@@ -577,15 +1109,9 @@ ngx_http_lua_ffi_balancer_set_timeouts(ngx_http_request_t *r, +@@ -1121,15 +1096,9 @@ ngx_http_lua_ffi_balancer_set_timeouts(ngx_http_request_t *r, return NGX_ERROR; } @@ -1103,7 +1235,7 @@ index e4ac57a..44e01cb 100644 if (!bp->cloned_upstream_conf) { /* we clone the upstream conf for the current request so that * we do not affect other requests at all. */ -@@ -640,12 +1166,10 @@ ngx_http_lua_ffi_balancer_set_more_tries(ngx_http_request_t *r, +@@ -1184,12 +1153,10 @@ ngx_http_lua_ffi_balancer_set_more_tries(ngx_http_request_t *r, int count, char **err) { #if (nginx_version >= 1007005) @@ -1119,7 +1251,7 @@ index e4ac57a..44e01cb 100644 ngx_http_lua_balancer_peer_data_t *bp; if (r == NULL) { -@@ -671,13 +1195,7 @@ ngx_http_lua_ffi_balancer_set_more_tries(ngx_http_request_t *r, +@@ -1215,13 +1182,7 @@ ngx_http_lua_ffi_balancer_set_more_tries(ngx_http_request_t *r, return NGX_ERROR; } @@ -1134,7 +1266,7 @@ index e4ac57a..44e01cb 100644 #if (nginx_version >= 1007005) max_tries = r->upstream->conf->next_upstream_tries; -@@ -703,12 +1221,10 @@ int +@@ -1247,12 +1208,10 @@ int ngx_http_lua_ffi_balancer_get_last_failure(ngx_http_request_t *r, int *status, char **err) { @@ -1150,7 +1282,7 @@ index e4ac57a..44e01cb 100644 if (r == NULL) { *err = "no request found"; -@@ -733,13 +1249,7 @@ ngx_http_lua_ffi_balancer_get_last_failure(ngx_http_request_t *r, +@@ -1277,13 +1236,7 @@ ngx_http_lua_ffi_balancer_get_last_failure(ngx_http_request_t *r, return NGX_ERROR; } @@ -1165,8 +1297,8 @@ index e4ac57a..44e01cb 100644 if (r->upstream_states && r->upstream_states->nelts > 1) { state = r->upstream_states->elts; -diff --git a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_common.h b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_common.h -index 0751a11..7a66cb1 100644 +diff --git a/ngx_lua-0.10.20/src/ngx_http_lua_common.h b/ngx_lua-0.10.20/src/ngx_http_lua_common.h +index 9ce6836a..9a4342df 100644 --- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_common.h +++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_common.h @@ -239,13 +239,6 @@ struct ngx_http_lua_main_conf_s { @@ -1183,30 +1315,5 @@ index 0751a11..7a66cb1 100644 ngx_chain_t *body_filter_chain; /* neither yielding nor recursion is possible in * body_filter_by_lua*, so there cannot be any races among -@@ -320,6 +313,10 @@ union ngx_http_lua_srv_conf_u { - #endif - - struct { -+ ngx_http_upstream_init_pt original_init_upstream; -+ ngx_http_upstream_init_peer_pt original_init_peer; -+ uintptr_t data; -+ - ngx_http_lua_srv_conf_handler_pt handler; - ngx_str_t src; - u_char *src_key; -diff --git a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_module.c b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_module.c -index 7358a95..21bf8f1 100644 ---- a/bundle/ngx_lua-0.10.20/src/ngx_http_lua_module.c -+++ b/bundle/ngx_lua-0.10.20/src/ngx_http_lua_module.c -@@ -1068,6 +1068,9 @@ ngx_http_lua_create_srv_conf(ngx_conf_t *cf) - * lscf->srv.ssl_session_fetch_src = { 0, NULL }; - * lscf->srv.ssl_session_fetch_src_key = NULL; - * -+ * 
lscf->balancer.original_init_upstream = NULL; -+ * lscf->balancer.original_init_peer = NULL; -+ * lscf->balancer.data = NULL; - * lscf->balancer.handler = NULL; - * lscf->balancer.src = { 0, NULL }; - * lscf->balancer.src_key = NULL; -- -2.34.1 +2.26.2 diff --git a/changelog/unreleased/kong/balancer_respect_max_retries.yml b/changelog/unreleased/kong/balancer_respect_max_retries.yml deleted file mode 100644 index 1884ad1ce9f..00000000000 --- a/changelog/unreleased/kong/balancer_respect_max_retries.yml +++ /dev/null @@ -1,3 +0,0 @@ -message: Fix an issue that the actual number of retry times exceeds the `retries` setting. -type: bugfix -scope: Core diff --git a/spec/02-integration/05-proxy/10-balancer/08-retries_spec.lua b/spec/02-integration/05-proxy/10-balancer/08-retries_spec.lua deleted file mode 100644 index cfc25919986..00000000000 --- a/spec/02-integration/05-proxy/10-balancer/08-retries_spec.lua +++ /dev/null @@ -1,124 +0,0 @@ -local helpers = require "spec.helpers" -local cjson = require "cjson" - -local function get_log(typ, n) - local entries - helpers.wait_until(function() - local client = assert(helpers.http_client(helpers.mock_upstream_host, - helpers.mock_upstream_port)) - local res = client:get("/read_log/" .. typ, { - headers = { - Accept = "application/json" - } - }) - local raw = assert.res_status(200, res) - local body = cjson.decode(raw) - - entries = body.entries - return #entries > 0 - end, 10) - if n then - assert(#entries == n, "expected " .. n .. " log entries, but got " .. #entries) - end - return entries -end - -for _, strategy in helpers.each_strategy() do - describe("Balancer: respect max retries [#" .. strategy .. "]", function() - local service - - lazy_setup(function() - local bp = helpers.get_db_utils(strategy, { - "routes", - "services", - "plugins", - }) - - service = bp.services:insert { - name = "retry_service", - host = "127.0.0.1", - port = 62351, - retries = 5, - } - - local route = bp.routes:insert { - service = service, - paths = { "/hello" }, - strip_path = false, - } - - bp.plugins:insert { - route = { id = route.id }, - name = "http-log", - config = { - queue_size = 1, - http_endpoint = "http://" .. helpers.mock_upstream_host - .. ":" - .. helpers.mock_upstream_port - .. 
"/post_log/http" - } - } - - local fixtures = { - http_mock = {} - } - - fixtures.http_mock.my_server_block = [[ - server { - listen 0.0.0.0:62351; - location /hello { - content_by_lua_block { - local request_counter = ngx.shared.request_counter - local first_request = request_counter:get("first_request") - if first_request == nil then - request_counter:set("first_request", "yes") - ngx.say("hello") - else - ngx.exit(ngx.HTTP_CLOSE) - end - } - } - } - ]] - - assert(helpers.start_kong({ - database = strategy, - nginx_conf = "spec/fixtures/custom_nginx.template", - nginx_http_lua_shared_dict = "request_counter 1m", - }, nil, nil, fixtures)) - end) - - lazy_teardown(function() - helpers.stop_kong() - end) - - it("exceeded limit", function() - -- First request should succeed and save connection to upstream in keepalive pool - local proxy_client1 = helpers.proxy_client() - local res = assert(proxy_client1:send { - method = "GET", - path = "/hello", - }) - - assert.res_status(200, res) - - proxy_client1:close() - - -- Second request should failed 1 times and retry 5 times and then return 502 - local proxy_client2 = helpers.proxy_client() - - res = assert(proxy_client2:send { - method = "GET", - path = "/hello", - }) - - assert.res_status(502, res) - - -- wait for the http-log plugin to flush the log - ngx.sleep(1) - - local entries = get_log("http", 2) - assert.equal(#entries[2].tries, 6) - end) - end) -end From e2dd96a0ee4ddf2e384fe0f1c0f8dfb4deb98608 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 18 Jul 2024 21:08:59 +0800 Subject: [PATCH 15/42] Revert "chore(deps): bump `lua-resty-healthcheck` to `1.5.4` (#12238)" This reverts commit 85c75e8d7bebafa1599b587f892834414d01cac0. --- changelog/unreleased/kong/bump-lua-resty-healthcheck-1.5.4.yml | 3 --- kong-2.8.6-0.rockspec | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) delete mode 100644 changelog/unreleased/kong/bump-lua-resty-healthcheck-1.5.4.yml diff --git a/changelog/unreleased/kong/bump-lua-resty-healthcheck-1.5.4.yml b/changelog/unreleased/kong/bump-lua-resty-healthcheck-1.5.4.yml deleted file mode 100644 index 672ecdb2cf9..00000000000 --- a/changelog/unreleased/kong/bump-lua-resty-healthcheck-1.5.4.yml +++ /dev/null @@ -1,3 +0,0 @@ -message: "Bumped lua-resty-healthcheck from 1.5.3 to 1.5.4" -type: dependency -scope: Core diff --git a/kong-2.8.6-0.rockspec b/kong-2.8.6-0.rockspec index 39effabc2ee..73b82bbcd73 100644 --- a/kong-2.8.6-0.rockspec +++ b/kong-2.8.6-0.rockspec @@ -33,7 +33,7 @@ dependencies = { "luaxxhash >= 1.0", "lua-protobuf == 0.3.3", "lua-resty-worker-events == 1.0.0", - "lua-resty-healthcheck == 1.5.4", + "lua-resty-healthcheck == 1.5.3", "lua-resty-mlcache == 2.5.0", "lua-messagepack == 0.5.2", "lua-resty-openssl == 0.8.22", From 660a79b0ca0ecf5938893b94602be69ba117d56f Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 18 Jul 2024 21:08:59 +0800 Subject: [PATCH 16/42] Revert "Revert "Revert "tests: re-enable and fix flaky tests in spec/02-integration/03-db/07-tags_spec.lua (#10715) (#11118)""" This reverts commit 0733e36bf6b87bbafbf2b9355d7b4b8a6fce8b87. 
--- spec/02-integration/03-db/07-tags_spec.lua | 60 ++++++++++------------ spec/helpers.lua | 21 -------- 2 files changed, 27 insertions(+), 54 deletions(-) diff --git a/spec/02-integration/03-db/07-tags_spec.lua b/spec/02-integration/03-db/07-tags_spec.lua index 767097d21cb..c120d740dd1 100644 --- a/spec/02-integration/03-db/07-tags_spec.lua +++ b/spec/02-integration/03-db/07-tags_spec.lua @@ -219,6 +219,23 @@ for _, strategy in helpers.each_strategy() do describe("page() by tag", function() local single_tag_count = 5 local total_entities_count = 100 + for i = 1, total_entities_count do + local service = { + host = "anotherexample-" .. i .. ".org", + name = "service-paging" .. i, + tags = { "paging", "team_paging_" .. fmod(i, 5), "irrelevant_tag" } + } + local row, err, err_t = bp.services:insert(service) + assert.is_nil(err) + assert.is_nil(err_t) + assert.same(service.tags, row.tags) + end + + if strategy == "off" then + local entities = assert(bp.done()) + local dc = assert(declarative_config.load(helpers.test_conf.loaded_plugins)) + declarative.load_into_cache(dc:flatten(entities)) + end local scenarios = { -- { tags[], expected_result_count } { @@ -245,26 +262,6 @@ for _, strategy in helpers.each_strategy() do local paging_size = { total_entities_count/single_tag_count, } - lazy_setup(function() - for i = 1, total_entities_count do - local service = { - host = "anotherexample-" .. i .. ".org", - name = "service-paging" .. i, - tags = { "paging", "team_paging_" .. fmod(i, 5), "irrelevant_tag" } - } - local row, err, err_t = bp.services:insert(service) - assert.is_nil(err) - assert.is_nil(err_t) - assert.same(service.tags, row.tags) - end - - if strategy == "off" then - local entities = assert(bp.done()) - local dc = assert(declarative_config.load(helpers.test_conf.loaded_plugins)) - declarative.load_into_cache(dc:flatten(entities)) - end - end) - for s_idx, scenario in ipairs(scenarios) do local opts, expected_count = unpack(scenario) @@ -345,23 +342,20 @@ for _, strategy in helpers.each_strategy() do assert.stub(ngx.log).was_not_called() end) - it("and returns as normal if page size is large enough", function() + it("#flaky and returns as normal if page size is large enough", function() stub(ngx, "log") - -- cassandra is a bit slow on CI, so we need to wait a bit - helpers.pwait_until(function() - local rows, err, err_t, offset = db.services:page(enough_page_size, nil, + local rows, err, err_t, offset = db.services:page(enough_page_size, nil, + { tags = { "paging", "team_paging_1" }, tags_cond = 'and' }) + assert(is_valid_page(rows, err, err_t)) + assert.equal(enough_page_size, #rows) + if offset then + rows, err, err_t, offset = db.services:page(enough_page_size, offset, { tags = { "paging", "team_paging_1" }, tags_cond = 'and' }) assert(is_valid_page(rows, err, err_t)) - assert.equal(enough_page_size, #rows) - if offset then - rows, err, err_t, offset = db.services:page(enough_page_size, offset, - { tags = { "paging", "team_paging_1" }, tags_cond = 'and' }) - assert(is_valid_page(rows, err, err_t)) - assert.equal(0, #rows) - assert.is_nil(offset) - end - end) + assert.equal(0, #rows) + assert.is_nil(offset) + end assert.stub(ngx.log).was_not_called() end) diff --git a/spec/helpers.lua b/spec/helpers.lua index 50b12314dfe..ad4c30fda46 100644 --- a/spec/helpers.lua +++ b/spec/helpers.lua @@ -1342,26 +1342,6 @@ local function wait_until(f, timeout, step) end - ---- Waits until no Lua error occurred --- The check function will repeatedly be called (with a fixed interval), until --- 
there is no Lua error occurred --- --- NOTE: this is a regular Lua function, not a Luassert assertion. --- @function pwait_until --- @param f check function --- @param timeout (optional) maximum time to wait after which an error is --- thrown, defaults to 5. --- @param step (optional) interval between checks, defaults to 0.05. --- @return nothing. It returns when the condition is met, or throws an error --- when it times out. -local function pwait_until(f, timeout, step) - wait_until(function() - return pcall(f) - end, timeout, step) -end - - --- Waits for invalidation of a cached key by polling the mgt-api -- and waiting for a 404 response. Throws an error on timeout. -- @@ -2941,7 +2921,6 @@ end grpc_client = grpc_client, http2_client = http2_client, wait_until = wait_until, - pwait_until = pwait_until, wait_pid = wait_pid, tcp_server = tcp_server, udp_server = udp_server, From 31673b2a0b508d37d070499d8d78356059cc7266 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 18 Jul 2024 21:08:59 +0800 Subject: [PATCH 17/42] Revert "Revert "Revert "test(cmd): fix flaky `can receive USR1` test (#10903)""" This reverts commit 8bb559c98c674f46204ff937ba597ad454e47d28. --- spec/02-integration/02-cmd/13-signals_spec.lua | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/spec/02-integration/02-cmd/13-signals_spec.lua b/spec/02-integration/02-cmd/13-signals_spec.lua index 21287489020..9f9c9e38c1b 100644 --- a/spec/02-integration/02-cmd/13-signals_spec.lua +++ b/spec/02-integration/02-cmd/13-signals_spec.lua @@ -15,12 +15,10 @@ describe("signals", function() assert(helpers.start_kong()) helpers.signal(nil, "-USR1") - helpers.wait_until(function() - local conf = helpers.get_running_conf() - local _, code = helpers.execute("grep -F '(SIGUSR1) received from' " .. - conf.nginx_err_logs, true) - return 0 == code, "SIGUSR1 not received" - end) + local conf = helpers.get_running_conf() + local _, code = helpers.execute("grep -F '(SIGUSR1) received from' " .. + conf.nginx_err_logs, true) + assert.equal(0, code) end) it("can receive USR2 #flaky", function() From cf85cf65632a37e691ad4505e18ea354fa7bb09d Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 18 Jul 2024 21:08:59 +0800 Subject: [PATCH 18/42] Revert "Revert "Revert "tests(*): fix flaky hybrid mode ocsp tests (#10912)""" This reverts commit 4dea806367627f78f7c137b2d9f1255632dacb5d. 
--- .../09-hybrid_mode/05-ocsp_spec.lua | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/spec/02-integration/09-hybrid_mode/05-ocsp_spec.lua b/spec/02-integration/09-hybrid_mode/05-ocsp_spec.lua index d2dab2aaef2..3f6275c150f 100644 --- a/spec/02-integration/09-hybrid_mode/05-ocsp_spec.lua +++ b/spec/02-integration/09-hybrid_mode/05-ocsp_spec.lua @@ -41,8 +41,6 @@ for _, strategy in helpers.each_strategy() do cluster_ca_cert = "spec/fixtures/ocsp_certs/ca.crt", })) - set_ocsp_status("good") - assert(helpers.start_kong({ role = "data_plane", database = "off", @@ -56,6 +54,8 @@ for _, strategy in helpers.each_strategy() do cluster_server_name = "kong_clustering", cluster_ca_cert = "spec/fixtures/ocsp_certs/ca.crt", })) + + set_ocsp_status("good") end) lazy_teardown(function() @@ -110,8 +110,6 @@ for _, strategy in helpers.each_strategy() do cluster_ca_cert = "spec/fixtures/ocsp_certs/ca.crt", })) - set_ocsp_status("revoked") - assert(helpers.start_kong({ role = "data_plane", database = "off", @@ -125,6 +123,8 @@ for _, strategy in helpers.each_strategy() do cluster_server_name = "kong_clustering", cluster_ca_cert = "spec/fixtures/ocsp_certs/ca.crt", })) + + set_ocsp_status("revoked") end) lazy_teardown(function() @@ -177,8 +177,6 @@ for _, strategy in helpers.each_strategy() do cluster_ca_cert = "spec/fixtures/ocsp_certs/ca.crt", })) - set_ocsp_status("error") - assert(helpers.start_kong({ role = "data_plane", database = "off", @@ -192,6 +190,8 @@ for _, strategy in helpers.each_strategy() do cluster_server_name = "kong_clustering", cluster_ca_cert = "spec/fixtures/ocsp_certs/ca.crt", })) + + set_ocsp_status("error") end) lazy_teardown(function() @@ -247,8 +247,6 @@ for _, strategy in helpers.each_strategy() do cluster_ca_cert = "spec/fixtures/ocsp_certs/ca.crt", })) - set_ocsp_status("revoked") - assert(helpers.start_kong({ role = "data_plane", database = "off", @@ -262,6 +260,8 @@ for _, strategy in helpers.each_strategy() do cluster_server_name = "kong_clustering", cluster_ca_cert = "spec/fixtures/ocsp_certs/ca.crt", })) + + set_ocsp_status("revoked") end) lazy_teardown(function() @@ -318,8 +318,6 @@ for _, strategy in helpers.each_strategy() do cluster_ca_cert = "spec/fixtures/ocsp_certs/ca.crt", })) - set_ocsp_status("revoked") - assert(helpers.start_kong({ role = "data_plane", database = "off", @@ -333,6 +331,8 @@ for _, strategy in helpers.each_strategy() do cluster_server_name = "kong_clustering", cluster_ca_cert = "spec/fixtures/ocsp_certs/ca.crt", })) + + set_ocsp_status("revoked") end) lazy_teardown(function() @@ -385,8 +385,6 @@ for _, strategy in helpers.each_strategy() do cluster_ca_cert = "spec/fixtures/ocsp_certs/ca.crt", })) - set_ocsp_status("error") - assert(helpers.start_kong({ role = "data_plane", database = "off", @@ -400,6 +398,8 @@ for _, strategy in helpers.each_strategy() do cluster_server_name = "kong_clustering", cluster_ca_cert = "spec/fixtures/ocsp_certs/ca.crt", })) + + set_ocsp_status("error") end) lazy_teardown(function() From 0f9ca8c322db9aa4e6320bc867a0ae21e7028f07 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 18 Jul 2024 21:08:59 +0800 Subject: [PATCH 19/42] Revert "Revert "Revert "tests(*): modify tests that need to access mockbin.com from integration test (#10893)""" This reverts commit e4ed6f3cd5c0c2920144bfedf476b080fcd7f887. 
--- spec/02-integration/05-proxy/06-ssl_spec.lua | 52 +++++--------------- 1 file changed, 11 insertions(+), 41 deletions(-) diff --git a/spec/02-integration/05-proxy/06-ssl_spec.lua b/spec/02-integration/05-proxy/06-ssl_spec.lua index 28385246c93..c6833d8f3a3 100644 --- a/spec/02-integration/05-proxy/06-ssl_spec.lua +++ b/spec/02-integration/05-proxy/06-ssl_spec.lua @@ -1,7 +1,6 @@ local ssl_fixtures = require "spec.fixtures.ssl" local helpers = require "spec.helpers" local cjson = require "cjson" -local fmt = string.format local function get_cert(server_name) @@ -13,32 +12,6 @@ local function get_cert(server_name) return stdout end -local mock_tls_server_port = helpers.get_available_port() - -local fixtures = { - dns_mock = helpers.dns_mock.new(), - http_mock = { - test_upstream_tls_server = fmt([[ - server { - server_name example2.com; - listen %s ssl; - - ssl_certificate ../spec/fixtures/mtls_certs/example2.com.crt; - ssl_certificate_key ../spec/fixtures/mtls_certs/example2.com.key; - - location = / { - echo 'it works'; - } - } - ]], mock_tls_server_port) - }, -} - -fixtures.dns_mock:A { - name = "example2.com", - address = "127.0.0.1", -} - for _, strategy in helpers.each_strategy() do describe("SSL [#" .. strategy .. "]", function() local proxy_client @@ -153,18 +126,16 @@ for _, strategy in helpers.each_strategy() do preserve_host = false, } - local service_example2 = assert(bp.services:insert { - name = "service-example2", - protocol = "https", - host = "example2.com", - port = mock_tls_server_port, + local service_mockbin = assert(bp.services:insert { + name = "service-mockbin", + url = "https://mockbin.com/request", }) assert(bp.routes:insert { protocols = { "http" }, - hosts = { "example2.com" }, + hosts = { "mockbin.com" }, paths = { "/" }, - service = service_example2, + service = service_mockbin, }) assert(bp.routes:insert { @@ -233,14 +204,13 @@ for _, strategy in helpers.each_strategy() do -- /wildcard tests - assert(helpers.start_kong ({ + assert(helpers.start_kong { database = strategy, nginx_conf = "spec/fixtures/custom_nginx.template", trusted_ips = "127.0.0.1", nginx_http_proxy_ssl_verify = "on", nginx_http_proxy_ssl_trusted_certificate = "../spec/fixtures/kong_spec.crt", - nginx_http_proxy_ssl_verify_depth = "5", - }, nil, nil, fixtures)) + }) proxy_client = helpers.proxy_client() https_client = helpers.proxy_ssl_client() @@ -258,13 +228,13 @@ for _, strategy in helpers.each_strategy() do method = "GET", path = "/", headers = { - Host = "example2.com", + Host = "mockbin.com", }, }) local body = assert.res_status(502, res) assert.matches("An invalid response was received from the upstream server", body) assert.logfile().has.line("upstream SSL certificate verify error: " .. - "(21:unable to verify the first certificate) " .. + "(20:unable to get local issuer certificate) " .. "while SSL handshaking to upstream", true, 2) end) @@ -570,7 +540,7 @@ for _, strategy in helpers.each_strategy() do snis = { "example.com" }, service = service, } - + bp.routes:insert { protocols = { "tls" }, snis = { "foobar.example.com." }, @@ -594,7 +564,7 @@ for _, strategy in helpers.each_strategy() do stream_listen = "127.0.0.1:9020 ssl" }) - + end) lazy_teardown(function() From 0cdb25888b8aa93a09a76de8e7942de6a3619334 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 18 Jul 2024 21:08:59 +0800 Subject: [PATCH 20/42] Revert "Revert "Revert "tests(helpers): pickup reload helper from #8670""" This reverts commit cf1a91b136ee43245b21fd987f55eeef5a2e550f. 
--- spec/02-integration/02-cmd/03-reload_spec.lua | 99 ++++++++++++++++--- spec/helpers.lua | 75 -------------- 2 files changed, 85 insertions(+), 89 deletions(-) diff --git a/spec/02-integration/02-cmd/03-reload_spec.lua b/spec/02-integration/02-cmd/03-reload_spec.lua index 115db1f9862..7a2baa91346 100644 --- a/spec/02-integration/02-cmd/03-reload_spec.lua +++ b/spec/02-integration/02-cmd/03-reload_spec.lua @@ -2,6 +2,31 @@ local helpers = require "spec.helpers" local cjson = require "cjson" +local function get_kong_workers() + local workers + helpers.wait_until(function() + local pok, admin_client = pcall(helpers.admin_client) + if not pok then + return false + end + local res = admin_client:send { + method = "GET", + path = "/", + } + if not res or res.status ~= 200 then + return false + end + local body = assert.res_status(200, res) + local json = cjson.decode(body) + + admin_client:close() + workers = json.pids.workers + return true + end, 10) + return workers +end + + local function assert_wait_call(fn, ...) local res local args = { ... } @@ -13,6 +38,52 @@ local function assert_wait_call(fn, ...) end +local function wait_until_no_common_workers(workers, expected_total, strategy) + if strategy == "cassandra" then + ngx.sleep(0.5) + end + helpers.wait_until(function() + local pok, admin_client = pcall(helpers.admin_client) + if not pok then + return false + end + local res = assert(admin_client:send { + method = "GET", + path = "/", + }) + assert.res_status(200, res) + local json = cjson.decode(assert.res_status(200, res)) + admin_client:close() + + local new_workers = json.pids.workers + local total = 0 + local common = 0 + if new_workers then + for _, v in ipairs(new_workers) do + total = total + 1 + for _, v_old in ipairs(workers) do + if v == v_old then + common = common + 1 + break + end + end + end + end + return common == 0 and total == (expected_total or total) + end) +end + + +local function kong_reload(strategy, ...) + local workers = get_kong_workers() + local ok, err = helpers.kong_exec(...) + if ok then + wait_until_no_common_workers(workers, 1, strategy) + end + return ok, err +end + + for _, strategy in helpers.each_strategy() do describe("kong reload #" .. strategy, function() @@ -33,7 +104,7 @@ describe("kong reload #" .. strategy, function() local nginx_pid = assert_wait_call(helpers.file.read, helpers.test_conf.nginx_pid) -- kong_exec uses test conf too, so same prefix - assert(helpers.reload_kong(strategy, "reload --prefix " .. helpers.test_conf.prefix)) + assert(kong_reload(strategy, "reload --prefix " .. helpers.test_conf.prefix)) local nginx_pid_after = assert_wait_call(helpers.file.read, helpers.test_conf.nginx_pid) @@ -50,14 +121,14 @@ describe("kong reload #" .. strategy, function() local client = helpers.http_client("0.0.0.0", 9002, 5000) client:close() - local workers = helpers.get_kong_workers() + local workers = get_kong_workers() local nginx_pid = assert(helpers.file.read(helpers.test_conf.nginx_pid), "no nginx master PID") assert(helpers.kong_exec("reload --conf spec/fixtures/reload.conf")) - helpers.wait_until_no_common_workers(workers, 1) + wait_until_no_common_workers(workers, 1) -- same master PID assert.equal(nginx_pid, helpers.file.read(helpers.test_conf.nginx_pid)) @@ -76,7 +147,7 @@ describe("kong reload #" .. 
strategy, function() local client = helpers.http_client("0.0.0.0", 9002, 5000) client:close() - local workers = helpers.get_kong_workers() + local workers = get_kong_workers() local nginx_pid = assert(helpers.file.read(helpers.test_conf.nginx_pid), "no nginx master PID") @@ -85,7 +156,7 @@ describe("kong reload #" .. strategy, function() proxy_listen = "0.0.0.0:9000" })) - helpers.wait_until_no_common_workers(workers, 1) + wait_until_no_common_workers(workers, 1) -- same master PID assert.equal(nginx_pid, helpers.file.read(helpers.test_conf.nginx_pid)) @@ -100,7 +171,7 @@ describe("kong reload #" .. strategy, function() proxy_listen = "0.0.0.0:9002" }, nil, true)) - local workers = helpers.get_kong_workers() + local workers = get_kong_workers() -- http_client errors out if cannot connect local client = helpers.http_client("0.0.0.0", 9002, 5000) @@ -110,7 +181,7 @@ describe("kong reload #" .. strategy, function() .. " --nginx-conf spec/fixtures/custom_nginx.template")) - helpers.wait_until_no_common_workers(workers, 1) + wait_until_no_common_workers(workers, 1) -- new server client = helpers.http_client(helpers.mock_upstream_host, @@ -142,7 +213,7 @@ describe("kong reload #" .. strategy, function() local pids_1 = json.pids client:close() - assert(helpers.reload_kong(strategy, "reload --prefix " .. helpers.test_conf.prefix)) + assert(kong_reload(strategy, "reload --prefix " .. helpers.test_conf.prefix)) client = helpers.admin_client() local res = assert(client:get("/")) @@ -179,7 +250,7 @@ describe("kong reload #" .. strategy, function() local node_id_1 = json.node_id client:close() - assert(helpers.reload_kong(strategy, "reload --prefix " .. helpers.test_conf.prefix)) + assert(kong_reload(strategy, "reload --prefix " .. helpers.test_conf.prefix)) client = helpers.admin_client() local res = assert(client:get("/")) @@ -255,7 +326,7 @@ describe("kong reload #" .. strategy, function() - example.test ]], yaml_file) - assert(helpers.reload_kong(strategy, "reload --prefix " .. helpers.test_conf.prefix, { + assert(kong_reload(strategy, "reload --prefix " .. helpers.test_conf.prefix, { declarative_config = yaml_file, })) @@ -325,7 +396,7 @@ describe("kong reload #" .. strategy, function() return true end) - assert(helpers.reload_kong(strategy, "reload --prefix " .. helpers.test_conf.prefix)) + assert(kong_reload(strategy, "reload --prefix " .. helpers.test_conf.prefix)) admin_client = assert(helpers.admin_client()) local res = assert(admin_client:send { @@ -422,7 +493,7 @@ describe("kong reload #" .. strategy, function() return true end) - assert(helpers.reload_kong(strategy, "reload --prefix " .. helpers.test_conf.prefix)) + assert(kong_reload(strategy, "reload --prefix " .. helpers.test_conf.prefix)) admin_client = assert(helpers.admin_client()) local res = assert(admin_client:send { @@ -513,7 +584,7 @@ describe("kong reload #" .. strategy, function() weight: 100 ]], yaml_file) - assert(helpers.reload_kong(strategy, "reload --prefix " .. helpers.test_conf.prefix, { + assert(kong_reload(strategy, "reload --prefix " .. helpers.test_conf.prefix, { declarative_config = yaml_file, })) @@ -646,7 +717,7 @@ describe("key-auth plugin invalidation on dbless reload #off", function() keyauth_credentials: - key: my-new-key ]], yaml_file) - assert(helpers.reload_kong("off", "reload --prefix " .. helpers.test_conf.prefix, { + assert(kong_reload("off", "reload --prefix " .. 
helpers.test_conf.prefix, { declarative_config = yaml_file, })) diff --git a/spec/helpers.lua b/spec/helpers.lua index ad4c30fda46..644b40df320 100644 --- a/spec/helpers.lua +++ b/spec/helpers.lua @@ -2712,78 +2712,6 @@ local function restart_kong(env, tables, fixtures) end -local function wait_until_no_common_workers(workers, expected_total, strategy) - if strategy == "cassandra" then - ngx.sleep(0.5) - end - wait_until(function() - local pok, admin_client = pcall(admin_client) - if not pok then - return false - end - local res = assert(admin_client:send { - method = "GET", - path = "/", - }) - luassert.res_status(200, res) - local json = cjson.decode(luassert.res_status(200, res)) - admin_client:close() - - local new_workers = json.pids.workers - local total = 0 - local common = 0 - if new_workers then - for _, v in ipairs(new_workers) do - total = total + 1 - for _, v_old in ipairs(workers) do - if v == v_old then - common = common + 1 - break - end - end - end - end - return common == 0 and total == (expected_total or total) - end, 30) -end - - -local function get_kong_workers() - local workers - wait_until(function() - local pok, admin_client = pcall(admin_client) - if not pok then - return false - end - local res = admin_client:send { - method = "GET", - path = "/", - } - if not res or res.status ~= 200 then - return false - end - local body = luassert.res_status(200, res) - local json = cjson.decode(body) - - admin_client:close() - workers = json.pids.workers - return true - end, 10) - return workers -end - - ---- Reload Kong and wait all workers are restarted. -local function reload_kong(strategy, ...) - local workers = get_kong_workers() - local ok, err = kong_exec(...) - if ok then - wait_until_no_common_workers(workers, 1, strategy) - end - return ok, err -end - - --- Simulate a Hybrid mode DP and connect to the CP specified in `opts`. -- @function clustering_client -- @param opts Options to use, the `host`, `port`, `cert` and `cert_key` fields @@ -2960,9 +2888,6 @@ end start_kong = start_kong, stop_kong = stop_kong, restart_kong = restart_kong, - reload_kong = reload_kong, - get_kong_workers = get_kong_workers, - wait_until_no_common_workers = wait_until_no_common_workers, start_grpc_target = start_grpc_target, stop_grpc_target = stop_grpc_target, From 9040cc0dfe82e7c61a1ae2a7a766e3e91e0f5334 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 18 Jul 2024 21:08:59 +0800 Subject: [PATCH 21/42] Revert "Revert "Revert "tests(router): add worker_consistency=eventual to test case""" This reverts commit 06bae64c4040e746ece4b11cc516ff5280927a78. --- .../05-proxy/02-router_spec.lua | 155 +++++++++--------- 1 file changed, 75 insertions(+), 80 deletions(-) diff --git a/spec/02-integration/05-proxy/02-router_spec.lua b/spec/02-integration/05-proxy/02-router_spec.lua index 09a54d4f582..58609797cf7 100644 --- a/spec/02-integration/05-proxy/02-router_spec.lua +++ b/spec/02-integration/05-proxy/02-router_spec.lua @@ -2196,102 +2196,97 @@ for _, strategy in helpers.each_strategy() do end) end) - for _, consistency in ipairs({ "strict", "eventual" }) do - describe("Router [#" .. strategy .. ", consistency = " .. consistency .. "] at startup" , function() - local proxy_client - local route + describe("Router [#" .. strategy .. ", flavor = " .. flavor .. 
"] at startup" , function() + local proxy_client + local route - lazy_setup(function() - local bp = helpers.get_db_utils(strategy, { - "routes", - "services", - "plugins", - }, { - "enable-buffering", - }) + lazy_setup(function() + local bp = helpers.get_db_utils(strategy, { + "routes", + "services", + "plugins", + }, { + "enable-buffering", + }) - route = bp.routes:insert({ - methods = { "GET" }, - protocols = { "http" }, - strip_path = false, - }) + route = bp.routes:insert({ + methods = { "GET" }, + protocols = { "http" }, + strip_path = false, + }) - if enable_buffering then - bp.plugins:insert { - name = "enable-buffering", - protocols = { "http", "https", "grpc", "grpcs" }, - } - end + if enable_buffering then + bp.plugins:insert { + name = "enable-buffering", + protocols = { "http", "https", "grpc", "grpcs" }, + } + end - assert(helpers.start_kong({ - worker_consistency = consistency, - database = strategy, - nginx_worker_processes = 4, - plugins = "bundled,enable-buffering", - nginx_conf = "spec/fixtures/custom_nginx.template", - })) - end) + assert(helpers.start_kong({ + database = strategy, + nginx_worker_processes = 4, + plugins = "bundled,enable-buffering", + nginx_conf = "spec/fixtures/custom_nginx.template", + })) + end) - lazy_teardown(function() - helpers.stop_kong() - end) + lazy_teardown(function() + helpers.stop_kong() + end) - before_each(function() - proxy_client = helpers.proxy_client() - end) + before_each(function() + proxy_client = helpers.proxy_client() + end) - after_each(function() - if proxy_client then - proxy_client:close() - end - end) + after_each(function() + if proxy_client then + proxy_client:close() + end + end) - it("uses configuration from datastore or declarative_config", function() - for _ = 1, 1000 do - proxy_client = helpers.proxy_client() - local res = assert(proxy_client:send { - method = "GET", - path = "/get", - headers = { ["kong-debug"] = 1 }, - }) + it("uses configuration from datastore or declarative_config", function() + for _ = 1, 1000 do + proxy_client = helpers.proxy_client() + local res = assert(proxy_client:send { + method = "GET", + path = "/get", + headers = { ["kong-debug"] = 1 }, + }) - assert.response(res).has_status(200) + assert.response(res).has_status(200) - assert.equal(route.service.name, res.headers["kong-service-name"]) - proxy_client:close() - end - end) + assert.equal(route.service.name, res.headers["kong-service-name"]) + proxy_client:close() + end + end) - it("#db worker respawn correctly rebuilds router", function() - local admin_client = helpers.admin_client() + it("#db worker respawn correctly rebuilds router", function() + local admin_client = helpers.admin_client() - local res = assert(admin_client:post("/routes", { - headers = { ["Content-Type"] = "application/json" }, - body = { - paths = { "/foo" }, - }, - })) - assert.res_status(201, res) - admin_client:close() + local res = assert(admin_client:post("/routes", { + headers = { ["Content-Type"] = "application/json" }, + body = { + paths = { "/foo" }, + }, + })) + assert.res_status(201, res) + admin_client:close() - local workers_before = helpers.get_kong_workers() - assert(helpers.signal_workers(nil, "-TERM")) - helpers.wait_until_no_common_workers(workers_before, 1) -- respawned + assert(helpers.signal_workers(nil, "-TERM")) - proxy_client:close() - proxy_client = helpers.proxy_client() + proxy_client:close() + proxy_client = helpers.proxy_client() - local res = assert(proxy_client:send { - method = "GET", - path = "/foo", - headers = { ["kong-debug"] = 1 }, 
- }) + local res = assert(proxy_client:send { + method = "GET", + path = "/foo", + headers = { ["kong-debug"] = 1 }, + }) - local body = assert.response(res).has_status(503) - local json = cjson.decode(body) - assert.equal("no Service found with those values", json.message) - end) + local body = assert.response(res).has_status(503) + local json = cjson.decode(body) + assert.equal("no Service found with those values", json.message) end) - end + end) end end From f523e4df48c81b7b4fd1c431c7ed688450e078ca Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 18 Jul 2024 21:08:59 +0800 Subject: [PATCH 22/42] Revert "Revert "Revert "fix(*): do not use stale router data if workers are respawned""" This reverts commit 5f1fdbcbfc436b3aa0c1d29dbffb0db695c564b6. --- kong/init.lua | 19 +++++-------------- kong/runloop/handler.lua | 2 +- 2 files changed, 6 insertions(+), 15 deletions(-) diff --git a/kong/init.lua b/kong/init.lua index 978ee3c5271..4f1b477c981 100644 --- a/kong/init.lua +++ b/kong/init.lua @@ -661,31 +661,22 @@ function Kong.init_worker() return end - local is_not_control_plane = kong.configuration.role ~= "control_plane" - if is_not_control_plane then + if kong.configuration.role ~= "control_plane" then ok, err = execute_cache_warmup(kong.configuration) if not ok then ngx_log(ngx_ERR, "failed to warm up the DB cache: " .. err) end end + runloop.init_worker.before() + + -- run plugins init_worker context ok, err = runloop.update_plugins_iterator() if not ok then stash_init_worker_error("failed to build the plugins iterator: " .. err) return end - if is_not_control_plane then - ok, err = runloop.update_router() - if not ok then - stash_init_worker_error("failed to build the router: " .. err) - return - end - end - - runloop.init_worker.before() - - -- run plugins init_worker context local plugins_iterator = runloop.get_plugins_iterator() local errors = execute_init_worker_plugins_iterator(plugins_iterator, ctx) if errors then @@ -698,7 +689,7 @@ function Kong.init_worker() runloop.init_worker.after() - if is_not_control_plane and ngx.worker.id() == 0 then + if kong.configuration.role ~= "control_plane" then plugin_servers.start() end diff --git a/kong/runloop/handler.lua b/kong/runloop/handler.lua index 8eb31483ce1..6608bd15138 100644 --- a/kong/runloop/handler.lua +++ b/kong/runloop/handler.lua @@ -1167,7 +1167,7 @@ end -- before or after the plugins return { build_router = build_router, - update_router = update_router, + build_plugins_iterator = build_plugins_iterator, update_plugins_iterator = update_plugins_iterator, get_plugins_iterator = get_plugins_iterator, From 6fce805527a839fd873daed482afa6f2e7ac9873 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 18 Jul 2024 21:08:59 +0800 Subject: [PATCH 23/42] Revert "Revert "Revert "fix(runloop) do not reset `*:version` to `init` when worker respawns,""" This reverts commit 4c6a3f7dc3144e2702c582a61c7cf0552f2bed31. --- kong/init.lua | 12 ++++---- kong/runloop/handler.lua | 24 +++++---------- .../05-proxy/02-router_spec.lua | 29 +------------------ 3 files changed, 15 insertions(+), 50 deletions(-) diff --git a/kong/init.lua b/kong/init.lua index 4f1b477c981..98f10dfc5af 100644 --- a/kong/init.lua +++ b/kong/init.lua @@ -564,12 +564,6 @@ function Kong.init() if config.role ~= "control_plane" then assert(runloop.build_router("init")) - - ok, err = runloop.set_init_versions_in_cache() - if not ok then - error("error setting initial versions for router and plugins iterator in cache: " .. 
- tostring(err)) - end end end @@ -644,6 +638,12 @@ function Kong.init_worker() end kong.core_cache = core_cache + ok, err = runloop.set_init_versions_in_cache() + if not ok then + stash_init_worker_error(err) -- 'err' fully formatted + return + end + -- LEGACY singletons.cache = cache singletons.core_cache = core_cache diff --git a/kong/runloop/handler.lua b/kong/runloop/handler.lua index 6608bd15138..970534f4c18 100644 --- a/kong/runloop/handler.lua +++ b/kong/runloop/handler.lua @@ -13,7 +13,6 @@ local declarative = require "kong.db.declarative" local workspaces = require "kong.workspaces" local lrucache = require "resty.lrucache" local request_id = require "kong.tracing.request_id" -local marshall = require "kong.cache.marshall" local PluginsIterator = require "kong.runloop.plugins_iterator" @@ -1139,24 +1138,17 @@ end local function set_init_versions_in_cache() - -- because of worker events, kong.cache can not be initialized in `init` phase - -- therefore, we need to use the shdict API directly to set the initial value - assert(kong.configuration.role ~= "control_plane") - assert(ngx.get_phase() == "init") - local core_cache_shm = ngx.shared["kong_core_db_cache"] - - -- ttl = forever is okay as "*:versions" keys are always manually invalidated - local marshalled_value = marshall("init", 0, 0) - - -- see kong.cache.safe_set function - local ok, err = core_cache_shm:safe_set("kong_core_db_cacherouter:version", marshalled_value) - if not ok then - return nil, "failed to set initial router version in cache: " .. tostring(err) + if kong.configuration.role ~= "control_pane" then + local ok, err = kong.core_cache:safe_set("router:version", "init") + if not ok then + return nil, "failed to set router version in cache: " .. tostring(err) + end end - ok, err = core_cache_shm:safe_set("kong_core_db_cacheplugins_iterator:version", marshalled_value) + local ok, err = kong.core_cache:safe_set("plugins_iterator:version", "init") if not ok then - return nil, "failed to set initial plugins iterator version in cache: " .. tostring(err) + return nil, "failed to set plugins iterator version in cache: " .. + tostring(err) end return true diff --git a/spec/02-integration/05-proxy/02-router_spec.lua b/spec/02-integration/05-proxy/02-router_spec.lua index 58609797cf7..331755c837b 100644 --- a/spec/02-integration/05-proxy/02-router_spec.lua +++ b/spec/02-integration/05-proxy/02-router_spec.lua @@ -2196,7 +2196,7 @@ for _, strategy in helpers.each_strategy() do end) end) - describe("Router [#" .. strategy .. ", flavor = " .. flavor .. "] at startup" , function() + describe("Router at startup [#" .. strategy .. 
"]" , function() local proxy_client local route @@ -2260,33 +2260,6 @@ for _, strategy in helpers.each_strategy() do end end) - it("#db worker respawn correctly rebuilds router", function() - local admin_client = helpers.admin_client() - - local res = assert(admin_client:post("/routes", { - headers = { ["Content-Type"] = "application/json" }, - body = { - paths = { "/foo" }, - }, - })) - assert.res_status(201, res) - admin_client:close() - - assert(helpers.signal_workers(nil, "-TERM")) - - proxy_client:close() - proxy_client = helpers.proxy_client() - - local res = assert(proxy_client:send { - method = "GET", - path = "/foo", - headers = { ["kong-debug"] = 1 }, - }) - - local body = assert.response(res).has_status(503) - local json = cjson.decode(body) - assert.equal("no Service found with those values", json.message) - end) end) end end From 41fe3df81d5c491dc12c6e8e1c411e6c77bea749 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 18 Jul 2024 21:08:59 +0800 Subject: [PATCH 24/42] Revert "Revert "Revert "fix(*): prevent queues from growing without bounds (#10046) (#10254)""" This reverts commit 369cc34dec4f6c68d6d5e1c971e2a50510e18cda. --- CHANGELOG.md | 4 -- kong.conf.default | 11 ---- kong/conf_loader/init.lua | 2 - kong/plugins/http-log/handler.lua | 2 +- kong/templates/kong_defaults.lua | 2 - kong/tools/batch_queue.lua | 89 +++++++++++----------------- spec/01-unit/27-batch_queue_spec.lua | 30 ---------- 7 files changed, 35 insertions(+), 105 deletions(-) delete mode 100644 spec/01-unit/27-batch_queue_spec.lua diff --git a/CHANGELOG.md b/CHANGELOG.md index 7bcc9240fec..5fde727f9b6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -102,10 +102,6 @@ - **HTTP Log**: fix internal error during validating the schema if http_endpoint contains userinfo but headers is empty [#9574](https://github.com/Kong/kong/pull/9574) -- Update the batch queues module so that queues no longer grow without bounds if - their consumers fail to process the entries. Instead, old batches are now dropped - and an error is logged. - [#10247](https://github.com/Kong/kong/pull/10247) ##### CLI diff --git a/kong.conf.default b/kong.conf.default index e1d79288cfa..87e5812afb1 100644 --- a/kong.conf.default +++ b/kong.conf.default @@ -1562,14 +1562,3 @@ # **Warning**: Certain variables, when made # available, may create opportunities to # escape the sandbox. - -#max_queued_batches = 100 # Maximum number of batches to keep on an internal - # plugin queue before dropping old batches. This is - # meant as a global, last-resort control to prevent - # queues from consuming infinite memory. When batches - # are being dropped, an error message - # "exceeded max_queued_batches (%d), dropping oldest" - # will be logged. The error message will also include - # a string that identifies the plugin causing the - # problem. Queues are used by the http-log, statsd, - # opentelemetry and datadog plugins. 
diff --git a/kong/conf_loader/init.lua b/kong/conf_loader/init.lua index 7b5670eec49..4bb20604c06 100644 --- a/kong/conf_loader/init.lua +++ b/kong/conf_loader/init.lua @@ -663,8 +663,6 @@ local CONF_INFERENCES = { untrusted_lua = { enum = { "on", "off", "sandbox" } }, untrusted_lua_sandbox_requires = { typ = "array" }, untrusted_lua_sandbox_environment = { typ = "array" }, - - max_queued_batches = { typ = "number" }, } diff --git a/kong/plugins/http-log/handler.lua b/kong/plugins/http-log/handler.lua index 2c4d130b97b..ef82bf5bc14 100644 --- a/kong/plugins/http-log/handler.lua +++ b/kong/plugins/http-log/handler.lua @@ -170,7 +170,7 @@ function HttpLogHandler:log(conf) } local err - q, err = BatchQueue.new("http-log", process, opts) + q, err = BatchQueue.new(process, opts) if not q then kong.log.err("could not create queue: ", err) return diff --git a/kong/templates/kong_defaults.lua b/kong/templates/kong_defaults.lua index 459318eaae2..a440e202b2e 100644 --- a/kong/templates/kong_defaults.lua +++ b/kong/templates/kong_defaults.lua @@ -179,6 +179,4 @@ pluginserver_names = NONE untrusted_lua = sandbox untrusted_lua_sandbox_requires = untrusted_lua_sandbox_environment = - -max_queued_batches = 100 ]] diff --git a/kong/tools/batch_queue.lua b/kong/tools/batch_queue.lua index 92322905a22..8eaf5ae56ef 100644 --- a/kong/tools/batch_queue.lua +++ b/kong/tools/batch_queue.lua @@ -24,14 +24,12 @@ -- end -- -- local q = BatchQueue.new( --- name, -- name of the queue for identification purposes in the log -- process, -- function used to "process/consume" values from the queue -- { -- Opts table with control values. Defaults shown: --- retry_count = 0, -- number of times to retry processing --- batch_max_size = 1000, -- max number of entries that can be queued before they are queued for processing --- process_delay = 1, -- in seconds, how often the current batch is closed & queued --- flush_timeout = 2, -- in seconds, how much time passes without activity before the current batch is closed and queued --- max_queued_batches = 100, -- max number of batches that can be queued before the oldest batch is dropped when a new one is queued +-- retry_count = 0, -- number of times to retry processing +-- batch_max_size = 1000, -- max number of entries that can be queued before they are queued for processing +-- process_delay = 1, -- in seconds, how often the current batch is closed & queued +-- flush_timeout = 2, -- in seconds, how much time passes without activity before the current batch is closed and queued -- } -- ) -- @@ -70,9 +68,11 @@ local timer_at = ngx.timer.at local remove = table.remove local type = type local huge = math.huge +local fmt = string.format local min = math.min local now = ngx.now local ERR = ngx.ERR +local ngx_log = ngx.log local DEBUG = ngx.DEBUG local WARN = ngx.WARN @@ -100,10 +100,10 @@ local process local function schedule_flush(self) local ok, err = timer_at(self.flush_timeout/1000, flush, self) if not ok then - self:log(ERR, "failed to create delayed flush timer: %s", err) + ngx_log(ERR, "failed to create delayed flush timer: ", err) return end - --self:log(DEBUG, "delayed timer created") + --ngx_log(DEBUG, "delayed timer created") self.flush_scheduled = true end @@ -113,10 +113,10 @@ end -- @param self Queue -- @param batch: table with `entries` and `retries` counter -- @param delay number: timer delay in seconds -local function schedule_process(self, delay) - local ok, err = timer_at(delay, process, self) +local function schedule_process(self, batch, delay) + local ok, err = 
timer_at(delay, process, self, batch) if not ok then - self:log(ERR, "failed to create process timer: %s", err) + ngx_log(ERR, "failed to create process timer: ", err) return end self.process_scheduled = true @@ -147,13 +147,13 @@ flush = function(premature, self) if get_now() - self.last_t < self.flush_timeout then -- flushing reported: we had activity - self:log(DEBUG, "[flush] queue had activity, delaying flush") + ngx_log(DEBUG, "[flush] queue had activity, delaying flush") schedule_flush(self) return end -- no activity and timeout reached - self:log(DEBUG, "[flush] queue had no activity, flushing triggered by flush_timeout") + ngx_log(DEBUG, "[flush] queue had no activity, flushing triggered by flush_timeout") self:flush() self.flush_scheduled = false end @@ -165,31 +165,27 @@ end -- @param self Queue -- @param batch: table with `entries` and `retries` counter -- @return nothing -process = function(premature, self) +process = function(premature, self, batch) if premature then return end - local batch = self.batch_queue[1] - if not batch then - self:log(WARN, "queue process called but no batches to be processed") - return - end - local next_retry_delay local ok, err = self.process(batch.entries) if ok then -- success, reset retry delays self.retry_delay = 1 next_retry_delay = 0 - remove(self.batch_queue, 1) + else batch.retries = batch.retries + 1 if batch.retries < self.retry_count then - self:log(WARN, "failed to process entries: %s", tostring(err)) + ngx_log(WARN, "failed to process entries: ", tostring(err)) + -- queue our data for processing again, at the end of the queue + self.batch_queue[#self.batch_queue + 1] = batch else - self:log(ERR, "entry batch was already tried %d times, dropping it", batch.retries) - remove(self.batch_queue, 1) + ngx_log(ERR, fmt("entry batch was already tried %d times, dropping it", + batch.retries)) end self.retry_delay = self.retry_delay + 1 @@ -197,8 +193,10 @@ process = function(premature, self) end if #self.batch_queue > 0 then -- more to process? - self:log(DEBUG, "processing oldest data, %d still queued", #self.batch_queue) - schedule_process(self, next_retry_delay) + ngx_log(DEBUG, fmt("processing oldest data, %d still queued", + #self.batch_queue - 1)) + local oldest_batch = remove(self.batch_queue, 1) + schedule_process(self, oldest_batch, next_retry_delay) return end @@ -220,15 +218,13 @@ end -- @param opts table, optionally including -- `retry_count`, `flush_timeout`, `batch_max_size` and `process_delay` -- @return table: a Queue object. 
-function Queue.new(name, process, opts) +function Queue.new(process, opts) opts = opts or {} - assert(type(name) == "string", - "arg #1 (name) must be a string") assert(type(process) == "function", - "arg #2 (process) must be a function") + "arg #1 (process) must be a function") assert(type(opts) == "table", - "arg #3 (opts) must be a table") + "arg #2 (opts) must be a table") assert(opts.retry_count == nil or type(opts.retry_count) == "number", "retry_count must be a number") assert(opts.flush_timeout == nil or type(opts.flush_timeout) == "number", @@ -237,11 +233,8 @@ function Queue.new(name, process, opts) "batch_max_size must be a number") assert(opts.process_delay == nil or type(opts.batch_max_size) == "number", "process_delay must be a number") - assert(opts.max_queued_batches == nil or type(opts.max_queued_batches) == "number", - "max_queued_batches must be a number") local self = { - name = name, process = process, -- flush timeout in milliseconds @@ -249,7 +242,6 @@ function Queue.new(name, process, opts) retry_count = opts.retry_count or 0, batch_max_size = opts.batch_max_size or 1000, process_delay = opts.process_delay or 1, - max_queued_batches = opts.max_queued_batches or (kong.configuration and kong.configuration.max_queued_batches) or 100, retry_delay = 1, @@ -266,17 +258,6 @@ function Queue.new(name, process, opts) end -------------------------------------------------------------------------------- --- Log a message that includes the name of the queue for identification purposes --- @param self Queue --- @param level: log level --- @param formatstring: format string, will get the queue name and ": " prepended --- @param ...: formatter arguments -function Queue:log(level, formatstring, ...) - return ngx.log(level, string.format(self.name .. ": " .. formatstring, unpack({...}))) -end - - ------------------------------------------------------------------------------- -- Add data to the queue -- @param entry the value included in the queue. It can be any Lua value besides nil. @@ -288,8 +269,8 @@ function Queue:add(entry) if self.batch_max_size == 1 then -- no batching - self.batch_queue = { { entries = { entry }, retries = 0 } } - schedule_process(self, 0) + local batch = { entries = { entry }, retries = 0 } + schedule_process(self, batch, 0) return true end @@ -323,12 +304,8 @@ function Queue:flush() -- Queue the current batch, if it has at least 1 entry if current_batch_size > 0 then - self:log(DEBUG, "queueing batch for processing (%d entries)", current_batch_size) + ngx_log(DEBUG, "queueing batch for processing (", current_batch_size, " entries)") - while #self.batch_queue >= self.max_queued_batches do - self:log(ERR, "exceeded max_queued_batches (%d), dropping oldest", self.max_queued_batches) - remove(self.batch_queue, 1) - end self.batch_queue[#self.batch_queue + 1] = self.current_batch self.current_batch = { entries = {}, retries = 0 } end @@ -337,8 +314,10 @@ function Queue:flush() -- in the future. 
This will keep calling itself in the future until -- the queue is empty if #self.batch_queue > 0 and not self.process_scheduled then - self:log(DEBUG, "processing oldest entry, %d still queued", #self.batch_queue) - schedule_process(self, self.process_delay) + ngx_log(DEBUG, fmt("processing oldest entry, %d still queued", + #self.batch_queue - 1)) + local oldest_batch = remove(self.batch_queue, 1) + schedule_process(self, oldest_batch, self.process_delay) end return true diff --git a/spec/01-unit/27-batch_queue_spec.lua b/spec/01-unit/27-batch_queue_spec.lua deleted file mode 100644 index d4b0bef4c35..00000000000 --- a/spec/01-unit/27-batch_queue_spec.lua +++ /dev/null @@ -1,30 +0,0 @@ - -local BatchQueue = require "kong.tools.batch_queue" - -describe("batch queue", function() - - it("observes the limit parameter", function() - local count = 0 - local last - local function process(entries) - count = count + #entries - last = entries[#entries] - return true - end - - local q = BatchQueue.new("batch-queue-unit-test", process, {max_queued_batches=2, batch_max_size=100, process_delay=0}) - - q:add(1) - q:flush() - q:add(2) - q:flush() - q:add(3) - q:flush() - - -- run scheduled timer tasks - ngx.sleep(1) - - assert.equal(2, count) - assert.equal(3, last) - end) -end) From 5f8d3a12a51de7acd11945b40c183d7f1427b9ea Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 18 Jul 2024 21:08:59 +0800 Subject: [PATCH 25/42] Revert "Revert "Revert "Revert "fix(*): prevent queues from growing without bounds (#10046) (#10058)"""" This reverts commit e30496e6ac0fbff48697ddbb5561b5cd083e4601. --- CHANGELOG.md | 12 ++++ kong/conf_loader/init.lua | 2 + kong/plugins/http-log/handler.lua | 2 +- kong/tools/batch_queue.lua | 89 +++++++++++++++++----------- spec/01-unit/27-batch_queue_spec.lua | 33 +++++++++++ 5 files changed, 103 insertions(+), 35 deletions(-) create mode 100644 spec/01-unit/27-batch_queue_spec.lua diff --git a/CHANGELOG.md b/CHANGELOG.md index 5fde727f9b6..8bc993edd86 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -92,6 +92,18 @@ - Fixed a bug where internal redirects (i.e. those produced by the error_page directive) could interfere with worker process handling the request when buffered proxying is being used. +## Unrelease + +### Fixes + +##### Plugins + +- Update the batch queues module so that queues no longer grow without bounds if + their consumers fail to process the entries. Instead, old batches are now dropped + and an error is logged. 
+ [#10046](https://github.com/Kong/kong/pull/10046) + + ## [2.8.3] > Released 2022/11/02 diff --git a/kong/conf_loader/init.lua b/kong/conf_loader/init.lua index 4bb20604c06..7b5670eec49 100644 --- a/kong/conf_loader/init.lua +++ b/kong/conf_loader/init.lua @@ -663,6 +663,8 @@ local CONF_INFERENCES = { untrusted_lua = { enum = { "on", "off", "sandbox" } }, untrusted_lua_sandbox_requires = { typ = "array" }, untrusted_lua_sandbox_environment = { typ = "array" }, + + max_queued_batches = { typ = "number" }, } diff --git a/kong/plugins/http-log/handler.lua b/kong/plugins/http-log/handler.lua index ef82bf5bc14..2c4d130b97b 100644 --- a/kong/plugins/http-log/handler.lua +++ b/kong/plugins/http-log/handler.lua @@ -170,7 +170,7 @@ function HttpLogHandler:log(conf) } local err - q, err = BatchQueue.new(process, opts) + q, err = BatchQueue.new("http-log", process, opts) if not q then kong.log.err("could not create queue: ", err) return diff --git a/kong/tools/batch_queue.lua b/kong/tools/batch_queue.lua index 8eaf5ae56ef..92322905a22 100644 --- a/kong/tools/batch_queue.lua +++ b/kong/tools/batch_queue.lua @@ -24,12 +24,14 @@ -- end -- -- local q = BatchQueue.new( +-- name, -- name of the queue for identification purposes in the log -- process, -- function used to "process/consume" values from the queue -- { -- Opts table with control values. Defaults shown: --- retry_count = 0, -- number of times to retry processing --- batch_max_size = 1000, -- max number of entries that can be queued before they are queued for processing --- process_delay = 1, -- in seconds, how often the current batch is closed & queued --- flush_timeout = 2, -- in seconds, how much time passes without activity before the current batch is closed and queued +-- retry_count = 0, -- number of times to retry processing +-- batch_max_size = 1000, -- max number of entries that can be queued before they are queued for processing +-- process_delay = 1, -- in seconds, how often the current batch is closed & queued +-- flush_timeout = 2, -- in seconds, how much time passes without activity before the current batch is closed and queued +-- max_queued_batches = 100, -- max number of batches that can be queued before the oldest batch is dropped when a new one is queued -- } -- ) -- @@ -68,11 +70,9 @@ local timer_at = ngx.timer.at local remove = table.remove local type = type local huge = math.huge -local fmt = string.format local min = math.min local now = ngx.now local ERR = ngx.ERR -local ngx_log = ngx.log local DEBUG = ngx.DEBUG local WARN = ngx.WARN @@ -100,10 +100,10 @@ local process local function schedule_flush(self) local ok, err = timer_at(self.flush_timeout/1000, flush, self) if not ok then - ngx_log(ERR, "failed to create delayed flush timer: ", err) + self:log(ERR, "failed to create delayed flush timer: %s", err) return end - --ngx_log(DEBUG, "delayed timer created") + --self:log(DEBUG, "delayed timer created") self.flush_scheduled = true end @@ -113,10 +113,10 @@ end -- @param self Queue -- @param batch: table with `entries` and `retries` counter -- @param delay number: timer delay in seconds -local function schedule_process(self, batch, delay) - local ok, err = timer_at(delay, process, self, batch) +local function schedule_process(self, delay) + local ok, err = timer_at(delay, process, self) if not ok then - ngx_log(ERR, "failed to create process timer: ", err) + self:log(ERR, "failed to create process timer: %s", err) return end self.process_scheduled = true @@ -147,13 +147,13 @@ flush = function(premature, self) if get_now() 
- self.last_t < self.flush_timeout then -- flushing reported: we had activity - ngx_log(DEBUG, "[flush] queue had activity, delaying flush") + self:log(DEBUG, "[flush] queue had activity, delaying flush") schedule_flush(self) return end -- no activity and timeout reached - ngx_log(DEBUG, "[flush] queue had no activity, flushing triggered by flush_timeout") + self:log(DEBUG, "[flush] queue had no activity, flushing triggered by flush_timeout") self:flush() self.flush_scheduled = false end @@ -165,27 +165,31 @@ end -- @param self Queue -- @param batch: table with `entries` and `retries` counter -- @return nothing -process = function(premature, self, batch) +process = function(premature, self) if premature then return end + local batch = self.batch_queue[1] + if not batch then + self:log(WARN, "queue process called but no batches to be processed") + return + end + local next_retry_delay local ok, err = self.process(batch.entries) if ok then -- success, reset retry delays self.retry_delay = 1 next_retry_delay = 0 - + remove(self.batch_queue, 1) else batch.retries = batch.retries + 1 if batch.retries < self.retry_count then - ngx_log(WARN, "failed to process entries: ", tostring(err)) - -- queue our data for processing again, at the end of the queue - self.batch_queue[#self.batch_queue + 1] = batch + self:log(WARN, "failed to process entries: %s", tostring(err)) else - ngx_log(ERR, fmt("entry batch was already tried %d times, dropping it", - batch.retries)) + self:log(ERR, "entry batch was already tried %d times, dropping it", batch.retries) + remove(self.batch_queue, 1) end self.retry_delay = self.retry_delay + 1 @@ -193,10 +197,8 @@ process = function(premature, self, batch) end if #self.batch_queue > 0 then -- more to process? - ngx_log(DEBUG, fmt("processing oldest data, %d still queued", - #self.batch_queue - 1)) - local oldest_batch = remove(self.batch_queue, 1) - schedule_process(self, oldest_batch, next_retry_delay) + self:log(DEBUG, "processing oldest data, %d still queued", #self.batch_queue) + schedule_process(self, next_retry_delay) return end @@ -218,13 +220,15 @@ end -- @param opts table, optionally including -- `retry_count`, `flush_timeout`, `batch_max_size` and `process_delay` -- @return table: a Queue object. 
-function Queue.new(process, opts) +function Queue.new(name, process, opts) opts = opts or {} + assert(type(name) == "string", + "arg #1 (name) must be a string") assert(type(process) == "function", - "arg #1 (process) must be a function") + "arg #2 (process) must be a function") assert(type(opts) == "table", - "arg #2 (opts) must be a table") + "arg #3 (opts) must be a table") assert(opts.retry_count == nil or type(opts.retry_count) == "number", "retry_count must be a number") assert(opts.flush_timeout == nil or type(opts.flush_timeout) == "number", @@ -233,8 +237,11 @@ function Queue.new(process, opts) "batch_max_size must be a number") assert(opts.process_delay == nil or type(opts.batch_max_size) == "number", "process_delay must be a number") + assert(opts.max_queued_batches == nil or type(opts.max_queued_batches) == "number", + "max_queued_batches must be a number") local self = { + name = name, process = process, -- flush timeout in milliseconds @@ -242,6 +249,7 @@ function Queue.new(process, opts) retry_count = opts.retry_count or 0, batch_max_size = opts.batch_max_size or 1000, process_delay = opts.process_delay or 1, + max_queued_batches = opts.max_queued_batches or (kong.configuration and kong.configuration.max_queued_batches) or 100, retry_delay = 1, @@ -258,6 +266,17 @@ function Queue.new(process, opts) end +------------------------------------------------------------------------------- +-- Log a message that includes the name of the queue for identification purposes +-- @param self Queue +-- @param level: log level +-- @param formatstring: format string, will get the queue name and ": " prepended +-- @param ...: formatter arguments +function Queue:log(level, formatstring, ...) + return ngx.log(level, string.format(self.name .. ": " .. formatstring, unpack({...}))) +end + + ------------------------------------------------------------------------------- -- Add data to the queue -- @param entry the value included in the queue. It can be any Lua value besides nil. @@ -269,8 +288,8 @@ function Queue:add(entry) if self.batch_max_size == 1 then -- no batching - local batch = { entries = { entry }, retries = 0 } - schedule_process(self, batch, 0) + self.batch_queue = { { entries = { entry }, retries = 0 } } + schedule_process(self, 0) return true end @@ -304,8 +323,12 @@ function Queue:flush() -- Queue the current batch, if it has at least 1 entry if current_batch_size > 0 then - ngx_log(DEBUG, "queueing batch for processing (", current_batch_size, " entries)") + self:log(DEBUG, "queueing batch for processing (%d entries)", current_batch_size) + while #self.batch_queue >= self.max_queued_batches do + self:log(ERR, "exceeded max_queued_batches (%d), dropping oldest", self.max_queued_batches) + remove(self.batch_queue, 1) + end self.batch_queue[#self.batch_queue + 1] = self.current_batch self.current_batch = { entries = {}, retries = 0 } end @@ -314,10 +337,8 @@ function Queue:flush() -- in the future. 
This will keep calling itself in the future until -- the queue is empty if #self.batch_queue > 0 and not self.process_scheduled then - ngx_log(DEBUG, fmt("processing oldest entry, %d still queued", - #self.batch_queue - 1)) - local oldest_batch = remove(self.batch_queue, 1) - schedule_process(self, oldest_batch, self.process_delay) + self:log(DEBUG, "processing oldest entry, %d still queued", #self.batch_queue) + schedule_process(self, self.process_delay) end return true diff --git a/spec/01-unit/27-batch_queue_spec.lua b/spec/01-unit/27-batch_queue_spec.lua new file mode 100644 index 00000000000..38b1edcb989 --- /dev/null +++ b/spec/01-unit/27-batch_queue_spec.lua @@ -0,0 +1,33 @@ + +local BatchQueue = require "kong.tools.batch_queue" +local helpers = require "spec.helpers" + +describe("batch queue", function() + + it("observes the limit parameter", function() + local count = 0 + local last + local function process(entries) + count = count + #entries + last = entries[#entries] + return true + end + + local q = BatchQueue.new("batch-queue-unit-test", process, {max_queued_batches=2, batch_max_size=100, process_delay=0}) + + q:add(1) + q:flush() + q:add(2) + q:flush() + q:add(3) + q:flush() + + helpers.wait_until(function() + ngx.sleep(.1) + return #q.batch_queue == 0 + end, 1) + + assert.equal(2, count) + assert.equal(3, last) + end) +end) From 40c351cde982205a5e20c1a1c94b8e008f803df6 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 18 Jul 2024 21:08:59 +0800 Subject: [PATCH 26/42] Revert "Revert "Revert "Revert "docs(*): document new max_queued_batches parameter (#10071)"""" This reverts commit 59079c62cd3548aae5d6dba3152588de2e78c9c6. --- kong.conf.default | 10 ++++++++++ kong/templates/kong_defaults.lua | 2 ++ 2 files changed, 12 insertions(+) diff --git a/kong.conf.default b/kong.conf.default index 87e5812afb1..9831258f6fe 100644 --- a/kong.conf.default +++ b/kong.conf.default @@ -1562,3 +1562,13 @@ # **Warning**: Certain variables, when made # available, may create opportunities to # escape the sandbox. + +#max_queued_batches = 100 # Maximum number of batches to keep on an internal + # plugin queue before dropping old batches. This is + # meant as a global, last-resort control to prevent + # queues from consuming infinite memory. When batches + # are being dropped, an error message + # "exceeded max_queued_batches (%d), dropping oldest" + # will be logged. The error message will also include + # a string that identifies the plugin causing the + # problem. diff --git a/kong/templates/kong_defaults.lua b/kong/templates/kong_defaults.lua index a440e202b2e..459318eaae2 100644 --- a/kong/templates/kong_defaults.lua +++ b/kong/templates/kong_defaults.lua @@ -179,4 +179,6 @@ pluginserver_names = NONE untrusted_lua = sandbox untrusted_lua_sandbox_requires = untrusted_lua_sandbox_environment = + +max_queued_batches = 100 ]] From 4428c7c8cb7dab3209de857663d63ae68d21cf1f Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 18 Jul 2024 21:08:59 +0800 Subject: [PATCH 27/42] Revert "Revert "Revert "docs(*): document new max_queued_batches parameter (#10071)""" This reverts commit 747e4d211cbcb0099bf9d693ab90ce1cbe35a7c7. --- kong.conf.default | 10 ---------- kong/templates/kong_defaults.lua | 2 -- 2 files changed, 12 deletions(-) diff --git a/kong.conf.default b/kong.conf.default index 9831258f6fe..87e5812afb1 100644 --- a/kong.conf.default +++ b/kong.conf.default @@ -1562,13 +1562,3 @@ # **Warning**: Certain variables, when made # available, may create opportunities to # escape the sandbox. 
- -#max_queued_batches = 100 # Maximum number of batches to keep on an internal - # plugin queue before dropping old batches. This is - # meant as a global, last-resort control to prevent - # queues from consuming infinite memory. When batches - # are being dropped, an error message - # "exceeded max_queued_batches (%d), dropping oldest" - # will be logged. The error message will also include - # a string that identifies the plugin causing the - # problem. diff --git a/kong/templates/kong_defaults.lua b/kong/templates/kong_defaults.lua index 459318eaae2..a440e202b2e 100644 --- a/kong/templates/kong_defaults.lua +++ b/kong/templates/kong_defaults.lua @@ -179,6 +179,4 @@ pluginserver_names = NONE untrusted_lua = sandbox untrusted_lua_sandbox_requires = untrusted_lua_sandbox_environment = - -max_queued_batches = 100 ]] From 425101099955d2189fdec0df85f1442514a9db22 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 18 Jul 2024 21:08:59 +0800 Subject: [PATCH 28/42] Revert "Revert "Revert "fix(*): prevent queues from growing without bounds (#10046) (#10058)""" This reverts commit 0cff93440acb4d8cdbd67bc5c65e2cc28a6a972e. --- CHANGELOG.md | 12 ---- kong/conf_loader/init.lua | 2 - kong/plugins/http-log/handler.lua | 2 +- kong/tools/batch_queue.lua | 89 +++++++++++----------------- spec/01-unit/27-batch_queue_spec.lua | 33 ----------- 5 files changed, 35 insertions(+), 103 deletions(-) delete mode 100644 spec/01-unit/27-batch_queue_spec.lua diff --git a/CHANGELOG.md b/CHANGELOG.md index 8bc993edd86..5fde727f9b6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -92,18 +92,6 @@ - Fixed a bug where internal redirects (i.e. those produced by the error_page directive) could interfere with worker process handling the request when buffered proxying is being used. -## Unrelease - -### Fixes - -##### Plugins - -- Update the batch queues module so that queues no longer grow without bounds if - their consumers fail to process the entries. Instead, old batches are now dropped - and an error is logged. - [#10046](https://github.com/Kong/kong/pull/10046) - - ## [2.8.3] > Released 2022/11/02 diff --git a/kong/conf_loader/init.lua b/kong/conf_loader/init.lua index 7b5670eec49..4bb20604c06 100644 --- a/kong/conf_loader/init.lua +++ b/kong/conf_loader/init.lua @@ -663,8 +663,6 @@ local CONF_INFERENCES = { untrusted_lua = { enum = { "on", "off", "sandbox" } }, untrusted_lua_sandbox_requires = { typ = "array" }, untrusted_lua_sandbox_environment = { typ = "array" }, - - max_queued_batches = { typ = "number" }, } diff --git a/kong/plugins/http-log/handler.lua b/kong/plugins/http-log/handler.lua index 2c4d130b97b..ef82bf5bc14 100644 --- a/kong/plugins/http-log/handler.lua +++ b/kong/plugins/http-log/handler.lua @@ -170,7 +170,7 @@ function HttpLogHandler:log(conf) } local err - q, err = BatchQueue.new("http-log", process, opts) + q, err = BatchQueue.new(process, opts) if not q then kong.log.err("could not create queue: ", err) return diff --git a/kong/tools/batch_queue.lua b/kong/tools/batch_queue.lua index 92322905a22..8eaf5ae56ef 100644 --- a/kong/tools/batch_queue.lua +++ b/kong/tools/batch_queue.lua @@ -24,14 +24,12 @@ -- end -- -- local q = BatchQueue.new( --- name, -- name of the queue for identification purposes in the log -- process, -- function used to "process/consume" values from the queue -- { -- Opts table with control values. 
Defaults shown: --- retry_count = 0, -- number of times to retry processing --- batch_max_size = 1000, -- max number of entries that can be queued before they are queued for processing --- process_delay = 1, -- in seconds, how often the current batch is closed & queued --- flush_timeout = 2, -- in seconds, how much time passes without activity before the current batch is closed and queued --- max_queued_batches = 100, -- max number of batches that can be queued before the oldest batch is dropped when a new one is queued +-- retry_count = 0, -- number of times to retry processing +-- batch_max_size = 1000, -- max number of entries that can be queued before they are queued for processing +-- process_delay = 1, -- in seconds, how often the current batch is closed & queued +-- flush_timeout = 2, -- in seconds, how much time passes without activity before the current batch is closed and queued -- } -- ) -- @@ -70,9 +68,11 @@ local timer_at = ngx.timer.at local remove = table.remove local type = type local huge = math.huge +local fmt = string.format local min = math.min local now = ngx.now local ERR = ngx.ERR +local ngx_log = ngx.log local DEBUG = ngx.DEBUG local WARN = ngx.WARN @@ -100,10 +100,10 @@ local process local function schedule_flush(self) local ok, err = timer_at(self.flush_timeout/1000, flush, self) if not ok then - self:log(ERR, "failed to create delayed flush timer: %s", err) + ngx_log(ERR, "failed to create delayed flush timer: ", err) return end - --self:log(DEBUG, "delayed timer created") + --ngx_log(DEBUG, "delayed timer created") self.flush_scheduled = true end @@ -113,10 +113,10 @@ end -- @param self Queue -- @param batch: table with `entries` and `retries` counter -- @param delay number: timer delay in seconds -local function schedule_process(self, delay) - local ok, err = timer_at(delay, process, self) +local function schedule_process(self, batch, delay) + local ok, err = timer_at(delay, process, self, batch) if not ok then - self:log(ERR, "failed to create process timer: %s", err) + ngx_log(ERR, "failed to create process timer: ", err) return end self.process_scheduled = true @@ -147,13 +147,13 @@ flush = function(premature, self) if get_now() - self.last_t < self.flush_timeout then -- flushing reported: we had activity - self:log(DEBUG, "[flush] queue had activity, delaying flush") + ngx_log(DEBUG, "[flush] queue had activity, delaying flush") schedule_flush(self) return end -- no activity and timeout reached - self:log(DEBUG, "[flush] queue had no activity, flushing triggered by flush_timeout") + ngx_log(DEBUG, "[flush] queue had no activity, flushing triggered by flush_timeout") self:flush() self.flush_scheduled = false end @@ -165,31 +165,27 @@ end -- @param self Queue -- @param batch: table with `entries` and `retries` counter -- @return nothing -process = function(premature, self) +process = function(premature, self, batch) if premature then return end - local batch = self.batch_queue[1] - if not batch then - self:log(WARN, "queue process called but no batches to be processed") - return - end - local next_retry_delay local ok, err = self.process(batch.entries) if ok then -- success, reset retry delays self.retry_delay = 1 next_retry_delay = 0 - remove(self.batch_queue, 1) + else batch.retries = batch.retries + 1 if batch.retries < self.retry_count then - self:log(WARN, "failed to process entries: %s", tostring(err)) + ngx_log(WARN, "failed to process entries: ", tostring(err)) + -- queue our data for processing again, at the end of the queue + 
self.batch_queue[#self.batch_queue + 1] = batch else - self:log(ERR, "entry batch was already tried %d times, dropping it", batch.retries) - remove(self.batch_queue, 1) + ngx_log(ERR, fmt("entry batch was already tried %d times, dropping it", + batch.retries)) end self.retry_delay = self.retry_delay + 1 @@ -197,8 +193,10 @@ process = function(premature, self) end if #self.batch_queue > 0 then -- more to process? - self:log(DEBUG, "processing oldest data, %d still queued", #self.batch_queue) - schedule_process(self, next_retry_delay) + ngx_log(DEBUG, fmt("processing oldest data, %d still queued", + #self.batch_queue - 1)) + local oldest_batch = remove(self.batch_queue, 1) + schedule_process(self, oldest_batch, next_retry_delay) return end @@ -220,15 +218,13 @@ end -- @param opts table, optionally including -- `retry_count`, `flush_timeout`, `batch_max_size` and `process_delay` -- @return table: a Queue object. -function Queue.new(name, process, opts) +function Queue.new(process, opts) opts = opts or {} - assert(type(name) == "string", - "arg #1 (name) must be a string") assert(type(process) == "function", - "arg #2 (process) must be a function") + "arg #1 (process) must be a function") assert(type(opts) == "table", - "arg #3 (opts) must be a table") + "arg #2 (opts) must be a table") assert(opts.retry_count == nil or type(opts.retry_count) == "number", "retry_count must be a number") assert(opts.flush_timeout == nil or type(opts.flush_timeout) == "number", @@ -237,11 +233,8 @@ function Queue.new(name, process, opts) "batch_max_size must be a number") assert(opts.process_delay == nil or type(opts.batch_max_size) == "number", "process_delay must be a number") - assert(opts.max_queued_batches == nil or type(opts.max_queued_batches) == "number", - "max_queued_batches must be a number") local self = { - name = name, process = process, -- flush timeout in milliseconds @@ -249,7 +242,6 @@ function Queue.new(name, process, opts) retry_count = opts.retry_count or 0, batch_max_size = opts.batch_max_size or 1000, process_delay = opts.process_delay or 1, - max_queued_batches = opts.max_queued_batches or (kong.configuration and kong.configuration.max_queued_batches) or 100, retry_delay = 1, @@ -266,17 +258,6 @@ function Queue.new(name, process, opts) end -------------------------------------------------------------------------------- --- Log a message that includes the name of the queue for identification purposes --- @param self Queue --- @param level: log level --- @param formatstring: format string, will get the queue name and ": " prepended --- @param ...: formatter arguments -function Queue:log(level, formatstring, ...) - return ngx.log(level, string.format(self.name .. ": " .. formatstring, unpack({...}))) -end - - ------------------------------------------------------------------------------- -- Add data to the queue -- @param entry the value included in the queue. It can be any Lua value besides nil. 
@@ -288,8 +269,8 @@ function Queue:add(entry) if self.batch_max_size == 1 then -- no batching - self.batch_queue = { { entries = { entry }, retries = 0 } } - schedule_process(self, 0) + local batch = { entries = { entry }, retries = 0 } + schedule_process(self, batch, 0) return true end @@ -323,12 +304,8 @@ function Queue:flush() -- Queue the current batch, if it has at least 1 entry if current_batch_size > 0 then - self:log(DEBUG, "queueing batch for processing (%d entries)", current_batch_size) + ngx_log(DEBUG, "queueing batch for processing (", current_batch_size, " entries)") - while #self.batch_queue >= self.max_queued_batches do - self:log(ERR, "exceeded max_queued_batches (%d), dropping oldest", self.max_queued_batches) - remove(self.batch_queue, 1) - end self.batch_queue[#self.batch_queue + 1] = self.current_batch self.current_batch = { entries = {}, retries = 0 } end @@ -337,8 +314,10 @@ function Queue:flush() -- in the future. This will keep calling itself in the future until -- the queue is empty if #self.batch_queue > 0 and not self.process_scheduled then - self:log(DEBUG, "processing oldest entry, %d still queued", #self.batch_queue) - schedule_process(self, self.process_delay) + ngx_log(DEBUG, fmt("processing oldest entry, %d still queued", + #self.batch_queue - 1)) + local oldest_batch = remove(self.batch_queue, 1) + schedule_process(self, oldest_batch, self.process_delay) end return true diff --git a/spec/01-unit/27-batch_queue_spec.lua b/spec/01-unit/27-batch_queue_spec.lua deleted file mode 100644 index 38b1edcb989..00000000000 --- a/spec/01-unit/27-batch_queue_spec.lua +++ /dev/null @@ -1,33 +0,0 @@ - -local BatchQueue = require "kong.tools.batch_queue" -local helpers = require "spec.helpers" - -describe("batch queue", function() - - it("observes the limit parameter", function() - local count = 0 - local last - local function process(entries) - count = count + #entries - last = entries[#entries] - return true - end - - local q = BatchQueue.new("batch-queue-unit-test", process, {max_queued_batches=2, batch_max_size=100, process_delay=0}) - - q:add(1) - q:flush() - q:add(2) - q:flush() - q:add(3) - q:flush() - - helpers.wait_until(function() - ngx.sleep(.1) - return #q.batch_queue == 0 - end, 1) - - assert.equal(2, count) - assert.equal(3, last) - end) -end) From 4ec3aba15b76942977847e1973069ab00a1fb223 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 18 Jul 2024 21:08:59 +0800 Subject: [PATCH 29/42] Revert "Revert "Revert "refactor(handler): trying to make reconfigure more atomic (#9993)""" This reverts commit 2350ba2a05ab5182a28e4e608bb48a38e641cfbb. 
--- kong/runloop/handler.lua | 124 +++++------------------------- kong/runloop/plugins_iterator.lua | 6 +- 2 files changed, 20 insertions(+), 110 deletions(-) diff --git a/kong/runloop/handler.lua b/kong/runloop/handler.lua index 970534f4c18..41d26d5e135 100644 --- a/kong/runloop/handler.lua +++ b/kong/runloop/handler.lua @@ -56,7 +56,6 @@ local ERR = ngx.ERR local CRIT = ngx.CRIT local NOTICE = ngx.NOTICE local WARN = ngx.WARN -local INFO = ngx.INFO local DEBUG = ngx.DEBUG local COMMA = byte(",") local SPACE = byte(" ") @@ -79,10 +78,10 @@ local GLOBAL_QUERY_OPTS = { workspace = ngx.null, show_ws_id = true } local get_plugins_iterator, get_updated_plugins_iterator -local build_plugins_iterator, update_plugins_iterator, replace_plugins_iterator +local build_plugins_iterator, update_plugins_iterator local rebuild_plugins_iterator -local get_updated_router, build_router, update_router, new_router, replace_router +local get_updated_router, build_router, update_router local server_header = meta._SERVER_TOKENS local rebuild_router @@ -366,31 +365,12 @@ local function register_events() local current_plugins_hash local current_balancer_hash - - local now = ngx.now - local update_time = ngx.update_time - local worker_id = ngx.worker.id() - - local exiting = ngx.worker.exiting - local function is_exiting() - if not exiting() then - return false - end - log(NOTICE, "declarative config flip was canceled on worker #", worker_id, - ": process exiting") - return true - end - worker_events.register(function(data) - if is_exiting() then + if ngx.worker.exiting() then + log(NOTICE, "declarative flip config canceled: process exiting") return true end - update_time() - local reconfigure_started_at = now() * 1000 - - log(INFO, "declarative config flip was started on worker #", worker_id) - local default_ws local router_hash local plugins_hash @@ -404,11 +384,8 @@ local function register_events() end local ok, err = concurrency.with_coroutine_mutex(FLIP_CONFIG_OPTS, function() - -- below you are encouraged to yield for cooperative threading - local rebuild_balancer = balancer_hash == nil or balancer_hash ~= current_balancer_hash if rebuild_balancer then - log(DEBUG, "stopping previously started health checkers on worker #", worker_id) balancer.stop_healthcheckers(CLEAR_HEALTH_STATUS_DELAY) end @@ -416,75 +393,30 @@ local function register_events() core_cache:flip() kong.default_workspace = default_ws - ngx.ctx.workspace = default_ws - - local router, err - if router_hash == nil or router_hash ~= current_router_hash then - update_time() - local start = now() * 1000 - - router, err = new_router() - if not router then - return nil, err - end - - update_time() - log(INFO, "building a new router took ", now() * 1000 - start, - " ms on worker #", worker_id) - end + ngx.ctx.workspace = kong.default_workspace - local plugins_iterator if plugins_hash == nil or plugins_hash ~= current_plugins_hash then - update_time() - local start = now() * 1000 - - plugins_iterator, err = PluginsIterator.new() - if not plugins_iterator then - return nil, err - end - - update_time() - log(INFO, "building a new plugins iterator took ", now() * 1000 - start, - " ms on worker #", worker_id) + rebuild_plugins_iterator(PLUGINS_ITERATOR_SYNC_OPTS) + current_plugins_hash = plugins_hash end - -- below you are not supposed to yield and this should be fast and atomic - - -- TODO: we should perhaps only purge the configuration related cache. 
- - log(DEBUG, "flushing caches as part of the config flip on worker #", worker_id) - - if router then - replace_router(router) + if router_hash == nil or router_hash ~= current_router_hash then + rebuild_router(ROUTER_SYNC_OPTS) current_router_hash = router_hash end - if plugins_iterator then - replace_plugins_iterator(plugins_iterator) - current_plugins_hash = plugins_hash - end - if rebuild_balancer then - -- TODO: balancer is a big blob of global state and you cannot easily - -- initialize new balancer and then atomically flip it. - log(DEBUG, "reinitializing balancer with a new configuration on worker #", worker_id) balancer.init() current_balancer_hash = balancer_hash end - update_time() - log(INFO, "declarative config flip took ", now() * 1000 - reconfigure_started_at, - " ms on worker #", worker_id) - declarative.lock() return true end) if not ok then - update_time() - log(ERR, "declarative config flip failed after ", now() * 1000 - reconfigure_started_at, - " ms on worker #", worker_id, ": ", err) + log(ERR, "config flip failed: ", err) end end, "declarative", "flip_config") @@ -652,19 +584,13 @@ do local plugins_iterator - replace_plugins_iterator = function(new_iterator) - plugins_iterator = new_iterator - return true - end - - build_plugins_iterator = function(version) local new_iterator, err = PluginsIterator.new(version) if not new_iterator then return nil, err end - - return replace_plugins_iterator(new_iterator) + plugins_iterator = new_iterator + return true end @@ -833,7 +759,7 @@ do end - new_router = function(version) + build_router = function(version) local db = kong.db local routes, i = {}, 0 @@ -899,17 +825,12 @@ do router_cache_size = cache_size end - local router_new, err = Router.new(routes, router_cache, router_cache_neg) - if not router_new then + local new_router, err = Router.new(routes, router_cache, router_cache_neg) + if not new_router then return nil, "could not create router: " .. 
err end - return router_new - end - - - replace_router = function(router_new, version) - router = router_new + router = new_router if version then router_version = version @@ -926,16 +847,6 @@ do end - build_router = function(version) - local router_new, err = new_router(version) - if not router_new then - return nil, err - end - - return replace_router(router_new, version) - end - - update_router = function() -- we might not need to rebuild the router (if we were not -- the first request in this process to enter this code path) @@ -1220,8 +1131,9 @@ return { name = "flip-config", timeout = rebuild_timeout, } + end - elseif kong.configuration.worker_consistency == "strict" then + if strategy == "off" or kong.configuration.worker_consistency == "strict" then ROUTER_SYNC_OPTS = { name = "router", timeout = rebuild_timeout, diff --git a/kong/runloop/plugins_iterator.lua b/kong/runloop/plugins_iterator.lua index ae174bd4ea4..6e2341c5787 100644 --- a/kong/runloop/plugins_iterator.lua +++ b/kong/runloop/plugins_iterator.lua @@ -426,10 +426,8 @@ end function PluginsIterator.new(version) - if kong.db.strategy ~= "off" then - if not version then - error("version must be given", 2) - end + if not version then + error("version must be given", 2) end loaded_plugins = loaded_plugins or get_loaded_plugins() From dc8e87b9f61a8b5ed0947982df6c592f1d700f79 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 18 Jul 2024 21:08:59 +0800 Subject: [PATCH 30/42] Revert "Revert "Revert "chore(rockspec): bump lua-resty-healthcheck to 1.5.3 (#9756)""" This reverts commit 3bac87de433dc094cb812a8db4d5e2ad78a6cfc0. --- kong-2.8.6-0.rockspec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kong-2.8.6-0.rockspec b/kong-2.8.6-0.rockspec index 73b82bbcd73..866429bb9f6 100644 --- a/kong-2.8.6-0.rockspec +++ b/kong-2.8.6-0.rockspec @@ -33,7 +33,7 @@ dependencies = { "luaxxhash >= 1.0", "lua-protobuf == 0.3.3", "lua-resty-worker-events == 1.0.0", - "lua-resty-healthcheck == 1.5.3", + "lua-resty-healthcheck == 1.5.1", "lua-resty-mlcache == 2.5.0", "lua-messagepack == 0.5.2", "lua-resty-openssl == 0.8.22", From 8e4c60a58efb1553d04419a2c05cb1e7c8cadab6 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 18 Jul 2024 21:08:59 +0800 Subject: [PATCH 31/42] Revert "Revert "Revert "fix(postgres): close socket actively when timeout happens during query (#11480)""" This reverts commit 5b6d9323518928357ffb91ceefbd11bd6c9d41ea. --- CHANGELOG/unreleased/kong/11480.yaml | 7 ---- kong/db/strategies/postgres/connector.lua | 34 ++++++------------ spec/02-integration/03-db/01-db_spec.lua | 44 ----------------------- 3 files changed, 10 insertions(+), 75 deletions(-) delete mode 100644 CHANGELOG/unreleased/kong/11480.yaml diff --git a/CHANGELOG/unreleased/kong/11480.yaml b/CHANGELOG/unreleased/kong/11480.yaml deleted file mode 100644 index 96f39635558..00000000000 --- a/CHANGELOG/unreleased/kong/11480.yaml +++ /dev/null @@ -1,7 +0,0 @@ -message: Fix a problem that abnormal socket connection will be reused when querying Postgres database. 
-type: bugfix -scope: Core -prs: - - 11480 -jiras: - - "FTI-5322" diff --git a/kong/db/strategies/postgres/connector.lua b/kong/db/strategies/postgres/connector.lua index 3d932a51842..e0c8d886074 100644 --- a/kong/db/strategies/postgres/connector.lua +++ b/kong/db/strategies/postgres/connector.lua @@ -497,7 +497,6 @@ function _mt:query(sql, operation) operation = "write" end - local conn, is_new_conn local res, err, partial, num_queries local ok @@ -506,36 +505,23 @@ function _mt:query(sql, operation) return nil, "error acquiring query semaphore: " .. err end - conn = self:get_stored_connection(operation) - if not conn then + local conn = self:get_stored_connection(operation) + if conn then + res, err, partial, num_queries = conn:query(sql) + + else + local connection local config = operation == "write" and self.config or self.config_ro - conn, err = connect(config) - if not conn then + connection, err = connect(config) + if not connection then self:release_query_semaphore_resource(operation) return nil, err end - is_new_conn = true - end - - res, err, partial, num_queries = conn:query(sql) - -- if err is string then either it is a SQL error - -- or it is a socket error, here we abort connections - -- that encounter errors instead of reusing them, for - -- safety reason - if err and type(err) == "string" then - ngx.log(ngx.DEBUG, "SQL query throw error: ", err, ", close connection") - local _, err = conn:disconnect() - if err then - -- We're at the end of the query - just logging if - -- we cannot cleanup the connection - ngx.log(ngx.ERR, "failed to disconnect: ", err) - end - self.store_connection(nil, operation) + res, err, partial, num_queries = connection:query(sql) - elseif is_new_conn then - setkeepalive(conn) + setkeepalive(connection) end self:release_query_semaphore_resource(operation) diff --git a/spec/02-integration/03-db/01-db_spec.lua b/spec/02-integration/03-db/01-db_spec.lua index cd11e9bfdfd..b7b46d7e8b2 100644 --- a/spec/02-integration/03-db/01-db_spec.lua +++ b/spec/02-integration/03-db/01-db_spec.lua @@ -447,50 +447,6 @@ for _, strategy in helpers.each_strategy() do end) end) - describe("#testme :query() [#" .. strategy .. "]", function() - lazy_setup(function() - helpers.get_db_utils(strategy, {}) - end) - - postgres_only("establish new connection when error occurred", function() - ngx.IS_CLI = false - - local conf = utils.deep_copy(helpers.test_conf) - conf.pg_ro_host = conf.pg_host - conf.pg_ro_user = conf.pg_user - - local db, err = DB.new(conf, strategy) - - assert.is_nil(err) - assert.is_table(db) - assert(db:init_connector()) - assert(db:connect()) - - local res, err = db.connector:query("SELECT now();") - assert.not_nil(res) - assert.is_nil(err) - - local old_conn = db.connector:get_stored_connection("write") - assert.not_nil(old_conn) - - local res, err = db.connector:query("SELECT * FROM not_exist_table;") - assert.is_nil(res) - assert.not_nil(err) - - local new_conn = db.connector:get_stored_connection("write") - assert.is_nil(new_conn) - - local res, err = db.connector:query("SELECT now();") - assert.not_nil(res) - assert.is_nil(err) - - local res, err = db.connector:query("SELECT now();") - assert.not_nil(res) - assert.is_nil(err) - - assert(db:close()) - end) - end) describe(":setkeepalive() [#" .. strategy .. 
"]", function() lazy_setup(function() From 2dbd59df2ee85efcaa45e5ec4f4eb0ff8c350f26 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 18 Jul 2024 21:08:59 +0800 Subject: [PATCH 32/42] Revert "feat(templates): bump `dns_stale_ttl` default to 1 hour (#12087)" This reverts commit 4fee6caa8ebb7dcfcbccea4a3d2923e9a50477e4. --- changelog/unreleased/kong/bump_dns_stale_ttl.yml | 3 --- kong.conf.default | 4 +--- kong/templates/kong_defaults.lua | 2 +- 3 files changed, 2 insertions(+), 7 deletions(-) delete mode 100644 changelog/unreleased/kong/bump_dns_stale_ttl.yml diff --git a/changelog/unreleased/kong/bump_dns_stale_ttl.yml b/changelog/unreleased/kong/bump_dns_stale_ttl.yml deleted file mode 100644 index 43ed55cb079..00000000000 --- a/changelog/unreleased/kong/bump_dns_stale_ttl.yml +++ /dev/null @@ -1,3 +0,0 @@ -message: Bump `dns_stale_ttl` default to 1 hour so stale DNS record can be used for longer time in case of resolver downtime. -type: performance -scope: Configuration diff --git a/kong.conf.default b/kong.conf.default index 87e5812afb1..73dd51acd3b 100644 --- a/kong.conf.default +++ b/kong.conf.default @@ -1312,7 +1312,7 @@ # property receives a value (in seconds), it # will override the TTL for all records. -#dns_stale_ttl = 3600 # Defines, in seconds, how long a record will +#dns_stale_ttl = 4 # Defines, in seconds, how long a record will # remain in cache past its TTL. This value # will be used while the new DNS record is # fetched in the background. @@ -1320,8 +1320,6 @@ # record until either the refresh query # completes, or the `dns_stale_ttl` number of # seconds have passed. - # This configuration enables Kong to be more - # resilient during resolver downtime. #dns_cache_size = 10000 # Defines the maximum allowed number of # DNS records stored in memory cache. diff --git a/kong/templates/kong_defaults.lua b/kong/templates/kong_defaults.lua index a440e202b2e..421da77a5ae 100644 --- a/kong/templates/kong_defaults.lua +++ b/kong/templates/kong_defaults.lua @@ -156,7 +156,7 @@ dns_resolver = NONE dns_hostsfile = /etc/hosts dns_order = LAST,SRV,A,CNAME dns_valid_ttl = NONE -dns_stale_ttl = 3600 +dns_stale_ttl = 4 dns_cache_size = 10000 dns_not_found_ttl = 30 dns_error_ttl = 1 From ed464a22a6d25964d5eb592d852baea3dae758fe Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 18 Jul 2024 21:08:59 +0800 Subject: [PATCH 33/42] Revert "fix(dns): eliminate asynchronous timer in `syncQuery()` to prevent deadlock risk (#11900)" This reverts commit 30c178fb45845f9775aeb915a99048bbc89864d1. 
--- .../unreleased/kong/fix_dns_blocking.yml | 3 - kong/resty/dns/client.lua | 143 +++++++++--------- spec/01-unit/21-dns-client/02-client_spec.lua | 22 +-- t/03-dns-client/01-phases.t | 7 +- t/03-dns-client/02-timer-usage.t | 80 +++++----- 5 files changed, 121 insertions(+), 134 deletions(-) delete mode 100644 changelog/unreleased/kong/fix_dns_blocking.yml diff --git a/changelog/unreleased/kong/fix_dns_blocking.yml b/changelog/unreleased/kong/fix_dns_blocking.yml deleted file mode 100644 index a167c5fa165..00000000000 --- a/changelog/unreleased/kong/fix_dns_blocking.yml +++ /dev/null @@ -1,3 +0,0 @@ -message: Eliminate asynchronous timer in syncQuery() to prevent hang risk -type: bugfix -scope: Core diff --git a/kong/resty/dns/client.lua b/kong/resty/dns/client.lua index 913dd3efc81..c5bfc55821f 100644 --- a/kong/resty/dns/client.lua +++ b/kong/resty/dns/client.lua @@ -30,10 +30,10 @@ local time = ngx.now local log = ngx.log local ERR = ngx.ERR local WARN = ngx.WARN -local ALERT = ngx.ALERT local DEBUG = ngx.DEBUG local PREFIX = "[dns-client] " local timer_at = ngx.timer.at +local get_phase = ngx.get_phase local math_min = math.min local math_max = math.max @@ -637,9 +637,7 @@ _M.init = function(options) config = options -- store it in our module level global - -- maximum time to wait for the dns resolver to hit its timeouts - -- + 1s to ensure some delay in timer execution and semaphore return are accounted for - resolve_max_wait = options.timeout / 1000 * options.retrans + 1 + resolve_max_wait = options.timeout / 1000 * options.retrans -- maximum time to wait for the dns resolver to hit its timeouts return true end @@ -723,62 +721,44 @@ local function individualQuery(qname, r_opts, try_list) return result, nil, try_list end -local queue = setmetatable({}, {__mode = "v"}) - -local function enqueue_query(key, qname, r_opts, try_list) - local item = { - key = key, - semaphore = semaphore(), - qname = qname, - r_opts = deepcopy(r_opts), - try_list = try_list, - expire_time = time() + resolve_max_wait, - } - queue[key] = item - return item -end - - -local function dequeue_query(item) - if queue[item.key] == item then - -- query done, but by now many others might be waiting for our result. - -- 1) stop new ones from adding to our lock/semaphore - queue[item.key] = nil - -- 2) release all waiting threads - item.semaphore:post(math_max(item.semaphore:count() * -1, 1)) - item.semaphore = nil - end -end - - -local function queue_get_query(key, try_list) - local item = queue[key] - - if not item then - return nil - end - - -- bug checks: release it actively if the waiting query queue is blocked - if item.expire_time < time() then - local err = "stale query, key:" .. key - add_status_to_try_list(try_list, err) - log(ALERT, PREFIX, err) - dequeue_query(item) - return nil - end - - return item -end - +local queue = setmetatable({}, {__mode = "v"}) -- to be called as a timer-callback, performs a query and returns the results -- in the `item` table. local function executeQuery(premature, item) if premature then return end - item.result, item.err = individualQuery(item.qname, item.r_opts, item.try_list) + local r, err = resolver:new(config) + if not r then + item.result, item.err = r, "failed to create a resolver: " .. 
err + else + --[[ + log(DEBUG, PREFIX, "Query executing: ", item.qname, ":", item.r_opts.qtype, " ", fquery(item)) + --]] + add_status_to_try_list(item.try_list, "querying") + item.result, item.err = r:query(item.qname, item.r_opts) + if item.result then + --[[ + log(DEBUG, PREFIX, "Query answer: ", item.qname, ":", item.r_opts.qtype, " ", fquery(item), + " ", frecord(item.result)) + --]] + parseAnswer(item.qname, item.r_opts.qtype, item.result, item.try_list) + --[[ + log(DEBUG, PREFIX, "Query parsed answer: ", item.qname, ":", item.r_opts.qtype, " ", fquery(item), + " ", frecord(item.result)) + else + log(DEBUG, PREFIX, "Query error: ", item.qname, ":", item.r_opts.qtype, " err=", tostring(err)) + --]] + end + end - dequeue_query(item) + -- query done, but by now many others might be waiting for our result. + -- 1) stop new ones from adding to our lock/semaphore + queue[item.key] = nil + -- 2) release all waiting threads + item.semaphore:post(math_max(item.semaphore:count() * -1, 1)) + item.semaphore = nil + ngx.sleep(0) end @@ -792,7 +772,7 @@ end -- the `semaphore` field will be removed). Upon error it returns `nil+error`. local function asyncQuery(qname, r_opts, try_list) local key = qname..":"..r_opts.qtype - local item = queue_get_query(key, try_list) + local item = queue[key] if item then --[[ log(DEBUG, PREFIX, "Query async (exists): ", key, " ", fquery(item)) @@ -801,7 +781,14 @@ local function asyncQuery(qname, r_opts, try_list) return item -- already in progress, return existing query end - item = enqueue_query(key, qname, r_opts, try_list) + item = { + key = key, + semaphore = semaphore(), + qname = qname, + r_opts = deepcopy(r_opts), + try_list = try_list, + } + queue[key] = item local ok, err = timer_at(0, executeQuery, item) if not ok then @@ -827,24 +814,40 @@ end -- @return `result + nil + try_list`, or `nil + err + try_list` in case of errors local function syncQuery(qname, r_opts, try_list) local key = qname..":"..r_opts.qtype + local item = queue[key] - local item = queue_get_query(key, try_list) - - -- If nothing is in progress, we start a new sync query + -- if nothing is in progress, we start a new async query if not item then - item = enqueue_query(key, qname, r_opts, try_list) + local err + item, err = asyncQuery(qname, r_opts, try_list) + if not item then + return item, err, try_list + end + else + add_status_to_try_list(try_list, "in progress (sync)") + end - item.result, item.err = individualQuery(qname, item.r_opts, try_list) + local supported_semaphore_wait_phases = { + rewrite = true, + access = true, + content = true, + timer = true, + ssl_cert = true, + ssl_session_fetch = true, + } - dequeue_query(item) + local ngx_phase = get_phase() - return item.result, item.err, try_list + if not supported_semaphore_wait_phases[ngx_phase] then + -- phase not supported by `semaphore:wait` + -- return existing query (item) + -- + -- this will avoid: + -- "dns lookup pool exceeded retries" (second try and subsequent retries) + -- "API disabled in the context of init_worker_by_lua" (first try) + return item, nil, try_list end - -- If the query is already in progress, we wait for it. 
- - add_status_to_try_list(try_list, "in progress (sync)") - -- block and wait for the async query to complete local ok, err = item.semaphore:wait(resolve_max_wait) if ok and item.result then @@ -857,14 +860,6 @@ local function syncQuery(qname, r_opts, try_list) return item.result, item.err, try_list end - -- bug checks - if not ok and not item.err then - item.err = err -- only first expired wait() reports error - log(ALERT, PREFIX, "semaphore:wait(", resolve_max_wait, ") failed: ", err, - ", count: ", item.semaphore and item.semaphore:count(), - ", qname: ", qname) - end - err = err or item.err or "unknown" add_status_to_try_list(try_list, "error: "..err) diff --git a/spec/01-unit/21-dns-client/02-client_spec.lua b/spec/01-unit/21-dns-client/02-client_spec.lua index 6a6715db1c7..106e47fde1b 100644 --- a/spec/01-unit/21-dns-client/02-client_spec.lua +++ b/spec/01-unit/21-dns-client/02-client_spec.lua @@ -582,10 +582,7 @@ describe("[DNS client]", function() } })) query_func = function(self, original_query_func, name, options) - -- The first request uses syncQuery not waiting on the - -- aysncQuery timer, so the low-level r:query() could not sleep(5s), - -- it can only sleep(timeout). - ngx.sleep(math.min(timeout, 5)) + ngx.sleep(5) return nil end local start_time = ngx.now() @@ -1748,12 +1745,9 @@ describe("[DNS client]", function() end) it("timeout while waiting", function() - - local timeout = 500 - local ip = "1.4.2.3" -- basically the local function _synchronized_query assert(client.init({ - timeout = timeout, + timeout = 500, retrans = 1, resolvConf = { -- resolv.conf without `search` and `domain` options @@ -1764,7 +1758,7 @@ describe("[DNS client]", function() -- insert a stub thats waits and returns a fixed record local name = TEST_DOMAIN query_func = function() - local ip = ip + local ip = "1.4.2.3" local entry = { { type = client.TYPE_A, @@ -1776,9 +1770,7 @@ describe("[DNS client]", function() touch = 0, expire = gettime() + 10, } - -- wait before we return the results - -- `+ 2` s ensures that the semaphore:wait() expires - sleep(timeout/1000 + 2) + sleep(0.5) -- wait before we return the results return entry end @@ -1808,12 +1800,10 @@ describe("[DNS client]", function() ngx.thread.wait(coros[i]) -- this wait will resume the scheduled ones end - -- results[1~9] are equal, as they all will wait for the first response - for i = 1, 9 do + -- all results are equal, as they all will wait for the first response + for i = 1, 10 do assert.equal("timeout", results[i]) end - -- results[10] comes from synchronous DNS access of the first request - assert.equal(ip, results[10][1]["address"]) end) end) diff --git a/t/03-dns-client/01-phases.t b/t/03-dns-client/01-phases.t index 7f10aa9f619..e12cfab420c 100644 --- a/t/03-dns-client/01-phases.t +++ b/t/03-dns-client/01-phases.t @@ -1,6 +1,6 @@ use Test::Nginx::Socket; -plan tests => repeat_each() * (blocks() * 4 + 1); +plan tests => repeat_each() * (blocks() * 5); workers(6); @@ -59,7 +59,8 @@ qq { GET /t --- response_body answers: nil -err: nil ---- error_log +err: dns client error: 101 empty record received +--- no_error_log [error] +dns lookup pool exceeded retries API disabled in the context of init_worker_by_lua diff --git a/t/03-dns-client/02-timer-usage.t b/t/03-dns-client/02-timer-usage.t index 73c35ccb1c4..24cc32bddb6 100644 --- a/t/03-dns-client/02-timer-usage.t +++ b/t/03-dns-client/02-timer-usage.t @@ -2,72 +2,76 @@ use Test::Nginx::Socket; plan tests => repeat_each() * (blocks() * 5); -workers(1); +workers(6); no_shuffle(); 
run_tests(); __DATA__ -=== TEST 1: stale result triggers async timer + +=== TEST 1: reuse timers for queries of same name, independent on # of workers +--- http_config eval +qq { + init_worker_by_lua_block { + local client = require("kong.resty.dns.client") + assert(client.init({ + nameservers = { "8.8.8.8" }, + hosts = {}, -- empty tables to parse to prevent defaulting to /etc/hosts + resolvConf = {}, -- and resolv.conf files + order = { "A" }, + })) + local host = "httpbin.org" + local typ = client.TYPE_A + for i = 1, 10 do + client.resolve(host, { qtype = typ }) + end + + local host = "mockbin.org" + for i = 1, 10 do + client.resolve(host, { qtype = typ }) + end + + workers = ngx.worker.count() + timers = ngx.timer.pending_count() + } +} --- config location = /t { access_by_lua_block { - -- init local client = require("kong.resty.dns.client") - assert(client.init({ - nameservers = { "127.0.0.53" }, - hosts = {}, -- empty tables to parse to prevent defaulting to /etc/hosts - resolvConf = {}, -- and resolv.conf files - order = { "A" }, - validTtl = 1, - })) - - local host = "konghq.com" + assert(client.init()) + local host = "httpbin.org" local typ = client.TYPE_A + local answers, err = client.resolve(host, { qtype = typ }) - -- first time - - local answers, err, try_list = client.resolve(host, { qtype = typ }) if not answers then ngx.say("failed to resolve: ", err) - return end - ngx.say("first address name: ", answers[1].name) - ngx.say("first try_list: ", tostring(try_list)) - -- sleep to wait for dns record to become stale - ngx.sleep(1.5) + ngx.say("first address name: ", answers[1].name) - -- second time: use stale result and trigger async timer + host = "mockbin.org" + answers, err = client.resolve(host, { qtype = typ }) - answers, err, try_list = client.resolve(host, { qtype = typ }) if not answers then ngx.say("failed to resolve: ", err) - return end + ngx.say("second address name: ", answers[1].name) - ngx.say("second try_list: ", tostring(try_list)) - -- third time: use stale result and find triggered async timer + ngx.say("workers: ", workers) - answers, err, try_list = client.resolve(host, { qtype = typ }) - if not answers then - ngx.say("failed to resolve: ", err) - return - end - ngx.say("third address name: ", answers[1].name) - ngx.say("third try_list: ", tostring(try_list)) + -- should be 2 timers maximum (1 for each hostname) + ngx.say("timers: ", timers) } } --- request GET /t --- response_body -first address name: konghq.com -first try_list: ["(short)konghq.com:1 - cache-miss","konghq.com:1 - cache-miss/querying"] -second address name: konghq.com -second try_list: ["(short)konghq.com:1 - cache-hit/stale","konghq.com:1 - cache-hit/stale/scheduled"] -third address name: konghq.com -third try_list: ["(short)konghq.com:1 - cache-hit/stale","konghq.com:1 - cache-hit/stale/in progress (async)"] +first address name: httpbin.org +second address name: mockbin.org +workers: 6 +timers: 2 --- no_error_log [error] dns lookup pool exceeded retries From 5ac37a479ef9f32a77e672e0a60869b27e1085fe Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 18 Jul 2024 21:08:59 +0800 Subject: [PATCH 34/42] Revert "fix(dns): fix retry and timeout handling (#11386)" This reverts commit 73158170750bb6655f2efcbeb11b50e9bff89a4d. 
--- .../fix_dns_retry_and_timeout_handling.yml | 3 - kong/resty/dns/client.lua | 95 ++++++++------- spec/01-unit/21-dns-client/02-client_spec.lua | 110 ++++-------------- .../lua-resty-dns/resty/dns/resolver.lua | 14 +-- 4 files changed, 82 insertions(+), 140 deletions(-) delete mode 100644 changelog/unreleased/kong/fix_dns_retry_and_timeout_handling.yml diff --git a/changelog/unreleased/kong/fix_dns_retry_and_timeout_handling.yml b/changelog/unreleased/kong/fix_dns_retry_and_timeout_handling.yml deleted file mode 100644 index 359666786c5..00000000000 --- a/changelog/unreleased/kong/fix_dns_retry_and_timeout_handling.yml +++ /dev/null @@ -1,3 +0,0 @@ -message: Update the DNS client to follow configured timeouts in a more predictable manner -type: bugfix -scope: Core diff --git a/kong/resty/dns/client.lua b/kong/resty/dns/client.lua index c5bfc55821f..b361a25c020 100644 --- a/kong/resty/dns/client.lua +++ b/kong/resty/dns/client.lua @@ -360,9 +360,9 @@ end -- @param self the try_list to add to -- @param status string with current status, added to the list for the current try -- @return the try_list -local function add_status_to_try_list(self, status) - local try_list = self[#self].msg - try_list[#try_list + 1] = status +local function try_status(self, status) + local status_list = self[#self].msg + status_list[#status_list + 1] = status return self end @@ -383,7 +383,8 @@ end -- @section resolving -local resolve_max_wait +local poolMaxWait +local poolMaxRetry --- Initialize the client. Can be called multiple times. When called again it -- will clear the cache. @@ -637,7 +638,8 @@ _M.init = function(options) config = options -- store it in our module level global - resolve_max_wait = options.timeout / 1000 * options.retrans -- maximum time to wait for the dns resolver to hit its timeouts + poolMaxRetry = 1 -- do one retry, dns resolver is already doing 'retrans' number of retries on top + poolMaxWait = options.timeout / 1000 * options.retrans -- default is to wait for the dns resolver to hit its timeouts return true end @@ -670,7 +672,7 @@ local function parseAnswer(qname, qtype, answers, try_list) if (answer.type ~= qtype) or (answer.name ~= check_qname) then local key = answer.type..":"..answer.name - add_status_to_try_list(try_list, key .. " removed") + try_status(try_list, key .. " removed") local lst = others[key] if not lst then lst = {} @@ -708,7 +710,7 @@ local function individualQuery(qname, r_opts, try_list) return r, "failed to create a resolver: " .. 
err, try_list end - add_status_to_try_list(try_list, "querying") + try_status(try_list, "querying") local result result, err = r:query(qname, r_opts) @@ -735,7 +737,7 @@ local function executeQuery(premature, item) --[[ log(DEBUG, PREFIX, "Query executing: ", item.qname, ":", item.r_opts.qtype, " ", fquery(item)) --]] - add_status_to_try_list(item.try_list, "querying") + try_status(item.try_list, "querying") item.result, item.err = r:query(item.qname, item.r_opts) if item.result then --[[ @@ -777,7 +779,7 @@ local function asyncQuery(qname, r_opts, try_list) --[[ log(DEBUG, PREFIX, "Query async (exists): ", key, " ", fquery(item)) --]] - add_status_to_try_list(try_list, "in progress (async)") + try_status(try_list, "in progress (async)") return item -- already in progress, return existing query end @@ -799,7 +801,7 @@ local function asyncQuery(qname, r_opts, try_list) --[[ log(DEBUG, PREFIX, "Query async (scheduled): ", key, " ", fquery(item)) --]] - add_status_to_try_list(try_list, "scheduled") + try_status(try_list, "scheduled") return item end @@ -807,24 +809,33 @@ end -- schedules a sync query. -- This will be synchronized, so multiple calls (sync or async) might result in 1 query. --- The maximum delay would be `options.timeout * options.retrans`. +-- The `poolMaxWait` is how long a thread waits for another to complete the query. +-- The `poolMaxRetry` is how often we wait for another query to complete. +-- The maximum delay would be `poolMaxWait * poolMaxRetry`. -- @param qname the name to query for -- @param r_opts a table with the query options -- @param try_list the try_list object to add to -- @return `result + nil + try_list`, or `nil + err + try_list` in case of errors -local function syncQuery(qname, r_opts, try_list) +local function syncQuery(qname, r_opts, try_list, count) local key = qname..":"..r_opts.qtype local item = queue[key] + count = count or 1 -- if nothing is in progress, we start a new async query if not item then local err item, err = asyncQuery(qname, r_opts, try_list) + --[[ + log(DEBUG, PREFIX, "Query sync (new): ", key, " ", fquery(item)," count=", count) + --]] if not item then return item, err, try_list end else - add_status_to_try_list(try_list, "in progress (sync)") + --[[ + log(DEBUG, PREFIX, "Query sync (exists): ", key, " ", fquery(item)," count=", count) + --]] + try_status(try_list, "in progress (sync)") end local supported_semaphore_wait_phases = { @@ -849,7 +860,7 @@ local function syncQuery(qname, r_opts, try_list) end -- block and wait for the async query to complete - local ok, err = item.semaphore:wait(resolve_max_wait) + local ok, err = item.semaphore:wait(poolMaxWait) if ok and item.result then -- we were released, and have a query result from the -- other thread, so all is well, return it @@ -860,16 +871,25 @@ local function syncQuery(qname, r_opts, try_list) return item.result, item.err, try_list end - err = err or item.err or "unknown" - add_status_to_try_list(try_list, "error: "..err) + -- there was an error, either a semaphore timeout, or a lookup error + -- go retry + try_status(try_list, "try "..count.." error: "..(item.err or err or "unknown")) + if count > poolMaxRetry then + --[[ + log(DEBUG, PREFIX, "Query sync (fail): ", key, " ", fquery(item)," retries exceeded. count=", count) + --]] + return nil, "dns lookup pool exceeded retries (" .. + tostring(poolMaxRetry) .. 
"): "..tostring(item.err or err), + try_list + end -- don't block on the same thread again, so remove it from the queue - if queue[key] == item then - queue[key] = nil - end + if queue[key] == item then queue[key] = nil end - -- there was an error, either a semaphore timeout, or a lookup error - return nil, err + --[[ + log(DEBUG, PREFIX, "Query sync (fail): ", key, " ", fquery(item)," retrying. count=", count) + --]] + return syncQuery(qname, r_opts, try_list, count + 1) end -- will lookup a name in the cache, or alternatively query the nameservers. @@ -908,7 +928,7 @@ local function lookup(qname, r_opts, dnsCacheOnly, try_list) try_list = try_add(try_list, qname, r_opts.qtype, "cache-hit") if entry.expired then -- the cached record is stale but usable, so we do a refresh query in the background - add_status_to_try_list(try_list, "stale") + try_status(try_list, "stale") asyncQuery(qname, r_opts, try_list) end @@ -926,7 +946,7 @@ local function check_ipv6(qname, qtype, try_list) local record = cachelookup(qname, qtype) if record then - add_status_to_try_list(try_list, "cached") + try_status(try_list, "cached") return record, nil, try_list end @@ -946,7 +966,7 @@ local function check_ipv6(qname, qtype, try_list) end if qtype == _M.TYPE_AAAA and check:match("^%x%x?%x?%x?:%x%x?%x?%x?:%x%x?%x?%x?:%x%x?%x?%x?:%x%x?%x?%x?:%x%x?%x?%x?:%x%x?%x?%x?:%x%x?%x?%x?$") then - add_status_to_try_list(try_list, "validated") + try_status(try_list, "validated") record = {{ address = qname, type = _M.TYPE_AAAA, @@ -958,7 +978,7 @@ local function check_ipv6(qname, qtype, try_list) else -- not a valid IPv6 address, or a bad type (non ipv6) -- return a "server error" - add_status_to_try_list(try_list, "bad IPv6") + try_status(try_list, "bad IPv6") record = { errcode = 3, errstr = "name error", @@ -979,12 +999,12 @@ local function check_ipv4(qname, qtype, try_list) local record = cachelookup(qname, qtype) if record then - add_status_to_try_list(try_list, "cached") + try_status(try_list, "cached") return record, nil, try_list end if qtype == _M.TYPE_A then - add_status_to_try_list(try_list, "validated") + try_status(try_list, "validated") record = {{ address = qname, type = _M.TYPE_A, @@ -996,7 +1016,7 @@ local function check_ipv4(qname, qtype, try_list) else -- bad query type for this ipv4 address -- return a "server error" - add_status_to_try_list(try_list, "bad IPv4") + try_status(try_list, "bad IPv4") record = { errcode = 3, errstr = "name error", @@ -1140,7 +1160,7 @@ local function resolve(qname, r_opts, dnsCacheOnly, try_list) records = nil -- luacheck: pop err = "recursion detected" - add_status_to_try_list(try_list, "recursion detected") + try_status(try_list, "recursion detected") return nil, err, try_list end end @@ -1152,14 +1172,14 @@ local function resolve(qname, r_opts, dnsCacheOnly, try_list) -- luacheck: push no unused records = nil -- luacheck: pop - try_list = add_status_to_try_list(try_list, "stale") + try_list = try_status(try_list, "stale") else -- a valid non-stale record -- check for CNAME records, and dereferencing the CNAME if (records[1] or EMPTY).type == _M.TYPE_CNAME and qtype ~= _M.TYPE_CNAME then opts.qtype = nil - add_status_to_try_list(try_list, "dereferencing") + try_status(try_list, "dereferencing") return resolve(records[1].cname, opts, dnsCacheOnly, try_list) end @@ -1207,10 +1227,8 @@ local function resolve(qname, r_opts, dnsCacheOnly, try_list) end if not records then -- luacheck: ignore - -- An error has occurred, terminate the lookup process. 
We don't want to try other record types because - -- that would potentially cause us to respond with wrong answers (i.e. the contents of an A record if the - -- query for the SRV record failed due to a network error). - goto failed + -- nothing to do, an error + -- fall through to the next entry in our search sequence elseif records.errcode then -- dns error: fall through to the next entry in our search sequence @@ -1269,7 +1287,7 @@ local function resolve(qname, r_opts, dnsCacheOnly, try_list) if records[1].type == _M.TYPE_CNAME and qtype ~= _M.TYPE_CNAME then -- dereference CNAME opts.qtype = nil - add_status_to_try_list(try_list, "dereferencing") + try_status(try_list, "dereferencing") return resolve(records[1].cname, opts, dnsCacheOnly, try_list) end @@ -1278,9 +1296,8 @@ local function resolve(qname, r_opts, dnsCacheOnly, try_list) end -- we had some error, record it in the status list - add_status_to_try_list(try_list, err) + try_status(try_list, err) end - ::failed:: -- we failed, clear cache and return last error if not dnsCacheOnly then @@ -1490,7 +1507,7 @@ local function toip(qname, port, dnsCacheOnly, try_list) local entry = rec[roundRobinW(rec)] -- our SRV entry might still contain a hostname, so recurse, with found port number local srvport = (entry.port ~= 0 and entry.port) or port -- discard port if it is 0 - add_status_to_try_list(try_list, "dereferencing SRV") + try_status(try_list, "dereferencing SRV") return toip(entry.target, srvport, dnsCacheOnly, try_list) else -- must be A or AAAA diff --git a/spec/01-unit/21-dns-client/02-client_spec.lua b/spec/01-unit/21-dns-client/02-client_spec.lua index 106e47fde1b..586ab0edeb8 100644 --- a/spec/01-unit/21-dns-client/02-client_spec.lua +++ b/spec/01-unit/21-dns-client/02-client_spec.lua @@ -3,13 +3,6 @@ local tempfilename = require("pl.path").tmpname local pretty = require("pl.pretty").write --- Several DNS tests use the actual DNS to verify the client behavior against real name servers. It seems that --- even though we have a DNS mocking system, it is good to have some coverage against actual servers to ensure that --- we're not relying on mocked behavior. We use the domain name kong-gateway-testing.link, which is hosted in Route53 --- in the AWS sandbox, allowing Gateway developers to make additions if required. -local TEST_DOMAIN="kong-gateway-testing.link" - - -- empty records and not found errors should be identical, hence we -- define a constant for that error message local NOT_FOUND_ERROR = "dns server error: 3 name error" @@ -86,7 +79,7 @@ describe("[DNS client]", function() resolvConf = {"nameserver [fe80::1%enp0s20f0u1u1]"}, }) end) - local ip, port = client.toip(TEST_DOMAIN) + local ip, port = client.toip("thijsschreijer.nl") assert.is_nil(ip) assert.not_matches([[failed to parse host name "[fe80::1%enp0s20f0u1u1]": invalid IPv6 address]], port, nil, true) assert.matches([[failed to create a resolver: no nameservers specified]], port, nil, true) @@ -564,68 +557,13 @@ describe("[DNS client]", function() }, list) end) - for retrans in ipairs({1, 2}) do - for _, timeout in ipairs({1, 2}) do - it("correctly observes #timeout of " .. tostring(timeout) .. " seconds with " .. tostring(retrans) .. 
" retries", function() - -- KAG-2300 - https://github.com/Kong/kong/issues/10182 - -- If we encounter a timeout while talking to the DNS server, expect the total timeout to be close to the - -- configured timeout * retrans parameters - assert(client.init({ - resolvConf = { - "nameserver 198.51.100.0", - "domain one.com", - }, - timeout = timeout * 1000, - retrans = retrans, - hosts = { - "127.0.0.1 host" - } - })) - query_func = function(self, original_query_func, name, options) - ngx.sleep(5) - return nil - end - local start_time = ngx.now() - client.resolve("host1.one.com.") - local duration = ngx.now() - start_time - assert.truthy(duration < (timeout * retrans + 1)) - end) - end - end - - -- The domain name below needs to have both a SRV and an A record - local SRV_A_TEST_NAME = "timeouttest."..TEST_DOMAIN - - it("verify correctly set up test DNS entry", function() - assert(client.init({ timeout = 1000, retrans = 2 })) - local answers = client.resolve(SRV_A_TEST_NAME, { qtype = client.TYPE_SRV}) - assert.same(client.TYPE_SRV, answers[1].type) - answers = client.resolve(SRV_A_TEST_NAME, { qtype = client.TYPE_A}) - assert.same(client.TYPE_A, answers[1].type) - end) - - it("does not respond with incorrect answer on transient failure", function() - -- KAG-2300 - https://github.com/Kong/kong/issues/10182 - -- If we encounter a timeout while talking to the DNS server, don't keep trying with other record types - assert(client.init({ timeout = 1000, retrans = 2 })) - query_func = function(self, original_query_func, name, options) - if options.qtype == client.TYPE_SRV then - ngx.sleep(10) - else - return original_query_func(self, name, options) - end - end - local answers = client.resolve(SRV_A_TEST_NAME) - assert.is_nil(answers) - end) - end) it("fetching a record without nameservers errors", function() assert(client.init({ resolvConf = {} })) - local host = TEST_DOMAIN + local host = "thijsschreijer.nl" local typ = client.TYPE_A local answers, err, _ = client.resolve(host, { qtype = typ }) @@ -636,7 +574,7 @@ describe("[DNS client]", function() it("fetching a TXT record", function() assert(client.init()) - local host = "txttest."..TEST_DOMAIN + local host = "txttest.thijsschreijer.nl" local typ = client.TYPE_TXT local answers, err, try_list = client.resolve(host, { qtype = typ }) @@ -649,7 +587,7 @@ describe("[DNS client]", function() it("fetching a CNAME record", function() assert(client.init()) - local host = "smtp."..TEST_DOMAIN + local host = "smtp.thijsschreijer.nl" local typ = client.TYPE_CNAME local answers = assert(client.resolve(host, { qtype = typ })) @@ -661,7 +599,7 @@ describe("[DNS client]", function() it("fetching a CNAME record FQDN", function() assert(client.init()) - local host = "smtp."..TEST_DOMAIN + local host = "smtp.thijsschreijer.nl" local typ = client.TYPE_CNAME local answers = assert(client.resolve(host .. 
".", { qtype = typ })) @@ -673,7 +611,7 @@ describe("[DNS client]", function() it("expire and touch times", function() assert(client.init()) - local host = "txttest."..TEST_DOMAIN + local host = "txttest.thijsschreijer.nl" local typ = client.TYPE_TXT local answers, _, _ = assert(client.resolve(host, { qtype = typ })) @@ -729,7 +667,7 @@ describe("[DNS client]", function() it("fetching multiple A records", function() assert(client.init()) - local host = "atest."..TEST_DOMAIN + local host = "atest.thijsschreijer.nl" local typ = client.TYPE_A local answers = assert(client.resolve(host, { qtype = typ })) @@ -743,7 +681,7 @@ describe("[DNS client]", function() it("fetching multiple A records FQDN", function() assert(client.init()) - local host = "atest."..TEST_DOMAIN + local host = "atest.thijsschreijer.nl" local typ = client.TYPE_A local answers = assert(client.resolve(host .. ".", { qtype = typ })) @@ -772,20 +710,20 @@ describe("[DNS client]", function() This does not affect client side code, as the result is always the final A record. --]] - local host = "smtp."..TEST_DOMAIN + local host = "smtp.thijsschreijer.nl" local typ = client.TYPE_A local answers, _, _ = assert(client.resolve(host)) -- check first CNAME local key1 = client.TYPE_CNAME..":"..host local entry1 = lrucache:get(key1) - assert.are.equal(host, entry1[1].name) -- the 1st record is the original 'smtp.'..TEST_DOMAIN + assert.are.equal(host, entry1[1].name) -- the 1st record is the original 'smtp.thijsschreijer.nl' assert.are.equal(client.TYPE_CNAME, entry1[1].type) -- and that is a CNAME -- check second CNAME local key2 = client.TYPE_CNAME..":"..entry1[1].cname local entry2 = lrucache:get(key2) - assert.are.equal(entry1[1].cname, entry2[1].name) -- the 2nd is the middle 'thuis.'..TEST_DOMAIN + assert.are.equal(entry1[1].cname, entry2[1].name) -- the 2nd is the middle 'thuis.thijsschreijer.nl' assert.are.equal(client.TYPE_CNAME, entry2[1].type) -- and that is also a CNAME -- check second target to match final record @@ -807,7 +745,7 @@ describe("[DNS client]", function() it("fetching multiple SRV records (un-typed)", function() assert(client.init()) - local host = "srvtest."..TEST_DOMAIN + local host = "srvtest.thijsschreijer.nl" local typ = client.TYPE_SRV -- un-typed lookup @@ -825,7 +763,7 @@ describe("[DNS client]", function() assert(client.init({ search = {}, })) local lrucache = client.getcache() - local host = "cname2srv."..TEST_DOMAIN + local host = "cname2srv.thijsschreijer.nl" local typ = client.TYPE_SRV -- un-typed lookup @@ -855,7 +793,7 @@ describe("[DNS client]", function() }, })) - local host = "srvtest."..TEST_DOMAIN + local host = "srvtest.thijsschreijer.nl" local typ = client.TYPE_A --> the entry is SRV not A local answers, err, _ = client.resolve(host, {qtype = typ}) @@ -871,7 +809,7 @@ describe("[DNS client]", function() }, })) - local host = "IsNotHere."..TEST_DOMAIN + local host = "IsNotHere.thijsschreijer.nl" local answers, err, _ = client.resolve(host) assert.is_nil(answers) @@ -1161,7 +1099,7 @@ describe("[DNS client]", function() describe("toip() function", function() it("A/AAAA-record, round-robin",function() assert(client.init({ search = {}, })) - local host = "atest."..TEST_DOMAIN + local host = "atest.thijsschreijer.nl" local answers = assert(client.resolve(host)) answers.last_index = nil -- make sure to clean local ips = {} @@ -1365,12 +1303,11 @@ describe("[DNS client]", function() assert.is_number(port) assert.is_not.equal(0, port) end) - it("port passing if SRV port=0",function() 
assert(client.init({ search = {}, })) local ip, port, host - host = "srvport0."..TEST_DOMAIN + host = "srvport0.thijsschreijer.nl" ip, port = client.toip(host, 10) assert.is_string(ip) assert.is_number(port) @@ -1380,7 +1317,6 @@ describe("[DNS client]", function() assert.is_string(ip) assert.is_nil(port) end) - it("recursive SRV pointing to itself",function() assert(client.init({ resolvConf = { @@ -1389,7 +1325,7 @@ describe("[DNS client]", function() }, })) local ip, record, port, host, err, _ - host = "srvrecurse."..TEST_DOMAIN + host = "srvrecurse.thijsschreijer.nl" -- resolve SRV specific should return the record including its -- recursive entry @@ -1544,7 +1480,7 @@ describe("[DNS client]", function() --empty/error responses should be cached for a configurable time local emptyTtl = 0.1 local staleTtl = 0.1 - local qname = "really.really.really.does.not.exist."..TEST_DOMAIN + local qname = "really.really.really.does.not.exist.thijsschreijer.nl" assert(client.init({ emptyTtl = emptyTtl, staleTtl = staleTtl, @@ -1712,7 +1648,7 @@ describe("[DNS client]", function() -- starting resolving coroutine.yield(coroutine.running()) local result, _, _ = client.resolve( - TEST_DOMAIN, + "thijsschreijer.nl", { qtype = client.TYPE_A } ) table.insert(results, result) @@ -1756,7 +1692,7 @@ describe("[DNS client]", function() })) -- insert a stub thats waits and returns a fixed record - local name = TEST_DOMAIN + local name = "thijsschreijer.nl" query_func = function() local ip = "1.4.2.3" local entry = { @@ -1802,7 +1738,7 @@ describe("[DNS client]", function() -- all results are equal, as they all will wait for the first response for i = 1, 10 do - assert.equal("timeout", results[i]) + assert.equal("dns lookup pool exceeded retries (1): timeout", results[i]) end end) end) @@ -1819,7 +1755,7 @@ describe("[DNS client]", function() -- insert a stub thats waits and returns a fixed record local call_count = 0 - local name = TEST_DOMAIN + local name = "thijsschreijer.nl" query_func = function() local ip = "1.4.2.3" local entry = { diff --git a/spec/fixtures/mocks/lua-resty-dns/resty/dns/resolver.lua b/spec/fixtures/mocks/lua-resty-dns/resty/dns/resolver.lua index f1b83355ab7..75dcea922b9 100644 --- a/spec/fixtures/mocks/lua-resty-dns/resty/dns/resolver.lua +++ b/spec/fixtures/mocks/lua-resty-dns/resty/dns/resolver.lua @@ -95,18 +95,10 @@ resolver.query = function(self, name, options, tries) end if not mocks_only then - -- No mock, so invoke original resolver. Note that if the original resolver fails (i.e. because an - -- invalid domain name like example.com was used), we return an empty result set instead of passing - -- the error up to the caller. This is done so that if the mock contains "A" records (which would - -- be the most common case), the initial query for a SRV record does not fail, but appear not to have - -- yielded any results. This will make dns/client.lua try finding an A record next. - local records, err, tries = old_query(self, name, options, tries) - if records then - return records, err, tries - end + -- no mock, so invoke original resolver + local a, b, c = old_query(self, name, options, tries) + return a, b, c end - - return {}, nil, tries end do From a17f569a379e2155ff3f68d7d3eb3e5010c2e885 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 18 Jul 2024 21:08:59 +0800 Subject: [PATCH 35/42] Revert "perf(request-id): use `proxy_set_header` instead of `ngx.req.set_header` (#11788)" This reverts commit 1a97e630c86beed48fec4f171730d194cedc09db. 
--- kong/runloop/handler.lua | 14 ++++++++++++++ kong/templates/nginx_kong.lua | 18 ------------------ spec/fixtures/custom_nginx.template | 15 --------------- 3 files changed, 14 insertions(+), 33 deletions(-) diff --git a/kong/runloop/handler.lua b/kong/runloop/handler.lua index 41d26d5e135..0ce909baecb 100644 --- a/kong/runloop/handler.lua +++ b/kong/runloop/handler.lua @@ -36,6 +36,7 @@ local log = ngx.log local exit = ngx.exit local exec = ngx.exec local header = ngx.header +local set_header = ngx.req.set_header local timer_at = ngx.timer.at local subsystem = ngx.config.subsystem local clear_header = ngx.req.clear_header @@ -1472,6 +1473,9 @@ return { end, -- Only executed if the `router` module found a route and allows nginx to proxy it. after = function(ctx) + local enabled_headers_upstream = kong.configuration.enabled_headers_upstream + local headers = constants.HEADERS + -- Nginx's behavior when proxying a request with an empty querystring -- `/foo?` is to keep `$is_args` an empty string, hence effectively -- stripping the empty querystring. @@ -1553,6 +1557,16 @@ return { if var.http_proxy_connection then clear_header("Proxy-Connection") end + + -- X-Kong-Request-Id upstream header + local rid, rid_get_err = request_id_get() + if not rid then + log(WARN, "failed to get Request ID: ", rid_get_err) + end + + if enabled_headers_upstream[headers.REQUEST_ID] and rid then + set_header(headers.REQUEST_ID, rid) + end end }, response = { diff --git a/kong/templates/nginx_kong.lua b/kong/templates/nginx_kong.lua index 7aa5e5b7246..84330518720 100644 --- a/kong/templates/nginx_kong.lua +++ b/kong/templates/nginx_kong.lua @@ -205,9 +205,6 @@ server { proxy_set_header X-Forwarded-Path $upstream_x_forwarded_path; proxy_set_header X-Forwarded-Prefix $upstream_x_forwarded_prefix; proxy_set_header X-Real-IP $remote_addr; -> if enabled_headers_upstream["X-Kong-Request-Id"] then - proxy_set_header X-Kong-Request-Id $kong_request_id; -> end proxy_pass_header Server; proxy_pass_header Date; proxy_ssl_name $upstream_host; @@ -239,9 +236,6 @@ server { proxy_set_header X-Forwarded-Path $upstream_x_forwarded_path; proxy_set_header X-Forwarded-Prefix $upstream_x_forwarded_prefix; proxy_set_header X-Real-IP $remote_addr; -> if enabled_headers_upstream["X-Kong-Request-Id"] then - proxy_set_header X-Kong-Request-Id $kong_request_id; -> end proxy_pass_header Server; proxy_pass_header Date; proxy_ssl_name $upstream_host; @@ -273,9 +267,6 @@ server { proxy_set_header X-Forwarded-Path $upstream_x_forwarded_path; proxy_set_header X-Forwarded-Prefix $upstream_x_forwarded_prefix; proxy_set_header X-Real-IP $remote_addr; -> if enabled_headers_upstream["X-Kong-Request-Id"] then - proxy_set_header X-Kong-Request-Id $kong_request_id; -> end proxy_pass_header Server; proxy_pass_header Date; proxy_ssl_name $upstream_host; @@ -307,9 +298,6 @@ server { proxy_set_header X-Forwarded-Path $upstream_x_forwarded_path; proxy_set_header X-Forwarded-Prefix $upstream_x_forwarded_prefix; proxy_set_header X-Real-IP $remote_addr; -> if enabled_headers_upstream["X-Kong-Request-Id"] then - proxy_set_header X-Kong-Request-Id $kong_request_id; -> end proxy_pass_header Server; proxy_pass_header Date; proxy_ssl_name $upstream_host; @@ -334,9 +322,6 @@ server { grpc_set_header X-Forwarded-Path $upstream_x_forwarded_path; grpc_set_header X-Forwarded-Prefix $upstream_x_forwarded_prefix; grpc_set_header X-Real-IP $remote_addr; -> if enabled_headers_upstream["X-Kong-Request-Id"] then - grpc_set_header X-Kong-Request-Id $kong_request_id; -> end 
grpc_pass_header Server; grpc_pass_header Date; grpc_ssl_name $upstream_host; @@ -371,9 +356,6 @@ server { proxy_set_header X-Forwarded-Path $upstream_x_forwarded_path; proxy_set_header X-Forwarded-Prefix $upstream_x_forwarded_prefix; proxy_set_header X-Real-IP $remote_addr; -> if enabled_headers_upstream["X-Kong-Request-Id"] then - proxy_set_header X-Kong-Request-Id $kong_request_id; -> end proxy_pass_header Server; proxy_pass_header Date; proxy_ssl_name $upstream_host; diff --git a/spec/fixtures/custom_nginx.template b/spec/fixtures/custom_nginx.template index 8987e935f2a..22402199ad4 100644 --- a/spec/fixtures/custom_nginx.template +++ b/spec/fixtures/custom_nginx.template @@ -218,9 +218,6 @@ http { proxy_set_header X-Forwarded-Path $upstream_x_forwarded_path; proxy_set_header X-Forwarded-Prefix $upstream_x_forwarded_prefix; proxy_set_header X-Real-IP $remote_addr; -> if enabled_headers_upstream["X-Kong-Request-Id"] then - proxy_set_header X-Kong-Request-Id $kong_request_id; -> end proxy_pass_header Server; proxy_pass_header Date; proxy_ssl_name $upstream_host; @@ -252,9 +249,6 @@ http { proxy_set_header X-Forwarded-Path $upstream_x_forwarded_path; proxy_set_header X-Forwarded-Prefix $upstream_x_forwarded_prefix; proxy_set_header X-Real-IP $remote_addr; -> if enabled_headers_upstream["X-Kong-Request-Id"] then - proxy_set_header X-Kong-Request-Id $kong_request_id; -> end proxy_pass_header Server; proxy_pass_header Date; proxy_ssl_name $upstream_host; @@ -285,9 +279,6 @@ http { proxy_set_header X-Forwarded-Port $upstream_x_forwarded_port; proxy_set_header X-Forwarded-Prefix $upstream_x_forwarded_prefix; proxy_set_header X-Real-IP $remote_addr; -> if enabled_headers_upstream["X-Kong-Request-Id"] then - proxy_set_header X-Kong-Request-Id $kong_request_id; -> end proxy_pass_header Server; proxy_pass_header Date; proxy_ssl_name $upstream_host; @@ -318,9 +309,6 @@ http { proxy_set_header X-Forwarded-Port $upstream_x_forwarded_port; proxy_set_header X-Forwarded-Prefix $upstream_x_forwarded_prefix; proxy_set_header X-Real-IP $remote_addr; -> if enabled_headers_upstream["X-Kong-Request-Id"] then - proxy_set_header X-Kong-Request-Id $kong_request_id; -> end proxy_pass_header Server; proxy_pass_header Date; proxy_ssl_name $upstream_host; @@ -379,9 +367,6 @@ http { proxy_set_header X-Forwarded-Path $upstream_x_forwarded_path; proxy_set_header X-Forwarded-Prefix $upstream_x_forwarded_prefix; proxy_set_header X-Real-IP $remote_addr; -> if enabled_headers_upstream["X-Kong-Request-Id"] then - proxy_set_header X-Kong-Request-Id $kong_request_id; -> end proxy_pass_header Server; proxy_pass_header Date; proxy_ssl_name $upstream_host; From c6c37f95ebed7e1f5de53da7b1c5e46fd8f37ae6 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 18 Jul 2024 21:08:59 +0800 Subject: [PATCH 36/42] Revert "perf(request-id): use `$kong_request_id` for `$request_id` for better performance (#11725)" This reverts commit 88b385b760a85f31451a23e7ab40cc8e94db942f. 
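The hunks below switch the Request ID source back to nginx's built-in `$request_id` variable. As a minimal sketch (not the literal module; the names follow the restored `kong/tracing/request_id.lua` shown further down), the per-request behaviour is read-once-then-cache:

  -- Sketch only: read the ID from the built-in $request_id variable on first
  -- access, then cache it in ngx.ctx for the remainder of the request.
  local function get_request_id()
    local rid = ngx.ctx.request_id
    if not rid then
      rid = ngx.var.request_id    -- nginx-generated hex request identifier
      ngx.ctx.request_id = rid
    end
    return rid
  end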
--- kong/pdk/log.lua | 6 +++--- kong/templates/nginx_kong.lua | 4 ++-- kong/tracing/request_id.lua | 8 +++----- spec/01-unit/10-log_serializer_spec.lua | 2 +- spec/01-unit/26-tracing/03-request-id_spec.lua | 13 ++----------- 5 files changed, 11 insertions(+), 22 deletions(-) diff --git a/kong/pdk/log.lua b/kong/pdk/log.lua index 42ea97c973e..a7ee98b9915 100644 --- a/kong/pdk/log.lua +++ b/kong/pdk/log.lua @@ -918,14 +918,14 @@ local function new_log(namespace, format) if not buf then error(err, 2) end - + for log_lvl_name, log_lvl in pairs(_LEVELS) do - self[log_lvl_name] = gen_log_func(log_lvl, buf) + self[log_lvl_name] = gen_log_func(log_lvl, buf) end self.deprecation = new_deprecation(gen_log_func(_LEVELS.warn, buf, nil, 5)) end - + self.set_format(format) self.inspect = new_inspect(namespace) diff --git a/kong/templates/nginx_kong.lua b/kong/templates/nginx_kong.lua index 84330518720..d2614ac2c56 100644 --- a/kong/templates/nginx_kong.lua +++ b/kong/templates/nginx_kong.lua @@ -58,7 +58,7 @@ init_worker_by_lua_block { log_format kong_log_format '$remote_addr - $remote_user [$time_local] ' '"$request" $status $body_bytes_sent ' '"$http_referer" "$http_user_agent" ' - 'kong_request_id: "$kong_request_id"'; + 'kong_request_id: "$request_id"'; # Load variable indexes lua_kong_load_var_index $args; @@ -128,7 +128,7 @@ server { # Append the kong request id to the error log # https://github.com/Kong/lua-kong-nginx-module#lua_kong_error_log_request_id - lua_kong_error_log_request_id $kong_request_id; + lua_kong_error_log_request_id $request_id; access_log ${{PROXY_ACCESS_LOG}} kong_log_format; error_log ${{PROXY_ERROR_LOG}} ${{LOG_LEVEL}}; diff --git a/kong/tracing/request_id.lua b/kong/tracing/request_id.lua index bab196df1bb..c16f7a5d705 100644 --- a/kong/tracing/request_id.lua +++ b/kong/tracing/request_id.lua @@ -1,6 +1,4 @@ local ngx = ngx -local var = ngx.var -local get_phase = ngx.get_phase local NGX_VAR_PHASES = { set = true, @@ -23,14 +21,14 @@ local function get() local rid = get_ctx_request_id() if not rid then - local phase = get_phase() + local phase = ngx.get_phase() if not NGX_VAR_PHASES[phase] then return nil, "cannot access ngx.var in " .. phase .. 
" phase" end -- first access to the request id for this request: - -- initialize with the value of $kong_request_id - rid = var.kong_request_id + -- initialize with the value of $request_id + rid = ngx.var.request_id ngx.ctx.request_id = rid end diff --git a/spec/01-unit/10-log_serializer_spec.lua b/spec/01-unit/10-log_serializer_spec.lua index 23295579b60..dfd7ec108d1 100644 --- a/spec/01-unit/10-log_serializer_spec.lua +++ b/spec/01-unit/10-log_serializer_spec.lua @@ -20,7 +20,7 @@ describe("kong.log.serialize", function() }, }, var = { - kong_request_id = "1234", + request_id = "1234", request_uri = "/request_uri", upstream_uri = "/upstream_uri", scheme = "http", diff --git a/spec/01-unit/26-tracing/03-request-id_spec.lua b/spec/01-unit/26-tracing/03-request-id_spec.lua index e4b85be593d..7b6bf3537f4 100644 --- a/spec/01-unit/26-tracing/03-request-id_spec.lua +++ b/spec/01-unit/26-tracing/03-request-id_spec.lua @@ -1,13 +1,9 @@ -local function reload_module(name) - package.loaded[name] = nil - return require(name) -end - +local request_id = require "kong.tracing.request_id" local function reset_globals(id) _G.ngx.ctx = {} _G.ngx.var = { - kong_request_id = id, + request_id = id, } _G.ngx.get_phase = function() -- luacheck: ignore return "access" @@ -47,9 +43,6 @@ describe("Request ID unit tests", function() end) it("returns the expected Request ID and caches it in ctx", function() - - local request_id = reload_module("kong.tracing.request_id") - local id, err = request_id.get() assert.is_nil(err) assert.equal(ngx_var_request_id, id) @@ -61,8 +54,6 @@ describe("Request ID unit tests", function() it("fails if accessed from phase that cannot read ngx.var", function() _G.ngx.get_phase = function() return "init" end - local request_id = reload_module("kong.tracing.request_id") - local id, err = request_id.get() assert.is_nil(id) assert.equal("cannot access ngx.var in init phase", err) From 7a70642f0f6a05fadc9ae01bff9f2ff0f274a9ca Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 18 Jul 2024 21:11:05 +0800 Subject: [PATCH 37/42] Revert "feat(request-id): introduce unique Request ID (#11663)" This reverts commit edf183546ca23dc4b4e771b7325b0f483286ad09. 
--- .requirements | 2 +- .../kong/lua_kong_nginx_module_bump.yml | 3 - changelog/unreleased/kong/request_id.yml | 6 - kong-2.8.6-0.rockspec | 2 - kong.conf.default | 18 +- kong/conf_loader/init.lua | 35 +- kong/constants.lua | 1 - kong/error_handlers.lua | 4 +- kong/pdk/log.lua | 15 +- kong/pdk/response.lua | 4 +- kong/plugins/correlation-id/handler.lua | 2 - kong/plugins/zipkin/tracing_headers.lua | 88 +---- kong/runloop/handler.lua | 74 ++-- kong/templates/kong_defaults.lua | 3 +- kong/templates/nginx_kong.lua | 11 +- kong/tools/utils.lua | 7 +- kong/tracing/request_id.lua | 44 --- spec/01-unit/04-prefix_handler_spec.lua | 4 +- spec/01-unit/10-log_serializer_spec.lua | 8 - .../01-unit/26-tracing/03-request-id_spec.lua | 62 ---- spec/02-integration/05-proxy/06-ssl_spec.lua | 8 +- .../05-proxy/12-error_default_type_spec.lua | 25 +- .../05-proxy/13-error_handlers_spec.lua | 2 +- .../05-proxy/18-upstream_tls_spec.lua | 16 +- .../05-proxy/33-request-id-header_spec.lua | 333 ------------------ .../10-go_plugins/01-reports_spec.lua | 10 +- .../14-tracing/04-trace-ids-log_spec.lua | 200 ----------- .../03-plugins/09-key-auth/02-access_spec.lua | 46 +-- .../10-basic-auth/03-access_spec.lua | 18 +- .../10-basic-auth/05-declarative_spec.lua | 3 +- .../11-correlation-id/01-access_spec.lua | 66 +--- .../01-access_spec.lua | 18 +- .../14-request-termination/02-access_spec.lua | 1 - .../17-ip-restriction/02-access_spec.lua | 67 ++-- spec/03-plugins/18-acl/02-access_spec.lua | 51 +-- .../19-hmac-auth/03-access_spec.lua | 6 +- .../23-rate-limiting/04-access_spec.lua | 46 +-- .../02-access_spec.lua | 9 +- .../34-zipkin/tracing_headers_spec.lua | 3 +- t/01-pdk/08-response/13-error.t | 104 +++--- 40 files changed, 204 insertions(+), 1221 deletions(-) delete mode 100644 changelog/unreleased/kong/lua_kong_nginx_module_bump.yml delete mode 100644 changelog/unreleased/kong/request_id.yml delete mode 100644 kong/tracing/request_id.lua delete mode 100644 spec/01-unit/26-tracing/03-request-id_spec.lua delete mode 100644 spec/02-integration/05-proxy/33-request-id-header_spec.lua delete mode 100644 spec/02-integration/14-tracing/04-trace-ids-log_spec.lua diff --git a/.requirements b/.requirements index 4c2a1b0b423..3625afb2c7b 100644 --- a/.requirements +++ b/.requirements @@ -9,7 +9,7 @@ RESTY_PCRE_VERSION=8.45 LIBYAML_VERSION=0.2.5 KONG_GO_PLUGINSERVER_VERSION=v0.6.1 KONG_BUILD_TOOLS_VERSION=4.40.1 -KONG_NGINX_MODULE_BRANCH=0.2.2 +KONG_NGINX_MODULE_BRANCH=0.2.1 PCRE=8.45 OPENSSL=1.1.1o diff --git a/changelog/unreleased/kong/lua_kong_nginx_module_bump.yml b/changelog/unreleased/kong/lua_kong_nginx_module_bump.yml deleted file mode 100644 index 0062a00d4f0..00000000000 --- a/changelog/unreleased/kong/lua_kong_nginx_module_bump.yml +++ /dev/null @@ -1,3 +0,0 @@ -message: Bump lua-kong-nginx-module from 0.2.1 to 0.2.2 -type: dependency -scope: Core diff --git a/changelog/unreleased/kong/request_id.yml b/changelog/unreleased/kong/request_id.yml deleted file mode 100644 index 39d4daa08fc..00000000000 --- a/changelog/unreleased/kong/request_id.yml +++ /dev/null @@ -1,6 +0,0 @@ -message: > - A unique Request ID is now populated in the error log, access log, error templates, - log serializer, and in a new X-Kong-Request-Id header (configurable for upstream/downstream - using the `headers` and `headers_upstream` configuration options). 
-type: feature -scope: Core diff --git a/kong-2.8.6-0.rockspec b/kong-2.8.6-0.rockspec index 866429bb9f6..57da2e05f51 100644 --- a/kong-2.8.6-0.rockspec +++ b/kong-2.8.6-0.rockspec @@ -472,7 +472,5 @@ build = { ["kong.vaults.env"] = "kong/vaults/env/init.lua", ["kong.vaults.env.schema"] = "kong/vaults/env/schema.lua", - - ["kong.tracing.request_id"] = "kong/tracing/request_id.lua", } } diff --git a/kong.conf.default b/kong.conf.default index 73dd51acd3b..4c91e39dd3f 100644 --- a/kong.conf.default +++ b/kong.conf.default @@ -667,7 +667,7 @@ # # See docs for `ssl_cert_key` for detailed usage. -#headers = server_tokens, latency_tokens, X-Kong-Request-Id +#headers = server_tokens, latency_tokens # Comma-separated list of headers Kong should # inject in client responses. # @@ -697,8 +697,6 @@ # This is particularly useful for clients to # distinguish upstream statuses if the # response is rewritten by a plugin. - # - `X-Kong-Request-Id`: Unique identifier of - # the request. # - `server_tokens`: Same as specifying both # `Server` and `Via`. # - `latency_tokens`: Same as specifying @@ -715,20 +713,6 @@ # # Example: `headers = via, latency_tokens` -#headers_upstream = X-Kong-Request-Id - # Comma-separated list of headers Kong should - # inject in requests to upstream. - # - # At this time, the only accepted value is: - # - `X-Kong-Request-Id`: Unique identifier of - # the request. - # - # In addition, this value can be set - # to `off`, which prevents Kong from injecting - # the above header. Note that this - # does not prevent plugins from injecting - # headers of their own. - #trusted_ips = # Defines trusted IP addresses blocks that are # known to send correct `X-Forwarded-*` # headers. diff --git a/kong/conf_loader/init.lua b/kong/conf_loader/init.lua index 4bb20604c06..feb873d99db 100644 --- a/kong/conf_loader/init.lua +++ b/kong/conf_loader/init.lua @@ -107,11 +107,6 @@ local HEADER_KEY_TO_NAME = { [string.lower(HEADERS.ADMIN_LATENCY)] = HEADERS.ADMIN_LATENCY, [string.lower(HEADERS.UPSTREAM_LATENCY)] = HEADERS.UPSTREAM_LATENCY, [string.lower(HEADERS.UPSTREAM_STATUS)] = HEADERS.UPSTREAM_STATUS, - [string.lower(HEADERS.REQUEST_ID)] = HEADERS.REQUEST_ID, -} - -local UPSTREAM_HEADER_KEY_TO_NAME = { - [string.lower(HEADERS.REQUEST_ID)] = HEADERS.REQUEST_ID, } @@ -412,7 +407,6 @@ local CONF_INFERENCES = { upstream_keepalive_idle_timeout = { typ = "number" }, headers = { typ = "array" }, - headers_upstream = { typ = "array" }, trusted_ips = { typ = "array" }, real_ip_header = { typ = "string", @@ -976,15 +970,6 @@ local function check_and_infer(conf, opts) end end - if conf.headers_upstream then - for _, token in ipairs(conf.headers_upstream) do - if token ~= "off" and not UPSTREAM_HEADER_KEY_TO_NAME[string.lower(token)] then - errors[#errors + 1] = fmt("headers_upstream: invalid entry '%s'", - tostring(token)) - end - end - end - if conf.dns_resolver then for _, server in ipairs(conf.dns_resolver) do local dns = utils.normalize_ip(server) @@ -1704,9 +1689,8 @@ local function load(path, custom_conf, opts) do -- load headers configuration - - -- (downstream) local enabled_headers = {} + for _, v in pairs(HEADER_KEY_TO_NAME) do enabled_headers[v] = false end @@ -1732,23 +1716,6 @@ local function load(path, custom_conf, opts) end conf.enabled_headers = setmetatable(enabled_headers, _nop_tostring_mt) - - - -- (upstream) - local enabled_headers_upstream = {} - for _, v in pairs(UPSTREAM_HEADER_KEY_TO_NAME) do - enabled_headers_upstream[v] = false - end - - if #conf.headers_upstream > 0 and 
conf.headers_upstream[1] ~= "off" then - for _, token in ipairs(conf.headers_upstream) do - if token ~= "off" then - enabled_headers_upstream[UPSTREAM_HEADER_KEY_TO_NAME[string.lower(token)]] = true - end - end - end - - conf.enabled_headers_upstream = setmetatable(enabled_headers_upstream, _nop_tostring_mt) end -- load absolute paths diff --git a/kong/constants.lua b/kong/constants.lua index bc41182bd5b..f38bb11fe28 100644 --- a/kong/constants.lua +++ b/kong/constants.lua @@ -107,7 +107,6 @@ local constants = { FORWARDED_PATH = "X-Forwarded-Path", FORWARDED_PREFIX = "X-Forwarded-Prefix", ANONYMOUS = "X-Anonymous-Consumer", - REQUEST_ID = "X-Kong-Request-Id", VIA = "Via", SERVER = "Server" }, diff --git a/kong/error_handlers.lua b/kong/error_handlers.lua index 9054337e218..117756305c6 100644 --- a/kong/error_handlers.lua +++ b/kong/error_handlers.lua @@ -2,7 +2,6 @@ local kong = kong local find = string.find local fmt = string.format local utils = require "kong.tools.utils" -local request_id = require "kong.tracing.request_id" local CONTENT_TYPE = "Content-Type" @@ -46,8 +45,7 @@ return function(ctx) else local mime_type = utils.get_mime_type(accept_header) - local rid = request_id.get() or "" - message = fmt(utils.get_error_template(mime_type), message, rid) + message = fmt(utils.get_error_template(mime_type), message) headers = { [CONTENT_TYPE] = mime_type } end diff --git a/kong/pdk/log.lua b/kong/pdk/log.lua index a7ee98b9915..c68c1641e26 100644 --- a/kong/pdk/log.lua +++ b/kong/pdk/log.lua @@ -32,7 +32,6 @@ local ngx = ngx local kong = kong local check_phase = phase_checker.check local split = utils.split -local request_id_get = require "kong.tracing.request_id".get local _PREFIX = "[kong] " @@ -697,7 +696,6 @@ do -- The following fields are included in the returned table: -- * `client_ip` - client IP address in textual format. -- * `latencies` - request/proxy latencies. - -- * `request.id` - request id. -- * `request.headers` - request headers. -- * `request.method` - request method. -- * `request.querystring` - request query strings. @@ -722,12 +720,6 @@ do -- * `request.tls.cipher` - TLS/SSL cipher used by the connection. -- * `request.tls.client_verify` - mTLS validation result. Contents are the same as described in [$ssl_client_verify](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#var_ssl_client_verify). -- - -- The following field is only present in requests where a tracing plugin (OpenTelemetry or Zipkin) is executed: - -- * `trace_id` - trace ID. - -- - -- The following field is only present in requests where the Correlation ID plugin is executed: - -- * `correlation_id` - correlation ID. - -- -- **Warning:** This function may return sensitive data (e.g., API keys). -- Consider filtering before writing it to unsecured locations. -- @@ -791,7 +783,6 @@ do return edit_result(ctx, { request = { - id = request_id_get() or "", uri = request_uri, url = var.scheme .. "://" .. var.host .. ":" .. host_port .. 
request_uri, querystring = okong.request.get_query(), -- parameters, as a table @@ -918,14 +909,14 @@ local function new_log(namespace, format) if not buf then error(err, 2) end - + for log_lvl_name, log_lvl in pairs(_LEVELS) do - self[log_lvl_name] = gen_log_func(log_lvl, buf) + self[log_lvl_name] = gen_log_func(log_lvl, buf) end self.deprecation = new_deprecation(gen_log_func(_LEVELS.warn, buf, nil, 5)) end - + self.set_format(format) self.inspect = new_inspect(namespace) diff --git a/kong/pdk/response.lua b/kong/pdk/response.lua index bd799bcad4b..4677b64c31f 100644 --- a/kong/pdk/response.lua +++ b/kong/pdk/response.lua @@ -16,7 +16,6 @@ local cjson = require "cjson.safe" local checks = require "kong.pdk.private.checks" local phase_checker = require "kong.pdk.private.phases" local utils = require "kong.tools.utils" -local request_id = require "kong.tracing.request_id" local ngx = ngx @@ -1146,8 +1145,7 @@ local function new(self, major_version) local actual_message = message or HTTP_MESSAGES["s" .. status] or fmt(HTTP_MESSAGES.default, status) - local rid = request_id.get() or "" - body = fmt(utils.get_error_template(content_type), actual_message, rid) + body = fmt(utils.get_error_template(content_type), actual_message) end local ctx = ngx.ctx diff --git a/kong/plugins/correlation-id/handler.lua b/kong/plugins/correlation-id/handler.lua index a7941256076..338da18885f 100644 --- a/kong/plugins/correlation-id/handler.lua +++ b/kong/plugins/correlation-id/handler.lua @@ -62,8 +62,6 @@ function CorrelationIdHandler:access(conf) end end - kong.log.set_serialize_value("correlation_id", correlation_id) - if conf.echo_downstream then -- For later use, to echo it back downstream kong.ctx.plugin.correlation_id = correlation_id diff --git a/kong/plugins/zipkin/tracing_headers.lua b/kong/plugins/zipkin/tracing_headers.lua index c503ab6607e..89c83446957 100644 --- a/kong/plugins/zipkin/tracing_headers.lua +++ b/kong/plugins/zipkin/tracing_headers.lua @@ -364,52 +364,6 @@ local function find_header_type(headers) end --- Performs a table merge to add trace ID formats to the current request's --- trace ID and returns a table containing all the formats. --- --- Plugins can handle different formats of trace ids depending on their headers --- configuration, multiple plugins executions may result in additional formats --- of the current request's trace id. --- --- The `propagation_trace_id_all_fmt` table is stored in `ngx.ctx` to keep the --- list of formats updated for the current request. --- --- Each item in the resulting `propagation_trace_id_all_fmt` table represents a --- format associated with the trace ID for the current request. 
--- --- @param trace_id_new_fmt table containing the trace ID formats to be added --- @returns propagation_trace_id_all_fmt table contains all the formats for --- the current request --- --- @example --- --- propagation_trace_id_all_fmt = { datadog = "1234", --- w3c = "abcd" } --- --- trace_id_new_fmt = { ot = "abcd", --- w3c = "abcd" } --- --- propagation_trace_id_all_fmt = { datadog = "1234", --- ot = "abcd", --- w3c = "abcd" } --- -local function add_trace_id_formats(trace_id_new_fmt) - -- TODO: @samugi - move trace ID table in the unified tracing context - local trace_id_all_fmt = ngx.ctx.propagation_trace_id_all_fmt - if not trace_id_all_fmt then - ngx.ctx.propagation_trace_id_all_fmt = trace_id_new_fmt - return trace_id_new_fmt - end - - -- add new formats to trace ID formats table - for format, value in pairs(trace_id_new_fmt) do - trace_id_all_fmt[format] = value - end - - return trace_id_all_fmt -end - - local function parse(headers, conf_header_type) if conf_header_type == "ignore" then return nil @@ -463,24 +417,9 @@ local function set(conf_header_type, found_header_type, proxy_span, conf_default found_header_type = found_header_type or conf_default_header_type or "b3" - -- contains all the different formats of the current trace ID, with zero or - -- more of the following entries: - -- { - -- ["b3"] = "", -- the trace_id when the request has a b3 or X-B3-TraceId (zipkin) header - -- ["w3c"] = "", -- the trace_id when the request has a W3C header - -- ["jaeger"] = "", -- the trace_id when the request has a jaeger tracing header - -- ["ot"] = "", -- the trace_id when the request has an OpenTelemetry tracing header - -- ["aws"] = "", -- the trace_id when the request has an aws tracing header - -- ["gcp"] = "", -- the trace_id when the request has a gcp tracing header - -- } - local trace_id_formats = {} - if conf_header_type == "b3" or found_header_type == "b3" then - local trace_id = to_hex(proxy_span.trace_id) - trace_id_formats.b3 = trace_id - - set_header("x-b3-traceid", trace_id) + set_header("x-b3-traceid", to_hex(proxy_span.trace_id)) set_header("x-b3-spanid", to_hex(proxy_span.span_id)) if proxy_span.parent_id then set_header("x-b3-parentspanid", to_hex(proxy_span.parent_id)) @@ -494,43 +433,30 @@ local function set(conf_header_type, found_header_type, proxy_span, conf_default end if conf_header_type == "b3-single" or found_header_type == "b3-single" then - local trace_id = to_hex(proxy_span.trace_id) - trace_id_formats.b3 = trace_id - set_header("b3", fmt("%s-%s-%s-%s", - trace_id, + to_hex(proxy_span.trace_id), to_hex(proxy_span.span_id), proxy_span.should_sample and "1" or "0", to_hex(proxy_span.parent_id))) end if conf_header_type == "w3c" or found_header_type == "w3c" then - local trace_id = to_hex(proxy_span.trace_id) - trace_id_formats.w3c = trace_id - set_header("traceparent", fmt("00-%s-%s-%s", - trace_id, + to_hex(proxy_span.trace_id), to_hex(proxy_span.span_id), proxy_span.should_sample and "01" or "00")) end if conf_header_type == "jaeger" or found_header_type == "jaeger" then - local trace_id = to_hex(proxy_span.trace_id) - trace_id_formats.jaeger = trace_id - set_header("uber-trace-id", fmt("%s:%s:%s:%s", - trace_id, + to_hex(proxy_span.trace_id), to_hex(proxy_span.span_id), to_hex(proxy_span.parent_id), proxy_span.should_sample and "01" or "00")) end if conf_header_type == "ot" or found_header_type == "ot" then - local trace_id = to_hex(proxy_span.trace_id) - trace_id_formats.ot = trace_id - - set_header("ot-tracer-traceid", trace_id) - + 
set_header("ot-tracer-traceid", to_hex(proxy_span.trace_id)) set_header("ot-tracer-spanid", to_hex(proxy_span.span_id)) set_header("ot-tracer-sampled", proxy_span.should_sample and "1" or "0") @@ -543,10 +469,6 @@ local function set(conf_header_type, found_header_type, proxy_span, conf_default -- XXX: https://github.com/opentracing/specification/issues/117 set_header("uberctx-"..key, ngx.escape_uri(value)) end - - trace_id_formats = add_trace_id_formats(trace_id_formats) - -- add trace IDs to log serializer output - kong.log.set_serialize_value("trace_id", trace_id_formats) end diff --git a/kong/runloop/handler.lua b/kong/runloop/handler.lua index 0ce909baecb..20a43270d25 100644 --- a/kong/runloop/handler.lua +++ b/kong/runloop/handler.lua @@ -12,38 +12,35 @@ local concurrency = require "kong.concurrency" local declarative = require "kong.db.declarative" local workspaces = require "kong.workspaces" local lrucache = require "resty.lrucache" -local request_id = require "kong.tracing.request_id" local PluginsIterator = require "kong.runloop.plugins_iterator" -local kong = kong -local type = type -local ipairs = ipairs -local tostring = tostring -local tonumber = tonumber -local setmetatable = setmetatable -local sub = string.sub -local byte = string.byte -local gsub = string.gsub -local find = string.find -local lower = string.lower -local fmt = string.format -local ngx = ngx -local var = ngx.var -local log = ngx.log -local exit = ngx.exit -local exec = ngx.exec -local header = ngx.header -local set_header = ngx.req.set_header -local timer_at = ngx.timer.at -local subsystem = ngx.config.subsystem -local clear_header = ngx.req.clear_header -local http_version = ngx.req.http_version -local request_id_get = request_id.get -local unpack = unpack -local escape = require("kong.tools.uri").escape +local kong = kong +local type = type +local ipairs = ipairs +local tostring = tostring +local tonumber = tonumber +local setmetatable = setmetatable +local sub = string.sub +local byte = string.byte +local gsub = string.gsub +local find = string.find +local lower = string.lower +local fmt = string.format +local ngx = ngx +local var = ngx.var +local log = ngx.log +local exit = ngx.exit +local exec = ngx.exec +local header = ngx.header +local timer_at = ngx.timer.at +local subsystem = ngx.config.subsystem +local clear_header = ngx.req.clear_header +local http_version = ngx.req.http_version +local unpack = unpack +local escape = require("kong.tools.uri").escape local is_http_module = subsystem == "http" @@ -1473,9 +1470,6 @@ return { end, -- Only executed if the `router` module found a route and allows nginx to proxy it. after = function(ctx) - local enabled_headers_upstream = kong.configuration.enabled_headers_upstream - local headers = constants.HEADERS - -- Nginx's behavior when proxying a request with an empty querystring -- `/foo?` is to keep `$is_args` an empty string, hence effectively -- stripping the empty querystring. 
@@ -1557,16 +1551,6 @@ return { if var.http_proxy_connection then clear_header("Proxy-Connection") end - - -- X-Kong-Request-Id upstream header - local rid, rid_get_err = request_id_get() - if not rid then - log(WARN, "failed to get Request ID: ", rid_get_err) - end - - if enabled_headers_upstream[headers.REQUEST_ID] and rid then - set_header(headers.REQUEST_ID, rid) - end end }, response = { @@ -1655,16 +1639,6 @@ return { end end end - - -- X-Kong-Request-Id downstream header - local rid, rid_get_err = request_id_get() - if not rid then - log(WARN, "failed to get Request ID: ", rid_get_err) - end - - if enabled_headers[headers.REQUEST_ID] and rid then - header[headers.REQUEST_ID] = rid - end end }, log = { diff --git a/kong/templates/kong_defaults.lua b/kong/templates/kong_defaults.lua index 421da77a5ae..598f4da1992 100644 --- a/kong/templates/kong_defaults.lua +++ b/kong/templates/kong_defaults.lua @@ -49,8 +49,7 @@ admin_ssl_cert = NONE admin_ssl_cert_key = NONE status_ssl_cert = NONE status_ssl_cert_key = NONE -headers = server_tokens, latency_tokens, x-kong-request-id -headers_upstream = x-kong-request-id +headers = server_tokens, latency_tokens trusted_ips = NONE error_default_type = text/plain upstream_keepalive = NONE diff --git a/kong/templates/nginx_kong.lua b/kong/templates/nginx_kong.lua index d2614ac2c56..73e0be2ab07 100644 --- a/kong/templates/nginx_kong.lua +++ b/kong/templates/nginx_kong.lua @@ -55,11 +55,6 @@ init_worker_by_lua_block { } > if (role == "traditional" or role == "data_plane") and #proxy_listeners > 0 then -log_format kong_log_format '$remote_addr - $remote_user [$time_local] ' - '"$request" $status $body_bytes_sent ' - '"$http_referer" "$http_user_agent" ' - 'kong_request_id: "$request_id"'; - # Load variable indexes lua_kong_load_var_index $args; lua_kong_load_var_index $bytes_sent; @@ -126,11 +121,7 @@ server { error_page 400 404 405 408 411 413 414 417 494 /kong_error_handler; error_page 500 502 503 504 /kong_error_handler; - # Append the kong request id to the error log - # https://github.com/Kong/lua-kong-nginx-module#lua_kong_error_log_request_id - lua_kong_error_log_request_id $request_id; - - access_log ${{PROXY_ACCESS_LOG}} kong_log_format; + access_log ${{PROXY_ACCESS_LOG}}; error_log ${{PROXY_ERROR_LOG}} ${{LOG_LEVEL}}; > if proxy_ssl_enabled then diff --git a/kong/tools/utils.lua b/kong/tools/utils.lua index 68821a5d05b..6c92098cce9 100644 --- a/kong/tools/utils.lua +++ b/kong/tools/utils.lua @@ -1289,21 +1289,18 @@ do

   <body>
     <h1>Kong Error</h1>
     <p>%s.</p>
-    <p>request_id: %s</p>
   </body>
 </html>
]], [CONTENT_TYPE_JSON] = [[ { - "message":"%s", - "request_id":"%s" + "message":"%s" }]], - [CONTENT_TYPE_PLAIN] = "%s\nrequest_id: %s\n", + [CONTENT_TYPE_PLAIN] = "%s\n", [CONTENT_TYPE_XML] = [[ %s - %s ]], } diff --git a/kong/tracing/request_id.lua b/kong/tracing/request_id.lua deleted file mode 100644 index c16f7a5d705..00000000000 --- a/kong/tracing/request_id.lua +++ /dev/null @@ -1,44 +0,0 @@ -local ngx = ngx - -local NGX_VAR_PHASES = { - set = true, - rewrite = true, - access = true, - content = true, - header_filter = true, - body_filter = true, - log = true, - balancer = true, -} - - -local function get_ctx_request_id() - return ngx.ctx.request_id -end - - -local function get() - local rid = get_ctx_request_id() - - if not rid then - local phase = ngx.get_phase() - if not NGX_VAR_PHASES[phase] then - return nil, "cannot access ngx.var in " .. phase .. " phase" - end - - -- first access to the request id for this request: - -- initialize with the value of $request_id - rid = ngx.var.request_id - ngx.ctx.request_id = rid - end - - return rid -end - - -return { - get = get, - - -- for unit testing - _get_ctx_request_id = get_ctx_request_id, -} diff --git a/spec/01-unit/04-prefix_handler_spec.lua b/spec/01-unit/04-prefix_handler_spec.lua index 75143b3d20a..8b32b232755 100644 --- a/spec/01-unit/04-prefix_handler_spec.lua +++ b/spec/01-unit/04-prefix_handler_spec.lua @@ -473,7 +473,7 @@ describe("NGINX conf compiler", function() nginx_stream_tcp_nodelay = "on", })) local nginx_conf = prefix_handler.compile_kong_conf(conf) - assert.matches("access_log%s/dev/stdout%skong_log_format;", nginx_conf) + assert.matches("access_log%s/dev/stdout;", nginx_conf) local nginx_conf = prefix_handler.compile_kong_stream_conf(conf) assert.matches("access_log%slogs/access.log%sbasic;", nginx_conf) @@ -483,7 +483,7 @@ describe("NGINX conf compiler", function() nginx_stream_tcp_nodelay = "on", })) local nginx_conf = prefix_handler.compile_kong_conf(conf) - assert.matches("access_log%slogs/access.log%skong_log_format;", nginx_conf) + assert.matches("access_log%slogs/access.log;", nginx_conf) local nginx_conf = prefix_handler.compile_kong_stream_conf(conf) assert.matches("access_log%s/dev/stdout%scustom;", nginx_conf) end) diff --git a/spec/01-unit/10-log_serializer_spec.lua b/spec/01-unit/10-log_serializer_spec.lua index dfd7ec108d1..faf233c3d8e 100644 --- a/spec/01-unit/10-log_serializer_spec.lua +++ b/spec/01-unit/10-log_serializer_spec.lua @@ -20,7 +20,6 @@ describe("kong.log.serialize", function() }, }, var = { - request_id = "1234", request_uri = "/request_uri", upstream_uri = "/upstream_uri", scheme = "http", @@ -43,14 +42,8 @@ describe("kong.log.serialize", function() resp = { get_headers = function() return {header1 = "respheader1", header2 = "respheader2", ["set-cookie"] = "delicious=delicacy"} end }, - get_phase = function() return "access" end, - WARN = ngx.WARN, } - package.loaded["kong.tracing.request_id"] = nil - package.loaded["kong.pdk.log"] = nil - kong.log = require "kong.pdk.log".new(kong) - package.loaded["kong.pdk.request"] = nil local pdk_request = require "kong.pdk.request" kong.request = pdk_request.new(kong) @@ -81,7 +74,6 @@ describe("kong.log.serialize", function() assert.equal("/upstream_uri", res.upstream_uri) assert.equal(200, res.request.size) assert.equal("/request_uri", res.request.uri) - assert.equal("1234", res.request.id) -- Response assert.is_table(res.response) diff --git a/spec/01-unit/26-tracing/03-request-id_spec.lua b/spec/01-unit/26-tracing/03-request-id_spec.lua 
deleted file mode 100644 index 7b6bf3537f4..00000000000 --- a/spec/01-unit/26-tracing/03-request-id_spec.lua +++ /dev/null @@ -1,62 +0,0 @@ -local request_id = require "kong.tracing.request_id" - -local function reset_globals(id) - _G.ngx.ctx = {} - _G.ngx.var = { - request_id = id, - } - _G.ngx.get_phase = function() -- luacheck: ignore - return "access" - end - - _G.kong = { - log = { - notice = function() end, - info = function() end, - }, - } -end - - -describe("Request ID unit tests", function() - local ngx_var_request_id = "1234" - - describe("get()", function() - local old_ngx_ctx - local old_ngx_var - local old_ngx_get_phase - - lazy_setup(function() - old_ngx_ctx = _G.ngx.ctx - old_ngx_var = _G.ngx.var - old_ngx_get_phase = _G.ngx.get_phase - end) - - before_each(function() - reset_globals(ngx_var_request_id) - end) - - lazy_teardown(function() - _G.ngx.ctx = old_ngx_ctx - _G.ngx.var = old_ngx_var - _G.ngx.get_phase = old_ngx_get_phase - end) - - it("returns the expected Request ID and caches it in ctx", function() - local id, err = request_id.get() - assert.is_nil(err) - assert.equal(ngx_var_request_id, id) - - local ctx_request_id = request_id._get_ctx_request_id() - assert.equal(ngx_var_request_id, ctx_request_id) - end) - - it("fails if accessed from phase that cannot read ngx.var", function() - _G.ngx.get_phase = function() return "init" end - - local id, err = request_id.get() - assert.is_nil(id) - assert.equal("cannot access ngx.var in init phase", err) - end) - end) -end) diff --git a/spec/02-integration/05-proxy/06-ssl_spec.lua b/spec/02-integration/05-proxy/06-ssl_spec.lua index c6833d8f3a3..1a6391cceed 100644 --- a/spec/02-integration/05-proxy/06-ssl_spec.lua +++ b/spec/02-integration/05-proxy/06-ssl_spec.lua @@ -232,7 +232,7 @@ for _, strategy in helpers.each_strategy() do }, }) local body = assert.res_status(502, res) - assert.matches("An invalid response was received from the upstream server", body) + assert.equal("An invalid response was received from the upstream server", body) assert.logfile().has.line("upstream SSL certificate verify error: " .. "(20:unable to get local issuer certificate) " .. "while SSL handshaking to upstream", true, 2) @@ -322,8 +322,7 @@ for _, strategy in helpers.each_strategy() do local body = assert.res_status(426, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Please use HTTPS protocol", json.message) + assert.same({ message = "Please use HTTPS protocol" }, json) assert.contains("Upgrade", res.headers.connection) assert.equal("TLS/1.2, HTTP/1.1", res.headers.upgrade) @@ -338,8 +337,7 @@ for _, strategy in helpers.each_strategy() do body = assert.res_status(426, res) json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Please use HTTPS protocol", json.message) + assert.same({ message = "Please use HTTPS protocol" }, json) assert.contains("Upgrade", res.headers.connection) assert.equal("TLS/1.2, HTTP/1.1", res.headers.upgrade) end) diff --git a/spec/02-integration/05-proxy/12-error_default_type_spec.lua b/spec/02-integration/05-proxy/12-error_default_type_spec.lua index 8adbfc63e0d..894ef706694 100644 --- a/spec/02-integration/05-proxy/12-error_default_type_spec.lua +++ b/spec/02-integration/05-proxy/12-error_default_type_spec.lua @@ -1,13 +1,11 @@ local helpers = require "spec.helpers" local cjson = require "cjson" -local constants = require "kong.constants" local XML_TEMPLATE = [[ %s - %s ]] @@ -21,14 +19,10 @@ local HTML_TEMPLATE = [[

   <body>
     <h1>Kong Error</h1>
     <p>%s.</p>
-    <p>request_id: %s</p>
   </body>
 </html>
]] -local PLAIN_TEMPLATE = "%s\nrequest_id: %s" - - local RESPONSE_CODE = 504 local RESPONSE_MESSAGE = "The upstream server is timing out" @@ -71,7 +65,6 @@ for _, strategy in helpers.each_strategy() do before_each(function() proxy_client = helpers.proxy_client() - helpers.clean_logfile() end) after_each(function() @@ -90,8 +83,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(RESPONSE_CODE, res) - local request_id = res.headers[constants.HEADERS.REQUEST_ID] - local html_message = string.format(HTML_TEMPLATE, RESPONSE_MESSAGE, request_id) + local html_message = string.format(HTML_TEMPLATE, RESPONSE_MESSAGE) assert.equal(html_message, body) end) @@ -139,7 +131,6 @@ for _, strategy in helpers.each_strategy() do before_each(function() proxy_client = helpers.proxy_client() - helpers.clean_logfile() end) after_each(function() @@ -158,16 +149,10 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(RESPONSE_CODE, res) - local request_id = res.headers[constants.HEADERS.REQUEST_ID] - local plain_message = string.format(PLAIN_TEMPLATE, RESPONSE_MESSAGE, request_id) - assert.equals(plain_message, body) + assert.equal(RESPONSE_MESSAGE, body) end) describe("Accept header modified Content-Type", function() - before_each(function() - helpers.clean_logfile() - end) - it("text/html", function() local res = assert(proxy_client:send { method = "GET", @@ -178,8 +163,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(RESPONSE_CODE, res) - local request_id = res.headers[constants.HEADERS.REQUEST_ID] - local html_message = string.format(HTML_TEMPLATE, RESPONSE_MESSAGE, request_id) + local html_message = string.format(HTML_TEMPLATE, RESPONSE_MESSAGE) assert.equal(html_message, body) end) @@ -207,8 +191,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(RESPONSE_CODE, res) - local request_id = res.headers[constants.HEADERS.REQUEST_ID] - local xml_message = string.format(XML_TEMPLATE, RESPONSE_MESSAGE, request_id) + local xml_message = string.format(XML_TEMPLATE, RESPONSE_MESSAGE) assert.equal(xml_message, body) end) end) diff --git a/spec/02-integration/05-proxy/13-error_handlers_spec.lua b/spec/02-integration/05-proxy/13-error_handlers_spec.lua index 4fdd6362ebe..fd78481ee1c 100644 --- a/spec/02-integration/05-proxy/13-error_handlers_spec.lua +++ b/spec/02-integration/05-proxy/13-error_handlers_spec.lua @@ -35,7 +35,7 @@ describe("Proxy error handlers", function() assert.res_status(400, res) local body = res:read_body() assert.matches("kong/", res.headers.server, nil, true) - assert.matches("Bad request\nrequest_id: %x+\n", body) + assert.equal("Bad request\n", body) end) it("does not expose OpenResty version", function() diff --git a/spec/02-integration/05-proxy/18-upstream_tls_spec.lua b/spec/02-integration/05-proxy/18-upstream_tls_spec.lua index ca45816f311..c438af0c4ca 100644 --- a/spec/02-integration/05-proxy/18-upstream_tls_spec.lua +++ b/spec/02-integration/05-proxy/18-upstream_tls_spec.lua @@ -177,7 +177,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(200, res) - assert.matches("it works", body) + assert.equals("it works", body) end) it("remove client_certificate removes access", function() @@ -239,7 +239,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(200, res) - assert.matches("it works", body) + assert.equals("it works", body) end) it("remove client_certificate removes access", function() @@ -294,7 
+294,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(200, res) - assert.matches("it works", body) + assert.equals("it works", body) end) end) end) @@ -310,7 +310,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(200, res) - assert.matches("it works", body) + assert.equals("it works", body) end) it("#db turn it on, request is blocked", function() @@ -331,7 +331,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(502, res) - assert.matches("An invalid response was received from the upstream server", body) + assert.equals("An invalid response was received from the upstream server", body) end) end) @@ -355,7 +355,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(200, res) - assert.matches("it works", body) + assert.equals("it works", body) end) end) @@ -390,7 +390,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(502, res) - assert.matches("An invalid response was received from the upstream server", body) + assert.equals("An invalid response was received from the upstream server", body) end) it("request is allowed through if depth limit is sufficient", function() @@ -411,7 +411,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(200, res) - assert.matches("it works", body) + assert.equals("it works", body) end) end) end) diff --git a/spec/02-integration/05-proxy/33-request-id-header_spec.lua b/spec/02-integration/05-proxy/33-request-id-header_spec.lua deleted file mode 100644 index f8e0f222425..00000000000 --- a/spec/02-integration/05-proxy/33-request-id-header_spec.lua +++ /dev/null @@ -1,333 +0,0 @@ -local helpers = require "spec.helpers" -local constants = require "kong.constants" -local cjson = require "cjson" - - -local function setup_db() - local bp = helpers.get_db_utils(nil, { - "routes", - "services", - "plugins", - }) - - local service = bp.services:insert { - host = helpers.mock_upstream_host, - port = helpers.mock_upstream_port, - protocol = helpers.mock_upstream_protocol, - } - - bp.routes:insert { - protocols = { "http" }, - hosts = { "request_id" }, - service = service, - } - - local route_post_func = bp.routes:insert { - protocols = { "http" }, - hosts = { "post-function-access" }, - service = service, - } - - bp.plugins:insert { - name = "post-function", - route = route_post_func, - config = { access = { - "ngx.req.set_header('" .. constants.HEADERS.REQUEST_ID .. "', 'overwritten')" - }} - } - - local route_post_func_2 = bp.routes:insert { - protocols = { "http" }, - hosts = { "post-function-header-filter" }, - service = service, - } - - bp.plugins:insert { - name = "post-function", - route = route_post_func_2, - config = { header_filter = { - "ngx.header['" .. constants.HEADERS.REQUEST_ID .. "'] = 'overwritten'" - }} - } - -end - - -describe(constants.HEADERS.REQUEST_ID .. 
" header", function() - local client - - describe("(downstream)", function() - describe("with default configuration", function() - lazy_setup(function() - setup_db() - - assert(helpers.start_kong { - nginx_conf = "spec/fixtures/custom_nginx.template", - plugins = "bundled", - }) - - client = helpers.proxy_client() - end) - - lazy_teardown(function() - if client then - client:close() - end - - helpers.stop_kong() - end) - - it("contains the expected value", function() - local res = assert(client:send { - method = "GET", - path = "/", - headers = { - host = "request_id", - } - }) - assert.res_status(200, res) - assert.matches("^[0-9a-f]+$", res.headers[constants.HEADERS.REQUEST_ID]) - end) - - it("should be populated when no API matched", function() - local res = assert(client:send { - method = "GET", - path = "/", - headers = { - host = "404.com", - } - }) - local body = assert.res_status(404, res) - body = cjson.decode(body) - - assert.matches(body.message, "no Route matched with those values") - assert.matches("^[0-9a-f]+$", res.headers[constants.HEADERS.REQUEST_ID]) - end) - - it("overwrites value set by plugin", function() - local res = assert(client:send { - method = "GET", - path = "/", - headers = { - host = "post-function-header-filter", - } - }) - assert.res_status(200, res) - - local downstream_header = res.headers[constants.HEADERS.REQUEST_ID] - assert.not_nil(downstream_header) - assert.matches("^[0-9a-f]+$", downstream_header) - assert.not_equal("overwritten", downstream_header) - end) - end) - - - describe("with configuration [headers=X-Kong-Request-Id]", function() - lazy_setup(function() - setup_db() - - assert(helpers.start_kong { - nginx_conf = "spec/fixtures/custom_nginx.template", - headers = "X-Kong-Request-Id", - }) - - client = helpers.proxy_client() - end) - - lazy_teardown(function() - if client then - client:close() - end - - helpers.stop_kong() - end) - - it("contains the expected value", function() - local res = assert(client:send { - method = "GET", - path = "/", - headers = { - host = "request_id", - } - }) - assert.res_status(200, res) - assert.matches("^[0-9a-f]+$", res.headers[constants.HEADERS.REQUEST_ID]) - end) - end) - - describe("is not injected with configuration [headers=off]", function() - lazy_setup(function() - setup_db() - - assert(helpers.start_kong { - nginx_conf = "spec/fixtures/custom_nginx.template", - headers = "off", - }) - - client = helpers.proxy_client() - end) - - lazy_teardown(function() - if client then - client:close() - end - - helpers.stop_kong() - end) - - it("is nil", function() - local res = assert(client:send { - method = "GET", - path = "/", - headers = { - host = "request_id", - } - }) - assert.res_status(200, res) - assert.is_nil(res.headers[constants.HEADERS.REQUEST_ID]) - end) - end) - end) - - describe("(upstream)", function() - describe("default configuration", function() - lazy_setup(function() - setup_db() - - assert(helpers.start_kong { - nginx_conf = "spec/fixtures/custom_nginx.template", - plugins = "bundled", - }) - - client = helpers.proxy_client() - end) - - lazy_teardown(function() - if client then - client:close() - end - - helpers.stop_kong() - end) - - it("contains the expected value", function() - local res = assert(client:send { - method = "GET", - path = "/anything", - headers = { - host = "request_id", - } - }) - local body = assert.res_status(200, res) - body = cjson.decode(body) - assert.matches("^[0-9a-f]+$", body.headers[string.lower(constants.HEADERS.REQUEST_ID)]) - end) - - it("overwrites client 
value if any", function() - local client_header_value = "client_value" - local res = assert(client:send { - method = "GET", - path = "/anything", - headers = { - host = "request_id", - ["X-Kong-Request-Id"] = client_header_value - } - }) - - local body = assert.res_status(200, res) - body = cjson.decode(body) - local upstream_received_header = body.headers[string.lower(constants.HEADERS.REQUEST_ID)] - - assert.matches("^[0-9a-f]+$", upstream_received_header) - assert.not_equal(client_header_value, upstream_received_header) - end) - - it("overwrites value set by plugin", function() - local res = assert(client:send { - method = "GET", - path = "/", - headers = { - host = "post-function-access", - } - }) - - local body = assert.res_status(200, res) - body = cjson.decode(body) - local upstream_received_header = body.headers[string.lower(constants.HEADERS.REQUEST_ID)] - - assert.matches("^[0-9a-f]+$", upstream_received_header) - assert.not_equal("overwritten", upstream_received_header) - end) - end) - - - describe("is injected with configuration [headers=X-Kong-Request-Id]", function() - lazy_setup(function() - setup_db() - - assert(helpers.start_kong { - nginx_conf = "spec/fixtures/custom_nginx.template", - headers_upstream = "X-Kong-Request-Id", - }) - - client = helpers.proxy_client() - end) - - lazy_teardown(function() - if client then - client:close() - end - - helpers.stop_kong() - end) - - it("contains the expected value", function() - local res = assert(client:send { - method = "GET", - path = "/", - headers = { - host = "request_id", - } - }) - local body = assert.res_status(200, res) - body = cjson.decode(body) - assert.matches("^[0-9a-f]+$", body.headers[string.lower(constants.HEADERS.REQUEST_ID)]) - end) - end) - - - describe("is not injected with configuration [headers=off]", function() - lazy_setup(function() - setup_db() - - assert(helpers.start_kong { - nginx_conf = "spec/fixtures/custom_nginx.template", - headers_upstream = "off", - }) - - client = helpers.proxy_client() - end) - - lazy_teardown(function() - if client then - client:close() - end - - helpers.stop_kong() - end) - - it("is nil", function() - local res = assert(client:send { - method = "GET", - path = "/", - headers = { - host = "request_id", - } - }) - local body = assert.res_status(200, res) - body = cjson.decode(body) - assert.is_nil(body.headers[string.lower(constants.HEADERS.REQUEST_ID)]) - end) - end) - end) -end) diff --git a/spec/02-integration/10-go_plugins/01-reports_spec.lua b/spec/02-integration/10-go_plugins/01-reports_spec.lua index 2cb60ca30a4..719c98eca7d 100644 --- a/spec/02-integration/10-go_plugins/01-reports_spec.lua +++ b/spec/02-integration/10-go_plugins/01-reports_spec.lua @@ -145,11 +145,11 @@ for _, strategy in helpers.each_strategy() do local logs = pl_file.read(cfg.prefix .. "/" .. 
cfg.proxy_error_log) for _, logpat in ipairs{ - "access_start: %d%d+", - "shared_msg: Kong!", - "request_header: this", - "response_header: mock_upstream", - "serialized:%b{}", + "access_start: %d%d+\n", + "shared_msg: Kong!\n", + "request_header: this\n", + "response_header: mock_upstream\n", + "serialized:%b{}\n", } do assert.match(logpat, logs) end diff --git a/spec/02-integration/14-tracing/04-trace-ids-log_spec.lua b/spec/02-integration/14-tracing/04-trace-ids-log_spec.lua deleted file mode 100644 index 41783de86fc..00000000000 --- a/spec/02-integration/14-tracing/04-trace-ids-log_spec.lua +++ /dev/null @@ -1,200 +0,0 @@ -local helpers = require "spec.helpers" -local cjson = require "cjson.safe" -local pl_path = require "pl.path" -local pl_file = require "pl.file" -local pl_stringx = require "pl.stringx" - -local FILE_LOG_PATH = os.tmpname() - -local fmt = string.format - -local trace_id_hex_128 = "4bf92000000000000000000000000001" -local span_id = "0000000000000003" -local trace_id_hex_pattern = "^%x+$" - - -local tracing_headers = { - { - type = "b3", - serializer_key = "b3", - name = "X-B3-TraceId", - value = trace_id_hex_128, - trace_id = trace_id_hex_128, - trace_id_pattern = trace_id_hex_pattern, - }, - { - type = "b3-single", - serializer_key = "b3", - name = "b3", - value = fmt("%s-%s-1-%s", trace_id_hex_128, span_id, span_id), - trace_id = trace_id_hex_128, - trace_id_pattern = trace_id_hex_pattern, - }, - { - type = "jaeger", - serializer_key = "jaeger", - name = "uber-trace-id", - value = fmt("%s:%s:%s:%s", trace_id_hex_128, span_id, span_id, "01"), - trace_id = trace_id_hex_128, - trace_id_pattern = trace_id_hex_pattern, - }, - { - type = "w3c", - serializer_key = "w3c", - name = "traceparent", - value = fmt("00-%s-%s-01", trace_id_hex_128, span_id), - trace_id = trace_id_hex_128, - trace_id_pattern = trace_id_hex_pattern, - }, - { - type = "ot", - serializer_key = "ot", - name = "ot-tracer-traceid", - value = trace_id_hex_128, - trace_id = trace_id_hex_128, - trace_id_pattern = trace_id_hex_pattern, - }, -} - -local function wait_json_log() - helpers.wait_until(function() - return pl_path.exists(FILE_LOG_PATH) and pl_path.getsize(FILE_LOG_PATH) > 0 - end, 10) - - local log = pl_file.read(FILE_LOG_PATH) - return cjson.decode(pl_stringx.strip(log):match("%b{}")) -end - -for _, strategy in helpers.each_strategy() do - local proxy_client - - for _, config_header in ipairs(tracing_headers) do - describe("Trace IDs log serializer spec #" .. 
strategy, function() - lazy_setup(function() - local bp, _ = assert(helpers.get_db_utils(strategy, { - "services", - "routes", - "plugins", - })) - - local service = bp.services:insert() - - local zipkin_route = bp.routes:insert({ - service = service, - hosts = { "zipkin" }, - }) - - bp.plugins:insert { - name = "file-log", - config = { - path = FILE_LOG_PATH, - reopen = true, - }, - } - - bp.plugins:insert({ - name = "zipkin", - route = { id = zipkin_route.id }, - config = { - sample_ratio = 1, - http_endpoint = "http://localhost:8080/v1/traces", - header_type = config_header.type, - } - }) - - assert(helpers.start_kong { - database = strategy, - nginx_conf = "spec/fixtures/custom_nginx.template", - plugins = "bundled", - tracing_instrumentations = "all", - tracing_sampling_rate = 1, - }) - end) - - lazy_teardown(function() - helpers.stop_kong() - if proxy_client then - proxy_client:close() - end - end) - - before_each(function() - proxy_client = helpers.proxy_client() - os.remove(FILE_LOG_PATH) - end) - - after_each(function() - if proxy_client then - proxy_client:close() - end - - os.remove(FILE_LOG_PATH) - end) - - describe("with Zipkin", function() - local default_type_zipkin = "b3" - - it("contains only the configured trace ID type: " .. config_header.type .. - " + the default (b3) with no tracing headers in the request", function() - local r = proxy_client:get("/", { - headers = { - host = "zipkin", - }, - }) - assert.response(r).has.status(200) - local json_log = wait_json_log() - assert.not_nil(json_log) - - -- contains the configured trace id type - assert.matches(config_header.trace_id_pattern, - json_log.trace_id[config_header.serializer_key]) - -- contains the default trace id type (generated trace id) - assert.matches(trace_id_hex_pattern, - json_log.trace_id[default_type_zipkin]) - - -- does not contain other types - for _, header in ipairs(tracing_headers) do - local k = header.serializer_key - if k ~= config_header.serializer_key and k ~= default_type_zipkin then - assert.is_nil(json_log.trace_id[k]) - end - end - end) - - for _, req_header in ipairs(tracing_headers) do - it("contains only the configured trace ID type (" .. config_header.type .. - ") + the incoming (" .. req_header.type .. 
")", function() - if req_header.type == config_header.type then - return - end - - local r = proxy_client:get("/", { - headers = { - host = "zipkin", - [req_header.name] = req_header.value, - }, - }) - assert.response(r).has.status(200) - local json_log = wait_json_log() - assert.not_nil(json_log) - - -- contains the configured trace id type of the incoming trace id - assert.matches(config_header.trace_id_pattern, - json_log.trace_id[config_header.serializer_key]) - -- contains the incoming trace id - assert.equals(req_header.trace_id, - json_log.trace_id[req_header.serializer_key]) - - -- does not contain other types - for _, header in ipairs(tracing_headers) do - local k = header.serializer_key - if k ~= config_header.serializer_key and k ~= req_header.serializer_key then - assert.is_nil(json_log.trace_id[k]) - end - end - end) - end - end) - end) - end -end diff --git a/spec/03-plugins/09-key-auth/02-access_spec.lua b/spec/03-plugins/09-key-auth/02-access_spec.lua index 0e0d1763a4f..57e277de487 100644 --- a/spec/03-plugins/09-key-auth/02-access_spec.lua +++ b/spec/03-plugins/09-key-auth/02-access_spec.lua @@ -206,9 +206,7 @@ for _, strategy in helpers.each_strategy() do }) assert.res_status(401, res) local body = assert.res_status(401, res) - local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("No API key found in request", json.message) + assert.same({message = "No API key found in request"}, cjson.decode(body)) end) it("returns Unauthorized on missing credentials", function() local res = assert(proxy_client:send { @@ -220,8 +218,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(401, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("No API key found in request", json.message) + assert.same({ message = "No API key found in request" }, json) end) it("returns Unauthorized on empty key header", function() local res = assert(proxy_client:send { @@ -234,8 +231,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(401, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("No API key found in request", json.message) + assert.same({ message = "No API key found in request" }, json) end) it("returns Unauthorized on empty key querystring", function() local res = assert(proxy_client:send { @@ -247,8 +243,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(401, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("No API key found in request", json.message) + assert.same({ message = "No API key found in request" }, json) end) it("returns WWW-Authenticate header on missing credentials", function() local res = assert(proxy_client:send { @@ -284,8 +279,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(401, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Invalid authentication credentials", json.message) + assert.same({ message = "Invalid authentication credentials" }, json) end) it("handles duplicated key in querystring", function() local res = assert(proxy_client:send { @@ -297,8 +291,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(401, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Duplicate API key found", json.message) + assert.same({ message = "Duplicate API key found" }, json) end) end) @@ -358,8 +351,7 @@ for _, strategy in helpers.each_strategy() do }) local body = 
assert.res_status(401, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Invalid authentication credentials", json.message) + assert.same({ message = "Invalid authentication credentials" }, json) end) -- lua-multipart doesn't currently handle duplicates at all. @@ -379,8 +371,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(401, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Duplicate API key found", json.message) + assert.same({ message = "Duplicate API key found" }, json) end) end @@ -395,8 +386,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(401, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Duplicate API key found", json.message) + assert.same({ message = "Duplicate API key found" }, json) end) it("does not identify apikey[] as api keys", function() @@ -409,8 +399,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(401, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("No API key found in request", json.message) + assert.same({ message = "No API key found in request" }, json) end) it("does not identify apikey[1] as api keys", function() @@ -423,8 +412,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(401, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("No API key found in request", json.message) + assert.same({ message = "No API key found in request" }, json) end) end end) @@ -454,8 +442,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(401, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Invalid authentication credentials", json.message) + assert.same({ message = "Invalid authentication credentials" }, json) end) end) @@ -514,8 +501,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(401, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Invalid authentication credentials", json.message) + assert.same({ message = "Invalid authentication credentials" }, json) res = assert(proxy_client:send { method = "GET", @@ -527,8 +513,7 @@ for _, strategy in helpers.each_strategy() do }) body = assert.res_status(401, res) json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Invalid authentication credentials", json.message) + assert.same({ message = "Invalid authentication credentials" }, json) end) end) @@ -658,8 +643,7 @@ for _, strategy in helpers.each_strategy() do local body = assert.res_status(401, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("No API key found in request", json.message) + assert.same({ message = "No API key found in request" }, json) end) end) diff --git a/spec/03-plugins/10-basic-auth/03-access_spec.lua b/spec/03-plugins/10-basic-auth/03-access_spec.lua index bbb13640c42..c7d7cee5fe9 100644 --- a/spec/03-plugins/10-basic-auth/03-access_spec.lua +++ b/spec/03-plugins/10-basic-auth/03-access_spec.lua @@ -143,8 +143,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(401, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Unauthorized", json.message) + assert.same({ message = "Unauthorized" }, json) end) it("returns WWW-Authenticate header on missing credentials", function() @@ -174,8 +173,7 @@ for _, strategy in helpers.each_strategy() do }) local body = 
assert.res_status(401, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Invalid authentication credentials", json.message) + assert.same({ message = "Invalid authentication credentials" }, json) end) it("returns 401 Unauthorized on invalid credentials in Proxy-Authorization", function() @@ -189,8 +187,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(401, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Invalid authentication credentials", json.message) + assert.same({ message = "Invalid authentication credentials" }, json) end) it("returns 401 Unauthorized on password only", function() @@ -204,8 +201,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(401, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Invalid authentication credentials", json.message) + assert.same({ message = "Invalid authentication credentials" }, json) end) it("returns 401 Unauthorized on username only", function() @@ -219,8 +215,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(401, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Invalid authentication credentials", json.message) + assert.same({ message = "Invalid authentication credentials" }, json) end) it("rejects gRPC call without credentials", function() @@ -296,8 +291,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(401, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Invalid authentication credentials", json.message) + assert.same({ message = "Invalid authentication credentials" }, json) end) it("authenticates valid credentials in Proxy-Authorization", function() diff --git a/spec/03-plugins/10-basic-auth/05-declarative_spec.lua b/spec/03-plugins/10-basic-auth/05-declarative_spec.lua index 29a2dc8c7e0..747b68ccc94 100644 --- a/spec/03-plugins/10-basic-auth/05-declarative_spec.lua +++ b/spec/03-plugins/10-basic-auth/05-declarative_spec.lua @@ -177,8 +177,7 @@ for _, strategy in helpers.each_strategy() do })) local body = assert.res_status(401, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Invalid authentication credentials", json.message) + assert.same({ message = "Invalid authentication credentials" }, json) end) end) diff --git a/spec/03-plugins/11-correlation-id/01-access_spec.lua b/spec/03-plugins/11-correlation-id/01-access_spec.lua index 007f7f734a1..1a9242f4c6b 100644 --- a/spec/03-plugins/11-correlation-id/01-access_spec.lua +++ b/spec/03-plugins/11-correlation-id/01-access_spec.lua @@ -1,23 +1,10 @@ -local helpers = require "spec.helpers" -local cjson = require "cjson" -local pl_path = require "pl.path" -local pl_file = require "pl.file" -local pl_stringx = require "pl.stringx" +local helpers = require "spec.helpers" +local cjson = require "cjson" + local UUID_PATTERN = "%x%x%x%x%x%x%x%x%-%x%x%x%x%-%x%x%x%x%-%x%x%x%x%-%x%x%x%x%x%x%x%x%x%x%x%x" local UUID_COUNTER_PATTERN = UUID_PATTERN .. 
"#%d" local TRACKER_PATTERN = "%d+%.%d+%.%d+%.%d+%-%d+%-%d+%-%d+%-%d+%-%d%d%d%d%d%d%d%d%d%d%.%d%d%d" -local FILE_LOG_PATH = os.tmpname() - - -local function wait_json_log() - helpers.wait_until(function() - return pl_path.exists(FILE_LOG_PATH) and pl_path.getsize(FILE_LOG_PATH) > 0 - end, 10) - - local log = pl_file.read(FILE_LOG_PATH) - return cjson.decode(pl_stringx.strip(log):match("%b{}")) -end for _, strategy in helpers.each_strategy() do @@ -70,10 +57,6 @@ for _, strategy in helpers.each_strategy() do }), }) - local route_serializer = bp.routes:insert { - hosts = { "correlation-serializer.com" }, - } - bp.plugins:insert { name = "correlation-id", route = { id = route1.id }, @@ -155,20 +138,6 @@ for _, strategy in helpers.each_strategy() do }, } - bp.plugins:insert { - name = "file-log", - route = { id = route_serializer.id }, - config = { - path = FILE_LOG_PATH, - reopen = true, - }, - } - - bp.plugins:insert { - name = "correlation-id", - route = { id = route_serializer.id }, - } - assert(helpers.start_kong({ database = strategy, nginx_conf = "spec/fixtures/custom_nginx.template", @@ -446,34 +415,5 @@ for _, strategy in helpers.each_strategy() do local downstream_id = assert.response(res).has.header("kong-request-id") assert.equals("my very personal id", downstream_id) end) - - describe("log serializer", function() - before_each(function() - os.remove(FILE_LOG_PATH) - end) - - after_each(function() - os.remove(FILE_LOG_PATH) - end) - - it("contains the Correlation ID", function() - local correlation_id = "1234" - local r = proxy_client:get("/", { - headers = { - host = "correlation-serializer.com", - ["Kong-Request-ID"] = correlation_id, - }, - }) - assert.response(r).has.status(200) - - local json_log = wait_json_log() - local request_id = json_log and json_log.request and json_log.request.id - assert.matches("^[a-f0-9]+$", request_id) - assert.True(request_id:len() == 32) - - local logged_id = json_log and json_log.correlation_id - assert.equals(correlation_id, logged_id) - end) - end) end) end diff --git a/spec/03-plugins/12-request-size-limiting/01-access_spec.lua b/spec/03-plugins/12-request-size-limiting/01-access_spec.lua index eeef6f0a233..e3302415119 100644 --- a/spec/03-plugins/12-request-size-limiting/01-access_spec.lua +++ b/spec/03-plugins/12-request-size-limiting/01-access_spec.lua @@ -121,8 +121,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(413, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Request size limit exceeded", json.message) + assert.same({ message = "Request size limit exceeded" }, json) end) it("blocks if size is greater than limit and Expect header", function() @@ -139,8 +138,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(417, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Request size limit exceeded", json.message) + assert.same({ message = "Request size limit exceeded" }, json) end) for _, unit in ipairs(size_units) do @@ -157,8 +155,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(413, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Request size limit exceeded", json.message) + assert.same({ message = "Request size limit exceeded" }, json) end) end @@ -222,8 +219,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(413, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Request size limit 
exceeded", json.message) + assert.same({ message = "Request size limit exceeded" }, json) end) it("blocks if size is greater than limit and Expect header", function() @@ -240,8 +236,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(417, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Request size limit exceeded", json.message) + assert.same({ message = "Request size limit exceeded" }, json) end) for _, unit in ipairs(size_units) do @@ -258,8 +253,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(413, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Request size limit exceeded", json.message) + assert.same({ message = "Request size limit exceeded" }, json) end) end diff --git a/spec/03-plugins/14-request-termination/02-access_spec.lua b/spec/03-plugins/14-request-termination/02-access_spec.lua index 0745b884de9..e8b3664c1fd 100644 --- a/spec/03-plugins/14-request-termination/02-access_spec.lua +++ b/spec/03-plugins/14-request-termination/02-access_spec.lua @@ -161,7 +161,6 @@ for _, strategy in helpers.each_strategy() do assert(helpers.start_kong({ database = strategy, nginx_conf = "spec/fixtures/custom_nginx.template", - headers_upstream = "off", })) end) diff --git a/spec/03-plugins/17-ip-restriction/02-access_spec.lua b/spec/03-plugins/17-ip-restriction/02-access_spec.lua index 789c9e0ab41..fba888543fc 100644 --- a/spec/03-plugins/17-ip-restriction/02-access_spec.lua +++ b/spec/03-plugins/17-ip-restriction/02-access_spec.lua @@ -248,8 +248,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(403, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Your IP address is not allowed", json.message) + assert.same({ message = "Your IP address is not allowed" }, json) end) it("blocks a request when the IP is denied with status/message", function() @@ -262,9 +261,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(401, res) local json = cjson.decode(body) - - assert.not_nil(json) - assert.matches("Forbidden", json.message) + assert.same({ message = "Forbidden" }, json) end) it("blocks a request when the IP is denied #grpc", function() @@ -313,8 +310,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(403, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Your IP address is not allowed", json.message) + assert.same({ message = "Your IP address is not allowed" }, json) end) it("blocks an IP on a allowed CIDR range", function() local res = assert(proxy_client:send { @@ -326,8 +322,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(403, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Your IP address is not allowed", json.message) + assert.same({ message = "Your IP address is not allowed" }, json) end) it("takes precedence over an allowed IP", function() local res = assert(proxy_client:send { @@ -339,8 +334,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(403, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Your IP address is not allowed", json.message) + assert.same({ message = "Your IP address is not allowed" }, json) end) it("takes precedence over an allowed CIDR range", function() local res = assert(proxy_client:send { @@ -352,8 +346,7 @@ for _, strategy in helpers.each_strategy() do }) local 
body = assert.res_status(403, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Your IP address is not allowed", json.message) + assert.same({ message = "Your IP address is not allowed" }, json) end) describe("X-Forwarded-For", function() @@ -393,8 +386,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(403, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Your IP address is not allowed", json.message) + assert.same({ message = "Your IP address is not allowed" }, json) end) end) end) @@ -410,8 +402,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(403, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Your IP address is not allowed", json.message) + assert.same({ message = "Your IP address is not allowed" }, json) end) it("allows a allowed IP", function() local res = assert(proxy_client:send { @@ -435,8 +426,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(403, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Your IP address is not allowed", json.message) + assert.same({ message = "Your IP address is not allowed" }, json) end) it("block with not allowed X-Forwarded-For header", function() local res = assert(proxy_client:send { @@ -449,8 +439,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(403, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Your IP address is not allowed", json.message) + assert.same({ message = "Your IP address is not allowed" }, json) end) it("block with not allowed X-Forwarded-For header #grpc", function() local ok, err = helpers.proxy_client_grpc(){ @@ -533,8 +522,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(403, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Your IP address is not allowed", json.message) + assert.same({ message = "Your IP address is not allowed" }, json) end) describe("#regression", function() @@ -709,8 +697,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(403, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Your IP address is not allowed", json.message) + assert.same({ message = "Your IP address is not allowed" }, json) end) it("allows a request when the IPv6 is not denied", function() local res = assert(proxy_client:send { @@ -736,8 +723,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(403, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Your IP address is not allowed", json.message) + assert.same({ message = "Your IP address is not allowed" }, json) end) it("blocks an IPv6 on a allowed IPv6 CIDR range", function() local res = assert(proxy_client:send { @@ -750,8 +736,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(403, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Your IP address is not allowed", json.message) + assert.same({ message = "Your IP address is not allowed" }, json) end) it("takes precedence over an allowed IPv6", function() local res = assert(proxy_client:send { @@ -764,8 +749,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(403, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Your IP address is not allowed", json.message) + 
assert.same({ message = "Your IP address is not allowed" }, json) end) it("takes precedence over an allowed IPv6 CIDR range", function() local res = assert(proxy_client:send { @@ -777,8 +761,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(403, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Your IP address is not allowed", json.message) + assert.same({ message = "Your IP address is not allowed" }, json) end) end) @@ -794,8 +777,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(403, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Your IP address is not allowed", json.message) + assert.same({ message = "Your IP address is not allowed" }, json) end) it("allows a allowed IPv6", function() local res = assert(proxy_client:send { @@ -858,8 +840,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(403, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Your IP address is not allowed", json.message) + assert.same({ message = "Your IP address is not allowed" }, json) end) describe("#regression", function() @@ -961,8 +942,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(403, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Your IP address is not allowed", json.message) + assert.same({ message = "Your IP address is not allowed" }, json) end) it("blocks with blocked complex X-Forwarded-For header", function() local res = assert(proxy_client:send { @@ -975,8 +955,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(403, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Your IP address is not allowed", json.message) + assert.same({ message = "Your IP address is not allowed" }, json) end) it("allows with allowed complex X-Forwarded-For header", function() local res = assert(proxy_client:send { @@ -1005,8 +984,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(403, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Your IP address is not allowed", json.message) + assert.same({ message = "Your IP address is not allowed" }, json) end) it("allows with allowed X-Forwarded-For header", function() local res = assert(proxy_client:send { @@ -1045,8 +1023,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(403, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Your IP address is not allowed", json.message) + assert.same({ message = "Your IP address is not allowed" }, json) end) end) end) diff --git a/spec/03-plugins/18-acl/02-access_spec.lua b/spec/03-plugins/18-acl/02-access_spec.lua index 3b5c37cc4e7..12b60aff0e8 100644 --- a/spec/03-plugins/18-acl/02-access_spec.lua +++ b/spec/03-plugins/18-acl/02-access_spec.lua @@ -793,8 +793,7 @@ for _, strategy in helpers.each_strategy() do })) local body = assert.res_status(401, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Unauthorized", json.message) + assert.same({ message = "Unauthorized" }, json) end) it("should fail when an authentication plugin is missing (with credential)", function() @@ -805,8 +804,7 @@ for _, strategy in helpers.each_strategy() do })) local body = assert.res_status(403, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("You cannot consume this service", 
json.message) + assert.same({ message = "You cannot consume this service" }, json) end) it("should fail when not allowed", function() @@ -817,8 +815,7 @@ for _, strategy in helpers.each_strategy() do })) local body = assert.res_status(403, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("You cannot consume this service", json.message) + assert.same({ message = "You cannot consume this service" }, json) end) it("should fail when not allowed with authenticated groups", function() @@ -829,8 +826,7 @@ for _, strategy in helpers.each_strategy() do })) local body = assert.res_status(403, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("You cannot consume this service", json.message) + assert.same({ message = "You cannot consume this service" }, json) end) it("should work when allowed", function() @@ -925,8 +921,7 @@ for _, strategy in helpers.each_strategy() do })) local body = assert.res_status(403, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("You cannot consume this service", json.message) + assert.same({ message = "You cannot consume this service" }, json) end) it("should fail when denied with authenticated groups", function() @@ -937,8 +932,7 @@ for _, strategy in helpers.each_strategy() do })) local body = assert.res_status(403, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("You cannot consume this service", json.message) + assert.same({ message = "You cannot consume this service" }, json) end) it("should fail denied and with no authenticated groups", function() @@ -949,8 +943,7 @@ for _, strategy in helpers.each_strategy() do })) local body = assert.res_status(401, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Unauthorized", json.message) + assert.same({ message = "Unauthorized" }, json) end) end) @@ -985,8 +978,7 @@ for _, strategy in helpers.each_strategy() do })) local body = assert.res_status(403, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("You cannot consume this service", json.message) + assert.same({ message = "You cannot consume this service" }, json) end) it("should fail when not allowed with authenticated groups", function() @@ -997,8 +989,7 @@ for _, strategy in helpers.each_strategy() do })) local body = assert.res_status(403, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("You cannot consume this service", json.message) + assert.same({ message = "You cannot consume this service" }, json) end) it("should fail when denied", function() @@ -1009,8 +1000,7 @@ for _, strategy in helpers.each_strategy() do })) local body = assert.res_status(403, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("You cannot consume this service", json.message) + assert.same({ message = "You cannot consume this service" }, json) end) it("should fail when denied with authenticated groups", function() @@ -1021,8 +1011,7 @@ for _, strategy in helpers.each_strategy() do })) local body = assert.res_status(403, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("You cannot consume this service", json.message) + assert.same({ message = "You cannot consume this service" }, json) end) @@ -1052,8 +1041,7 @@ for _, strategy in helpers.each_strategy() do })) local body = assert.res_status(403, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("You cannot consume this service", json.message) + assert.same({ message = "You 
cannot consume this service" }, json) end) it("should not work when one of the ACLs denied with authenticated groups", function() @@ -1064,8 +1052,7 @@ for _, strategy in helpers.each_strategy() do })) local body = assert.res_status(403, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("You cannot consume this service", json.message) + assert.same({ message = "You cannot consume this service" }, json) end) it("should work when one of the ACLs is allowed", function() @@ -1094,8 +1081,7 @@ for _, strategy in helpers.each_strategy() do })) local body = assert.res_status(403, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("You cannot consume this service", json.message) + assert.same({ message = "You cannot consume this service" }, json) end) it("should not work when at least one of the ACLs denied with authenticated groups", function() @@ -1106,8 +1092,7 @@ for _, strategy in helpers.each_strategy() do })) local body = assert.res_status(403, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("You cannot consume this service", json.message) + assert.same({ message = "You cannot consume this service" }, json) end) end) @@ -1316,8 +1301,7 @@ for _, strategy in helpers.each_strategy() do })) local body = assert.res_status(403, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("You cannot consume this service", json.message) + assert.same({ message = "You cannot consume this service" }, json) end) it("authorized groups even when anonymous consumer is present", function() @@ -1328,8 +1312,7 @@ for _, strategy in helpers.each_strategy() do })) local body = assert.res_status(403, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("You cannot consume this service", json.message) + assert.same({ message = "You cannot consume this service" }, json) end) end) end) diff --git a/spec/03-plugins/19-hmac-auth/03-access_spec.lua b/spec/03-plugins/19-hmac-auth/03-access_spec.lua index 1f79d3aa8fa..04d14efea3c 100644 --- a/spec/03-plugins/19-hmac-auth/03-access_spec.lua +++ b/spec/03-plugins/19-hmac-auth/03-access_spec.lua @@ -362,8 +362,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(401, res) body = cjson.decode(body) - assert.not_nil(body.message) - assert.matches("HMAC signature cannot be verified", body.message) + assert.same({ message = "HMAC signature cannot be verified" }, body) end) it("should not pass with signature missing", function() @@ -382,8 +381,7 @@ for _, strategy in helpers.each_strategy() do }) local body = assert.res_status(401, res) body = cjson.decode(body) - assert.not_nil(body.message) - assert.matches("HMAC signature cannot be verified", body.message) + assert.same({ message = "HMAC signature cannot be verified" }, body) end) it("should pass with GET", function() diff --git a/spec/03-plugins/23-rate-limiting/04-access_spec.lua b/spec/03-plugins/23-rate-limiting/04-access_spec.lua index 9630f1b72c9..6b48a1a1b10 100644 --- a/spec/03-plugins/23-rate-limiting/04-access_spec.lua +++ b/spec/03-plugins/23-rate-limiting/04-access_spec.lua @@ -333,8 +333,7 @@ for _, strategy in helpers.each_strategy() do assert.equal(true, reset <= 60 and reset > 0) local json = cjson.decode(body) - assert.not_nil(json.message) - assert.matches("API rate limit exceeded", json.message) + assert.same({ message = "API rate limit exceeded" }, json) end) it_with_retry("blocks if exceeding limit, only if done via same path", function() @@ -394,8 
+393,7 @@ for _, strategy in helpers.each_strategy() do assert.equal(true, reset <= 60 and reset > 0) local json = cjson.decode(body) - assert.not_nil(json.message) - assert.matches("API rate limit exceeded", json.message) + assert.same({ message = "API rate limit exceeded" }, json) end) it_with_retry("counts against the same service register from different routes", function() @@ -440,8 +438,7 @@ for _, strategy in helpers.each_strategy() do assert.equal(true, reset <= 60 and reset > 0) local json = cjson.decode(body) - assert.not_nil(json.message) - assert.matches("API rate limit exceeded", json.message) + assert.same({ message = "API rate limit exceeded" }, json) end) it_with_retry("handles multiple limits #flaky", function() @@ -482,8 +479,7 @@ for _, strategy in helpers.each_strategy() do assert.equal(true, reset <= 60 and reset > 0) local json = cjson.decode(body) - assert.not_nil(json.message) - assert.matches("API rate limit exceeded", json.message) + assert.same({ message = "API rate limit exceeded" }, json) end) end) describe("Without authentication (IP address)", function() @@ -520,8 +516,8 @@ for _, strategy in helpers.each_strategy() do assert.matches("ratelimit%-limit: 6", res) assert.matches("ratelimit%-remaining: 0", res) - local retry = tonumber(string.match(res, "retry%-after: (%d+)")) - assert.equal(true, retry <= 60 and retry > 0) + local retry = tonumber(string.match(res, "retry%-after: (%d+)")) + assert.equal(true, retry <= 60 and retry > 0) local reset = tonumber(string.match(res, "ratelimit%-reset: (%d+)")) @@ -559,8 +555,7 @@ for _, strategy in helpers.each_strategy() do assert.equal(true, reset <= 60 and reset > 0) local json = cjson.decode(body) - assert.not_nil(json.message) - assert.matches("API rate limit exceeded", json.message) + assert.same({ message = "API rate limit exceeded" }, json) -- Using a different key of the same consumer works GET("/status/200?apikey=apikey333", { @@ -597,8 +592,7 @@ for _, strategy in helpers.each_strategy() do assert.equal(true, reset <= 60 and reset > 0) local json = cjson.decode(body) - assert.not_nil(json.message) - assert.matches("API rate limit exceeded", json.message) + assert.same({ message = "API rate limit exceeded" }, json) end) it_with_retry("blocks if the only rate-limiting plugin existing is per consumer and not per API", function() @@ -629,8 +623,7 @@ for _, strategy in helpers.each_strategy() do assert.equal(true, reset <= 60 and reset > 0) local json = cjson.decode(body) - assert.not_nil(json.message) - assert.matches("API rate limit exceeded", json.message) + assert.same({ message = "API rate limit exceeded" }, json) end) end) end) @@ -709,8 +702,7 @@ for _, strategy in helpers.each_strategy() do }, 500) local json = cjson.decode(body) - assert.not_nil(json.message) - assert.matches("An unexpected error occurred", json.message) + assert.same({ message = "An unexpected error occurred" }, json) db:reset() bp, db = helpers.get_db_utils(strategy) @@ -800,8 +792,7 @@ for _, strategy in helpers.each_strategy() do }, 500) local json = cjson.decode(body) - assert.not_nil(json.message) - assert.matches("An unexpected error occurred", json.message) + assert.same({ message = "An unexpected error occurred" }, json) end) it_with_retry("keeps working if an error occurs", function() @@ -959,8 +950,7 @@ for _, strategy in helpers.each_strategy() do assert.equal(true, reset <= 60 and reset > 0) local json = cjson.decode(body) - assert.not_nil(json.message) - assert.matches("API rate limit exceeded", json.message) + assert.same({ 
message = "API rate limit exceeded" }, json) end) end) @@ -1036,8 +1026,7 @@ for _, strategy in helpers.each_strategy() do assert.equal(true, reset <= 60 and reset > 0) local json = cjson.decode(body) - assert.not_nil(json.message) - assert.matches("API rate limit exceeded", json.message) + assert.same({ message = "API rate limit exceeded" }, json) end) end) @@ -1124,8 +1113,7 @@ for _, strategy in helpers.each_strategy() do assert.equal(true, reset <= 60 and reset > 0) local json = cjson.decode(body) - assert.not_nil(json.message) - assert.matches("API rate limit exceeded", json.message) + assert.same({ message = "API rate limit exceeded" }, json) end end) end) @@ -1196,8 +1184,7 @@ for _, strategy in helpers.each_strategy() do assert.equal(true, reset <= 60 and reset > 0) local json = cjson.decode(body) - assert.not_nil(json.message) - assert.matches("API rate limit exceeded", json.message) + assert.same({ message = "API rate limit exceeded" }, json) end) end) @@ -1273,8 +1260,7 @@ for _, strategy in helpers.each_strategy() do assert.equal(true, reset <= 60 and reset > 0) local json = cjson.decode(body) - assert.not_nil(json.message) - assert.matches("API rate limit exceeded", json.message) + assert.same({ message = "API rate limit exceeded" }, json) end) end) end diff --git a/spec/03-plugins/33-serverless-functions/02-access_spec.lua b/spec/03-plugins/33-serverless-functions/02-access_spec.lua index 5d5393a8cd3..191ceac4fd5 100644 --- a/spec/03-plugins/33-serverless-functions/02-access_spec.lua +++ b/spec/03-plugins/33-serverless-functions/02-access_spec.lua @@ -256,8 +256,7 @@ for _, plugin_name in ipairs({ "pre-function", "post-function" }) do }) local body = assert.res_status(406, res) local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("Invalid", json.message) + assert.same({ message = "Invalid" }, json) end) it("cascading functions for a 400 and exit", function() @@ -269,7 +268,7 @@ for _, plugin_name in ipairs({ "pre-function", "post-function" }) do } }) local body = assert.res_status(400, res) - assert.matches("Bad request", body) + assert.same("Bad request", body) end) it("runtime error aborts with a 500", function() @@ -281,9 +280,7 @@ for _, plugin_name in ipairs({ "pre-function", "post-function" }) do } }) local body = assert.res_status(500, res) - local json = cjson.decode(body) - assert.not_nil(json) - assert.matches("An unexpected error occurred", json.message) + assert.same('{"message":"An unexpected error occurred"}', body) end) end) diff --git a/spec/03-plugins/34-zipkin/tracing_headers_spec.lua b/spec/03-plugins/34-zipkin/tracing_headers_spec.lua index 7f0b83599f8..a96d0e50409 100644 --- a/spec/03-plugins/34-zipkin/tracing_headers_spec.lua +++ b/spec/03-plugins/34-zipkin/tracing_headers_spec.lua @@ -557,8 +557,7 @@ describe("tracing_headers.set", function() log = { warn = function(msg) warnings[#warnings + 1] = msg - end, - set_serialize_value = function() end, + end } } diff --git a/t/01-pdk/08-response/13-error.t b/t/01-pdk/08-response/13-error.t index ae75b8486a9..8ee4d0e2b58 100644 --- a/t/01-pdk/08-response/13-error.t +++ b/t/01-pdk/08-response/13-error.t @@ -29,11 +29,10 @@ Accept: application/json --- error_code: 502 --- response_headers_like Content-Type: application/json; charset=utf-8 ---- response_body eval -qr/{ -\s*"message":"An invalid response was received from the upstream server", -\s*"request_id":".*" -}/ +--- response_body chop +{ + "message":"An invalid response was received from the upstream server" +} --- no_error_log [error] 
@@ -55,11 +54,10 @@ GET /t --- error_code: 400 --- response_headers_like Content-Type: application/json; charset=utf-8 ---- response_body eval -qr/{ -\s*"message":"Bad request", -\s*"request_id":".*" -}/ +--- response_body chop +{ + "message":"Bad request" +} --- no_error_log [error] @@ -83,11 +81,10 @@ Accept: json --- error_code: 400 --- response_headers_like Content-Type: application/json; charset=utf-8 ---- response_body eval -qr/{ -\s*"message":"Bad request", -\s*"request_id":".*" -}/ +--- response_body chop +{ + "message":"Bad request" +} --- no_error_log [error] @@ -115,11 +112,11 @@ Accept: application/json --- error_code: 503 --- response_headers_like Content-Type: application/xml ---- response_body eval -qr/<\?xml version="1\.0" encoding="UTF\-8"\?>\n -\s*this is fine<\/message> -\s*.*<\/requestid> -<\/error>/ +--- response_body + + + this is fine + --- no_error_log [error] @@ -143,19 +140,18 @@ Accept: text/plain;q=0.3, text/html;q=0.7, text/html;level=1, text/html;level=2; --- error_code: 502 --- response_headers_like Content-Type: text/html; charset=utf-8 ---- response_body eval -qr/ -\s* -\s* -\s* -\s*Kong Error<\/title> -\s*<\/head> -\s*<body> -\s*<h1>Kong Error<\/h1> -\s*<p>An invalid response was received from the upstream server.<\/p> -\s*<p>request_id: .*<\/p> -\s*<\/body> -\s*<\/html>/ +--- response_body +<!doctype html> +<html> + <head> + <meta charset="utf-8"> + <title>Kong Error + + +

 + <h1>Kong Error</h1> + <p>An invalid response was received from the upstream server.</p> 

+ + --- no_error_log [error] @@ -187,11 +183,10 @@ GET /t --- error_code: 500 --- response_headers_like Content-Type: application/json; charset=utf-8 ---- response_body eval -qr/{ -\s*"message":"An unexpected error occurred", -\s*"request_id":".*" -}/ +--- response_body chop +{ + "message":"An unexpected error occurred" +} --- no_error_log [error] @@ -215,11 +210,10 @@ Accept: application/json --- error_code: 419 --- response_headers_like Content-Type: application/json; charset=utf-8 ---- response_body eval -qr/{ -\s*"message":"I'm not a teapot", -\s*"request_id":".*" -}/ +--- response_body chop +{ + "message":"I'm not a teapot" +} --- no_error_log [error] @@ -243,11 +237,10 @@ Accept: application/json --- error_code: 500 --- response_headers_like Content-Type: application/json; charset=utf-8 ---- response_body eval -qr/{ -\s*"message":"oh no", -\s*"request_id":".*" -}/ +--- response_body chop +{ + "message":"oh no" +} --- no_error_log [error] @@ -271,11 +264,11 @@ Accept: application/xml --- error_code: 502 --- response_headers_like Content-Type: application/xml; charset=utf-8 ---- response_body eval -qr/<\?xml version="1\.0" encoding="UTF\-8"\?>\n -\s*\{"a field":"not a default message"\}<\/message> -\s*.*<\/requestid> -<\/error>/ +--- response_body + + + {"a field":"not a default message"} + --- no_error_log [error] @@ -299,9 +292,8 @@ Accept: text/* --- error_code: 410 --- response_headers_like Content-Type: text/plain; charset=utf-8 ---- response_body eval -qr/Gone -request_id:.*/ +--- response_body +Gone --- no_error_log [error] From 553fbecf553bc501a46e578635eb79707d14e132 Mon Sep 17 00:00:00 2001 From: Wangchong Zhou Date: Tue, 21 May 2024 15:59:39 +0800 Subject: [PATCH 38/42] chore(cd): drop EOL rhel7 and debian10 --- .github/workflows/release.yml | 25 +--- BUILD.bazel | 12 -- build/README.md | 2 - scripts/explain_manifest/config.py | 27 ---- .../fixtures/debian-10-amd64.txt | 135 ------------------ .../explain_manifest/fixtures/el7-amd64.txt | 135 ------------------ 6 files changed, 5 insertions(+), 331 deletions(-) delete mode 100644 scripts/explain_manifest/fixtures/debian-10-amd64.txt delete mode 100644 scripts/explain_manifest/fixtures/el7-amd64.txt diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index b59115e9227..90e99650d35 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,9 +1,6 @@ name: Package & Release # The workflow to build and release official Kong packages and images. -# -# TODO: -# Do not bump the version of actions/checkout to v4 before dropping rhel7 and amazonlinux2. 
on: # yamllint disable-line rule:truthy pull_request: @@ -130,15 +127,15 @@ jobs: - name: Cache Git id: cache-git - if: (matrix.package == 'rpm' || matrix.image == 'debian:10') && matrix.image != '' - uses: actions/cache@v3 + if: (matrix.package == 'rpm') && matrix.image != '' + uses: actions/cache@e12d46a63a90f2fae62d114769bbf2a179198b5c # v3, DO NOT BUMP, v4 BREAKS ON CENTOS7 OR AMAZONLINUX2 with: path: /usr/local/git key: ${{ matrix.label }}-git-2.41.0 - # el-7,8, amazonlinux-2,2023, debian-10 doesn't have git 2.18+, so we need to install it manually + # el-7,8, amazonlinux-2,2023 doesn't have git 2.18+, so we need to install it manually - name: Install newer Git - if: (matrix.package == 'rpm' || matrix.image == 'debian:10') && matrix.image != '' && steps.cache-git.outputs.cache-hit != 'true' + if: (matrix.package == 'rpm') && matrix.image != '' && steps.cache-git.outputs.cache-hit != 'true' run: | if which apt 2>/dev/null; then apt update @@ -152,28 +149,16 @@ jobs: tar xf git-2.41.0.tar.gz cd git-2.41.0 - # https://gitlab.com/gitlab-org/omnibus-gitlab/-/merge_requests/5948/diffs - if [[ ${{ matrix.image }} == "centos:7" ]]; then - echo 'CFLAGS=-std=gnu99' >> config.mak - fi - make configure ./configure --prefix=/usr/local/git make -j$(nproc) make install - name: Add Git to PATH - if: (matrix.package == 'rpm' || matrix.image == 'debian:10') && matrix.image != '' + if: (matrix.package == 'rpm') && matrix.image != '' run: | echo "/usr/local/git/bin" >> $GITHUB_PATH - - name: Debian Git dependencies - if: matrix.image == 'debian:10' - run: | - apt update - # dependencies for git - apt install -y wget libz-dev libssl-dev libcurl4-gnutls-dev sudo - - name: Checkout Kong source code uses: actions/checkout@v3 diff --git a/BUILD.bazel b/BUILD.bazel index 632194b18c1..d330e7089ef 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -50,18 +50,6 @@ nfpm_pkg( visibility = ["//visibility:public"], ) -nfpm_pkg( - name = "kong_el7", - config = "//build:package/nfpm.yaml", - env = nfpm_env, - extra_env = { - "RPM_EXTRA_DEPS": "hostname", - }, - packager = "rpm", - pkg_name = "kong.el7", - visibility = ["//visibility:public"], -) - nfpm_pkg( name = "kong_aws2", config = "//build:package/nfpm.yaml", diff --git a/build/README.md b/build/README.md index 7af1d2b7f04..5abe6e55e73 100644 --- a/build/README.md +++ b/build/README.md @@ -69,11 +69,9 @@ GITHUB_TOKEN=token bazel build --config release //build:kong --verbose_failures Supported build targets for binary packages: - `:kong_deb` -- `:kong_el7` - `:kong_el8` - `:kong_aws2` - `:kong_aws2023` -- `:kong_apk` For example, to build the deb package: diff --git a/scripts/explain_manifest/config.py b/scripts/explain_manifest/config.py index 398c9346c96..ba49ebe96d2 100644 --- a/scripts/explain_manifest/config.py +++ b/scripts/explain_manifest/config.py @@ -80,20 +80,6 @@ def transform(f: FileInfo): }, }, ), - "el7-amd64": ExpectSuite( - name="Redhat 7 (amd64)", - manifest="fixtures/el7-amd64.txt", - use_rpath=True, - tests={ - common_suites: {}, - libc_libcpp_suites: { - "libc_max_version": "2.17", - # gcc 4.8.5 - "libcxx_max_version": "3.4.19", - "cxxabi_max_version": "1.3.7", - }, - } - ), "el8-amd64": ExpectSuite( name="Redhat 8 (amd64)", manifest="fixtures/el8-amd64.txt", @@ -150,19 +136,6 @@ def transform(f: FileInfo): }, } ), - "debian-10-amd64": ExpectSuite( - name="Debian 10 (amd64)", - manifest="fixtures/debian-10-amd64.txt", - tests={ - common_suites: {}, - libc_libcpp_suites: { - "libc_max_version": "2.28", - # gcc 8.3.0 - "libcxx_max_version": "3.4.25", 
- "cxxabi_max_version": "1.3.11", - }, - } - ), "debian-11-amd64": ExpectSuite( name="Debian 11 (amd64)", manifest="fixtures/debian-11-amd64.txt", diff --git a/scripts/explain_manifest/fixtures/debian-10-amd64.txt b/scripts/explain_manifest/fixtures/debian-10-amd64.txt deleted file mode 100644 index 174773be772..00000000000 --- a/scripts/explain_manifest/fixtures/debian-10-amd64.txt +++ /dev/null @@ -1,135 +0,0 @@ -- Path : /etc/kong/kong.logrotate - -- Path : /usr/local/kong/include/google - Type : directory - -- Path : /usr/local/kong/lib/engines-1.1/afalg.so - Needed : - - libstdc++.so.6 - - libm.so.6 - - libcrypto.so.1.1 - - libdl.so.2 - - libc.so.6 - Runpath : /usr/local/kong/lib - -- Path : /usr/local/kong/lib/engines-1.1/capi.so - Needed : - - libstdc++.so.6 - - libm.so.6 - - libcrypto.so.1.1 - - libdl.so.2 - - libc.so.6 - Runpath : /usr/local/kong/lib - -- Path : /usr/local/kong/lib/engines-1.1/padlock.so - Needed : - - libstdc++.so.6 - - libm.so.6 - - libcrypto.so.1.1 - - libdl.so.2 - - libc.so.6 - Runpath : /usr/local/kong/lib - -- Path : /usr/local/kong/lib/libcrypto.so.1.1 - Needed : - - libstdc++.so.6 - - libm.so.6 - - libdl.so.2 - - libc.so.6 - Runpath : /usr/local/kong/lib - -- Path : /usr/local/kong/lib/libssl.so.1.1 - Needed : - - libstdc++.so.6 - - libm.so.6 - - libcrypto.so.1.1 - - libdl.so.2 - - libc.so.6 - Runpath : /usr/local/kong/lib - -- Path : /usr/local/lib/lua/5.1/lfs.so - Needed : - - libc.so.6 - -- Path : /usr/local/lib/lua/5.1/lpeg.so - Needed : - - libc.so.6 - -- Path : /usr/local/lib/lua/5.1/lsyslog.so - Needed : - - libc.so.6 - -- Path : /usr/local/lib/lua/5.1/lua_pack.so - Needed : - - libc.so.6 - -- Path : /usr/local/lib/lua/5.1/lua_system_constants.so - Needed : - - libc.so.6 - -- Path : /usr/local/lib/lua/5.1/mime/core.so - Needed : - - libc.so.6 - -- Path : /usr/local/lib/lua/5.1/pb.so - Needed : - - libc.so.6 - -- Path : /usr/local/lib/lua/5.1/socket/core.so - Needed : - - libc.so.6 - -- Path : /usr/local/lib/lua/5.1/socket/serial.so - Needed : - - libc.so.6 - -- Path : /usr/local/lib/lua/5.1/socket/unix.so - Needed : - - libc.so.6 - -- Path : /usr/local/lib/lua/5.1/ssl.so - Needed : - - libssl.so.1.1 - - libcrypto.so.1.1 - - libc.so.6 - -- Path : /usr/local/lib/lua/5.1/yaml.so - Needed : - - libyaml-0.so.2 - - libc.so.6 - -- Path : /usr/local/openresty/lualib/cjson.so - Needed : - - libc.so.6 - -- Path : /usr/local/openresty/lualib/librestysignal.so - Needed : - - libc.so.6 - -- Path : /usr/local/openresty/lualib/rds/parser.so - Needed : - - libc.so.6 - -- Path : /usr/local/openresty/lualib/redis/parser.so - Needed : - - libc.so.6 - -- Path : /usr/local/openresty/nginx/sbin/nginx - Needed : - - libdl.so.2 - - libpthread.so.0 - - libcrypt.so.1 - - libluajit-5.1.so.2 - - libm.so.6 - - libssl.so.1.1 - - libcrypto.so.1.1 - - libz.so.1 - - libc.so.6 - Runpath : /usr/local/openresty/luajit/lib:/usr/local/kong/lib - Modules : - - lua-kong-nginx-module - - lua-kong-nginx-module/stream - OpenSSL : OpenSSL 1.1.1o 3 May 2022 - DWARF : True - DWARF - ngx_http_request_t related DWARF DIEs: True - diff --git a/scripts/explain_manifest/fixtures/el7-amd64.txt b/scripts/explain_manifest/fixtures/el7-amd64.txt deleted file mode 100644 index cb5d0045ac2..00000000000 --- a/scripts/explain_manifest/fixtures/el7-amd64.txt +++ /dev/null @@ -1,135 +0,0 @@ -- Path : /etc/kong/kong.logrotate - -- Path : /usr/local/kong/include/google - Type : directory - -- Path : /usr/local/kong/lib/engines-1.1/afalg.so - Needed : - - libstdc++.so.6 - - libm.so.6 - - libcrypto.so.1.1 - - 
libdl.so.2 - - libc.so.6 - Runpath : /usr/local/kong/lib - -- Path : /usr/local/kong/lib/engines-1.1/capi.so - Needed : - - libstdc++.so.6 - - libm.so.6 - - libcrypto.so.1.1 - - libdl.so.2 - - libc.so.6 - Runpath : /usr/local/kong/lib - -- Path : /usr/local/kong/lib/engines-1.1/padlock.so - Needed : - - libstdc++.so.6 - - libm.so.6 - - libcrypto.so.1.1 - - libdl.so.2 - - libc.so.6 - Runpath : /usr/local/kong/lib - -- Path : /usr/local/kong/lib/libcrypto.so.1.1 - Needed : - - libstdc++.so.6 - - libm.so.6 - - libdl.so.2 - - libc.so.6 - Runpath : /usr/local/kong/lib - -- Path : /usr/local/kong/lib/libssl.so.1.1 - Needed : - - libstdc++.so.6 - - libm.so.6 - - libcrypto.so.1.1 - - libdl.so.2 - - libc.so.6 - Runpath : /usr/local/kong/lib - -- Path : /usr/local/lib/lua/5.1/lfs.so - Needed : - - libc.so.6 - -- Path : /usr/local/lib/lua/5.1/lpeg.so - Needed : - - libc.so.6 - -- Path : /usr/local/lib/lua/5.1/lsyslog.so - Needed : - - libc.so.6 - -- Path : /usr/local/lib/lua/5.1/lua_pack.so - Needed : - - libc.so.6 - -- Path : /usr/local/lib/lua/5.1/lua_system_constants.so - Needed : - - libc.so.6 - -- Path : /usr/local/lib/lua/5.1/mime/core.so - Needed : - - libc.so.6 - -- Path : /usr/local/lib/lua/5.1/pb.so - Needed : - - libc.so.6 - -- Path : /usr/local/lib/lua/5.1/socket/core.so - Needed : - - libc.so.6 - -- Path : /usr/local/lib/lua/5.1/socket/serial.so - Needed : - - libc.so.6 - -- Path : /usr/local/lib/lua/5.1/socket/unix.so - Needed : - - libc.so.6 - -- Path : /usr/local/lib/lua/5.1/ssl.so - Needed : - - libssl.so.1.1 - - libcrypto.so.1.1 - - libc.so.6 - -- Path : /usr/local/lib/lua/5.1/yaml.so - Needed : - - libyaml-0.so.2 - - libc.so.6 - -- Path : /usr/local/openresty/lualib/cjson.so - Needed : - - libc.so.6 - -- Path : /usr/local/openresty/lualib/librestysignal.so - Needed : - - libc.so.6 - -- Path : /usr/local/openresty/lualib/rds/parser.so - Needed : - - libc.so.6 - -- Path : /usr/local/openresty/lualib/redis/parser.so - Needed : - - libc.so.6 - -- Path : /usr/local/openresty/nginx/sbin/nginx - Needed : - - libdl.so.2 - - libpthread.so.0 - - libcrypt.so.1 - - libluajit-5.1.so.2 - - libm.so.6 - - libssl.so.1.1 - - libcrypto.so.1.1 - - libz.so.1 - - libc.so.6 - Rpath : /usr/local/openresty/luajit/lib:/usr/local/kong/lib - Modules : - - lua-kong-nginx-module - - lua-kong-nginx-module/stream - OpenSSL : OpenSSL 1.1.1o 3 May 2022 - DWARF : True - DWARF - ngx_http_request_t related DWARF DIEs: True - From a4e6e5c354876d59f3d4cb2ab7118a7a3e058fa5 Mon Sep 17 00:00:00 2001 From: Wangchong Zhou Date: Tue, 21 May 2024 16:47:36 +0800 Subject: [PATCH 39/42] chore(build): bump cross toolchain to 0.7.0 --- build/toolchain/repositories.bzl | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/build/toolchain/repositories.bzl b/build/toolchain/repositories.bzl index 19e7e2510ee..d95e10c4d1b 100644 --- a/build/toolchain/repositories.bzl +++ b/build/toolchain/repositories.bzl @@ -39,32 +39,32 @@ def toolchain_repositories(): http_archive( name = "aarch64-rhel9-linux-gnu-gcc-11", - url = "https://github.com/Kong/crosstool-ng-actions/releases/download/0.5.0/aarch64-rhel9-linux-gnu-glibc-2.34-gcc-11.tar.gz", - sha256 = "40fcf85e8315869621573512499aa3e2884283e0054dfefc2bad3bbf21b954c0", + url = "https://github.com/Kong/crosstool-ng-actions/releases/download/0.7.0/aarch64-rhel9-linux-gnu-glibc-2.34-gcc-11.tar.gz", + sha256 = "8db520adb98f43dfe3da5d51e09679b85956e3a11362d7cba37a85065e87fcf7", strip_prefix = "aarch64-rhel9-linux-gnu", build_file_content = build_file_content, ) 
http_archive( name = "aarch64-rhel8-linux-gnu-gcc-8", - url = "https://github.com/Kong/crosstool-ng-actions/releases/download/0.5.0/aarch64-rhel8-linux-gnu-glibc-2.28-gcc-8.tar.gz", - sha256 = "7a9a28ccab6d3b068ad49b2618276707e0a31b437ad010c8969ba8660ddf63fb", + url = "https://github.com/Kong/crosstool-ng-actions/releases/download/0.7.0/aarch64-rhel8-linux-gnu-glibc-2.28-gcc-8.tar.gz", + sha256 = "de41ca31b6a056bddd770b4cb50fe8e8c31e8faa9ce857771ab7410a954d1cbe", strip_prefix = "aarch64-rhel8-linux-gnu", build_file_content = build_file_content, ) http_archive( name = "aarch64-aws2023-linux-gnu-gcc-11", - url = "https://github.com/Kong/crosstool-ng-actions/releases/download/0.5.0/aarch64-aws2023-linux-gnu-glibc-2.34-gcc-11.tar.gz", - sha256 = "01498b49c20255dd3d5da733fa5d60b5dad4b1cdd55e50552d8f2867f3d82e98", + url = "https://github.com/Kong/crosstool-ng-actions/releases/download/0.7.0/aarch64-aws2023-linux-gnu-glibc-2.34-gcc-11.tar.gz", + sha256 = "c0333ba0934b32f59ab9c3076c47785c94413aae264cc2ee78d6d5fd46171a9d", strip_prefix = "aarch64-aws2023-linux-gnu", build_file_content = build_file_content, ) http_archive( name = "aarch64-aws2-linux-gnu-gcc-7", - url = "https://github.com/Kong/crosstool-ng-actions/releases/download/0.5.0/aarch64-aws2-linux-gnu-glibc-2.26-gcc-7.tar.gz", - sha256 = "9a8d0bb84c3eea7b662192bf44aaf33a76c9c68848a68a544a91ab90cd8cba60", + url = "https://github.com/Kong/crosstool-ng-actions/releases/download/0.7.0/aarch64-aws2-linux-gnu-glibc-2.26-gcc-7.tar.gz", + sha256 = "de365a366b5de93b0f6d851746e7ced06946b083b390500d4c1b4a8360702331", strip_prefix = "aarch64-aws2-linux-gnu", build_file_content = build_file_content, ) From bdacaddfdacf381742c26d718cb75b568da6cce9 Mon Sep 17 00:00:00 2001 From: Wangchong Zhou Date: Tue, 21 May 2024 16:09:57 +0800 Subject: [PATCH 40/42] feat(build): cross build for amazonlinux:2 x86_64 --- .github/matrix-full.yml | 2 +- BUILD.bazel | 21 +++++++++++++++++++ build/toolchain/BUILD | 8 +++++++ build/toolchain/managed_toolchain.bzl | 7 +++++++ build/toolchain/repositories.bzl | 8 +++++++ .../fixtures/amazonlinux-2-amd64.txt | 20 +++++------------- 6 files changed, 50 insertions(+), 16 deletions(-) diff --git a/.github/matrix-full.yml b/.github/matrix-full.yml index b514341ca32..a9b346134a7 100644 --- a/.github/matrix-full.yml +++ b/.github/matrix-full.yml @@ -37,10 +37,10 @@ build-packages: # Amazon Linux - label: amazonlinux-2 - image: amazonlinux:2 package: rpm package-type: aws2 check-manifest-suite: amazonlinux-2-amd64 + bazel-args: --platforms=//:aws2-crossbuild-x86_64 - label: amazonlinux-2023 image: amazonlinux:2023 package: rpm diff --git a/BUILD.bazel b/BUILD.bazel index d330e7089ef..5c52fc80094 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -193,6 +193,16 @@ platform( for vendor in aarch64_glibc_distros ] +platform( + name = "aws2-crossbuild-x86_64", + constraint_values = [ + "@platforms//os:linux", + "@platforms//cpu:x86_64", + "//build/platforms/distro:aws2", + ":cross_build", + ], +) + # config_settings define a select() condition based on user-set constraint_values # see https://bazel.build/docs/configurable-attributes config_setting( @@ -216,12 +226,23 @@ config_setting( visibility = ["//visibility:public"], ) +config_setting( + name = "x86_64-linux-anylibc-cross", + constraint_values = [ + "@platforms//os:linux", + "@platforms//cpu:x86_64", + ":cross_build", + ], + visibility = ["//visibility:public"], +) + selects.config_setting_group( # matches all cross build platforms name = "any-cross", match_any = [ 
":aarch64-linux-anylibc-cross", ":x86_64-linux-musl-cross", + ":x86_64-linux-anylibc-cross", ], visibility = ["//visibility:public"], ) diff --git a/build/toolchain/BUILD b/build/toolchain/BUILD index 9b870846acf..e364acbd77e 100644 --- a/build/toolchain/BUILD +++ b/build/toolchain/BUILD @@ -63,6 +63,14 @@ define_managed_toolchain( vendor = "alpine", ) +define_managed_toolchain( + arch = "x86_64", + gcc_version = aarch64_glibc_distros["aws2"], + libc = "gnu", + target_compatible_with = ["//build/platforms/distro:aws2"], + vendor = "aws2", +) + [ define_managed_toolchain( arch = "aarch64", diff --git a/build/toolchain/managed_toolchain.bzl b/build/toolchain/managed_toolchain.bzl index 793b335924e..86d88868d5c 100644 --- a/build/toolchain/managed_toolchain.bzl +++ b/build/toolchain/managed_toolchain.bzl @@ -143,6 +143,13 @@ def register_all_toolchains(name = None): vendor = "alpine", ) + register_managed_toolchain( + arch = "x86_64", + gcc_version = "7", + libc = "gnu", + vendor = "aws2", + ) + for vendor in aarch64_glibc_distros: register_managed_toolchain( arch = "aarch64", diff --git a/build/toolchain/repositories.bzl b/build/toolchain/repositories.bzl index d95e10c4d1b..9c0b54d7841 100644 --- a/build/toolchain/repositories.bzl +++ b/build/toolchain/repositories.bzl @@ -68,3 +68,11 @@ def toolchain_repositories(): strip_prefix = "aarch64-aws2-linux-gnu", build_file_content = build_file_content, ) + + http_archive( + name = "x86_64-aws2-linux-gnu-gcc-7", + url = "https://github.com/Kong/crosstool-ng-actions/releases/download/0.7.0/x86_64-aws2-linux-gnu-glibc-2.26-gcc-7.tar.gz", + sha256 = "645c242d13bf456ca59a7e9701e9d2f53336fd0497ccaff2b151da9921469985", + strip_prefix = "x86_64-aws2-linux-gnu", + build_file_content = build_file_content, + ) diff --git a/scripts/explain_manifest/fixtures/amazonlinux-2-amd64.txt b/scripts/explain_manifest/fixtures/amazonlinux-2-amd64.txt index cb5d0045ac2..3d0e4868ebe 100644 --- a/scripts/explain_manifest/fixtures/amazonlinux-2-amd64.txt +++ b/scripts/explain_manifest/fixtures/amazonlinux-2-amd64.txt @@ -5,47 +5,37 @@ - Path : /usr/local/kong/lib/engines-1.1/afalg.so Needed : - - libstdc++.so.6 - - libm.so.6 - libcrypto.so.1.1 - libdl.so.2 - libc.so.6 - Runpath : /usr/local/kong/lib + Rpath : /usr/local/kong/lib - Path : /usr/local/kong/lib/engines-1.1/capi.so Needed : - - libstdc++.so.6 - - libm.so.6 - libcrypto.so.1.1 - libdl.so.2 - libc.so.6 - Runpath : /usr/local/kong/lib + Rpath : /usr/local/kong/lib - Path : /usr/local/kong/lib/engines-1.1/padlock.so Needed : - - libstdc++.so.6 - - libm.so.6 - libcrypto.so.1.1 - libdl.so.2 - libc.so.6 - Runpath : /usr/local/kong/lib + Rpath : /usr/local/kong/lib - Path : /usr/local/kong/lib/libcrypto.so.1.1 Needed : - - libstdc++.so.6 - - libm.so.6 - libdl.so.2 - libc.so.6 - Runpath : /usr/local/kong/lib + Rpath : /usr/local/kong/lib - Path : /usr/local/kong/lib/libssl.so.1.1 Needed : - - libstdc++.so.6 - - libm.so.6 - libcrypto.so.1.1 - libdl.so.2 - libc.so.6 - Runpath : /usr/local/kong/lib + Rpath : /usr/local/kong/lib - Path : /usr/local/lib/lua/5.1/lfs.so Needed : From 8188ef74e34270c92dd3af0743afe223954ff60b Mon Sep 17 00:00:00 2001 From: Wangchong Zhou Date: Tue, 21 May 2024 16:30:52 +0800 Subject: [PATCH 41/42] fix(build): cleanup cross build flags --- BUILD.bazel | 8 ++++---- .../cross_deps/libxcrypt/BUILD.libxcrypt.bazel | 7 +++++-- build/cross_deps/libyaml/BUILD.libyaml.bazel | 7 +++++-- build/openresty/BUILD.openresty.bazel | 17 +++++++++++++++-- build/openresty/openssl/openssl.bzl | 5 ++++- 5 files 
changed, 33 insertions(+), 11 deletions(-) diff --git a/BUILD.bazel b/BUILD.bazel index 5c52fc80094..71a35fde056 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -206,7 +206,7 @@ platform( # config_settings define a select() condition based on user-set constraint_values # see https://bazel.build/docs/configurable-attributes config_setting( - name = "aarch64-linux-anylibc-cross", + name = "aarch64-linux-glibc-cross", constraint_values = [ "@platforms//os:linux", "@platforms//cpu:aarch64", @@ -227,7 +227,7 @@ config_setting( ) config_setting( - name = "x86_64-linux-anylibc-cross", + name = "x86_64-linux-glibc-cross", constraint_values = [ "@platforms//os:linux", "@platforms//cpu:x86_64", @@ -240,9 +240,9 @@ selects.config_setting_group( # matches all cross build platforms name = "any-cross", match_any = [ - ":aarch64-linux-anylibc-cross", + ":aarch64-linux-glibc-cross", ":x86_64-linux-musl-cross", - ":x86_64-linux-anylibc-cross", + ":x86_64-linux-glibc-cross", ], visibility = ["//visibility:public"], ) diff --git a/build/cross_deps/libxcrypt/BUILD.libxcrypt.bazel b/build/cross_deps/libxcrypt/BUILD.libxcrypt.bazel index 933172eec78..88880868add 100644 --- a/build/cross_deps/libxcrypt/BUILD.libxcrypt.bazel +++ b/build/cross_deps/libxcrypt/BUILD.libxcrypt.bazel @@ -26,8 +26,11 @@ configure_make( configure_command = "configure", configure_in_place = True, configure_options = select({ - "@kong//:aarch64-linux-anylibc-cross": [ - "--host=aarch64-linux", + "@kong//:aarch64-linux-glibc-cross": [ + "--host=aarch64-unknown-linux-gnu", + ], + "@kong//:x86_64-linux-glibc-cross": [ + "--host=x86_64-unknown-linux-gnu", ], "@kong//:x86_64-linux-musl-cross": [ "--host=x86_64-linux-musl", diff --git a/build/cross_deps/libyaml/BUILD.libyaml.bazel b/build/cross_deps/libyaml/BUILD.libyaml.bazel index ad4e48560df..57139745a48 100644 --- a/build/cross_deps/libyaml/BUILD.libyaml.bazel +++ b/build/cross_deps/libyaml/BUILD.libyaml.bazel @@ -14,8 +14,11 @@ configure_make( configure_command = "configure", configure_in_place = True, configure_options = select({ - "@kong//:aarch64-linux-anylibc-cross": [ - "--host=aarch64-linux", + "@kong//:aarch64-linux-glibc-cross": [ + "--host=aarch64-unknown-linux-gnu", + ], + "@kong//:x86_64-linux-glibc-cross": [ + "--host=x86_64-unknown-linux-gnu", ], "@kong//:x86_64-linux-musl-cross": [ "--host=x86_64-linux-musl", diff --git a/build/openresty/BUILD.openresty.bazel b/build/openresty/BUILD.openresty.bazel index cc4107cabc7..ab9b2d043c9 100644 --- a/build/openresty/BUILD.openresty.bazel +++ b/build/openresty/BUILD.openresty.bazel @@ -152,7 +152,7 @@ CONFIGURE_OPTIONS = [ "--add-module=$$EXT_BUILD_ROOT$$/external/lua-kong-nginx-module", "--add-module=$$EXT_BUILD_ROOT$$/external/lua-kong-nginx-module/stream", ] + select({ - "@kong//:aarch64-linux-anylibc-cross": [ + "@kong//:aarch64-linux-glibc-cross": [ "--crossbuild=Linux:aarch64", "--with-endian=little", "--with-int=4", @@ -165,6 +165,19 @@ CONFIGURE_OPTIONS = [ "--with-time-t=8", "--with-sys-nerr=132", ], + "@kong//:x86_64-linux-glibc-cross": [ + "--crossbuild=Linux:x86_64", + "--with-endian=little", + "--with-int=4", + "--with-long=8", + "--with-long-long=8", + "--with-ptr-size=8", + "--with-sig-atomic-t=4", + "--with-size-t=8", + "--with-off-t=8", + "--with-time-t=8", + "--with-sys-nerr=132", + ], "@kong//:x86_64-linux-musl-cross": [ "--crossbuild=Linux:x86_64", "--with-endian=little", @@ -188,7 +201,7 @@ CONFIGURE_OPTIONS = [ }) + select({ # any cross build that migrated to use libxcrypt needs those flags # alpine uses different 
libc so doesn't need it - "@kong//:aarch64-linux-anylibc-cross": [ + "@kong//:aarch64-linux-glibc-cross": [ "--with-cc-opt=\"-I$$EXT_BUILD_DEPS$$/libxcrypt/include\"", "--with-ld-opt=\"-L$$EXT_BUILD_DEPS$$/libxcrypt/lib\"", ], diff --git a/build/openresty/openssl/openssl.bzl b/build/openresty/openssl/openssl.bzl index 62aa72f34dc..400e2e880ef 100644 --- a/build/openresty/openssl/openssl.bzl +++ b/build/openresty/openssl/openssl.bzl @@ -11,9 +11,12 @@ load("@kong_bindings//:variables.bzl", "KONG_VAR") # Read https://wiki.openssl.org/index.php/Compilation_and_Installation CONFIGURE_OPTIONS = select({ - "@kong//:aarch64-linux-anylibc-cross": [ + "@kong//:aarch64-linux-glibc-cross": [ "linux-aarch64", ], + "@kong//:x86_64-linux-glibc-cross": [ + "linux-x86_64", + ], # no extra args needed for "@kong//:x86_64-linux-musl-cross" or non-cross builds "//conditions:default": [], }) + [ From 1705ae192fd332e7adfbb3b8b858ba2f8dc5fcac Mon Sep 17 00:00:00 2001 From: Wangchong Zhou Date: Tue, 23 Jul 2024 23:55:40 +0800 Subject: [PATCH 42/42] fix(build): use Configure directly to support cross build on openssl 1.1.1 --- build/openresty/openssl/openssl.bzl | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/build/openresty/openssl/openssl.bzl b/build/openresty/openssl/openssl.bzl index 400e2e880ef..8bf672b2cd3 100644 --- a/build/openresty/openssl/openssl.bzl +++ b/build/openresty/openssl/openssl.bzl @@ -17,8 +17,13 @@ CONFIGURE_OPTIONS = select({ "@kong//:x86_64-linux-glibc-cross": [ "linux-x86_64", ], - # no extra args needed for "@kong//:x86_64-linux-musl-cross" or non-cross builds - "//conditions:default": [], + # non-cross build + "@platforms//cpu:x86_64": [ + "linux-x86_64", + ], + "@platforms//cpu:aarch64": [ + "linux-aarch64", + ], }) + [ "-g", "-O3", # force -O3 even we are using --debug (for example on CI) @@ -50,7 +55,7 @@ def build_openssl( configure_make( name = name, - configure_command = "config", + configure_command = "Configure", configure_in_place = True, configure_options = CONFIGURE_OPTIONS + extra_configure_options, env = select({
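        # Why `Configure` instead of `config`: on OpenSSL 1.1.1 the `config`
        # wrapper probes the build host and picks the target itself, so it
        # cannot be steered toward a cross target; invoking `Configure`
        # directly lets the select() above pass an explicit platform string
        # such as linux-x86_64 or linux-aarch64 for cross builds.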