-
+
+
{() => }
diff --git a/ui/yarn.lock b/ui/yarn.lock
index f199c8c09ded..390aa23b8844 100644
--- a/ui/yarn.lock
+++ b/ui/yarn.lock
@@ -1416,10 +1416,10 @@
resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz#d7c6e6755c78567a951e04ab52ef0fd26de59f32"
integrity sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==
-"@jridgewell/trace-mapping@^0.3.17", "@jridgewell/trace-mapping@^0.3.18", "@jridgewell/trace-mapping@^0.3.9":
- version "0.3.20"
- resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.20.tgz#72e45707cf240fa6b081d0366f8265b0cd10197f"
- integrity sha512-R8LcPeWZol2zR8mmH3JeKQ6QRCFb7XgUhV9ZlGhHLGyg4wpPiPZNQOOWhFZhxKw8u//yTbNGI42Bx/3paXEQ+Q==
+"@jridgewell/trace-mapping@^0.3.17", "@jridgewell/trace-mapping@^0.3.18", "@jridgewell/trace-mapping@^0.3.20", "@jridgewell/trace-mapping@^0.3.9":
+ version "0.3.25"
+ resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz#15f190e98895f3fc23276ee14bc76b675c2e50f0"
+ integrity sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==
dependencies:
"@jridgewell/resolve-uri" "^3.1.0"
"@jridgewell/sourcemap-codec" "^1.4.14"
@@ -1991,23 +1991,7 @@
dependencies:
"@types/ms" "*"
-"@types/eslint-scope@^3.7.3":
- version "3.7.7"
- resolved "https://registry.yarnpkg.com/@types/eslint-scope/-/eslint-scope-3.7.7.tgz#3108bd5f18b0cdb277c867b3dd449c9ed7079ac5"
- integrity sha512-MzMFlSLBqNF2gcHWO0G1vP/YQyfvrxZ0bF+u7mzUdZ1/xK4A4sru+nraZz5i3iEIk1l1uyicaDVTB4QbbEkAYg==
- dependencies:
- "@types/eslint" "*"
- "@types/estree" "*"
-
-"@types/eslint@*":
- version "8.56.0"
- resolved "https://registry.yarnpkg.com/@types/eslint/-/eslint-8.56.0.tgz#e28d045b8e530a33c9cbcfbf02332df0d1380a2c"
- integrity sha512-FlsN0p4FhuYRjIxpbdXovvHQhtlG05O1GG/RNWvdAxTboR438IOTwmrY/vLA+Xfgg06BTkP045M3vpFwTMv1dg==
- dependencies:
- "@types/estree" "*"
- "@types/json-schema" "*"
-
-"@types/estree@*", "@types/estree@^1.0.0":
+"@types/estree@^1.0.5":
version "1.0.5"
resolved "https://registry.yarnpkg.com/@types/estree/-/estree-1.0.5.tgz#a6ce3e556e00fd9895dd872dd172ad0d4bd687f4"
integrity sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==
@@ -2100,7 +2084,7 @@
resolved "https://registry.yarnpkg.com/@types/js-yaml/-/js-yaml-4.0.9.tgz#cd82382c4f902fed9691a2ed79ec68c5898af4c2"
integrity sha512-k4MGaQl5TGo/iipqb2UDG2UwjXziSWkh0uysQelTlJpX1qGlpUZYm8PnO4DxG1qBomtJUdYJ6qR6xdIah10JLg==
-"@types/json-schema@*", "@types/json-schema@^7.0.12", "@types/json-schema@^7.0.5", "@types/json-schema@^7.0.8", "@types/json-schema@^7.0.9":
+"@types/json-schema@^7.0.12", "@types/json-schema@^7.0.5", "@types/json-schema@^7.0.8", "@types/json-schema@^7.0.9":
version "7.0.15"
resolved "https://registry.yarnpkg.com/@types/json-schema/-/json-schema-7.0.15.tgz#596a1747233694d50f6ad8a7869fcb6f56cf5841"
integrity sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==
@@ -2471,10 +2455,10 @@
resolved "https://registry.yarnpkg.com/@ungap/structured-clone/-/structured-clone-1.2.0.tgz#756641adb587851b5ccb3e095daf27ae581c8406"
integrity sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==
-"@webassemblyjs/ast@1.11.6", "@webassemblyjs/ast@^1.11.5":
- version "1.11.6"
- resolved "https://registry.yarnpkg.com/@webassemblyjs/ast/-/ast-1.11.6.tgz#db046555d3c413f8966ca50a95176a0e2c642e24"
- integrity sha512-IN1xI7PwOvLPgjcf180gC1bqn3q/QaOCwYUahIOhbYUu8KA/3tw2RT/T0Gidi1l7Hhj5D/INhJxiICObqpMu4Q==
+"@webassemblyjs/ast@1.12.1", "@webassemblyjs/ast@^1.12.1":
+ version "1.12.1"
+ resolved "https://registry.yarnpkg.com/@webassemblyjs/ast/-/ast-1.12.1.tgz#bb16a0e8b1914f979f45864c23819cc3e3f0d4bb"
+ integrity sha512-EKfMUOPRRUTy5UII4qJDGPpqfwjOmZ5jeGFwid9mnoqIFK+e0vqoi1qH56JpmZSzEL53jKnNzScdmftJyG5xWg==
dependencies:
"@webassemblyjs/helper-numbers" "1.11.6"
"@webassemblyjs/helper-wasm-bytecode" "1.11.6"
@@ -2489,10 +2473,10 @@
resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-api-error/-/helper-api-error-1.11.6.tgz#6132f68c4acd59dcd141c44b18cbebbd9f2fa768"
integrity sha512-o0YkoP4pVu4rN8aTJgAyj9hC2Sv5UlkzCHhxqWj8butaLvnpdc2jOwh4ewE6CX0txSfLn/UYaV/pheS2Txg//Q==
-"@webassemblyjs/helper-buffer@1.11.6":
- version "1.11.6"
- resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-buffer/-/helper-buffer-1.11.6.tgz#b66d73c43e296fd5e88006f18524feb0f2c7c093"
- integrity sha512-z3nFzdcp1mb8nEOFFk8DrYLpHvhKC3grJD2ardfKOzmbmJvEf/tPIqCY+sNcwZIY8ZD7IkB2l7/pqhUhqm7hLA==
+"@webassemblyjs/helper-buffer@1.12.1":
+ version "1.12.1"
+ resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-buffer/-/helper-buffer-1.12.1.tgz#6df20d272ea5439bf20ab3492b7fb70e9bfcb3f6"
+ integrity sha512-nzJwQw99DNDKr9BVCOZcLuJJUlqkJh+kVzVl6Fmq/tI5ZtEyWT1KZMyOXltXLZJmDtvLCDgwsyrkohEtopTXCw==
"@webassemblyjs/helper-numbers@1.11.6":
version "1.11.6"
@@ -2508,15 +2492,15 @@
resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.11.6.tgz#bb2ebdb3b83aa26d9baad4c46d4315283acd51e9"
integrity sha512-sFFHKwcmBprO9e7Icf0+gddyWYDViL8bpPjJJl0WHxCdETktXdmtWLGVzoHbqUcY4Be1LkNfwTmXOJUFZYSJdA==
-"@webassemblyjs/helper-wasm-section@1.11.6":
- version "1.11.6"
- resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.11.6.tgz#ff97f3863c55ee7f580fd5c41a381e9def4aa577"
- integrity sha512-LPpZbSOwTpEC2cgn4hTydySy1Ke+XEu+ETXuoyvuyezHO3Kjdu90KK95Sh9xTbmjrCsUwvWwCOQQNta37VrS9g==
+"@webassemblyjs/helper-wasm-section@1.12.1":
+ version "1.12.1"
+ resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.12.1.tgz#3da623233ae1a60409b509a52ade9bc22a37f7bf"
+ integrity sha512-Jif4vfB6FJlUlSbgEMHUyk1j234GTNG9dBJ4XJdOySoj518Xj0oGsNi59cUQF4RRMS9ouBUxDDdyBVfPTypa5g==
dependencies:
- "@webassemblyjs/ast" "1.11.6"
- "@webassemblyjs/helper-buffer" "1.11.6"
+ "@webassemblyjs/ast" "1.12.1"
+ "@webassemblyjs/helper-buffer" "1.12.1"
"@webassemblyjs/helper-wasm-bytecode" "1.11.6"
- "@webassemblyjs/wasm-gen" "1.11.6"
+ "@webassemblyjs/wasm-gen" "1.12.1"
"@webassemblyjs/ieee754@1.11.6":
version "1.11.6"
@@ -2537,59 +2521,59 @@
resolved "https://registry.yarnpkg.com/@webassemblyjs/utf8/-/utf8-1.11.6.tgz#90f8bc34c561595fe156603be7253cdbcd0fab5a"
integrity sha512-vtXf2wTQ3+up9Zsg8sa2yWiQpzSsMyXj0qViVP6xKGCUT8p8YJ6HqI7l5eCnWx1T/FYdsv07HQs2wTFbbof/RA==
-"@webassemblyjs/wasm-edit@^1.11.5":
- version "1.11.6"
- resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-edit/-/wasm-edit-1.11.6.tgz#c72fa8220524c9b416249f3d94c2958dfe70ceab"
- integrity sha512-Ybn2I6fnfIGuCR+Faaz7YcvtBKxvoLV3Lebn1tM4o/IAJzmi9AWYIPWpyBfU8cC+JxAO57bk4+zdsTjJR+VTOw==
+"@webassemblyjs/wasm-edit@^1.12.1":
+ version "1.12.1"
+ resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-edit/-/wasm-edit-1.12.1.tgz#9f9f3ff52a14c980939be0ef9d5df9ebc678ae3b"
+ integrity sha512-1DuwbVvADvS5mGnXbE+c9NfA8QRcZ6iKquqjjmR10k6o+zzsRVesil54DKexiowcFCPdr/Q0qaMgB01+SQ1u6g==
dependencies:
- "@webassemblyjs/ast" "1.11.6"
- "@webassemblyjs/helper-buffer" "1.11.6"
+ "@webassemblyjs/ast" "1.12.1"
+ "@webassemblyjs/helper-buffer" "1.12.1"
"@webassemblyjs/helper-wasm-bytecode" "1.11.6"
- "@webassemblyjs/helper-wasm-section" "1.11.6"
- "@webassemblyjs/wasm-gen" "1.11.6"
- "@webassemblyjs/wasm-opt" "1.11.6"
- "@webassemblyjs/wasm-parser" "1.11.6"
- "@webassemblyjs/wast-printer" "1.11.6"
-
-"@webassemblyjs/wasm-gen@1.11.6":
- version "1.11.6"
- resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-gen/-/wasm-gen-1.11.6.tgz#fb5283e0e8b4551cc4e9c3c0d7184a65faf7c268"
- integrity sha512-3XOqkZP/y6B4F0PBAXvI1/bky7GryoogUtfwExeP/v7Nzwo1QLcq5oQmpKlftZLbT+ERUOAZVQjuNVak6UXjPA==
- dependencies:
- "@webassemblyjs/ast" "1.11.6"
+ "@webassemblyjs/helper-wasm-section" "1.12.1"
+ "@webassemblyjs/wasm-gen" "1.12.1"
+ "@webassemblyjs/wasm-opt" "1.12.1"
+ "@webassemblyjs/wasm-parser" "1.12.1"
+ "@webassemblyjs/wast-printer" "1.12.1"
+
+"@webassemblyjs/wasm-gen@1.12.1":
+ version "1.12.1"
+ resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-gen/-/wasm-gen-1.12.1.tgz#a6520601da1b5700448273666a71ad0a45d78547"
+ integrity sha512-TDq4Ojh9fcohAw6OIMXqiIcTq5KUXTGRkVxbSo1hQnSy6lAM5GSdfwWeSxpAo0YzgsgF182E/U0mDNhuA0tW7w==
+ dependencies:
+ "@webassemblyjs/ast" "1.12.1"
"@webassemblyjs/helper-wasm-bytecode" "1.11.6"
"@webassemblyjs/ieee754" "1.11.6"
"@webassemblyjs/leb128" "1.11.6"
"@webassemblyjs/utf8" "1.11.6"
-"@webassemblyjs/wasm-opt@1.11.6":
- version "1.11.6"
- resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-opt/-/wasm-opt-1.11.6.tgz#d9a22d651248422ca498b09aa3232a81041487c2"
- integrity sha512-cOrKuLRE7PCe6AsOVl7WasYf3wbSo4CeOk6PkrjS7g57MFfVUF9u6ysQBBODX0LdgSvQqRiGz3CXvIDKcPNy4g==
+"@webassemblyjs/wasm-opt@1.12.1":
+ version "1.12.1"
+ resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-opt/-/wasm-opt-1.12.1.tgz#9e6e81475dfcfb62dab574ac2dda38226c232bc5"
+ integrity sha512-Jg99j/2gG2iaz3hijw857AVYekZe2SAskcqlWIZXjji5WStnOpVoat3gQfT/Q5tb2djnCjBtMocY/Su1GfxPBg==
dependencies:
- "@webassemblyjs/ast" "1.11.6"
- "@webassemblyjs/helper-buffer" "1.11.6"
- "@webassemblyjs/wasm-gen" "1.11.6"
- "@webassemblyjs/wasm-parser" "1.11.6"
+ "@webassemblyjs/ast" "1.12.1"
+ "@webassemblyjs/helper-buffer" "1.12.1"
+ "@webassemblyjs/wasm-gen" "1.12.1"
+ "@webassemblyjs/wasm-parser" "1.12.1"
-"@webassemblyjs/wasm-parser@1.11.6", "@webassemblyjs/wasm-parser@^1.11.5":
- version "1.11.6"
- resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-parser/-/wasm-parser-1.11.6.tgz#bb85378c527df824004812bbdb784eea539174a1"
- integrity sha512-6ZwPeGzMJM3Dqp3hCsLgESxBGtT/OeCvCZ4TA1JUPYgmhAx38tTPR9JaKy0S5H3evQpO/h2uWs2j6Yc/fjkpTQ==
+"@webassemblyjs/wasm-parser@1.12.1", "@webassemblyjs/wasm-parser@^1.12.1":
+ version "1.12.1"
+ resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-parser/-/wasm-parser-1.12.1.tgz#c47acb90e6f083391e3fa61d113650eea1e95937"
+ integrity sha512-xikIi7c2FHXysxXe3COrVUPSheuBtpcfhbpFj4gmu7KRLYOzANztwUU0IbsqvMqzuNK2+glRGWCEqZo1WCLyAQ==
dependencies:
- "@webassemblyjs/ast" "1.11.6"
+ "@webassemblyjs/ast" "1.12.1"
"@webassemblyjs/helper-api-error" "1.11.6"
"@webassemblyjs/helper-wasm-bytecode" "1.11.6"
"@webassemblyjs/ieee754" "1.11.6"
"@webassemblyjs/leb128" "1.11.6"
"@webassemblyjs/utf8" "1.11.6"
-"@webassemblyjs/wast-printer@1.11.6":
- version "1.11.6"
- resolved "https://registry.yarnpkg.com/@webassemblyjs/wast-printer/-/wast-printer-1.11.6.tgz#a7bf8dd7e362aeb1668ff43f35cb849f188eff20"
- integrity sha512-JM7AhRcE+yW2GWYaKeHL5vt4xqee5N2WcezptmgyhNS+ScggqcT1OtXykhAb13Sn5Yas0j2uv9tHgrjwvzAP4A==
+"@webassemblyjs/wast-printer@1.12.1":
+ version "1.12.1"
+ resolved "https://registry.yarnpkg.com/@webassemblyjs/wast-printer/-/wast-printer-1.12.1.tgz#bcecf661d7d1abdaf989d8341a4833e33e2b31ac"
+ integrity sha512-+X4WAlOisVWQMikjbcvY2e0rwPsKQ9F688lksZhBcPycBBuii3O7m8FACbDMWDojpAqvjIncrG8J0XHKyQfVeA==
dependencies:
- "@webassemblyjs/ast" "1.11.6"
+ "@webassemblyjs/ast" "1.12.1"
"@xtuc/long" "4.2.2"
"@webpack-cli/configtest@^2.1.1":
@@ -2643,10 +2627,10 @@ acorn-globals@^6.0.0:
acorn "^7.1.1"
acorn-walk "^7.1.1"
-acorn-import-assertions@^1.9.0:
- version "1.9.0"
- resolved "https://registry.yarnpkg.com/acorn-import-assertions/-/acorn-import-assertions-1.9.0.tgz#507276249d684797c84e0734ef84860334cfb1ac"
- integrity sha512-cmMwop9x+8KFhxvKrKfPYmN6/pKTYYHBqLa0DfvVZcKMJWNyWLnaqND7dx/qn66R7ewM1UX5XMaDVP5wlVTaVA==
+acorn-import-attributes@^1.9.5:
+ version "1.9.5"
+ resolved "https://registry.yarnpkg.com/acorn-import-attributes/-/acorn-import-attributes-1.9.5.tgz#7eb1557b1ba05ef18b5ed0ec67591bfab04688ef"
+ integrity sha512-n02Vykv5uA3eHGM/Z2dQrcD56kL8TyDb2p1+0P83PClMnC/nc+anbQRhIOWnSq4Ke/KvDPrY3C9hDtC/A3eHnQ==
acorn-jsx@^5.3.2:
version "5.3.2"
@@ -3216,15 +3200,15 @@ browser-process-hrtime@^1.0.0:
resolved "https://registry.yarnpkg.com/browser-process-hrtime/-/browser-process-hrtime-1.0.0.tgz#3c9b4b7d782c8121e56f10106d84c0d0ffc94626"
integrity sha512-9o5UecI3GhkpM6DrXr69PblIuWxPKk9Y0jHBRhdocZ2y7YECBFCsHm79Pr3OyR2AvjhDkabFJaDJMYRazHgsow==
-browserslist@^4.14.5, browserslist@^4.22.2:
- version "4.22.2"
- resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.22.2.tgz#704c4943072bd81ea18997f3bd2180e89c77874b"
- integrity sha512-0UgcrvQmBDvZHFGdYUehrCNIazki7/lUP3kkoi/r3YB2amZbFM9J43ZRkJTXBUZK4gmx56+Sqk9+Vs9mwZx9+A==
+browserslist@^4.21.10, browserslist@^4.22.2:
+ version "4.23.3"
+ resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.23.3.tgz#debb029d3c93ebc97ffbc8d9cbb03403e227c800"
+ integrity sha512-btwCFJVjI4YWDNfau8RhZ+B1Q/VLoUITrm3RlP6y1tYGWIOa+InuYiRGXUBXo8nA1qKmHMyLB/iVQg5TT4eFoA==
dependencies:
- caniuse-lite "^1.0.30001565"
- electron-to-chromium "^1.4.601"
- node-releases "^2.0.14"
- update-browserslist-db "^1.0.13"
+ caniuse-lite "^1.0.30001646"
+ electron-to-chromium "^1.5.4"
+ node-releases "^2.0.18"
+ update-browserslist-db "^1.1.0"
bs-logger@0.x:
version "0.2.6"
@@ -3317,10 +3301,10 @@ camelcase@^6.0.0:
resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-6.3.0.tgz#5685b95eb209ac9c0c177467778c9c84df58ba9a"
integrity sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==
-caniuse-lite@^1.0.30001565:
- version "1.0.30001571"
- resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001571.tgz#4182e93d696ff42930f4af7eba515ddeb57917ac"
- integrity sha512-tYq/6MoXhdezDLFZuCO/TKboTzuQ/xR5cFdgXPfDtM7/kchBO3b4VWghE/OAi/DV7tTdhmLjZiZBZi1fA/GheQ==
+caniuse-lite@^1.0.30001646:
+ version "1.0.30001655"
+ resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001655.tgz#0ce881f5a19a2dcfda2ecd927df4d5c1684b982f"
+ integrity sha512-jRGVy3iSGO5Uutn2owlb5gR6qsGngTw9ZTb4ali9f3glshcNmJ2noam4Mo9zia5P9Dk3jNNydy7vQjuE5dQmfg==
capture-exit@^2.0.0:
version "2.0.0"
@@ -4108,10 +4092,10 @@ ee-first@1.1.1:
resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d"
integrity sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==
-electron-to-chromium@^1.4.601:
- version "1.4.616"
- resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.616.tgz#4bddbc2c76e1e9dbf449ecd5da3d8119826ea4fb"
- integrity sha512-1n7zWYh8eS0L9Uy+GskE0lkBUNK83cXTVJI0pU3mGprFsbfSdAc15VTFbo+A+Bq4pwstmL30AVcEU3Fo463lNg==
+electron-to-chromium@^1.5.4:
+ version "1.5.13"
+ resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.5.13.tgz#1abf0410c5344b2b829b7247e031f02810d442e6"
+ integrity sha512-lbBcvtIJ4J6sS4tb5TLp1b4LyfCdMkwStzXPyAgVgTRAsep4bvrAGaBOP7ZJtQMNJpSQ9SqG4brWOroNaQtm7Q==
emittery@^0.7.1:
version "0.7.2"
@@ -4140,10 +4124,10 @@ end-of-stream@^1.1.0, end-of-stream@^1.4.1:
dependencies:
once "^1.4.0"
-enhanced-resolve@^5.15.0:
- version "5.15.0"
- resolved "https://registry.yarnpkg.com/enhanced-resolve/-/enhanced-resolve-5.15.0.tgz#1af946c7d93603eb88e9896cee4904dc012e9c35"
- integrity sha512-LXYT42KJ7lpIKECr2mAXIaMldcNCh/7E0KBKOu4KSfkHmP+mZmSs+8V5gBAqisWBy0OO4W5Oyys0GO1Y8KtdKg==
+enhanced-resolve@^5.17.1:
+ version "5.17.1"
+ resolved "https://registry.yarnpkg.com/enhanced-resolve/-/enhanced-resolve-5.17.1.tgz#67bfbbcc2f81d511be77d686a90267ef7f898a15"
+ integrity sha512-LMHl3dXhTcfv8gM4kEzIUeTQ+7fpdA0l2tUf34BddXPkz2A5xJ5L/Pchd5BL6rdccM9QGvu0sWZzK1Z1t4wwyg==
dependencies:
graceful-fs "^4.2.4"
tapable "^2.2.0"
@@ -4299,10 +4283,10 @@ esbuild@^0.19.0:
"@esbuild/win32-ia32" "0.19.11"
"@esbuild/win32-x64" "0.19.11"
-escalade@^3.1.1:
- version "3.1.1"
- resolved "https://registry.yarnpkg.com/escalade/-/escalade-3.1.1.tgz#d8cfdc7000965c5a0174b4a82eaa5c0552742e40"
- integrity sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==
+escalade@^3.1.2:
+ version "3.2.0"
+ resolved "https://registry.yarnpkg.com/escalade/-/escalade-3.2.0.tgz#011a3f69856ba189dffa7dc8fcce99d2a87903e5"
+ integrity sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==
escape-html@~1.0.3:
version "1.0.3"
@@ -5075,7 +5059,7 @@ gopd@^1.0.1:
dependencies:
get-intrinsic "^1.1.3"
-graceful-fs@^4.1.2, graceful-fs@^4.2.4, graceful-fs@^4.2.6, graceful-fs@^4.2.9:
+graceful-fs@^4.1.2, graceful-fs@^4.2.11, graceful-fs@^4.2.4, graceful-fs@^4.2.6, graceful-fs@^4.2.9:
version "4.2.11"
resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.11.tgz#4183e4e8bf08bb6e05bbb2f7d2e0c8f712ca40e3"
integrity sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==
@@ -7379,10 +7363,10 @@ node-notifier@^8.0.0:
uuid "^8.3.0"
which "^2.0.2"
-node-releases@^2.0.14:
- version "2.0.14"
- resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-2.0.14.tgz#2ffb053bceb8b2be8495ece1ab6ce600c4461b0b"
- integrity sha512-y10wOWt8yZpqXmOgRo77WaHEmhYQYGNA6y421PKsKYWEK8aW+cqAphborZDhqfyKrbZEN92CN1X2KbafY2s7Yw==
+node-releases@^2.0.18:
+ version "2.0.18"
+ resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-2.0.18.tgz#f010e8d35e2fe8d6b2944f03f70213ecedc4ca3f"
+ integrity sha512-d9VeXT4SJ7ZeOqGX6R5EM022wpL+eWPooLI+5UpWn2jCT1aosUQEhQP214x33Wkwx3JQMvIm+tIoVOdodFS40g==
normalize-package-data@^2.5.0:
version "2.5.0"
@@ -7761,10 +7745,10 @@ path-type@^5.0.0:
resolved "https://registry.yarnpkg.com/path-type/-/path-type-5.0.0.tgz#14b01ed7aea7ddf9c7c3f46181d4d04f9c785bb8"
integrity sha512-5HviZNaZcfqP95rwpv+1HDgUamezbqdSYTyzjTvwtJSnIH+3vnbmWsItli8OFEndS984VT55M3jduxZbX351gg==
-picocolors@^1.0.0:
- version "1.0.0"
- resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.0.0.tgz#cb5bdc74ff3f51892236eaf79d68bc44564ab81c"
- integrity sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==
+picocolors@^1.0.0, picocolors@^1.0.1:
+ version "1.0.1"
+ resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.0.1.tgz#a8ad579b571952f0e5d25892de5445bcfe25aaa1"
+ integrity sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew==
picomatch@^2.0.4, picomatch@^2.2.1, picomatch@^2.2.3, picomatch@^2.3.1:
version "2.3.1"
@@ -9404,21 +9388,21 @@ terminal-link@^2.0.0:
ansi-escapes "^4.2.1"
supports-hyperlinks "^2.0.0"
-terser-webpack-plugin@^5.3.7:
- version "5.3.9"
- resolved "https://registry.yarnpkg.com/terser-webpack-plugin/-/terser-webpack-plugin-5.3.9.tgz#832536999c51b46d468067f9e37662a3b96adfe1"
- integrity sha512-ZuXsqE07EcggTWQjXUj+Aot/OMcD0bMKGgF63f7UxYcu5/AJF53aIpK1YoP5xR9l6s/Hy2b+t1AM0bLNPRuhwA==
+terser-webpack-plugin@^5.3.10:
+ version "5.3.10"
+ resolved "https://registry.yarnpkg.com/terser-webpack-plugin/-/terser-webpack-plugin-5.3.10.tgz#904f4c9193c6fd2a03f693a2150c62a92f40d199"
+ integrity sha512-BKFPWlPDndPs+NGGCr1U59t0XScL5317Y0UReNrHaw9/FwhPENlq6bfgs+4yPfyP51vqC1bQ4rp1EfXW5ZSH9w==
dependencies:
- "@jridgewell/trace-mapping" "^0.3.17"
+ "@jridgewell/trace-mapping" "^0.3.20"
jest-worker "^27.4.5"
schema-utils "^3.1.1"
serialize-javascript "^6.0.1"
- terser "^5.16.8"
+ terser "^5.26.0"
-terser@^5.10.0, terser@^5.16.8:
- version "5.26.0"
- resolved "https://registry.yarnpkg.com/terser/-/terser-5.26.0.tgz#ee9f05d929f4189a9c28a0feb889d96d50126fe1"
- integrity sha512-dytTGoE2oHgbNV9nTzgBEPaqAWvcJNl66VZ0BkJqlvp71IjO8CxdBx/ykCNb47cLnCmCvRZ6ZR0tLkqvZCdVBQ==
+terser@^5.10.0, terser@^5.26.0:
+ version "5.31.6"
+ resolved "https://registry.yarnpkg.com/terser/-/terser-5.31.6.tgz#c63858a0f0703988d0266a82fcbf2d7ba76422b1"
+ integrity sha512-PQ4DAriWzKj+qgehQ7LK5bQqCFNMmlhjR2PFFLuqGCpuCAauxemVBWwWOxo3UIwWQx8+Pr61Df++r76wDmkQBg==
dependencies:
"@jridgewell/source-map" "^0.3.3"
acorn "^8.8.2"
@@ -9867,13 +9851,13 @@ untildify@^4.0.0:
resolved "https://registry.yarnpkg.com/untildify/-/untildify-4.0.0.tgz#2bc947b953652487e4600949fb091e3ae8cd919b"
integrity sha512-KK8xQ1mkzZeg9inewmFVDNkg3l5LUhoq9kN6iWYB/CC9YMG8HA+c1Q8HwDe6dEX7kErrEVNVBO3fWsVq5iDgtw==
-update-browserslist-db@^1.0.13:
- version "1.0.13"
- resolved "https://registry.yarnpkg.com/update-browserslist-db/-/update-browserslist-db-1.0.13.tgz#3c5e4f5c083661bd38ef64b6328c26ed6c8248c4"
- integrity sha512-xebP81SNcPuNpPP3uzeW1NYXxI3rxyJzF3pD6sH4jE7o/IX+WtSpwnVU+qIsDPyk0d3hmFQ7mjqc6AtV604hbg==
+update-browserslist-db@^1.1.0:
+ version "1.1.0"
+ resolved "https://registry.yarnpkg.com/update-browserslist-db/-/update-browserslist-db-1.1.0.tgz#7ca61c0d8650766090728046e416a8cde682859e"
+ integrity sha512-EdRAaAyk2cUE1wOf2DkEhzxqOQvFOoRJFNS6NeyJ01Gp2beMRpBAINjM2iDXE3KCuKhwnvHIQCJm6ThL2Z+HzQ==
dependencies:
- escalade "^3.1.1"
- picocolors "^1.0.0"
+ escalade "^3.1.2"
+ picocolors "^1.0.1"
uri-js@^4.2.2:
version "4.4.1"
@@ -10013,10 +9997,10 @@ warning@^4.0.1, warning@^4.0.2:
dependencies:
loose-envify "^1.0.0"
-watchpack@^2.4.0:
- version "2.4.0"
- resolved "https://registry.yarnpkg.com/watchpack/-/watchpack-2.4.0.tgz#fa33032374962c78113f93c7f2fb4c54c9862a5d"
- integrity sha512-Lcvm7MGST/4fup+ifyKi2hjyIAwcdI4HRgtvTpIUxBRhB+RFtUh8XtDOxUfctVCnhVi+QQj49i91OyvzkJl6cg==
+watchpack@^2.4.1:
+ version "2.4.2"
+ resolved "https://registry.yarnpkg.com/watchpack/-/watchpack-2.4.2.tgz#2feeaed67412e7c33184e5a79ca738fbd38564da"
+ integrity sha512-TnbFSbcOCcDgjZ4piURLCbJ3nJhznVh9kw6F6iokjiFPl8ONxe9A6nMDVXDiNbrSfLILs6vB07F7wLBrwPYzJw==
dependencies:
glob-to-regexp "^0.4.1"
graceful-fs "^4.1.2"
@@ -10136,34 +10120,33 @@ webpack-sources@^3.2.3:
resolved "https://registry.yarnpkg.com/webpack-sources/-/webpack-sources-3.2.3.tgz#2d4daab8451fd4b240cc27055ff6a0c2ccea0cde"
integrity sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w==
-webpack@^5.89.0:
- version "5.89.0"
- resolved "https://registry.yarnpkg.com/webpack/-/webpack-5.89.0.tgz#56b8bf9a34356e93a6625770006490bf3a7f32dc"
- integrity sha512-qyfIC10pOr70V+jkmud8tMfajraGCZMBWJtrmuBymQKCrLTRejBI8STDp1MCyZu/QTdZSeacCQYpYNQVOzX5kw==
+webpack@^5.94.0:
+ version "5.94.0"
+ resolved "https://registry.yarnpkg.com/webpack/-/webpack-5.94.0.tgz#77a6089c716e7ab90c1c67574a28da518a20970f"
+ integrity sha512-KcsGn50VT+06JH/iunZJedYGUJS5FGjow8wb9c0v5n1Om8O1g4L6LjtfxwlXIATopoQu+vOXXa7gYisWxCoPyg==
dependencies:
- "@types/eslint-scope" "^3.7.3"
- "@types/estree" "^1.0.0"
- "@webassemblyjs/ast" "^1.11.5"
- "@webassemblyjs/wasm-edit" "^1.11.5"
- "@webassemblyjs/wasm-parser" "^1.11.5"
+ "@types/estree" "^1.0.5"
+ "@webassemblyjs/ast" "^1.12.1"
+ "@webassemblyjs/wasm-edit" "^1.12.1"
+ "@webassemblyjs/wasm-parser" "^1.12.1"
acorn "^8.7.1"
- acorn-import-assertions "^1.9.0"
- browserslist "^4.14.5"
+ acorn-import-attributes "^1.9.5"
+ browserslist "^4.21.10"
chrome-trace-event "^1.0.2"
- enhanced-resolve "^5.15.0"
+ enhanced-resolve "^5.17.1"
es-module-lexer "^1.2.1"
eslint-scope "5.1.1"
events "^3.2.0"
glob-to-regexp "^0.4.1"
- graceful-fs "^4.2.9"
+ graceful-fs "^4.2.11"
json-parse-even-better-errors "^2.3.1"
loader-runner "^4.2.0"
mime-types "^2.1.27"
neo-async "^2.6.2"
schema-utils "^3.2.0"
tapable "^2.1.1"
- terser-webpack-plugin "^5.3.7"
- watchpack "^2.4.0"
+ terser-webpack-plugin "^5.3.10"
+ watchpack "^2.4.1"
webpack-sources "^3.2.3"
websocket-driver@>=0.5.1, websocket-driver@^0.7.4:
diff --git a/util/kubeconfig/kubeconfig_test.go b/util/kubeconfig/kubeconfig_test.go
index 5d9d940b1ecc..6ed4163263c0 100644
--- a/util/kubeconfig/kubeconfig_test.go
+++ b/util/kubeconfig/kubeconfig_test.go
@@ -41,10 +41,10 @@ func Test_BasicAuthString(t *testing.T) {
assert.True(t, IsBasicAuthScheme(authString))
token := strings.TrimSpace(strings.TrimPrefix(authString, BasicAuthScheme))
uname, pwd, ok := decodeBasicAuthToken(token)
- if assert.True(t, ok) {
- assert.Equal(t, "admin", uname)
- assert.Equal(t, "admin", pwd)
- }
+ require.True(t, ok)
+ assert.Equal(t, "admin", uname)
+ assert.Equal(t, "admin", pwd)
+
file, err := os.CreateTemp("", "config.yaml")
require.NoError(t, err)
_, err = file.WriteString(config)
diff --git a/util/telemetry/attributes.go b/util/telemetry/attributes.go
new file mode 100644
index 000000000000..fad80c8bec07
--- /dev/null
+++ b/util/telemetry/attributes.go
@@ -0,0 +1,44 @@
+package telemetry
+
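+// Names of the attributes used on Argo Workflows telemetry instruments.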
+const (
+ AttribBuildVersion string = `version`
+ AttribBuildPlatform string = `platform`
+ AttribBuildGoVersion string = `go_version`
+ AttribBuildDate string = `build_date`
+ AttribBuildCompiler string = `compiler`
+ AttribBuildGitCommit string = `git_commit`
+ AttribBuildGitTreeState string = `git_treestate`
+ AttribBuildGitTag string = `git_tag`
+
+ AttribCronWFName string = `name`
+
+ AttribErrorCause string = "cause"
+
+ AttribLogLevel string = `level`
+
+ AttribNodePhase string = `node_phase`
+
+ AttribPodPhase string = `phase`
+ AttribPodNamespace string = `namespace`
+ AttribPodPendingReason string = `reason`
+
+ AttribQueueName string = `queue_name`
+
+ AttribRecentlyStarted string = `recently_started`
+
+ AttribRequestKind = `kind`
+ AttribRequestVerb = `verb`
+ AttribRequestCode = `status_code`
+
+ AttribTemplateName string = `name`
+ AttribTemplateNamespace string = `namespace`
+ AttribTemplateCluster string = `cluster_scope`
+
+ AttribWorkerType string = `worker_type`
+
+ AttribWorkflowNamespace string = `namespace`
+ AttribWorkflowPhase string = `phase`
+ AttribWorkflowStatus = `status`
+ AttribWorkflowType = `type`
+)
diff --git a/workflow/metrics/exporter_prometheus.go b/util/telemetry/exporter_prometheus.go
similarity index 95%
rename from workflow/metrics/exporter_prometheus.go
rename to util/telemetry/exporter_prometheus.go
index cbea7d80de56..1ed45a90c91e 100644
--- a/workflow/metrics/exporter_prometheus.go
+++ b/util/telemetry/exporter_prometheus.go
@@ -1,4 +1,4 @@
-package metrics
+package telemetry
import (
"context"
@@ -20,8 +20,8 @@ import (
)
const (
- defaultPrometheusServerPort = 9090
- defaultPrometheusServerPath = "/metrics"
+ DefaultPrometheusServerPort = 9090
+ DefaultPrometheusServerPath = "/metrics"
)
func (config *Config) prometheusMetricsExporter(namespace string) (*prometheus.Exporter, error) {
@@ -39,14 +39,14 @@ func (config *Config) prometheusMetricsExporter(namespace string) (*prometheus.E
func (config *Config) path() string {
if config.Path == "" {
- return defaultPrometheusServerPath
+ return DefaultPrometheusServerPath
}
return config.Path
}
func (config *Config) port() int {
if config.Port == 0 {
- return defaultPrometheusServerPort
+ return DefaultPrometheusServerPort
}
return config.Port
}
diff --git a/workflow/metrics/exporter_prometheus_test.go b/util/telemetry/exporter_prometheus_test.go
similarity index 57%
rename from workflow/metrics/exporter_prometheus_test.go
rename to util/telemetry/exporter_prometheus_test.go
index c80a3aa45057..05b34ea1eeff 100644
--- a/workflow/metrics/exporter_prometheus_test.go
+++ b/util/telemetry/exporter_prometheus_test.go
@@ -1,6 +1,6 @@
//go:build !windows
-package metrics
+package telemetry
import (
"context"
@@ -14,19 +14,22 @@ import (
"github.com/stretchr/testify/require"
)
+// testScopeName is the name that the metrics running under test will have
+const testScopeName string = "argo-workflows-test"
+
func TestDisablePrometheusServer(t *testing.T) {
config := Config{
Enabled: false,
- Path: defaultPrometheusServerPath,
- Port: defaultPrometheusServerPort,
+ Path: DefaultPrometheusServerPath,
+ Port: DefaultPrometheusServerPort,
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- m, err := New(ctx, TestScopeName, &config, Callbacks{})
+ m, err := NewMetrics(ctx, testScopeName, testScopeName, &config)
require.NoError(t, err)
go m.RunPrometheusServer(ctx, false)
time.Sleep(1 * time.Second) // to confirm that the server doesn't start, even if we wait
- resp, err := http.Get(fmt.Sprintf("http://localhost:%d%s", defaultPrometheusServerPort, defaultPrometheusServerPath))
+ resp, err := http.Get(fmt.Sprintf("http://localhost:%d%s", DefaultPrometheusServerPort, DefaultPrometheusServerPath))
if resp != nil {
defer resp.Body.Close()
}
@@ -37,16 +40,16 @@ func TestDisablePrometheusServer(t *testing.T) {
func TestPrometheusServer(t *testing.T) {
config := Config{
Enabled: true,
- Path: defaultPrometheusServerPath,
- Port: defaultPrometheusServerPort,
+ Path: DefaultPrometheusServerPath,
+ Port: DefaultPrometheusServerPort,
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- m, err := New(ctx, TestScopeName, &config, Callbacks{})
+ m, err := NewMetrics(ctx, testScopeName, testScopeName, &config)
require.NoError(t, err)
go m.RunPrometheusServer(ctx, false)
time.Sleep(1 * time.Second)
- resp, err := http.Get(fmt.Sprintf("http://localhost:%d%s", defaultPrometheusServerPort, defaultPrometheusServerPath))
+ resp, err := http.Get(fmt.Sprintf("http://localhost:%d%s", DefaultPrometheusServerPort, DefaultPrometheusServerPath))
require.NoError(t, err)
assert.Equal(t, http.StatusOK, resp.StatusCode)
@@ -57,22 +60,25 @@ func TestPrometheusServer(t *testing.T) {
bodyString := string(bodyBytes)
assert.NotEmpty(t, bodyString)
+
+ cancel() // Explicit cancel as sometimes in github CI port 9090 is still busy
+ time.Sleep(1 * time.Second) // Wait for prometheus server
}
func TestDummyPrometheusServer(t *testing.T) {
config := Config{
Enabled: true,
- Path: defaultPrometheusServerPath,
- Port: defaultPrometheusServerPort,
+ Path: DefaultPrometheusServerPath,
+ Port: DefaultPrometheusServerPort,
Secure: false,
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- m, err := New(ctx, TestScopeName, &config, Callbacks{})
+ m, err := NewMetrics(ctx, testScopeName, testScopeName, &config)
require.NoError(t, err)
go m.RunPrometheusServer(ctx, true)
time.Sleep(1 * time.Second)
- resp, err := http.Get(fmt.Sprintf("http://localhost:%d%s", defaultPrometheusServerPort, defaultPrometheusServerPath))
+ resp, err := http.Get(fmt.Sprintf("http://localhost:%d%s", DefaultPrometheusServerPort, DefaultPrometheusServerPath))
require.NoError(t, err)
assert.Equal(t, http.StatusOK, resp.StatusCode)
@@ -84,4 +90,7 @@ func TestDummyPrometheusServer(t *testing.T) {
bodyString := string(bodyBytes)
assert.Empty(t, bodyString) // expect the dummy metrics server to provide no metrics responses
+
+ cancel() // Explicit cancel as sometimes in github CI port 9090 is still busy
+ time.Sleep(1 * time.Second) // Wait for prometheus server
}
diff --git a/util/telemetry/helpers_test.go b/util/telemetry/helpers_test.go
new file mode 100644
index 000000000000..2aedaed192d6
--- /dev/null
+++ b/util/telemetry/helpers_test.go
@@ -0,0 +1,65 @@
+package telemetry
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/sdk/metric"
+)
+
+func createDefaultTestMetrics() (*Metrics, *TestMetricsExporter, error) {
+ config := Config{
+ Enabled: true,
+ }
+ return createTestMetrics(&config)
+}
+
+func createTestMetrics(config *Config) (*Metrics, *TestMetricsExporter, error) {
+ ctx /* with cancel*/ := context.Background()
+ te := NewTestMetricsExporter()
+
+ m, err := NewMetrics(ctx, TestScopeName, TestScopeName, config, metric.WithReader(te))
+ if err != nil {
+ return nil, nil, err
+ }
+ err = m.Populate(ctx, AddVersion, addTestingCounter, addTestingHistogram)
+ return m, te, err
+}
+
+const (
+ nameTestingHistogram = `testing_histogram`
+ nameTestingCounter = `testing_counter`
+ errorCauseTestingA = "TestingA"
+ errorCauseTestingB = "TestingB"
+)
+
+func addTestingHistogram(_ context.Context, m *Metrics) error {
+	// The buckets here are only the defaults; they can be overridden by bucket settings in the configmap
+ return m.CreateInstrument(Float64Histogram,
+ nameTestingHistogram,
+ "Testing Metric",
+ "s",
+ WithDefaultBuckets([]float64{0.0, 1.0, 5.0, 10.0}),
+ WithAsBuiltIn(),
+ )
+}
+
+func (m *Metrics) TestingHistogramRecord(ctx context.Context, value float64) {
+ m.Record(ctx, nameTestingHistogram, value, InstAttribs{})
+}
+
+func addTestingCounter(ctx context.Context, m *Metrics) error {
+ return m.CreateInstrument(Int64Counter,
+ nameTestingCounter,
+ "Testing Error Counting Metric",
+ "{errors}",
+ WithAsBuiltIn(),
+ )
+}
+
+func (m *Metrics) TestingErrorA(ctx context.Context) {
+	m.AddInt(ctx, nameTestingCounter, 1, InstAttribs{{Name: AttribErrorCause, Value: errorCauseTestingA}})
+}
+
+func (m *Metrics) TestingErrorB(ctx context.Context) {
+ m.AddInt(ctx, nameTestingCounter, 1, InstAttribs{{Name: AttribErrorCause, Value: errorCauseTestingB}})
+}
diff --git a/workflow/metrics/instrument.go b/util/telemetry/instrument.go
similarity index 73%
rename from workflow/metrics/instrument.go
rename to util/telemetry/instrument.go
index 35ba4ea28acd..6831dd5be804 100644
--- a/workflow/metrics/instrument.go
+++ b/util/telemetry/instrument.go
@@ -1,4 +1,4 @@
-package metrics
+package telemetry
import (
"fmt"
@@ -9,7 +9,7 @@ import (
"github.com/argoproj/argo-workflows/v3/util/help"
)
-type instrument struct {
+type Instrument struct {
name string
description string
otel interface{}
@@ -17,7 +17,7 @@ type instrument struct {
}
func (m *Metrics) preCreateCheck(name string) error {
- if _, exists := m.allInstruments[name]; exists {
+ if _, exists := m.AllInstruments[name]; exists {
return fmt.Errorf("Instrument called %s already exists", name)
}
return nil
@@ -30,13 +30,13 @@ func addHelpLink(name, description string) string {
type instrumentType int
const (
- float64ObservableGauge instrumentType = iota
- float64Histogram
- float64UpDownCounter
- float64ObservableUpDownCounter
- int64ObservableGauge
- int64UpDownCounter
- int64Counter
+ Float64ObservableGauge instrumentType = iota
+ Float64Histogram
+ Float64UpDownCounter
+ Float64ObservableUpDownCounter
+ Int64ObservableGauge
+ Int64UpDownCounter
+ Int64Counter
)
// InstrumentOption applies options to all instruments.
@@ -47,13 +47,13 @@ type instrumentOptions struct {
type instrumentOption func(*instrumentOptions)
-func withAsBuiltIn() instrumentOption {
+func WithAsBuiltIn() instrumentOption {
return func(o *instrumentOptions) {
o.builtIn = true
}
}
-func withDefaultBuckets(buckets []float64) instrumentOption {
+func WithDefaultBuckets(buckets []float64) instrumentOption {
return func(o *instrumentOptions) {
o.defaultBuckets = buckets
}
@@ -67,10 +67,10 @@ func collectOptions(options ...instrumentOption) instrumentOptions {
return o
}
-func (m *Metrics) createInstrument(instType instrumentType, name, desc, unit string, options ...instrumentOption) error {
+func (m *Metrics) CreateInstrument(instType instrumentType, name, desc, unit string, options ...instrumentOption) error {
opts := collectOptions(options...)
- m.mutex.Lock()
- defer m.mutex.Unlock()
+ m.Mutex.Lock()
+ defer m.Mutex.Unlock()
err := m.preCreateCheck(name)
if err != nil {
return err
@@ -81,14 +81,14 @@ func (m *Metrics) createInstrument(instType instrumentType, name, desc, unit str
}
var instPtr interface{}
switch instType {
- case float64ObservableGauge:
+ case Float64ObservableGauge:
inst, insterr := (*m.otelMeter).Float64ObservableGauge(name,
metric.WithDescription(desc),
metric.WithUnit(unit),
)
instPtr = &inst
err = insterr
- case float64Histogram:
+ case Float64Histogram:
inst, insterr := (*m.otelMeter).Float64Histogram(name,
metric.WithDescription(desc),
metric.WithUnit(unit),
@@ -96,35 +96,35 @@ func (m *Metrics) createInstrument(instType instrumentType, name, desc, unit str
)
instPtr = &inst
err = insterr
- case float64UpDownCounter:
+ case Float64UpDownCounter:
inst, insterr := (*m.otelMeter).Float64UpDownCounter(name,
metric.WithDescription(desc),
metric.WithUnit(unit),
)
instPtr = &inst
err = insterr
- case float64ObservableUpDownCounter:
+ case Float64ObservableUpDownCounter:
inst, insterr := (*m.otelMeter).Float64ObservableUpDownCounter(name,
metric.WithDescription(desc),
metric.WithUnit(unit),
)
instPtr = &inst
err = insterr
- case int64ObservableGauge:
+ case Int64ObservableGauge:
inst, insterr := (*m.otelMeter).Int64ObservableGauge(name,
metric.WithDescription(desc),
metric.WithUnit(unit),
)
instPtr = &inst
err = insterr
- case int64UpDownCounter:
+ case Int64UpDownCounter:
inst, insterr := (*m.otelMeter).Int64UpDownCounter(name,
metric.WithDescription(desc),
metric.WithUnit(unit),
)
instPtr = &inst
err = insterr
- case int64Counter:
+ case Int64Counter:
inst, insterr := (*m.otelMeter).Int64Counter(name,
metric.WithDescription(desc),
metric.WithUnit(unit),
@@ -137,7 +137,7 @@ func (m *Metrics) createInstrument(instType instrumentType, name, desc, unit str
if err != nil {
return err
}
- m.allInstruments[name] = &instrument{
+ m.AllInstruments[name] = &Instrument{
name: name,
description: desc,
otel: instPtr,
@@ -155,3 +155,23 @@ func (m *Metrics) buckets(name string, defaultBuckets []float64) []float64 {
}
return defaultBuckets
}
+
+func (i *Instrument) GetName() string {
+ return i.name
+}
+
+func (i *Instrument) GetDescription() string {
+ return i.description
+}
+
+func (i *Instrument) GetOtel() interface{} {
+ return i.otel
+}
+
+func (i *Instrument) SetUserdata(data interface{}) {
+ i.userdata = data
+}
+
+func (i *Instrument) GetUserdata() interface{} {
+ return i.userdata
+}
diff --git a/util/telemetry/metrics.go b/util/telemetry/metrics.go
new file mode 100644
index 000000000000..2a6be32c38fd
--- /dev/null
+++ b/util/telemetry/metrics.go
@@ -0,0 +1,125 @@
+package telemetry
+
+import (
+ "context"
+ "os"
+ "sync"
+ "time"
+
+ log "github.com/sirupsen/logrus"
+ "go.opentelemetry.io/otel"
+
+ wfconfig "github.com/argoproj/argo-workflows/v3/config"
+
+ "go.opentelemetry.io/contrib/instrumentation/runtime"
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
+ "go.opentelemetry.io/otel/metric"
+ metricsdk "go.opentelemetry.io/otel/sdk/metric"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+ "go.opentelemetry.io/otel/sdk/resource"
+ semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
+)
+
+type Config struct {
+ Enabled bool
+ Path string
+ Port int
+ TTL time.Duration
+ IgnoreErrors bool
+ Secure bool
+ Modifiers map[string]Modifier
+ Temporality wfconfig.MetricsTemporality
+}
+
+type Metrics struct {
+	// Ensures mutual exclusion in the instruments map
+ Mutex sync.RWMutex
+
+	// Evil context for compatibility with legacy context-free interfaces
+ Ctx context.Context
+ otelMeter *metric.Meter
+ config *Config
+
+ AllInstruments map[string]*Instrument
+}
+
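+// NewMetrics creates a Metrics instance for the named service. It attaches an OTLP
+// exporter when OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT is
+// set, a Prometheus exporter when config.Enabled is true, and always collects runtime metrics.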
+func NewMetrics(ctx context.Context, serviceName, prometheusName string, config *Config, extraOpts ...metricsdk.Option) (*Metrics, error) {
+ res := resource.NewWithAttributes(
+ semconv.SchemaURL,
+ semconv.ServiceName(serviceName),
+ )
+
+ options := make([]metricsdk.Option, 0)
+ options = append(options, metricsdk.WithResource(res))
+ _, otlpEnabled := os.LookupEnv(`OTEL_EXPORTER_OTLP_ENDPOINT`)
+ _, otlpMetricsEnabled := os.LookupEnv(`OTEL_EXPORTER_OTLP_METRICS_ENDPOINT`)
+ if otlpEnabled || otlpMetricsEnabled {
+ log.Info("Starting OTLP metrics exporter")
+ otelExporter, err := otlpmetricgrpc.New(ctx, otlpmetricgrpc.WithTemporalitySelector(getTemporality(config)))
+ if err != nil {
+ return nil, err
+ }
+ options = append(options, metricsdk.WithReader(metricsdk.NewPeriodicReader(otelExporter)))
+ }
+
+ if config.Enabled {
+ log.Info("Starting Prometheus metrics exporter")
+ promExporter, err := config.prometheusMetricsExporter(prometheusName)
+ if err != nil {
+ return nil, err
+ }
+ options = append(options, metricsdk.WithReader(promExporter))
+ }
+ options = append(options, extraOpts...)
+ options = append(options, view(config))
+
+ provider := metricsdk.NewMeterProvider(options...)
+ otel.SetMeterProvider(provider)
+
+ // Add runtime metrics
+ err := runtime.Start(runtime.WithMinimumReadMemStatsInterval(time.Second))
+ if err != nil {
+ return nil, err
+ }
+
+ meter := provider.Meter(serviceName)
+ metrics := &Metrics{
+ Ctx: ctx,
+ otelMeter: &meter,
+ config: config,
+ AllInstruments: make(map[string]*Instrument),
+ }
+
+ return metrics, nil
+}
+
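+// AddMetric is a function that adds instruments to a Metrics instance.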
+type AddMetric func(context.Context, *Metrics) error
+
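+// Populate registers metrics by calling each AddMetric adder in turn, stopping at the first error.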
+func (m *Metrics) Populate(ctx context.Context, adders ...AddMetric) error {
+ for _, adder := range adders {
+ if err := adder(ctx, m); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func getTemporality(config *Config) metricsdk.TemporalitySelector {
+ switch config.Temporality {
+ case wfconfig.MetricsTemporalityCumulative:
+ return func(metricsdk.InstrumentKind) metricdata.Temporality {
+ return metricdata.CumulativeTemporality
+ }
+ case wfconfig.MetricsTemporalityDelta:
+ return func(metricsdk.InstrumentKind) metricdata.Temporality {
+ return metricdata.DeltaTemporality
+ }
+ default:
+ return metricsdk.DefaultTemporalitySelector
+ }
+}
diff --git a/workflow/metrics/modifiers.go b/util/telemetry/modifiers.go
similarity index 95%
rename from workflow/metrics/modifiers.go
rename to util/telemetry/modifiers.go
index 984c21867361..f752f6184085 100644
--- a/workflow/metrics/modifiers.go
+++ b/util/telemetry/modifiers.go
@@ -1,4 +1,4 @@
-package metrics
+package telemetry
import (
"go.opentelemetry.io/otel/attribute"
@@ -12,7 +12,7 @@ type Modifier struct {
HistogramBuckets []float64
}
-// Create an opentelemetry 'view' which disables whole metrics or aggregates across labels
+// Create an opentelemetry 'view' which disables whole metrics or aggregates across attributes
func view(config *Config) metricsdk.Option {
views := make([]metricsdk.View, 0)
for metric, modifier := range config.Modifiers {
diff --git a/workflow/metrics/modifiers_test.go b/util/telemetry/modifiers_test.go
similarity index 63%
rename from workflow/metrics/modifiers_test.go
rename to util/telemetry/modifiers_test.go
index 818432e35778..3f588065a3a3 100644
--- a/workflow/metrics/modifiers_test.go
+++ b/util/telemetry/modifiers_test.go
@@ -1,4 +1,4 @@
-package metrics
+package telemetry
import (
"context"
@@ -13,42 +13,38 @@ func TestViewDisable(t *testing.T) {
// Same metric as TestMetrics, but disabled by a view
m, te, err := createTestMetrics(&Config{
Modifiers: map[string]Modifier{
- nameOperationDuration: {
+ nameTestingHistogram: {
Disabled: true,
},
},
- },
- Callbacks{},
- )
+ })
require.NoError(t, err)
- m.OperationCompleted(m.ctx, 5)
+ m.TestingHistogramRecord(m.Ctx, 5)
attribs := attribute.NewSet()
- _, err = te.GetFloat64HistogramData(nameOperationDuration, &attribs)
+ _, err = te.GetFloat64HistogramData(nameTestingHistogram, &attribs)
require.Error(t, err)
}
func TestViewDisabledAttributes(t *testing.T) {
- // Disable the error cause label
+ // Disable the error cause attribute
m, te, err := createTestMetrics(&Config{
Modifiers: map[string]Modifier{
- nameErrorCount: {
- DisabledAttributes: []string{labelErrorCause},
+ nameTestingCounter: {
+ DisabledAttributes: []string{AttribErrorCause},
},
},
- },
- Callbacks{},
- )
+ })
require.NoError(t, err)
// Submit a couple of errors
- m.OperationPanic(context.Background())
- m.CronWorkflowSubmissionError(context.Background())
+ m.TestingErrorA(context.Background())
+ m.TestingErrorB(context.Background())
// See if we can find this with the attributes, we should not be able to
- attribsFail := attribute.NewSet(attribute.String(labelErrorCause, string(ErrorCauseOperationPanic)))
- _, err = te.GetInt64CounterValue(nameErrorCount, &attribsFail)
+ attribsFail := attribute.NewSet(attribute.String(AttribErrorCause, string(errorCauseTestingA)))
+ _, err = te.GetInt64CounterValue(nameTestingCounter, &attribsFail)
require.Error(t, err)
// Find a sum of all error types
attribsSuccess := attribute.NewSet()
- val, err := te.GetInt64CounterValue(nameErrorCount, &attribsSuccess)
+ val, err := te.GetInt64CounterValue(nameTestingCounter, &attribsSuccess)
require.NoError(t, err)
// Sum of the two submitted errors is 2
assert.Equal(t, int64(2), val)
@@ -59,17 +55,15 @@ func TestViewHistogramBuckets(t *testing.T) {
bounds := []float64{1.0, 3.0, 5.0, 10.0}
m, te, err := createTestMetrics(&Config{
Modifiers: map[string]Modifier{
- nameOperationDuration: {
+ nameTestingHistogram: {
HistogramBuckets: bounds,
},
},
- },
- Callbacks{},
- )
+ })
require.NoError(t, err)
- m.OperationCompleted(m.ctx, 5)
+ m.TestingHistogramRecord(m.Ctx, 5)
attribs := attribute.NewSet()
- val, err := te.GetFloat64HistogramData(nameOperationDuration, &attribs)
+ val, err := te.GetFloat64HistogramData(nameTestingHistogram, &attribs)
require.NoError(t, err)
assert.Equal(t, bounds, val.Bounds)
assert.Equal(t, []uint64{0, 0, 1, 0, 0}, val.BucketCounts)
diff --git a/workflow/metrics/operators.go b/util/telemetry/operators.go
similarity index 52%
rename from workflow/metrics/operators.go
rename to util/telemetry/operators.go
index afe7b33c5459..f99f7426d5d6 100644
--- a/workflow/metrics/operators.go
+++ b/util/telemetry/operators.go
@@ -1,4 +1,4 @@
-package metrics
+package telemetry
import (
"context"
@@ -9,43 +9,43 @@ import (
"go.opentelemetry.io/otel/metric"
)
-func (m *Metrics) addInt(ctx context.Context, name string, val int64, labels instAttribs) {
- if instrument, ok := m.allInstruments[name]; ok {
- instrument.addInt(ctx, val, labels)
+func (m *Metrics) AddInt(ctx context.Context, name string, val int64, attribs InstAttribs) {
+ if instrument, ok := m.AllInstruments[name]; ok {
+ instrument.AddInt(ctx, val, attribs)
} else {
log.Errorf("Metrics addInt() to non-existent metric %s", name)
}
}
-func (i *instrument) addInt(ctx context.Context, val int64, labels instAttribs) {
+func (i *Instrument) AddInt(ctx context.Context, val int64, attribs InstAttribs) {
switch inst := i.otel.(type) {
case *metric.Int64UpDownCounter:
- (*inst).Add(ctx, val, i.attributes(labels))
+ (*inst).Add(ctx, val, i.attributes(attribs))
case *metric.Int64Counter:
- (*inst).Add(ctx, val, i.attributes(labels))
+ (*inst).Add(ctx, val, i.attributes(attribs))
default:
log.Errorf("Metrics addInt() to invalid type %s (%t)", i.name, i.otel)
}
}
-func (m *Metrics) record(ctx context.Context, name string, val float64, labels instAttribs) {
- if instrument, ok := m.allInstruments[name]; ok {
- instrument.record(ctx, val, labels)
+func (m *Metrics) Record(ctx context.Context, name string, val float64, attribs InstAttribs) {
+ if instrument, ok := m.AllInstruments[name]; ok {
+ instrument.Record(ctx, val, attribs)
} else {
log.Errorf("Metrics record() to non-existent metric %s", name)
}
}
-func (i *instrument) record(ctx context.Context, val float64, labels instAttribs) {
+func (i *Instrument) Record(ctx context.Context, val float64, attribs InstAttribs) {
switch inst := i.otel.(type) {
case *metric.Float64Histogram:
- (*inst).Record(ctx, val, i.attributes(labels))
+ (*inst).Record(ctx, val, i.attributes(attribs))
default:
log.Errorf("Metrics record() to invalid type %s (%t)", i.name, i.otel)
}
}
-func (i *instrument) registerCallback(m *Metrics, f metric.Callback) error {
+func (i *Instrument) RegisterCallback(m *Metrics, f metric.Callback) error {
switch inst := i.otel.(type) {
case *metric.Float64ObservableUpDownCounter:
_, err := (*m.otelMeter).RegisterCallback(f, *inst)
@@ -61,46 +61,46 @@ func (i *instrument) registerCallback(m *Metrics, f metric.Callback) error {
}
}
-func (i *instrument) observeInt(o metric.Observer, val int64, labels instAttribs) {
+func (i *Instrument) ObserveInt(o metric.Observer, val int64, attribs InstAttribs) {
switch inst := i.otel.(type) {
case *metric.Int64ObservableGauge:
- o.ObserveInt64(*inst, val, i.attributes(labels))
+ o.ObserveInt64(*inst, val, i.attributes(attribs))
default:
log.Errorf("Metrics observeFloat() to invalid type %s (%t)", i.name, i.otel)
}
}
-func (i *instrument) observeFloat(o metric.Observer, val float64, labels instAttribs) {
+func (i *Instrument) ObserveFloat(o metric.Observer, val float64, attribs InstAttribs) {
switch inst := i.otel.(type) {
case *metric.Float64ObservableGauge:
- o.ObserveFloat64(*inst, val, i.attributes(labels))
+ o.ObserveFloat64(*inst, val, i.attributes(attribs))
case *metric.Float64ObservableUpDownCounter:
- o.ObserveFloat64(*inst, val, i.attributes(labels))
+ o.ObserveFloat64(*inst, val, i.attributes(attribs))
default:
log.Errorf("Metrics observeFloat() to invalid type %s (%t)", i.name, i.otel)
}
}
-type instAttribs []instAttrib
-type instAttrib struct {
- name string
- value interface{}
+type InstAttribs []InstAttrib
+type InstAttrib struct {
+ Name string
+ Value interface{}
}
-func (i *instrument) attributes(labels instAttribs) metric.MeasurementOption {
+func (i *Instrument) attributes(labels InstAttribs) metric.MeasurementOption {
attribs := make([]attribute.KeyValue, 0)
for _, label := range labels {
- switch value := label.value.(type) {
+ switch value := label.Value.(type) {
case string:
- attribs = append(attribs, attribute.String(label.name, value))
+ attribs = append(attribs, attribute.String(label.Name, value))
case bool:
- attribs = append(attribs, attribute.Bool(label.name, value))
+ attribs = append(attribs, attribute.Bool(label.Name, value))
case int:
- attribs = append(attribs, attribute.Int(label.name, value))
+ attribs = append(attribs, attribute.Int(label.Name, value))
case int64:
- attribs = append(attribs, attribute.Int64(label.name, value))
+ attribs = append(attribs, attribute.Int64(label.Name, value))
case float64:
- attribs = append(attribs, attribute.Float64(label.name, value))
+ attribs = append(attribs, attribute.Float64(label.Name, value))
default:
log.Errorf("Attempt to use label of unhandled type in metric %s", i.name)
}
diff --git a/workflow/metrics/test_exporter.go b/util/telemetry/test_metrics_exporter.go
similarity index 61%
rename from workflow/metrics/test_exporter.go
rename to util/telemetry/test_metrics_exporter.go
index 071e436386ac..4a49b1965306 100644
--- a/workflow/metrics/test_exporter.go
+++ b/util/telemetry/test_metrics_exporter.go
@@ -1,84 +1,39 @@
-package metrics
+package telemetry
import (
"context"
"fmt"
- "time"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
- "k8s.io/client-go/util/workqueue"
)
+// TestScopeName is the name that the metrics running under test will have
+const TestScopeName string = "argo-workflows-test"
+
// TestExporter is an opentelemetry metrics exporter, purely for use within
// tests. It is not possible to query the values of an instrument via the otel
// SDK, so this exporter provides methods by which you can request
// metrics by name+attributes and therefore inspect whether they exist, and
// their values for the purposes of testing only.
// This is a public structure as it is used outside of this module also.
-type TestExporter struct {
+type TestMetricsExporter struct {
metric.Reader
}
-// TestScopeName is the name that the metrics running under test will have
-const TestScopeName string = "argo-workflows-test"
-
-var _ metric.Reader = &TestExporter{}
-
-var sharedMetrics *Metrics = nil
-var sharedTE *TestExporter = nil
-
-// getSharedMetrics returns a singleton metrics with test exporter
-// This is necessary because only the first call to workqueue.SetProvider
-// takes effect within a single binary
-// This can be fixed when we update to client-go 0.27 or later and we can
-// create workqueues with https://godocs.io/k8s.io/client-go/util/workqueue#NewRateLimitingQueueWithConfig
-func getSharedMetrics() (*Metrics, *TestExporter, error) {
- if sharedMetrics == nil {
- config := Config{
- Enabled: true,
- TTL: 1 * time.Second,
- }
- var err error
- sharedMetrics, sharedTE, err = createTestMetrics(&config, Callbacks{})
- if err != nil {
- return nil, nil, err
- }
-
- workqueue.SetProvider(sharedMetrics)
- }
- return sharedMetrics, sharedTE, nil
-}
-
-// CreateDefaultTestMetrics creates a boring testExporter enabled
-// metrics, suitable for many tests
-func CreateDefaultTestMetrics() (*Metrics, *TestExporter, error) {
- config := Config{
- Enabled: true,
- }
- return createTestMetrics(&config, Callbacks{})
-}
-
-func createTestMetrics(config *Config, callbacks Callbacks) (*Metrics, *TestExporter, error) {
- ctx /* with cancel*/ := context.Background()
- te := newTestExporter()
-
- m, err := New(ctx, TestScopeName, config, callbacks, metric.WithReader(te))
- return m, te, err
-
-}
+var _ metric.Reader = &TestMetricsExporter{}
-func newTestExporter() *TestExporter {
+func NewTestMetricsExporter() *TestMetricsExporter {
reader := metric.NewManualReader()
- e := &TestExporter{
+ e := &TestMetricsExporter{
Reader: reader,
}
return e
}
-func (t *TestExporter) getOurMetrics() (*[]metricdata.Metrics, error) {
+func (t *TestMetricsExporter) getOurMetrics() (*[]metricdata.Metrics, error) {
metrics := metricdata.ResourceMetrics{}
err := t.Collect(context.TODO(), &metrics)
if err != nil {
@@ -92,7 +47,7 @@ func (t *TestExporter) getOurMetrics() (*[]metricdata.Metrics, error) {
return nil, fmt.Errorf("%s scope not found", TestScopeName)
}
-func (t *TestExporter) getNamedMetric(name string) (*metricdata.Metrics, error) {
+func (t *TestMetricsExporter) getNamedMetric(name string) (*metricdata.Metrics, error) {
mtcs, err := t.getOurMetrics()
if err != nil {
return nil, err
@@ -105,7 +60,7 @@ func (t *TestExporter) getNamedMetric(name string) (*metricdata.Metrics, error)
return nil, fmt.Errorf("%s named metric not found in %v", name, mtcs)
}
-func (t *TestExporter) getNamedInt64CounterData(name string, attribs *attribute.Set) (*metricdata.DataPoint[int64], error) {
+func (t *TestMetricsExporter) getNamedInt64CounterData(name string, attribs *attribute.Set) (*metricdata.DataPoint[int64], error) {
mtc, err := t.getNamedMetric(name)
if err != nil {
return nil, err
@@ -122,7 +77,7 @@ func (t *TestExporter) getNamedInt64CounterData(name string, attribs *attribute.
return nil, fmt.Errorf("%s type counter[int64] not found in %v", name, mtc)
}
-func (t *TestExporter) getNamedFloat64GaugeData(name string, attribs *attribute.Set) (*metricdata.DataPoint[float64], error) {
+func (t *TestMetricsExporter) getNamedFloat64GaugeData(name string, attribs *attribute.Set) (*metricdata.DataPoint[float64], error) {
mtc, err := t.getNamedMetric(name)
if err != nil {
return nil, err
@@ -139,7 +94,7 @@ func (t *TestExporter) getNamedFloat64GaugeData(name string, attribs *attribute.
return nil, fmt.Errorf("%s type gauge[float64] not found in %v", name, mtc)
}
-func (t *TestExporter) getNamedInt64GaugeData(name string, attribs *attribute.Set) (*metricdata.DataPoint[int64], error) {
+func (t *TestMetricsExporter) getNamedInt64GaugeData(name string, attribs *attribute.Set) (*metricdata.DataPoint[int64], error) {
mtc, err := t.getNamedMetric(name)
if err != nil {
return nil, err
@@ -158,7 +113,7 @@ func (t *TestExporter) getNamedInt64GaugeData(name string, attribs *attribute.Se
return nil, fmt.Errorf("%s named gauge[float64] with attribs %v not found in %v", name, attribs, mtc)
}
-func (t *TestExporter) getNamedFloat64CounterData(name string, attribs *attribute.Set) (*metricdata.DataPoint[float64], error) {
+func (t *TestMetricsExporter) getNamedFloat64CounterData(name string, attribs *attribute.Set) (*metricdata.DataPoint[float64], error) {
mtc, err := t.getNamedMetric(name)
if err != nil {
return nil, err
@@ -175,7 +130,7 @@ func (t *TestExporter) getNamedFloat64CounterData(name string, attribs *attribut
return nil, fmt.Errorf("%s type counter[float64] not found in %v", name, mtc)
}
-func (t *TestExporter) getNamedFloat64HistogramData(name string, attribs *attribute.Set) (*metricdata.HistogramDataPoint[float64], error) {
+func (t *TestMetricsExporter) getNamedFloat64HistogramData(name string, attribs *attribute.Set) (*metricdata.HistogramDataPoint[float64], error) {
mtc, err := t.getNamedMetric(name)
if err != nil {
return nil, err
@@ -193,13 +148,13 @@ func (t *TestExporter) getNamedFloat64HistogramData(name string, attribs *attrib
}
// GetFloat64HistogramData returns an otel histogram float64 data point for test reads
-func (t *TestExporter) GetFloat64HistogramData(name string, attribs *attribute.Set) (*metricdata.HistogramDataPoint[float64], error) {
+func (t *TestMetricsExporter) GetFloat64HistogramData(name string, attribs *attribute.Set) (*metricdata.HistogramDataPoint[float64], error) {
data, err := t.getNamedFloat64HistogramData(name, attribs)
return data, err
}
// GetInt64CounterValue returns an otel int64 counter value for test reads
-func (t *TestExporter) GetInt64CounterValue(name string, attribs *attribute.Set) (int64, error) {
+func (t *TestMetricsExporter) GetInt64CounterValue(name string, attribs *attribute.Set) (int64, error) {
counter, err := t.getNamedInt64CounterData(name, attribs)
if err != nil {
return 0, err
@@ -208,7 +163,7 @@ func (t *TestExporter) GetInt64CounterValue(name string, attribs *attribute.Set)
}
// GetFloat64GaugeValue returns an otel float64 gauge value for test reads
-func (t *TestExporter) GetFloat64GaugeValue(name string, attribs *attribute.Set) (float64, error) {
+func (t *TestMetricsExporter) GetFloat64GaugeValue(name string, attribs *attribute.Set) (float64, error) {
gauge, err := t.getNamedFloat64GaugeData(name, attribs)
if err != nil {
return 0, err
@@ -217,7 +172,7 @@ func (t *TestExporter) GetFloat64GaugeValue(name string, attribs *attribute.Set)
}
// GetInt64GaugeValue returns an otel int64 gauge value for test reads
-func (t *TestExporter) GetInt64GaugeValue(name string, attribs *attribute.Set) (int64, error) {
+func (t *TestMetricsExporter) GetInt64GaugeValue(name string, attribs *attribute.Set) (int64, error) {
gauge, err := t.getNamedInt64GaugeData(name, attribs)
if err != nil {
return 0, err
@@ -226,7 +181,7 @@ func (t *TestExporter) GetInt64GaugeValue(name string, attribs *attribute.Set) (
}
// GetFloat64CounterValue returns an otel float64 counter value for test reads
-func (t *TestExporter) GetFloat64CounterValue(name string, attribs *attribute.Set) (float64, error) {
+func (t *TestMetricsExporter) GetFloat64CounterValue(name string, attribs *attribute.Set) (float64, error) {
counter, err := t.getNamedFloat64CounterData(name, attribs)
if err != nil {
return 0, err
diff --git a/util/telemetry/version.go b/util/telemetry/version.go
new file mode 100644
index 000000000000..055aa038bf74
--- /dev/null
+++ b/util/telemetry/version.go
@@ -0,0 +1,33 @@
+package telemetry
+
+import (
+ "context"
+
+ "github.com/argoproj/argo-workflows/v3"
+)
+
+func AddVersion(ctx context.Context, m *Metrics) error {
+ const nameVersion = `version`
+ err := m.CreateInstrument(Int64Counter,
+ nameVersion,
+ "Build metadata for this Controller",
+ "{unused}",
+ WithAsBuiltIn(),
+ )
+ if err != nil {
+ return err
+ }
+
+ version := argo.GetVersion()
+ m.AddInt(ctx, nameVersion, 1, InstAttribs{
+ {Name: AttribBuildVersion, Value: version.Version},
+ {Name: AttribBuildPlatform, Value: version.Platform},
+ {Name: AttribBuildGoVersion, Value: version.GoVersion},
+ {Name: AttribBuildDate, Value: version.BuildDate},
+ {Name: AttribBuildCompiler, Value: version.Compiler},
+ {Name: AttribBuildGitCommit, Value: version.GitCommit},
+ {Name: AttribBuildGitTreeState, Value: version.GitTreeState},
+ {Name: AttribBuildGitTag, Value: version.GitTag},
+ })
+ return nil
+}
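The call site that invokes AddVersion is not shown in this diff; as a rough sketch (the package and helper name below are placeholders, not part of the change), a caller could register the build metadata counter once a telemetry Metrics object exists:

package example

import (
	"context"
	"fmt"

	"github.com/argoproj/argo-workflows/v3/util/telemetry"
)

// setupTelemetry sketches where AddVersion is intended to fit: called once
// after the telemetry.Metrics object is created, it records a single
// `version` data point carrying the build attributes (version, platform,
// git commit, and so on).
func setupTelemetry(ctx context.Context, m *telemetry.Metrics) error {
	if err := telemetry.AddVersion(ctx, m); err != nil {
		return fmt.Errorf("registering version metric: %w", err)
	}
	return nil
}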
diff --git a/util/telemetry/version_test.go b/util/telemetry/version_test.go
new file mode 100644
index 000000000000..1337c1520b21
--- /dev/null
+++ b/util/telemetry/version_test.go
@@ -0,0 +1,31 @@
+package telemetry
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "go.opentelemetry.io/otel/attribute"
+
+ "github.com/argoproj/argo-workflows/v3"
+)
+
+func TestVersion(t *testing.T) {
+ _, te, err := createDefaultTestMetrics()
+ require.NoError(t, err)
+ assert.NotNil(t, te)
+ version := argo.GetVersion()
+ attribs := attribute.NewSet(
+ attribute.String(AttribBuildVersion, version.Version),
+ attribute.String(AttribBuildPlatform, version.Platform),
+ attribute.String(AttribBuildGoVersion, version.GoVersion),
+ attribute.String(AttribBuildDate, version.BuildDate),
+ attribute.String(AttribBuildCompiler, version.Compiler),
+ attribute.String(AttribBuildGitCommit, version.GitCommit),
+ attribute.String(AttribBuildGitTreeState, version.GitTreeState),
+ attribute.String(AttribBuildGitTag, version.GitTag),
+ )
+ val, err := te.GetInt64CounterValue(`version`, &attribs)
+ require.NoError(t, err)
+ assert.Equal(t, int64(1), val)
+}
diff --git a/workflow/artifacts/http/http_test.go b/workflow/artifacts/http/http_test.go
index 2bf1ebd2bc37..66a913748eba 100644
--- a/workflow/artifacts/http/http_test.go
+++ b/workflow/artifacts/http/http_test.go
@@ -48,9 +48,8 @@ func TestHTTPArtifactDriver_Load(t *testing.T) {
}, "/tmp/not-found")
require.Error(t, err)
argoError, ok := err.(errors.ArgoError)
- if assert.True(t, ok) {
- assert.Equal(t, errors.CodeNotFound, argoError.Code())
- }
+ require.True(t, ok)
+ assert.Equal(t, errors.CodeNotFound, argoError.Code())
})
}
@@ -64,9 +63,8 @@ func TestArtifactoryArtifactDriver_Load(t *testing.T) {
}, "/tmp/not-found")
require.Error(t, err)
argoError, ok := err.(errors.ArgoError)
- if assert.True(t, ok) {
- assert.Equal(t, errors.CodeNotFound, argoError.Code())
- }
+ require.True(t, ok)
+ assert.Equal(t, errors.CodeNotFound, argoError.Code())
})
t.Run("Found", func(t *testing.T) {
err := driver.Load(&wfv1.Artifact{
diff --git a/workflow/common/convert_test.go b/workflow/common/convert_test.go
index 7930749cb529..95a9bf4f9eb2 100644
--- a/workflow/common/convert_test.go
+++ b/workflow/common/convert_test.go
@@ -109,9 +109,8 @@ spec:
err = yaml.Unmarshal([]byte(cronWfInstanceIdString), &cronWf)
require.NoError(t, err)
wf = ConvertCronWorkflowToWorkflow(&cronWf)
- if assert.Contains(t, wf.GetLabels(), LabelKeyControllerInstanceID) {
- assert.Equal(t, "test-controller", wf.GetLabels()[LabelKeyControllerInstanceID])
- }
+ require.Contains(t, wf.GetLabels(), LabelKeyControllerInstanceID)
+ assert.Equal(t, "test-controller", wf.GetLabels()[LabelKeyControllerInstanceID])
err = yaml.Unmarshal([]byte(cronWfInstanceIdString), &cronWf)
require.NoError(t, err)
diff --git a/workflow/controller/cache_test.go b/workflow/controller/cache_test.go
index f8dbd105a520..4a4aaf224417 100644
--- a/workflow/controller/cache_test.go
+++ b/workflow/controller/cache_test.go
@@ -63,10 +63,9 @@ func TestConfigMapCacheLoadHit(t *testing.T) {
outputs := entry.Outputs
require.NoError(t, err)
- if assert.Len(t, outputs.Parameters, 1) {
- assert.Equal(t, "hello", outputs.Parameters[0].Name)
- assert.Equal(t, "foobar", outputs.Parameters[0].Value.String())
- }
+ require.Len(t, outputs.Parameters, 1)
+ assert.Equal(t, "hello", outputs.Parameters[0].Name)
+ assert.Equal(t, "foobar", outputs.Parameters[0].Value.String())
}
func TestConfigMapCacheLoadMiss(t *testing.T) {
diff --git a/workflow/controller/container_set_template_test.go b/workflow/controller/container_set_template_test.go
index ea8c4d9919cd..470546b1e6b3 100644
--- a/workflow/controller/container_set_template_test.go
+++ b/workflow/controller/container_set_template_test.go
@@ -118,14 +118,13 @@ spec:
{Name: "input-artifacts", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}},
}, pod.Spec.Volumes)
- if assert.Len(t, pod.Spec.InitContainers, 1) {
- c := pod.Spec.InitContainers[0]
- assert.ElementsMatch(t, []corev1.VolumeMount{
- {Name: "input-artifacts", MountPath: "/argo/inputs/artifacts"},
- {Name: "workspace", MountPath: "/mainctrfs/workspace"},
- {Name: "var-run-argo", MountPath: common.VarRunArgoPath},
- }, c.VolumeMounts)
- }
+ require.Len(t, pod.Spec.InitContainers, 1)
+ c := pod.Spec.InitContainers[0]
+ assert.ElementsMatch(t, []corev1.VolumeMount{
+ {Name: "input-artifacts", MountPath: "/argo/inputs/artifacts"},
+ {Name: "workspace", MountPath: "/mainctrfs/workspace"},
+ {Name: "var-run-argo", MountPath: common.VarRunArgoPath},
+ }, c.VolumeMounts)
assert.Len(t, pod.Spec.Containers, 2)
for _, c := range pod.Spec.Containers {
diff --git a/workflow/controller/controller.go b/workflow/controller/controller.go
index 50d222dcb55a..dc3ca0fd4c47 100644
--- a/workflow/controller/controller.go
+++ b/workflow/controller/controller.go
@@ -52,6 +52,7 @@ import (
"github.com/argoproj/argo-workflows/v3/util/diff"
"github.com/argoproj/argo-workflows/v3/util/env"
errorsutil "github.com/argoproj/argo-workflows/v3/util/errors"
+ "github.com/argoproj/argo-workflows/v3/util/telemetry"
"github.com/argoproj/argo-workflows/v3/workflow/artifactrepositories"
"github.com/argoproj/argo-workflows/v3/workflow/common"
controllercache "github.com/argoproj/argo-workflows/v3/workflow/controller/cache"
@@ -221,6 +222,7 @@ func NewWorkflowController(ctx context.Context, restConfig *rest.Config, kubecli
wfc.maxStackDepth = wfc.getMaxStackDepth()
wfc.metrics, err = metrics.New(ctx,
`workflows-controller`,
+ `argo_workflows`,
wfc.getMetricsServerConfig(),
metrics.Callbacks{
PodPhase: wfc.getPodPhaseMetrics,
@@ -1391,18 +1393,18 @@ func (wfc *WorkflowController) getMaxStackDepth() int {
return maxAllowedStackDepth
}
-func (wfc *WorkflowController) getMetricsServerConfig() *metrics.Config {
+func (wfc *WorkflowController) getMetricsServerConfig() *telemetry.Config {
// Metrics config
- modifiers := make(map[string]metrics.Modifier)
+ modifiers := make(map[string]telemetry.Modifier)
for name, modifier := range wfc.Config.MetricsConfig.Modifiers {
- modifiers[name] = metrics.Modifier{
+ modifiers[name] = telemetry.Modifier{
Disabled: modifier.Disabled,
DisabledAttributes: modifier.DisabledAttributes,
HistogramBuckets: modifier.HistogramBuckets,
}
}
- metricsConfig := metrics.Config{
+ metricsConfig := telemetry.Config{
Enabled: wfc.Config.MetricsConfig.Enabled == nil || *wfc.Config.MetricsConfig.Enabled,
Path: wfc.Config.MetricsConfig.Path,
Port: wfc.Config.MetricsConfig.Port,
diff --git a/workflow/controller/controller_test.go b/workflow/controller/controller_test.go
index c1b2c52b59fa..e7c018d92857 100644
--- a/workflow/controller/controller_test.go
+++ b/workflow/controller/controller_test.go
@@ -32,6 +32,7 @@ import (
"github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/scheme"
wfextv "github.com/argoproj/argo-workflows/v3/pkg/client/informers/externalversions"
envutil "github.com/argoproj/argo-workflows/v3/util/env"
+ "github.com/argoproj/argo-workflows/v3/util/telemetry"
armocks "github.com/argoproj/argo-workflows/v3/workflow/artifactrepositories/mocks"
"github.com/argoproj/argo-workflows/v3/workflow/common"
controllercache "github.com/argoproj/argo-workflows/v3/workflow/controller/cache"
@@ -248,7 +249,7 @@ var defaultServiceAccount = &apiv1.ServiceAccount{
}
// test exporter extract metric values from the metrics subsystem
-var testExporter *metrics.TestExporter
+var testExporter *telemetry.TestMetricsExporter
func newController(options ...interface{}) (context.CancelFunc, *WorkflowController) {
// get all the objects and add to the fake
@@ -716,20 +717,17 @@ spec:
assert.True(t, controller.processNextItem(ctx))
expectWorkflow(ctx, controller, "my-wf-0", func(wf *wfv1.Workflow) {
- if assert.NotNil(t, wf) {
- assert.Equal(t, wfv1.WorkflowRunning, wf.Status.Phase)
- }
+ require.NotNil(t, wf)
+ assert.Equal(t, wfv1.WorkflowRunning, wf.Status.Phase)
})
expectWorkflow(ctx, controller, "my-wf-1", func(wf *wfv1.Workflow) {
- if assert.NotNil(t, wf) {
- assert.Equal(t, wfv1.WorkflowPending, wf.Status.Phase)
- assert.Equal(t, "Workflow processing has been postponed because too many workflows are already running", wf.Status.Message)
- }
+ require.NotNil(t, wf)
+ assert.Equal(t, wfv1.WorkflowPending, wf.Status.Phase)
+ assert.Equal(t, "Workflow processing has been postponed because too many workflows are already running", wf.Status.Message)
})
expectWorkflow(ctx, controller, "my-wf-2", func(wf *wfv1.Workflow) {
- if assert.NotNil(t, wf) {
- assert.Equal(t, wfv1.WorkflowFailed, wf.Status.Phase)
- }
+ require.NotNil(t, wf)
+ assert.Equal(t, wfv1.WorkflowFailed, wf.Status.Phase)
})
})
}
@@ -995,19 +993,17 @@ status:
// process my-wf-0; update status to Pending
assert.True(t, controller.processNextItem(ctx))
expectWorkflow(ctx, controller, "my-wf-0", func(wf *wfv1.Workflow) {
- if assert.NotNil(t, wf) {
- assert.Equal(t, wfv1.WorkflowPending, wf.Status.Phase)
- assert.Equal(t, "Workflow processing has been postponed because too many workflows are already running", wf.Status.Message)
- }
+ require.NotNil(t, wf)
+ assert.Equal(t, wfv1.WorkflowPending, wf.Status.Phase)
+ assert.Equal(t, "Workflow processing has been postponed because too many workflows are already running", wf.Status.Message)
})
// process my-wf-1; update status to Pending
assert.True(t, controller.processNextItem(ctx))
expectWorkflow(ctx, controller, "my-wf-1", func(wf *wfv1.Workflow) {
- if assert.NotNil(t, wf) {
- assert.Equal(t, wfv1.WorkflowPending, wf.Status.Phase)
- assert.Equal(t, "Workflow processing has been postponed because too many workflows are already running", wf.Status.Message)
- }
+ require.NotNil(t, wf)
+ assert.Equal(t, wfv1.WorkflowPending, wf.Status.Phase)
+ assert.Equal(t, "Workflow processing has been postponed because too many workflows are already running", wf.Status.Message)
})
})
}
@@ -1088,23 +1084,21 @@ status:
assert.True(t, controller.processNextItem(ctx))
if !ns0PendingWfTested {
expectNamespacedWorkflow(ctx, controller, "ns-0", "my-ns-0-wf-0", func(wf *wfv1.Workflow) {
- if assert.NotNil(t, wf) {
- if wf.Status.Phase != "" {
- assert.Equal(t, wfv1.WorkflowPending, wf.Status.Phase)
- assert.Equal(t, "Workflow processing has been postponed because too many workflows are already running", wf.Status.Message)
- ns0PendingWfTested = true
- }
+ require.NotNil(t, wf)
+ if wf.Status.Phase != "" {
+ assert.Equal(t, wfv1.WorkflowPending, wf.Status.Phase)
+ assert.Equal(t, "Workflow processing has been postponed because too many workflows are already running", wf.Status.Message)
+ ns0PendingWfTested = true
}
})
}
if !ns1PendingWfTested {
expectNamespacedWorkflow(ctx, controller, "ns-1", "my-ns-1-wf-0", func(wf *wfv1.Workflow) {
- if assert.NotNil(t, wf) {
- if wf.Status.Phase != "" {
- assert.Equal(t, wfv1.WorkflowPending, wf.Status.Phase)
- assert.Equal(t, "Workflow processing has been postponed because too many workflows are already running", wf.Status.Message)
- ns1PendingWfTested = true
- }
+ require.NotNil(t, wf)
+ if wf.Status.Phase != "" {
+ assert.Equal(t, wfv1.WorkflowPending, wf.Status.Phase)
+ assert.Equal(t, "Workflow processing has been postponed because too many workflows are already running", wf.Status.Message)
+ ns1PendingWfTested = true
}
})
}
diff --git a/workflow/controller/dag_test.go b/workflow/controller/dag_test.go
index da87e12d6e85..dee2ef413f64 100644
--- a/workflow/controller/dag_test.go
+++ b/workflow/controller/dag_test.go
@@ -3365,14 +3365,11 @@ func TestDAGReferTaskAggregatedOutputs(t *testing.T) {
woc.operate(ctx)
dagNode := woc.wf.Status.Nodes.FindByDisplayName("parameter-aggregation-dag-h8b82")
- if assert.NotNil(t, dagNode) {
- if assert.NotNil(t, dagNode.Outputs) {
- if assert.Len(t, dagNode.Outputs.Parameters, 2) {
- assert.Equal(t, `["1","2"]`, dagNode.Outputs.Parameters[0].Value.String())
- assert.Equal(t, `["odd","even"]`, dagNode.Outputs.Parameters[1].Value.String())
- }
- }
- }
+ require.NotNil(t, dagNode)
+ require.NotNil(t, dagNode.Outputs)
+ require.Len(t, dagNode.Outputs.Parameters, 2)
+ assert.Equal(t, `["1","2"]`, dagNode.Outputs.Parameters[0].Value.String())
+ assert.Equal(t, `["odd","even"]`, dagNode.Outputs.Parameters[1].Value.String())
}
var dagHttpChildrenAssigned = `apiVersion: argoproj.io/v1alpha1
@@ -3450,11 +3447,9 @@ func TestDagHttpChildrenAssigned(t *testing.T) {
assert.NotNil(t, dagNode)
dagNode = woc.wf.Status.Nodes.FindByDisplayName("good1")
- if assert.NotNil(t, dagNode) {
- if assert.Len(t, dagNode.Children, 1) {
- assert.Equal(t, "http-template-nv52d-495103493", dagNode.Children[0])
- }
- }
+ require.NotNil(t, dagNode)
+ require.Len(t, dagNode.Children, 1)
+ assert.Equal(t, "http-template-nv52d-495103493", dagNode.Children[0])
}
var retryTypeDagTaskRunExitNodeAfterCompleted = `
diff --git a/workflow/controller/estimation/estimator_factory_test.go b/workflow/controller/estimation/estimator_factory_test.go
index ffb64242ed38..9848a8e16494 100644
--- a/workflow/controller/estimation/estimator_factory_test.go
+++ b/workflow/controller/estimation/estimator_factory_test.go
@@ -60,57 +60,52 @@ metadata:
t.Run("None", func(t *testing.T) {
p, err := f.NewEstimator(&wfv1.Workflow{})
require.NoError(t, err)
- if assert.NotNil(t, p) {
- e := p.(*estimator)
- assert.Nil(t, e.baselineWF)
- }
+ require.NotNil(t, p)
+ e := p.(*estimator)
+ assert.Nil(t, e.baselineWF)
})
t.Run("WorkflowTemplate", func(t *testing.T) {
p, err := f.NewEstimator(&wfv1.Workflow{
ObjectMeta: metav1.ObjectMeta{Namespace: "my-ns", Labels: map[string]string{common.LabelKeyWorkflowTemplate: "my-wftmpl"}},
})
require.NoError(t, err)
- if assert.NotNil(t, p) {
- e := p.(*estimator)
- if assert.NotNil(t, e) && assert.NotNil(t, e.baselineWF) {
- assert.Equal(t, "my-wftmpl-baseline", e.baselineWF.Name)
- }
- }
+ require.NotNil(t, p)
+ e := p.(*estimator)
+ require.NotNil(t, e)
+ require.NotNil(t, e.baselineWF)
+ assert.Equal(t, "my-wftmpl-baseline", e.baselineWF.Name)
})
t.Run("ClusterWorkflowTemplate", func(t *testing.T) {
p, err := f.NewEstimator(&wfv1.Workflow{
ObjectMeta: metav1.ObjectMeta{Namespace: "my-ns", Labels: map[string]string{common.LabelKeyClusterWorkflowTemplate: "my-cwft"}},
})
require.NoError(t, err)
- if assert.NotNil(t, p) {
- e := p.(*estimator)
- if assert.NotNil(t, e) && assert.NotNil(t, e.baselineWF) {
- assert.Equal(t, "my-cwft-baseline", e.baselineWF.Name)
- }
- }
+ require.NotNil(t, p)
+ e := p.(*estimator)
+ require.NotNil(t, e)
+ require.NotNil(t, e.baselineWF)
+ assert.Equal(t, "my-cwft-baseline", e.baselineWF.Name)
})
t.Run("CronWorkflowTemplate", func(t *testing.T) {
p, err := f.NewEstimator(&wfv1.Workflow{
ObjectMeta: metav1.ObjectMeta{Namespace: "my-ns", Labels: map[string]string{common.LabelKeyCronWorkflow: "my-cwf"}},
})
require.NoError(t, err)
- if assert.NotNil(t, p) {
- e := p.(*estimator)
- if assert.NotNil(t, e) && assert.NotNil(t, e.baselineWF) {
- assert.Equal(t, "my-cwf-baseline", e.baselineWF.Name)
- }
- }
+ require.NotNil(t, p)
+ e := p.(*estimator)
+ require.NotNil(t, e)
+ require.NotNil(t, e.baselineWF)
+ assert.Equal(t, "my-cwf-baseline", e.baselineWF.Name)
})
t.Run("WorkflowArchive", func(t *testing.T) {
p, err := f.NewEstimator(&wfv1.Workflow{
ObjectMeta: metav1.ObjectMeta{Namespace: "my-ns", Labels: map[string]string{common.LabelKeyWorkflowTemplate: "my-archived-wftmpl"}},
})
require.NoError(t, err)
- if assert.NotNil(t, p) {
- e := p.(*estimator)
- if assert.NotNil(t, e) && assert.NotNil(t, e.baselineWF) {
- assert.Equal(t, "my-archived-wftmpl-baseline", e.baselineWF.Name)
- }
- }
+ require.NotNil(t, p)
+ e := p.(*estimator)
+ require.NotNil(t, e)
+ require.NotNil(t, e.baselineWF)
+ assert.Equal(t, "my-archived-wftmpl-baseline", e.baselineWF.Name)
})
}
diff --git a/workflow/controller/exit_handler_test.go b/workflow/controller/exit_handler_test.go
index 7b37d759ce61..b55923c95d3d 100644
--- a/workflow/controller/exit_handler_test.go
+++ b/workflow/controller/exit_handler_test.go
@@ -1062,11 +1062,9 @@ spec:
hookNode := woc.wf.Status.Nodes.FindByDisplayName(exitNodeName)
- if assert.NotNil(t, hookNode) {
- assert.NotNil(t, hookNode.Inputs)
- if assert.Len(t, hookNode.Inputs.Parameters, 1) {
- assert.NotNil(t, hookNode.Inputs.Parameters[0].Value)
- assert.Equal(t, hookNode.Inputs.Parameters[0].Value.String(), string(apiv1.PodFailed))
- }
- }
+ require.NotNil(t, hookNode)
+ assert.NotNil(t, hookNode.Inputs)
+ require.Len(t, hookNode.Inputs.Parameters, 1)
+ assert.NotNil(t, hookNode.Inputs.Parameters[0].Value)
+ assert.Equal(t, hookNode.Inputs.Parameters[0].Value.String(), string(apiv1.PodFailed))
}
diff --git a/workflow/controller/informer/cluster_workflow_template_convert_test.go b/workflow/controller/informer/cluster_workflow_template_convert_test.go
index fd72fce03d63..45e541ba043a 100644
--- a/workflow/controller/informer/cluster_workflow_template_convert_test.go
+++ b/workflow/controller/informer/cluster_workflow_template_convert_test.go
@@ -24,9 +24,8 @@ func Test_objectToClusterWorkflowTemplate(t *testing.T) {
"spec": "ops",
}})
require.EqualError(t, err, "malformed cluster workflow template \"my-name\": cannot restore struct from: string")
- if assert.NotNil(t, v) {
- assert.Equal(t, "my-name", v.Name)
- }
+ require.NotNil(t, v)
+ assert.Equal(t, "my-name", v.Name)
})
t.Run("ClusterWorkflowTemplate", func(t *testing.T) {
v, err := objectToClusterWorkflowTemplate(&unstructured.Unstructured{})
diff --git a/workflow/controller/informer/workflow_template_convert_test.go b/workflow/controller/informer/workflow_template_convert_test.go
index 48d65d0b4d94..341e89bc06ae 100644
--- a/workflow/controller/informer/workflow_template_convert_test.go
+++ b/workflow/controller/informer/workflow_template_convert_test.go
@@ -24,10 +24,9 @@ func Test_objectToWorkflowTemplate(t *testing.T) {
"spec": "ops",
}})
require.EqualError(t, err, "malformed workflow template \"my-ns/my-name\": cannot restore struct from: string")
- if assert.NotNil(t, v) {
- assert.Equal(t, "my-ns", v.Namespace)
- assert.Equal(t, "my-name", v.Name)
- }
+ require.NotNil(t, v)
+ assert.Equal(t, "my-ns", v.Namespace)
+ assert.Equal(t, "my-name", v.Name)
})
t.Run("WorkflowTemplate", func(t *testing.T) {
v, err := objectToWorkflowTemplate(&unstructured.Unstructured{})
diff --git a/workflow/controller/operator.go b/workflow/controller/operator.go
index fb7955cbe099..2ca55ca833a3 100644
--- a/workflow/controller/operator.go
+++ b/workflow/controller/operator.go
@@ -234,10 +234,7 @@ func (woc *wfOperationCtx) operate(ctx context.Context) {
woc.addArtifactGCFinalizer()
// Reconciliation of Outputs (Artifacts). See ReportOutputs() of executor.go.
- err = woc.taskResultReconciliation()
- if err != nil {
- woc.markWorkflowError(ctx, fmt.Errorf("failed to reconcile: %v", err))
- }
+ woc.taskResultReconciliation()
// Do artifact GC if task result reconciliation is complete.
if woc.wf.Status.Fulfilled() {
@@ -999,7 +996,7 @@ func (woc *wfOperationCtx) processNodeRetries(node *wfv1.NodeStatus, retryStrate
maxDurationDeadline := time.Time{}
// Process max duration limit
if retryStrategy.Backoff.MaxDuration != "" && len(childNodeIds) > 0 {
- maxDuration, err := parseStringToDuration(retryStrategy.Backoff.MaxDuration)
+ maxDuration, err := wfv1.ParseStringToDuration(retryStrategy.Backoff.MaxDuration)
if err != nil {
return nil, false, err
}
@@ -1019,7 +1016,7 @@ func (woc *wfOperationCtx) processNodeRetries(node *wfv1.NodeStatus, retryStrate
return nil, false, fmt.Errorf("no base duration specified for retryStrategy")
}
- baseDuration, err := parseStringToDuration(retryStrategy.Backoff.Duration)
+ baseDuration, err := wfv1.ParseStringToDuration(retryStrategy.Backoff.Duration)
if err != nil {
return nil, false, err
}
@@ -1227,7 +1224,7 @@ func (woc *wfOperationCtx) podReconciliation(ctx context.Context) (error, bool)
}
if recentlyStarted {
- // If the pod was deleted, then we it is possible that the controller never get another informer message about it.
+ // If the pod was deleted, then it is possible that the controller never gets another informer message about it.
// In this case, the workflow will only be requeued after the resync period (20m). This means
// workflow will not update for 20m. Requeuing here prevents that happening.
woc.requeue()
@@ -1317,19 +1314,6 @@ func (woc *wfOperationCtx) getAllWorkflowPods() ([]*apiv1.Pod, error) {
return pods, nil
}
-func (woc *wfOperationCtx) getAllWorkflowPodsMap() (map[string]*apiv1.Pod, error) {
- podList, err := woc.getAllWorkflowPods()
- if err != nil {
- return nil, err
- }
- podMap := make(map[string]*apiv1.Pod)
- for _, pod := range podList {
- nodeID := woc.nodeID(pod)
- podMap[nodeID] = pod
- }
- return podMap, nil
-}
-
func printPodSpecLog(pod *apiv1.Pod, wfName string) {
podSpecByte, err := json.Marshal(pod)
log := log.WithField("workflow", wfName).
@@ -1496,14 +1480,34 @@ func (woc *wfOperationCtx) assessNodeStatus(ctx context.Context, pod *apiv1.Pod,
}
}
+ waitContainerCleanedUp := true
// We cannot fail the node if the wait container is still running because it may be busy saving outputs, and these
// would not get captured successfully.
for _, c := range pod.Status.ContainerStatuses {
- if c.Name == common.WaitContainerName && c.State.Running != nil && new.Phase.Completed() {
- woc.log.WithField("new.phase", new.Phase).Info("leaving phase un-changed: wait container is not yet terminated ")
- new.Phase = old.Phase
+ if c.Name == common.WaitContainerName {
+ waitContainerCleanedUp = false
+ switch {
+ case c.State.Running != nil && new.Phase.Completed():
+ woc.log.WithField("new.phase", new.Phase).Info("leaving phase un-changed: wait container is not yet terminated ")
+ new.Phase = old.Phase
+ case c.State.Terminated != nil && c.State.Terminated.ExitCode != 0:
+ // Mark its taskResult as completed directly since wait container did not exit normally,
+ // and it will never have a chance to report taskResult correctly.
+ nodeID := woc.nodeID(pod)
+ woc.log.WithFields(log.Fields{"nodeID": nodeID, "exitCode": c.State.Terminated.ExitCode, "reason": c.State.Terminated.Reason}).
+ Warn("marking its taskResult as completed since wait container did not exit normally")
+ woc.wf.Status.MarkTaskResultComplete(nodeID)
+ }
}
}
+ if pod.Status.Phase == apiv1.PodFailed && pod.Status.Reason == "Evicted" && waitContainerCleanedUp {
+ // Mark its taskResult as completed directly since the wait container has been cleaned up because the pod was evicted,
+ // and it will never have a chance to report taskResult correctly.
+ nodeID := woc.nodeID(pod)
+ woc.log.WithFields(log.Fields{"nodeID": nodeID}).
+ Warn("marking its taskResult as completed since wait container has been cleaned up.")
+ woc.wf.Status.MarkTaskResultComplete(nodeID)
+ }
// if we are transitioning from Pending to a different state, clear out unchanged message
if old.Phase == wfv1.NodePending && new.Phase != wfv1.NodePending && old.Message == new.Message {
@@ -3265,15 +3269,66 @@ func (woc *wfOperationCtx) processAggregateNodeOutputs(scope *wfScope, prefix st
// Adding per-output aggregated value placeholders
for outputName, valueList := range outputParamValueLists {
key = fmt.Sprintf("%s.outputs.parameters.%s", prefix, outputName)
- valueListJSON, err := json.Marshal(valueList)
+ valueListJson, err := aggregatedJsonValueList(valueList)
if err != nil {
return err
}
- scope.addParamToScope(key, string(valueListJSON))
+ scope.addParamToScope(key, valueListJson)
}
return nil
}
+// tryJsonUnmarshal unmarshals each item in the list, assuming it is
+// structured JSON (a map or a list) and NOT a plain scalar JSON value.
+// It returns success only if all items can be unmarshalled and are either
+// maps or lists.
+func tryJsonUnmarshal(valueList []string) ([]interface{}, bool) {
+ success := true
+ var list []interface{}
+ for _, value := range valueList {
+ var unmarshalledValue interface{}
+ err := json.Unmarshal([]byte(value), &unmarshalledValue)
+ if err != nil {
+ success = false
+ break // Unmarshal failed, fall back to strings
+ }
+ switch unmarshalledValue.(type) {
+ case []interface{}:
+ case map[string]interface{}:
+ // Keep these types
+ default:
+ // Drop anything else
+ success = false
+ }
+ if !success {
+ break
+ }
+ list = append(list, unmarshalledValue)
+ }
+ return list, success
+}
+
+// aggregatedJsonValueList returns a string containing a JSON list, holding
+// all of the values from the valueList.
+// It uses tryJsonUnmarshal to decide whether the items should be embedded as structured JSON or kept as plain strings.
+func aggregatedJsonValueList(valueList []string) (string, error) {
+ unmarshalledList, success := tryJsonUnmarshal(valueList)
+ var valueListJSON []byte
+ var err error
+ if success {
+ valueListJSON, err = json.Marshal(unmarshalledList)
+ if err != nil {
+ return "", err
+ }
+ } else {
+ valueListJSON, err = json.Marshal(valueList)
+ if err != nil {
+ return "", err
+ }
+ }
+ return string(valueListJSON), nil
+}
+
// addParamToGlobalScope exports any desired node outputs to the global scope, and adds it to the global outputs.
func (woc *wfOperationCtx) addParamToGlobalScope(param wfv1.Parameter) {
if param.GlobalName == "" {
@@ -3421,7 +3476,7 @@ func (woc *wfOperationCtx) executeSuspend(nodeName string, templateScope string,
if err != nil {
return nil, err
}
- suspendDuration, err := parseStringToDuration(tmpl.Suspend.Duration)
+ suspendDuration, err := wfv1.ParseStringToDuration(tmpl.Suspend.Duration)
if err != nil {
return node, err
}
@@ -3494,19 +3549,6 @@ func addRawOutputFields(node *wfv1.NodeStatus, tmpl *wfv1.Template) *wfv1.NodeSt
return node
}
-func parseStringToDuration(durationString string) (time.Duration, error) {
- var suspendDuration time.Duration
- // If no units are attached, treat as seconds
- if val, err := strconv.Atoi(durationString); err == nil {
- suspendDuration = time.Duration(val) * time.Second
- } else if duration, err := time.ParseDuration(durationString); err == nil {
- suspendDuration = duration
- } else {
- return 0, fmt.Errorf("unable to parse %s as a duration: %w", durationString, err)
- }
- return suspendDuration, nil
-}
-
func processItem(tmpl template.Template, name string, index int, item wfv1.Item, obj interface{}, whenCondition string) (string, error) {
replaceMap := make(map[string]string)
var newName string
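For context on the aggregation change above: aggregated output parameters used to be marshalled as a plain list of strings, whereas aggregatedJsonValueList now embeds items that are themselves JSON maps or lists as structured JSON. A rough standalone sketch of the difference follows; it simplifies the fallback, since the real helper reverts to plain strings for the whole list if any single item is not a map or list:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	values := []string{`{"number":"1"}`, `{"number":"2"}`}

	// Previous behaviour: the raw strings were marshalled as-is, so JSON-valued
	// outputs ended up escaped inside the aggregate.
	old, _ := json.Marshal(values)
	fmt.Println(string(old)) // ["{\"number\":\"1\"}","{\"number\":\"2\"}"]

	// New behaviour: items that parse as JSON maps or lists are unmarshalled
	// first, so the aggregate becomes a JSON list of objects.
	var parsed []interface{}
	for _, v := range values {
		var item interface{}
		if err := json.Unmarshal([]byte(v), &item); err != nil {
			parsed = append(parsed, v) // simplified per-item fallback for this sketch
			continue
		}
		switch item.(type) {
		case map[string]interface{}, []interface{}:
			parsed = append(parsed, item) // keep structured JSON as-is
		default:
			parsed = append(parsed, v) // scalars stay as plain strings
		}
	}
	updated, _ := json.Marshal(parsed)
	fmt.Println(string(updated)) // [{"number":"1"},{"number":"2"}]
}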
diff --git a/workflow/controller/operator_aggregation_test.go b/workflow/controller/operator_aggregation_test.go
new file mode 100644
index 000000000000..b966c448a6d3
--- /dev/null
+++ b/workflow/controller/operator_aggregation_test.go
@@ -0,0 +1,64 @@
+package controller
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestTryJsonUnmarshal(t *testing.T) {
+ for _, testcase := range []struct {
+ input []string
+ success bool
+ expected []interface{}
+ }{
+ {[]string{"1"}, false, nil},
+ {[]string{"1", "2"}, false, nil},
+ {[]string{"foo"}, false, nil},
+ {[]string{"foo", "bar"}, false, nil},
+ {[]string{`["1"]`, "2"}, false, nil}, // Fails on second element
+ {[]string{`{"foo":"1"}`, "2"}, false, nil}, // Fails on second element
+ {[]string{`["1"]`, `["2"]`}, true, []interface{}{[]interface{}{"1"}, []interface{}{"2"}}},
+ {[]string{` ["1"]`, `["2"] `}, true, []interface{}{[]interface{}{"1"}, []interface{}{"2"}}}, // Whitespace-padded variant
+ {[]string{"\n[\"1\"] \n", "\t[\"2\"]\t"}, true, []interface{}{[]interface{}{"1"}, []interface{}{"2"}}},
+ {[]string{`{"number":"1"}`, `{"number":"2"}`}, true, []interface{}{map[string]interface{}{"number": "1"}, map[string]interface{}{"number": "2"}}},
+ {[]string{`[{"foo":"apple", "bar":"pear"}]`, `{"foo":"banana"}`}, true, []interface{}{[]interface{}{map[string]interface{}{"bar": "pear", "foo": "apple"}}, map[string]interface{}{"foo": "banana"}}},
+ } {
+ t.Run(fmt.Sprintf("Unmarshal %v", testcase.input),
+ func(t *testing.T) {
+ list, success := tryJsonUnmarshal(testcase.input)
+ require.Equal(t, testcase.success, success)
+ if success {
+ assert.Equal(t, testcase.expected, list)
+ }
+ })
+ }
+}
+
+func TestAggregatedJsonValueList(t *testing.T) {
+ for _, testcase := range []struct {
+ input []string
+ expected string
+ }{
+ {[]string{"1"}, `["1"]`},
+ {[]string{"1", "2"}, `["1","2"]`},
+ {[]string{"foo"}, `["foo"]`},
+ {[]string{"foo", "bar"}, `["foo","bar"]`},
+ {[]string{`["1"]`, "2"}, `["[\"1\"]","2"]`}, // This is expected, but not really useful
+ {[]string{`{"foo":"1"}`, "2"}, `["{\"foo\":\"1\"}","2"]`}, // This is expected, but not really useful
+ {[]string{`["1"]`, `["2"]`}, `[["1"],["2"]]`},
+ {[]string{` ["1"]`, `["2"] `}, `[["1"],["2"]]`},
+ {[]string{"\n[\"1\"] \n", "\t[\"2\"]\t"}, `[["1"],["2"]]`},
+ {[]string{`{"number":"1"}`, `{"number":"2"}`}, `[{"number":"1"},{"number":"2"}]`},
+ {[]string{`[{"foo":"apple", "bar":"pear"}]`}, `[[{"bar":"pear","foo":"apple"}]]`}, // Sorted map keys here may make this a fragile test, can be dropped
+ } {
+ t.Run(fmt.Sprintf("Aggregate %v", testcase.input),
+ func(t *testing.T) {
+ result, err := aggregatedJsonValueList(testcase.input)
+ require.NoError(t, err)
+ assert.Equal(t, testcase.expected, result)
+ })
+ }
+}
diff --git a/workflow/controller/operator_data_test.go b/workflow/controller/operator_data_test.go
index 6ae315daac48..019a65704543 100644
--- a/workflow/controller/operator_data_test.go
+++ b/workflow/controller/operator_data_test.go
@@ -6,6 +6,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
)
@@ -236,7 +237,6 @@ func TestDataTemplateCreatesPod(t *testing.T) {
woc.operate(ctx)
node := woc.wf.Status.Nodes.FindByDisplayName("collect-artifact")
- if assert.NotNil(t, node) {
- assert.Equal(t, wfv1.NodePending, node.Phase)
- }
+ require.NotNil(t, node)
+ assert.Equal(t, wfv1.NodePending, node.Phase)
}
diff --git a/workflow/controller/operator_template_scope_test.go b/workflow/controller/operator_template_scope_test.go
index 340582d53713..65088519b1fa 100644
--- a/workflow/controller/operator_template_scope_test.go
+++ b/workflow/controller/operator_template_scope_test.go
@@ -87,40 +87,34 @@ func TestTemplateScope(t *testing.T) {
wf = woc.wf
node := findNodeByName(wf.Status.Nodes, "test-template-scope[0].step")
- if assert.NotNil(t, node, "Node %s not found", "test-templte-scope") {
- assert.Equal(t, wfv1.NodeTypeSteps, node.Type)
- assert.Equal(t, "local/test-template-scope", node.TemplateScope)
- }
+ require.NotNil(t, node, "Node %s not found", "test-templte-scope")
+ assert.Equal(t, wfv1.NodeTypeSteps, node.Type)
+ assert.Equal(t, "local/test-template-scope", node.TemplateScope)
node = findNodeByName(wf.Status.Nodes, "test-template-scope[0].step[0]")
- if assert.NotNil(t, node, "Node %s not found", "test-templte-scope[0]") {
- assert.Equal(t, wfv1.NodeTypeStepGroup, node.Type)
- assert.Equal(t, "namespaced/test-template-scope-1", node.TemplateScope)
- }
+ require.NotNil(t, node, "Node %s not found", "test-templte-scope[0]")
+ assert.Equal(t, wfv1.NodeTypeStepGroup, node.Type)
+ assert.Equal(t, "namespaced/test-template-scope-1", node.TemplateScope)
node = findNodeByName(wf.Status.Nodes, "test-template-scope[0].step[0].hello")
- if assert.NotNil(t, node, "Node %s not found", "test-templte-scope[0].hello") {
- assert.Equal(t, wfv1.NodeTypePod, node.Type)
- assert.Equal(t, "namespaced/test-template-scope-1", node.TemplateScope)
- }
+ require.NotNil(t, node, "Node %s not found", "test-templte-scope[0].hello")
+ assert.Equal(t, wfv1.NodeTypePod, node.Type)
+ assert.Equal(t, "namespaced/test-template-scope-1", node.TemplateScope)
node = findNodeByName(wf.Status.Nodes, "test-template-scope[0].step[0].other-wftmpl")
- if assert.NotNil(t, node, "Node %s not found", "test-template-scope[0].other-wftmpl") {
- assert.Equal(t, wfv1.NodeTypeSteps, node.Type)
- assert.Equal(t, "namespaced/test-template-scope-1", node.TemplateScope)
- }
+ require.NotNil(t, node, "Node %s not found", "test-template-scope[0].other-wftmpl")
+ assert.Equal(t, wfv1.NodeTypeSteps, node.Type)
+ assert.Equal(t, "namespaced/test-template-scope-1", node.TemplateScope)
node = findNodeByName(wf.Status.Nodes, "test-template-scope[0].step[0].other-wftmpl[0]")
- if assert.NotNil(t, node, "Node %s not found", "test-template-scope[0].other-wftmpl[0]") {
- assert.Equal(t, wfv1.NodeTypeStepGroup, node.Type)
- assert.Equal(t, "namespaced/test-template-scope-2", node.TemplateScope)
- }
+ require.NotNil(t, node, "Node %s not found", "test-template-scope[0].other-wftmpl[0]")
+ assert.Equal(t, wfv1.NodeTypeStepGroup, node.Type)
+ assert.Equal(t, "namespaced/test-template-scope-2", node.TemplateScope)
node = findNodeByName(wf.Status.Nodes, "test-template-scope[0].step[0].other-wftmpl[0].hello")
- if assert.NotNil(t, node, "Node %s not found", "test-template-scope[0].other-wftmpl[0].hello") {
- assert.Equal(t, wfv1.NodeTypePod, node.Type)
- assert.Equal(t, "namespaced/test-template-scope-2", node.TemplateScope)
- }
+ require.NotNil(t, node, "Node %s not found", "test-template-scope[0].other-wftmpl[0].hello")
+ assert.Equal(t, wfv1.NodeTypePod, node.Type)
+ assert.Equal(t, "namespaced/test-template-scope-2", node.TemplateScope)
}
var testTemplateScopeWithParamWorkflowYaml = `
@@ -183,34 +177,29 @@ func TestTemplateScopeWithParam(t *testing.T) {
require.NoError(t, err)
node := findNodeByName(wf.Status.Nodes, "test-template-scope-with-param[0].step")
- if assert.NotNil(t, node, "Node %s not found", "test-template-scope-with-param") {
- assert.Equal(t, wfv1.NodeTypeSteps, node.Type)
- assert.Equal(t, "local/test-template-scope-with-param", node.TemplateScope)
- }
+ require.NotNil(t, node, "Node %s not found", "test-template-scope-with-param")
+ assert.Equal(t, wfv1.NodeTypeSteps, node.Type)
+ assert.Equal(t, "local/test-template-scope-with-param", node.TemplateScope)
node = findNodeByName(wf.Status.Nodes, "test-template-scope-with-param[0].step[0]")
- if assert.NotNil(t, node, "Node %s not found", "test-template-scope-with-param[0]") {
- assert.Equal(t, wfv1.NodeTypeStepGroup, node.Type)
- assert.Equal(t, "namespaced/test-template-scope-with-param-1", node.TemplateScope)
- }
+ require.NotNil(t, node, "Node %s not found", "test-template-scope-with-param[0]")
+ assert.Equal(t, wfv1.NodeTypeStepGroup, node.Type)
+ assert.Equal(t, "namespaced/test-template-scope-with-param-1", node.TemplateScope)
node = findNodeByName(wf.Status.Nodes, "test-template-scope-with-param[0].step[0].print-string(0:x)")
- if assert.NotNil(t, node, "Node %s not found", "test-template-scope-with-param[0].print-string(0:x)") {
- assert.Equal(t, wfv1.NodeTypePod, node.Type)
- assert.Equal(t, "namespaced/test-template-scope-with-param-1", node.TemplateScope)
- }
+ require.NotNil(t, node, "Node %s not found", "test-template-scope-with-param[0].print-string(0:x)")
+ assert.Equal(t, wfv1.NodeTypePod, node.Type)
+ assert.Equal(t, "namespaced/test-template-scope-with-param-1", node.TemplateScope)
node = findNodeByName(wf.Status.Nodes, "test-template-scope-with-param[0].step[0].print-string(1:y)")
- if assert.NotNil(t, node, "Node %s not found", "test-template-scope-with-param[0].print-string(1:y)") {
- assert.Equal(t, wfv1.NodeTypePod, node.Type)
- assert.Equal(t, "namespaced/test-template-scope-with-param-1", node.TemplateScope)
- }
+ require.NotNil(t, node, "Node %s not found", "test-template-scope-with-param[0].print-string(1:y)")
+ assert.Equal(t, wfv1.NodeTypePod, node.Type)
+ assert.Equal(t, "namespaced/test-template-scope-with-param-1", node.TemplateScope)
node = findNodeByName(wf.Status.Nodes, "test-template-scope-with-param[0].step[0].print-string(2:z)")
- if assert.NotNil(t, node, "Node %s not found", "test-template-scope-with-param[0].print-string(2:z)") {
- assert.Equal(t, wfv1.NodeTypePod, node.Type)
- assert.Equal(t, "namespaced/test-template-scope-with-param-1", node.TemplateScope)
- }
+ require.NotNil(t, node, "Node %s not found", "test-template-scope-with-param[0].print-string(2:z)")
+ assert.Equal(t, wfv1.NodeTypePod, node.Type)
+ assert.Equal(t, "namespaced/test-template-scope-with-param-1", node.TemplateScope)
}
var testTemplateScopeNestedStepsWithParamsWorkflowYaml = `
@@ -277,46 +266,39 @@ func TestTemplateScopeNestedStepsWithParams(t *testing.T) {
require.NoError(t, err)
node := findNodeByName(wf.Status.Nodes, "test-template-scope-nested-steps-with-params")
- if assert.NotNil(t, node, "Node %s not found", "test-template-scope-with-param") {
- assert.Equal(t, wfv1.NodeTypeSteps, node.Type)
- assert.Equal(t, "local/test-template-scope-nested-steps-with-params", node.TemplateScope)
- }
+ require.NotNil(t, node, "Node %s not found", "test-template-scope-with-param")
+ assert.Equal(t, wfv1.NodeTypeSteps, node.Type)
+ assert.Equal(t, "local/test-template-scope-nested-steps-with-params", node.TemplateScope)
node = findNodeByName(wf.Status.Nodes, "test-template-scope-nested-steps-with-params[0].step[0]")
- if assert.NotNil(t, node, "Node %s not found", "test-template-scope-with-param[0]") {
- assert.Equal(t, wfv1.NodeTypeStepGroup, node.Type)
- assert.Equal(t, "namespaced/test-template-scope-nested-steps-with-params-1", node.TemplateScope)
- }
+ require.NotNil(t, node, "Node %s not found", "test-template-scope-with-param[0]")
+ assert.Equal(t, wfv1.NodeTypeStepGroup, node.Type)
+ assert.Equal(t, "namespaced/test-template-scope-nested-steps-with-params-1", node.TemplateScope)
node = findNodeByName(wf.Status.Nodes, "test-template-scope-nested-steps-with-params[0].step[0].main")
- if assert.NotNil(t, node, "Node %s not found", "test-template-scope-nested-steps-with-params[0].main") {
- assert.Equal(t, wfv1.NodeTypeSteps, node.Type)
- assert.Equal(t, "namespaced/test-template-scope-nested-steps-with-params-1", node.TemplateScope)
- }
+ require.NotNil(t, node, "Node %s not found", "test-template-scope-nested-steps-with-params[0].main")
+ assert.Equal(t, wfv1.NodeTypeSteps, node.Type)
+ assert.Equal(t, "namespaced/test-template-scope-nested-steps-with-params-1", node.TemplateScope)
node = findNodeByName(wf.Status.Nodes, "test-template-scope-nested-steps-with-params[0].step[0].main[0]")
- if assert.NotNil(t, node, "Node %s not found", "test-template-scope-nested-steps-with-params[0].main[0]") {
- assert.Equal(t, wfv1.NodeTypeStepGroup, node.Type)
- assert.Equal(t, "namespaced/test-template-scope-nested-steps-with-params-1", node.TemplateScope)
- }
+ require.NotNil(t, node, "Node %s not found", "test-template-scope-nested-steps-with-params[0].main[0]")
+ assert.Equal(t, wfv1.NodeTypeStepGroup, node.Type)
+ assert.Equal(t, "namespaced/test-template-scope-nested-steps-with-params-1", node.TemplateScope)
node = findNodeByName(wf.Status.Nodes, "test-template-scope-nested-steps-with-params[0].step[0].main[0].print-string(0:x)")
- if assert.NotNil(t, node, "Node %s not found", "test-template-scope-nested-steps-with-params[0].main[0].print-string(0:x)") {
- assert.Equal(t, wfv1.NodeTypePod, node.Type)
- assert.Equal(t, "namespaced/test-template-scope-nested-steps-with-params-1", node.TemplateScope)
- }
+ require.NotNil(t, node, "Node %s not found", "test-template-scope-nested-steps-with-params[0].main[0].print-string(0:x)")
+ assert.Equal(t, wfv1.NodeTypePod, node.Type)
+ assert.Equal(t, "namespaced/test-template-scope-nested-steps-with-params-1", node.TemplateScope)
node = findNodeByName(wf.Status.Nodes, "test-template-scope-nested-steps-with-params[0].step[0].main[0].print-string(1:y)")
- if assert.NotNil(t, node, "Node %s not found", "test-template-scope-nested-steps-with-params[0].main[0].print-string(1:y)") {
- assert.Equal(t, wfv1.NodeTypePod, node.Type)
- assert.Equal(t, "namespaced/test-template-scope-nested-steps-with-params-1", node.TemplateScope)
- }
+ require.NotNil(t, node, "Node %s not found", "test-template-scope-nested-steps-with-params[0].main[0].print-string(1:y)")
+ assert.Equal(t, wfv1.NodeTypePod, node.Type)
+ assert.Equal(t, "namespaced/test-template-scope-nested-steps-with-params-1", node.TemplateScope)
node = findNodeByName(wf.Status.Nodes, "test-template-scope-nested-steps-with-params[0].step[0].main[0].print-string(2:z)")
- if assert.NotNil(t, node, "Node %s not found", "test-template-scope-nested-steps-with-params[0].main[0].print-string(2:z)") {
- assert.Equal(t, wfv1.NodeTypePod, node.Type)
- assert.Equal(t, "namespaced/test-template-scope-nested-steps-with-params-1", node.TemplateScope)
- }
+ require.NotNil(t, node, "Node %s not found", "test-template-scope-nested-steps-with-params[0].main[0].print-string(2:z)")
+ assert.Equal(t, wfv1.NodeTypePod, node.Type)
+ assert.Equal(t, "namespaced/test-template-scope-nested-steps-with-params-1", node.TemplateScope)
}
var testTemplateScopeDAGWorkflowYaml = `
@@ -386,40 +368,34 @@ func TestTemplateScopeDAG(t *testing.T) {
require.NoError(t, err)
node := findNodeByName(wf.Status.Nodes, "test-template-scope-dag[0].step")
- if assert.NotNil(t, node, "Node %s not found", "test-template-scope-dag") {
- assert.Equal(t, wfv1.NodeTypeDAG, node.Type)
- assert.Equal(t, "local/test-template-scope-dag", node.TemplateScope)
- }
+ require.NotNil(t, node, "Node %s not found", "test-template-scope-dag")
+ assert.Equal(t, wfv1.NodeTypeDAG, node.Type)
+ assert.Equal(t, "local/test-template-scope-dag", node.TemplateScope)
node = findNodeByName(wf.Status.Nodes, "test-template-scope-dag[0].step.A")
- if assert.NotNil(t, node, "Node %s not found", "test-template-scope-dag.A") {
- assert.Equal(t, wfv1.NodeTypePod, node.Type)
- assert.Equal(t, "namespaced/test-template-scope-dag-1", node.TemplateScope)
- }
+ require.NotNil(t, node, "Node %s not found", "test-template-scope-dag.A")
+ assert.Equal(t, wfv1.NodeTypePod, node.Type)
+ assert.Equal(t, "namespaced/test-template-scope-dag-1", node.TemplateScope)
node = findNodeByName(wf.Status.Nodes, "test-template-scope-dag[0].step.B")
- if assert.NotNil(t, node, "Node %s not found", "test-template-scope-dag.B") {
- assert.Equal(t, wfv1.NodeTypeTaskGroup, node.Type)
- assert.Equal(t, "namespaced/test-template-scope-dag-1", node.TemplateScope)
- }
+ require.NotNil(t, node, "Node %s not found", "test-template-scope-dag.B")
+ assert.Equal(t, wfv1.NodeTypeTaskGroup, node.Type)
+ assert.Equal(t, "namespaced/test-template-scope-dag-1", node.TemplateScope)
node = findNodeByName(wf.Status.Nodes, "test-template-scope-dag[0].step.B(0:x)")
- if assert.NotNil(t, node, "Node %s not found", "test-template-scope-dag.B(0:x") {
- assert.Equal(t, wfv1.NodeTypePod, node.Type)
- assert.Equal(t, "namespaced/test-template-scope-dag-1", node.TemplateScope)
- }
+ require.NotNil(t, node, "Node %s not found", "test-template-scope-dag.B(0:x")
+ assert.Equal(t, wfv1.NodeTypePod, node.Type)
+ assert.Equal(t, "namespaced/test-template-scope-dag-1", node.TemplateScope)
node = findNodeByName(wf.Status.Nodes, "test-template-scope-dag[0].step.B(1:y)")
- if assert.NotNil(t, node, "Node %s not found", "test-template-scope-dag.B(0:x") {
- assert.Equal(t, wfv1.NodeTypePod, node.Type)
- assert.Equal(t, "namespaced/test-template-scope-dag-1", node.TemplateScope)
- }
+ require.NotNil(t, node, "Node %s not found", "test-template-scope-dag.B(0:x")
+ assert.Equal(t, wfv1.NodeTypePod, node.Type)
+ assert.Equal(t, "namespaced/test-template-scope-dag-1", node.TemplateScope)
node = findNodeByName(wf.Status.Nodes, "test-template-scope-dag[0].step.B(2:z)")
- if assert.NotNil(t, node, "Node %s not found", "test-template-scope-dag.B(0:x") {
- assert.Equal(t, wfv1.NodeTypePod, node.Type)
- assert.Equal(t, "namespaced/test-template-scope-dag-1", node.TemplateScope)
- }
+ require.NotNil(t, node, "Node %s not found", "test-template-scope-dag.B(0:x")
+ assert.Equal(t, wfv1.NodeTypePod, node.Type)
+ assert.Equal(t, "namespaced/test-template-scope-dag-1", node.TemplateScope)
}
func findNodeByName(nodes map[string]wfv1.NodeStatus, name string) *wfv1.NodeStatus {
@@ -488,38 +464,32 @@ func TestTemplateClusterScope(t *testing.T) {
require.NoError(t, err)
node := findNodeByName(wf.Status.Nodes, "test-template-scope[0].step")
- if assert.NotNil(t, node, "Node %s not found", "test-templte-scope") {
- assert.Equal(t, wfv1.NodeTypeSteps, node.Type)
- assert.Equal(t, "local/test-template-scope", node.TemplateScope)
- }
+ require.NotNil(t, node, "Node %s not found", "test-templte-scope")
+ assert.Equal(t, wfv1.NodeTypeSteps, node.Type)
+ assert.Equal(t, "local/test-template-scope", node.TemplateScope)
node = findNodeByName(wf.Status.Nodes, "test-template-scope[0].step[0]")
- if assert.NotNil(t, node, "Node %s not found", "test-templte-scope[0]") {
- assert.Equal(t, wfv1.NodeTypeStepGroup, node.Type)
- assert.Equal(t, "cluster/test-template-scope-1", node.TemplateScope)
- }
+ require.NotNil(t, node, "Node %s not found", "test-templte-scope[0]")
+ assert.Equal(t, wfv1.NodeTypeStepGroup, node.Type)
+ assert.Equal(t, "cluster/test-template-scope-1", node.TemplateScope)
node = findNodeByName(wf.Status.Nodes, "test-template-scope[0].step[0].hello")
- if assert.NotNil(t, node, "Node %s not found", "test-templte-scope[0].hello") {
- assert.Equal(t, wfv1.NodeTypePod, node.Type)
- assert.Equal(t, "cluster/test-template-scope-1", node.TemplateScope)
- }
+ require.NotNil(t, node, "Node %s not found", "test-templte-scope[0].hello")
+ assert.Equal(t, wfv1.NodeTypePod, node.Type)
+ assert.Equal(t, "cluster/test-template-scope-1", node.TemplateScope)
node = findNodeByName(wf.Status.Nodes, "test-template-scope[0].step[0].other-wftmpl")
- if assert.NotNil(t, node, "Node %s not found", "test-template-scope[0].other-wftmpl") {
- assert.Equal(t, wfv1.NodeTypeSteps, node.Type)
- assert.Equal(t, "cluster/test-template-scope-1", node.TemplateScope)
- }
+ require.NotNil(t, node, "Node %s not found", "test-template-scope[0].other-wftmpl")
+ assert.Equal(t, wfv1.NodeTypeSteps, node.Type)
+ assert.Equal(t, "cluster/test-template-scope-1", node.TemplateScope)
node = findNodeByName(wf.Status.Nodes, "test-template-scope[0].step[0].other-wftmpl[0]")
- if assert.NotNil(t, node, "Node %s not found", "test-template-scope[0].other-wftmpl[0]") {
- assert.Equal(t, wfv1.NodeTypeStepGroup, node.Type)
- assert.Equal(t, "namespaced/test-template-scope-2", node.TemplateScope)
- }
+ require.NotNil(t, node, "Node %s not found", "test-template-scope[0].other-wftmpl[0]")
+ assert.Equal(t, wfv1.NodeTypeStepGroup, node.Type)
+ assert.Equal(t, "namespaced/test-template-scope-2", node.TemplateScope)
node = findNodeByName(wf.Status.Nodes, "test-template-scope[0].step[0].other-wftmpl[0].hello")
- if assert.NotNil(t, node, "Node %s not found", "test-template-scope[0].other-wftmpl[0].hello") {
- assert.Equal(t, wfv1.NodeTypePod, node.Type)
- assert.Equal(t, "namespaced/test-template-scope-2", node.TemplateScope)
- }
+ require.NotNil(t, node, "Node %s not found", "test-template-scope[0].other-wftmpl[0].hello")
+ assert.Equal(t, wfv1.NodeTypePod, node.Type)
+ assert.Equal(t, "namespaced/test-template-scope-2", node.TemplateScope)
}
diff --git a/workflow/controller/operator_test.go b/workflow/controller/operator_test.go
index 2788b594ab6e..7ac9e7809112 100644
--- a/workflow/controller/operator_test.go
+++ b/workflow/controller/operator_test.go
@@ -81,13 +81,11 @@ func Test_wfOperationCtx_reapplyUpdate(t *testing.T) {
// now force a re-apply update
updatedWf, err := woc.reapplyUpdate(ctx, controller.wfclientset.ArgoprojV1alpha1().Workflows(""), nodes)
require.NoError(t, err)
- if assert.NotNil(t, updatedWf) {
- assert.True(t, woc.controller.hydrator.IsHydrated(updatedWf))
- if assert.Contains(t, updatedWf.Status.Nodes, "foo") {
- assert.Equal(t, "my-foo", updatedWf.Status.Nodes["foo"].Name)
- assert.Equal(t, wfv1.NodeSucceeded, updatedWf.Status.Nodes["foo"].Phase, "phase is merged")
- }
- }
+ require.NotNil(t, updatedWf)
+ assert.True(t, woc.controller.hydrator.IsHydrated(updatedWf))
+ require.Contains(t, updatedWf.Status.Nodes, "foo")
+ assert.Equal(t, "my-foo", updatedWf.Status.Nodes["foo"].Name)
+ assert.Equal(t, wfv1.NodeSucceeded, updatedWf.Status.Nodes["foo"].Phase, "phase is merged")
})
t.Run("ErrUpdatingCompletedWorkflow", func(t *testing.T) {
wf := &wfv1.Workflow{
@@ -369,9 +367,8 @@ func TestGlobalParams(t *testing.T) {
ctx := context.Background()
woc := newWorkflowOperationCtx(wf, controller)
woc.operate(ctx)
- if assert.Contains(t, woc.globalParams, "workflow.creationTimestamp") {
- assert.NotContains(t, woc.globalParams["workflow.creationTimestamp"], "UTC")
- }
+ require.Contains(t, woc.globalParams, "workflow.creationTimestamp")
+ assert.NotContains(t, woc.globalParams["workflow.creationTimestamp"], "UTC")
for char := range strftime.FormatChars {
assert.Contains(t, woc.globalParams, fmt.Sprintf("%s.%s", "workflow.creationTimestamp", string(char)))
}
@@ -1987,13 +1984,12 @@ func TestWorkflowStepRetry(t *testing.T) {
woc.operate(ctx)
pods, err = listPods(woc)
require.NoError(t, err)
- if assert.Len(t, pods.Items, 3) {
- assert.Equal(t, "cowsay success", pods.Items[0].Spec.Containers[1].Args[0])
- assert.Equal(t, "cowsay failure", pods.Items[1].Spec.Containers[1].Args[0])
+ require.Len(t, pods.Items, 3)
+ assert.Equal(t, "cowsay success", pods.Items[0].Spec.Containers[1].Args[0])
+ assert.Equal(t, "cowsay failure", pods.Items[1].Spec.Containers[1].Args[0])
- // verify that after the cowsay failure pod failed, we are retrying cowsay success
- assert.Equal(t, "cowsay success", pods.Items[2].Spec.Containers[1].Args[0])
- }
+ // verify that after the cowsay failure pod failed, we are retrying cowsay success
+ assert.Equal(t, "cowsay success", pods.Items[2].Spec.Containers[1].Args[0])
}
var workflowParallelismLimit = `
@@ -2277,10 +2273,10 @@ func TestSidecarResourceLimits(t *testing.T) {
break
}
}
- if assert.NotNil(t, waitCtr) && assert.NotNil(t, waitCtr.Resources) {
- assert.Len(t, waitCtr.Resources.Limits, 2)
- assert.Len(t, waitCtr.Resources.Requests, 2)
- }
+ require.NotNil(t, waitCtr)
+ require.NotNil(t, waitCtr.Resources)
+ assert.Len(t, waitCtr.Resources.Limits, 2)
+ assert.Len(t, waitCtr.Resources.Requests, 2)
}
// TestSuspendResume tests the suspend and resume feature
@@ -3690,16 +3686,15 @@ func TestResourceWithOwnerReferenceTemplate(t *testing.T) {
require.NoError(t, err)
objectMetas[cm.Name] = cm.ObjectMeta
}
- if assert.Len(t, objectMetas["resource-cm-1"].OwnerReferences, 1) {
- assert.Equal(t, "manual-ref-name", objectMetas["resource-cm-1"].OwnerReferences[0].Name)
- }
- if assert.Len(t, objectMetas["resource-cm-2"].OwnerReferences, 1) {
- assert.Equal(t, "resource-with-ownerreference-template", objectMetas["resource-cm-2"].OwnerReferences[0].Name)
- }
- if assert.Len(t, objectMetas["resource-cm-3"].OwnerReferences, 2) {
- assert.Equal(t, "manual-ref-name", objectMetas["resource-cm-3"].OwnerReferences[0].Name)
- assert.Equal(t, "resource-with-ownerreference-template", objectMetas["resource-cm-3"].OwnerReferences[1].Name)
- }
+ require.Len(t, objectMetas["resource-cm-1"].OwnerReferences, 1)
+ assert.Equal(t, "manual-ref-name", objectMetas["resource-cm-1"].OwnerReferences[0].Name)
+
+ require.Len(t, objectMetas["resource-cm-2"].OwnerReferences, 1)
+ assert.Equal(t, "resource-with-ownerreference-template", objectMetas["resource-cm-2"].OwnerReferences[0].Name)
+
+ require.Len(t, objectMetas["resource-cm-3"].OwnerReferences, 2)
+ assert.Equal(t, "manual-ref-name", objectMetas["resource-cm-3"].OwnerReferences[0].Name)
+ assert.Equal(t, "resource-with-ownerreference-template", objectMetas["resource-cm-3"].OwnerReferences[1].Name)
}
var stepScriptTmpl = `
@@ -4832,11 +4827,12 @@ func TestNestedStepGroupGlobalParams(t *testing.T) {
woc.operate(ctx)
node := woc.wf.Status.Nodes.FindByDisplayName("generate")
- if assert.NotNil(t, node) && assert.NotNil(t, node.Outputs) && assert.Len(t, node.Outputs.Parameters, 1) {
- assert.Equal(t, "hello-param", node.Outputs.Parameters[0].Name)
- assert.Equal(t, "global-param", node.Outputs.Parameters[0].GlobalName)
- assert.Equal(t, "hello world", node.Outputs.Parameters[0].Value.String())
- }
+ require.NotNil(t, node)
+ require.NotNil(t, node.Outputs)
+ require.Len(t, node.Outputs.Parameters, 1)
+ assert.Equal(t, "hello-param", node.Outputs.Parameters[0].Name)
+ assert.Equal(t, "global-param", node.Outputs.Parameters[0].GlobalName)
+ assert.Equal(t, "hello world", node.Outputs.Parameters[0].Value.String())
assert.Equal(t, "hello world", woc.wf.Status.Outputs.Parameters[0].Value.String())
assert.Equal(t, "global-param", woc.wf.Status.Outputs.Parameters[0].Name)
@@ -5612,13 +5608,12 @@ func TestConfigMapCacheLoadOperate(t *testing.T) {
woc := newWorkflowOperationCtx(wf, controller)
woc.operate(ctx)
- if assert.Len(t, woc.wf.Status.Nodes, 1) {
- for _, node := range woc.wf.Status.Nodes {
- assert.NotNil(t, node.Outputs)
- assert.Equal(t, "hello", node.Outputs.Parameters[0].Name)
- assert.Equal(t, "foobar", node.Outputs.Parameters[0].Value.String())
- assert.Equal(t, wfv1.NodeSucceeded, node.Phase)
- }
+ require.Len(t, woc.wf.Status.Nodes, 1)
+ for _, node := range woc.wf.Status.Nodes {
+ assert.NotNil(t, node.Outputs)
+ assert.Equal(t, "hello", node.Outputs.Parameters[0].Name)
+ assert.Equal(t, "foobar", node.Outputs.Parameters[0].Value.String())
+ assert.Equal(t, wfv1.NodeSucceeded, node.Phase)
}
}
@@ -5685,11 +5680,10 @@ func TestConfigMapCacheLoadOperateNoOutputs(t *testing.T) {
woc := newWorkflowOperationCtx(wf, controller)
woc.operate(ctx)
- if assert.Len(t, woc.wf.Status.Nodes, 1) {
- for _, node := range woc.wf.Status.Nodes {
- assert.Nil(t, node.Outputs)
- assert.Equal(t, wfv1.NodeSucceeded, node.Phase)
- }
+ require.Len(t, woc.wf.Status.Nodes, 1)
+ for _, node := range woc.wf.Status.Nodes {
+ assert.Nil(t, node.Outputs)
+ assert.Equal(t, wfv1.NodeSucceeded, node.Phase)
}
}
@@ -5931,13 +5925,12 @@ func TestConfigMapCacheLoadOperateMaxAge(t *testing.T) {
woc := newWorkflowOperationCtx(wf, controller)
woc.operate(ctx)
- if assert.Len(t, woc.wf.Status.Nodes, 1) {
- for _, node := range woc.wf.Status.Nodes {
- assert.NotNil(t, node.Outputs)
- assert.Equal(t, "hello", node.Outputs.Parameters[0].Name)
- assert.Equal(t, "foobar", node.Outputs.Parameters[0].Value.String())
- assert.Equal(t, wfv1.NodeSucceeded, node.Phase)
- }
+ require.Len(t, woc.wf.Status.Nodes, 1)
+ for _, node := range woc.wf.Status.Nodes {
+ assert.NotNil(t, node.Outputs)
+ assert.Equal(t, "hello", node.Outputs.Parameters[0].Name)
+ assert.Equal(t, "foobar", node.Outputs.Parameters[0].Value.String())
+ assert.Equal(t, wfv1.NodeSucceeded, node.Phase)
}
cancel()
@@ -5951,11 +5944,10 @@ func TestConfigMapCacheLoadOperateMaxAge(t *testing.T) {
woc = newWorkflowOperationCtx(wf, controller)
woc.operate(ctx)
- if assert.Len(t, woc.wf.Status.Nodes, 1) {
- for _, node := range woc.wf.Status.Nodes {
- assert.Nil(t, node.Outputs)
- assert.Equal(t, wfv1.NodePending, node.Phase)
- }
+ require.Len(t, woc.wf.Status.Nodes, 1)
+ for _, node := range woc.wf.Status.Nodes {
+ assert.Nil(t, node.Outputs)
+ assert.Equal(t, wfv1.NodePending, node.Phase)
}
}
@@ -6199,11 +6191,10 @@ func TestConfigMapCacheLoadNoLabels(t *testing.T) {
assert.NotPanics(t, fn)
assert.Equal(t, wfv1.WorkflowError, woc.wf.Status.Phase)
- if assert.Len(t, woc.wf.Status.Nodes, 1) {
- for _, node := range woc.wf.Status.Nodes {
- assert.Nil(t, node.Outputs)
- assert.Equal(t, wfv1.NodeError, node.Phase)
- }
+ require.Len(t, woc.wf.Status.Nodes, 1)
+ for _, node := range woc.wf.Status.Nodes {
+ assert.Nil(t, node.Outputs)
+ assert.Equal(t, wfv1.NodeError, node.Phase)
}
}
@@ -6240,12 +6231,11 @@ func TestConfigMapCacheLoadNilOutputs(t *testing.T) {
}
assert.NotPanics(t, fn)
- if assert.Len(t, woc.wf.Status.Nodes, 1) {
- for _, node := range woc.wf.Status.Nodes {
- assert.NotNil(t, node.Outputs)
- assert.False(t, node.Outputs.HasOutputs())
- assert.Equal(t, wfv1.NodeSucceeded, node.Phase)
- }
+ require.Len(t, woc.wf.Status.Nodes, 1)
+ for _, node := range woc.wf.Status.Nodes {
+ assert.NotNil(t, node.Outputs)
+ assert.False(t, node.Outputs.HasOutputs())
+ assert.Equal(t, wfv1.NodeSucceeded, node.Phase)
}
}
@@ -6277,9 +6267,8 @@ func TestConfigMapCacheSaveOperate(t *testing.T) {
var entry cache.Entry
wfv1.MustUnmarshal(rawEntry, &entry)
- if assert.NotNil(t, entry.Outputs) {
- assert.Equal(t, sampleOutputs, *entry.Outputs)
- }
+ require.NotNil(t, entry.Outputs)
+ assert.Equal(t, sampleOutputs, *entry.Outputs)
}
var propagate = `
@@ -6705,9 +6694,10 @@ func TestGlobalVarsOnExit(t *testing.T) {
woc.operate(ctx)
node := woc.wf.Status.Nodes["hello-world-6gphm-8n22g-3224262006"]
- if assert.NotNil(t, node) && assert.NotNil(t, node.Inputs) && assert.NotEmpty(t, node.Inputs.Parameters) {
- assert.Equal(t, "nononono", node.Inputs.Parameters[0].Value.String())
- }
+ require.NotNil(t, node)
+ require.NotNil(t, node.Inputs)
+ require.NotEmpty(t, node.Inputs.Parameters)
+ assert.Equal(t, "nononono", node.Inputs.Parameters[0].Value.String())
}
var deadlineWf = `
@@ -7291,13 +7281,12 @@ func TestWFWithRetryAndWithParam(t *testing.T) {
pods, err := listPods(woc)
require.NoError(t, err)
assert.NotEmpty(t, pods.Items)
- if assert.Len(t, pods.Items, 3) {
- ctrs := pods.Items[0].Spec.Containers
- assert.Len(t, ctrs, 2)
- envs := ctrs[1].Env
- assert.Len(t, envs, 8)
- assert.Equal(t, apiv1.EnvVar{Name: "ARGO_INCLUDE_SCRIPT_OUTPUT", Value: "true"}, envs[3])
- }
+ require.Len(t, pods.Items, 3)
+ ctrs := pods.Items[0].Spec.Containers
+ assert.Len(t, ctrs, 2)
+ envs := ctrs[1].Env
+ assert.Len(t, envs, 8)
+ assert.Equal(t, apiv1.EnvVar{Name: "ARGO_INCLUDE_SCRIPT_OUTPUT", Value: "true"}, envs[3])
})
}
@@ -7492,18 +7481,14 @@ func TestParamAggregation(t *testing.T) {
woc.operate(ctx)
evenNode := woc.wf.Status.Nodes.FindByDisplayName("print-evenness")
- if assert.NotNil(t, evenNode) {
- if assert.Len(t, evenNode.Inputs.Parameters, 1) {
- assert.Equal(t, `["odd","even"]`, evenNode.Inputs.Parameters[0].Value.String())
- }
- }
+ require.NotNil(t, evenNode)
+ require.Len(t, evenNode.Inputs.Parameters, 1)
+ assert.Equal(t, `["odd","even"]`, evenNode.Inputs.Parameters[0].Value.String())
numNode := woc.wf.Status.Nodes.FindByDisplayName("print-nums")
- if assert.NotNil(t, numNode) {
- if assert.Len(t, numNode.Inputs.Parameters, 1) {
- assert.Equal(t, `["1","2"]`, numNode.Inputs.Parameters[0].Value.String())
- }
- }
+ require.NotNil(t, numNode)
+ require.Len(t, numNode.Inputs.Parameters, 1)
+ assert.Equal(t, `["1","2"]`, numNode.Inputs.Parameters[0].Value.String())
}
func TestPodHasContainerNeedingTermination(t *testing.T) {
@@ -7700,10 +7685,9 @@ func TestRetryOnNodeAntiAffinity(t *testing.T) {
woc.operate(ctx)
node := woc.wf.Status.Nodes.FindByDisplayName("retry-fail(0)")
- if assert.NotNil(t, node) {
- assert.Equal(t, wfv1.NodeFailed, node.Phase)
- assert.Equal(t, "node0", node.HostNodeName)
- }
+ require.NotNil(t, node)
+ assert.Equal(t, wfv1.NodeFailed, node.Phase)
+ assert.Equal(t, "node0", node.HostNodeName)
pods, err = listPods(woc)
require.NoError(t, err)
@@ -7733,10 +7717,9 @@ func TestRetryOnNodeAntiAffinity(t *testing.T) {
woc.operate(ctx)
node1 := woc.wf.Status.Nodes.FindByDisplayName("retry-fail(1)")
- if assert.NotNil(t, node) {
- assert.Equal(t, wfv1.NodeFailed, node1.Phase)
- assert.Equal(t, "node1", node1.HostNodeName)
- }
+ require.NotNil(t, node1)
+ assert.Equal(t, wfv1.NodeFailed, node1.Phase)
+ assert.Equal(t, "node1", node1.HostNodeName)
pods, err = listPods(woc)
require.NoError(t, err)
@@ -7784,10 +7767,9 @@ func TestNoPodsWhenShutdown(t *testing.T) {
woc.operate(ctx)
node := woc.wf.Status.Nodes.FindByDisplayName("hello-world")
- if assert.NotNil(t, node) {
- assert.Equal(t, wfv1.NodeFailed, node.Phase)
- assert.Contains(t, node.Message, "workflow shutdown with strategy: Stop")
- }
+ require.NotNil(t, node)
+ assert.Equal(t, wfv1.NodeFailed, node.Phase)
+ assert.Contains(t, node.Message, "workflow shutdown with strategy: Stop")
}
var wfscheVariable = `
@@ -7934,10 +7916,9 @@ spec:
woc1.operate(ctx)
node := woc1.wf.Status.Nodes.FindByDisplayName("whalesay")
- if assert.NotNil(t, node) {
- assert.Contains(t, node.Message, "workflow shutdown with strategy")
- assert.Contains(t, node.Message, "Stop")
- }
+ require.NotNil(t, node)
+ assert.Contains(t, node.Message, "workflow shutdown with strategy")
+ assert.Contains(t, node.Message, "Stop")
})
t.Run("TerminateStrategy", func(t *testing.T) {
@@ -7956,10 +7937,9 @@ spec:
woc1 := newWorkflowOperationCtx(wfOut, controller)
woc1.operate(ctx)
for _, node := range woc1.wf.Status.Nodes {
- if assert.NotNil(t, node) {
- assert.Contains(t, node.Message, "workflow shutdown with strategy")
- assert.Contains(t, node.Message, "Terminate")
- }
+ require.NotNil(t, node)
+ assert.Contains(t, node.Message, "workflow shutdown with strategy")
+ assert.Contains(t, node.Message, "Terminate")
}
})
}
@@ -8201,13 +8181,12 @@ func TestStepsFailFast(t *testing.T) {
assert.Equal(t, wfv1.WorkflowFailed, woc.wf.Status.Phase)
node := woc.wf.Status.Nodes.FindByDisplayName("iteration(0:a)")
- if assert.NotNil(t, node) {
- assert.Equal(t, wfv1.NodeFailed, node.Phase)
- }
+ require.NotNil(t, node)
+ assert.Equal(t, wfv1.NodeFailed, node.Phase)
+
node = woc.wf.Status.Nodes.FindByDisplayName("seq-loop-pz4hh")
- if assert.NotNil(t, node) {
- assert.Equal(t, wfv1.NodeFailed, node.Phase)
- }
+ require.NotNil(t, node)
+ assert.Equal(t, wfv1.NodeFailed, node.Phase)
}
func TestGetStepOrDAGTaskName(t *testing.T) {
@@ -10527,10 +10506,9 @@ func TestMaxDepth(t *testing.T) {
assert.Equal(t, wfv1.WorkflowError, woc.wf.Status.Phase)
node := woc.wf.Status.Nodes["hello-world-713168755"]
- if assert.NotNil(t, node) {
- assert.Equal(t, wfv1.NodeError, node.Phase)
- assert.Contains(t, node.Message, "Maximum recursion depth exceeded")
- }
+ require.NotNil(t, node)
+ assert.Equal(t, wfv1.NodeError, node.Phase)
+ assert.Contains(t, node.Message, "Maximum recursion depth exceeded")
// Max depth is enabled, but not too small, no error expected
controller.maxStackDepth = 3
@@ -10540,9 +10518,8 @@ func TestMaxDepth(t *testing.T) {
assert.Equal(t, wfv1.WorkflowRunning, woc.wf.Status.Phase)
node = woc.wf.Status.Nodes["hello-world-713168755"]
- if assert.NotNil(t, node) {
- assert.Equal(t, wfv1.NodePending, node.Phase)
- }
+ require.NotNil(t, node)
+ assert.Equal(t, wfv1.NodePending, node.Phase)
makePodsPhase(ctx, woc, apiv1.PodSucceeded)
woc.operate(ctx)
@@ -10568,9 +10545,8 @@ func TestMaxDepthEnvVariable(t *testing.T) {
assert.Equal(t, wfv1.WorkflowRunning, woc.wf.Status.Phase)
node := woc.wf.Status.Nodes["hello-world-713168755"]
- if assert.NotNil(t, node) {
- assert.Equal(t, wfv1.NodePending, node.Phase)
- }
+ require.NotNil(t, node)
+ assert.Equal(t, wfv1.NodePending, node.Phase)
makePodsPhase(ctx, woc, apiv1.PodSucceeded)
woc.operate(ctx)
@@ -10902,10 +10878,9 @@ func TestWorkflowNeedReconcile(t *testing.T) {
woc.operate(ctx)
pods, err = listPods(woc)
require.NoError(t, err)
- if assert.Len(t, pods.Items, 2) {
- assert.Equal(t, "hello1", pods.Items[0].Spec.Containers[1].Env[0].Value)
- assert.Equal(t, "steps-need-reconcile", pods.Items[1].Spec.Containers[1].Env[0].Value)
- }
+ require.Len(t, pods.Items, 2)
+ assert.Equal(t, "hello1", pods.Items[0].Spec.Containers[1].Env[0].Value)
+ assert.Equal(t, "steps-need-reconcile", pods.Items[1].Spec.Containers[1].Env[0].Value)
}
func TestWorkflowRunningButLabelCompleted(t *testing.T) {
diff --git a/workflow/controller/operator_workflow_template_ref_test.go b/workflow/controller/operator_workflow_template_ref_test.go
index bff5e18720bc..51e9fbf96596 100644
--- a/workflow/controller/operator_workflow_template_ref_test.go
+++ b/workflow/controller/operator_workflow_template_ref_test.go
@@ -341,10 +341,9 @@ func TestWorkflowTemplateRefWithShutdownAndSuspend(t *testing.T) {
assert.NotEmpty(t, woc1.wf.Status.StoredWorkflowSpec.Shutdown)
assert.Equal(t, wfv1.ShutdownStrategyTerminate, woc1.wf.Status.StoredWorkflowSpec.Shutdown)
for _, node := range woc1.wf.Status.Nodes {
- if assert.NotNil(t, node) {
- assert.Contains(t, node.Message, "workflow shutdown with strategy")
- assert.Contains(t, node.Message, "Terminate")
- }
+ require.NotNil(t, node)
+ assert.Contains(t, node.Message, "workflow shutdown with strategy")
+ assert.Contains(t, node.Message, "Terminate")
}
})
t.Run("WorkflowTemplateRefWithShutdownStop", func(t *testing.T) {
@@ -364,10 +363,9 @@ func TestWorkflowTemplateRefWithShutdownAndSuspend(t *testing.T) {
assert.NotEmpty(t, woc1.wf.Status.StoredWorkflowSpec.Shutdown)
assert.Equal(t, wfv1.ShutdownStrategyStop, woc1.wf.Status.StoredWorkflowSpec.Shutdown)
for _, node := range woc1.wf.Status.Nodes {
- if assert.NotNil(t, node) {
- assert.Contains(t, node.Message, "workflow shutdown with strategy")
- assert.Contains(t, node.Message, "Stop")
- }
+ require.NotNil(t, node)
+ assert.Contains(t, node.Message, "workflow shutdown with strategy")
+ assert.Contains(t, node.Message, "Stop")
}
})
}
diff --git a/workflow/controller/pod_cleanup.go b/workflow/controller/pod_cleanup.go
index 55823c70d654..12787ae77d22 100644
--- a/workflow/controller/pod_cleanup.go
+++ b/workflow/controller/pod_cleanup.go
@@ -13,8 +13,11 @@ import (
func (woc *wfOperationCtx) queuePodsForCleanup() {
delay := woc.controller.Config.GetPodGCDeleteDelayDuration()
podGC := woc.execWf.Spec.PodGC
- if podGC != nil && podGC.DeleteDelayDuration != nil {
- delay = podGC.DeleteDelayDuration.Duration
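+ // Prefer the workflow-level podGC deleteDelayDuration when it parses to a non-negative duration; otherwise keep the controller default.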
+ podGCDelay, err := podGC.GetDeleteDelayDuration()
+ if err != nil {
+ woc.log.WithError(err).Warn("failed to parse podGC.deleteDelayDuration")
+ } else if podGCDelay >= 0 {
+ delay = podGCDelay
}
strategy := podGC.GetStrategy()
selector, _ := podGC.GetLabelSelector()
diff --git a/workflow/controller/scope.go b/workflow/controller/scope.go
index 30eb60a54230..f709708dce98 100644
--- a/workflow/controller/scope.go
+++ b/workflow/controller/scope.go
@@ -144,7 +144,11 @@ func (s *wfScope) resolveArtifact(art *wfv1.Artifact) (*wfv1.Artifact, error) {
return copyArt, errors.New(errors.CodeBadRequest, "failed to unmarshal artifact subpath for templating")
}
- return copyArt, copyArt.AppendToKey(resolvedSubPath)
+ err = copyArt.AppendToKey(resolvedSubPath)
+ if err != nil && copyArt.Optional { // Ignore the error when the artifact is optional
+ return copyArt, nil
+ }
+ return copyArt, err
}
return &valArt, nil
diff --git a/workflow/controller/steps_test.go b/workflow/controller/steps_test.go
index c03c42e24e78..dae2fa623f2f 100644
--- a/workflow/controller/steps_test.go
+++ b/workflow/controller/steps_test.go
@@ -315,3 +315,133 @@ func TestOptionalArgumentAndParameter(t *testing.T) {
woc.operate(ctx)
assert.Equal(t, wfv1.WorkflowRunning, woc.wf.Status.Phase)
}
+
+var artifactResolutionWhenOptionalAndSubpath = `
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ name: artifact-passing-subpath-rx7f4
+spec:
+ entrypoint: artifact-example
+ templates:
+ - name: artifact-example
+ steps:
+ - - name: hello-world-to-file
+ template: hello-world-to-file
+ - - name: hello-world-to-file2
+ template: hello-world-to-file2
+ arguments:
+ artifacts:
+ - name: bar
+ from: "{{steps.hello-world-to-file.outputs.artifacts.foo}}"
+ optional: true
+ subpath: bar.txt
+ withParam: "[0, 1]"
+
+ - name: hello-world-to-file
+ container:
+ image: busybox:latest
+ imagePullPolicy: IfNotPresent
+ command: [sh, -c]
+ args: ["sleep 1; echo hello world"]
+ outputs:
+ artifacts:
+ - name: foo
+ path: /tmp/foo
+ optional: true
+ archive:
+ none: {}
+
+ - name: hello-world-to-file2
+ inputs:
+ artifacts:
+ - name: bar
+ path: /tmp/bar.txt
+ optional: true
+ archive:
+ none: {}
+ container:
+ image: busybox:latest
+ imagePullPolicy: IfNotPresent
+ command: [sh, -c]
+ args: ["sleep 1; echo hello world"]
+status:
+ nodes:
+ artifact-passing-subpath-rx7f4:
+ children:
+ - artifact-passing-subpath-rx7f4-1763046061
+ displayName: artifact-passing-subpath-rx7f4
+ id: artifact-passing-subpath-rx7f4
+ name: artifact-passing-subpath-rx7f4
+ phase: Running
+ progress: 1/1
+ resourcesDuration:
+ cpu: 0
+ memory: 5
+ startedAt: "2024-09-06T04:53:32Z"
+ templateName: artifact-example
+ templateScope: local/artifact-passing-subpath-rx7f4
+ type: Steps
+ artifact-passing-subpath-rx7f4-511855021:
+ boundaryID: artifact-passing-subpath-rx7f4
+ children:
+ - artifact-passing-subpath-rx7f4-1696082680
+ displayName: hello-world-to-file
+ finishedAt: "2024-09-06T04:53:39Z"
+ id: artifact-passing-subpath-rx7f4-511855021
+ name: artifact-passing-subpath-rx7f4[0].hello-world-to-file
+ outputs:
+ artifacts:
+ - archive:
+ none: {}
+ name: foo
+ optional: true
+ path: /tmp/foo
+ - name: main-logs
+ s3:
+ key: artifact-passing-subpath-rx7f4/artifact-passing-subpath-rx7f4-hello-world-to-file-511855021/main.log
+ exitCode: "0"
+ phase: Succeeded
+ progress: 1/1
+ resourcesDuration:
+ cpu: 0
+ memory: 5
+ startedAt: "2024-09-06T04:53:32Z"
+ templateName: hello-world-to-file
+ templateScope: local/artifact-passing-subpath-rx7f4
+ type: Pod
+ artifact-passing-subpath-rx7f4-1763046061:
+ boundaryID: artifact-passing-subpath-rx7f4
+ children:
+ - artifact-passing-subpath-rx7f4-511855021
+ displayName: '[0]'
+ finishedAt: "2024-09-06T04:53:41Z"
+ id: artifact-passing-subpath-rx7f4-1763046061
+ name: artifact-passing-subpath-rx7f4[0]
+ nodeFlag: {}
+ phase: Succeeded
+ progress: 1/1
+ resourcesDuration:
+ cpu: 0
+ memory: 5
+ startedAt: "2024-09-06T04:53:32Z"
+ templateScope: local/artifact-passing-subpath-rx7f4
+ type: StepGroup
+ phase: Running
+ taskResultsCompletionStatus:
+ artifact-passing-subpath-rx7f4-511855021: true`
+
+func TestOptionalArgumentUseSubPathInLoop(t *testing.T) {
+ cancel, controller := newController()
+ defer cancel()
+ wfcset := controller.wfclientset.ArgoprojV1alpha1().Workflows("")
+
+ ctx := context.Background()
+ wf := wfv1.MustUnmarshalWorkflow(artifactResolutionWhenOptionalAndSubpath)
+ wf, err := wfcset.Create(ctx, wf, metav1.CreateOptions{})
+ require.NoError(t, err)
+ woc := newWorkflowOperationCtx(wf, controller)
+
+ woc.operate(ctx)
+ assert.Equal(t, wfv1.WorkflowRunning, woc.wf.Status.Phase)
+}
diff --git a/workflow/controller/taskresult.go b/workflow/controller/taskresult.go
index ef08d33386ed..8118084dd360 100644
--- a/workflow/controller/taskresult.go
+++ b/workflow/controller/taskresult.go
@@ -54,19 +54,14 @@ func (wfc *WorkflowController) newWorkflowTaskResultInformer() cache.SharedIndex
return informer
}
-func podAbsentTimeout(node *wfv1.NodeStatus) bool {
- return time.Since(node.StartedAt.Time) <= envutil.LookupEnvDurationOr("POD_ABSENT_TIMEOUT", 2*time.Minute)
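+// recentlyDeleted reports whether the node finished within the last RECENTLY_DELETED_POD_DURATION (default 10s).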
+func recentlyDeleted(node *wfv1.NodeStatus) bool {
+ return time.Since(node.FinishedAt.Time) <= envutil.LookupEnvDurationOr("RECENTLY_DELETED_POD_DURATION", 10*time.Second)
}
-func (woc *wfOperationCtx) taskResultReconciliation() error {
-
+func (woc *wfOperationCtx) taskResultReconciliation() {
objs, _ := woc.controller.taskResultInformer.GetIndexer().ByIndex(indexes.WorkflowIndex, woc.wf.Namespace+"/"+woc.wf.Name)
woc.log.WithField("numObjs", len(objs)).Info("Task-result reconciliation")
- podMap, err := woc.getAllWorkflowPodsMap()
- if err != nil {
- return err
- }
for _, obj := range objs {
result := obj.(*wfv1.WorkflowTaskResult)
resultName := result.GetName()
@@ -75,7 +70,6 @@ func (woc *wfOperationCtx) taskResultReconciliation() error {
woc.log.Debugf("task result name:\n%+v", resultName)
label := result.Labels[common.LabelKeyReportOutputsCompleted]
-
// If the task result is completed, set the state to true.
if label == "true" {
woc.log.Debugf("Marking task result complete %s", resultName)
@@ -85,33 +79,25 @@ func (woc *wfOperationCtx) taskResultReconciliation() error {
woc.wf.Status.MarkTaskResultIncomplete(resultName)
}
- _, foundPod := podMap[result.Name]
- node, err := woc.wf.Status.Nodes.Get(result.Name)
+ nodeID := result.Name
+ old, err := woc.wf.Status.Nodes.Get(nodeID)
if err != nil {
- if foundPod {
- // how does this path make any sense?
- // pod created but informer not yet updated
- woc.log.Errorf("couldn't obtain node for %s, but found pod, this is not expected, doing nothing", result.Name)
- }
continue
}
-
- if !foundPod && !node.Completed() {
- if podAbsentTimeout(node) {
- woc.log.Infof("Determined controller should timeout for %s", result.Name)
- woc.wf.Status.MarkTaskResultComplete(resultName)
-
- woc.markNodePhase(node.Name, wfv1.NodeFailed, "pod was absent")
+ // Mark the task result as completed if it no longer has a chance of being completed.
+ if label == "false" && old.IsPodDeleted() {
+ if recentlyDeleted(old) {
+ woc.log.WithField("nodeID", nodeID).Debug("Waiting to mark the task result as completed because the pod was recently deleted.")
+ // If the pod was deleted, it is possible that the controller never gets another informer message about it.
+ // In that case, the workflow would only be requeued after the resync period (20m), meaning it
+ // would not update for 20m. Requeuing here prevents that from happening.
+ woc.requeue()
+ continue
} else {
- woc.log.Debugf("Determined controller shouldn't timeout %s", result.Name)
+ woc.log.WithField("nodeID", nodeID).Info("Marking the task result as completed because the pod has been deleted for a while.")
+ woc.wf.Status.MarkTaskResultComplete(nodeID)
}
}
-
- nodeID := result.Name
- old, err := woc.wf.Status.Nodes.Get(nodeID)
- if err != nil {
- continue
- }
newNode := old.DeepCopy()
if result.Outputs.HasOutputs() {
if newNode.Outputs == nil {
@@ -133,5 +119,4 @@ func (woc *wfOperationCtx) taskResultReconciliation() error {
woc.updated = true
}
}
- return nil
}
diff --git a/workflow/controller/workflowpod_test.go b/workflow/controller/workflowpod_test.go
index 3680bff28247..99ceefab4f0e 100644
--- a/workflow/controller/workflowpod_test.go
+++ b/workflow/controller/workflowpod_test.go
@@ -730,34 +730,30 @@ func TestVolumeAndVolumeMounts(t *testing.T) {
require.NoError(t, err)
assert.Len(t, pods.Items, 1)
pod := pods.Items[0]
- if assert.Len(t, pod.Spec.Volumes, 3) {
- assert.Equal(t, "var-run-argo", pod.Spec.Volumes[0].Name)
- assert.Equal(t, "tmp-dir-argo", pod.Spec.Volumes[1].Name)
- assert.Equal(t, "volume-name", pod.Spec.Volumes[2].Name)
- }
- if assert.Len(t, pod.Spec.InitContainers, 1) {
- init := pod.Spec.InitContainers[0]
- if assert.Len(t, init.VolumeMounts, 1) {
- assert.Equal(t, "var-run-argo", init.VolumeMounts[0].Name)
- }
- }
+ require.Len(t, pod.Spec.Volumes, 3)
+ assert.Equal(t, "var-run-argo", pod.Spec.Volumes[0].Name)
+ assert.Equal(t, "tmp-dir-argo", pod.Spec.Volumes[1].Name)
+ assert.Equal(t, "volume-name", pod.Spec.Volumes[2].Name)
+
+ require.Len(t, pod.Spec.InitContainers, 1)
+ init := pod.Spec.InitContainers[0]
+ require.Len(t, init.VolumeMounts, 1)
+ assert.Equal(t, "var-run-argo", init.VolumeMounts[0].Name)
+
containers := pod.Spec.Containers
- if assert.Len(t, containers, 2) {
- wait := containers[0]
- if assert.Len(t, wait.VolumeMounts, 3) {
- assert.Equal(t, "volume-name", wait.VolumeMounts[0].Name)
- assert.Equal(t, "tmp-dir-argo", wait.VolumeMounts[1].Name)
- assert.Equal(t, "var-run-argo", wait.VolumeMounts[2].Name)
- }
- main := containers[1]
- assert.Equal(t, []string{"/var/run/argo/argoexec", "emissary",
- "--loglevel", getExecutorLogLevel(), "--log-format", woc.controller.cliExecutorLogFormat,
- "--", "cowsay"}, main.Command)
- if assert.Len(t, main.VolumeMounts, 2) {
- assert.Equal(t, "volume-name", main.VolumeMounts[0].Name)
- assert.Equal(t, "var-run-argo", main.VolumeMounts[1].Name)
- }
- }
+ require.Len(t, containers, 2)
+ wait := containers[0]
+ require.Len(t, wait.VolumeMounts, 3)
+ assert.Equal(t, "volume-name", wait.VolumeMounts[0].Name)
+ assert.Equal(t, "tmp-dir-argo", wait.VolumeMounts[1].Name)
+ assert.Equal(t, "var-run-argo", wait.VolumeMounts[2].Name)
+ main := containers[1]
+ assert.Equal(t, []string{"/var/run/argo/argoexec", "emissary",
+ "--loglevel", getExecutorLogLevel(), "--log-format", woc.controller.cliExecutorLogFormat,
+ "--", "cowsay"}, main.Command)
+ require.Len(t, main.VolumeMounts, 2)
+ assert.Equal(t, "volume-name", main.VolumeMounts[0].Name)
+ assert.Equal(t, "var-run-argo", main.VolumeMounts[1].Name)
})
}
@@ -1478,9 +1474,8 @@ func TestMainContainerCustomization(t *testing.T) {
require.NoError(t, err)
ctr := pod.Spec.Containers[1]
assert.NotNil(t, ctr.SecurityContext)
- if assert.NotNil(t, pod.Spec.Containers[1].Resources) {
- assert.Equal(t, "0.200", pod.Spec.Containers[1].Resources.Limits.Cpu().AsDec().String())
- }
+ require.NotNil(t, pod.Spec.Containers[1].Resources)
+ assert.Equal(t, "0.200", pod.Spec.Containers[1].Resources.Limits.Cpu().AsDec().String())
})
// Workflow spec's main container takes precedence over config in controller
diff --git a/workflow/creator/creator_test.go b/workflow/creator/creator_test.go
index 0871eb36cc56..05e1ce5fbba0 100644
--- a/workflow/creator/creator_test.go
+++ b/workflow/creator/creator_test.go
@@ -7,6 +7,7 @@ import (
"github.com/go-jose/go-jose/v3/jwt"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
@@ -24,34 +25,30 @@ func TestLabel(t *testing.T) {
t.Run("NotEmpty", func(t *testing.T) {
wf := &wfv1.Workflow{}
Label(context.WithValue(context.TODO(), auth.ClaimsKey, &types.Claims{Claims: jwt.Claims{Subject: strings.Repeat("x", 63) + "y"}, Email: "my@email", PreferredUsername: "username"}), wf)
- if assert.NotEmpty(t, wf.Labels) {
- assert.Equal(t, strings.Repeat("x", 62)+"y", wf.Labels[common.LabelKeyCreator], "creator is truncated")
- assert.Equal(t, "my.at.email", wf.Labels[common.LabelKeyCreatorEmail], "'@' is replaced by '.at.'")
- assert.Equal(t, "username", wf.Labels[common.LabelKeyCreatorPreferredUsername], "username is matching")
- }
+ require.NotEmpty(t, wf.Labels)
+ assert.Equal(t, strings.Repeat("x", 62)+"y", wf.Labels[common.LabelKeyCreator], "creator is truncated")
+ assert.Equal(t, "my.at.email", wf.Labels[common.LabelKeyCreatorEmail], "'@' is replaced by '.at.'")
+ assert.Equal(t, "username", wf.Labels[common.LabelKeyCreatorPreferredUsername], "username is matching")
})
t.Run("TooLongHyphen", func(t *testing.T) {
wf := &wfv1.Workflow{}
Label(context.WithValue(context.TODO(), auth.ClaimsKey, &types.Claims{Claims: jwt.Claims{Subject: strings.Repeat("-", 63) + "y"}}), wf)
- if assert.NotEmpty(t, wf.Labels) {
- assert.Equal(t, "y", wf.Labels[common.LabelKeyCreator])
- }
+ require.NotEmpty(t, wf.Labels)
+ assert.Equal(t, "y", wf.Labels[common.LabelKeyCreator])
})
t.Run("InvalidDNSNames", func(t *testing.T) {
wf := &wfv1.Workflow{}
Label(context.WithValue(context.TODO(), auth.ClaimsKey, &types.Claims{Claims: jwt.Claims{Subject: "!@#$%^&*()--__" + strings.Repeat("y", 35) + "__--!@#$%^&*()"}, PreferredUsername: "us#er@name#"}), wf)
- if assert.NotEmpty(t, wf.Labels) {
- assert.Equal(t, strings.Repeat("y", 35), wf.Labels[common.LabelKeyCreator])
- assert.Equal(t, "us-er-name", wf.Labels[common.LabelKeyCreatorPreferredUsername], "username is truncated")
- }
+ require.NotEmpty(t, wf.Labels)
+ assert.Equal(t, strings.Repeat("y", 35), wf.Labels[common.LabelKeyCreator])
+ assert.Equal(t, "us-er-name", wf.Labels[common.LabelKeyCreatorPreferredUsername], "username is truncated")
})
t.Run("InvalidDNSNamesWithMidDashes", func(t *testing.T) {
wf := &wfv1.Workflow{}
sub := strings.Repeat("x", 20) + strings.Repeat("-", 70) + strings.Repeat("x", 20)
Label(context.WithValue(context.TODO(), auth.ClaimsKey, &types.Claims{Claims: jwt.Claims{Subject: sub}}), wf)
- if assert.NotEmpty(t, wf.Labels) {
- assert.Equal(t, strings.Repeat("x", 20), wf.Labels[common.LabelKeyCreator])
- }
+ require.NotEmpty(t, wf.Labels)
+ assert.Equal(t, strings.Repeat("x", 20), wf.Labels[common.LabelKeyCreator])
})
t.Run("DifferentUsersFromCreatorLabels", func(t *testing.T) {
type input struct {
diff --git a/workflow/cron/operator_test.go b/workflow/cron/operator_test.go
index eb10bce8b55c..0c1aa82312bd 100644
--- a/workflow/cron/operator_test.go
+++ b/workflow/cron/operator_test.go
@@ -13,6 +13,7 @@ import (
"github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
"github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/fake"
+ "github.com/argoproj/argo-workflows/v3/util/telemetry"
"github.com/argoproj/argo-workflows/v3/workflow/common"
"github.com/argoproj/argo-workflows/v3/workflow/metrics"
"github.com/argoproj/argo-workflows/v3/workflow/util"
@@ -181,7 +182,7 @@ func TestCronWorkflowConditionSubmissionError(t *testing.T) {
v1alpha1.MustUnmarshal([]byte(invalidWf), &cronWf)
cs := fake.NewSimpleClientset()
- testMetrics, err := metrics.New(context.Background(), metrics.TestScopeName, &metrics.Config{}, metrics.Callbacks{})
+ testMetrics, err := metrics.New(context.Background(), telemetry.TestScopeName, telemetry.TestScopeName, &telemetry.Config{}, metrics.Callbacks{})
require.NoError(t, err)
woc := &cronWfOperationCtx{
wfClientset: cs,
@@ -237,7 +238,7 @@ func TestSpecError(t *testing.T) {
cs := fake.NewSimpleClientset()
ctx := context.Background()
- testMetrics, err := metrics.New(ctx, metrics.TestScopeName, &metrics.Config{}, metrics.Callbacks{})
+ testMetrics, err := metrics.New(ctx, telemetry.TestScopeName, telemetry.TestScopeName, &telemetry.Config{}, metrics.Callbacks{})
require.NoError(t, err)
woc := &cronWfOperationCtx{
wfClientset: cs,
@@ -262,7 +263,7 @@ func TestScheduleTimeParam(t *testing.T) {
v1alpha1.MustUnmarshal([]byte(scheduledWf), &cronWf)
cs := fake.NewSimpleClientset()
- testMetrics, _ := metrics.New(context.Background(), metrics.TestScopeName, &metrics.Config{}, metrics.Callbacks{})
+ testMetrics, _ := metrics.New(context.Background(), telemetry.TestScopeName, telemetry.TestScopeName, &telemetry.Config{}, metrics.Callbacks{})
woc := &cronWfOperationCtx{
wfClientset: cs,
wfClient: cs.ArgoprojV1alpha1().Workflows(""),
@@ -312,7 +313,7 @@ func TestLastUsedSchedule(t *testing.T) {
v1alpha1.MustUnmarshal([]byte(lastUsedSchedule), &cronWf)
cs := fake.NewSimpleClientset()
- testMetrics, err := metrics.New(context.Background(), metrics.TestScopeName, &metrics.Config{}, metrics.Callbacks{})
+ testMetrics, err := metrics.New(context.Background(), telemetry.TestScopeName, telemetry.TestScopeName, &telemetry.Config{}, metrics.Callbacks{})
require.NoError(t, err)
woc := &cronWfOperationCtx{
wfClientset: cs,
@@ -330,9 +331,8 @@ func TestLastUsedSchedule(t *testing.T) {
woc.cronWf.SetSchedule(woc.cronWf.Spec.GetScheduleString())
- if assert.NotNil(t, woc.cronWf.Annotations) {
- assert.Equal(t, woc.cronWf.Spec.GetScheduleString(), woc.cronWf.GetLatestSchedule())
- }
+ require.NotNil(t, woc.cronWf.Annotations)
+ assert.Equal(t, woc.cronWf.Spec.GetScheduleString(), woc.cronWf.GetLatestSchedule())
}
var forbidMissedSchedule = `apiVersion: argoproj.io/v1alpha1
@@ -441,7 +441,7 @@ func TestMultipleSchedules(t *testing.T) {
v1alpha1.MustUnmarshal([]byte(multipleSchedulesWf), &cronWf)
cs := fake.NewSimpleClientset()
- testMetrics, err := metrics.New(context.Background(), metrics.TestScopeName, &metrics.Config{}, metrics.Callbacks{})
+ testMetrics, err := metrics.New(context.Background(), telemetry.TestScopeName, telemetry.TestScopeName, &telemetry.Config{}, metrics.Callbacks{})
require.NoError(t, err)
woc := &cronWfOperationCtx{
wfClientset: cs,
@@ -504,7 +504,7 @@ func TestSpecErrorWithScheduleAndSchedules(t *testing.T) {
cs := fake.NewSimpleClientset()
ctx := context.Background()
- testMetrics, err := metrics.New(ctx, metrics.TestScopeName, &metrics.Config{}, metrics.Callbacks{})
+ testMetrics, err := metrics.New(ctx, telemetry.TestScopeName, telemetry.TestScopeName, &telemetry.Config{}, metrics.Callbacks{})
require.NoError(t, err)
woc := &cronWfOperationCtx{
wfClientset: cs,
@@ -565,7 +565,7 @@ func TestSpecErrorWithValidAndInvalidSchedules(t *testing.T) {
cs := fake.NewSimpleClientset()
ctx := context.Background()
- testMetrics, err := metrics.New(ctx, metrics.TestScopeName, &metrics.Config{}, metrics.Callbacks{})
+ testMetrics, err := metrics.New(ctx, telemetry.TestScopeName, telemetry.TestScopeName, &telemetry.Config{}, metrics.Callbacks{})
require.NoError(t, err)
woc := &cronWfOperationCtx{
wfClientset: cs,
diff --git a/workflow/executor/executor.go b/workflow/executor/executor.go
index 716f8fa8f30e..4f0b52875ac9 100644
--- a/workflow/executor/executor.go
+++ b/workflow/executor/executor.go
@@ -267,11 +267,13 @@ func (we *WorkflowExecutor) LoadArtifacts(ctx context.Context) error {
func (we *WorkflowExecutor) StageFiles() error {
var filePath string
var body []byte
+ mode := os.FileMode(0o644)
switch we.Template.GetType() {
case wfv1.TemplateTypeScript:
log.Infof("Loading script source to %s", common.ExecutorScriptSourcePath)
filePath = common.ExecutorScriptSourcePath
body = []byte(we.Template.Script.Source)
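+ // Script sources may be executed directly, so stage them with the execute bit set.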
+ mode = os.FileMode(0o755)
case wfv1.TemplateTypeResource:
if we.Template.Resource.ManifestFrom != nil && we.Template.Resource.ManifestFrom.Artifact != nil {
log.Infof("manifest %s already staged", we.Template.Resource.ManifestFrom.Artifact.Name)
@@ -283,7 +285,7 @@ func (we *WorkflowExecutor) StageFiles() error {
default:
return nil
}
- err := os.WriteFile(filePath, body, 0o644)
+ err := os.WriteFile(filePath, body, mode)
if err != nil {
return argoerrs.InternalWrapError(err)
}
diff --git a/workflow/gccontroller/gc_controller_test.go b/workflow/gccontroller/gc_controller_test.go
index 5ac263ecba85..e1e77cdd4f09 100644
--- a/workflow/gccontroller/gc_controller_test.go
+++ b/workflow/gccontroller/gc_controller_test.go
@@ -15,6 +15,7 @@ import (
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
fakewfclientset "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/fake"
+ "github.com/argoproj/argo-workflows/v3/util/telemetry"
"github.com/argoproj/argo-workflows/v3/workflow/metrics"
"github.com/argoproj/argo-workflows/v3/workflow/util"
)
@@ -345,7 +346,7 @@ func newTTLController(t *testing.T) *Controller {
clock := testingclock.NewFakeClock(time.Now())
wfclientset := fakewfclientset.NewSimpleClientset()
wfInformer := cache.NewSharedIndexInformer(nil, nil, 0, nil)
- gcMetrics, err := metrics.New(context.Background(), metrics.TestScopeName, &metrics.Config{}, metrics.Callbacks{})
+ gcMetrics, err := metrics.New(context.Background(), telemetry.TestScopeName, telemetry.TestScopeName, &telemetry.Config{}, metrics.Callbacks{})
require.NoError(t, err)
return &Controller{
wfclientset: wfclientset,
diff --git a/workflow/metrics/counter_cronworkflow_trigger.go b/workflow/metrics/counter_cronworkflow_trigger.go
index 2f1950e4331e..f77c488b6b36 100644
--- a/workflow/metrics/counter_cronworkflow_trigger.go
+++ b/workflow/metrics/counter_cronworkflow_trigger.go
@@ -2,6 +2,8 @@ package metrics
import (
"context"
+
+ "github.com/argoproj/argo-workflows/v3/util/telemetry"
)
const (
@@ -9,17 +11,17 @@ const (
)
func addCronWfTriggerCounter(_ context.Context, m *Metrics) error {
- return m.createInstrument(int64Counter,
+ return m.CreateInstrument(telemetry.Int64Counter,
nameCronTriggered,
"Total number of cron workflows triggered",
"{cronworkflow}",
- withAsBuiltIn(),
+ telemetry.WithAsBuiltIn(),
)
}
func (m *Metrics) CronWfTrigger(ctx context.Context, name, namespace string) {
- m.addInt(ctx, nameCronTriggered, 1, instAttribs{
- {name: labelCronWFName, value: name},
- {name: labelWorkflowNamespace, value: namespace},
+ m.AddInt(ctx, nameCronTriggered, 1, telemetry.InstAttribs{
+ {Name: telemetry.AttribCronWFName, Value: name},
+ {Name: telemetry.AttribWorkflowNamespace, Value: namespace},
})
}
diff --git a/workflow/metrics/counter_error.go b/workflow/metrics/counter_error.go
index f53a14f44c13..f71f7e6d8315 100644
--- a/workflow/metrics/counter_error.go
+++ b/workflow/metrics/counter_error.go
@@ -2,6 +2,8 @@ package metrics
import (
"context"
+
+ "github.com/argoproj/argo-workflows/v3/util/telemetry"
)
type ErrorCause string
@@ -14,30 +16,30 @@ const (
)
func addErrorCounter(ctx context.Context, m *Metrics) error {
- err := m.createInstrument(int64Counter,
+ err := m.CreateInstrument(telemetry.Int64Counter,
nameErrorCount,
"Number of errors encountered by the controller by cause",
"{error}",
- withAsBuiltIn(),
+ telemetry.WithAsBuiltIn(),
)
if err != nil {
return err
}
// Initialise all values to zero
for _, cause := range []ErrorCause{ErrorCauseOperationPanic, ErrorCauseCronWorkflowSubmissionError, ErrorCauseCronWorkflowSpecError} {
- m.addInt(ctx, nameErrorCount, 0, instAttribs{{name: labelErrorCause, value: string(cause)}})
+ m.AddInt(ctx, nameErrorCount, 0, telemetry.InstAttribs{{Name: telemetry.AttribErrorCause, Value: string(cause)}})
}
return nil
}
func (m *Metrics) OperationPanic(ctx context.Context) {
- m.addInt(ctx, nameErrorCount, 1, instAttribs{{name: labelErrorCause, value: string(ErrorCauseOperationPanic)}})
+ m.AddInt(ctx, nameErrorCount, 1, telemetry.InstAttribs{{Name: telemetry.AttribErrorCause, Value: string(ErrorCauseOperationPanic)}})
}
func (m *Metrics) CronWorkflowSubmissionError(ctx context.Context) {
- m.addInt(ctx, nameErrorCount, 1, instAttribs{{name: labelErrorCause, value: string(ErrorCauseCronWorkflowSubmissionError)}})
+ m.AddInt(ctx, nameErrorCount, 1, telemetry.InstAttribs{{Name: telemetry.AttribErrorCause, Value: string(ErrorCauseCronWorkflowSubmissionError)}})
}
func (m *Metrics) CronWorkflowSpecError(ctx context.Context) {
- m.addInt(ctx, nameErrorCount, 1, instAttribs{{name: labelErrorCause, value: string(ErrorCauseCronWorkflowSpecError)}})
+ m.AddInt(ctx, nameErrorCount, 1, telemetry.InstAttribs{{Name: telemetry.AttribErrorCause, Value: string(ErrorCauseCronWorkflowSpecError)}})
}
diff --git a/workflow/metrics/counter_log.go b/workflow/metrics/counter_log.go
index 96ed960943ab..b9cea55952ab 100644
--- a/workflow/metrics/counter_log.go
+++ b/workflow/metrics/counter_log.go
@@ -3,28 +3,30 @@ package metrics
import (
"context"
+ "github.com/argoproj/argo-workflows/v3/util/telemetry"
+
log "github.com/sirupsen/logrus"
)
type logMetric struct {
- counter *instrument
+ counter *telemetry.Instrument
}
func addLogCounter(ctx context.Context, m *Metrics) error {
const nameLogMessages = `log_messages`
- err := m.createInstrument(int64Counter,
+ err := m.CreateInstrument(telemetry.Int64Counter,
nameLogMessages,
"Total number of log messages.",
"{message}",
- withAsBuiltIn(),
+ telemetry.WithAsBuiltIn(),
)
lm := logMetric{
- counter: m.allInstruments[nameLogMessages],
+ counter: m.AllInstruments[nameLogMessages],
}
log.AddHook(lm)
for _, level := range lm.Levels() {
- m.addInt(ctx, nameLogMessages, 0, instAttribs{
- {name: labelLogLevel, value: level.String()},
+ m.AddInt(ctx, nameLogMessages, 0, telemetry.InstAttribs{
+ {Name: telemetry.AttribLogLevel, Value: level.String()},
})
}
@@ -36,8 +38,8 @@ func (m logMetric) Levels() []log.Level {
}
func (m logMetric) Fire(entry *log.Entry) error {
- (*m.counter).addInt(entry.Context, 1, instAttribs{
- {name: labelLogLevel, value: entry.Level.String()},
+ (*m.counter).AddInt(entry.Context, 1, telemetry.InstAttribs{
+ {Name: telemetry.AttribLogLevel, Value: entry.Level.String()},
})
return nil
}
diff --git a/workflow/metrics/counter_pod_missing.go b/workflow/metrics/counter_pod_missing.go
index 991a2184796b..8f3b227d8ef0 100644
--- a/workflow/metrics/counter_pod_missing.go
+++ b/workflow/metrics/counter_pod_missing.go
@@ -2,6 +2,8 @@ package metrics
import (
"context"
+
+ "github.com/argoproj/argo-workflows/v3/util/telemetry"
)
const (
@@ -9,18 +11,18 @@ const (
)
func addPodMissingCounter(_ context.Context, m *Metrics) error {
- return m.createInstrument(int64Counter,
+ return m.CreateInstrument(telemetry.Int64Counter,
namePodMissing,
"Incidents of pod missing.",
"{pod}",
- withAsBuiltIn(),
+ telemetry.WithAsBuiltIn(),
)
}
func (m *Metrics) incPodMissing(ctx context.Context, val int64, recentlyStarted bool, phase string) {
- m.addInt(ctx, namePodMissing, val, instAttribs{
- {name: labelRecentlyStarted, value: recentlyStarted},
- {name: labelNodePhase, value: phase},
+ m.AddInt(ctx, namePodMissing, val, telemetry.InstAttribs{
+ {Name: telemetry.AttribRecentlyStarted, Value: recentlyStarted},
+ {Name: telemetry.AttribNodePhase, Value: phase},
})
}
diff --git a/workflow/metrics/counter_pod_pending.go b/workflow/metrics/counter_pod_pending.go
index 4c47fbb4a22f..5138df86abea 100644
--- a/workflow/metrics/counter_pod_pending.go
+++ b/workflow/metrics/counter_pod_pending.go
@@ -3,6 +3,8 @@ package metrics
import (
"context"
"strings"
+
+ "github.com/argoproj/argo-workflows/v3/util/telemetry"
)
const (
@@ -10,11 +12,11 @@ const (
)
func addPodPendingCounter(_ context.Context, m *Metrics) error {
- return m.createInstrument(int64Counter,
+ return m.CreateInstrument(telemetry.Int64Counter,
namePodPending,
"Total number of pods that started pending by reason",
"{pod}",
- withAsBuiltIn(),
+ telemetry.WithAsBuiltIn(),
)
}
@@ -28,9 +30,9 @@ func (m *Metrics) ChangePodPending(ctx context.Context, reason, namespace string
// the pod_phase metric can cope with this being visible
return
default:
- m.addInt(ctx, namePodPending, 1, instAttribs{
- {name: labelPodPendingReason, value: splitReason[0]},
- {name: labelPodNamespace, value: namespace},
+ m.AddInt(ctx, namePodPending, 1, telemetry.InstAttribs{
+ {Name: telemetry.AttribPodPendingReason, Value: splitReason[0]},
+ {Name: telemetry.AttribPodNamespace, Value: namespace},
})
}
}
diff --git a/workflow/metrics/counter_pod_phase.go b/workflow/metrics/counter_pod_phase.go
index 530be09ad5ce..37686c5fc4c3 100644
--- a/workflow/metrics/counter_pod_phase.go
+++ b/workflow/metrics/counter_pod_phase.go
@@ -2,6 +2,8 @@ package metrics
import (
"context"
+
+ "github.com/argoproj/argo-workflows/v3/util/telemetry"
)
const (
@@ -9,17 +11,17 @@ const (
)
func addPodPhaseCounter(_ context.Context, m *Metrics) error {
- return m.createInstrument(int64Counter,
+ return m.CreateInstrument(telemetry.Int64Counter,
namePodPhase,
"Total number of Pods that have entered each phase",
"{pod}",
- withAsBuiltIn(),
+ telemetry.WithAsBuiltIn(),
)
}
func (m *Metrics) ChangePodPhase(ctx context.Context, phase, namespace string) {
- m.addInt(ctx, namePodPhase, 1, instAttribs{
- {name: labelPodPhase, value: phase},
- {name: labelPodNamespace, value: namespace},
+ m.AddInt(ctx, namePodPhase, 1, telemetry.InstAttribs{
+ {Name: telemetry.AttribPodPhase, Value: phase},
+ {Name: telemetry.AttribPodNamespace, Value: namespace},
})
}
diff --git a/workflow/metrics/counter_template.go b/workflow/metrics/counter_template.go
index 85861179cf42..f1cb3500cab3 100644
--- a/workflow/metrics/counter_template.go
+++ b/workflow/metrics/counter_template.go
@@ -2,6 +2,8 @@ package metrics
import (
"context"
+
+ "github.com/argoproj/argo-workflows/v3/util/telemetry"
)
const (
@@ -9,25 +11,25 @@ const (
)
func addWorkflowTemplateCounter(_ context.Context, m *Metrics) error {
- return m.createInstrument(int64Counter,
+ return m.CreateInstrument(telemetry.Int64Counter,
nameWFTemplateTriggered,
"Total number of workflow templates triggered by workflowTemplateRef",
"{workflow_template}",
- withAsBuiltIn(),
+ telemetry.WithAsBuiltIn(),
)
}
-func templateLabels(name, namespace string, cluster bool) instAttribs {
- return instAttribs{
- {name: labelTemplateName, value: name},
- {name: labelTemplateNamespace, value: namespace},
- {name: labelTemplateCluster, value: cluster},
+func templateAttribs(name, namespace string, cluster bool) telemetry.InstAttribs {
+ return telemetry.InstAttribs{
+ {Name: telemetry.AttribTemplateName, Value: name},
+ {Name: telemetry.AttribTemplateNamespace, Value: namespace},
+ {Name: telemetry.AttribTemplateCluster, Value: cluster},
}
}
func (m *Metrics) CountWorkflowTemplate(ctx context.Context, phase MetricWorkflowPhase, name, namespace string, cluster bool) {
- labels := templateLabels(name, namespace, cluster)
- labels = append(labels, instAttrib{name: labelWorkflowPhase, value: string(phase)})
+ attribs := templateAttribs(name, namespace, cluster)
+ attribs = append(attribs, telemetry.InstAttrib{Name: telemetry.AttribWorkflowPhase, Value: string(phase)})
- m.addInt(ctx, nameWFTemplateTriggered, 1, labels)
+ m.AddInt(ctx, nameWFTemplateTriggered, 1, attribs)
}
diff --git a/workflow/metrics/counter_workflow_phase.go b/workflow/metrics/counter_workflow_phase.go
index 693dcae12cf4..91c56eef4853 100644
--- a/workflow/metrics/counter_workflow_phase.go
+++ b/workflow/metrics/counter_workflow_phase.go
@@ -4,6 +4,7 @@ import (
"context"
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
+ "github.com/argoproj/argo-workflows/v3/util/telemetry"
)
const (
@@ -40,17 +41,17 @@ func ConvertWorkflowPhase(inPhase wfv1.WorkflowPhase) MetricWorkflowPhase {
}
func addWorkflowPhaseCounter(_ context.Context, m *Metrics) error {
- return m.createInstrument(int64Counter,
+ return m.CreateInstrument(telemetry.Int64Counter,
nameWorkflowPhaseCounter,
"Total number of workflows that have entered each phase",
"{workflow}",
- withAsBuiltIn(),
+ telemetry.WithAsBuiltIn(),
)
}
func (m *Metrics) ChangeWorkflowPhase(ctx context.Context, phase MetricWorkflowPhase, namespace string) {
- m.addInt(ctx, nameWorkflowPhaseCounter, 1, instAttribs{
- {name: labelWorkflowPhase, value: string(phase)},
- {name: labelWorkflowNamespace, value: namespace},
+ m.AddInt(ctx, nameWorkflowPhaseCounter, 1, telemetry.InstAttribs{
+ {Name: telemetry.AttribWorkflowPhase, Value: string(phase)},
+ {Name: telemetry.AttribWorkflowNamespace, Value: namespace},
})
}
diff --git a/workflow/metrics/gauge_pod_phase.go b/workflow/metrics/gauge_pod_phase.go
index d9514ee90c79..b93e9c33729b 100644
--- a/workflow/metrics/gauge_pod_phase.go
+++ b/workflow/metrics/gauge_pod_phase.go
@@ -3,6 +3,8 @@ package metrics
import (
"context"
+ "github.com/argoproj/argo-workflows/v3/util/telemetry"
+
"go.opentelemetry.io/otel/metric"
)
@@ -11,16 +13,16 @@ type PodPhaseCallback func() map[string]int64
type podPhaseGauge struct {
callback PodPhaseCallback
- gauge *instrument
+ gauge *telemetry.Instrument
}
func addPodPhaseGauge(ctx context.Context, m *Metrics) error {
const namePodsPhase = `pods_gauge`
- err := m.createInstrument(int64ObservableGauge,
+ err := m.CreateInstrument(telemetry.Int64ObservableGauge,
namePodsPhase,
"Number of Pods from Workflows currently accessible by the controller by status.",
"{pod}",
- withAsBuiltIn(),
+ telemetry.WithAsBuiltIn(),
)
if err != nil {
return err
@@ -29,9 +31,9 @@ func addPodPhaseGauge(ctx context.Context, m *Metrics) error {
if m.callbacks.PodPhase != nil {
ppGauge := podPhaseGauge{
callback: m.callbacks.PodPhase,
- gauge: m.allInstruments[namePodsPhase],
+ gauge: m.AllInstruments[namePodsPhase],
}
- return m.allInstruments[namePodsPhase].registerCallback(m, ppGauge.update)
+ return m.AllInstruments[namePodsPhase].RegisterCallback(m.Metrics, ppGauge.update)
}
return nil
}
@@ -39,7 +41,7 @@ func addPodPhaseGauge(ctx context.Context, m *Metrics) error {
func (p *podPhaseGauge) update(_ context.Context, o metric.Observer) error {
phases := p.callback()
for phase, val := range phases {
- p.gauge.observeInt(o, val, instAttribs{{name: labelPodPhase, value: phase}})
+ p.gauge.ObserveInt(o, val, telemetry.InstAttribs{{Name: telemetry.AttribPodPhase, Value: phase}})
}
return nil
}
diff --git a/workflow/metrics/gauge_workflow_condition.go b/workflow/metrics/gauge_workflow_condition.go
index 19e01917643f..3709cd07d9d1 100644
--- a/workflow/metrics/gauge_workflow_condition.go
+++ b/workflow/metrics/gauge_workflow_condition.go
@@ -6,6 +6,7 @@ import (
"go.opentelemetry.io/otel/metric"
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
+ "github.com/argoproj/argo-workflows/v3/util/telemetry"
)
// WorkflowConditionCallback is the function prototype to provide this gauge with the condition of the workflows
@@ -13,16 +14,16 @@ type WorkflowConditionCallback func() map[wfv1.Condition]int64
type workflowConditionGauge struct {
callback WorkflowConditionCallback
- gauge *instrument
+ gauge *telemetry.Instrument
}
func addWorkflowConditionGauge(_ context.Context, m *Metrics) error {
const nameWorkflowCondition = `workflow_condition`
- err := m.createInstrument(int64ObservableGauge,
+ err := m.CreateInstrument(telemetry.Int64ObservableGauge,
nameWorkflowCondition,
"Workflow condition.",
"{unit}",
- withAsBuiltIn(),
+ telemetry.WithAsBuiltIn(),
)
if err != nil {
return err
@@ -31,9 +32,9 @@ func addWorkflowConditionGauge(_ context.Context, m *Metrics) error {
if m.callbacks.WorkflowCondition != nil {
wfcGauge := workflowConditionGauge{
callback: m.callbacks.WorkflowCondition,
- gauge: m.allInstruments[nameWorkflowCondition],
+ gauge: m.AllInstruments[nameWorkflowCondition],
}
- return m.allInstruments[nameWorkflowCondition].registerCallback(m, wfcGauge.update)
+ return m.AllInstruments[nameWorkflowCondition].RegisterCallback(m.Metrics, wfcGauge.update)
}
return nil
// TODO init all phases?
@@ -42,9 +43,9 @@ func addWorkflowConditionGauge(_ context.Context, m *Metrics) error {
func (c *workflowConditionGauge) update(_ context.Context, o metric.Observer) error {
conditions := c.callback()
for condition, val := range conditions {
- c.gauge.observeInt(o, val, instAttribs{
- {name: labelWorkflowType, value: string(condition.Type)},
- {name: labelWorkflowStatus, value: string(condition.Status)},
+ c.gauge.ObserveInt(o, val, telemetry.InstAttribs{
+ {Name: telemetry.AttribWorkflowType, Value: string(condition.Type)},
+ {Name: telemetry.AttribWorkflowStatus, Value: string(condition.Status)},
})
}
return nil
diff --git a/workflow/metrics/gauge_workflow_phase.go b/workflow/metrics/gauge_workflow_phase.go
index 357feaf19863..59a6e670e413 100644
--- a/workflow/metrics/gauge_workflow_phase.go
+++ b/workflow/metrics/gauge_workflow_phase.go
@@ -3,6 +3,8 @@ package metrics
import (
"context"
+ "github.com/argoproj/argo-workflows/v3/util/telemetry"
+
"go.opentelemetry.io/otel/metric"
)
@@ -11,16 +13,16 @@ type WorkflowPhaseCallback func() map[string]int64
type workflowPhaseGauge struct {
callback WorkflowPhaseCallback
- gauge *instrument
+ gauge *telemetry.Instrument
}
func addWorkflowPhaseGauge(_ context.Context, m *Metrics) error {
const nameWorkflowPhaseGauge = `gauge`
- err := m.createInstrument(int64ObservableGauge,
+ err := m.CreateInstrument(telemetry.Int64ObservableGauge,
nameWorkflowPhaseGauge,
"number of Workflows currently accessible by the controller by status",
"{workflow}",
- withAsBuiltIn(),
+ telemetry.WithAsBuiltIn(),
)
if err != nil {
return err
@@ -29,9 +31,9 @@ func addWorkflowPhaseGauge(_ context.Context, m *Metrics) error {
if m.callbacks.WorkflowPhase != nil {
wfpGauge := workflowPhaseGauge{
callback: m.callbacks.WorkflowPhase,
- gauge: m.allInstruments[nameWorkflowPhaseGauge],
+ gauge: m.AllInstruments[nameWorkflowPhaseGauge],
}
- return m.allInstruments[nameWorkflowPhaseGauge].registerCallback(m, wfpGauge.update)
+ return m.AllInstruments[nameWorkflowPhaseGauge].RegisterCallback(m.Metrics, wfpGauge.update)
}
return nil
// TODO init all phases?
@@ -40,7 +42,7 @@ func addWorkflowPhaseGauge(_ context.Context, m *Metrics) error {
func (p *workflowPhaseGauge) update(_ context.Context, o metric.Observer) error {
phases := p.callback()
for phase, val := range phases {
- p.gauge.observeInt(o, val, instAttribs{{name: labelWorkflowStatus, value: phase}})
+ p.gauge.ObserveInt(o, val, telemetry.InstAttribs{{Name: telemetry.AttribWorkflowStatus, Value: phase}})
}
return nil
}
diff --git a/workflow/metrics/histogram_durations.go b/workflow/metrics/histogram_durations.go
index 88c00ad28bbb..6b6038e31ff2 100644
--- a/workflow/metrics/histogram_durations.go
+++ b/workflow/metrics/histogram_durations.go
@@ -4,6 +4,8 @@ import (
"context"
"time"
+ "github.com/argoproj/argo-workflows/v3/util/telemetry"
+
"github.com/prometheus/client_golang/prometheus"
log "github.com/sirupsen/logrus"
@@ -24,15 +26,15 @@ func addOperationDurationHistogram(_ context.Context, m *Metrics) error {
}
bucketWidth := maxOperationTimeSeconds / float64(operationDurationMetricBucketCount)
// The buckets here are only the 'defaults' and can be overridden with configmap defaults
- return m.createInstrument(float64Histogram,
+ return m.CreateInstrument(telemetry.Float64Histogram,
nameOperationDuration,
"Histogram of durations of operations",
"s",
- withDefaultBuckets(prometheus.LinearBuckets(bucketWidth, bucketWidth, operationDurationMetricBucketCount)),
- withAsBuiltIn(),
+ telemetry.WithDefaultBuckets(prometheus.LinearBuckets(bucketWidth, bucketWidth, operationDurationMetricBucketCount)),
+ telemetry.WithAsBuiltIn(),
)
}
func (m *Metrics) OperationCompleted(ctx context.Context, durationSeconds float64) {
- m.record(ctx, nameOperationDuration, durationSeconds, instAttribs{})
+ m.Record(ctx, nameOperationDuration, durationSeconds, telemetry.InstAttribs{})
}
diff --git a/workflow/metrics/histogram_template.go b/workflow/metrics/histogram_template.go
index 7fe50895b5d8..a466b9112fa3 100644
--- a/workflow/metrics/histogram_template.go
+++ b/workflow/metrics/histogram_template.go
@@ -3,6 +3,8 @@ package metrics
import (
"context"
"time"
+
+ "github.com/argoproj/argo-workflows/v3/util/telemetry"
)
const (
@@ -10,14 +12,14 @@ const (
)
func addWorkflowTemplateHistogram(_ context.Context, m *Metrics) error {
- return m.createInstrument(float64Histogram,
+ return m.CreateInstrument(telemetry.Float64Histogram,
nameWorkflowTemplateRuntime,
"Duration of workflow template runs run through workflowTemplateRefs",
"s",
- withAsBuiltIn(),
+ telemetry.WithAsBuiltIn(),
)
}
func (m *Metrics) RecordWorkflowTemplateTime(ctx context.Context, duration time.Duration, name, namespace string, cluster bool) {
- m.record(ctx, nameWorkflowTemplateRuntime, duration.Seconds(), templateLabels(name, namespace, cluster))
+ m.Record(ctx, nameWorkflowTemplateRuntime, duration.Seconds(), templateAttribs(name, namespace, cluster))
}
diff --git a/workflow/metrics/labels.go b/workflow/metrics/labels.go
deleted file mode 100644
index 47de721689be..000000000000
--- a/workflow/metrics/labels.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package metrics
-
-const (
- labelBuildVersion string = `version`
- labelBuildPlatform string = `platform`
- labelBuildGoVersion string = `go_version`
- labelBuildDate string = `build_date`
- labelBuildCompiler string = `compiler`
- labelBuildGitCommit string = `git_commit`
- labelBuildGitTreeState string = `git_treestate`
- labelBuildGitTag string = `git_tag`
-
- labelCronWFName string = `name`
-
- labelErrorCause string = "cause"
-
- labelLogLevel string = `level`
-
- labelNodePhase string = `node_phase`
-
- labelPodPhase string = `phase`
- labelPodNamespace string = `namespace`
- labelPodPendingReason string = `reason`
-
- labelQueueName string = `queue_name`
-
- labelRecentlyStarted string = `recently_started`
-
- labelRequestKind = `kind`
- labelRequestVerb = `verb`
- labelRequestCode = `status_code`
-
- labelTemplateName string = `name`
- labelTemplateNamespace string = `namespace`
- labelTemplateCluster string = `cluster_scope`
-
- labelWorkerType string = `worker_type`
-
- labelWorkflowNamespace string = `namespace`
- labelWorkflowPhase string = `phase`
- labelWorkflowStatus = `status`
- labelWorkflowType = `type`
-)
diff --git a/workflow/metrics/leader.go b/workflow/metrics/leader.go
index ec58dbc1e774..ff3562b66b52 100644
--- a/workflow/metrics/leader.go
+++ b/workflow/metrics/leader.go
@@ -3,6 +3,8 @@ package metrics
import (
"context"
+ "github.com/argoproj/argo-workflows/v3/util/telemetry"
+
"go.opentelemetry.io/otel/metric"
)
@@ -10,16 +12,16 @@ type IsLeaderCallback func() bool
type leaderGauge struct {
callback IsLeaderCallback
- gauge *instrument
+ gauge *telemetry.Instrument
}
func addIsLeader(ctx context.Context, m *Metrics) error {
const nameLeader = `is_leader`
- err := m.createInstrument(int64ObservableGauge,
+ err := m.CreateInstrument(telemetry.Int64ObservableGauge,
nameLeader,
"Emits 1 if leader, 0 otherwise. Always 1 if leader election is disabled.",
"{leader}",
- withAsBuiltIn(),
+ telemetry.WithAsBuiltIn(),
)
if err != nil {
return err
@@ -29,9 +31,9 @@ func addIsLeader(ctx context.Context, m *Metrics) error {
}
lGauge := leaderGauge{
callback: m.callbacks.IsLeader,
- gauge: m.allInstruments[nameLeader],
+ gauge: m.AllInstruments[nameLeader],
}
- return m.allInstruments[nameLeader].registerCallback(m, lGauge.update)
+ return m.AllInstruments[nameLeader].RegisterCallback(m.Metrics, lGauge.update)
}
func (l *leaderGauge) update(_ context.Context, o metric.Observer) error {
@@ -39,6 +41,6 @@ func (l *leaderGauge) update(_ context.Context, o metric.Observer) error {
if l.callback() {
val = 1
}
- l.gauge.observeInt(o, val, instAttribs{})
+ l.gauge.ObserveInt(o, val, telemetry.InstAttribs{})
return nil
}
diff --git a/workflow/metrics/leader_test.go b/workflow/metrics/leader_test.go
index 623c5931464d..76514ab4fa51 100644
--- a/workflow/metrics/leader_test.go
+++ b/workflow/metrics/leader_test.go
@@ -6,11 +6,13 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel/attribute"
+
+ "github.com/argoproj/argo-workflows/v3/util/telemetry"
)
func TestIsLeader(t *testing.T) {
_, te, err := createTestMetrics(
- &Config{},
+ &telemetry.Config{},
Callbacks{
IsLeader: func() bool {
return true
@@ -27,7 +29,7 @@ func TestIsLeader(t *testing.T) {
func TestNotLeader(t *testing.T) {
_, te, err := createTestMetrics(
- &Config{},
+ &telemetry.Config{},
Callbacks{
IsLeader: func() bool {
return false
diff --git a/workflow/metrics/metrics.go b/workflow/metrics/metrics.go
index f2377333aa40..622b3147a9c4 100644
--- a/workflow/metrics/metrics.go
+++ b/workflow/metrics/metrics.go
@@ -2,96 +2,38 @@ package metrics
import (
"context"
- "os"
- "sync"
- "time"
- log "github.com/sirupsen/logrus"
- "go.opentelemetry.io/otel"
+ "github.com/argoproj/argo-workflows/v3/util/telemetry"
- wfconfig "github.com/argoproj/argo-workflows/v3/config"
-
- "go.opentelemetry.io/contrib/instrumentation/runtime"
- "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
- "go.opentelemetry.io/otel/metric"
metricsdk "go.opentelemetry.io/otel/sdk/metric"
- "go.opentelemetry.io/otel/sdk/metric/metricdata"
- "go.opentelemetry.io/otel/sdk/resource"
- semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)
-type Config struct {
- Enabled bool
- Path string
- Port int
- TTL time.Duration
- IgnoreErrors bool
- Secure bool
- Modifiers map[string]Modifier
- Temporality wfconfig.MetricsTemporality
-}
-
type Metrics struct {
- // Ensures mutual exclusion in workflows map
- mutex sync.RWMutex
+ *telemetry.Metrics
- // Evil context for compatibility with legacy context free interfaces
- ctx context.Context
- otelMeter *metric.Meter
- callbacks Callbacks
- config *Config
-
- allInstruments map[string]*instrument
+ callbacks Callbacks
realtimeWorkflows map[string][]realtimeTracker
}
-func New(ctx context.Context, serviceName string, config *Config, callbacks Callbacks, extraOpts ...metricsdk.Option) (*Metrics, error) {
- res := resource.NewWithAttributes(
- semconv.SchemaURL,
- semconv.ServiceName(serviceName),
- )
-
- options := make([]metricsdk.Option, 0)
- options = append(options, metricsdk.WithResource(res))
- _, otlpEnabled := os.LookupEnv(`OTEL_EXPORTER_OTLP_ENDPOINT`)
- _, otlpMetricsEnabled := os.LookupEnv(`OTEL_EXPORTER_OTLP_METRICS_ENDPOINT`)
- if otlpEnabled || otlpMetricsEnabled {
- log.Info("Starting OTLP metrics exporter")
- otelExporter, err := otlpmetricgrpc.New(ctx, otlpmetricgrpc.WithTemporalitySelector(getTemporality(config)))
- if err != nil {
- return nil, err
- }
- options = append(options, metricsdk.WithReader(metricsdk.NewPeriodicReader(otelExporter)))
- }
-
- if config.Enabled {
- log.Info("Starting Prometheus metrics exporter")
- promExporter, err := config.prometheusMetricsExporter(`argo_workflows`)
- if err != nil {
- return nil, err
- }
- options = append(options, metricsdk.WithReader(promExporter))
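+// New creates controller metrics backed by the shared telemetry package and registers the workflow-specific instruments.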
+func New(ctx context.Context, serviceName, prometheusName string, config *telemetry.Config, callbacks Callbacks, extraOpts ...metricsdk.Option) (*Metrics, error) {
+ m, err := telemetry.NewMetrics(ctx, serviceName, prometheusName, config, extraOpts...)
+ if err != nil {
+ return nil, err
}
- options = append(options, extraOpts...)
- options = append(options, view(config))
-
- provider := metricsdk.NewMeterProvider(options...)
- otel.SetMeterProvider(provider)
- // Add runtime metrics
- err := runtime.Start(runtime.WithMinimumReadMemStatsInterval(time.Second))
+ err = m.Populate(ctx,
+ telemetry.AddVersion,
+ )
if err != nil {
return nil, err
}
- meter := provider.Meter(serviceName)
metrics := &Metrics{
- ctx: ctx,
- otelMeter: &meter,
+ Metrics: m,
callbacks: callbacks,
- config: config,
realtimeWorkflows: make(map[string][]realtimeTracker),
}
+
err = metrics.populate(ctx,
addIsLeader,
addPodPhaseGauge,
@@ -107,7 +49,6 @@ func New(ctx context.Context, serviceName string, config *Config, callbacks Call
addErrorCounter,
addLogCounter,
addK8sRequests,
- addVersion,
addWorkflowConditionGauge,
addWorkQueueMetrics,
)
@@ -123,7 +64,6 @@ func New(ctx context.Context, serviceName string, config *Config, callbacks Call
type addMetric func(context.Context, *Metrics) error
func (m *Metrics) populate(ctx context.Context, adders ...addMetric) error {
- m.allInstruments = make(map[string]*instrument)
for _, adder := range adders {
if err := adder(ctx, m); err != nil {
return err
@@ -131,18 +71,3 @@ func (m *Metrics) populate(ctx context.Context, adders ...addMetric) error {
}
return nil
}
-
-func getTemporality(config *Config) metricsdk.TemporalitySelector {
- switch config.Temporality {
- case wfconfig.MetricsTemporalityCumulative:
- return func(metricsdk.InstrumentKind) metricdata.Temporality {
- return metricdata.CumulativeTemporality
- }
- case wfconfig.MetricsTemporalityDelta:
- return func(metricsdk.InstrumentKind) metricdata.Temporality {
- return metricdata.DeltaTemporality
- }
- default:
- return metricsdk.DefaultTemporalitySelector
- }
-}
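
Note: the net effect of this file is that exporter and SDK setup now lives in util/telemetry, and the workflow-level Metrics only layers callbacks and realtime tracking on top of the embedded telemetry.Metrics. Below is a minimal sketch of constructing it through the new serviceName/prometheusName signature of New; the two name strings are placeholders, and the Config fields mirror the test configs later in this diff.

package main

import (
	"context"
	"time"

	"github.com/argoproj/argo-workflows/v3/util/telemetry"
	"github.com/argoproj/argo-workflows/v3/workflow/metrics"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Config moved from workflow/metrics into util/telemetry; these fields
	// match the ones exercised by the tests in this change.
	config := telemetry.Config{
		Enabled: true,
		Path:    telemetry.DefaultPrometheusServerPath,
		Port:    telemetry.DefaultPrometheusServerPort,
		TTL:     1 * time.Minute,
	}

	// New now takes both an OTel service name and a Prometheus exporter name;
	// both strings here are placeholders, not values mandated by the code.
	m, err := metrics.New(ctx, "argo-workflows", "argo_workflows", &config, metrics.Callbacks{})
	if err != nil {
		panic(err)
	}
	_ = m
}
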
diff --git a/workflow/metrics/metrics_custom.go b/workflow/metrics/metrics_custom.go
index 3b6957e69576..f4057540408d 100644
--- a/workflow/metrics/metrics_custom.go
+++ b/workflow/metrics/metrics_custom.go
@@ -10,6 +10,7 @@ import (
"go.opentelemetry.io/otel/metric"
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
+ "github.com/argoproj/argo-workflows/v3/util/telemetry"
)
type RealTimeValueFunc func() float64
@@ -28,52 +29,56 @@ type customMetricValue struct {
}
type realtimeTracker struct {
- inst *instrument
+ inst *telemetry.Instrument
key string
}
-func (cmv *customMetricValue) getLabels() instAttribs {
- labels := make(instAttribs, len(cmv.labels))
+func (cmv *customMetricValue) getLabels() telemetry.InstAttribs {
+ labels := make(telemetry.InstAttribs, len(cmv.labels))
for i := range cmv.labels {
- labels[i] = instAttrib{name: cmv.labels[i].Key, value: cmv.labels[i].Value}
+ labels[i] = telemetry.InstAttrib{Name: cmv.labels[i].Key, Value: cmv.labels[i].Value}
}
return labels
}
-func (i *instrument) customUserdata(requireSuccess bool) map[string]*customMetricValue {
- switch val := i.userdata.(type) {
+func customUserdata(i *telemetry.Instrument, requireSuccess bool) map[string]*customMetricValue {
+ switch val := i.GetUserdata().(type) {
case map[string]*customMetricValue:
return val
default:
if requireSuccess {
- panic(fmt.Errorf("internal error: unexpected userdata on custom metric %s", i.name))
+ panic(fmt.Errorf("internal error: unexpected userdata on custom metric %s", i.GetName()))
}
return make(map[string]*customMetricValue)
}
}
-func (i *instrument) getOrCreateValue(key string, labels []*wfv1.MetricLabel) *customMetricValue {
- if value, ok := i.customUserdata(true)[key]; ok {
+func getOrCreateValue(i *telemetry.Instrument, key string, labels []*wfv1.MetricLabel) *customMetricValue {
+ if value, ok := customUserdata(i, true)[key]; ok {
return value
}
newValue := customMetricValue{
key: key,
labels: labels,
}
- i.customUserdata(true)[key] = &newValue
+ customUserdata(i, true)[key] = &newValue
return &newValue
}
+type customInstrument struct {
+ *telemetry.Instrument
+}
+
// Common callback for realtime and gauges
// For realtime this acts as a thunk to the calling convention
// For non-realtime we have to fake observability as prometheus provides
// up/down and set on the same gauge type, which otel forbids.
-func (i *instrument) customCallback(_ context.Context, o metric.Observer) error {
- for _, value := range i.customUserdata(true) {
+func (i *customInstrument) customCallback(_ context.Context, o metric.Observer) error {
+ for _, value := range customUserdata(i.Instrument, true) {
if value.rtValueFunc != nil {
- i.observeFloat(o, value.rtValueFunc(), value.getLabels())
+ i.ObserveFloat(o, value.rtValueFunc(), value.getLabels())
} else {
- i.observeFloat(o, value.prometheusValue, value.getLabels())
+ i.ObserveFloat(o, value.prometheusValue, value.getLabels())
}
}
return nil
@@ -86,33 +91,33 @@ func (i *instrument) customCallback(_ context.Context, o metric.Observer) error
// GetCustomMetric returns a custom (or any) metric from its key
// This is exported for legacy testing only
-func (m *Metrics) GetCustomMetric(key string) *instrument {
- m.mutex.RLock()
- defer m.mutex.RUnlock()
+func (m *Metrics) GetCustomMetric(key string) *telemetry.Instrument {
+ m.Mutex.RLock()
+ defer m.Mutex.RUnlock()
// It's okay to return nil metrics in this function
- return m.allInstruments[key]
+ return m.AllInstruments[key]
}
// CustomMetricExists returns if metric exists from its key
// This is exported for testing only
func (m *Metrics) CustomMetricExists(key string) bool {
- m.mutex.RLock()
- defer m.mutex.RUnlock()
+ m.Mutex.RLock()
+ defer m.Mutex.RUnlock()
// It's okay to return nil metrics in this function
- return m.allInstruments[key] != nil
+ return m.AllInstruments[key] != nil
}
// TODO labels on custom metrics
-func (m *Metrics) matchExistingMetric(metricSpec *wfv1.Prometheus) (*instrument, error) {
+func (m *Metrics) matchExistingMetric(metricSpec *wfv1.Prometheus) (*telemetry.Instrument, error) {
key := metricSpec.Name
- if inst, ok := m.allInstruments[key]; ok {
- if inst.description != metricSpec.Help {
- return nil, fmt.Errorf("Help for metric %s is already set to %s, it cannot be changed", metricSpec.Name, inst.description)
+ if inst, ok := m.AllInstruments[key]; ok {
+ if inst.GetDescription() != metricSpec.Help {
+ return nil, fmt.Errorf("Help for metric %s is already set to %s, it cannot be changed", metricSpec.Name, inst.GetDescription())
}
wantedType := metricSpec.GetMetricType()
- switch inst.otel.(type) {
+ switch inst.GetOtel().(type) {
case *metric.Float64ObservableGauge:
if wantedType != wfv1.MetricTypeGauge && !metricSpec.IsRealtime() {
return nil, fmt.Errorf("Found existing gauge for custom metric %s of type %s", metricSpec.Name, wantedType)
@@ -126,14 +131,14 @@ func (m *Metrics) matchExistingMetric(metricSpec *wfv1.Prometheus) (*instrument,
return nil, fmt.Errorf("Found existing histogram for custom metric %s of type %s", metricSpec.Name, wantedType)
}
default:
- return nil, fmt.Errorf("Found unwanted type %s for custom metric %s of type %s", reflect.TypeOf(inst.otel), metricSpec.Name, wantedType)
+ return nil, fmt.Errorf("Found unwanted type %s for custom metric %s of type %s", reflect.TypeOf(inst.GetOtel()), metricSpec.Name, wantedType)
}
return inst, nil
}
return nil, nil
}
-func (m *Metrics) ensureBaseMetric(metricSpec *wfv1.Prometheus, ownerKey string) (*instrument, error) {
+func (m *Metrics) ensureBaseMetric(metricSpec *wfv1.Prometheus, ownerKey string) (*telemetry.Instrument, error) {
metric, err := m.matchExistingMetric(metricSpec)
if err != nil {
return nil, err
@@ -147,11 +152,11 @@ func (m *Metrics) ensureBaseMetric(metricSpec *wfv1.Prometheus, ownerKey string)
return nil, err
}
m.attachCustomMetricToWorkflow(metricSpec, ownerKey)
- inst := m.allInstruments[metricSpec.Name]
+ inst := m.AllInstruments[metricSpec.Name]
if inst == nil {
return nil, fmt.Errorf("Failed to create new metric %s", metricSpec.Name)
}
- inst.userdata = make(map[string]*customMetricValue)
+ inst.SetUserdata(make(map[string]*customMetricValue))
return inst, nil
}
@@ -163,7 +168,7 @@ func (m *Metrics) UpsertCustomMetric(ctx context.Context, metricSpec *wfv1.Prome
if err != nil {
return err
}
- metricValue := baseMetric.getOrCreateValue(metricSpec.GetKey(), metricSpec.Labels)
+ metricValue := getOrCreateValue(baseMetric, metricSpec.GetKey(), metricSpec.Labels)
metricValue.lastUpdated = time.Now()
metricType := metricSpec.GetMetricType()
@@ -191,7 +196,7 @@ func (m *Metrics) UpsertCustomMetric(ctx context.Context, metricSpec *wfv1.Prome
if err != nil {
return err
}
- baseMetric.record(ctx, val, metricValue.getLabels())
+ baseMetric.Record(ctx, val, metricValue.getLabels())
case metricType == wfv1.MetricTypeCounter:
val, err := strconv.ParseFloat(metricSpec.Counter.Value, 64)
if err != nil {
@@ -213,7 +218,7 @@ func (m *Metrics) attachCustomMetricToWorkflow(metricSpec *wfv1.Prometheus, owne
}
}
m.realtimeWorkflows[ownerKey] = append(m.realtimeWorkflows[ownerKey], realtimeTracker{
- inst: m.allInstruments[metricSpec.Name],
+ inst: m.AllInstruments[metricSpec.Name],
key: metricSpec.GetKey(),
})
}
@@ -231,33 +236,35 @@ func (m *Metrics) createCustomMetric(metricSpec *wfv1.Prometheus) error {
case metricType == wfv1.MetricTypeGauge:
return m.createCustomGauge(metricSpec)
case metricType == wfv1.MetricTypeHistogram:
- return m.createInstrument(float64Histogram, metricSpec.Name, metricSpec.Help, "{item}", withDefaultBuckets(metricSpec.Histogram.GetBuckets()))
+ return m.CreateInstrument(telemetry.Float64Histogram, metricSpec.Name, metricSpec.Help, "{item}", telemetry.WithDefaultBuckets(metricSpec.Histogram.GetBuckets()))
case metricType == wfv1.MetricTypeCounter:
- err := m.createInstrument(float64ObservableUpDownCounter, metricSpec.Name, metricSpec.Help, "{item}")
+ err := m.CreateInstrument(telemetry.Float64ObservableUpDownCounter, metricSpec.Name, metricSpec.Help, "{item}")
if err != nil {
return err
}
- inst := m.allInstruments[metricSpec.Name]
- return inst.registerCallback(m, inst.customCallback)
+ inst := m.AllInstruments[metricSpec.Name]
+ customInst := customInstrument{Instrument: inst}
+ return inst.RegisterCallback(m.Metrics, customInst.customCallback)
default:
return fmt.Errorf("invalid metric spec")
}
}
func (m *Metrics) createCustomGauge(metricSpec *wfv1.Prometheus) error {
- err := m.createInstrument(float64ObservableGauge, metricSpec.Name, metricSpec.Help, "{item}")
+ err := m.CreateInstrument(telemetry.Float64ObservableGauge, metricSpec.Name, metricSpec.Help, "{item}")
if err != nil {
return err
}
- inst := m.allInstruments[metricSpec.Name]
- return inst.registerCallback(m, inst.customCallback)
+ inst := m.AllInstruments[metricSpec.Name]
+ customInst := customInstrument{Instrument: inst}
+ return inst.RegisterCallback(m.Metrics, customInst.customCallback)
}
func (m *Metrics) runCustomGC(ttl time.Duration) {
- m.mutex.Lock()
- defer m.mutex.Unlock()
- for _, baseMetric := range m.allInstruments {
- custom := baseMetric.customUserdata(false)
+ m.Mutex.Lock()
+ defer m.Mutex.Unlock()
+ for _, baseMetric := range m.AllInstruments {
+ custom := customUserdata(baseMetric, false)
for key, value := range custom {
if time.Since(value.lastUpdated) > ttl {
delete(custom, key)
@@ -284,8 +291,8 @@ func (m *Metrics) customMetricsGC(ctx context.Context, ttl time.Duration) {
}
func (m *Metrics) StopRealtimeMetricsForWfUID(key string) {
- m.mutex.Lock()
- defer m.mutex.Unlock()
+ m.Mutex.Lock()
+ defer m.Mutex.Unlock()
if _, exists := m.realtimeWorkflows[key]; !exists {
return
@@ -293,7 +300,7 @@ func (m *Metrics) StopRealtimeMetricsForWfUID(key string) {
realtimeMetrics := m.realtimeWorkflows[key]
for _, metric := range realtimeMetrics {
- delete(metric.inst.customUserdata(true), metric.key)
+ delete(customUserdata(metric.inst, true), metric.key)
}
delete(m.realtimeWorkflows, key)
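
Note: the custom-metric helpers now operate on telemetry.Instrument through free functions, so the caller-facing entry point is unchanged apart from the exported Ctx and Mutex fields. A hedged sketch of upserting a counter through UpsertCustomMetric follows, matching the call shape used in the tests in this diff; the metric name, owner key, and counter value are made up, and the wfv1.Counter literal is assumed from the Counter.Value access above.

package metricsexample

import (
	"context"

	wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
	"github.com/argoproj/argo-workflows/v3/workflow/metrics"
)

// upsertCounterSketch shows the caller-side shape of UpsertCustomMetric after the
// refactor. Everything specific here (name, owner key, value) is hypothetical.
func upsertCounterSketch(ctx context.Context, m *metrics.Metrics) error {
	spec := &wfv1.Prometheus{
		Name: "example_custom_counter",
		Help: "Example custom counter",
		Labels: []*wfv1.MetricLabel{
			{Key: "foo", Value: "bar"},
		},
		Counter: &wfv1.Counter{Value: "1"},
	}
	// The final argument is the realtime value callback; it is only consulted for
	// realtime gauges, but the signature always requires it.
	return m.UpsertCustomMetric(ctx, spec, "workflow-uid", func() float64 { return 0.0 })
}
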
diff --git a/workflow/metrics/metrics_k8s_request.go b/workflow/metrics/metrics_k8s_request.go
index b361baeae848..70fc46c29110 100644
--- a/workflow/metrics/metrics_k8s_request.go
+++ b/workflow/metrics/metrics_k8s_request.go
@@ -8,6 +8,7 @@ import (
"k8s.io/client-go/rest"
"github.com/argoproj/argo-workflows/v3/util/k8s"
+ "github.com/argoproj/argo-workflows/v3/util/telemetry"
)
const (
@@ -16,21 +17,21 @@ const (
)
func addK8sRequests(_ context.Context, m *Metrics) error {
- err := m.createInstrument(int64Counter,
+ err := m.CreateInstrument(telemetry.Int64Counter,
nameK8sRequestTotal,
"Number of kubernetes requests executed.",
"{request}",
- withAsBuiltIn(),
+ telemetry.WithAsBuiltIn(),
)
if err != nil {
return err
}
- err = m.createInstrument(float64Histogram,
+ err = m.CreateInstrument(telemetry.Float64Histogram,
nameK8sRequestDuration,
"Duration of kubernetes requests executed.",
"s",
- withDefaultBuckets([]float64{0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.0, 20.0, 60.0, 180.0}),
- withAsBuiltIn(),
+ telemetry.WithDefaultBuckets([]float64{0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.0, 20.0, 60.0, 180.0}),
+ telemetry.WithAsBuiltIn(),
)
// Register this metrics with the global
k8sMetrics.metrics = m
@@ -53,13 +54,13 @@ func (m metricsRoundTripper) RoundTrip(r *http.Request) (*http.Response, error)
duration := time.Since(startTime)
if x != nil && m.metrics != nil {
verb, kind := k8s.ParseRequest(r)
- attribs := instAttribs{
- {name: labelRequestKind, value: kind},
- {name: labelRequestVerb, value: verb},
- {name: labelRequestCode, value: x.StatusCode},
+ attribs := telemetry.InstAttribs{
+ {Name: telemetry.AttribRequestKind, Value: kind},
+ {Name: telemetry.AttribRequestVerb, Value: verb},
+ {Name: telemetry.AttribRequestCode, Value: x.StatusCode},
}
- (*m.metrics).addInt(m.ctx, nameK8sRequestTotal, 1, attribs)
- (*m.metrics).record(m.ctx, nameK8sRequestDuration, duration.Seconds(), attribs)
+ (*m.metrics).AddInt(m.ctx, nameK8sRequestTotal, 1, attribs)
+ (*m.metrics).Record(m.ctx, nameK8sRequestDuration, duration.Seconds(), attribs)
}
return x, err
}
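
Note: the round tripper records each Kubernetes API call as a counter increment plus a duration sample, keyed by kind, verb, and status code. Below is an illustrative, in-package sketch of recording one request the same way, using the AddInt/Record helpers promoted from the embedded telemetry.Metrics; the verb, kind, status, and duration values are invented.

package metrics

import (
	"context"

	"github.com/argoproj/argo-workflows/v3/util/telemetry"
)

// recordK8sRequestSketch is illustrative only; the attribute values are made up.
func recordK8sRequestSketch(ctx context.Context, m *Metrics) {
	attribs := telemetry.InstAttribs{
		{Name: telemetry.AttribRequestKind, Value: "pods"},
		{Name: telemetry.AttribRequestVerb, Value: "Get"},
		{Name: telemetry.AttribRequestCode, Value: 200},
	}
	// One request observed: bump the counter and record its wall-clock duration.
	m.AddInt(ctx, nameK8sRequestTotal, 1, attribs)
	m.Record(ctx, nameK8sRequestDuration, 0.05, attribs)
}
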
diff --git a/workflow/metrics/metrics_test.go b/workflow/metrics/metrics_test.go
index 113fa4d44cb6..de2b224f9bc3 100644
--- a/workflow/metrics/metrics_test.go
+++ b/workflow/metrics/metrics_test.go
@@ -12,13 +12,14 @@ import (
"k8s.io/utils/pointer"
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
+ "github.com/argoproj/argo-workflows/v3/util/telemetry"
)
func TestMetrics(t *testing.T) {
m, te, err := CreateDefaultTestMetrics()
require.NoError(t, err)
// Default buckets: {5, 10, 15, 20, 25, 30}
- m.OperationCompleted(m.ctx, 5)
+ m.OperationCompleted(m.Ctx, 5)
assert.NotNil(t, te)
attribs := attribute.NewSet()
val, err := te.GetFloat64HistogramData(nameOperationDuration, &attribs)
@@ -33,12 +34,12 @@ func TestErrors(t *testing.T) {
assert.Nil(t, m.GetCustomMetric("does-not-exist"))
require.NoError(t, err)
- err = m.UpsertCustomMetric(m.ctx, &wfv1.Prometheus{
+ err = m.UpsertCustomMetric(m.Ctx, &wfv1.Prometheus{
Name: "invalid.name",
}, "owner", func() float64 { return 0.0 })
require.Error(t, err)
- err = m.UpsertCustomMetric(m.ctx, &wfv1.Prometheus{
+ err = m.UpsertCustomMetric(m.Ctx, &wfv1.Prometheus{
Name: "name",
Labels: []*wfv1.MetricLabel{{
Key: "invalid-key",
@@ -49,10 +50,10 @@ func TestErrors(t *testing.T) {
}
func TestMetricGC(t *testing.T) {
- config := Config{
+ config := telemetry.Config{
Enabled: true,
- Path: defaultPrometheusServerPath,
- Port: defaultPrometheusServerPort,
+ Path: telemetry.DefaultPrometheusServerPath,
+ Port: telemetry.DefaultPrometheusServerPort,
TTL: 1 * time.Second,
}
@@ -63,7 +64,7 @@ func TestMetricGC(t *testing.T) {
labels := []*wfv1.MetricLabel{
{Key: "foo", Value: "bar"},
}
- err = m.UpsertCustomMetric(m.ctx, &wfv1.Prometheus{
+ err = m.UpsertCustomMetric(m.Ctx, &wfv1.Prometheus{
Name: key,
Labels: labels,
Help: "none",
@@ -73,7 +74,7 @@ func TestMetricGC(t *testing.T) {
baseCm := m.GetCustomMetric(key)
assert.NotNil(t, baseCm)
- cm := baseCm.customUserdata(true)
+ cm := customUserdata(baseCm, true)
assert.Len(t, cm, 1)
// Ensure we get at least one TTL run
@@ -92,15 +93,15 @@ func TestMetricGC(t *testing.T) {
}
func TestRealtimeMetricGC(t *testing.T) {
- config := Config{
+ config := telemetry.Config{
Enabled: true,
- Path: defaultPrometheusServerPath,
- Port: defaultPrometheusServerPort,
+ Path: telemetry.DefaultPrometheusServerPath,
+ Port: telemetry.DefaultPrometheusServerPort,
TTL: 1 * time.Second,
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- m, err := New(ctx, TestScopeName, &config, Callbacks{})
+ m, err := New(ctx, telemetry.TestScopeName, telemetry.TestScopeName, &config, Callbacks{})
require.NoError(t, err)
labels := []*wfv1.MetricLabel{
@@ -108,7 +109,7 @@ func TestRealtimeMetricGC(t *testing.T) {
}
name := "realtime_metric"
wfKey := "workflow-uid"
- err = m.UpsertCustomMetric(m.ctx, &wfv1.Prometheus{
+ err = m.UpsertCustomMetric(m.Ctx, &wfv1.Prometheus{
Name: name,
Labels: labels,
Help: "None",
@@ -146,31 +147,31 @@ func TestRealtimeMetricGC(t *testing.T) {
func TestWorkflowQueueMetrics(t *testing.T) {
m, te, err := getSharedMetrics()
require.NoError(t, err)
- attribs := attribute.NewSet(attribute.String(labelQueueName, "workflow_queue"))
- wfQueue := m.RateLimiterWithBusyWorkers(m.ctx, workqueue.DefaultControllerRateLimiter(), "workflow_queue")
+ attribs := attribute.NewSet(attribute.String(telemetry.AttribQueueName, "workflow_queue"))
+ wfQueue := m.RateLimiterWithBusyWorkers(m.Ctx, workqueue.DefaultControllerRateLimiter(), "workflow_queue")
defer wfQueue.ShutDown()
- assert.NotNil(t, m.allInstruments[nameWorkersQueueDepth])
- assert.NotNil(t, m.allInstruments[nameWorkersQueueLatency])
+ assert.NotNil(t, m.AllInstruments[nameWorkersQueueDepth])
+ assert.NotNil(t, m.AllInstruments[nameWorkersQueueLatency])
wfQueue.Add("hello")
- require.NotNil(t, m.allInstruments[nameWorkersQueueAdds])
+ require.NotNil(t, m.AllInstruments[nameWorkersQueueAdds])
val, err := te.GetInt64CounterValue(nameWorkersQueueAdds, &attribs)
require.NoError(t, err)
assert.Equal(t, int64(1), val)
}
func TestRealTimeMetricDeletion(t *testing.T) {
- config := Config{
+ config := telemetry.Config{
Enabled: true,
- Path: defaultPrometheusServerPath,
- Port: defaultPrometheusServerPort,
+ Path: telemetry.DefaultPrometheusServerPath,
+ Port: telemetry.DefaultPrometheusServerPort,
TTL: 1 * time.Second,
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- m, err := New(ctx, TestScopeName, &config, Callbacks{})
+ m, err := New(ctx, telemetry.TestScopeName, telemetry.TestScopeName, &config, Callbacks{})
require.NoError(t, err)
// We've not yet fed a metric in for 123
@@ -200,7 +201,7 @@ func TestRealTimeMetricDeletion(t *testing.T) {
m.StopRealtimeMetricsForWfUID("456")
assert.Empty(t, m.realtimeWorkflows["456"])
- cm := baseCm.customUserdata(true)
+ cm := customUserdata(baseCm, true)
assert.Len(t, cm, 1)
assert.Len(t, m.realtimeWorkflows["123"], 1)
diff --git a/workflow/metrics/test_helpers.go b/workflow/metrics/test_helpers.go
new file mode 100644
index 000000000000..944d46a1cff4
--- /dev/null
+++ b/workflow/metrics/test_helpers.go
@@ -0,0 +1,53 @@
+package metrics
+
+import (
+ "context"
+ "time"
+
+ "go.opentelemetry.io/otel/sdk/metric"
+ "k8s.io/client-go/util/workqueue"
+
+ "github.com/argoproj/argo-workflows/v3/util/telemetry"
+)
+
+var sharedMetrics *Metrics = nil
+var sharedTE *telemetry.TestMetricsExporter = nil
+
+// getSharedMetrics returns a singleton metrics with test exporter
+// This is necessary because only the first call to workqueue.SetProvider
+// takes effect within a single binary
+// This can be fixed when we update to client-go 0.27 or later and we can
+// create workqueues with https://godocs.io/k8s.io/client-go/util/workqueue#NewRateLimitingQueueWithConfig
+func getSharedMetrics() (*Metrics, *telemetry.TestMetricsExporter, error) {
+ if sharedMetrics == nil {
+ config := telemetry.Config{
+ Enabled: true,
+ TTL: 1 * time.Second,
+ }
+ var err error
+ sharedMetrics, sharedTE, err = createTestMetrics(&config, Callbacks{})
+ if err != nil {
+ return nil, nil, err
+ }
+
+ workqueue.SetProvider(sharedMetrics)
+ }
+ return sharedMetrics, sharedTE, nil
+}
+
+// CreateDefaultTestMetrics creates a Metrics instance backed by a test exporter
+// with a default configuration, suitable for most tests
+func CreateDefaultTestMetrics() (*Metrics, *telemetry.TestMetricsExporter, error) {
+ config := telemetry.Config{
+ Enabled: true,
+ }
+ return createTestMetrics(&config, Callbacks{})
+}
+
+func createTestMetrics(config *telemetry.Config, callbacks Callbacks) (*Metrics, *telemetry.TestMetricsExporter, error) {
+ ctx /* with cancel*/ := context.Background()
+ te := telemetry.NewTestMetricsExporter()
+
+ m, err := New(ctx, telemetry.TestScopeName, telemetry.TestScopeName, config, callbacks, metric.WithReader(te))
+ return m, te, err
+}
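
Note: these helpers give tests an in-memory exporter instead of a Prometheus scrape. A small sketch of how they are consumed follows, mirroring TestMetrics in metrics_test.go; nothing here is new API beyond what that test already exercises.

package metrics

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.opentelemetry.io/otel/attribute"
)

// TestHelpersSketch mirrors TestMetrics: build a Metrics wired to the in-memory
// exporter, record through the public API, then read the value back from the exporter.
func TestHelpersSketch(t *testing.T) {
	m, te, err := CreateDefaultTestMetrics()
	require.NoError(t, err)
	require.NotNil(t, te)

	m.OperationCompleted(m.Ctx, 5)

	attribs := attribute.NewSet()
	val, err := te.GetFloat64HistogramData(nameOperationDuration, &attribs)
	require.NoError(t, err)
	assert.NotNil(t, val)
}
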
diff --git a/workflow/metrics/version.go b/workflow/metrics/version.go
deleted file mode 100644
index afc2b1c0a3d8..000000000000
--- a/workflow/metrics/version.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package metrics
-
-import (
- "context"
-
- "github.com/argoproj/argo-workflows/v3"
-)
-
-func addVersion(ctx context.Context, m *Metrics) error {
- const nameVersion = `version`
- err := m.createInstrument(int64Counter,
- nameVersion,
- "Build metadata for this Controller",
- "{unused}",
- withAsBuiltIn(),
- )
- if err != nil {
- return err
- }
-
- version := argo.GetVersion()
- m.addInt(ctx, nameVersion, 1, instAttribs{
- {name: labelBuildVersion, value: version.Version},
- {name: labelBuildPlatform, value: version.Platform},
- {name: labelBuildGoVersion, value: version.GoVersion},
- {name: labelBuildDate, value: version.BuildDate},
- {name: labelBuildCompiler, value: version.Compiler},
- {name: labelBuildGitCommit, value: version.GitCommit},
- {name: labelBuildGitTreeState, value: version.GitTreeState},
- {name: labelBuildGitTag, value: version.GitTag},
- })
- return nil
-}
diff --git a/workflow/metrics/version_test.go b/workflow/metrics/version_test.go
deleted file mode 100644
index 5d2c22d8165d..000000000000
--- a/workflow/metrics/version_test.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package metrics
-
-import (
- "testing"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
- "go.opentelemetry.io/otel/attribute"
-
- "github.com/argoproj/argo-workflows/v3"
-)
-
-func TestVersion(t *testing.T) {
- _, te, err := CreateDefaultTestMetrics()
- require.NoError(t, err)
- assert.NotNil(t, te)
- version := argo.GetVersion()
- attribs := attribute.NewSet(
- attribute.String(labelBuildVersion, version.Version),
- attribute.String(labelBuildPlatform, version.Platform),
- attribute.String(labelBuildGoVersion, version.GoVersion),
- attribute.String(labelBuildDate, version.BuildDate),
- attribute.String(labelBuildCompiler, version.Compiler),
- attribute.String(labelBuildGitCommit, version.GitCommit),
- attribute.String(labelBuildGitTreeState, version.GitTreeState),
- attribute.String(labelBuildGitTag, version.GitTag),
- )
- val, err := te.GetInt64CounterValue(`version`, &attribs)
- require.NoError(t, err)
- assert.Equal(t, int64(1), val)
-}
diff --git a/workflow/metrics/work_queue.go b/workflow/metrics/work_queue.go
index acca059b73b3..366188a63f95 100644
--- a/workflow/metrics/work_queue.go
+++ b/workflow/metrics/work_queue.go
@@ -3,6 +3,8 @@ package metrics
import (
"context"
+ "github.com/argoproj/argo-workflows/v3/util/telemetry"
+
log "github.com/sirupsen/logrus"
"go.opentelemetry.io/otel/metric"
"k8s.io/client-go/util/workqueue"
@@ -26,100 +28,100 @@ var _ workqueue.MetricsProvider = &Metrics{}
type workersBusyRateLimiterWorkQueue struct {
workqueue.RateLimitingInterface
workerType string
- busyGauge *instrument
+ busyGauge *telemetry.Instrument
// Evil storage of context for compatibility with legacy interface to workqueue
ctx context.Context
}
func addWorkQueueMetrics(_ context.Context, m *Metrics) error {
- err := m.createInstrument(int64UpDownCounter,
+ err := m.CreateInstrument(telemetry.Int64UpDownCounter,
nameWorkersBusy,
"Number of workers currently busy",
"{worker}",
- withAsBuiltIn(),
+ telemetry.WithAsBuiltIn(),
)
if err != nil {
return err
}
- err = m.createInstrument(int64UpDownCounter,
+ err = m.CreateInstrument(telemetry.Int64UpDownCounter,
nameWorkersQueueDepth,
"Depth of the queue",
"{item}",
- withAsBuiltIn(),
+ telemetry.WithAsBuiltIn(),
)
if err != nil {
return err
}
- err = m.createInstrument(int64Counter,
+ err = m.CreateInstrument(telemetry.Int64Counter,
nameWorkersQueueAdds,
"Adds to the queue",
"{item}",
- withAsBuiltIn(),
+ telemetry.WithAsBuiltIn(),
)
if err != nil {
return err
}
- err = m.createInstrument(float64Histogram,
+ err = m.CreateInstrument(telemetry.Float64Histogram,
nameWorkersQueueLatency,
"Time objects spend waiting in the queue",
"s",
- withDefaultBuckets([]float64{1.0, 5.0, 20.0, 60.0, 180.0}),
- withAsBuiltIn(),
+ telemetry.WithDefaultBuckets([]float64{1.0, 5.0, 20.0, 60.0, 180.0}),
+ telemetry.WithAsBuiltIn(),
)
if err != nil {
return err
}
- err = m.createInstrument(float64Histogram,
+ err = m.CreateInstrument(telemetry.Float64Histogram,
nameWorkersQueueDuration,
"Time objects spend being processed from the queue",
"s",
- withDefaultBuckets([]float64{0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.0, 20.0, 60.0, 180.0}),
- withAsBuiltIn(),
+ telemetry.WithDefaultBuckets([]float64{0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.0, 20.0, 60.0, 180.0}),
+ telemetry.WithAsBuiltIn(),
)
if err != nil {
return err
}
- err = m.createInstrument(int64Counter,
+ err = m.CreateInstrument(telemetry.Int64Counter,
nameWorkersRetries,
"Retries in the queues",
"{item}",
- withAsBuiltIn(),
+ telemetry.WithAsBuiltIn(),
)
if err != nil {
return err
}
- err = m.createInstrument(float64ObservableGauge,
+ err = m.CreateInstrument(telemetry.Float64ObservableGauge,
nameWorkersUnfinishedWork,
"Unfinished work time",
"s",
- withAsBuiltIn(),
+ telemetry.WithAsBuiltIn(),
)
if err != nil {
return err
}
unfinishedCallback := queueUserdata{
- gauge: m.allInstruments[nameWorkersUnfinishedWork],
+ gauge: m.AllInstruments[nameWorkersUnfinishedWork],
}
- m.allInstruments[nameWorkersUnfinishedWork].userdata = &unfinishedCallback
- err = m.allInstruments[nameWorkersUnfinishedWork].registerCallback(m, unfinishedCallback.update)
+ m.AllInstruments[nameWorkersUnfinishedWork].SetUserdata(&unfinishedCallback)
+ err = m.AllInstruments[nameWorkersUnfinishedWork].RegisterCallback(m.Metrics, unfinishedCallback.update)
if err != nil {
return err
}
- err = m.createInstrument(float64ObservableGauge,
+ err = m.CreateInstrument(telemetry.Float64ObservableGauge,
nameWorkersLongestRunning,
"Longest running worker",
"s",
- withAsBuiltIn(),
+ telemetry.WithAsBuiltIn(),
)
if err != nil {
return err
}
longestRunningCallback := queueUserdata{
- gauge: m.allInstruments[nameWorkersLongestRunning],
+ gauge: m.AllInstruments[nameWorkersLongestRunning],
}
- m.allInstruments[nameWorkersLongestRunning].userdata = &longestRunningCallback
- err = m.allInstruments[nameWorkersLongestRunning].registerCallback(m, longestRunningCallback.update)
+ m.AllInstruments[nameWorkersLongestRunning].SetUserdata(&longestRunningCallback)
+ err = m.AllInstruments[nameWorkersLongestRunning].RegisterCallback(m.Metrics, longestRunningCallback.update)
if err != nil {
return err
}
@@ -130,27 +132,27 @@ func (m *Metrics) RateLimiterWithBusyWorkers(ctx context.Context, workQueue work
queue := workersBusyRateLimiterWorkQueue{
RateLimitingInterface: workqueue.NewNamedRateLimitingQueue(workQueue, queueName),
workerType: queueName,
- busyGauge: m.allInstruments[nameWorkersBusy],
+ busyGauge: m.AllInstruments[nameWorkersBusy],
ctx: ctx,
}
queue.newWorker(ctx)
return queue
}
-func (w *workersBusyRateLimiterWorkQueue) attributes() instAttribs {
- return instAttribs{{name: labelWorkerType, value: w.workerType}}
+func (w *workersBusyRateLimiterWorkQueue) attributes() telemetry.InstAttribs {
+ return telemetry.InstAttribs{{Name: telemetry.AttribWorkerType, Value: w.workerType}}
}
func (w *workersBusyRateLimiterWorkQueue) newWorker(ctx context.Context) {
- w.busyGauge.addInt(ctx, 0, w.attributes())
+ w.busyGauge.AddInt(ctx, 0, w.attributes())
}
func (w *workersBusyRateLimiterWorkQueue) workerBusy(ctx context.Context) {
- w.busyGauge.addInt(ctx, 1, w.attributes())
+ w.busyGauge.AddInt(ctx, 1, w.attributes())
}
func (w *workersBusyRateLimiterWorkQueue) workerFree(ctx context.Context) {
- w.busyGauge.addInt(ctx, -1, w.attributes())
+ w.busyGauge.AddInt(ctx, -1, w.attributes())
}
func (w workersBusyRateLimiterWorkQueue) Get() (interface{}, bool) {
@@ -168,29 +170,29 @@ func (w workersBusyRateLimiterWorkQueue) Done(item interface{}) {
type queueMetric struct {
ctx context.Context
name string
- inst *instrument
+ inst *telemetry.Instrument
value *float64
}
type queueUserdata struct {
- gauge *instrument
+ gauge *telemetry.Instrument
metrics []queueMetric
}
-func (q *queueMetric) attributes() instAttribs {
- return instAttribs{{name: labelQueueName, value: q.name}}
+func (q *queueMetric) attributes() telemetry.InstAttribs {
+ return telemetry.InstAttribs{{Name: telemetry.AttribQueueName, Value: q.name}}
}
func (q queueMetric) Inc() {
- q.inst.addInt(q.ctx, 1, q.attributes())
+ q.inst.AddInt(q.ctx, 1, q.attributes())
}
func (q queueMetric) Dec() {
- q.inst.addInt(q.ctx, -1, q.attributes())
+ q.inst.AddInt(q.ctx, -1, q.attributes())
}
func (q queueMetric) Observe(val float64) {
- q.inst.record(q.ctx, val, q.attributes())
+ q.inst.Record(q.ctx, val, q.attributes())
}
// Observable gauge stores in the shim
@@ -198,83 +200,83 @@ func (q queueMetric) Set(val float64) {
*(q.value) = val
}
-func (i *instrument) queueUserdata() *queueUserdata {
- switch val := i.userdata.(type) {
+func getQueueUserdata(i *telemetry.Instrument) *queueUserdata {
+ switch val := i.GetUserdata().(type) {
case *queueUserdata:
return val
default:
- log.Errorf("internal error: unexpected userdata on queue metric %s", i.name)
+ log.Errorf("internal error: unexpected userdata on queue metric %s", i.GetName())
return &queueUserdata{}
}
}
func (q *queueUserdata) update(_ context.Context, o metric.Observer) error {
for _, metric := range q.metrics {
- q.gauge.observeFloat(o, *metric.value, metric.attributes())
+ q.gauge.ObserveFloat(o, *metric.value, metric.attributes())
}
return nil
}
func (m *Metrics) NewDepthMetric(name string) workqueue.GaugeMetric {
return queueMetric{
- ctx: m.ctx,
+ ctx: m.Ctx,
name: name,
- inst: m.allInstruments[nameWorkersQueueDepth],
+ inst: m.AllInstruments[nameWorkersQueueDepth],
}
}
func (m *Metrics) NewAddsMetric(name string) workqueue.CounterMetric {
return queueMetric{
- ctx: m.ctx,
+ ctx: m.Ctx,
name: name,
- inst: m.allInstruments[nameWorkersQueueAdds],
+ inst: m.AllInstruments[nameWorkersQueueAdds],
}
}
func (m *Metrics) NewLatencyMetric(name string) workqueue.HistogramMetric {
return queueMetric{
- ctx: m.ctx,
+ ctx: m.Ctx,
name: name,
- inst: m.allInstruments[nameWorkersQueueLatency],
+ inst: m.AllInstruments[nameWorkersQueueLatency],
}
}
func (m *Metrics) NewWorkDurationMetric(name string) workqueue.HistogramMetric {
return queueMetric{
- ctx: m.ctx,
+ ctx: m.Ctx,
name: name,
- inst: m.allInstruments[nameWorkersQueueDuration],
+ inst: m.AllInstruments[nameWorkersQueueDuration],
}
}
func (m *Metrics) NewRetriesMetric(name string) workqueue.CounterMetric {
return queueMetric{
- ctx: m.ctx,
+ ctx: m.Ctx,
name: name,
- inst: m.allInstruments[nameWorkersRetries],
+ inst: m.AllInstruments[nameWorkersRetries],
}
}
func (m *Metrics) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric {
metric := queueMetric{
- ctx: m.ctx,
+ ctx: m.Ctx,
name: name,
- inst: m.allInstruments[nameWorkersUnfinishedWork],
+ inst: m.AllInstruments[nameWorkersUnfinishedWork],
value: pointer.Float64(0.0),
}
- ud := metric.inst.queueUserdata()
+ ud := getQueueUserdata(metric.inst)
ud.metrics = append(ud.metrics, metric)
return metric
}
func (m *Metrics) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric {
metric := queueMetric{
- ctx: m.ctx,
+ ctx: m.Ctx,
name: name,
- inst: m.allInstruments[nameWorkersLongestRunning],
+ inst: m.AllInstruments[nameWorkersLongestRunning],
value: pointer.Float64(0.0),
}
- ud := metric.inst.queueUserdata()
+ ud := getQueueUserdata(metric.inst)
ud.metrics = append(ud.metrics, metric)
return metric
}
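
Note: Metrics still satisfies workqueue.MetricsProvider (asserted near the top of this file), so the only wiring a caller needs is to register it as the provider and, where busy-worker tracking is wanted, to create queues through RateLimiterWithBusyWorkers. A hedged sketch follows, with the queue name made up.

package metrics

import (
	"k8s.io/client-go/util/workqueue"
)

// queueWiringSketch is illustrative only. SetProvider makes queues created from here
// on report adds/depth/latency/duration/retries through the instruments above;
// RateLimiterWithBusyWorkers additionally tracks busy workers for the named queue.
func queueWiringSketch(m *Metrics) workqueue.RateLimitingInterface {
	workqueue.SetProvider(m)
	return m.RateLimiterWithBusyWorkers(m.Ctx, workqueue.DefaultControllerRateLimiter(), "example_queue")
}
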
diff --git a/workflow/metrics/work_queue_test.go b/workflow/metrics/work_queue_test.go
index f1a998ba7255..9c7f9766936b 100644
--- a/workflow/metrics/work_queue_test.go
+++ b/workflow/metrics/work_queue_test.go
@@ -7,21 +7,23 @@ import (
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel/attribute"
"k8s.io/client-go/util/workqueue"
+
+ "github.com/argoproj/argo-workflows/v3/util/telemetry"
)
func TestMetricsWorkQueue(t *testing.T) {
m, te, err := getSharedMetrics()
require.NoError(t, err)
- attribsWT := attribute.NewSet(attribute.String(labelWorkerType, "test"))
+ attribsWT := attribute.NewSet(attribute.String(telemetry.AttribWorkerType, "test"))
- queue := m.RateLimiterWithBusyWorkers(m.ctx, workqueue.DefaultControllerRateLimiter(), "test")
+ queue := m.RateLimiterWithBusyWorkers(m.Ctx, workqueue.DefaultControllerRateLimiter(), "test")
defer queue.ShutDown()
val, err := te.GetInt64CounterValue(nameWorkersBusy, &attribsWT)
require.NoError(t, err)
assert.Equal(t, int64(0), val)
- attribsQN := attribute.NewSet(attribute.String(labelQueueName, "test"))
+ attribsQN := attribute.NewSet(attribute.String(telemetry.AttribQueueName, "test"))
queue.Add("A")
val, err = te.GetInt64CounterValue(nameWorkersBusy, &attribsWT)
require.NoError(t, err)
diff --git a/workflow/templateresolution/context_test.go b/workflow/templateresolution/context_test.go
index 5f136a3ef125..70abade41c04 100644
--- a/workflow/templateresolution/context_test.go
+++ b/workflow/templateresolution/context_test.go
@@ -191,9 +191,7 @@ func TestGetCurrentTemplateBase(t *testing.T) {
// Get the template base of existing template name.
tmplBase := ctx.GetCurrentTemplateBase()
wftmpl, ok := tmplBase.(*wfv1.WorkflowTemplate)
- if !assert.True(t, ok) {
- t.Fatal("tmplBase is not a WorkflowTemplate")
- }
+ require.True(t, ok, "tmplBase is not a WorkflowTemplate")
assert.Equal(t, "base-workflow-template", wftmpl.Name)
}
@@ -216,9 +214,7 @@ func TestWithTemplateHolder(t *testing.T) {
newCtx, err := ctx.WithTemplateHolder(&tmplHolder)
require.NoError(t, err)
tmplGetter, ok := newCtx.GetCurrentTemplateBase().(*wfv1.WorkflowTemplate)
- if !assert.True(t, ok) {
- t.Fatal("tmplBase is not a WorkflowTemplate")
- }
+ require.True(t, ok, "tmplBase is not a WorkflowTemplate")
assert.Equal(t, "base-workflow-template", tmplGetter.GetName())
// Get the template base of unexisting template name.
@@ -226,9 +222,7 @@ func TestWithTemplateHolder(t *testing.T) {
newCtx, err = ctx.WithTemplateHolder(&tmplHolder)
require.NoError(t, err)
tmplGetter, ok = newCtx.GetCurrentTemplateBase().(*wfv1.WorkflowTemplate)
- if !assert.True(t, ok) {
- t.Fatal("tmplBase is not a WorkflowTemplate")
- }
+ require.True(t, ok, "tmplBase is not a WorkflowTemplate")
assert.Equal(t, "base-workflow-template", tmplGetter.GetName())
// Get the template base of existing template reference.
@@ -236,9 +230,7 @@ func TestWithTemplateHolder(t *testing.T) {
newCtx, err = ctx.WithTemplateHolder(&tmplHolder)
require.NoError(t, err)
tmplGetter, ok = newCtx.GetCurrentTemplateBase().(*wfv1.WorkflowTemplate)
- if !assert.True(t, ok) {
- t.Fatal("tmplBase is not a WorkflowTemplate")
- }
+ require.True(t, ok, "tmplBase is not a WorkflowTemplate")
assert.Equal(t, "some-workflow-template", tmplGetter.GetName())
// Get the template base of unexisting template reference.
@@ -263,9 +255,7 @@ func TestResolveTemplate(t *testing.T) {
ctx, tmpl, _, err := ctx.ResolveTemplate(&tmplHolder)
require.NoError(t, err)
wftmpl, ok := ctx.tmplBase.(*wfv1.WorkflowTemplate)
- if !assert.True(t, ok) {
- t.Fatal("tmplBase is not a WorkflowTemplate")
- }
+ require.True(t, ok, "tmplBase is not a WorkflowTemplate")
assert.Equal(t, "base-workflow-template", wftmpl.Name)
assert.Equal(t, "whalesay", tmpl.Name)
@@ -276,9 +266,7 @@ func TestResolveTemplate(t *testing.T) {
require.NoError(t, err)
tmplGetter, ok = ctx.tmplBase.(*wfv1.WorkflowTemplate)
- if !assert.True(t, ok) {
- t.Fatal("tmplBase is not a WorkflowTemplate")
- }
+ require.True(t, ok, "tmplBase is not a WorkflowTemplate")
assert.Equal(t, "some-workflow-template", tmplGetter.GetName())
assert.Equal(t, "whalesay", tmpl.Name)
assert.NotNil(t, tmpl.Container)
@@ -289,9 +277,7 @@ func TestResolveTemplate(t *testing.T) {
require.NoError(t, err)
tmplGetter, ok = ctx.tmplBase.(*wfv1.WorkflowTemplate)
- if !assert.True(t, ok) {
- t.Fatal("tmplBase is not a WorkflowTemplate")
- }
+ require.True(t, ok, "tmplBase is not a WorkflowTemplate")
assert.Equal(t, "some-workflow-template", tmplGetter.GetName())
assert.Equal(t, "local-whalesay", tmpl.Name)
assert.NotNil(t, tmpl.Steps)
@@ -302,9 +288,7 @@ func TestResolveTemplate(t *testing.T) {
require.NoError(t, err)
tmplGetter, ok = ctx.tmplBase.(*wfv1.WorkflowTemplate)
- if !assert.True(t, ok) {
- t.Fatal("tmplBase is not a WorkflowTemplate")
- }
+ require.True(t, ok, "tmplBase is not a WorkflowTemplate")
assert.Equal(t, "some-workflow-template", tmplGetter.GetName())
assert.Equal(t, "another-whalesay", tmpl.Name)
assert.NotNil(t, tmpl.Steps)
@@ -317,9 +301,7 @@ func TestResolveTemplate(t *testing.T) {
require.NoError(t, err)
tmplGetter, ok = ctx.tmplBase.(*wfv1.WorkflowTemplate)
- if !assert.True(t, ok) {
- t.Fatal("tmplBase is not a WorkflowTemplate")
- }
+ require.True(t, ok, "tmplBase is not a WorkflowTemplate")
assert.Equal(t, "some-workflow-template", tmplGetter.GetName())
assert.Equal(t, "whalesay-with-arguments", tmpl.Name)
@@ -331,9 +313,7 @@ func TestResolveTemplate(t *testing.T) {
require.NoError(t, err)
tmplGetter, ok = ctx.tmplBase.(*wfv1.WorkflowTemplate)
- if !assert.True(t, ok) {
- t.Fatal("tmplBase is not a WorkflowTemplate")
- }
+ require.True(t, ok, "tmplBase is not a WorkflowTemplate")
assert.Equal(t, "some-workflow-template", tmplGetter.GetName())
assert.Equal(t, "nested-whalesay-with-arguments", tmpl.Name)
}
@@ -348,9 +328,7 @@ func TestWithTemplateBase(t *testing.T) {
// Get the template base of existing template name.
newCtx := ctx.WithTemplateBase(anotherWftmpl)
wftmpl, ok := newCtx.tmplBase.(*wfv1.WorkflowTemplate)
- if !assert.True(t, ok) {
- t.Fatal("tmplBase is not a WorkflowTemplate")
- }
+ require.True(t, ok, "tmplBase is not a WorkflowTemplate")
assert.Equal(t, "another-workflow-template", wftmpl.Name)
}
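
Note: the test changes in this file and the next replace the old "if !assert.True(...) { t.Fatal(...) }" guard with require, which records the failure and stops the test in a single call. A tiny hypothetical test showing the difference; it is not part of this change set.

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// TestRequireStopsEarly illustrates the idiom: assert records a failure and keeps
// running, while require aborts the test immediately, which is what guards the
// later index accesses in the rewritten tests.
func TestRequireStopsEarly(t *testing.T) {
	values := []int{1, 2}

	// If this length check failed, require would stop here instead of letting the
	// indexing below panic, as the old if-assert-then-Fatal wrapper used to do.
	require.Len(t, values, 2)
	assert.Equal(t, 1, values[0])
	assert.Equal(t, 2, values[1])
}
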
diff --git a/workflow/util/util_test.go b/workflow/util/util_test.go
index 3e70d27b9dcd..67b1b2e84374 100644
--- a/workflow/util/util_test.go
+++ b/workflow/util/util_test.go
@@ -582,10 +582,9 @@ func TestApplySubmitOpts(t *testing.T) {
wf := &wfv1.Workflow{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"a": "0", "b": "0"}}}
err := ApplySubmitOpts(wf, &wfv1.SubmitOpts{Labels: "a=1"})
require.NoError(t, err)
- if assert.Len(t, wf.GetLabels(), 2) {
- assert.Equal(t, "1", wf.GetLabels()["a"])
- assert.Equal(t, "0", wf.GetLabels()["b"])
- }
+ require.Len(t, wf.GetLabels(), 2)
+ assert.Equal(t, "1", wf.GetLabels()["a"])
+ assert.Equal(t, "0", wf.GetLabels()["b"])
})
t.Run("InvalidParameters", func(t *testing.T) {
require.Error(t, ApplySubmitOpts(&wfv1.Workflow{}, &wfv1.SubmitOpts{Parameters: []string{"a"}}))
@@ -601,10 +600,9 @@ func TestApplySubmitOpts(t *testing.T) {
err := ApplySubmitOpts(wf, &wfv1.SubmitOpts{Parameters: []string{"a=81861780812"}})
require.NoError(t, err)
parameters := wf.Spec.Arguments.Parameters
- if assert.Len(t, parameters, 1) {
- assert.Equal(t, "a", parameters[0].Name)
- assert.Equal(t, "81861780812", parameters[0].Value.String())
- }
+ require.Len(t, parameters, 1)
+ assert.Equal(t, "a", parameters[0].Name)
+ assert.Equal(t, "81861780812", parameters[0].Value.String())
})
t.Run("PodPriorityClassName", func(t *testing.T) {
wf := &wfv1.Workflow{}
@@ -624,9 +622,8 @@ func TestReadParametersFile(t *testing.T) {
err = ReadParametersFile(file.Name(), opts)
require.NoError(t, err)
parameters := opts.Parameters
- if assert.Len(t, parameters, 1) {
- assert.Equal(t, "a=81861780812", parameters[0])
- }
+ require.Len(t, parameters, 1)
+ assert.Equal(t, "a=81861780812", parameters[0])
}
func TestFormulateResubmitWorkflow(t *testing.T) {
@@ -1090,9 +1087,8 @@ func TestFormulateRetryWorkflow(t *testing.T) {
require.NoError(t, err)
wf, _, err = FormulateRetryWorkflow(ctx, wf, false, "", nil)
require.NoError(t, err)
- if assert.Len(t, wf.Status.Nodes, 1) {
- assert.Equal(t, wfv1.NodeRunning, wf.Status.Nodes[""].Phase)
- }
+ require.Len(t, wf.Status.Nodes, 1)
+ assert.Equal(t, wfv1.NodeRunning, wf.Status.Nodes[""].Phase)
})
t.Run("Skipped and Suspended Nodes", func(t *testing.T) {
wf := &wfv1.Workflow{
@@ -1123,16 +1119,15 @@ func TestFormulateRetryWorkflow(t *testing.T) {
require.NoError(t, err)
wf, _, err = FormulateRetryWorkflow(ctx, wf, true, "id=suspended", nil)
require.NoError(t, err)
- if assert.Len(t, wf.Status.Nodes, 3) {
- assert.Equal(t, wfv1.NodeRunning, wf.Status.Nodes["entrypoint"].Phase)
- assert.Equal(t, wfv1.NodeRunning, wf.Status.Nodes["suspended"].Phase)
- assert.Equal(t, wfv1.Parameter{
- Name: "param-1",
- Value: nil,
- ValueFrom: &wfv1.ValueFrom{Supplied: &wfv1.SuppliedValueFrom{}},
- }, wf.Status.Nodes["suspended"].Outputs.Parameters[0])
- assert.Equal(t, wfv1.NodeSkipped, wf.Status.Nodes["skipped"].Phase)
- }
+ require.Len(t, wf.Status.Nodes, 3)
+ assert.Equal(t, wfv1.NodeRunning, wf.Status.Nodes["entrypoint"].Phase)
+ assert.Equal(t, wfv1.NodeRunning, wf.Status.Nodes["suspended"].Phase)
+ assert.Equal(t, wfv1.Parameter{
+ Name: "param-1",
+ Value: nil,
+ ValueFrom: &wfv1.ValueFrom{Supplied: &wfv1.SuppliedValueFrom{}},
+ }, wf.Status.Nodes["suspended"].Outputs.Parameters[0])
+ assert.Equal(t, wfv1.NodeSkipped, wf.Status.Nodes["skipped"].Phase)
})
t.Run("Nested DAG with Non-group Node Selected", func(t *testing.T) {
wf := &wfv1.Workflow{
@@ -1155,12 +1150,11 @@ func TestFormulateRetryWorkflow(t *testing.T) {
wf, _, err = FormulateRetryWorkflow(ctx, wf, true, "id=3", nil)
require.NoError(t, err)
// Node #3, #4 are deleted and will be recreated so only 3 nodes left in wf.Status.Nodes
- if assert.Len(t, wf.Status.Nodes, 3) {
- assert.Equal(t, wfv1.NodeSucceeded, wf.Status.Nodes["my-nested-dag-1"].Phase)
- // The parent group nodes should be running.
- assert.Equal(t, wfv1.NodeRunning, wf.Status.Nodes["1"].Phase)
- assert.Equal(t, wfv1.NodeRunning, wf.Status.Nodes["2"].Phase)
- }
+ require.Len(t, wf.Status.Nodes, 3)
+ assert.Equal(t, wfv1.NodeSucceeded, wf.Status.Nodes["my-nested-dag-1"].Phase)
+ // The parent group nodes should be running.
+ assert.Equal(t, wfv1.NodeRunning, wf.Status.Nodes["1"].Phase)
+ assert.Equal(t, wfv1.NodeRunning, wf.Status.Nodes["2"].Phase)
})
t.Run("Nested DAG without Node Selected", func(t *testing.T) {
wf := &wfv1.Workflow{
@@ -1183,13 +1177,12 @@ func TestFormulateRetryWorkflow(t *testing.T) {
wf, _, err = FormulateRetryWorkflow(ctx, wf, true, "", nil)
require.NoError(t, err)
// Node #2, #3, and #4 are deleted and will be recreated so only 2 nodes left in wf.Status.Nodes
- if assert.Len(t, wf.Status.Nodes, 4) {
- assert.Equal(t, wfv1.NodeSucceeded, wf.Status.Nodes["my-nested-dag-2"].Phase)
- assert.Equal(t, wfv1.NodeSucceeded, wf.Status.Nodes["1"].Phase)
- assert.Equal(t, wfv1.NodeSucceeded, wf.Status.Nodes["2"].Phase)
- assert.Equal(t, wfv1.NodeSucceeded, wf.Status.Nodes["3"].Phase)
- assert.Equal(t, "", string(wf.Status.Nodes["4"].Phase))
- }
+ require.Len(t, wf.Status.Nodes, 4)
+ assert.Equal(t, wfv1.NodeSucceeded, wf.Status.Nodes["my-nested-dag-2"].Phase)
+ assert.Equal(t, wfv1.NodeSucceeded, wf.Status.Nodes["1"].Phase)
+ assert.Equal(t, wfv1.NodeSucceeded, wf.Status.Nodes["2"].Phase)
+ assert.Equal(t, wfv1.NodeSucceeded, wf.Status.Nodes["3"].Phase)
+ assert.Equal(t, "", string(wf.Status.Nodes["4"].Phase))
})
t.Run("OverrideParams", func(t *testing.T) {
@@ -1319,13 +1312,12 @@ func TestFormulateRetryWorkflow(t *testing.T) {
wf, _, err = FormulateRetryWorkflow(ctx, wf, true, "id=4", nil)
require.NoError(t, err)
// Node #4 is deleted and will be recreated so only 4 nodes left in wf.Status.Nodes
- if assert.Len(t, wf.Status.Nodes, 4) {
- assert.Equal(t, wfv1.NodeSucceeded, wf.Status.Nodes["successful-workflow-2"].Phase)
- // The parent group nodes should be running.
- assert.Equal(t, wfv1.NodeRunning, wf.Status.Nodes["1"].Phase)
- assert.Equal(t, wfv1.NodeSucceeded, wf.Status.Nodes["2"].Phase)
- assert.Equal(t, wfv1.NodeSucceeded, wf.Status.Nodes["3"].Phase)
- }
+ require.Len(t, wf.Status.Nodes, 4)
+ assert.Equal(t, wfv1.NodeSucceeded, wf.Status.Nodes["successful-workflow-2"].Phase)
+ // The parent group nodes should be running.
+ assert.Equal(t, wfv1.NodeRunning, wf.Status.Nodes["1"].Phase)
+ assert.Equal(t, wfv1.NodeSucceeded, wf.Status.Nodes["2"].Phase)
+ assert.Equal(t, wfv1.NodeSucceeded, wf.Status.Nodes["3"].Phase)
})
t.Run("Retry continue on failed workflow", func(t *testing.T) {
@@ -1349,11 +1341,10 @@ func TestFormulateRetryWorkflow(t *testing.T) {
require.NoError(t, err)
wf, podsToDelete, err := FormulateRetryWorkflow(ctx, wf, false, "", nil)
require.NoError(t, err)
- if assert.Len(t, wf.Status.Nodes, 4) {
- assert.Equal(t, wfv1.NodeFailed, wf.Status.Nodes["2"].Phase)
- assert.Equal(t, wfv1.NodeSucceeded, wf.Status.Nodes["3"].Phase)
- assert.Len(t, podsToDelete, 2)
- }
+ require.Len(t, wf.Status.Nodes, 4)
+ assert.Equal(t, wfv1.NodeFailed, wf.Status.Nodes["2"].Phase)
+ assert.Equal(t, wfv1.NodeSucceeded, wf.Status.Nodes["3"].Phase)
+ assert.Len(t, podsToDelete, 2)
})
t.Run("Retry continue on failed workflow with restartSuccessful and nodeFieldSelector", func(t *testing.T) {
@@ -1377,11 +1368,10 @@ func TestFormulateRetryWorkflow(t *testing.T) {
require.NoError(t, err)
wf, podsToDelete, err := FormulateRetryWorkflow(ctx, wf, true, "id=3", nil)
require.NoError(t, err)
- if assert.Len(t, wf.Status.Nodes, 2) {
- assert.Equal(t, wfv1.NodeSucceeded, wf.Status.Nodes["1"].Phase)
- assert.Equal(t, wfv1.NodeRunning, wf.Status.Nodes["continue-on-failed-workflow-2"].Phase)
- assert.Len(t, podsToDelete, 4)
- }
+ require.Len(t, wf.Status.Nodes, 2)
+ assert.Equal(t, wfv1.NodeSucceeded, wf.Status.Nodes["1"].Phase)
+ assert.Equal(t, wfv1.NodeRunning, wf.Status.Nodes["continue-on-failed-workflow-2"].Phase)
+ assert.Len(t, podsToDelete, 4)
})
}