From eeffafa3d53b1dfd98d2c39719d21abca383b59a Mon Sep 17 00:00:00 2001 From: Tiago Oliveira Date: Mon, 15 Jul 2024 13:11:50 +0100 Subject: [PATCH] Remove declarations of unused variables: remaining implementations of the release/2023.05 branch; Note: unused function arguments were not removed -- reason: avoid breaking changes. --- .../keccak/keccak1600/amd64/avx2/keccak1600.jinc | 2 -- .../keccak/keccak1600/amd64/bmi1/keccak1600.jinc | 1 - .../keccak/keccak1600/amd64/ref/keccak1600.jinc | 1 - .../keccak/keccak1600/amd64/ref1/keccak1600.jinc | 1 - .../poly1305/amd64/avx/poly1305.jinc | 16 ++++------------ .../poly1305/amd64/avx2/poly1305.jinc | 6 +----- .../poly1305/amd64/ref/poly1305.jinc | 5 +---- .../curve25519/amd64/common/51/cswap5.jinc | 2 +- .../curve25519/amd64/common/51/sub5.jinc | 1 - .../curve25519/amd64/common/64/sub4.jinc | 1 - .../curve25519/amd64/mulx/invert4.jinc | 2 +- .../curve25519/amd64/mulx/mul4.jinc | 7 +------ .../curve25519/amd64/mulx/reduce4.jinc | 1 - .../curve25519/amd64/mulx/sqr4.jinc | 1 - .../xsalsa20poly1305/amd64/avx/xsalsa20_32D.jinc | 2 -- .../amd64/avx2/xsalsa20_32D.jinc | 2 -- .../xsalsa20poly1305/amd64/ref/poly1305_kD.jinc | 2 +- .../xsalsa20poly1305/amd64/ref/xsalsa20_32D.jinc | 2 -- .../falcon/falcon512/amd64/avx2/NTT_params.jinc | 2 +- .../falcon/falcon512/amd64/avx2/sign.jazz | 2 +- .../falcon/falcon512/amd64/avx2/sign.jinc | 5 ----- .../falcon/falcon512/amd64/avx2/test.jazz | 2 +- .../chacha/common/amd64/avx/chacha_store_h.jinc | 4 ---- .../chacha/common/amd64/avx/chacha_store_v.jinc | 3 --- .../chacha/common/amd64/avx2/chacha_store_h.jinc | 4 ---- .../chacha/common/amd64/avx2/chacha_store_v.jinc | 4 ---- 26 files changed, 13 insertions(+), 68 deletions(-) diff --git a/src/common/keccak/keccak1600/amd64/avx2/keccak1600.jinc b/src/common/keccak/keccak1600/amd64/avx2/keccak1600.jinc index 17401adb..4403e5bb 100644 --- a/src/common/keccak/keccak1600/amd64/avx2/keccak1600.jinc +++ 
b/src/common/keccak/keccak1600/amd64/avx2/keccak1600.jinc @@ -39,7 +39,6 @@ inline fn __add_full_block_avx2( inline int i; reg u64 j l t rate8; - reg u8 c; rate8 = rate; rate8 >>= 3; @@ -141,7 +140,6 @@ inline fn __xtr_full_block_avx2( inline int i; stack u64[28] s_state; reg u64 j l t len8; - reg u8 c; for i = 0 to 7 { s_state[u256 i] = state[i]; } diff --git a/src/common/keccak/keccak1600/amd64/bmi1/keccak1600.jinc b/src/common/keccak/keccak1600/amd64/bmi1/keccak1600.jinc index 6ec2b0fc..fa81ca75 100644 --- a/src/common/keccak/keccak1600/amd64/bmi1/keccak1600.jinc +++ b/src/common/keccak/keccak1600/amd64/bmi1/keccak1600.jinc @@ -90,7 +90,6 @@ inline fn __absorb_bmi1( { stack u64 s_in s_inlen s_rate; reg u8 trail_byte; - reg u64 t; // intermediate blocks while ( inlen >= rate ) diff --git a/src/common/keccak/keccak1600/amd64/ref/keccak1600.jinc b/src/common/keccak/keccak1600/amd64/ref/keccak1600.jinc index 470fe23a..cd718735 100644 --- a/src/common/keccak/keccak1600/amd64/ref/keccak1600.jinc +++ b/src/common/keccak/keccak1600/amd64/ref/keccak1600.jinc @@ -89,7 +89,6 @@ inline fn __absorb_ref( { stack u64 s_in s_inlen s_rate; reg u8 trail_byte; - reg u64 t; // intermediate blocks while ( inlen >= rate ) diff --git a/src/common/keccak/keccak1600/amd64/ref1/keccak1600.jinc b/src/common/keccak/keccak1600/amd64/ref1/keccak1600.jinc index 36fdb8b3..c6dcf710 100644 --- a/src/common/keccak/keccak1600/amd64/ref1/keccak1600.jinc +++ b/src/common/keccak/keccak1600/amd64/ref1/keccak1600.jinc @@ -89,7 +89,6 @@ inline fn __absorb_ref1( { stack u64 s_in s_inlen s_rate; reg u8 trail_byte; - reg u64 t; // intermediate blocks while ( inlen >= rate ) diff --git a/src/crypto_onetimeauth/poly1305/amd64/avx/poly1305.jinc b/src/crypto_onetimeauth/poly1305/amd64/avx/poly1305.jinc index 7e194881..67cad799 100644 --- a/src/crypto_onetimeauth/poly1305/amd64/avx/poly1305.jinc +++ b/src/crypto_onetimeauth/poly1305/amd64/avx/poly1305.jinc @@ -93,11 +93,10 @@ inline fn __broadcast_r4_avx( stack 
u128[5], stack u128[4] { - inline int i mask26; + inline int i; stack u128[5] r44; stack u128[4] r44x5; reg u64[5] t; - reg u64 h l; r44 = __unpack_avx(r44, r4, 0); @@ -117,12 +116,10 @@ inline fn __poly1305_avx_setup( stack u128[5], stack u128[4], stack u128[5], stack u128[4] { - inline int i mask26; + inline int i; stack u128[5] r44 r22 r12; stack u128[4] r44x5 r22x5 r12x5; - reg u128 t; reg u64[3] rt; - reg u64 h l; // rt = r; store rt for i=0 to 2 { rt[i] = r[i]; } rt[2] = 0; @@ -151,7 +148,7 @@ inline fn __load_avx( reg u128[5], reg u64 { - reg u128 t m0 m1; + reg u128 t; reg u128[5] m; t = (u128)[in + 0]; @@ -182,9 +179,7 @@ inline fn __load_avx( inline fn __pack_avx(reg u128[5] h) -> reg u64[3] { reg bool cf; - inline int i; reg u128[3] t; - reg u128 t0; reg u128[2] u; reg u64[3] d r; reg u64 c cx4; @@ -281,7 +276,6 @@ inline fn __mulmod_avx( reg u128[5] t; reg u128[4] u; reg u128 r0 r1 r4x5 r2 r3x5 r3 r2x5; - reg u128 mask26; r0 = s_r[0]; r1 = s_r[1]; @@ -372,14 +366,12 @@ inline fn __mainloop_avx_v1( reg u128[5], reg u64 { - inline int i; reg u128 mask26; stack u128[5] s_h; reg u128[5] m; reg u128[5] t; reg u128[4] u; reg u128 r0 r1 r4x5 r2 r3x5 r3 r2x5; - reg u128 mask26; reg u128 m0 m1 mt; r0 = s_r44[0]; @@ -565,7 +557,7 @@ inline fn __poly1305_avx_update( { inline int i; stack u128 s_mask26 s_bit25; - reg u128[5] h m; + reg u128[5] h; reg u128 mask26 t; reg u64[3] h64; diff --git a/src/crypto_onetimeauth/poly1305/amd64/avx2/poly1305.jinc b/src/crypto_onetimeauth/poly1305/amd64/avx2/poly1305.jinc index f34791fc..498f73b2 100644 --- a/src/crypto_onetimeauth/poly1305/amd64/avx2/poly1305.jinc +++ b/src/crypto_onetimeauth/poly1305/amd64/avx2/poly1305.jinc @@ -96,12 +96,10 @@ inline fn __poly1305_avx2_setup( stack u256[5], stack u256[4] { - inline int i mask26; + inline int i; stack u256[5] r4444 r1234; stack u256[4] r4444x5 r1234x5; - reg u256 t; reg u64[3] rt; - reg u64 h l; // rt = r; store rt for i=0 to 2 { rt[i] = r[i]; } rt[2] = 0; @@ -195,7 +193,6 @@ 
inline fn __load_avx2( inline fn __pack_avx2(reg u256[5] h) -> reg u64[3] { reg bool cf; - inline int i; reg u256[3] t; reg u128 t0; reg u256[2] u; @@ -380,7 +377,6 @@ inline fn __add_mulmod_avx2( reg u256[5] t; reg u256[4] u; reg u256 r0 r1 r4x5 r2 r3x5 r3 r2x5; - reg u256 mask26; inline int i; r0 = s_r[0]; diff --git a/src/crypto_onetimeauth/poly1305/amd64/ref/poly1305.jinc b/src/crypto_onetimeauth/poly1305/amd64/ref/poly1305.jinc index 3c243afe..84b4d86e 100644 --- a/src/crypto_onetimeauth/poly1305/amd64/ref/poly1305.jinc +++ b/src/crypto_onetimeauth/poly1305/amd64/ref/poly1305.jinc @@ -190,9 +190,6 @@ inline fn __poly1305_setup_ref(reg u64 k) -> reg u64[3], reg u64[3], reg u64 inline fn __poly1305_update_ref(reg u64 in inlen, reg u64[3] h r) -> reg u64, reg u64, reg u64[3] { - reg bool cf; - reg u64[2] m; - while(inlen >= 16) { h = __load_add(h, in); @@ -207,7 +204,7 @@ inline fn __poly1305_update_ref(reg u64 in inlen, reg u64[3] h r) -> reg u64, re inline fn __poly1305_last_ref(reg u64 in inlen k, reg u64[3] h r) -> reg u64[2] { - reg u64[2] m s h2; + reg u64[2] s h2; if(inlen > 0) { h = __load_last_add(h, in, inlen); diff --git a/src/crypto_scalarmult/curve25519/amd64/common/51/cswap5.jinc b/src/crypto_scalarmult/curve25519/amd64/common/51/cswap5.jinc index e66651b5..4e0bae61 100644 --- a/src/crypto_scalarmult/curve25519/amd64/common/51/cswap5.jinc +++ b/src/crypto_scalarmult/curve25519/amd64/common/51/cswap5.jinc @@ -11,7 +11,7 @@ inline fn __cswap5( stack u64[5] { inline int i; - reg u64[5] t4 x2r x3r z3r; + reg u64[5] t4 x2r x3r; reg u64 t mask; ?{}, mask = #set0(); diff --git a/src/crypto_scalarmult/curve25519/amd64/common/51/sub5.jinc b/src/crypto_scalarmult/curve25519/amd64/common/51/sub5.jinc index fe52f7f6..9ab467e8 100644 --- a/src/crypto_scalarmult/curve25519/amd64/common/51/sub5.jinc +++ b/src/crypto_scalarmult/curve25519/amd64/common/51/sub5.jinc @@ -32,7 +32,6 @@ inline fn __sub5_sss(stack u64[5] fs gs) -> stack u64[5] inline fn __sub5_rss(stack 
u64[5] fs gs) -> reg u64[5] { - stack u64[5] hs; reg u64[5] h f; f = #copy(fs); diff --git a/src/crypto_scalarmult/curve25519/amd64/common/64/sub4.jinc b/src/crypto_scalarmult/curve25519/amd64/common/64/sub4.jinc index f1889625..8f6f66e6 100644 --- a/src/crypto_scalarmult/curve25519/amd64/common/64/sub4.jinc +++ b/src/crypto_scalarmult/curve25519/amd64/common/64/sub4.jinc @@ -45,7 +45,6 @@ inline fn __sub4_sss(stack u64[4] fs gs) -> stack u64[4] inline fn __sub4_rss(stack u64[4] fs gs) -> reg u64[4] { - stack u64[4] hs; reg u64[4] h f; f = #copy(fs); diff --git a/src/crypto_scalarmult/curve25519/amd64/mulx/invert4.jinc b/src/crypto_scalarmult/curve25519/amd64/mulx/invert4.jinc index 727e3bca..a6c7644b 100644 --- a/src/crypto_scalarmult/curve25519/amd64/mulx/invert4.jinc +++ b/src/crypto_scalarmult/curve25519/amd64/mulx/invert4.jinc @@ -7,7 +7,7 @@ require "sqr4.jinc" inline fn __invert4(reg u64[4] f) -> reg u64[4] { reg u32 i; - stack u64[4] fs t0s t1s t2s t3s; + stack u64[4] fs t0s t1s t2s; reg u64[4] t0 t1 t2 t3; fs = #copy(f); diff --git a/src/crypto_scalarmult/curve25519/amd64/mulx/mul4.jinc b/src/crypto_scalarmult/curve25519/amd64/mulx/mul4.jinc index 0cbb33e8..776b19c4 100644 --- a/src/crypto_scalarmult/curve25519/amd64/mulx/mul4.jinc +++ b/src/crypto_scalarmult/curve25519/amd64/mulx/mul4.jinc @@ -11,8 +11,7 @@ inline fn __mul4_c0 reg bool, reg bool { - inline int i; - reg u64 hi lo; + reg u64 lo; reg u64[4] h r; (h[1], h[0]) = #MULX ( f0, g[0] ); @@ -44,7 +43,6 @@ inline fn __mul4_c1 reg bool, reg bool { - inline int i; reg u64 hi lo; ( hi, lo ) = #MULX ( f, g[0] ); @@ -81,7 +79,6 @@ inline fn __mul4_c2 reg bool, reg bool { - inline int i; reg u64 hi lo; ( hi, lo ) = #MULX ( f, g[0] ); @@ -118,7 +115,6 @@ inline fn __mul4_c3 reg bool, reg bool { - inline int i; reg u64 hi lo; ( hi, lo ) = #MULX ( f, g[0] ); @@ -205,7 +201,6 @@ inline fn __mul4_rss(stack u64[4] fs gs) -> reg u64[4] inline fn __mul4_a24_rs(stack u64[4] fs, inline u64 a24) -> reg u64[4] { - 
inline int i; reg bool cf; reg u64[4] h; reg u64 c r0 lo; diff --git a/src/crypto_scalarmult/curve25519/amd64/mulx/reduce4.jinc b/src/crypto_scalarmult/curve25519/amd64/mulx/reduce4.jinc index fde30fd4..6767216e 100644 --- a/src/crypto_scalarmult/curve25519/amd64/mulx/reduce4.jinc +++ b/src/crypto_scalarmult/curve25519/amd64/mulx/reduce4.jinc @@ -6,7 +6,6 @@ inline fn __reduce4 reg bool cf of // cf = 0 and of = 0 ) -> reg u64[4] { - inline int i; reg u64 hi lo; // diff --git a/src/crypto_scalarmult/curve25519/amd64/mulx/sqr4.jinc b/src/crypto_scalarmult/curve25519/amd64/mulx/sqr4.jinc index 46fd5c62..4f1ff111 100644 --- a/src/crypto_scalarmult/curve25519/amd64/mulx/sqr4.jinc +++ b/src/crypto_scalarmult/curve25519/amd64/mulx/sqr4.jinc @@ -3,7 +3,6 @@ require "reduce4.jinc" inline fn __sqr4_rr(reg u64[4] f) -> reg u64[4] { reg bool cf of; - inline int i; reg u64[8] t; reg u64[4] h r; reg u64 z _38 fx; diff --git a/src/crypto_secretbox/xsalsa20poly1305/amd64/avx/xsalsa20_32D.jinc b/src/crypto_secretbox/xsalsa20poly1305/amd64/avx/xsalsa20_32D.jinc index c07920e8..c2163c05 100644 --- a/src/crypto_secretbox/xsalsa20poly1305/amd64/avx/xsalsa20_32D.jinc +++ b/src/crypto_secretbox/xsalsa20poly1305/amd64/avx/xsalsa20_32D.jinc @@ -1,5 +1,3 @@ -param int SALSA20_ROUNDS=20; - from Jade require "crypto_stream/xsalsa20/amd64/ref/hsalsa20.jinc" require "salsa20_32D.jinc" diff --git a/src/crypto_secretbox/xsalsa20poly1305/amd64/avx2/xsalsa20_32D.jinc b/src/crypto_secretbox/xsalsa20poly1305/amd64/avx2/xsalsa20_32D.jinc index c07920e8..c2163c05 100644 --- a/src/crypto_secretbox/xsalsa20poly1305/amd64/avx2/xsalsa20_32D.jinc +++ b/src/crypto_secretbox/xsalsa20poly1305/amd64/avx2/xsalsa20_32D.jinc @@ -1,5 +1,3 @@ -param int SALSA20_ROUNDS=20; - from Jade require "crypto_stream/xsalsa20/amd64/ref/hsalsa20.jinc" require "salsa20_32D.jinc" diff --git a/src/crypto_secretbox/xsalsa20poly1305/amd64/ref/poly1305_kD.jinc b/src/crypto_secretbox/xsalsa20poly1305/amd64/ref/poly1305_kD.jinc index 
fcd32d06..e00ec302 100644 --- a/src/crypto_secretbox/xsalsa20poly1305/amd64/ref/poly1305_kD.jinc +++ b/src/crypto_secretbox/xsalsa20poly1305/amd64/ref/poly1305_kD.jinc @@ -42,7 +42,7 @@ inline fn __poly1305_last_ref_k( reg u64[2] { reg bool cf; - reg u64[2] m s h2; + reg u64[2] h2; if(inlen > 0) { h = __load_last_add(h, in, inlen); diff --git a/src/crypto_secretbox/xsalsa20poly1305/amd64/ref/xsalsa20_32D.jinc b/src/crypto_secretbox/xsalsa20poly1305/amd64/ref/xsalsa20_32D.jinc index c07920e8..c2163c05 100644 --- a/src/crypto_secretbox/xsalsa20poly1305/amd64/ref/xsalsa20_32D.jinc +++ b/src/crypto_secretbox/xsalsa20poly1305/amd64/ref/xsalsa20_32D.jinc @@ -1,5 +1,3 @@ -param int SALSA20_ROUNDS=20; - from Jade require "crypto_stream/xsalsa20/amd64/ref/hsalsa20.jinc" require "salsa20_32D.jinc" diff --git a/src/crypto_sign/falcon/falcon512/amd64/avx2/NTT_params.jinc b/src/crypto_sign/falcon/falcon512/amd64/avx2/NTT_params.jinc index 40e679a2..9df71a41 100644 --- a/src/crypto_sign/falcon/falcon512/amd64/avx2/NTT_params.jinc +++ b/src/crypto_sign/falcon/falcon512/amd64/avx2/NTT_params.jinc @@ -1,5 +1,5 @@ -require "params.jinc" +from Jade require "crypto_sign/falcon/falcon512/common/params.jinc" param int TWIDDLE_N = 576; diff --git a/src/crypto_sign/falcon/falcon512/amd64/avx2/sign.jazz b/src/crypto_sign/falcon/falcon512/amd64/avx2/sign.jazz index fc3433d3..e873f419 100644 --- a/src/crypto_sign/falcon/falcon512/amd64/avx2/sign.jazz +++ b/src/crypto_sign/falcon/falcon512/amd64/avx2/sign.jazz @@ -1,5 +1,5 @@ -require "params.jinc" +from Jade require "crypto_sign/falcon/falcon512/common/params.jinc" require "vec.jinc" require "verify.jinc" require "sign.jinc" diff --git a/src/crypto_sign/falcon/falcon512/amd64/avx2/sign.jinc b/src/crypto_sign/falcon/falcon512/amd64/avx2/sign.jinc index 17bfdf27..ff0cd12a 100644 --- a/src/crypto_sign/falcon/falcon512/amd64/avx2/sign.jinc +++ b/src/crypto_sign/falcon/falcon512/amd64/avx2/sign.jinc @@ -11,7 +11,6 @@ inline fn 
__decode_public_key(stack u16[ARRAY_N] h, reg u64 pk) -> stack u16[ARR reg u32 failed res; reg u8 t; reg u64 ptr0; - reg u64 i; failed = 0; @@ -37,8 +36,6 @@ inline fn __decode_sign(stack u16[ARRAY_N] sign, reg u64 esign sign_len) -> stac reg u8 t; reg u64 ptr0 t_sign_len; - reg u64 i; - failed = 0; t = (u8)[esign]; @@ -190,11 +187,9 @@ inline fn __jade_sign_falcon_falcon512_amd64_avx2_open(reg u64 m mlen sm smlen p reg u64 sig_len msg_len esig; reg u64 shake_in; - reg u64[25] state; reg u64 i; reg u32 failed; reg u64 res; - reg u16[ARRAY_N] h hm sig; reg u8 tmp8; diff --git a/src/crypto_sign/falcon/falcon512/amd64/avx2/test.jazz b/src/crypto_sign/falcon/falcon512/amd64/avx2/test.jazz index f41af58f..f1892f09 100644 --- a/src/crypto_sign/falcon/falcon512/amd64/avx2/test.jazz +++ b/src/crypto_sign/falcon/falcon512/amd64/avx2/test.jazz @@ -1,4 +1,4 @@ -require "params.jinc" +from Jade require "crypto_sign/falcon/falcon512/common/params.jinc" require "vec.jinc" require "verify.jinc" diff --git a/src/crypto_stream/chacha/common/amd64/avx/chacha_store_h.jinc b/src/crypto_stream/chacha/common/amd64/avx/chacha_store_h.jinc index bee6784f..d1833300 100644 --- a/src/crypto_stream/chacha/common/amd64/avx/chacha_store_h.jinc +++ b/src/crypto_stream/chacha/common/amd64/avx/chacha_store_h.jinc @@ -96,8 +96,6 @@ inline fn __store_xor_h_x2_avx(reg u64 output input len, reg u128[4] k1 k2) -> r // <= 128 bytes inline fn __store_xor_last_h_x2_avx(reg u64 output input len, reg u128[4] k1 k2) { - inline int i; - // write 64 bytes if(len >= 64) { output, input, len = __store_xor_h_avx(output, input, len, k1); @@ -193,8 +191,6 @@ inline fn __store_h_x2_avx(reg u64 output len, reg u128[4] k1 k2) -> reg u64, re // <= 128 bytes inline fn __store_last_h_x2_avx(reg u64 output len, reg u128[4] k1 k2) { - inline int i; - // write 64 bytes if(len >= 64) { output, len = __store_h_avx(output, len, k1); diff --git a/src/crypto_stream/chacha/common/amd64/avx/chacha_store_v.jinc 
b/src/crypto_stream/chacha/common/amd64/avx/chacha_store_v.jinc index 96f8cb0a..a8d4d988 100644 --- a/src/crypto_stream/chacha/common/amd64/avx/chacha_store_v.jinc +++ b/src/crypto_stream/chacha/common/amd64/avx/chacha_store_v.jinc @@ -102,7 +102,6 @@ inline fn __rotate_first_half_v_avx(reg u128[16] k) -> reg u128[8], stack u128[8 inline fn __rotate_second_half_v_avx(stack u128[8] s_k8_15) -> reg u128[8] { - inline int i; reg u128[8] k8_15; k8_15 = __rotate_stack_avx(s_k8_15); return k8_15; @@ -166,7 +165,6 @@ inline fn __store_xor_v_avx(reg u64 output input len, reg u128[16] k) -> reg u64 // <= 256 bytes inline fn __store_xor_last_v_avx(reg u64 output input len, reg u128[16] k) { - inline int i; stack u128[8] s_k0_7 s_k8_15; reg u128[8] k0_7 k8_15; reg u128[4] k0_3 k4_7; @@ -220,7 +218,6 @@ inline fn __store_v_avx(reg u64 output len, reg u128[16] k) -> reg u64, reg u64 // <= 256 bytes inline fn __store_last_v_avx(reg u64 output len, reg u128[16] k) { - inline int i; stack u128[8] s_k0_7 s_k8_15; reg u128[8] k0_7 k8_15; reg u128[4] k0_3 k4_7; diff --git a/src/crypto_stream/chacha/common/amd64/avx2/chacha_store_h.jinc b/src/crypto_stream/chacha/common/amd64/avx2/chacha_store_h.jinc index be1226d5..4f17ffb6 100644 --- a/src/crypto_stream/chacha/common/amd64/avx2/chacha_store_h.jinc +++ b/src/crypto_stream/chacha/common/amd64/avx2/chacha_store_h.jinc @@ -108,8 +108,6 @@ inline fn __store_xor_h_x2_avx2(reg u64 output input len, reg u256[4] k1 k2) -> // <= 256 bytes inline fn __store_xor_last_h_x2_avx2(reg u64 output input len, reg u256[4] k1 k2) { - inline int i; - // write 128 bytes if(len >= 128) { output, input, len = __store_xor_h_avx2(output, input, len, k1); @@ -215,8 +213,6 @@ inline fn __store_h_x2_avx2(reg u64 output len, reg u256[4] k1 k2) -> reg u64, r // <= 256 bytes inline fn __store_last_h_x2_avx2(reg u64 output len, reg u256[4] k1 k2) { - inline int i; - // write 128 bytes if(len >= 128) { output, len = __store_h_avx2(output, len, k1); diff --git 
a/src/crypto_stream/chacha/common/amd64/avx2/chacha_store_v.jinc b/src/crypto_stream/chacha/common/amd64/avx2/chacha_store_v.jinc index 7d8f11dd..e76c3487 100644 --- a/src/crypto_stream/chacha/common/amd64/avx2/chacha_store_v.jinc +++ b/src/crypto_stream/chacha/common/amd64/avx2/chacha_store_v.jinc @@ -78,7 +78,6 @@ inline fn __rotate_first_half_v_avx2(reg u256[16] k) -> reg u256[8], stack u256[ inline fn __rotate_second_half_v_avx2(stack u256[8] s_k8_15) -> reg u256[8] { - inline int i; reg u256[8] k8_15; k8_15 = __rotate_stack_avx2(s_k8_15); return k8_15; @@ -87,7 +86,6 @@ inline fn __rotate_second_half_v_avx2(stack u256[8] s_k8_15) -> reg u256[8] inline fn __interleave_avx2(stack u256[8] s, reg u256[8] k, inline int o) -> reg u256[4], reg u256[4] { - inline int i; reg u256[4] sk1 sk2; sk1[0] = s[o + 0]; @@ -140,7 +138,6 @@ inline fn __store_xor_v_avx2(reg u64 output input len, reg u256[16] k) -> reg u6 // <= 512 bytes inline fn __store_xor_last_v_avx2(reg u64 output input len, reg u256[16] k) { - inline int i; stack u256[8] s_k0_7 s_k8_15; reg u256[8] k0_7 k8_15; reg u256[4] k0_3 k4_7; @@ -194,7 +191,6 @@ inline fn __store_v_avx2(reg u64 output len, reg u256[16] k) -> reg u64, reg u64 // <= 512 bytes inline fn __store_last_v_avx2(reg u64 output len, reg u256[16] k) { - inline int i; stack u256[8] s_k0_7 s_k8_15; reg u256[8] k0_7 k8_15; reg u256[4] k0_3 k4_7;