crypto: arm64 - Use modern annotations for assembly functions
In an effort to clarify and simplify the annotation of assembly functions
in the kernel, new macros have been introduced. These replace ENTRY and
ENDPROC and also add a new annotation for static functions, which
previously had no ENTRY equivalent. Update the annotations in the crypto
code to the new macros.

There are a small number of files imported from OpenSSL where the assembly
is generated using Perl programs; these are not currently annotated at all
and have not been modified.

Signed-off-by: Mark Brown <[email protected]>
Acked-by: Ard Biesheuvel <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
broonie authored and herbertx committed Dec 20, 2019
1 parent 3907ccf commit 0e89640
Showing 17 changed files with 84 additions and 84 deletions.
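The conversion follows a single mechanical pattern. As a minimal sketch (my_func is a hypothetical name, not a symbol from this series), a global assembly function previously written as:

	ENTRY(my_func)
		/* function body */
		ret
	ENDPROC(my_func)

is now written as:

	SYM_FUNC_START(my_func)
		/* function body */
		ret
	SYM_FUNC_END(my_func)

Helpers that are only referenced within their own file, and were previously introduced by a bare local label, use SYM_FUNC_START_LOCAL instead of SYM_FUNC_START, as seen in aes-modes.S and aes-neonbs-core.S below.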
16 changes: 8 additions & 8 deletions arch/arm64/crypto/aes-ce-ccm-core.S
@@ -15,7 +15,7 @@
* void ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
* u32 *macp, u8 const rk[], u32 rounds);
*/
-ENTRY(ce_aes_ccm_auth_data)
+SYM_FUNC_START(ce_aes_ccm_auth_data)
ldr w8, [x3] /* leftover from prev round? */
ld1 {v0.16b}, [x0] /* load mac */
cbz w8, 1f
@@ -81,13 +81,13 @@ ENTRY(ce_aes_ccm_auth_data)
st1 {v0.16b}, [x0]
10: str w8, [x3]
ret
-ENDPROC(ce_aes_ccm_auth_data)
+SYM_FUNC_END(ce_aes_ccm_auth_data)

/*
* void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u8 const rk[],
* u32 rounds);
*/
-ENTRY(ce_aes_ccm_final)
+SYM_FUNC_START(ce_aes_ccm_final)
ld1 {v3.4s}, [x2], #16 /* load first round key */
ld1 {v0.16b}, [x0] /* load mac */
cmp w3, #12 /* which key size? */
@@ -121,7 +121,7 @@ ENTRY(ce_aes_ccm_final)
eor v0.16b, v0.16b, v1.16b /* en-/decrypt the mac */
st1 {v0.16b}, [x0] /* store result */
ret
-ENDPROC(ce_aes_ccm_final)
+SYM_FUNC_END(ce_aes_ccm_final)

.macro aes_ccm_do_crypt,enc
ldr x8, [x6, #8] /* load lower ctr */
@@ -212,10 +212,10 @@ CPU_LE( rev x8, x8 )
* u8 const rk[], u32 rounds, u8 mac[],
* u8 ctr[]);
*/
-ENTRY(ce_aes_ccm_encrypt)
+SYM_FUNC_START(ce_aes_ccm_encrypt)
aes_ccm_do_crypt 1
-ENDPROC(ce_aes_ccm_encrypt)
+SYM_FUNC_END(ce_aes_ccm_encrypt)

-ENTRY(ce_aes_ccm_decrypt)
+SYM_FUNC_START(ce_aes_ccm_decrypt)
aes_ccm_do_crypt 0
-ENDPROC(ce_aes_ccm_decrypt)
+SYM_FUNC_END(ce_aes_ccm_decrypt)
16 changes: 8 additions & 8 deletions arch/arm64/crypto/aes-ce-core.S
@@ -8,7 +8,7 @@

.arch armv8-a+crypto

-ENTRY(__aes_ce_encrypt)
+SYM_FUNC_START(__aes_ce_encrypt)
sub w3, w3, #2
ld1 {v0.16b}, [x2]
ld1 {v1.4s}, [x0], #16
@@ -34,9 +34,9 @@ ENTRY(__aes_ce_encrypt)
eor v0.16b, v0.16b, v3.16b
st1 {v0.16b}, [x1]
ret
-ENDPROC(__aes_ce_encrypt)
+SYM_FUNC_END(__aes_ce_encrypt)

-ENTRY(__aes_ce_decrypt)
+SYM_FUNC_START(__aes_ce_decrypt)
sub w3, w3, #2
ld1 {v0.16b}, [x2]
ld1 {v1.4s}, [x0], #16
@@ -62,23 +62,23 @@ ENTRY(__aes_ce_decrypt)
eor v0.16b, v0.16b, v3.16b
st1 {v0.16b}, [x1]
ret
-ENDPROC(__aes_ce_decrypt)
+SYM_FUNC_END(__aes_ce_decrypt)

/*
* __aes_ce_sub() - use the aese instruction to perform the AES sbox
* substitution on each byte in 'input'
*/
-ENTRY(__aes_ce_sub)
+SYM_FUNC_START(__aes_ce_sub)
dup v1.4s, w0
movi v0.16b, #0
aese v0.16b, v1.16b
umov w0, v0.s[0]
ret
-ENDPROC(__aes_ce_sub)
+SYM_FUNC_END(__aes_ce_sub)

-ENTRY(__aes_ce_invert)
+SYM_FUNC_START(__aes_ce_invert)
ld1 {v0.4s}, [x1]
aesimc v1.16b, v0.16b
st1 {v1.4s}, [x0]
ret
-ENDPROC(__aes_ce_invert)
+SYM_FUNC_END(__aes_ce_invert)
4 changes: 2 additions & 2 deletions arch/arm64/crypto/aes-ce.S
@@ -9,8 +9,8 @@
#include <linux/linkage.h>
#include <asm/assembler.h>

-#define AES_ENTRY(func) ENTRY(ce_ ## func)
-#define AES_ENDPROC(func) ENDPROC(ce_ ## func)
+#define AES_ENTRY(func) SYM_FUNC_START(ce_ ## func)
+#define AES_ENDPROC(func) SYM_FUNC_END(ce_ ## func)

.arch armv8-a+crypto

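aes-ce.S (like aes-neon.S below) pulls in the shared mode implementations from aes-modes.S and prefixes their symbol names via these wrapper macros, so updating the two #defines converts every wrapped function at once. For example, assuming the aes_ecb_encrypt function defined in aes-modes.S:

	AES_ENTRY(aes_ecb_encrypt)

previously expanded to ENTRY(ce_aes_ecb_encrypt) and now expands to SYM_FUNC_START(ce_aes_ecb_encrypt); the NEON build produces neon_aes_ecb_encrypt the same way.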
8 changes: 4 additions & 4 deletions arch/arm64/crypto/aes-cipher-core.S
@@ -122,11 +122,11 @@ CPU_BE( rev w7, w7 )
ret
.endm

-ENTRY(__aes_arm64_encrypt)
+SYM_FUNC_START(__aes_arm64_encrypt)
do_crypt fround, crypto_ft_tab, crypto_ft_tab + 1, 2
-ENDPROC(__aes_arm64_encrypt)
+SYM_FUNC_END(__aes_arm64_encrypt)

.align 5
-ENTRY(__aes_arm64_decrypt)
+SYM_FUNC_START(__aes_arm64_decrypt)
do_crypt iround, crypto_it_tab, crypto_aes_inv_sbox, 0
-ENDPROC(__aes_arm64_decrypt)
+SYM_FUNC_END(__aes_arm64_decrypt)
16 changes: 8 additions & 8 deletions arch/arm64/crypto/aes-modes.S
@@ -22,26 +22,26 @@
#define ST5(x...) x
#endif

-aes_encrypt_block4x:
+SYM_FUNC_START_LOCAL(aes_encrypt_block4x)
encrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7
ret
-ENDPROC(aes_encrypt_block4x)
+SYM_FUNC_END(aes_encrypt_block4x)

-aes_decrypt_block4x:
+SYM_FUNC_START_LOCAL(aes_decrypt_block4x)
decrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7
ret
-ENDPROC(aes_decrypt_block4x)
+SYM_FUNC_END(aes_decrypt_block4x)

#if MAX_STRIDE == 5
-aes_encrypt_block5x:
+SYM_FUNC_START_LOCAL(aes_encrypt_block5x)
encrypt_block5x v0, v1, v2, v3, v4, w3, x2, x8, w7
ret
-ENDPROC(aes_encrypt_block5x)
+SYM_FUNC_END(aes_encrypt_block5x)

-aes_decrypt_block5x:
+SYM_FUNC_START_LOCAL(aes_decrypt_block5x)
decrypt_block5x v0, v1, v2, v3, v4, w3, x2, x8, w7
ret
-ENDPROC(aes_decrypt_block5x)
+SYM_FUNC_END(aes_decrypt_block5x)
#endif
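The block4x/block5x helpers are only called from within aes-modes.S, so they take the new local variant, which the old ENTRY had no equivalent for. The difference between the two is symbol binding; a sketch of the relevant definitions from include/linux/linkage.h (paraphrased from the version this series targets):

	#define SYM_FUNC_START(name) \
		SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)

	#define SYM_FUNC_START_LOCAL(name) \
		SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN)

	#define SYM_FUNC_END(name) \
		SYM_END(name, SYM_T_FUNC)

SYM_FUNC_END records the symbol's size and marks it as a function, as ENDPROC did; the bare labels that previously opened these helpers carried no start annotation at all.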

/*
4 changes: 2 additions & 2 deletions arch/arm64/crypto/aes-neon.S
@@ -8,8 +8,8 @@
#include <linux/linkage.h>
#include <asm/assembler.h>

-#define AES_ENTRY(func) ENTRY(neon_ ## func)
-#define AES_ENDPROC(func) ENDPROC(neon_ ## func)
+#define AES_ENTRY(func) SYM_FUNC_START(neon_ ## func)
+#define AES_ENDPROC(func) SYM_FUNC_END(neon_ ## func)

xtsmask .req v7
cbciv .req v7
40 changes: 20 additions & 20 deletions arch/arm64/crypto/aes-neonbs-core.S
@@ -380,7 +380,7 @@ ISRM0: .octa 0x0306090c00070a0d01040b0e0205080f
/*
* void aesbs_convert_key(u8 out[], u32 const rk[], int rounds)
*/
-ENTRY(aesbs_convert_key)
+SYM_FUNC_START(aesbs_convert_key)
ld1 {v7.4s}, [x1], #16 // load round 0 key
ld1 {v17.4s}, [x1], #16 // load round 1 key

@@ -425,10 +425,10 @@ ENTRY(aesbs_convert_key)
eor v17.16b, v17.16b, v7.16b
str q17, [x0]
ret
-ENDPROC(aesbs_convert_key)
+SYM_FUNC_END(aesbs_convert_key)

.align 4
-aesbs_encrypt8:
+SYM_FUNC_START_LOCAL(aesbs_encrypt8)
ldr q9, [bskey], #16 // round 0 key
ldr q8, M0SR
ldr q24, SR
@@ -488,10 +488,10 @@ aesbs_encrypt8:
eor v2.16b, v2.16b, v12.16b
eor v5.16b, v5.16b, v12.16b
ret
-ENDPROC(aesbs_encrypt8)
+SYM_FUNC_END(aesbs_encrypt8)

.align 4
-aesbs_decrypt8:
+SYM_FUNC_START_LOCAL(aesbs_decrypt8)
lsl x9, rounds, #7
add bskey, bskey, x9

@@ -553,7 +553,7 @@ aesbs_decrypt8:
eor v3.16b, v3.16b, v12.16b
eor v5.16b, v5.16b, v12.16b
ret
-ENDPROC(aesbs_decrypt8)
+SYM_FUNC_END(aesbs_decrypt8)

/*
* aesbs_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
@@ -621,21 +621,21 @@ ENDPROC(aesbs_decrypt8)
.endm

.align 4
-ENTRY(aesbs_ecb_encrypt)
+SYM_FUNC_START(aesbs_ecb_encrypt)
__ecb_crypt aesbs_encrypt8, v0, v1, v4, v6, v3, v7, v2, v5
-ENDPROC(aesbs_ecb_encrypt)
+SYM_FUNC_END(aesbs_ecb_encrypt)

.align 4
-ENTRY(aesbs_ecb_decrypt)
+SYM_FUNC_START(aesbs_ecb_decrypt)
__ecb_crypt aesbs_decrypt8, v0, v1, v6, v4, v2, v7, v3, v5
-ENDPROC(aesbs_ecb_decrypt)
+SYM_FUNC_END(aesbs_ecb_decrypt)

/*
* aesbs_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
* int blocks, u8 iv[])
*/
.align 4
-ENTRY(aesbs_cbc_decrypt)
+SYM_FUNC_START(aesbs_cbc_decrypt)
frame_push 6

mov x19, x0
@@ -720,7 +720,7 @@ ENTRY(aesbs_cbc_decrypt)

2: frame_pop
ret
-ENDPROC(aesbs_cbc_decrypt)
+SYM_FUNC_END(aesbs_cbc_decrypt)

.macro next_tweak, out, in, const, tmp
sshr \tmp\().2d, \in\().2d, #63
@@ -736,7 +736,7 @@ ENDPROC(aesbs_cbc_decrypt)
* aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
* int blocks, u8 iv[])
*/
-__xts_crypt8:
+SYM_FUNC_START_LOCAL(__xts_crypt8)
mov x6, #1
lsl x6, x6, x23
subs w23, w23, #8
@@ -789,7 +789,7 @@ __xts_crypt8:
0: mov bskey, x21
mov rounds, x22
br x7
-ENDPROC(__xts_crypt8)
+SYM_FUNC_END(__xts_crypt8)

.macro __xts_crypt, do8, o0, o1, o2, o3, o4, o5, o6, o7
frame_push 6, 64
@@ -854,13 +854,13 @@ ENDPROC(__xts_crypt8)
ret
.endm

-ENTRY(aesbs_xts_encrypt)
+SYM_FUNC_START(aesbs_xts_encrypt)
__xts_crypt aesbs_encrypt8, v0, v1, v4, v6, v3, v7, v2, v5
-ENDPROC(aesbs_xts_encrypt)
+SYM_FUNC_END(aesbs_xts_encrypt)

-ENTRY(aesbs_xts_decrypt)
+SYM_FUNC_START(aesbs_xts_decrypt)
__xts_crypt aesbs_decrypt8, v0, v1, v6, v4, v2, v7, v3, v5
-ENDPROC(aesbs_xts_decrypt)
+SYM_FUNC_END(aesbs_xts_decrypt)

.macro next_ctr, v
mov \v\().d[1], x8
@@ -874,7 +874,7 @@ ENDPROC(aesbs_xts_decrypt)
* aesbs_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
* int rounds, int blocks, u8 iv[], u8 final[])
*/
-ENTRY(aesbs_ctr_encrypt)
+SYM_FUNC_START(aesbs_ctr_encrypt)
frame_push 8

mov x19, x0
@@ -1002,4 +1002,4 @@ CPU_LE( rev x8, x8 )
7: cbz x25, 8b
st1 {v5.16b}, [x25]
b 8b
-ENDPROC(aesbs_ctr_encrypt)
+SYM_FUNC_END(aesbs_ctr_encrypt)
16 changes: 8 additions & 8 deletions arch/arm64/crypto/chacha-neon-core.S
@@ -36,7 +36,7 @@
*
* Clobbers: w3, x10, v4, v12
*/
-chacha_permute:
+SYM_FUNC_START_LOCAL(chacha_permute)

adr_l x10, ROT8
ld1 {v12.4s}, [x10]
@@ -104,9 +104,9 @@ chacha_permute:
b.ne .Ldoubleround

ret
-ENDPROC(chacha_permute)
+SYM_FUNC_END(chacha_permute)

-ENTRY(chacha_block_xor_neon)
+SYM_FUNC_START(chacha_block_xor_neon)
// x0: Input state matrix, s
// x1: 1 data block output, o
// x2: 1 data block input, i
@@ -143,9 +143,9 @@ ENTRY(chacha_block_xor_neon)

ldp x29, x30, [sp], #16
ret
-ENDPROC(chacha_block_xor_neon)
+SYM_FUNC_END(chacha_block_xor_neon)

-ENTRY(hchacha_block_neon)
+SYM_FUNC_START(hchacha_block_neon)
// x0: Input state matrix, s
// x1: output (8 32-bit words)
// w2: nrounds
@@ -163,7 +163,7 @@ ENTRY(hchacha_block_neon)

ldp x29, x30, [sp], #16
ret
-ENDPROC(hchacha_block_neon)
+SYM_FUNC_END(hchacha_block_neon)

a0 .req w12
a1 .req w13
@@ -183,7 +183,7 @@ ENDPROC(hchacha_block_neon)
a15 .req w28

.align 6
-ENTRY(chacha_4block_xor_neon)
+SYM_FUNC_START(chacha_4block_xor_neon)
frame_push 10

// x0: Input state matrix, s
@@ -845,7 +845,7 @@ CPU_BE( rev a15, a15 )
eor v31.16b, v31.16b, v3.16b
st1 {v28.16b-v31.16b}, [x1]
b .Lout
-ENDPROC(chacha_4block_xor_neon)
+SYM_FUNC_END(chacha_4block_xor_neon)

.section ".rodata", "a", %progbits
.align L1_CACHE_SHIFT
[Diff truncated: the remaining nine changed files were not rendered; they apply the same ENTRY/ENDPROC to SYM_FUNC_START/SYM_FUNC_END conversion.]
