Message ID | 20230603023426.1064431-32-richard.henderson@linaro.org |
---|---|
State | New |
Series | crypto: Provide aes-round.h and host accel |
On Sat, 3 Jun 2023 at 04:34, Richard Henderson <richard.henderson@linaro.org> wrote:
>
> Detect AES in cpuinfo; implement the accel hooks.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>  host/include/aarch64/host/aes-round.h | 204 ++++++++++++++++++++++++++
>  host/include/aarch64/host/cpuinfo.h   |   1 +
>  util/cpuinfo-aarch64.c                |   2 +
>  3 files changed, 207 insertions(+)
>  create mode 100644 host/include/aarch64/host/aes-round.h
>
> diff --git a/host/include/aarch64/host/aes-round.h b/host/include/aarch64/host/aes-round.h
> new file mode 100644
> index 0000000000..27ca823db6
> --- /dev/null
> +++ b/host/include/aarch64/host/aes-round.h
> @@ -0,0 +1,204 @@
> +/*
> + * AArch64 specific aes acceleration.
> + * SPDX-License-Identifier: GPL-2.0-or-later
> + */
> +
> +#ifndef HOST_AES_ROUND_H
> +#define HOST_AES_ROUND_H
> +
> +#include "host/cpuinfo.h"
> +#include <arm_neon.h>
> +
> +#ifdef __ARM_FEATURE_AES
> +# define HAVE_AES_ACCEL  true
> +# define ATTR_AES_ACCEL
> +#else
> +# define HAVE_AES_ACCEL  likely(cpuinfo & CPUINFO_AES)
> +# define ATTR_AES_ACCEL  __attribute__((target("+crypto")))
> +#endif
> +
> +static inline uint8x16_t aes_accel_bswap(uint8x16_t x)
> +{
> +    /* No arm_neon.h primitive, and the compilers don't share builtins. */

vqtbl1q_u8() perhaps?

> +#ifdef __clang__
> +    return __builtin_shufflevector(x, x, 15, 14, 13, 12, 11, 10, 9, 8,
> +                                   7, 6, 5, 4, 3, 2, 1, 0);
> +#else
> +    return __builtin_shuffle(x, (uint8x16_t)
> +                             { 15, 14, 13, 12, 11, 10, 9, 8,
> +                               7, 6, 5, 4, 3, 2, 1, 0, });
> +#endif
> +}
> +
> +/*
> + * Through clang 15, the aes inlines are only defined if __ARM_FEATURE_AES;
> + * one cannot use __attribute__((target)) to make them appear after the fact.
> + * Therefore we must fallback to inline asm.
> + */
> +#ifdef __ARM_FEATURE_AES
> +# define aes_accel_aesd   vaesdq_u8
> +# define aes_accel_aese   vaeseq_u8
> +# define aes_accel_aesmc  vaesmcq_u8
> +# define aes_accel_aesimc vaesimcq_u8
> +#else
> +static inline uint8x16_t aes_accel_aesd(uint8x16_t d, uint8x16_t k)
> +{
> +    asm(".arch_extension aes\n\t"
> +        "aesd %0.16b, %1.16b" : "+w"(d) : "w"(k));
> +    return d;
> +}
> +
> +static inline uint8x16_t aes_accel_aese(uint8x16_t d, uint8x16_t k)
> +{
> +    asm(".arch_extension aes\n\t"
> +        "aese %0.16b, %1.16b" : "+w"(d) : "w"(k));
> +    return d;
> +}
> +
> +static inline uint8x16_t aes_accel_aesmc(uint8x16_t d)
> +{
> +    asm(".arch_extension aes\n\t"
> +        "aesmc %0.16b, %1.16b" : "=w"(d) : "w"(d));

Most ARM cores fuse aese/aesmc into a single uop (with the associated
performance boost) if the pattern is

aese x, y
aesmc x,x

aesd x, y
aesimc x,x

So it might make sense to use +w here at least, and use only a single
register (which the compiler will likely do in any case, but still)

I would assume that the compiler cannot issue these separately based
on the sequences below, but if it might, it may be worth it to emit
the aese/aesmc together in a single asm() block

> +    return d;
> +}
> +
> +static inline uint8x16_t aes_accel_aesimc(uint8x16_t d)
> +{
> +    asm(".arch_extension aes\n\t"
> +        "aesimc %0.16b, %1.16b" : "=w"(d) : "w"(d));
> +    return d;
> +}
> +#endif /* __ARM_FEATURE_AES */
> +
> +static inline void ATTR_AES_ACCEL
> +aesenc_MC_accel(AESState *ret, const AESState *st, bool be)
> +{
> +    uint8x16_t t = (uint8x16_t)st->v;
> +
> +    if (be) {
> +        t = aes_accel_bswap(t);
> +        t = aes_accel_aesmc(t);
> +        t = aes_accel_bswap(t);
> +    } else {
> +        t = aes_accel_aesmc(t);
> +    }
> +    ret->v = (AESStateVec)t;
> +}
> +
> +static inline void ATTR_AES_ACCEL
> +aesenc_SB_SR_accel(AESState *ret, const AESState *st, bool be)
> +{
> +    uint8x16_t t = (uint8x16_t)st->v;
> +    uint8x16_t z = { };
> +
> +    if (be) {
> +        t = aes_accel_bswap(t);
> +        t = aes_accel_aese(t, z);
> +        t = aes_accel_bswap(t);
> +    } else {
> +        t = aes_accel_aese(t, z);
> +    }
> +    ret->v = (AESStateVec)t;
> +}
> +
> +static inline void ATTR_AES_ACCEL
> +aesenc_SB_SR_MC_AK_accel(AESState *ret, const AESState *st,
> +                         const AESState *rk, bool be)
> +{
> +    uint8x16_t t = (uint8x16_t)st->v;
> +    uint8x16_t k = (uint8x16_t)rk->v;
> +    uint8x16_t z = { };
> +
> +    if (be) {
> +        t = aes_accel_bswap(t);
> +        k = aes_accel_bswap(k);
> +        t = aes_accel_aese(t, z);
> +        t = aes_accel_aesmc(t);
> +        t = veorq_u8(t, k);
> +        t = aes_accel_bswap(t);
> +    } else {
> +        t = aes_accel_aese(t, z);
> +        t = aes_accel_aesmc(t);
> +        t = veorq_u8(t, k);
> +    }
> +    ret->v = (AESStateVec)t;
> +}
> +
> +static inline void ATTR_AES_ACCEL
> +aesdec_IMC_accel(AESState *ret, const AESState *st, bool be)
> +{
> +    uint8x16_t t = (uint8x16_t)st->v;
> +
> +    if (be) {
> +        t = aes_accel_bswap(t);
> +        t = aes_accel_aesimc(t);
> +        t = aes_accel_bswap(t);
> +    } else {
> +        t = aes_accel_aesimc(t);
> +    }
> +    ret->v = (AESStateVec)t;
> +}
> +
> +static inline void ATTR_AES_ACCEL
> +aesdec_ISB_ISR_accel(AESState *ret, const AESState *st, bool be)
> +{
> +    uint8x16_t t = (uint8x16_t)st->v;
> +    uint8x16_t z = { };
> +
> +    if (be) {
> +        t = aes_accel_bswap(t);
> +        t = aes_accel_aesd(t, z);
> +        t = aes_accel_bswap(t);
> +    } else {
> +        t = aes_accel_aesd(t, z);
> +    }
> +    ret->v = (AESStateVec)t;
> +}
> +
> +static inline void ATTR_AES_ACCEL
> +aesdec_ISB_ISR_AK_IMC_accel(AESState *ret, const AESState *st,
> +                            const AESState *rk, bool be)
> +{
> +    uint8x16_t t = (uint8x16_t)st->v;
> +    uint8x16_t k = (uint8x16_t)rk->v;
> +    uint8x16_t z = { };
> +
> +    if (be) {
> +        t = aes_accel_bswap(t);
> +        k = aes_accel_bswap(k);
> +        t = aes_accel_aesd(t, z);
> +        t = veorq_u8(t, k);
> +        t = aes_accel_aesimc(t);
> +        t = aes_accel_bswap(t);
> +    } else {
> +        t = aes_accel_aesd(t, z);
> +        t = veorq_u8(t, k);
> +        t = aes_accel_aesimc(t);
> +    }
> +    ret->v = (AESStateVec)t;
> +}
> +
> +static inline void ATTR_AES_ACCEL
> +aesdec_ISB_ISR_IMC_AK_accel(AESState *ret, const AESState *st,
> +                            const AESState *rk, bool be)
> +{
> +    uint8x16_t t = (uint8x16_t)st->v;
> +    uint8x16_t k = (uint8x16_t)rk->v;
> +    uint8x16_t z = { };
> +
> +    if (be) {
> +        t = aes_accel_bswap(t);
> +        k = aes_accel_bswap(k);
> +        t = aes_accel_aesd(t, z);
> +        t = aes_accel_aesimc(t);
> +        t = veorq_u8(t, k);
> +        t = aes_accel_bswap(t);
> +    } else {
> +        t = aes_accel_aesd(t, z);
> +        t = aes_accel_aesimc(t);
> +        t = veorq_u8(t, k);
> +    }
> +    ret->v = (AESStateVec)t;
> +}
> +
> +#endif
> diff --git a/host/include/aarch64/host/cpuinfo.h b/host/include/aarch64/host/cpuinfo.h
> index 82227890b4..05feeb4f43 100644
> --- a/host/include/aarch64/host/cpuinfo.h
> +++ b/host/include/aarch64/host/cpuinfo.h
> @@ -9,6 +9,7 @@
>  #define CPUINFO_ALWAYS    (1u << 0)  /* so cpuinfo is nonzero */
>  #define CPUINFO_LSE       (1u << 1)
>  #define CPUINFO_LSE2      (1u << 2)
> +#define CPUINFO_AES       (1u << 3)
>
>  /* Initialized with a constructor. */
>  extern unsigned cpuinfo;
> diff --git a/util/cpuinfo-aarch64.c b/util/cpuinfo-aarch64.c
> index f99acb7884..ababc39550 100644
> --- a/util/cpuinfo-aarch64.c
> +++ b/util/cpuinfo-aarch64.c
> @@ -56,10 +56,12 @@ unsigned __attribute__((constructor)) cpuinfo_init(void)
>      unsigned long hwcap = qemu_getauxval(AT_HWCAP);
>      info |= (hwcap & HWCAP_ATOMICS ? CPUINFO_LSE : 0);
>      info |= (hwcap & HWCAP_USCAT ? CPUINFO_LSE2 : 0);
> +    info |= (hwcap & HWCAP_AES ? CPUINFO_AES: 0);
>  #endif
>  #ifdef CONFIG_DARWIN
>      info |= sysctl_for_bool("hw.optional.arm.FEAT_LSE") * CPUINFO_LSE;
>      info |= sysctl_for_bool("hw.optional.arm.FEAT_LSE2") * CPUINFO_LSE2;
> +    info |= sysctl_for_bool("hw.optional.arm.FEAT_AES") * CPUINFO_AES;
>  #endif
>
>      cpuinfo = info;
> --
> 2.34.1
>
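Concretely, Ard's vqtbl1q_u8() suggestion would map onto the helper roughly as
below. This is an untested sketch: the only thing added beyond what the patch
already defines is the byte-reversal index vector, and vqtbl1q_u8() needs no
crypto target attribute since it is a base AArch64 AdvSIMD intrinsic.

/* Sketch: byte-reverse a 128-bit vector without compiler-specific builtins. */
static inline uint8x16_t aes_accel_bswap(uint8x16_t x)
{
    /* Index vector selecting bytes 15..0 in reverse order. */
    const uint8x16_t rev = { 15, 14, 13, 12, 11, 10, 9, 8,
                             7, 6, 5, 4, 3, 2, 1, 0 };
    return vqtbl1q_u8(x, rev);
}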
On 6/3/23 05:50, Ard Biesheuvel wrote:
> On Sat, 3 Jun 2023 at 04:34, Richard Henderson
> <richard.henderson@linaro.org> wrote:
>>
>> Detect AES in cpuinfo; implement the accel hooks.
>>
>> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
>> ---
>>  host/include/aarch64/host/aes-round.h | 204 ++++++++++++++++++++++++++
>>  host/include/aarch64/host/cpuinfo.h   |   1 +
>>  util/cpuinfo-aarch64.c                |   2 +
>>  3 files changed, 207 insertions(+)
>>  create mode 100644 host/include/aarch64/host/aes-round.h
>>
>> diff --git a/host/include/aarch64/host/aes-round.h b/host/include/aarch64/host/aes-round.h
>> new file mode 100644
>> index 0000000000..27ca823db6
>> --- /dev/null
>> +++ b/host/include/aarch64/host/aes-round.h
>> @@ -0,0 +1,204 @@
>> +/*
>> + * AArch64 specific aes acceleration.
>> + * SPDX-License-Identifier: GPL-2.0-or-later
>> + */
>> +
>> +#ifndef HOST_AES_ROUND_H
>> +#define HOST_AES_ROUND_H
>> +
>> +#include "host/cpuinfo.h"
>> +#include <arm_neon.h>
>> +
>> +#ifdef __ARM_FEATURE_AES
>> +# define HAVE_AES_ACCEL  true
>> +# define ATTR_AES_ACCEL
>> +#else
>> +# define HAVE_AES_ACCEL  likely(cpuinfo & CPUINFO_AES)
>> +# define ATTR_AES_ACCEL  __attribute__((target("+crypto")))
>> +#endif
>> +
>> +static inline uint8x16_t aes_accel_bswap(uint8x16_t x)
>> +{
>> +    /* No arm_neon.h primitive, and the compilers don't share builtins. */
>
> vqtbl1q_u8() perhaps?

Ah, yes, thanks.

>> +static inline uint8x16_t aes_accel_aesmc(uint8x16_t d)
>> +{
>> +    asm(".arch_extension aes\n\t"
>> +        "aesmc %0.16b, %1.16b" : "=w"(d) : "w"(d));
>
> Most ARM cores fuse aese/aesmc into a single uop (with the associated
> performance boost) if the pattern is
>
> aese x, y
> aesmc x,x
>
> aesd x, y
> aesimc x,x
>
> So it might make sense to use +w here at least, and use only a single
> register (which the compiler will likely do in any case, but still)
>
> I would assume that the compiler cannot issue these separately based
> on the sequences below, but if it might, it may be worth it to emit
> the aese/aesmc together in a single asm() block

There could be shuffling. It's low probability, but possible.

I really should move the builtin test to meson, as clang-16 fixes the
builtin visibility issue. I can see that gcc knows fusion of these pairs;
I assume clang does as well, but I don't know the code base well enough
to check.

I suppose it's going to be years until clang-16 can be assumed, as Debian
bookworm is to be released this month with clang-14. So it's probably
worth spending a few more minutes on this now.


r~
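To make the fusion point concrete: one way to guarantee the pair stays adjacent
is to emit both instructions from a single asm() block. The helper name below
(aes_accel_aese_mc) is invented for illustration; the constraints mirror the
aese/aesd helpers already in the patch, and the same shape would apply to an
aesd/aesimc pair.

/* Sketch: keep aese+aesmc adjacent so cores that fuse the pair can do so. */
static inline uint8x16_t aes_accel_aese_mc(uint8x16_t d, uint8x16_t k)
{
    asm(".arch_extension aes\n\t"
        "aese %0.16b, %1.16b\n\t"
        "aesmc %0.16b, %0.16b"
        : "+w"(d) : "w"(k));
    return d;
}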
diff --git a/host/include/aarch64/host/aes-round.h b/host/include/aarch64/host/aes-round.h
new file mode 100644
index 0000000000..27ca823db6
--- /dev/null
+++ b/host/include/aarch64/host/aes-round.h
@@ -0,0 +1,204 @@
+/*
+ * AArch64 specific aes acceleration.
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HOST_AES_ROUND_H
+#define HOST_AES_ROUND_H
+
+#include "host/cpuinfo.h"
+#include <arm_neon.h>
+
+#ifdef __ARM_FEATURE_AES
+# define HAVE_AES_ACCEL  true
+# define ATTR_AES_ACCEL
+#else
+# define HAVE_AES_ACCEL  likely(cpuinfo & CPUINFO_AES)
+# define ATTR_AES_ACCEL  __attribute__((target("+crypto")))
+#endif
+
+static inline uint8x16_t aes_accel_bswap(uint8x16_t x)
+{
+    /* No arm_neon.h primitive, and the compilers don't share builtins. */
+#ifdef __clang__
+    return __builtin_shufflevector(x, x, 15, 14, 13, 12, 11, 10, 9, 8,
+                                   7, 6, 5, 4, 3, 2, 1, 0);
+#else
+    return __builtin_shuffle(x, (uint8x16_t)
+                             { 15, 14, 13, 12, 11, 10, 9, 8,
+                               7, 6, 5, 4, 3, 2, 1, 0, });
+#endif
+}
+
+/*
+ * Through clang 15, the aes inlines are only defined if __ARM_FEATURE_AES;
+ * one cannot use __attribute__((target)) to make them appear after the fact.
+ * Therefore we must fallback to inline asm.
+ */
+#ifdef __ARM_FEATURE_AES
+# define aes_accel_aesd   vaesdq_u8
+# define aes_accel_aese   vaeseq_u8
+# define aes_accel_aesmc  vaesmcq_u8
+# define aes_accel_aesimc vaesimcq_u8
+#else
+static inline uint8x16_t aes_accel_aesd(uint8x16_t d, uint8x16_t k)
+{
+    asm(".arch_extension aes\n\t"
+        "aesd %0.16b, %1.16b" : "+w"(d) : "w"(k));
+    return d;
+}
+
+static inline uint8x16_t aes_accel_aese(uint8x16_t d, uint8x16_t k)
+{
+    asm(".arch_extension aes\n\t"
+        "aese %0.16b, %1.16b" : "+w"(d) : "w"(k));
+    return d;
+}
+
+static inline uint8x16_t aes_accel_aesmc(uint8x16_t d)
+{
+    asm(".arch_extension aes\n\t"
+        "aesmc %0.16b, %1.16b" : "=w"(d) : "w"(d));
+    return d;
+}
+
+static inline uint8x16_t aes_accel_aesimc(uint8x16_t d)
+{
+    asm(".arch_extension aes\n\t"
+        "aesimc %0.16b, %1.16b" : "=w"(d) : "w"(d));
+    return d;
+}
+#endif /* __ARM_FEATURE_AES */
+
+static inline void ATTR_AES_ACCEL
+aesenc_MC_accel(AESState *ret, const AESState *st, bool be)
+{
+    uint8x16_t t = (uint8x16_t)st->v;
+
+    if (be) {
+        t = aes_accel_bswap(t);
+        t = aes_accel_aesmc(t);
+        t = aes_accel_bswap(t);
+    } else {
+        t = aes_accel_aesmc(t);
+    }
+    ret->v = (AESStateVec)t;
+}
+
+static inline void ATTR_AES_ACCEL
+aesenc_SB_SR_accel(AESState *ret, const AESState *st, bool be)
+{
+    uint8x16_t t = (uint8x16_t)st->v;
+    uint8x16_t z = { };
+
+    if (be) {
+        t = aes_accel_bswap(t);
+        t = aes_accel_aese(t, z);
+        t = aes_accel_bswap(t);
+    } else {
+        t = aes_accel_aese(t, z);
+    }
+    ret->v = (AESStateVec)t;
+}
+
+static inline void ATTR_AES_ACCEL
+aesenc_SB_SR_MC_AK_accel(AESState *ret, const AESState *st,
+                         const AESState *rk, bool be)
+{
+    uint8x16_t t = (uint8x16_t)st->v;
+    uint8x16_t k = (uint8x16_t)rk->v;
+    uint8x16_t z = { };
+
+    if (be) {
+        t = aes_accel_bswap(t);
+        k = aes_accel_bswap(k);
+        t = aes_accel_aese(t, z);
+        t = aes_accel_aesmc(t);
+        t = veorq_u8(t, k);
+        t = aes_accel_bswap(t);
+    } else {
+        t = aes_accel_aese(t, z);
+        t = aes_accel_aesmc(t);
+        t = veorq_u8(t, k);
+    }
+    ret->v = (AESStateVec)t;
+}
+
+static inline void ATTR_AES_ACCEL
+aesdec_IMC_accel(AESState *ret, const AESState *st, bool be)
+{
+    uint8x16_t t = (uint8x16_t)st->v;
+
+    if (be) {
+        t = aes_accel_bswap(t);
+        t = aes_accel_aesimc(t);
+        t = aes_accel_bswap(t);
+    } else {
+        t = aes_accel_aesimc(t);
+    }
+    ret->v = (AESStateVec)t;
+}
+
+static inline void ATTR_AES_ACCEL
+aesdec_ISB_ISR_accel(AESState *ret, const AESState *st, bool be)
+{
+    uint8x16_t t = (uint8x16_t)st->v;
+    uint8x16_t z = { };
+
+    if (be) {
+        t = aes_accel_bswap(t);
+        t = aes_accel_aesd(t, z);
+        t = aes_accel_bswap(t);
+    } else {
+        t = aes_accel_aesd(t, z);
+    }
+    ret->v = (AESStateVec)t;
+}
+
+static inline void ATTR_AES_ACCEL
+aesdec_ISB_ISR_AK_IMC_accel(AESState *ret, const AESState *st,
+                            const AESState *rk, bool be)
+{
+    uint8x16_t t = (uint8x16_t)st->v;
+    uint8x16_t k = (uint8x16_t)rk->v;
+    uint8x16_t z = { };
+
+    if (be) {
+        t = aes_accel_bswap(t);
+        k = aes_accel_bswap(k);
+        t = aes_accel_aesd(t, z);
+        t = veorq_u8(t, k);
+        t = aes_accel_aesimc(t);
+        t = aes_accel_bswap(t);
+    } else {
+        t = aes_accel_aesd(t, z);
+        t = veorq_u8(t, k);
+        t = aes_accel_aesimc(t);
+    }
+    ret->v = (AESStateVec)t;
+}
+
+static inline void ATTR_AES_ACCEL
+aesdec_ISB_ISR_IMC_AK_accel(AESState *ret, const AESState *st,
+                            const AESState *rk, bool be)
+{
+    uint8x16_t t = (uint8x16_t)st->v;
+    uint8x16_t k = (uint8x16_t)rk->v;
+    uint8x16_t z = { };
+
+    if (be) {
+        t = aes_accel_bswap(t);
+        k = aes_accel_bswap(k);
+        t = aes_accel_aesd(t, z);
+        t = aes_accel_aesimc(t);
+        t = veorq_u8(t, k);
+        t = aes_accel_bswap(t);
+    } else {
+        t = aes_accel_aesd(t, z);
+        t = aes_accel_aesimc(t);
+        t = veorq_u8(t, k);
+    }
+    ret->v = (AESStateVec)t;
+}
+
+#endif
diff --git a/host/include/aarch64/host/cpuinfo.h b/host/include/aarch64/host/cpuinfo.h
index 82227890b4..05feeb4f43 100644
--- a/host/include/aarch64/host/cpuinfo.h
+++ b/host/include/aarch64/host/cpuinfo.h
@@ -9,6 +9,7 @@
 #define CPUINFO_ALWAYS    (1u << 0)  /* so cpuinfo is nonzero */
 #define CPUINFO_LSE       (1u << 1)
 #define CPUINFO_LSE2      (1u << 2)
+#define CPUINFO_AES       (1u << 3)

 /* Initialized with a constructor. */
 extern unsigned cpuinfo;
diff --git a/util/cpuinfo-aarch64.c b/util/cpuinfo-aarch64.c
index f99acb7884..ababc39550 100644
--- a/util/cpuinfo-aarch64.c
+++ b/util/cpuinfo-aarch64.c
@@ -56,10 +56,12 @@ unsigned __attribute__((constructor)) cpuinfo_init(void)
     unsigned long hwcap = qemu_getauxval(AT_HWCAP);
     info |= (hwcap & HWCAP_ATOMICS ? CPUINFO_LSE : 0);
     info |= (hwcap & HWCAP_USCAT ? CPUINFO_LSE2 : 0);
+    info |= (hwcap & HWCAP_AES ? CPUINFO_AES: 0);
 #endif
 #ifdef CONFIG_DARWIN
     info |= sysctl_for_bool("hw.optional.arm.FEAT_LSE") * CPUINFO_LSE;
     info |= sysctl_for_bool("hw.optional.arm.FEAT_LSE2") * CPUINFO_LSE2;
+    info |= sysctl_for_bool("hw.optional.arm.FEAT_AES") * CPUINFO_AES;
 #endif

     cpuinfo = info;
Detect AES in cpuinfo; implement the accel hooks.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 host/include/aarch64/host/aes-round.h | 204 ++++++++++++++++++++++++++
 host/include/aarch64/host/cpuinfo.h   |   1 +
 util/cpuinfo-aarch64.c                |   2 +
 3 files changed, 207 insertions(+)
 create mode 100644 host/include/aarch64/host/aes-round.h
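For context, the hooks above are meant to be called from a generic wrapper that
tests HAVE_AES_ACCEL at run time. The wrapper and fallback names below are
assumed for illustration only; the real dispatch presumably lives in the common
crypto aes-round.h added earlier in the series.

/* Illustrative dispatch: use the host instructions when cpuinfo says so. */
static void aesenc_SB_SR_MC_AK(AESState *r, const AESState *st,
                               const AESState *rk, bool be)
{
    if (HAVE_AES_ACCEL) {
        aesenc_SB_SR_MC_AK_accel(r, st, rk, be);
    } else {
        aesenc_SB_SR_MC_AK_gen(r, st, rk, be);  /* assumed generic C fallback */
    }
}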