@@ -15,6 +15,7 @@
#include <linux/types.h>
#include <crypto/sm3.h>
#include <crypto/sm3_base.h>
+#include <asm/cpu_device_id.h>
#include <asm/simd.h>

/* avoid kernel_fpu_begin/end scheduler/rcu stalls */
@@ -119,14 +120,18 @@ static struct shash_alg sm3_avx_alg = {
}
};

+static const struct x86_cpu_id module_cpu_ids[] = {
+ X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
+
static int __init sm3_avx_mod_init(void)
{
const char *feature_name;

- if (!boot_cpu_has(X86_FEATURE_AVX)) {
- pr_info("AVX instruction are not detected.\n");
+ if (!x86_match_cpu(module_cpu_ids))
return -ENODEV;
- }

if (!boot_cpu_has(X86_FEATURE_BMI2)) {
pr_info("BMI2 instruction are not detected.\n");
Like commit aa031b8f702e ("crypto: x86/sha512 - load based on CPU
features"), add module aliases for x86-optimized crypto modules: sm3
based on CPU feature bits so udev gets a chance to load them later in
the boot process when the filesystems are all running.

Signed-off-by: Robert Elliott <elliott@hpe.com>
---
v4 removed second AVX check that is unreachable
---
 arch/x86/crypto/sm3_avx_glue.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)
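
As background (not part of the patch itself), below is a minimal sketch of
the same CPU-feature autoload pattern in a standalone module. The demo_*
names are hypothetical; the sketch only illustrates how the x86_cpu_id
table, MODULE_DEVICE_TABLE(), and x86_match_cpu() fit together:

/*
 * Minimal sketch of CPU-feature based module autoloading.
 * Module and symbol names (demo_*) are hypothetical.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <asm/cpu_device_id.h>

/*
 * The device table is emitted into the module's alias list, so udev can
 * match it against the CPU's modalias and load the module automatically.
 */
static const struct x86_cpu_id demo_cpu_ids[] = {
	X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, demo_cpu_ids);

static int __init demo_init(void)
{
	/*
	 * The alias only gets the module loaded; still re-check at init
	 * time in case it was loaded by hand on a CPU without AVX.
	 */
	if (!x86_match_cpu(demo_cpu_ids))
		return -ENODEV;
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Example of CPU-feature based module autoloading");

Once built, modinfo shows an x86cpu: alias for the module, which udev
matches against the CPU's modalias during coldplug, so the module is
loaded automatically when the running CPU advertises AVX; that is the
late-boot loading behavior the commit message above is after.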