[v2,02/11] crypto: x86/ghash - Use proper helpers to clone request

Message ID e8a0e14c9b573b8e68c07f6f48b013718036004b.1739674648.git.herbert@gondor.apana.org.au
State: New
Series: Multibuffer hashing take two

Commit Message

Herbert Xu Feb. 16, 2025, 3:07 a.m. UTC
Rather than copying a request by hand with memcpy, use the correct
API helpers to set up the new request.  This will matter once the
API helpers start setting up chained requests, as a simple memcpy
will break chaining.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 arch/x86/crypto/ghash-clmulni-intel_glue.c | 23 ++++++++++++++++------
 1 file changed, 17 insertions(+), 6 deletions(-)
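For context, the ahash API already provides a setter for each piece of
request state, and the patch factors those calls into a new
ghash_init_cryptd_req() helper.  A minimal sketch of the underlying
pattern, using the real ahash setters (the function name
forward_ahash_request() is hypothetical, for illustration only):

#include <crypto/hash.h>

/*
 * Sketch: hand a request off to another transform via the API
 * setters instead of a whole-struct memcpy.
 */
static void forward_ahash_request(struct ahash_request *dst,
				  struct ahash_request *src,
				  struct crypto_ahash *tfm)
{
	/* Bind the destination request to its transform. */
	ahash_request_set_tfm(dst, tfm);

	/* Carry over the caller's flags and completion callback. */
	ahash_request_set_callback(dst, src->base.flags,
				   src->base.complete, src->base.data);

	/* Reuse the same input scatterlist, result buffer and length. */
	ahash_request_set_crypt(dst, src->src, src->result, src->nbytes);
}

Going through the setters rather than memcpy() leaves any state the
API manages internally, such as the planned request-chaining fields,
consistent in the destination request.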

Patch

diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
index 41bc02e48916..c759ec808bf1 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
+++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
@@ -189,6 +189,20 @@ static int ghash_async_init(struct ahash_request *req)
 	return crypto_shash_init(desc);
 }
 
+static void ghash_init_cryptd_req(struct ahash_request *req)
+{
+	struct ahash_request *cryptd_req = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
+
+	ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
+	ahash_request_set_callback(cryptd_req, req->base.flags,
+				   req->base.complete, req->base.data);
+	ahash_request_set_crypt(cryptd_req, req->src, req->result,
+				req->nbytes);
+}
+
 static int ghash_async_update(struct ahash_request *req)
 {
 	struct ahash_request *cryptd_req = ahash_request_ctx(req);
@@ -198,8 +212,7 @@ static int ghash_async_update(struct ahash_request *req)
 
 	if (!crypto_simd_usable() ||
 	    (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
-		memcpy(cryptd_req, req, sizeof(*req));
-		ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
+		ghash_init_cryptd_req(req);
 		return crypto_ahash_update(cryptd_req);
 	} else {
 		struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
@@ -216,8 +229,7 @@ static int ghash_async_final(struct ahash_request *req)
 
 	if (!crypto_simd_usable() ||
 	    (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
-		memcpy(cryptd_req, req, sizeof(*req));
-		ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
+		ghash_init_cryptd_req(req);
 		return crypto_ahash_final(cryptd_req);
 	} else {
 		struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
@@ -257,8 +269,7 @@ static int ghash_async_digest(struct ahash_request *req)
 
 	if (!crypto_simd_usable() ||
 	    (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
-		memcpy(cryptd_req, req, sizeof(*req));
-		ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
+		ghash_init_cryptd_req(req);
 		return crypto_ahash_digest(cryptd_req);
 	} else {
 		struct shash_desc *desc = cryptd_shash_desc(cryptd_req);