From patchwork Sun Feb 16 03:07:15 2025 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Herbert Xu X-Patchwork-Id: 865637 Received: from abb.hmeau.com (abb.hmeau.com [144.6.53.87]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 2EADC7E1 for ; Sun, 16 Feb 2025 03:07:18 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=144.6.53.87 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1739675241; cv=none; b=UYcbx0PFWAmjDOKQq630qbto+J6B4r/im0JrVz1nbVcyUlHsluKNYfmzgbIEZGL4kLStsX+jV0+DBd4hZRVSNmdYwOaQ8X7If0hum25GuaJjFCBTRQo0ld0C76aOs/vm2Hufo73RHd4ZZRrNWLk9cBB8C0ON4WhM9bT1neDlZf0= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1739675241; c=relaxed/simple; bh=sQQYWA6pMLOLdbNHlQ2+coyzbhGk4oJjTtOCtgn0uNE=; h=Date:Message-Id:In-Reply-To:References:From:Subject:To:Cc; b=dLk9XMEQzdRIah0mb1UV+XjuoWohUoN/GZoOgjId9MGEP72d4zoA55pDoYnS6F1HVXgfyjryflXI/beUfcLqXOU/n81zV/4f2B/jsNjq/ky+obWz6l7nDCQwXZNcErVObU0adTNqWb9Zx1TFolLzbdkniOOBjisB/5YhQ7n1KLw= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=quarantine dis=none) header.from=gondor.apana.org.au; spf=pass smtp.mailfrom=gondor.apana.org.au; dkim=pass (2048-bit key) header.d=hmeau.com header.i=@hmeau.com header.b=S1lEQjBJ; arc=none smtp.client-ip=144.6.53.87 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=quarantine dis=none) header.from=gondor.apana.org.au Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=gondor.apana.org.au Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=hmeau.com header.i=@hmeau.com header.b="S1lEQjBJ" DKIM-Signature: v=1; a=rsa-sha256; q=dns/txt; c=relaxed/relaxed; d=hmeau.com; s=formenos; h=Cc:To:Subject:From:References:In-Reply-To:Message-Id:Date: 
Sender:Reply-To:MIME-Version:Content-Type:Content-Transfer-Encoding: Content-ID:Content-Description:Resent-Date:Resent-From:Resent-Sender: Resent-To:Resent-Cc:Resent-Message-ID:List-Id:List-Help:List-Unsubscribe: List-Subscribe:List-Post:List-Owner:List-Archive; bh=dVrRxU7foExstX9hgCMbnUQLE4gwkVI3GUAFfq9oi5M=; b=S1lEQjBJiuIht80/JAx8UliTTh E3p5UZbok556snrecR30y01eZE1SB8knDAgIQgNBN/Gen1bamGgIrNriaS23hsTEK3cjvMDJAO2Nq u2UPRB60nuClto+rJFxJCsUqfFmgdBqLvxr5bNZ5TEoqVqRdqgVVwxsdmmWxpTFf8kwbW/R2W1bKA oU09QI0hVwxgRHc/Vwha+rYFdZjNV1UKFNihXVH0XTo6sL8eI4tMCwTMPiWCGapRTmJO69X3qSiJA s453B4zvOEP1kKziP2vAN5eEB4jolmgEqxchK6XYVTRLD8zJf8GNPqpz3WBoJbmfpS7jLgeAii1z7 bruxobvQ==; Received: from loth.rohan.me.apana.org.au ([192.168.167.2]) by formenos.hmeau.com with smtp (Exim 4.96 #2 (Debian)) id 1tjUn4-000gXh-1X; Sun, 16 Feb 2025 11:07:16 +0800 Received: by loth.rohan.me.apana.org.au (sSMTP sendmail emulation); Sun, 16 Feb 2025 11:07:15 +0800 Date: Sun, 16 Feb 2025 11:07:15 +0800 Message-Id: In-Reply-To: References: From: Herbert Xu Subject: [v2 PATCH 02/11] crypto: x86/ghash - Use proper helpers to clone request To: Linux Crypto Mailing List Cc: Eric Biggers , Ard Biesheuvel , Megha Dey , Tim Chen Precedence: bulk X-Mailing-List: linux-crypto@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: Rather than copying a request by hand with memcpy, use the correct API helpers to setup the new request. This will matter once the API helpers start setting up chained requests as a simple memcpy will break chaining. 
Signed-off-by: Herbert Xu --- arch/x86/crypto/ghash-clmulni-intel_glue.c | 23 ++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c index 41bc02e48916..c759ec808bf1 100644 --- a/arch/x86/crypto/ghash-clmulni-intel_glue.c +++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c @@ -189,6 +189,20 @@ static int ghash_async_init(struct ahash_request *req) return crypto_shash_init(desc); } +static void ghash_init_cryptd_req(struct ahash_request *req) +{ + struct ahash_request *cryptd_req = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); + struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; + + ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base); + ahash_request_set_callback(cryptd_req, req->base.flags, + req->base.complete, req->base.data); + ahash_request_set_crypt(cryptd_req, req->src, req->result, + req->nbytes); +} + static int ghash_async_update(struct ahash_request *req) { struct ahash_request *cryptd_req = ahash_request_ctx(req); @@ -198,8 +212,7 @@ static int ghash_async_update(struct ahash_request *req) if (!crypto_simd_usable() || (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) { - memcpy(cryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base); + ghash_init_cryptd_req(req); return crypto_ahash_update(cryptd_req); } else { struct shash_desc *desc = cryptd_shash_desc(cryptd_req); @@ -216,8 +229,7 @@ static int ghash_async_final(struct ahash_request *req) if (!crypto_simd_usable() || (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) { - memcpy(cryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base); + ghash_init_cryptd_req(req); return crypto_ahash_final(cryptd_req); } else { struct shash_desc *desc = cryptd_shash_desc(cryptd_req); @@ -257,8 +269,7 @@ static int ghash_async_digest(struct ahash_request *req) 
if (!crypto_simd_usable() || (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) { - memcpy(cryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base); + ghash_init_cryptd_req(req); return crypto_ahash_digest(cryptd_req); } else { struct shash_desc *desc = cryptd_shash_desc(cryptd_req); From patchwork Sun Feb 16 03:07:19 2025 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Herbert Xu X-Patchwork-Id: 865636 Received: from abb.hmeau.com (abb.hmeau.com [144.6.53.87]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 4C55BC8E0 for ; Sun, 16 Feb 2025 03:07:23 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=144.6.53.87 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1739675245; cv=none; b=jrIT7WhSgFdkX2xJ9iuq/Kt9PNBmj6Eb/Ju/hdQ2TLZtaMFKm4YnQu6C6fGc9//+bmDdmdbmoE2G9x7AtG6dZFj2hdb9kchYyBdjsTPcH/C4DJqq9xVHvyudjQtNFGKZpVTMd6vt9Ton3cje4RsghcGLKTFHiPGL46gJbnTaZ/o= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1739675245; c=relaxed/simple; bh=6uYRfnfABTZI+87Pbsa7eyHiBir6dg+GdVVyelYnnls=; h=Date:Message-Id:In-Reply-To:References:From:Subject:To:Cc; b=tb81CuWd0kVE8eF0gJ80XAUy1YwI1r4sPap6WlnRWT0DEGsatuG6kqO2fJQvcM8KEo2blXSLsha2exZ/NScZQImPoYfKcoTEtONls9Edirf3YS70j4+qLG7e79x6BgBS4W8IX7sZR2j1jphSh5cjMY8iE+z7I0ng8rgZTw0FEkI= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=quarantine dis=none) header.from=gondor.apana.org.au; spf=pass smtp.mailfrom=gondor.apana.org.au; dkim=pass (2048-bit key) header.d=hmeau.com header.i=@hmeau.com header.b=HVRK4mDo; arc=none smtp.client-ip=144.6.53.87 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=quarantine dis=none) header.from=gondor.apana.org.au Authentication-Results: smtp.subspace.kernel.org; spf=pass 
smtp.mailfrom=gondor.apana.org.au Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=hmeau.com header.i=@hmeau.com header.b="HVRK4mDo" DKIM-Signature: v=1; a=rsa-sha256; q=dns/txt; c=relaxed/relaxed; d=hmeau.com; s=formenos; h=Cc:To:Subject:From:References:In-Reply-To:Message-Id:Date: Sender:Reply-To:MIME-Version:Content-Type:Content-Transfer-Encoding: Content-ID:Content-Description:Resent-Date:Resent-From:Resent-Sender: Resent-To:Resent-Cc:Resent-Message-ID:List-Id:List-Help:List-Unsubscribe: List-Subscribe:List-Post:List-Owner:List-Archive; bh=Hwmc8xPh4SMYBYUlF10dsEfk/3KiAvetRcQZKKJ4QxI=; b=HVRK4mDo0TU/66pYnGOpxlxyTL wFh7u51tDDkuFnsM8Fr4YPnN9lDRjqOVW9LtkWTlTPIfPoXc2qHN9awl7LcI1c54P87rfohEGS9t/ X6VNWlMf4IOqigv4TMZb5SVsNxfhTHiAXoGhTQN0hLQ5pgB7XBYDRYnRHLvDvj55QVIB8kWhKHacY ipwgSr1cx+BxtjyOc3CD/OZfg8XrnICIdfcpEVruBByKydKp/Ol2n2zVvU/9PsqGh5dwLQ3I6I9C8 HGAb0dhF3GdK2mIU+ZlFYuMXnR9dUQKdECO0osDII+6oUdVSgkZx1VMK76+1Nq5Uocg1BAC8M+luz wF0V8F3Q==; Received: from loth.rohan.me.apana.org.au ([192.168.167.2]) by formenos.hmeau.com with smtp (Exim 4.96 #2 (Debian)) id 1tjUn9-000gY7-09; Sun, 16 Feb 2025 11:07:20 +0800 Received: by loth.rohan.me.apana.org.au (sSMTP sendmail emulation); Sun, 16 Feb 2025 11:07:19 +0800 Date: Sun, 16 Feb 2025 11:07:19 +0800 Message-Id: <7e79533fbbe4e0f56376963347b349935e6a343d.1739674648.git.herbert@gondor.apana.org.au> In-Reply-To: References: From: Herbert Xu Subject: [v2 PATCH 04/11] crypto: tcrypt - Restore multibuffer ahash tests To: Linux Crypto Mailing List Cc: Eric Biggers , Ard Biesheuvel , Megha Dey , Tim Chen Precedence: bulk X-Mailing-List: linux-crypto@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: This patch is a revert of commit 388ac25efc8ce3bf9768ce7bf24268d6fac285d5. As multibuffer ahash is coming back in the form of request chaining, restore the multibuffer ahash tests using the new interface. 
Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 231 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 231 insertions(+) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index e1a74cb2cfbe..f618f61c5615 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -716,6 +716,207 @@ static inline int do_one_ahash_op(struct ahash_request *req, int ret) return crypto_wait_req(ret, wait); } +struct test_mb_ahash_data { + struct scatterlist sg[XBUFSIZE]; + char result[64]; + struct ahash_request *req; + struct crypto_wait wait; + char *xbuf[XBUFSIZE]; +}; + +static inline int do_mult_ahash_op(struct test_mb_ahash_data *data, u32 num_mb, + int *rc) +{ + int i, err; + + /* Fire up a bunch of concurrent requests */ + err = crypto_ahash_digest(data[0].req); + + /* Wait for all requests to finish */ + err = crypto_wait_req(err, &data[0].wait); + if (num_mb < 2) + return err; + + for (i = 0; i < num_mb; i++) { + rc[i] = ahash_request_err(data[i].req); + if (rc[i]) { + pr_info("concurrent request %d error %d\n", i, rc[i]); + err = rc[i]; + } + } + + return err; +} + +static int test_mb_ahash_jiffies(struct test_mb_ahash_data *data, int blen, + int secs, u32 num_mb) +{ + unsigned long start, end; + int bcount; + int ret = 0; + int *rc; + + rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL); + if (!rc) + return -ENOMEM; + + for (start = jiffies, end = start + secs * HZ, bcount = 0; + time_before(jiffies, end); bcount++) { + ret = do_mult_ahash_op(data, num_mb, rc); + if (ret) + goto out; + } + + pr_cont("%d operations in %d seconds (%llu bytes)\n", + bcount * num_mb, secs, (u64)bcount * blen * num_mb); + +out: + kfree(rc); + return ret; +} + +static int test_mb_ahash_cycles(struct test_mb_ahash_data *data, int blen, + u32 num_mb) +{ + unsigned long cycles = 0; + int ret = 0; + int i; + int *rc; + + rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL); + if (!rc) + return -ENOMEM; + + /* Warm-up run. 
*/ + for (i = 0; i < 4; i++) { + ret = do_mult_ahash_op(data, num_mb, rc); + if (ret) + goto out; + } + + /* The real thing. */ + for (i = 0; i < 8; i++) { + cycles_t start, end; + + start = get_cycles(); + ret = do_mult_ahash_op(data, num_mb, rc); + end = get_cycles(); + + if (ret) + goto out; + + cycles += end - start; + } + + pr_cont("1 operation in %lu cycles (%d bytes)\n", + (cycles + 4) / (8 * num_mb), blen); + +out: + kfree(rc); + return ret; +} + +static void test_mb_ahash_speed(const char *algo, unsigned int secs, + struct hash_speed *speed, u32 num_mb) +{ + struct test_mb_ahash_data *data; + struct crypto_ahash *tfm; + unsigned int i, j, k; + int ret; + + data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL); + if (!data) + return; + + tfm = crypto_alloc_ahash(algo, 0, 0); + if (IS_ERR(tfm)) { + pr_err("failed to load transform for %s: %ld\n", + algo, PTR_ERR(tfm)); + goto free_data; + } + + for (i = 0; i < num_mb; ++i) { + if (testmgr_alloc_buf(data[i].xbuf)) + goto out; + + crypto_init_wait(&data[i].wait); + + data[i].req = ahash_request_alloc(tfm, GFP_KERNEL); + if (!data[i].req) { + pr_err("alg: hash: Failed to allocate request for %s\n", + algo); + goto out; + } + + + if (i) { + ahash_request_set_callback(data[i].req, 0, NULL, NULL); + ahash_request_chain(data[i].req, data[0].req); + } else + ahash_request_set_callback(data[0].req, 0, + crypto_req_done, + &data[0].wait); + + sg_init_table(data[i].sg, XBUFSIZE); + for (j = 0; j < XBUFSIZE; j++) { + sg_set_buf(data[i].sg + j, data[i].xbuf[j], PAGE_SIZE); + memset(data[i].xbuf[j], 0xff, PAGE_SIZE); + } + } + + pr_info("\ntesting speed of multibuffer %s (%s)\n", algo, + get_driver_name(crypto_ahash, tfm)); + + for (i = 0; speed[i].blen != 0; i++) { + /* For some reason this only tests digests. 
*/ + if (speed[i].blen != speed[i].plen) + continue; + + if (speed[i].blen > XBUFSIZE * PAGE_SIZE) { + pr_err("template (%u) too big for tvmem (%lu)\n", + speed[i].blen, XBUFSIZE * PAGE_SIZE); + goto out; + } + + if (klen) + crypto_ahash_setkey(tfm, tvmem[0], klen); + + for (k = 0; k < num_mb; k++) + ahash_request_set_crypt(data[k].req, data[k].sg, + data[k].result, speed[i].blen); + + pr_info("test%3u " + "(%5u byte blocks,%5u bytes per update,%4u updates): ", + i, speed[i].blen, speed[i].plen, + speed[i].blen / speed[i].plen); + + if (secs) { + ret = test_mb_ahash_jiffies(data, speed[i].blen, secs, + num_mb); + cond_resched(); + } else { + ret = test_mb_ahash_cycles(data, speed[i].blen, num_mb); + } + + + if (ret) { + pr_err("At least one hashing failed ret=%d\n", ret); + break; + } + } + +out: + ahash_request_free(data[0].req); + + for (k = 0; k < num_mb; ++k) + testmgr_free_buf(data[k].xbuf); + + crypto_free_ahash(tfm); + +free_data: + kfree(data); +} + static int test_ahash_jiffies_digest(struct ahash_request *req, int blen, char *out, int secs) { @@ -2391,6 +2592,36 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) test_ahash_speed("sm3", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; fallthrough; + case 450: + test_mb_ahash_speed("sha1", sec, generic_hash_speed_template, + num_mb); + if (mode > 400 && mode < 500) break; + fallthrough; + case 451: + test_mb_ahash_speed("sha256", sec, generic_hash_speed_template, + num_mb); + if (mode > 400 && mode < 500) break; + fallthrough; + case 452: + test_mb_ahash_speed("sha512", sec, generic_hash_speed_template, + num_mb); + if (mode > 400 && mode < 500) break; + fallthrough; + case 453: + test_mb_ahash_speed("sm3", sec, generic_hash_speed_template, + num_mb); + if (mode > 400 && mode < 500) break; + fallthrough; + case 454: + test_mb_ahash_speed("streebog256", sec, + generic_hash_speed_template, num_mb); + if (mode > 400 && mode < 500) break; + fallthrough; + case 
455: + test_mb_ahash_speed("streebog512", sec, + generic_hash_speed_template, num_mb); + if (mode > 400 && mode < 500) break; + fallthrough; case 499: break; From patchwork Sun Feb 16 03:07:24 2025 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Herbert Xu X-Patchwork-Id: 865635 Received: from abb.hmeau.com (abb.hmeau.com [144.6.53.87]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 5042E7E1 for ; Sun, 16 Feb 2025 03:07:27 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=144.6.53.87 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1739675250; cv=none; b=fPJvF6F7x+6GxZEnnqg5YCuX8luJCRx/rz4Vtni1/8Zez8fp3JoVISPMRrvd3uPB3RkHpMfrJpQ0MA/ZOeGgi+/Kxxc+IXtzHTTQqSqYE3HThzO5BK918M9jPBC33a1R5N6hc+us9EIi8uZLlkU2du1fLEnYxbemM14c6uR6mhA= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1739675250; c=relaxed/simple; bh=PPJR8P5RQRLlbn7T1W0+NiGHFT0qPn6JyhQBqNbAlbU=; h=Date:Message-Id:In-Reply-To:References:From:Subject:To:Cc; b=Ihm1nwBpz4DlOcK/eiyajF69wMY2o2+ASAMLxNalflo6acLiS3Q/yzMZLCbJwFQ5Zp+wBJ8LuHvBURkfDe4timaVsON5lr1t3Zi0As6tLpjcYZVFQUN4EGmYtgYb9hcOr4W0FUDRSoZKUYXjgD/3baKQ+XRHpW8aA8ASAXMZnAw= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=quarantine dis=none) header.from=gondor.apana.org.au; spf=pass smtp.mailfrom=gondor.apana.org.au; dkim=pass (2048-bit key) header.d=hmeau.com header.i=@hmeau.com header.b=IMpqNtIl; arc=none smtp.client-ip=144.6.53.87 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=quarantine dis=none) header.from=gondor.apana.org.au Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=gondor.apana.org.au Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=hmeau.com header.i=@hmeau.com 
header.b="IMpqNtIl" DKIM-Signature: v=1; a=rsa-sha256; q=dns/txt; c=relaxed/relaxed; d=hmeau.com; s=formenos; h=Cc:To:Subject:From:References:In-Reply-To:Message-Id:Date: Sender:Reply-To:MIME-Version:Content-Type:Content-Transfer-Encoding: Content-ID:Content-Description:Resent-Date:Resent-From:Resent-Sender: Resent-To:Resent-Cc:Resent-Message-ID:List-Id:List-Help:List-Unsubscribe: List-Subscribe:List-Post:List-Owner:List-Archive; bh=FGKxPWwVEcqQqOc0ZVw4JhKmYFcoBhe/DT7hf3S6hE8=; b=IMpqNtIlUfnhyKqSGUqD+pol2+ 9r8+OV3SqP1ebXCBuz5JfvEarXbB8Vz4hddTrINbpVyp71c4AevoVu8ACYudNp9DNGOgcPc3Xg9ZP TN2+y5Rt4JC7hPCnfC9igsISLKSooFMkQbd7E57Vzw2VNYZIIjlKnnkr+f8ty03rZKTqBSslGcFGA udNlrBmMIVofQeDHGu2f+4I/orFEkgKePKyud76XgJxtg9KyHEvY9poq3hg38GEYYXzLVVLAFlqqx Uzo2MoknfRl3cqbOKKfWbqXSQLQotTG7SLEJ+v7Z6RUGOik6+yEdprdG/lvzDAHSTuznV75de8/wN OPlobEkA==; Received: from loth.rohan.me.apana.org.au ([192.168.167.2]) by formenos.hmeau.com with smtp (Exim 4.96 #2 (Debian)) id 1tjUnD-000gYv-2C; Sun, 16 Feb 2025 11:07:25 +0800 Received: by loth.rohan.me.apana.org.au (sSMTP sendmail emulation); Sun, 16 Feb 2025 11:07:24 +0800 Date: Sun, 16 Feb 2025 11:07:24 +0800 Message-Id: <91e2551c839a649fea10a171d9ae0dde104e5679.1739674648.git.herbert@gondor.apana.org.au> In-Reply-To: References: From: Herbert Xu Subject: [v2 PATCH 06/11] crypto: ahash - Set default reqsize from ahash_alg To: Linux Crypto Mailing List Cc: Eric Biggers , Ard Biesheuvel , Megha Dey , Tim Chen Precedence: bulk X-Mailing-List: linux-crypto@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: Add a reqsize field to struct ahash_alg and use it to set the default reqsize so that algorithms with a static reqsize are not forced to create an init_tfm function. 
Signed-off-by: Herbert Xu --- crypto/ahash.c | 4 ++++ include/crypto/hash.h | 3 +++ 2 files changed, 7 insertions(+) diff --git a/crypto/ahash.c b/crypto/ahash.c index 40ccaf4c0cd6..6b19fa6fc628 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -862,6 +862,7 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) struct ahash_alg *alg = crypto_ahash_alg(hash); crypto_ahash_set_statesize(hash, alg->halg.statesize); + crypto_ahash_set_reqsize(hash, alg->reqsize); if (tfm->__crt_alg->cra_type == &crypto_shash_type) return crypto_init_ahash_using_shash(tfm); @@ -1027,6 +1028,9 @@ static int ahash_prepare_alg(struct ahash_alg *alg) if (alg->halg.statesize == 0) return -EINVAL; + if (alg->reqsize && alg->reqsize < alg->halg.statesize) + return -EINVAL; + err = hash_prepare_alg(&alg->halg); if (err) return err; diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 4e87e39679cb..2aa83ee0ec98 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -135,6 +135,7 @@ struct ahash_request { * This is a counterpart to @init_tfm, used to remove * various changes set in @init_tfm. * @clone_tfm: Copy transform into new object, may allocate memory. + * @reqsize: Size of the request context. 
* @halg: see struct hash_alg_common */ struct ahash_alg { @@ -151,6 +152,8 @@ struct ahash_alg { void (*exit_tfm)(struct crypto_ahash *tfm); int (*clone_tfm)(struct crypto_ahash *dst, struct crypto_ahash *src); + unsigned int reqsize; + struct hash_alg_common halg; }; From patchwork Sun Feb 16 03:07:31 2025 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Herbert Xu X-Patchwork-Id: 865634 Received: from abb.hmeau.com (abb.hmeau.com [144.6.53.87]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id CB7BB10A1F for ; Sun, 16 Feb 2025 03:07:34 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=144.6.53.87 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1739675256; cv=none; b=ER5mXAo8XGCpQxUSIkkHK21Skp0gSD1VYaVt/wDSvo9I3FnQuGSvUDUNy05cVZ4mzYOmTMAJIeTUmsZr6mJaKH7I2xqaBiOWK5AF4oTvFaiSqjsLt8513frO9KPsiny+yMJcbLbNDoAykhJMstDmC0gZzwEb5AjGQBzVRNDPiRU= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1739675256; c=relaxed/simple; bh=35yDBQpzpN6NuQDIyD47ciJFy71wiMY/6pgQ0NL/268=; h=Date:Message-Id:In-Reply-To:References:From:Subject:To:Cc; b=YU1r8YXNsvUD9pU3pIityiv83wHOOYmXQNoU2LmfI6uv99MTG3JZZGKrXF+vdYs/7KZQ3pBtrHBF+A25ZsqUjA8ff/eW3sVGYflyIcrh/bOPl+rHIU1LZc4dsCtkrw1r9v2sOot2XeEArdcNt/Bb+32Gdi5GfW7ETw5NAxjcVug= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=quarantine dis=none) header.from=gondor.apana.org.au; spf=pass smtp.mailfrom=gondor.apana.org.au; dkim=pass (2048-bit key) header.d=hmeau.com header.i=@hmeau.com header.b=ZIF93BTV; arc=none smtp.client-ip=144.6.53.87 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=quarantine dis=none) header.from=gondor.apana.org.au Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=gondor.apana.org.au 
Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=hmeau.com header.i=@hmeau.com header.b="ZIF93BTV" DKIM-Signature: v=1; a=rsa-sha256; q=dns/txt; c=relaxed/relaxed; d=hmeau.com; s=formenos; h=Cc:To:Subject:From:References:In-Reply-To:Message-Id:Date: Sender:Reply-To:MIME-Version:Content-Type:Content-Transfer-Encoding: Content-ID:Content-Description:Resent-Date:Resent-From:Resent-Sender: Resent-To:Resent-Cc:Resent-Message-ID:List-Id:List-Help:List-Unsubscribe: List-Subscribe:List-Post:List-Owner:List-Archive; bh=/Gy8ojeyn/pSetWrgiMW7tNsrPDhJ+xUsPdhwiBTYgM=; b=ZIF93BTVB/yehw23wiBlQJ0SKE sX28cEjd4VVnom8X7mgS1YsYUOFY2ikCh3Lb1MdxP95UbUZSmsMxDswoKZ96gZw/Y+blGM2RKGyAJ ZGi8v7m4LuhC4NGp/RB4TCnLwyecUzJaWZduKUDU5Z/bFUBt4QH7ifjKkpWyx5Gdsgq80EqQRIT4G J6x+lYf6pu01I51xp33+HgWsEfX3eEPcHM+mYpmoQqn479jFDo4hpE78wmtfwbltTiQYkTZngsbpw fbqvMEr0s1lUs0+kKrTOo9CpU+AKwqKWPeSgDtwyOvti00usNugRHwigKTjBmllYZKs84UBU5GlEj GlZULOrw==; Received: from loth.rohan.me.apana.org.au ([192.168.167.2]) by formenos.hmeau.com with smtp (Exim 4.96 #2 (Debian)) id 1tjUnK-000gZw-1z; Sun, 16 Feb 2025 11:07:32 +0800 Received: by loth.rohan.me.apana.org.au (sSMTP sendmail emulation); Sun, 16 Feb 2025 11:07:31 +0800 Date: Sun, 16 Feb 2025 11:07:31 +0800 Message-Id: In-Reply-To: References: From: Herbert Xu Subject: [v2 PATCH 09/11] crypto: hash - Add sync hash interface To: Linux Crypto Mailing List Cc: Eric Biggers , Ard Biesheuvel , Megha Dey , Tim Chen Precedence: bulk X-Mailing-List: linux-crypto@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: Introduce a new sync hash interface based on ahash, similar to sync skcipher. It will replace shash for existing users. 
Signed-off-by: Herbert Xu --- crypto/ahash.c | 37 ++++++++++++++++ include/crypto/hash.h | 100 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 137 insertions(+) diff --git a/crypto/ahash.c b/crypto/ahash.c index 6b19fa6fc628..fafce2e47a78 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -949,6 +949,27 @@ struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type, } EXPORT_SYMBOL_GPL(crypto_alloc_ahash); +struct crypto_sync_hash *crypto_alloc_sync_hash(const char *alg_name, + u32 type, u32 mask) +{ + struct crypto_ahash *tfm; + + /* Only sync algorithms allowed. */ + mask |= CRYPTO_ALG_ASYNC; + type &= ~CRYPTO_ALG_ASYNC; + + tfm = crypto_alloc_ahash(alg_name, type, mask); + + if (!IS_ERR(tfm) && WARN_ON(crypto_ahash_reqsize(tfm) > + MAX_SYNC_HASH_REQSIZE)) { + crypto_free_ahash(tfm); + return ERR_PTR(-EINVAL); + } + + return container_of(tfm, struct crypto_sync_hash, base); +} +EXPORT_SYMBOL_GPL(crypto_alloc_sync_hash); + int crypto_has_ahash(const char *alg_name, u32 type, u32 mask) { return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask); @@ -1123,5 +1144,21 @@ void ahash_request_free(struct ahash_request *req) } EXPORT_SYMBOL_GPL(ahash_request_free); +int crypto_sync_hash_digest(struct crypto_sync_hash *tfm, const u8 *data, + unsigned int len, u8 *out) +{ + SYNC_HASH_REQUEST_ON_STACK(req, tfm); + int err; + + ahash_request_set_callback(req, 0, NULL, NULL); + ahash_request_set_virt(req, data, out, len); + err = crypto_ahash_digest(req); + + ahash_request_zero(req); + + return err; +} +EXPORT_SYMBOL_GPL(crypto_sync_hash_digest); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Asynchronous cryptographic hash type"); diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 2aa83ee0ec98..f6e0c44331a3 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -8,6 +8,7 @@ #ifndef _CRYPTO_HASH_H #define _CRYPTO_HASH_H +#include #include #include #include @@ -162,6 +163,8 @@ struct shash_desc { void *__ctx[] 
__aligned(ARCH_SLAB_MINALIGN); }; +struct sync_hash_requests; + #define HASH_MAX_DIGESTSIZE 64 /* @@ -169,12 +172,30 @@ struct shash_desc { * containing a 'struct sha3_state'. */ #define HASH_MAX_DESCSIZE (sizeof(struct shash_desc) + 360) +#define MAX_SYNC_HASH_REQSIZE HASH_MAX_DESCSIZE #define SHASH_DESC_ON_STACK(shash, ctx) \ char __##shash##_desc[sizeof(struct shash_desc) + HASH_MAX_DESCSIZE] \ __aligned(__alignof__(struct shash_desc)); \ struct shash_desc *shash = (struct shash_desc *)__##shash##_desc +#define SYNC_HASH_REQUEST_ON_STACK(name, _tfm) \ + char __##name##_req[sizeof(struct ahash_request) + \ + MAX_SYNC_HASH_REQSIZE \ + ] CRYPTO_MINALIGN_ATTR; \ + struct ahash_request *name = \ + (((struct ahash_request *)__##name##_req)->base.tfm = \ + crypto_sync_hash_tfm((_tfm)), \ + (void *)__##name##_req) + +#define SYNC_HASH_REQUESTS_ON_STACK(name, _n, _tfm) \ + char __##name##_req[(_n) * ALIGN(sizeof(struct ahash_request) + \ + MAX_SYNC_HASH_REQSIZE, \ + CRYPTO_MINALIGN) \ + ] CRYPTO_MINALIGN_ATTR; \ + struct sync_hash_requests *name = sync_hash_requests_on_stack_init( \ + __##name##_req, sizeof(__##name##_req), (_tfm)) + /** * struct shash_alg - synchronous message digest definition * @init: see struct ahash_alg @@ -241,6 +262,10 @@ struct crypto_shash { struct crypto_tfm base; }; +struct crypto_sync_hash { + struct crypto_ahash base; +}; + /** * DOC: Asynchronous Message Digest API * @@ -273,6 +298,9 @@ static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm) struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type, u32 mask); +struct crypto_sync_hash *crypto_alloc_sync_hash(const char *alg_name, + u32 type, u32 mask); + struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *tfm); static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm) @@ -280,6 +308,12 @@ static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm) return &tfm->base; } +static inline struct crypto_tfm 
*crypto_sync_hash_tfm( + struct crypto_sync_hash *tfm) +{ + return crypto_ahash_tfm(&tfm->base); +} + /** * crypto_free_ahash() - zeroize and free the ahash handle * @tfm: cipher handle to be freed @@ -291,6 +325,11 @@ static inline void crypto_free_ahash(struct crypto_ahash *tfm) crypto_destroy_tfm(tfm, crypto_ahash_tfm(tfm)); } +static inline void crypto_free_sync_hash(struct crypto_sync_hash *tfm) +{ + crypto_free_ahash(&tfm->base); +} + /** * crypto_has_ahash() - Search for the availability of an ahash. * @alg_name: is the cra_name / name or cra_driver_name / driver name of the @@ -313,6 +352,12 @@ static inline const char *crypto_ahash_driver_name(struct crypto_ahash *tfm) return crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm)); } +static inline const char *crypto_sync_hash_driver_name( + struct crypto_sync_hash *tfm) +{ + return crypto_ahash_driver_name(&tfm->base); +} + /** * crypto_ahash_blocksize() - obtain block size for cipher * @tfm: cipher handle @@ -327,6 +372,12 @@ static inline unsigned int crypto_ahash_blocksize(struct crypto_ahash *tfm) return crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); } +static inline unsigned int crypto_sync_hash_blocksize( + struct crypto_sync_hash *tfm) +{ + return crypto_ahash_blocksize(&tfm->base); +} + static inline struct hash_alg_common *__crypto_hash_alg_common( struct crypto_alg *alg) { @@ -354,6 +405,12 @@ static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm) return crypto_hash_alg_common(tfm)->digestsize; } +static inline unsigned int crypto_sync_hash_digestsize( + struct crypto_sync_hash *tfm) +{ + return crypto_ahash_digestsize(&tfm->base); +} + /** * crypto_ahash_statesize() - obtain size of the ahash state * @tfm: cipher handle @@ -369,6 +426,12 @@ static inline unsigned int crypto_ahash_statesize(struct crypto_ahash *tfm) return tfm->statesize; } +static inline unsigned int crypto_sync_hash_statesize( + struct crypto_sync_hash *tfm) +{ + return crypto_ahash_statesize(&tfm->base); +} 
+ static inline u32 crypto_ahash_get_flags(struct crypto_ahash *tfm) { return crypto_tfm_get_flags(crypto_ahash_tfm(tfm)); @@ -877,6 +940,9 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data, int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data, unsigned int len, u8 *out); +int crypto_sync_hash_digest(struct crypto_sync_hash *tfm, const u8 *data, + unsigned int len, u8 *out); + /** * crypto_shash_export() - extract operational state for message digest * @desc: reference to the operational state handle whose state is exported @@ -982,6 +1048,13 @@ static inline void shash_desc_zero(struct shash_desc *desc) sizeof(*desc) + crypto_shash_descsize(desc->tfm)); } +static inline void ahash_request_zero(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + + memzero_explicit(req, sizeof(*req) + crypto_ahash_reqsize(tfm)); +} + static inline int ahash_request_err(struct ahash_request *req) { return req->base.err; @@ -992,4 +1065,31 @@ static inline bool ahash_is_async(struct crypto_ahash *tfm) return crypto_tfm_is_async(&tfm->base); } +static inline struct ahash_request *sync_hash_requests( + struct sync_hash_requests *reqs, int i) +{ + unsigned unit = sizeof(struct ahash_request) + MAX_SYNC_HASH_REQSIZE; + unsigned alunit = ALIGN(unit, CRYPTO_MINALIGN); + + return (void *)((char *)reqs + i * alunit); +} + +static inline struct sync_hash_requests *sync_hash_requests_on_stack_init( + char *buf, unsigned len, struct crypto_sync_hash *tfm) +{ + unsigned unit = sizeof(struct ahash_request) + MAX_SYNC_HASH_REQSIZE; + unsigned alunit = ALIGN(unit, CRYPTO_MINALIGN); + struct sync_hash_requests *reqs = (void *)buf; + int n = len / alunit; + int i; + + for (i = 0; i < n; i++) { + struct ahash_request *req = sync_hash_requests(reqs, i); + + req->base.tfm = crypto_sync_hash_tfm(tfm); + } + + return reqs; +} + #endif /* _CRYPTO_HASH_H */ From patchwork Sun Feb 16 03:07:33 2025 Content-Type: text/plain; charset="utf-8" 
MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Herbert Xu X-Patchwork-Id: 865633 Received: from abb.hmeau.com (abb.hmeau.com [144.6.53.87]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 6613510A1F for ; Sun, 16 Feb 2025 03:07:37 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=144.6.53.87 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1739675259; cv=none; b=LPDpBuvy0DhvQ+iRdfRKtIFZZWLZexSrvWQIfwBxAhQwMLGMAsDLphMqlaTsBGL3W3pOg8nRsZSuu46FI3+Y/9hwbBi23vmlEVfhjgAWP5NZcC7OOPKylrG06oflAVb5daPgp6A/HQ7QpF5BO/TE6FWej93NRE3l/BZGYhgq6uQ= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1739675259; c=relaxed/simple; bh=3n7dW+Dn06EC4hrLV6q4tdY3D7DDC32kKDRs/inG9Dc=; h=Date:Message-Id:In-Reply-To:References:From:Subject:To:Cc; b=NiilKUWE6OhNv1VIpNMY5Xf1UaXUqmhCNIKwQLZo5ZfxM+fPZ9p8tqKA9wZvF/SoLvpeMbRji9Gjc9LS2pxLPQ9PC7VOu3CI7KrkU8kzez2uLsZjp/6aFPMecnPl61LdFPEh6K2FaRf//fF8gZaYbTvzwrkQvkIXrdcoAvcl69M= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=quarantine dis=none) header.from=gondor.apana.org.au; spf=pass smtp.mailfrom=gondor.apana.org.au; dkim=pass (2048-bit key) header.d=hmeau.com header.i=@hmeau.com header.b=Z8OREJTD; arc=none smtp.client-ip=144.6.53.87 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=quarantine dis=none) header.from=gondor.apana.org.au Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=gondor.apana.org.au Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=hmeau.com header.i=@hmeau.com header.b="Z8OREJTD" DKIM-Signature: v=1; a=rsa-sha256; q=dns/txt; c=relaxed/relaxed; d=hmeau.com; s=formenos; h=Cc:To:Subject:From:References:In-Reply-To:Message-Id:Date: Sender:Reply-To:MIME-Version:Content-Type:Content-Transfer-Encoding: 
Content-ID:Content-Description:Resent-Date:Resent-From:Resent-Sender: Resent-To:Resent-Cc:Resent-Message-ID:List-Id:List-Help:List-Unsubscribe: List-Subscribe:List-Post:List-Owner:List-Archive; bh=AfJ3SA/bRPE6cedgO69rzRNOIPPNNWgDTd0omE6JgLc=; b=Z8OREJTDYVUfKTZNaS9n5vQkyz GCbtht/dIVEDcoOoHla0MGNzDET4NKthaT77gOnTrLbukhIQjgB8gj2NBsU7lERg72wWZBzA1Fu/A w/Zya0eJBzWNBNVhAMnCL4q73t2Yu1oRMtVXjAZDYRgcJ6ClBjTun/vpkgADIcVyJ08pk2YW1O9PB mGo7/s3/RWFuMzVmGMgcSRMgRCzql3ED/Eo1uzX4xuzrZydmfHizv15qS/MJfmOmvIrBasSMnUb51 +DVsErh3GSwT2drO6z/qrCm97lxwzJ9YxNi9w+qwtTBPjys/KfYBLH+zJiUZHmxC81Rc5oyN1slKm YBREdUvQ==; Received: from loth.rohan.me.apana.org.au ([192.168.167.2]) by formenos.hmeau.com with smtp (Exim 4.96 #2 (Debian)) id 1tjUnM-000ga9-2r; Sun, 16 Feb 2025 11:07:34 +0800 Received: by loth.rohan.me.apana.org.au (sSMTP sendmail emulation); Sun, 16 Feb 2025 11:07:33 +0800 Date: Sun, 16 Feb 2025 11:07:33 +0800 Message-Id: <513768f4907245e15e5f12bb20bd50762c3cc25b.1739674648.git.herbert@gondor.apana.org.au> In-Reply-To: References: From: Herbert Xu Subject: [v2 PATCH 10/11] fsverity: Use sync hash instead of shash To: Linux Crypto Mailing List Cc: Eric Biggers , Ard Biesheuvel , Megha Dey , Tim Chen Precedence: bulk X-Mailing-List: linux-crypto@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: Use the sync hash interface instead of shash. Signed-off-by: Herbert Xu --- fs/verity/fsverity_private.h | 2 +- fs/verity/hash_algs.c | 41 +++++++++++++++++++----------------- 2 files changed, 23 insertions(+), 20 deletions(-) diff --git a/fs/verity/fsverity_private.h b/fs/verity/fsverity_private.h index b3506f56e180..aecc221daf8b 100644 --- a/fs/verity/fsverity_private.h +++ b/fs/verity/fsverity_private.h @@ -20,7 +20,7 @@ /* A hash algorithm supported by fs-verity */ struct fsverity_hash_alg { - struct crypto_shash *tfm; /* hash tfm, allocated on demand */ + struct crypto_sync_hash *tfm; /* hash tfm, allocated on demand */ const char *name; /* crypto API name, e.g. 
sha256 */ unsigned int digest_size; /* digest size in bytes, e.g. 32 for SHA-256 */ unsigned int block_size; /* block size in bytes, e.g. 64 for SHA-256 */ diff --git a/fs/verity/hash_algs.c b/fs/verity/hash_algs.c index 6b08b1d9a7d7..e088bcfe5ed1 100644 --- a/fs/verity/hash_algs.c +++ b/fs/verity/hash_algs.c @@ -43,7 +43,7 @@ const struct fsverity_hash_alg *fsverity_get_hash_alg(const struct inode *inode, unsigned int num) { struct fsverity_hash_alg *alg; - struct crypto_shash *tfm; + struct crypto_sync_hash *tfm; int err; if (num >= ARRAY_SIZE(fsverity_hash_algs) || @@ -62,7 +62,7 @@ const struct fsverity_hash_alg *fsverity_get_hash_alg(const struct inode *inode, if (alg->tfm != NULL) goto out_unlock; - tfm = crypto_alloc_shash(alg->name, 0, 0); + tfm = crypto_alloc_sync_hash(alg->name, 0, 0); if (IS_ERR(tfm)) { if (PTR_ERR(tfm) == -ENOENT) { fsverity_warn(inode, @@ -79,20 +79,20 @@ const struct fsverity_hash_alg *fsverity_get_hash_alg(const struct inode *inode, } err = -EINVAL; - if (WARN_ON_ONCE(alg->digest_size != crypto_shash_digestsize(tfm))) + if (WARN_ON_ONCE(alg->digest_size != crypto_sync_hash_digestsize(tfm))) goto err_free_tfm; - if (WARN_ON_ONCE(alg->block_size != crypto_shash_blocksize(tfm))) + if (WARN_ON_ONCE(alg->block_size != crypto_sync_hash_blocksize(tfm))) goto err_free_tfm; pr_info("%s using implementation \"%s\"\n", - alg->name, crypto_shash_driver_name(tfm)); + alg->name, crypto_sync_hash_driver_name(tfm)); /* pairs with smp_load_acquire() above */ smp_store_release(&alg->tfm, tfm); goto out_unlock; err_free_tfm: - crypto_free_shash(tfm); + crypto_free_sync_hash(tfm); alg = ERR_PTR(err); out_unlock: mutex_unlock(&fsverity_hash_alg_init_mutex); @@ -112,17 +112,15 @@ const u8 *fsverity_prepare_hash_state(const struct fsverity_hash_alg *alg, const u8 *salt, size_t salt_size) { u8 *hashstate = NULL; - SHASH_DESC_ON_STACK(desc, alg->tfm); + SYNC_HASH_REQUEST_ON_STACK(req, alg->tfm); u8 *padded_salt = NULL; size_t padded_salt_size; int err; - 
desc->tfm = alg->tfm; - if (salt_size == 0) return NULL; - hashstate = kmalloc(crypto_shash_statesize(alg->tfm), GFP_KERNEL); + hashstate = kmalloc(crypto_sync_hash_statesize(alg->tfm), GFP_KERNEL); if (!hashstate) return ERR_PTR(-ENOMEM); @@ -140,15 +138,19 @@ const u8 *fsverity_prepare_hash_state(const struct fsverity_hash_alg *alg, goto err_free; } memcpy(padded_salt, salt, salt_size); - err = crypto_shash_init(desc); + + ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); + + err = crypto_ahash_init(req); if (err) goto err_free; - err = crypto_shash_update(desc, padded_salt, padded_salt_size); + ahash_request_set_virt(req, padded_salt, NULL, padded_salt_size); + err = crypto_ahash_update(req); if (err) goto err_free; - err = crypto_shash_export(desc, hashstate); + err = crypto_ahash_export(req, hashstate); if (err) goto err_free; out: @@ -176,21 +178,22 @@ const u8 *fsverity_prepare_hash_state(const struct fsverity_hash_alg *alg, int fsverity_hash_block(const struct merkle_tree_params *params, const struct inode *inode, const void *data, u8 *out) { - SHASH_DESC_ON_STACK(desc, params->hash_alg->tfm); + SYNC_HASH_REQUEST_ON_STACK(req, params->hash_alg->tfm); int err; - desc->tfm = params->hash_alg->tfm; + ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); + ahash_request_set_virt(req, data, out, params->block_size); if (params->hashstate) { - err = crypto_shash_import(desc, params->hashstate); + err = crypto_ahash_import(req, params->hashstate); if (err) { fsverity_err(inode, "Error %d importing hash state", err); return err; } - err = crypto_shash_finup(desc, data, params->block_size, out); + err = crypto_ahash_finup(req); } else { - err = crypto_shash_digest(desc, data, params->block_size, out); + err = crypto_ahash_digest(req); } if (err) fsverity_err(inode, "Error %d computing block hash", err); @@ -209,7 +212,7 @@ int fsverity_hash_block(const struct merkle_tree_params *params, int fsverity_hash_buffer(const struct 
fsverity_hash_alg *alg, const void *data, size_t size, u8 *out) { - return crypto_shash_tfm_digest(alg->tfm, data, size, out); + return crypto_sync_hash_digest(alg->tfm, data, size, out); } void __init fsverity_check_hash_algs(void) From patchwork Sun Feb 16 03:10:18 2025 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Herbert Xu X-Patchwork-Id: 865632 Received: from abb.hmeau.com (abb.hmeau.com [144.6.53.87]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id A9C19199B8 for ; Sun, 16 Feb 2025 03:10:22 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=144.6.53.87 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1739675424; cv=none; b=eY9Rh734QAo2rkuukCO/WbUVIcO76IjLO83tBkjKB43x9sHhRJe0bmkplI10vD2txh+KdVcNtpwYsfI7eX9NQWP2uQR55BjB1kpJj7bmaryf680FJVipiYp0DS0XbdYEUB20lAvgoDK2K2D/jV7CbQaxfW976ZBt0qrFZt8z/Co= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1739675424; c=relaxed/simple; bh=p+/lVKuxK3rootGsjG5xAoagLcyOv18N6K9+Ale1Bu0=; h=Date:Message-Id:In-Reply-To:References:From:Subject:To:Cc; b=QNSHzVstosy90Q1+Y5552NRNb0VLiHDwmFKdZXdyQdCiJo6NdQxcklF7Spfp2Qne7fy+3nzp8wcogVoh9HPo2/LZ0jOB0tzaZlSZE7SmOs73QKvg8Vgmsme51TdHgAmfb9Qln8kDrngNw33RISIBFEgNdNOpSD7ullhmTm6NGY0= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=quarantine dis=none) header.from=gondor.apana.org.au; spf=pass smtp.mailfrom=gondor.apana.org.au; dkim=pass (2048-bit key) header.d=hmeau.com header.i=@hmeau.com header.b=shEke4aE; arc=none smtp.client-ip=144.6.53.87 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=quarantine dis=none) header.from=gondor.apana.org.au Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=gondor.apana.org.au Authentication-Results: smtp.subspace.kernel.org; 
dkim=pass (2048-bit key) header.d=hmeau.com header.i=@hmeau.com header.b="shEke4aE" DKIM-Signature: v=1; a=rsa-sha256; q=dns/txt; c=relaxed/relaxed; d=hmeau.com; s=formenos; h=Cc:To:Subject:From:References:In-Reply-To:Message-Id:Date: Sender:Reply-To:MIME-Version:Content-Type:Content-Transfer-Encoding: Content-ID:Content-Description:Resent-Date:Resent-From:Resent-Sender: Resent-To:Resent-Cc:Resent-Message-ID:List-Id:List-Help:List-Unsubscribe: List-Subscribe:List-Post:List-Owner:List-Archive; bh=wquegPewatpvz20WiIh0fEF1nRCAWV/gKId/8RXXn9w=; b=shEke4aEZgJsRANcK8vh2jOBAc fgv+QYRBn+0+81pISVy/mDQxuI3w8LaEkel1+MyTyML1NlQYmEsVc293ojeg10KolviSR0UpGYzK/ AFnhw00sduU9oKqYWc3d54zHwDMQWoTNk9B8q1nVlRmidTDwOI5d0tTWvnMCqM5h4FQFbPKUKOMp9 8udWoid2Z5xwpckwkYiTJfQacn0bWMNe64L9tZx4WPl0/CtwnyxHGZornWkrGXrXLwXDuxy1IPvoV guyoI3yWJ3vShcxwGFG+y2c2GSV8AjDxVdBlrVnrCd5whenyoR5gdeLVtVf1NEq5iA30EN9G9gMTm ZFIDOahw==; Received: from loth.rohan.me.apana.org.au ([192.168.167.2]) by formenos.hmeau.com with smtp (Exim 4.96 #2 (Debian)) id 1tjUq1-000gcf-2g; Sun, 16 Feb 2025 11:10:19 +0800 Received: by loth.rohan.me.apana.org.au (sSMTP sendmail emulation); Sun, 16 Feb 2025 11:10:18 +0800 Date: Sun, 16 Feb 2025 11:10:18 +0800 Message-Id: In-Reply-To: References: From: Herbert Xu Subject: [v2 PATCH 11/11] fsverity: improve performance by using multibuffer hashing To: Linux Crypto Mailing List Cc: Eric Biggers , Ard Biesheuvel , Megha Dey , Tim Chen Precedence: bulk X-Mailing-List: linux-crypto@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: From: Eric Biggers When supported by the hash algorithm, use hash request chaining to interleave the hashing of pairs of data blocks. On some CPUs this nearly doubles hashing performance. The increase in overall throughput of cold-cache fsverity reads that I'm seeing on arm64 and x86_64 is roughly 35% (though this metric is hard to measure as it jumps around a lot).
For now this is only done on the verification path, and only for data blocks, not Merkle tree blocks. We could use multibuffer hashing on Merkle tree blocks too, but that is less important as there aren't as many Merkle tree blocks as data blocks, and that would require some additional code restructuring. We could also use multibuffer hashing to accelerate building the Merkle tree, but verification performance is more important. Reviewed-by: Sami Tolvanen Acked-by: Ard Biesheuvel Signed-off-by: Eric Biggers --- fs/verity/fsverity_private.h | 2 + fs/verity/verify.c | 179 +++++++++++++++++++++++++++++------ 2 files changed, 151 insertions(+), 30 deletions(-) diff --git a/fs/verity/fsverity_private.h b/fs/verity/fsverity_private.h index aecc221daf8b..3d03fb1e41f0 100644 --- a/fs/verity/fsverity_private.h +++ b/fs/verity/fsverity_private.h @@ -152,6 +152,8 @@ static inline void fsverity_init_signature(void) /* verify.c */ +#define FS_VERITY_MAX_PENDING_DATA_BLOCKS 2 + void __init fsverity_init_workqueue(void); #endif /* _FSVERITY_PRIVATE_H */ diff --git a/fs/verity/verify.c b/fs/verity/verify.c index 4fcad0825a12..15bf0887a827 100644 --- a/fs/verity/verify.c +++ b/fs/verity/verify.c @@ -10,6 +10,27 @@ #include #include +struct fsverity_pending_block { + const void *data; + u64 pos; + u8 real_hash[FS_VERITY_MAX_DIGEST_SIZE]; +}; + +struct fsverity_verification_context { + struct inode *inode; + struct fsverity_info *vi; + unsigned long max_ra_pages; + + /* + * This is the queue of data blocks that are pending verification. We + * allow multiple blocks to be queued up in order to support multibuffer + * hashing, i.e. interleaving the hashing of multiple messages. On many + * CPUs this improves performance significantly.
+ */ + int num_pending; + struct fsverity_pending_block pending_blocks[FS_VERITY_MAX_PENDING_DATA_BLOCKS]; +}; + static struct workqueue_struct *fsverity_read_workqueue; /* @@ -79,7 +100,7 @@ static bool is_hash_block_verified(struct fsverity_info *vi, struct page *hpage, } /* - * Verify a single data block against the file's Merkle tree. + * Verify the hash of a single data block against the file's Merkle tree. * * In principle, we need to verify the entire path to the root node. However, * for efficiency the filesystem may cache the hash blocks. Therefore we need @@ -90,8 +111,10 @@ static bool is_hash_block_verified(struct fsverity_info *vi, struct page *hpage, */ static bool verify_data_block(struct inode *inode, struct fsverity_info *vi, - const void *data, u64 data_pos, unsigned long max_ra_pages) + const struct fsverity_pending_block *dblock, + unsigned long max_ra_pages) { + const u64 data_pos = dblock->pos; const struct merkle_tree_params *params = &vi->tree_params; const unsigned int hsize = params->digest_size; int level; @@ -115,8 +138,12 @@ verify_data_block(struct inode *inode, struct fsverity_info *vi, */ u64 hidx = data_pos >> params->log_blocksize; - /* Up to 1 + FS_VERITY_MAX_LEVELS pages may be mapped at once */ - BUILD_BUG_ON(1 + FS_VERITY_MAX_LEVELS > KM_MAX_IDX); + /* + * Up to FS_VERITY_MAX_PENDING_DATA_BLOCKS + FS_VERITY_MAX_LEVELS pages + * may be mapped at once. + */ + BUILD_BUG_ON(FS_VERITY_MAX_PENDING_DATA_BLOCKS + + FS_VERITY_MAX_LEVELS > KM_MAX_IDX); if (unlikely(data_pos >= inode->i_size)) { /* @@ -127,7 +154,7 @@ verify_data_block(struct inode *inode, struct fsverity_info *vi, * any part past EOF should be all zeroes. Therefore, we need * to verify that any data blocks fully past EOF are all zeroes. */ - if (memchr_inv(data, 0, params->block_size)) { + if (memchr_inv(dblock->data, 0, params->block_size)) { fsverity_err(inode, "FILE CORRUPTED! 
Data past EOF is not zeroed"); return false; @@ -221,10 +248,8 @@ verify_data_block(struct inode *inode, struct fsverity_info *vi, put_page(hpage); } - /* Finally, verify the data block. */ - if (fsverity_hash_block(params, inode, data, real_hash) != 0) - goto error; - if (memcmp(want_hash, real_hash, hsize) != 0) + /* Finally, verify the hash of the data block. */ + if (memcmp(want_hash, dblock->real_hash, hsize) != 0) goto corrupted; return true; @@ -233,7 +258,8 @@ verify_data_block(struct inode *inode, struct fsverity_info *vi, "FILE CORRUPTED! pos=%llu, level=%d, want_hash=%s:%*phN, real_hash=%s:%*phN", data_pos, level - 1, params->hash_alg->name, hsize, want_hash, - params->hash_alg->name, hsize, real_hash); + params->hash_alg->name, hsize, + level == 0 ? dblock->real_hash : real_hash); error: for (; level > 0; level--) { kunmap_local(hblocks[level - 1].addr); @@ -242,13 +268,91 @@ verify_data_block(struct inode *inode, struct fsverity_info *vi, return false; } -static bool -verify_data_blocks(struct folio *data_folio, size_t len, size_t offset, - unsigned long max_ra_pages) +static void +fsverity_init_verification_context(struct fsverity_verification_context *ctx, + struct inode *inode, + unsigned long max_ra_pages) { - struct inode *inode = data_folio->mapping->host; - struct fsverity_info *vi = inode->i_verity_info; - const unsigned int block_size = vi->tree_params.block_size; + ctx->inode = inode; + ctx->vi = inode->i_verity_info; + ctx->max_ra_pages = max_ra_pages; + ctx->num_pending = 0; +} + +static void +fsverity_clear_pending_blocks(struct fsverity_verification_context *ctx) +{ + int i; + + for (i = ctx->num_pending - 1; i >= 0; i--) { + kunmap_local(ctx->pending_blocks[i].data); + ctx->pending_blocks[i].data = NULL; + } + ctx->num_pending = 0; +} + +static bool +fsverity_verify_pending_blocks(struct fsverity_verification_context *ctx) +{ + struct inode *inode = ctx->inode; + struct fsverity_info *vi = ctx->vi; + const struct merkle_tree_params 
*params = &vi->tree_params; + SYNC_HASH_REQUESTS_ON_STACK(reqs, FS_VERITY_MAX_PENDING_DATA_BLOCKS, params->hash_alg->tfm); + struct ahash_request *req; + int i; + int err; + + if (ctx->num_pending == 0) + return true; + + req = sync_hash_requests(reqs, 0); + for (i = 0; i < ctx->num_pending; i++) { + struct ahash_request *reqi = sync_hash_requests(reqs, i); + + ahash_request_set_callback(reqi, CRYPTO_TFM_REQ_MAY_SLEEP, + NULL, NULL); + ahash_request_set_virt(reqi, ctx->pending_blocks[i].data, + ctx->pending_blocks[i].real_hash, + params->block_size); + if (i) + ahash_request_chain(reqi, req); + if (!params->hashstate) + continue; + + err = crypto_ahash_import(reqi, params->hashstate); + if (err) { + fsverity_err(inode, "Error %d importing hash state", err); + return false; + } + } + + if (params->hashstate) + err = crypto_ahash_finup(req); + else + err = crypto_ahash_digest(req); + if (err) { + fsverity_err(inode, "Error %d computing block hashes", err); + return false; + } + + for (i = 0; i < ctx->num_pending; i++) { + if (!verify_data_block(inode, vi, &ctx->pending_blocks[i], + ctx->max_ra_pages)) + return false; + } + + fsverity_clear_pending_blocks(ctx); + return true; +} + +static bool +fsverity_add_data_blocks(struct fsverity_verification_context *ctx, + struct folio *data_folio, size_t len, size_t offset) +{ + struct fsverity_info *vi = ctx->vi; + const struct merkle_tree_params *params = &vi->tree_params; + const unsigned int block_size = params->block_size; + const int mb_max_msgs = FS_VERITY_MAX_PENDING_DATA_BLOCKS; u64 pos = (u64)data_folio->index << PAGE_SHIFT; if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offset, block_size))) @@ -257,14 +361,11 @@ verify_data_blocks(struct folio *data_folio, size_t len, size_t offset, folio_test_uptodate(data_folio))) return false; do { - void *data; - bool valid; - - data = kmap_local_folio(data_folio, offset); - valid = verify_data_block(inode, vi, data, pos + offset, - max_ra_pages); - kunmap_local(data); - if 
(!valid) + ctx->pending_blocks[ctx->num_pending].data = + kmap_local_folio(data_folio, offset); + ctx->pending_blocks[ctx->num_pending].pos = pos + offset; + if (++ctx->num_pending == mb_max_msgs && + !fsverity_verify_pending_blocks(ctx)) return false; offset += block_size; len -= block_size; @@ -286,7 +387,15 @@ verify_data_blocks(struct folio *data_folio, size_t len, size_t offset, */ bool fsverity_verify_blocks(struct folio *folio, size_t len, size_t offset) { - return verify_data_blocks(folio, len, offset, 0); + struct fsverity_verification_context ctx; + + fsverity_init_verification_context(&ctx, folio->mapping->host, 0); + + if (fsverity_add_data_blocks(&ctx, folio, len, offset) && + fsverity_verify_pending_blocks(&ctx)) + return true; + fsverity_clear_pending_blocks(&ctx); + return false; } EXPORT_SYMBOL_GPL(fsverity_verify_blocks); @@ -307,6 +416,8 @@ EXPORT_SYMBOL_GPL(fsverity_verify_blocks); */ void fsverity_verify_bio(struct bio *bio) { + struct inode *inode = bio_first_folio_all(bio)->mapping->host; + struct fsverity_verification_context ctx; struct folio_iter fi; unsigned long max_ra_pages = 0; @@ -323,13 +434,21 @@ void fsverity_verify_bio(struct bio *bio) max_ra_pages = bio->bi_iter.bi_size >> (PAGE_SHIFT + 2); } + fsverity_init_verification_context(&ctx, inode, max_ra_pages); + bio_for_each_folio_all(fi, bio) { - if (!verify_data_blocks(fi.folio, fi.length, fi.offset, - max_ra_pages)) { - bio->bi_status = BLK_STS_IOERR; - break; - } + if (!fsverity_add_data_blocks(&ctx, fi.folio, fi.length, + fi.offset)) + goto ioerr; } + + if (!fsverity_verify_pending_blocks(&ctx)) + goto ioerr; + return; + +ioerr: + fsverity_clear_pending_blocks(&ctx); + bio->bi_status = BLK_STS_IOERR; } EXPORT_SYMBOL_GPL(fsverity_verify_bio); #endif /* CONFIG_BLOCK */