crypto: sa2ul: Fix DMA mapping API usage

Message ID 20200921113846.6973-1-peter.ujfalusi@ti.com
State New
Series crypto: sa2ul: Fix DMA mapping API usage

Commit Message

Peter Ujfalusi Sept. 21, 2020, 11:38 a.m. UTC
Make sure that we call dma_unmap_sg() on the correct scatterlist, with
the correct sg_nents, on completion.

We should also call dma_sync_sg_for_device() on the tx buffer before
handing it to the DMA, and dma_sync_sg_for_cpu() on the scatterlist on
which we received the data back.

Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>

---
 drivers/crypto/sa2ul.c | 157 ++++++++++++++++++++++-------------------
 1 file changed, 83 insertions(+), 74 deletions(-)
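
For reference, the streaming DMA ownership pattern described above looks
roughly like the sketch below. It is illustrative only; example_dma_xfer()
and its parameters are placeholder names, not driver code:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Sketch: map once, sync around the transfer, then unmap with the very
 * same sgl, nents and direction that were passed to dma_map_sg().
 */
static int example_dma_xfer(struct device *dev, struct scatterlist *sgl,
			    int nents)
{
	int mapped = dma_map_sg(dev, sgl, nents, DMA_BIDIRECTIONAL);

	if (!mapped)
		return -ENOMEM;

	/* CPU filled the buffer: hand ownership to the device. */
	dma_sync_sg_for_device(dev, sgl, nents, DMA_TO_DEVICE);

	/* ... submit to the DMA engine and wait for completion ... */

	/* Device wrote the result: hand ownership back to the CPU. */
	dma_sync_sg_for_cpu(dev, sgl, nents, DMA_FROM_DEVICE);

	dma_unmap_sg(dev, sgl, nents, DMA_BIDIRECTIONAL);
	return 0;
}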

-- 
Peter

Texas Instruments Finland Oy, Porkkalankatu 22, 00180 Helsinki.
Y-tunnus/Business ID: 0615521-4. Kotipaikka/Domicile: Helsinki

Comments

Christoph Hellwig Sept. 23, 2020, 6:02 a.m. UTC | #1
On Mon, Sep 21, 2020 at 02:38:46PM +0300, Peter Ujfalusi wrote:
> Make sure that we call dma_unmap_sg() on the correct scatterlist, with
> the correct sg_nents, on completion.
>
> We should also call dma_sync_sg_for_device() on the tx buffer before
> handing it to the DMA, and dma_sync_sg_for_cpu() on the scatterlist on
> which we received the data back.

You might want to look into using struct sg_table and the DMA mapping
helpers that take it to simplify this a bit.
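
For illustration, the sg_table-based flow would look roughly like this
(a minimal sketch with a placeholder function name; dma_map_sgtable()
returns 0 on success and keeps the nents bookkeeping inside the
sg_table):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_sgtable_xfer(struct device *dev, struct sg_table *sgt,
				enum dma_data_direction dir)
{
	int ret = dma_map_sgtable(dev, sgt, dir, 0);

	if (ret)
		return ret;	/* nothing is mapped on failure */

	dma_sync_sgtable_for_device(dev, sgt, dir);
	/* ... DMA transfer ... */
	dma_sync_sgtable_for_cpu(dev, sgt, dir);

	/* Unmap takes the whole sg_table, so nents cannot be mixed up. */
	dma_unmap_sgtable(dev, sgt, dir, 0);
	return 0;
}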
Peter Ujfalusi Sept. 23, 2020, 7:48 a.m. UTC | #2
On 23/09/2020 9.02, Christoph Hellwig wrote:
> On Mon, Sep 21, 2020 at 02:38:46PM +0300, Peter Ujfalusi wrote:
>> Make sure that we call dma_unmap_sg() on the correct scatterlist, with
>> the correct sg_nents, on completion.
>>
>> We should also call dma_sync_sg_for_device() on the tx buffer before
>> handing it to the DMA, and dma_sync_sg_for_cpu() on the scatterlist on
>> which we received the data back.
>
> You might want to look into using struct sg_table and the DMA mapping
> helpers that take it to simplify this a bit.

It would have simplified things a bit further if dma_map_sgtable() saved
the direction we used when mapping (so that dma_unmap_sgtable() could use
the stored direction) and had a bool to tell whether the mapping was
successful.
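
Something along these lines would capture that, storing the direction
and whether the mapping succeeded so teardown needs no extra arguments.
This is an illustrative sketch of the idea only; struct mapped_sgt and
its helpers are hypothetical names, not the v2 code:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical wrapper: remember dir and success at map time. */
struct mapped_sgt {
	bool mapped;
	enum dma_data_direction dir;
	struct sg_table sgt;
};

static int mapped_sgt_map(struct device *dev, struct mapped_sgt *m,
			  enum dma_data_direction dir)
{
	int ret = dma_map_sgtable(dev, &m->sgt, dir, 0);

	if (ret)
		return ret;
	m->dir = dir;
	m->mapped = true;
	return 0;
}

static void mapped_sgt_unmap(struct device *dev, struct mapped_sgt *m)
{
	if (!m->mapped)
		return;
	dma_unmap_sgtable(dev, &m->sgt, m->dir, 0);
	m->mapped = false;
}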

I'll send a v2 using sg_table in a bit, after running some tests to make
sure it is working as expected. It is passing the extended boot-time
tests on j721e.

- Péter


Patch

diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
index a0b398ac6723..85eb14775847 100644
--- a/drivers/crypto/sa2ul.c
+++ b/drivers/crypto/sa2ul.c
@@ -142,34 +142,40 @@  struct sa_alg_tmpl {
 	bool registered;
 };
 
+/**
+ * struct sa_mapped_sg: scatterlist information for tx and rx
+ * @dir: mapping direction of @sgl
+ * @static_sg: Static scatterlist entry for overriding data
+ * @split_sg: Set if the sg is split and needs to be freed up
+ * @sgl: scatterlist which is used for dma_map_sg/dma_unmap_sg
+ * @mapped_sg_nents: number of mapped entries returned by dma_map_sg()
+ * @sg_nents: sg_nents which is used for dma_map_sg/dma_unmap_sg
+ */
+struct sa_mapped_sg {
+	enum dma_data_direction dir;
+	struct scatterlist static_sg;
+	struct scatterlist *split_sg;
+	struct scatterlist *sgl;
+	int mapped_sg_nents;
+	int sg_nents;
+};
+
 /**
  * struct sa_rx_data: RX Packet miscellaneous data place holder
  * @req: crypto request data pointer
  * @ddev: pointer to the DMA device
  * @tx_in: dma_async_tx_descriptor pointer for rx channel
- * @split_src_sg: Set if the src sg is split and needs to be freed up
- * @split_dst_sg: Set if the dst sg is split and needs to be freed up
+ * @mapped_sg: Information on tx (0) and rx (1) scatterlist DMA mapping
  * @enc: Flag indicating either encryption or decryption
  * @enc_iv_size: Initialisation vector size
  * @iv_idx: Initialisation vector index
- * @rx_sg: Static scatterlist entry for overriding RX data
- * @tx_sg: Static scatterlist entry for overriding TX data
- * @src: Source data pointer
- * @dst: Destination data pointer
  */
 struct sa_rx_data {
 	void *req;
 	struct device *ddev;
 	struct dma_async_tx_descriptor *tx_in;
-	struct scatterlist *split_src_sg;
-	struct scatterlist *split_dst_sg;
+	struct sa_mapped_sg mapped_sg[2];
 	u8 enc;
 	u8 enc_iv_size;
 	u8 iv_idx;
-	struct scatterlist rx_sg;
-	struct scatterlist tx_sg;
-	struct scatterlist *src;
-	struct scatterlist *dst;
 };
 
 /**
@@ -976,23 +982,47 @@  static int sa_3des_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key,
 	return sa_cipher_setkey(tfm, key, keylen, &ad);
 }
 
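+/* Sync the scatterlist that received data: src for in-place ops, dst otherwise */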
+static void sa_sync_from_device(struct sa_rx_data *rxd)
+{
+	struct sa_mapped_sg *mapped_sg;
+
+	if (rxd->mapped_sg[0].dir == DMA_BIDIRECTIONAL)
+		mapped_sg = &rxd->mapped_sg[0];
+	else
+		mapped_sg = &rxd->mapped_sg[1];
+
+	dma_sync_sg_for_cpu(rxd->ddev, mapped_sg->sgl, mapped_sg->sg_nents,
+			    DMA_FROM_DEVICE);
+}
+
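+/* Unmap both scatterlists, free any split sg copies and rxd itself */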
+static void sa_free_sa_rx_data(struct sa_rx_data *rxd)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(rxd->mapped_sg); i++) {
+		struct sa_mapped_sg *mapped_sg = &rxd->mapped_sg[i];
+
+		if (mapped_sg->sg_nents) {
+			dma_unmap_sg(rxd->ddev, mapped_sg->sgl,
+				     mapped_sg->sg_nents, mapped_sg->dir);
+			kfree(mapped_sg->split_sg);
+		}
+	}
+
+	kfree(rxd);
+}
+
 static void sa_aes_dma_in_callback(void *data)
 {
 	struct sa_rx_data *rxd = (struct sa_rx_data *)data;
 	struct skcipher_request *req;
-	int sglen;
 	u32 *result;
 	__be32 *mdptr;
 	size_t ml, pl;
 	int i;
-	enum dma_data_direction dir_src;
-	bool diff_dst;
 
+	sa_sync_from_device(rxd);
 	req = container_of(rxd->req, struct skcipher_request, base);
-	sglen = sg_nents_for_len(req->src, req->cryptlen);
-
-	diff_dst = (req->src != req->dst) ? true : false;
-	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
 
 	if (req->iv) {
 		mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl,
@@ -1003,18 +1033,7 @@  static void sa_aes_dma_in_callback(void *data)
 			result[i] = be32_to_cpu(mdptr[i + rxd->iv_idx]);
 	}
 
-	dma_unmap_sg(rxd->ddev, req->src, sglen, dir_src);
-	kfree(rxd->split_src_sg);
-
-	if (diff_dst) {
-		sglen = sg_nents_for_len(req->dst, req->cryptlen);
-
-		dma_unmap_sg(rxd->ddev, req->dst, sglen,
-			     DMA_FROM_DEVICE);
-		kfree(rxd->split_dst_sg);
-	}
-
-	kfree(rxd);
+	sa_free_sa_rx_data(rxd);
 
 	skcipher_request_complete(req, 0);
 }
@@ -1052,6 +1071,7 @@  static int sa_run(struct sa_req *req)
 	u32 *mdptr;
 	bool diff_dst;
 	enum dma_data_direction dir_src;
+	struct sa_mapped_sg *mapped_sg;
 
 	gfp_flags = req->base->flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
 		GFP_KERNEL : GFP_ATOMIC;
@@ -1082,6 +1102,7 @@  static int sa_run(struct sa_req *req)
 		dma_rx = pdata->dma_rx1;
 
 	ddev = dma_rx->device->dev;
+	rxd->ddev = ddev;
 
 	memcpy(cmdl, sa_ctx->cmdl, sa_ctx->cmdl_size);
 
@@ -1109,49 +1130,68 @@  static int sa_run(struct sa_req *req)
 
 	split_size = req->size;
 
+	mapped_sg = &rxd->mapped_sg[0];
 	if (sg_nents == 1 && split_size <= req->src->length) {
-		src = &rxd->rx_sg;
+		src = &mapped_sg->static_sg;
 		sg_init_table(src, 1);
 		sg_set_page(src, sg_page(req->src), split_size,
 			    req->src->offset);
 		src_nents = 1;
 		dma_map_sg(ddev, src, sg_nents, dir_src);
+		mapped_sg->sgl = src;
+		mapped_sg->sg_nents = sg_nents;
+		mapped_sg->dir = dir_src;
 	} else {
 		mapped_src_nents = dma_map_sg(ddev, req->src, sg_nents,
 					      dir_src);
+		mapped_sg->sgl = req->src;
+		mapped_sg->sg_nents = sg_nents;
+		mapped_sg->dir = dir_src;
+
 		ret = sg_split(req->src, mapped_src_nents, 0, 1, &split_size,
 			       &src, &src_nents, gfp_flags);
 		if (ret) {
 			src_nents = sg_nents;
 			src = req->src;
 		} else {
-			rxd->split_src_sg = src;
+			mapped_sg->split_sg = src;
 		}
 	}
 
+	dma_sync_sg_for_device(ddev, mapped_sg->sgl, mapped_sg->sg_nents,
+			       DMA_TO_DEVICE);
+
 	if (!diff_dst) {
 		dst_nents = src_nents;
 		dst = src;
 	} else {
 		dst_nents = sg_nents_for_len(req->dst, req->size);
+		mapped_sg = &rxd->mapped_sg[1];
 
 		if (dst_nents == 1 && split_size <= req->dst->length) {
-			dst = &rxd->tx_sg;
+			dst = &mapped_sg->static_sg;
 			sg_init_table(dst, 1);
 			sg_set_page(dst, sg_page(req->dst), split_size,
 				    req->dst->offset);
 			dst_nents = 1;
 			dma_map_sg(ddev, dst, dst_nents, DMA_FROM_DEVICE);
+			mapped_sg->sgl = dst;
+			mapped_sg->sg_nents = dst_nents;
+			mapped_sg->dir = DMA_FROM_DEVICE;
 		} else {
 			mapped_dst_nents = dma_map_sg(ddev, req->dst, dst_nents,
 						      DMA_FROM_DEVICE);
+			mapped_sg->sgl = req->dst;
+			mapped_sg->sg_nents = dst_nents;
+			mapped_sg->dir = DMA_FROM_DEVICE;
+
 			ret = sg_split(req->dst, mapped_dst_nents, 0, 1,
 				       &split_size, &dst, &dst_nents,
 				       gfp_flags);
 			if (ret)
 				dst = req->dst;
 			else
-				rxd->split_dst_sg = dst;
+				mapped_sg->split_sg = dst;
 		}
 	}
 
@@ -1172,9 +1212,6 @@  static int sa_run(struct sa_req *req)
 
 	rxd->req = (void *)req->base;
 	rxd->enc = req->enc;
-	rxd->ddev = ddev;
-	rxd->src = src;
-	rxd->dst = dst;
 	rxd->iv_idx = req->ctx->iv_idx;
 	rxd->enc_iv_size = sa_ctx->cmdl_upd_info.enc_iv.size;
 	rxd->tx_in->callback = req->callback;
@@ -1212,16 +1249,7 @@  static int sa_run(struct sa_req *req)
 	return -EINPROGRESS;
 
 err_cleanup:
-	dma_unmap_sg(ddev, req->src, sg_nents, DMA_TO_DEVICE);
-	kfree(rxd->split_src_sg);
-
-	if (req->src != req->dst) {
-		dst_nents = sg_nents_for_len(req->dst, req->size);
-		dma_unmap_sg(ddev, req->dst, dst_nents, DMA_FROM_DEVICE);
-		kfree(rxd->split_dst_sg);
-	}
-
-	kfree(rxd);
+	sa_free_sa_rx_data(rxd);
 
 	return ret;
 }
@@ -1291,11 +1319,12 @@  static void sa_sha_dma_in_callback(void *data)
 	struct ahash_request *req;
 	struct crypto_ahash *tfm;
 	unsigned int authsize;
-	int i, sg_nents;
+	int i;
 	size_t ml, pl;
 	u32 *result;
 	__be32 *mdptr;
 
+	sa_sync_from_device(rxd);
 	req = container_of(rxd->req, struct ahash_request, base);
 	tfm = crypto_ahash_reqtfm(req);
 	authsize = crypto_ahash_digestsize(tfm);
@@ -1306,12 +1335,7 @@  static void sa_sha_dma_in_callback(void *data)
 	for (i = 0; i < (authsize / 4); i++)
 		result[i] = be32_to_cpu(mdptr[i + 4]);
 
-	sg_nents = sg_nents_for_len(req->src, req->nbytes);
-	dma_unmap_sg(rxd->ddev, req->src, sg_nents, DMA_FROM_DEVICE);
-
-	kfree(rxd->split_src_sg);
-
-	kfree(rxd);
+	sa_free_sa_rx_data(rxd);
 
 	ahash_request_complete(req, 0);
 }
@@ -1635,43 +1659,28 @@  static void sa_aead_dma_in_callback(void *data)
 	unsigned int authsize;
 	u8 auth_tag[SA_MAX_AUTH_TAG_SZ];
 	size_t pl, ml;
-	int i, sglen;
+	int i;
 	int err = 0;
 	u16 auth_len;
 	u32 *mdptr;
-	bool diff_dst;
-	enum dma_data_direction dir_src;
 
+	sa_sync_from_device(rxd);
 	req = container_of(rxd->req, struct aead_request, base);
 	tfm = crypto_aead_reqtfm(req);
 	start = req->assoclen + req->cryptlen;
 	authsize = crypto_aead_authsize(tfm);
 
-	diff_dst = (req->src != req->dst) ? true : false;
-	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
-
 	mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml);
 	for (i = 0; i < (authsize / 4); i++)
 		mdptr[i + 4] = swab32(mdptr[i + 4]);
 
 	auth_len = req->assoclen + req->cryptlen;
-	if (!rxd->enc)
-		auth_len -= authsize;
-
-	sglen =  sg_nents_for_len(rxd->src, auth_len);
-	dma_unmap_sg(rxd->ddev, rxd->src, sglen, dir_src);
-	kfree(rxd->split_src_sg);
-
-	if (diff_dst) {
-		sglen = sg_nents_for_len(rxd->dst, auth_len);
-		dma_unmap_sg(rxd->ddev, rxd->dst, sglen, DMA_FROM_DEVICE);
-		kfree(rxd->split_dst_sg);
-	}
 
 	if (rxd->enc) {
 		scatterwalk_map_and_copy(&mdptr[4], req->dst, start, authsize,
 					 1);
 	} else {
+		auth_len -= authsize;
 		start -= authsize;
 		scatterwalk_map_and_copy(auth_tag, req->src, start, authsize,
 					 0);
@@ -1679,7 +1688,7 @@  static void sa_aead_dma_in_callback(void *data)
 		err = memcmp(&mdptr[4], auth_tag, authsize) ? -EBADMSG : 0;
 	}
 
-	kfree(rxd);
+	sa_free_sa_rx_data(rxd);
 
 	aead_request_complete(req, err);
 }