--- a/drivers/crypto/tegra/tegra-se-aes.c
+++ b/drivers/crypto/tegra/tegra-se-aes.c
@@ -282,7 +282,7 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
/* Prepare the command and submit for execution */
cmdlen = tegra_aes_prep_cmd(ctx, rctx);
- ret = tegra_se_host1x_submit(se, cmdlen);
+ ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
/* Copy the result */
tegra_aes_update_iv(req, ctx);
@@ -719,7 +719,7 @@ static int tegra_gcm_do_gmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqct
cmdlen = tegra_gmac_prep_cmd(ctx, rctx);
- return tegra_se_host1x_submit(se, cmdlen);
+ return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
}
static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
@@ -736,7 +736,7 @@ static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc
/* Prepare command and submit */
cmdlen = tegra_gcm_crypt_prep_cmd(ctx, rctx);
- ret = tegra_se_host1x_submit(se, cmdlen);
+ ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
if (ret)
return ret;
@@ -759,7 +759,7 @@ static int tegra_gcm_do_final(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc
/* Prepare command and submit */
cmdlen = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx);
- ret = tegra_se_host1x_submit(se, cmdlen);
+ ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
if (ret)
return ret;
@@ -891,7 +891,7 @@ static int tegra_ccm_do_cbcmac(struct tegra_aead_ctx *ctx, struct tegra_aead_req
/* Prepare command and submit */
cmdlen = tegra_cbcmac_prep_cmd(ctx, rctx);
- return tegra_se_host1x_submit(se, cmdlen);
+ return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
}
static int tegra_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
@@ -1098,7 +1098,7 @@ static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx
/* Prepare command and submit */
cmdlen = tegra_ctr_prep_cmd(ctx, rctx);
- ret = tegra_se_host1x_submit(se, cmdlen);
+ ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
if (ret)
return ret;
@@ -1519,8 +1519,8 @@ static int tegra_cmac_do_update(struct ahash_request *req)
tegra_cmac_paste_result(ctx->se, rctx);
cmdlen = tegra_cmac_prep_cmd(ctx, rctx);
- ret = tegra_se_host1x_submit(se, cmdlen);
+ ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
/*
* If this is not the final update, copy the intermediate results
* from the registers so that it can be used in the next 'update'
@@ -1553,7 +1553,7 @@ static int tegra_cmac_do_final(struct ahash_request *req)
/* Prepare command and submit */
cmdlen = tegra_cmac_prep_cmd(ctx, rctx);
- ret = tegra_se_host1x_submit(se, cmdlen);
+ ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
if (ret)
goto out;
--- a/drivers/crypto/tegra/tegra-se-hash.c
+++ b/drivers/crypto/tegra/tegra-se-hash.c
@@ -300,8 +300,9 @@ static int tegra_sha_do_update(struct ahash_request *req)
{
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct tegra_se *se = ctx->se;
unsigned int nblks, nresidue, size, ret;
- u32 *cpuvaddr = ctx->se->cmdbuf->addr;
+ u32 *cpuvaddr = se->cmdbuf->addr;
nresidue = (req->nbytes + rctx->residue.size) % rctx->blk_size;
nblks = (req->nbytes + rctx->residue.size) / rctx->blk_size;
@@ -353,11 +354,11 @@ static int tegra_sha_do_update(struct ahash_request *req)
* This is to support the import/export functionality.
*/
if (!(rctx->task & SHA_FIRST))
- tegra_sha_paste_hash_result(ctx->se, rctx);
+ tegra_sha_paste_hash_result(se, rctx);
- size = tegra_sha_prep_cmd(ctx->se, cpuvaddr, rctx);
+ size = tegra_sha_prep_cmd(se, cpuvaddr, rctx);
- ret = tegra_se_host1x_submit(ctx->se, size);
+ ret = tegra_se_host1x_submit(se, se->cmdbuf, size);
/*
* If this is not the final update, copy the intermediate results
@@ -365,7 +366,7 @@ static int tegra_sha_do_update(struct ahash_request *req)
* call. This is to support the import/export functionality.
*/
if (!(rctx->task & SHA_FINAL))
- tegra_sha_copy_hash_result(ctx->se, rctx);
+ tegra_sha_copy_hash_result(se, rctx);
return ret;
}
@@ -388,7 +389,7 @@ static int tegra_sha_do_final(struct ahash_request *req)
size = tegra_sha_prep_cmd(se, cpuvaddr, rctx);
- ret = tegra_se_host1x_submit(se, size);
+ ret = tegra_se_host1x_submit(se, se->cmdbuf, size);
if (ret)
goto out;
--- a/drivers/crypto/tegra/tegra-se-key.c
+++ b/drivers/crypto/tegra/tegra-se-key.c
@@ -115,11 +115,17 @@ static int tegra_key_insert(struct tegra_se *se, const u8 *key,
u32 keylen, u16 slot, u32 alg)
{
const u32 *keyval = (u32 *)key;
- u32 *addr = se->cmdbuf->addr, size;
+ u32 *addr = se->keybuf->addr, size;
+ int ret;
+
+ mutex_lock(&kslt_lock);
size = tegra_key_prep_ins_cmd(se, addr, keyval, keylen, slot, alg);
+ ret = tegra_se_host1x_submit(se, se->keybuf, size);
- return tegra_se_host1x_submit(se, size);
+ mutex_unlock(&kslt_lock);
+
+ return ret;
}
void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg)
--- a/drivers/crypto/tegra/tegra-se-main.c
+++ b/drivers/crypto/tegra/tegra-se-main.c
@@ -141,7 +141,7 @@ static struct tegra_se_cmdbuf *tegra_se_host1x_bo_alloc(struct tegra_se *se, ssi
return cmdbuf;
}
-int tegra_se_host1x_submit(struct tegra_se *se, u32 size)
+int tegra_se_host1x_submit(struct tegra_se *se, struct tegra_se_cmdbuf *cmdbuf, u32 size)
{
struct host1x_job *job;
int ret;
@@ -160,9 +160,9 @@ int tegra_se_host1x_submit(struct tegra_se *se, u32 size)
job->engine_fallback_streamid = se->stream_id;
job->engine_streamid_offset = SE_STREAM_ID;
- se->cmdbuf->words = size;
+ cmdbuf->words = size;
- host1x_job_add_gather(job, &se->cmdbuf->bo, size, 0);
+ host1x_job_add_gather(job, &cmdbuf->bo, size, 0);
ret = host1x_job_pin(job, se->dev);
if (ret) {
@@ -220,14 +220,22 @@ static int tegra_se_client_init(struct host1x_client *client)
goto syncpt_put;
}
+ se->keybuf = tegra_se_host1x_bo_alloc(se, SZ_4K);
+ if (!se->keybuf) {
+ ret = -ENOMEM;
+ goto cmdbuf_put;
+ }
+
ret = se->hw->init_alg(se);
if (ret) {
dev_err(se->dev, "failed to register algorithms\n");
- goto cmdbuf_put;
+ goto keybuf_put;
}
return 0;
+keybuf_put:
+ tegra_se_cmdbuf_put(&se->keybuf->bo);
cmdbuf_put:
tegra_se_cmdbuf_put(&se->cmdbuf->bo);
syncpt_put:
--- a/drivers/crypto/tegra/tegra-se.h
+++ b/drivers/crypto/tegra/tegra-se.h
@@ -420,6 +420,7 @@ struct tegra_se {
struct host1x_client client;
struct host1x_channel *channel;
struct tegra_se_cmdbuf *cmdbuf;
+ struct tegra_se_cmdbuf *keybuf;
struct crypto_engine *engine;
struct host1x_syncpt *syncpt;
struct device *dev;
@@ -502,7 +503,7 @@ void tegra_deinit_hash(struct tegra_se *se);
int tegra_key_submit(struct tegra_se *se, const u8 *key,
u32 keylen, u32 alg, u32 *keyid);
void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg);
-int tegra_se_host1x_submit(struct tegra_se *se, u32 size);
+int tegra_se_host1x_submit(struct tegra_se *se, struct tegra_se_cmdbuf *cmdbuf, u32 size);
/* HOST1x OPCODES */
static inline u32 host1x_opcode_setpayload(unsigned int payload)
The buffer which sends the commands to host1x was shared by all tasks
in the engine. This causes a problem with the setkey() function, as it
gets called asynchronously to the crypto engine queue. Modifying the
same cmdbuf in setkey() will corrupt the ongoing host1x task and in
turn break the encryption/decryption operation. Hence use a separate
cmdbuf for setkey().

Fixes: 0880bb3b00c8 ("crypto: tegra - Add Tegra Security Engine driver")
Signed-off-by: Akhil R <akhilrajeev@nvidia.com>
---
 drivers/crypto/tegra/tegra-se-aes.c  | 16 ++++++++--------
 drivers/crypto/tegra/tegra-se-hash.c | 13 +++++++------
 drivers/crypto/tegra/tegra-se-key.c  | 10 ++++++++--
 drivers/crypto/tegra/tegra-se-main.c | 16 ++++++++++++----
 drivers/crypto/tegra/tegra-se.h      |  3 ++-
 5 files changed, 37 insertions(+), 21 deletions(-)
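
For reviewers, a minimal sketch of the idea behind the split (illustration
only, not the driver's API: the demo_* names, types, and helpers below are
hypothetical stand-ins): the crypto-engine queue keeps its own command
buffer, setkey() builds its commands in a second buffer that only it
touches, and a lock serializes concurrent setkey() callers, so a key
insertion can never overwrite command words that host1x may still be
reading.

/*
 * Illustration only -- simplified stand-ins, not the Tegra SE driver code.
 * The point is the split: cmdbuf is written only from the crypto-engine
 * queue, keybuf only from setkey(), and keybuf accesses are serialized.
 */
#include <linux/mutex.h>
#include <linux/types.h>

struct demo_cmdbuf {
	u32 *addr;	/* CPU mapping of the command words */
	u32 words;	/* number of valid command words */
};

struct demo_engine {
	struct demo_cmdbuf *cmdbuf;	/* engine-queue submissions only */
	struct demo_cmdbuf *keybuf;	/* setkey() submissions only */
	struct mutex keybuf_lock;	/* serializes concurrent setkey() calls */
};

/* Hypothetical helpers standing in for command preparation and submission */
u32 demo_prep_key_cmd(u32 *addr, const u8 *key, u32 keylen);
int demo_submit(struct demo_engine *eng, struct demo_cmdbuf *buf);

/*
 * setkey() runs asynchronously to the engine queue, so it must never
 * write into cmdbuf; it builds its commands in keybuf under its lock.
 */
static int demo_insert_key(struct demo_engine *eng, const u8 *key, u32 keylen)
{
	int ret;

	mutex_lock(&eng->keybuf_lock);
	eng->keybuf->words = demo_prep_key_cmd(eng->keybuf->addr, key, keylen);
	ret = demo_submit(eng, eng->keybuf);
	mutex_unlock(&eng->keybuf_lock);

	return ret;
}

One alternative would be to route the key insertion through the crypto
engine queue as well; the dedicated buffer used by this patch keeps
setkey() synchronous and leaves the data-path submissions untouched.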