@@ -253,7 +253,7 @@ static int ccm_encrypt(struct aead_request *req)
 	/* preserve the original iv for the final round */
 	memcpy(buf, req->iv, AES_BLOCK_SIZE);
 
-	err = skcipher_walk_aead_encrypt(&walk, req, true);
+	err = skcipher_walk_aead_encrypt(&walk, req, false);
 
 	if (may_use_simd()) {
 		while (walk.nbytes) {
@@ -311,7 +311,7 @@ static int ccm_decrypt(struct aead_request *req)
 	/* preserve the original iv for the final round */
 	memcpy(buf, req->iv, AES_BLOCK_SIZE);
 
-	err = skcipher_walk_aead_decrypt(&walk, req, true);
+	err = skcipher_walk_aead_decrypt(&walk, req, false);
 
 	if (may_use_simd()) {
 		while (walk.nbytes) {
When the AES-CCM code was first added, the NEON registers were saved
and restored eagerly on each kernel mode NEON call, and so the code
avoided making that call more than once, and executed the scatterwalk
in atomic context inside a single kernel_neon_begin/end section. This
has been changed in the meantime, so switch to non-atomic scatterwalks.

Fixes: bd2ad885e30d ("crypto: arm64/aes-ce-ccm - move kernel mode neon ...")
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
 arch/arm64/crypto/aes-ce-ccm-glue.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

-- 
2.20.1
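P.S. For readers unfamiliar with the walk API, the may_use_simd() path
this patch touches ends up following roughly the pattern sketched below.
This is a sketch only, reconstructed around the real skcipher_walk and
kernel mode NEON APIs; the helper prototypes for ce_aes_ccm_encrypt()
and num_rounds() are assumptions modelled on aes-ce-ccm-glue.c, not a
verbatim copy of the upstream function.

#include <asm/neon.h>
#include <crypto/aes.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>

/* assumed prototype of the asm helper, per aes-ce-ccm-glue.c */
asmlinkage void ce_aes_ccm_encrypt(u8 out[], u8 const in[], u32 cbytes,
				   u32 const rk[], u32 rounds, u8 mac[],
				   u8 ctr[]);

/* assumed helper: AES round count from the expanded key length */
static int num_rounds(struct crypto_aes_ctx *ctx)
{
	return 6 + ctx->key_length / 4;
}

static int ccm_walk_sketch(struct aead_request *req,
			   struct crypto_aes_ctx *ctx, u8 mac[])
{
	struct skcipher_walk walk;
	int err;

	/*
	 * atomic == false: the walk may now sleep between chunks,
	 * which is fine because NEON is claimed and released around
	 * each chunk below rather than around the whole walk.
	 */
	err = skcipher_walk_aead_encrypt(&walk, req, false);

	while (walk.nbytes) {
		u32 tail = walk.nbytes % AES_BLOCK_SIZE;

		if (walk.nbytes == walk.total)
			tail = 0;	/* final chunk: consume everything */

		kernel_neon_begin();
		ce_aes_ccm_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				   walk.nbytes - tail, ctx->key_enc,
				   num_rounds(ctx), mac, walk.iv);
		kernel_neon_end();

		/* leave the partial tail block for the next iteration */
		err = skcipher_walk_done(&walk, tail);
	}

	return err;
}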