--- a/crypto/lskcipher.c
+++ b/crypto/lskcipher.c
@@ -87,8 +87,9 @@ EXPORT_SYMBOL_GPL(crypto_lskcipher_setkey);

static int crypto_lskcipher_crypt_unaligned(
struct crypto_lskcipher *tfm, const u8 *src, u8 *dst, unsigned len,
- u8 *iv, int (*crypt)(struct crypto_lskcipher *tfm, const u8 *src,
- u8 *dst, unsigned len, u8 *iv, u32 flags))
+ u8 *iv, u32 flags,
+ int (*crypt)(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *iv, u32 flags))
{
unsigned statesize = crypto_lskcipher_statesize(tfm);
unsigned ivsize = crypto_lskcipher_ivsize(tfm);
@@ -120,7 +121,7 @@ static int crypto_lskcipher_crypt_unaligned(
			chunk &= ~(cs - 1);

		memcpy(p, src, chunk);

-		err = crypt(tfm, p, p, chunk, tiv, CRYPTO_LSKCIPHER_FLAG_FINAL);
+		err = crypt(tfm, p, p, chunk, tiv, flags);
if (err)
goto out;
@@ -140,7 +141,7 @@ static int crypto_lskcipher_crypt_unaligned(
}

static int crypto_lskcipher_crypt(struct crypto_lskcipher *tfm, const u8 *src,
- u8 *dst, unsigned len, u8 *iv,
+ u8 *dst, unsigned len, u8 *iv, u32 flags,
int (*crypt)(struct crypto_lskcipher *tfm,
const u8 *src, u8 *dst,
unsigned len, u8 *iv,
@@ -153,18 +154,18 @@ static int crypto_lskcipher_crypt(struct crypto_lskcipher *tfm, const u8 *src,
if (((unsigned long)src | (unsigned long)dst | (unsigned long)iv) &
alignmask) {
ret = crypto_lskcipher_crypt_unaligned(tfm, src, dst, len, iv,
- crypt);
+ flags, crypt);
goto out;
	}

-	ret = crypt(tfm, src, dst, len, iv, CRYPTO_LSKCIPHER_FLAG_FINAL);
+	ret = crypt(tfm, src, dst, len, iv, flags);

out:
	return crypto_lskcipher_errstat(alg, ret);
}

-int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
-			     u8 *dst, unsigned len, u8 *iv)
+int crypto_lskcipher_encrypt_ext(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *iv, u32 flags)
{
struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm);
@@ -175,12 +176,13 @@ int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
atomic64_add(len, &istat->encrypt_tlen);
	}

-	return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->encrypt);
+	return crypto_lskcipher_crypt(tfm, src, dst, len, iv, flags,
+				      alg->encrypt);
}
-EXPORT_SYMBOL_GPL(crypto_lskcipher_encrypt);
+EXPORT_SYMBOL_GPL(crypto_lskcipher_encrypt_ext);

-int crypto_lskcipher_decrypt(struct crypto_lskcipher *tfm, const u8 *src,
-			     u8 *dst, unsigned len, u8 *iv)
+int crypto_lskcipher_decrypt_ext(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *iv, u32 flags)
{
	struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm);

@@ -191,7 +193,24 @@ int crypto_lskcipher_decrypt(struct crypto_lskcipher *tfm, const u8 *src,
atomic64_add(len, &istat->decrypt_tlen);
	}

-	return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->decrypt);
+ return crypto_lskcipher_crypt(tfm, src, dst, len, iv, flags,
+ alg->decrypt);
+}
+EXPORT_SYMBOL_GPL(crypto_lskcipher_decrypt_ext);
+
+int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *iv)
+{
+ return crypto_lskcipher_encrypt_ext(tfm, src, dst, len, iv,
+ CRYPTO_LSKCIPHER_FLAG_FINAL);
+}
+EXPORT_SYMBOL_GPL(crypto_lskcipher_encrypt);
+
+int crypto_lskcipher_decrypt(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *iv)
+{
+ return crypto_lskcipher_decrypt_ext(tfm, src, dst, len, iv,
+ CRYPTO_LSKCIPHER_FLAG_FINAL);
}
EXPORT_SYMBOL_GPL(crypto_lskcipher_decrypt);

--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -90,6 +90,48 @@ struct skcipher_walk {
unsigned int alignmask;
};

+/**
+ * crypto_lskcipher_encrypt_ext() - encrypt plaintext with continuation
+ * @tfm: lskcipher handle
+ * @src: source buffer
+ * @dst: destination buffer
+ * @len: number of bytes to process
+ * @siv: IV + state for the cipher operation. The length of the IV must
+ *	 comply with the IV size defined by crypto_lskcipher_ivsize. The
+ *	 IV is then followed by a state buffer whose length is specified
+ *	 by crypto_lskcipher_statesize.
+ * @flags: Indicates whether this is a continuation and/or final operation.
+ *
+ * Encrypt plaintext data using the lskcipher handle with continuation.
+ *
+ * Return: >=0 if the cipher operation was successful; if positive,
+ *	   that many bytes were left unprocessed;
+ *	   < 0 if an error occurred
+ */
+int crypto_lskcipher_encrypt_ext(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *siv, u32 flags);
+
+/**
+ * crypto_lskcipher_decrypt_ext() - decrypt ciphertext with continuation
+ * @tfm: lskcipher handle
+ * @src: source buffer
+ * @dst: destination buffer
+ * @len: number of bytes to process
+ * @siv: IV + state for the cipher operation. The length of the IV must
+ *	 comply with the IV size defined by crypto_lskcipher_ivsize. The
+ *	 IV is then followed by a state buffer whose length is specified
+ *	 by crypto_lskcipher_statesize.
+ * @flags: Indicates whether this is a continuation and/or final operation.
+ *
+ * Decrypt ciphertext data using the lskcipher handle with continuation.
+ *
+ * Return: >=0 if the cipher operation was successful; if positive,
+ *	   that many bytes were left unprocessed;
+ *	   < 0 if an error occurred
+ */
+int crypto_lskcipher_decrypt_ext(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *siv, u32 flags);
+
static inline struct crypto_instance *skcipher_crypto_instance(
struct skcipher_instance *inst)
{
Export the incremental interface internally so that composite
algorithms such as adiantum can use it.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/lskcipher.c                 | 45 +++++++++++++++++++++---------
 include/crypto/internal/skcipher.h | 42 ++++++++++++++++++++++++++++
 2 files changed, 74 insertions(+), 13 deletions(-)
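
Usage sketch (not part of the patch): a composite algorithm drives the
_ext interface by calling it once per chunk with the same siv buffer, so
the cipher state carries across calls, and sets
CRYPTO_LSKCIPHER_FLAG_FINAL only on the last call. The helper name
example_encrypt_in_parts below is hypothetical, and
CRYPTO_LSKCIPHER_FLAG_CONT is assumed to be the continuation flag paired
with CRYPTO_LSKCIPHER_FLAG_FINAL; only the _ext entry points and the siv
layout come from this patch.

#include <linux/errno.h>
#include <crypto/internal/skcipher.h>

static int example_encrypt_in_parts(struct crypto_lskcipher *tfm,
				    const u8 *src, u8 *dst, unsigned len,
				    u8 *siv, unsigned chunk)
{
	u32 flags = 0;	/* first call is not a continuation */
	int rem;

	/*
	 * siv holds crypto_lskcipher_ivsize(tfm) bytes of IV followed by
	 * crypto_lskcipher_statesize(tfm) bytes of state, as described
	 * in the kerneldoc above.
	 */
	while (len > chunk) {
		rem = crypto_lskcipher_encrypt_ext(tfm, src, dst, chunk,
						   siv, flags);
		if (rem < 0)
			return rem;
		if (rem == chunk)
			return -EINVAL;	/* no progress: chunk too small */

		/* A positive return is the number of unprocessed bytes. */
		src += chunk - rem;
		dst += chunk - rem;
		len -= chunk - rem;
		flags = CRYPTO_LSKCIPHER_FLAG_CONT;	/* assumed flag name */
	}

	/* Final call processes the tail and lets the algorithm finish. */
	return crypto_lskcipher_encrypt_ext(tfm, src, dst, len, siv,
					    flags | CRYPTO_LSKCIPHER_FLAG_FINAL);
}

The existing crypto_lskcipher_encrypt/decrypt then become trivial
one-shot wrappers that pass CRYPTO_LSKCIPHER_FLAG_FINAL, so current
callers are unaffected while composites such as adiantum can use the
incremental path.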