libceph: make sure ceph_aes_crypt() IV is aligned

commit 124f930b8cbc4ac11236e6eb1c5f008318864588 upstream.

... otherwise the crypto stack will align it for us with a GFP_ATOMIC
allocation and a memcpy() -- see skcipher_walk_first().

Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
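
For context, a minimal sketch of the pattern the fix relies on follows. The
helper example_encrypt() and its arguments are hypothetical and not part of
this patch; the sketch only illustrates that an on-stack IV declared with
__aligned(8) is assumed to satisfy the cbc(aes) alignmask, so
skcipher_walk_first() can use the buffer in place instead of bouncing it
through a GFP_ATOMIC allocation and a memcpy().

/*
 * Hypothetical illustration only -- not part of net/ceph/crypto.c.
 * Assumes <crypto/skcipher.h>, <crypto/aes.h> and <linux/string.h>.
 */
static int example_encrypt(struct crypto_skcipher *tfm,
			   struct scatterlist *sg, unsigned int len,
			   const void *key_iv)
{
	SKCIPHER_REQUEST_ON_STACK(req, tfm);
	/* aligned IV: the skcipher walk can use it directly, no bounce copy */
	char iv[AES_BLOCK_SIZE] __aligned(8);
	int ret;

	memcpy(iv, key_iv, AES_BLOCK_SIZE);

	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, len, iv);
	ret = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);

	return ret;
}
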
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index 38936e1..c485103 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -164,7 +164,7 @@ static int ceph_aes_crypt(const struct ceph_crypto_key *key, bool encrypt,
 	SKCIPHER_REQUEST_ON_STACK(req, tfm);
 	struct sg_table sgt;
 	struct scatterlist prealloc_sg;
-	char iv[AES_BLOCK_SIZE];
+	char iv[AES_BLOCK_SIZE] __aligned(8);
 	int pad_byte = AES_BLOCK_SIZE - (in_len & (AES_BLOCK_SIZE - 1));
 	int crypt_len = encrypt ? in_len + pad_byte : in_len;
 	int ret;