[CRYPTO] api: Add async blkcipher type

This patch adds the mid-level interface for asynchronous block ciphers.
It also includes a generic request queueing mechanism, struct
crypto_queue, complete with backlog support, that other asynchronous
crypto operations can reuse in the future.
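
For illustration, given an ablkcipher tfm, a caller would drive the new
asynchronous interface roughly as follows. This is only a sketch: it
assumes the request helpers this patch adds to include/linux/crypto.h
(not visible in the algapi.h hunk below) and trims all error handling.

	static void my_done(struct crypto_async_request *req, int err)
	{
		/* -EINPROGRESS only means the request has left the
		 * backlog; wait for the final completion status. */
		if (err == -EINPROGRESS)
			return;
		complete(req->data);
	}

	...

	struct ablkcipher_request *req;
	DECLARE_COMPLETION_ONSTACK(done);
	int err;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					my_done, &done);
	ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);

	err = crypto_ablkcipher_encrypt(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		wait_for_completion(&done);

	ablkcipher_request_free(req);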

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index d0c190b..469f511 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -13,6 +13,8 @@
 #define _CRYPTO_ALGAPI_H
 
 #include <linux/crypto.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
 
 struct module;
 struct rtattr;
@@ -51,6 +53,14 @@
 	struct crypto_instance *inst;
 };
 
+struct crypto_queue {
+	struct list_head list;
+	struct list_head *backlog;
+
+	unsigned int qlen;
+	unsigned int max_qlen;
+};
+
 struct scatter_walk {
 	struct scatterlist *sg;
 	unsigned int offset;
@@ -82,6 +92,7 @@
 	int flags;
 };
 
+extern const struct crypto_type crypto_ablkcipher_type;
 extern const struct crypto_type crypto_blkcipher_type;
 extern const struct crypto_type crypto_hash_type;
 
@@ -103,6 +114,12 @@
 struct crypto_instance *crypto_alloc_instance(const char *name,
 					      struct crypto_alg *alg);
 
+void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
+int crypto_enqueue_request(struct crypto_queue *queue,
+			   struct crypto_async_request *request);
+struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
+int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm);
+
 int blkcipher_walk_done(struct blkcipher_desc *desc,
 			struct blkcipher_walk *walk, int err);
 int blkcipher_walk_virt(struct blkcipher_desc *desc,
@@ -125,6 +142,17 @@
 	return inst->__ctx;
 }
 
+static inline struct ablkcipher_alg *crypto_ablkcipher_alg(
+	struct crypto_ablkcipher *tfm)
+{
+	return &crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_ablkcipher;
+}
+
+static inline void *crypto_ablkcipher_ctx(struct crypto_ablkcipher *tfm)
+{
+	return crypto_tfm_ctx(&tfm->base);
+}
+
 static inline void *crypto_blkcipher_ctx(struct crypto_blkcipher *tfm)
 {
 	return crypto_tfm_ctx(&tfm->base);
@@ -172,5 +200,35 @@
 	walk->total = nbytes;
 }
 
+static inline struct crypto_async_request *crypto_get_backlog(
+	struct crypto_queue *queue)
+{
+	return queue->backlog == &queue->list ? NULL :
+	       container_of(queue->backlog, struct crypto_async_request, list);
+}
+
+static inline int ablkcipher_enqueue_request(struct ablkcipher_alg *alg,
+					     struct ablkcipher_request *request)
+{
+	return crypto_enqueue_request(alg->queue, &request->base);
+}
+
+static inline struct ablkcipher_request *ablkcipher_dequeue_request(
+	struct ablkcipher_alg *alg)
+{
+	return ablkcipher_request_cast(crypto_dequeue_request(alg->queue));
+}
+
+static inline void *ablkcipher_request_ctx(struct ablkcipher_request *req)
+{
+	return req->__ctx;
+}
+
+static inline int ablkcipher_tfm_in_queue(struct crypto_ablkcipher *tfm)
+{
+	return crypto_tfm_in_queue(crypto_ablkcipher_alg(tfm)->queue,
+				   crypto_ablkcipher_tfm(tfm));
+}
+
 #endif	/* _CRYPTO_ALGAPI_H */
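
To show how the new queueing helpers fit together on the driver side,
here is a minimal sketch (the mydrv_* names and the lock/queue
placement are hypothetical, not part of this patch). The backlog
handshake is the convention these helpers assume: when a backlogged
request is promoted onto the live queue, its owner is first notified
with -EINPROGRESS, and only later with the final completion status.

	/* Submission path: queue the request under the driver lock. */
	static int mydrv_encrypt(struct ablkcipher_request *req)
	{
		struct mydrv *drv = mydrv_from_request(req); /* hypothetical */
		int err;

		spin_lock_bh(&drv->lock);
		err = crypto_enqueue_request(&drv->queue, &req->base);
		spin_unlock_bh(&drv->lock);

		/* -EINPROGRESS: queued.  -EBUSY: backlogged if
		 * CRYPTO_TFM_REQ_MAY_BACKLOG was set, dropped otherwise. */
		return err;
	}

	/* Processing path, e.g. run from a tasklet or kernel thread. */
	static void mydrv_do_one(struct mydrv *drv)
	{
		struct crypto_async_request *req, *backlog;

		spin_lock_bh(&drv->lock);
		backlog = crypto_get_backlog(&drv->queue);
		req = crypto_dequeue_request(&drv->queue);
		spin_unlock_bh(&drv->lock);

		if (backlog)
			/* A request moved off the backlog; tell its
			 * owner it is now genuinely in progress. */
			backlog->complete(backlog, -EINPROGRESS);

		if (!req)
			return;

		req->complete(req, mydrv_process(drv,
				ablkcipher_request_cast(req)));
	}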