author    | Herbert Xu <herbert@gondor.apana.org.au> | 2007-04-16 20:48:54 +1000
committer | Herbert Xu <herbert@gondor.apana.org.au> | 2007-05-02 14:38:31 +1000
commit    | b5b7f08869340aa8cfa23303f7d195f161479592 (patch)
tree      | dd1f3f00165e7ca31e29a52d64909439cdfab8fd /include/crypto
parent    | ebc610e5bc76df073221e64e86c3f7533a09ea40 (diff)
[CRYPTO] api: Add async blkcipher type
This patch adds the mid-level interface for asynchronous block ciphers.
It also includes a generic queueing mechanism that can be used by other
asynchronous crypto operations in future.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
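As an illustration of the queueing mechanism this patch introduces, the sketch below shows how an asynchronous driver might drive the new crypto_queue helpers. It is not part of the patch: the my_dev structure, its spinlock and the worker function are hypothetical, and only crypto_init_queue(), crypto_enqueue_request(), crypto_get_backlog() and crypto_dequeue_request() come from this commit.

#include <crypto/algapi.h>
#include <linux/spinlock.h>

/* Hypothetical driver state: a queue of pending asynchronous requests. */
struct my_dev {
	spinlock_t lock;
	struct crypto_queue queue;
};

static void my_dev_init(struct my_dev *dev)
{
	spin_lock_init(&dev->lock);
	crypto_init_queue(&dev->queue, 50);	/* allow at most 50 queued requests */
}

/* Called from the asynchronous encrypt/decrypt entry point. */
static int my_dev_enqueue(struct my_dev *dev, struct crypto_async_request *req)
{
	int err;

	spin_lock_bh(&dev->lock);
	err = crypto_enqueue_request(&dev->queue, req);
	spin_unlock_bh(&dev->lock);

	/*
	 * -EINPROGRESS: queued normally.  -EBUSY: queue is full; the request
	 * is kept on the backlog if it set CRYPTO_TFM_REQ_MAY_BACKLOG,
	 * otherwise it is rejected.
	 */
	return err;
}

/* Called later, e.g. from a tasklet, to process one queued request. */
static void my_dev_do_one(struct my_dev *dev)
{
	struct crypto_async_request *req, *backlog;

	spin_lock_bh(&dev->lock);
	backlog = crypto_get_backlog(&dev->queue);
	req = crypto_dequeue_request(&dev->queue);
	spin_unlock_bh(&dev->lock);

	if (!req)
		return;

	/* Tell the owner of a formerly backlogged request it may proceed. */
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	/* ... perform the actual cipher work for req here ... */

	req->complete(req, 0);	/* report completion to the caller */
}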
Diffstat (limited to 'include/crypto')
-rw-r--r-- | include/crypto/algapi.h | 58
1 file changed, 58 insertions, 0 deletions
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index d0c190b4d02..469f511315c 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -13,6 +13,8 @@
 #define _CRYPTO_ALGAPI_H
 
 #include <linux/crypto.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
 
 struct module;
 struct rtattr;
@@ -51,6 +53,14 @@ struct crypto_spawn {
 	struct crypto_instance *inst;
 };
 
+struct crypto_queue {
+	struct list_head list;
+	struct list_head *backlog;
+
+	unsigned int qlen;
+	unsigned int max_qlen;
+};
+
 struct scatter_walk {
 	struct scatterlist *sg;
 	unsigned int offset;
@@ -82,6 +92,7 @@ struct blkcipher_walk {
 	int flags;
 };
 
+extern const struct crypto_type crypto_ablkcipher_type;
 extern const struct crypto_type crypto_blkcipher_type;
 extern const struct crypto_type crypto_hash_type;
 
@@ -103,6 +114,12 @@ struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb, u32 type, u32 mask);
 struct crypto_instance *crypto_alloc_instance(const char *name,
 					      struct crypto_alg *alg);
 
+void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
+int crypto_enqueue_request(struct crypto_queue *queue,
+			   struct crypto_async_request *request);
+struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
+int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm);
+
 int blkcipher_walk_done(struct blkcipher_desc *desc,
 			struct blkcipher_walk *walk, int err);
 int blkcipher_walk_virt(struct blkcipher_desc *desc,
@@ -125,6 +142,17 @@ static inline void *crypto_instance_ctx(struct crypto_instance *inst)
 	return inst->__ctx;
 }
 
+static inline struct ablkcipher_alg *crypto_ablkcipher_alg(
+	struct crypto_ablkcipher *tfm)
+{
+	return &crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_ablkcipher;
+}
+
+static inline void *crypto_ablkcipher_ctx(struct crypto_ablkcipher *tfm)
+{
+	return crypto_tfm_ctx(&tfm->base);
+}
+
 static inline void *crypto_blkcipher_ctx(struct crypto_blkcipher *tfm)
 {
 	return crypto_tfm_ctx(&tfm->base);
@@ -172,5 +200,35 @@ static inline void blkcipher_walk_init(struct blkcipher_walk *walk,
 	walk->total = nbytes;
 }
 
+static inline struct crypto_async_request *crypto_get_backlog(
+	struct crypto_queue *queue)
+{
+	return queue->backlog == &queue->list ? NULL :
+	       container_of(queue->backlog, struct crypto_async_request, list);
+}
+
+static inline int ablkcipher_enqueue_request(struct ablkcipher_alg *alg,
+					     struct ablkcipher_request *request)
+{
+	return crypto_enqueue_request(alg->queue, &request->base);
+}
+
+static inline struct ablkcipher_request *ablkcipher_dequeue_request(
+	struct ablkcipher_alg *alg)
+{
+	return ablkcipher_request_cast(crypto_dequeue_request(alg->queue));
+}
+
+static inline void *ablkcipher_request_ctx(struct ablkcipher_request *req)
+{
+	return req->__ctx;
+}
+
+static inline int ablkcipher_tfm_in_queue(struct crypto_ablkcipher *tfm)
+{
+	return crypto_tfm_in_queue(crypto_ablkcipher_alg(tfm)->queue,
+				   crypto_ablkcipher_tfm(tfm));
+}
+
 #endif /* _CRYPTO_ALGAPI_H */
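For completeness, a hedged sketch of how an ablkcipher implementation might use the per-algorithm wrappers declared above. The entry-point name and the my_req_ctx layout are hypothetical; ablkcipher_enqueue_request(), ablkcipher_dequeue_request(), ablkcipher_request_ctx() and crypto_ablkcipher_alg() are the helpers added by this patch, crypto_ablkcipher_reqtfm() is assumed from the companion linux/crypto.h interface, and the sketch assumes the algorithm's struct ablkcipher_alg has its ->queue set up with crypto_init_queue().

#include <crypto/algapi.h>

/* Hypothetical per-request state kept in the request's private context. */
struct my_req_ctx {
	unsigned int processed;
};

/* Asynchronous encrypt entry point: park the request on the algorithm's queue. */
static int my_async_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ablkcipher_alg *alg = crypto_ablkcipher_alg(tfm);
	struct my_req_ctx *rctx = ablkcipher_request_ctx(req);

	rctx->processed = 0;

	return ablkcipher_enqueue_request(alg, req);
}

/* Worker side: pull the next request for this algorithm off its queue. */
static struct ablkcipher_request *my_next_request(struct ablkcipher_alg *alg)
{
	return ablkcipher_dequeue_request(alg);
}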