author    | Linus Torvalds <torvalds@linux-foundation.org> | 2009-03-26 11:04:34 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-03-26 11:04:34 -0700
commit    | 562f477a54478002ddfbb5b85627c009ca41e71d (patch)
tree      | 52384cc554ae64cc7a26878d64d606f40fd703ce /crypto
parent    | ada19a31a90b4f46c040c25ef4ef8ffc203c7fc6 (diff)
parent    | 949abe574739848b1e68271fbac86c3cb4506aad (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (29 commits)
crypto: sha512-s390 - Add missing block size
hwrng: timeriomem - Breaks an allyesconfig build on s390:
nlattr: Fix build error with NET off
crypto: testmgr - add zlib test
crypto: zlib - New zlib crypto module, using pcomp
crypto: testmgr - Add support for the pcomp interface
crypto: compress - Add pcomp interface
netlink: Move netlink attribute parsing support to lib
crypto: Fix dead links
hwrng: timeriomem - New driver
crypto: chainiv - Use kcrypto_wq instead of keventd_wq
crypto: cryptd - Per-CPU thread implementation based on kcrypto_wq
crypto: api - Use dedicated workqueue for crypto subsystem
crypto: testmgr - Test skciphers with no IVs
crypto: aead - Avoid infinite loop when nivaead fails selftest
crypto: skcipher - Avoid infinite loop when cipher fails selftest
crypto: api - Fix crypto_alloc_tfm/create_create_tfm return convention
crypto: api - crypto_alg_mod_lookup either tested or untested
crypto: amcc - Add crypt4xx driver
crypto: ansi_cprng - Add maintainer
...
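
For orientation, the pcomp ("partial compression") interface added by this series is driven as a setup -> init -> update -> final sequence. Below is a minimal sketch of a kernel-side caller, modeled on the call pattern testmgr.c uses in the diff further down; the helper name and the single-shot buffer handling are illustrative assumptions, not part of the patch:

    #include <linux/err.h>
    #include <crypto/compress.h>

    /* Illustrative sketch only: compress one buffer via pcomp.
     * `params`/`paramsize` would be an nlattr blob such as
     * deflate_comp_params in crypto/testmgr.h below. */
    static int pcomp_compress_example(void *params, unsigned int paramsize,
                                      const void *src, unsigned int slen,
                                      void *dst, unsigned int dlen)
    {
            struct crypto_pcomp *tfm;
            struct comp_request req;
            int err;

            tfm = crypto_alloc_pcomp("zlib", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_compress_setup(tfm, params, paramsize);
            if (!err)
                    err = crypto_compress_init(tfm);
            if (!err) {
                    req.next_in = src;
                    req.avail_in = slen;
                    req.next_out = dst;
                    req.avail_out = dlen;

                    /* -EAGAIN with input still pending means "update again" */
                    err = crypto_compress_update(tfm, &req);
                    if (!err || (err == -EAGAIN && !req.avail_in))
                            err = crypto_compress_final(tfm, &req);
            }

            crypto_free_pcomp(tfm);
            return err;
    }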
Diffstat (limited to 'crypto')
-rw-r--r-- | crypto/Kconfig          |  44 |
-rw-r--r-- | crypto/Makefile         |   5 |
-rw-r--r-- | crypto/ablkcipher.c     |  19 |
-rw-r--r-- | crypto/aead.c           |  16 |
-rw-r--r-- | crypto/algboss.c        |  20 |
-rw-r--r-- | crypto/ansi_cprng.c     |  17 |
-rw-r--r-- | crypto/api.c            |  17 |
-rw-r--r-- | crypto/blkcipher.c      |   2 |
-rw-r--r-- | crypto/chainiv.c        |   3 |
-rw-r--r-- | crypto/cryptd.c         | 237 |
-rw-r--r-- | crypto/crypto_wq.c      |  38 |
-rw-r--r-- | crypto/gf128mul.c       |   2 |
-rw-r--r-- | crypto/internal.h       |   6 |
-rw-r--r-- | crypto/pcompress.c      |  97 |
-rw-r--r-- | crypto/sha256_generic.c |   2 |
-rw-r--r-- | crypto/shash.c          |  20 |
-rw-r--r-- | crypto/tcrypt.c         |   6 |
-rw-r--r-- | crypto/testmgr.c        | 198 |
-rw-r--r-- | crypto/testmgr.h        | 147 |
-rw-r--r-- | crypto/zlib.c           | 378 |
20 files changed, 1130 insertions(+), 144 deletions(-)
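
The cryptd rework in this merge replaces the single cryptd kthread with one crypto_queue plus work_struct per CPU, drained on the shared kcrypto_wq workqueue. Reduced to its skeleton (identifiers shortened for illustration; the real implementation is cryptd_init_queue()/cryptd_enqueue_request() in crypto/cryptd.c in the diff below):

    #include <linux/percpu.h>
    #include <linux/workqueue.h>
    #include <crypto/algapi.h>
    #include <crypto/crypto_wq.h>

    struct demo_cpu_queue {
            struct crypto_queue queue;
            struct work_struct work;        /* drained by kcrypto_wq */
    };

    static struct demo_cpu_queue *demo_queue;       /* from alloc_percpu() */

    /* Queue the request on the local CPU and kick that CPU's work item. */
    static int demo_enqueue(struct crypto_async_request *req)
    {
            struct demo_cpu_queue *cpu_queue;
            int cpu, err;

            cpu = get_cpu();
            cpu_queue = per_cpu_ptr(demo_queue, cpu);
            err = crypto_enqueue_request(&cpu_queue->queue, req);
            queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
            put_cpu();

            return err;
    }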
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 8dde4fcf99c..74d0e622a51 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -56,6 +56,7 @@ config CRYPTO_BLKCIPHER2
 	tristate
 	select CRYPTO_ALGAPI2
 	select CRYPTO_RNG2
+	select CRYPTO_WORKQUEUE
 
 config CRYPTO_HASH
 	tristate
@@ -75,6 +76,10 @@ config CRYPTO_RNG2
 	tristate
 	select CRYPTO_ALGAPI2
 
+config CRYPTO_PCOMP
+	tristate
+	select CRYPTO_ALGAPI2
+
 config CRYPTO_MANAGER
 	tristate "Cryptographic algorithm manager"
 	select CRYPTO_MANAGER2
@@ -87,6 +92,7 @@ config CRYPTO_MANAGER2
 	select CRYPTO_AEAD2
 	select CRYPTO_HASH2
 	select CRYPTO_BLKCIPHER2
+	select CRYPTO_PCOMP
 
 config CRYPTO_GF128MUL
 	tristate "GF(2^128) multiplication functions (EXPERIMENTAL)"
@@ -106,11 +112,15 @@ config CRYPTO_NULL
 	help
 	  These are 'Null' algorithms, used by IPsec, which do nothing.
 
+config CRYPTO_WORKQUEUE
+	tristate
+
 config CRYPTO_CRYPTD
 	tristate "Software async crypto daemon"
 	select CRYPTO_BLKCIPHER
 	select CRYPTO_HASH
 	select CRYPTO_MANAGER
+	select CRYPTO_WORKQUEUE
 	help
 	  This is a generic software asynchronous crypto daemon that
 	  converts an arbitrary synchronous software crypto algorithm
@@ -470,6 +480,31 @@ config CRYPTO_AES_X86_64
 
 	  See <http://csrc.nist.gov/encryption/aes/> for more information.
 
+config CRYPTO_AES_NI_INTEL
+	tristate "AES cipher algorithms (AES-NI)"
+	depends on (X86 || UML_X86) && 64BIT
+	select CRYPTO_AES_X86_64
+	select CRYPTO_CRYPTD
+	select CRYPTO_ALGAPI
+	help
+	  Use Intel AES-NI instructions for AES algorithm.
+
+	  AES cipher algorithms (FIPS-197). AES uses the Rijndael
+	  algorithm.
+
+	  Rijndael appears to be consistently a very good performer in
+	  both hardware and software across a wide range of computing
+	  environments regardless of its use in feedback or non-feedback
+	  modes. Its key setup time is excellent, and its key agility is
+	  good. Rijndael's very low memory requirements make it very well
+	  suited for restricted-space environments, in which it also
+	  demonstrates excellent performance. Rijndael's operations are
+	  among the easiest to defend against power and timing attacks.
+
+	  The AES specifies three key sizes: 128, 192 and 256 bits
+
+	  See <http://csrc.nist.gov/encryption/aes/> for more information.
+
 config CRYPTO_ANUBIS
 	tristate "Anubis cipher algorithm"
 	select CRYPTO_ALGAPI
@@ -714,6 +749,15 @@ config CRYPTO_DEFLATE
 
 	  You will most probably want this if using IPSec.
 
+config CRYPTO_ZLIB
+	tristate "Zlib compression algorithm"
+	select CRYPTO_PCOMP
+	select ZLIB_INFLATE
+	select ZLIB_DEFLATE
+	select NLATTR
+	help
+	  This is the zlib algorithm.
+
 config CRYPTO_LZO
 	tristate "LZO compression algorithm"
 	select CRYPTO_ALGAPI
diff --git a/crypto/Makefile b/crypto/Makefile
index 46b08bf2035..673d9f7c1bd 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -5,6 +5,8 @@ obj-$(CONFIG_CRYPTO) += crypto.o
 crypto-objs := api.o cipher.o digest.o compress.o
 
+obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o
+
 obj-$(CONFIG_CRYPTO_FIPS) += fips.o
 
 crypto_algapi-$(CONFIG_PROC_FS) += proc.o
@@ -25,6 +27,8 @@ crypto_hash-objs += ahash.o
 crypto_hash-objs += shash.o
 obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
 
+obj-$(CONFIG_CRYPTO_PCOMP) += pcompress.o
+
 cryptomgr-objs := algboss.o testmgr.o
 
 obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
@@ -70,6 +74,7 @@ obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o
 obj-$(CONFIG_CRYPTO_SEED) += seed.o
 obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o
 obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
+obj-$(CONFIG_CRYPTO_ZLIB) += zlib.o
 obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
 obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
 obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index 94140b3756f..e11ce37c710 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -282,6 +282,25 @@ static struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type,
 		      alg->cra_ablkcipher.ivsize))
 		return alg;
 
+	crypto_mod_put(alg);
+	alg = crypto_alg_mod_lookup(name, type | CRYPTO_ALG_TESTED,
+				    mask & ~CRYPTO_ALG_TESTED);
+	if (IS_ERR(alg))
+		return alg;
+
+	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+	    CRYPTO_ALG_TYPE_GIVCIPHER) {
+		if ((alg->cra_flags ^ type ^ ~mask) & CRYPTO_ALG_TESTED) {
+			crypto_mod_put(alg);
+			alg = ERR_PTR(-ENOENT);
+		}
+		return alg;
+	}
+
+	BUG_ON(!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+		 CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
+		 alg->cra_ablkcipher.ivsize));
+
 	return ERR_PTR(crypto_givcipher_default(alg, type, mask));
 }
diff --git a/crypto/aead.c b/crypto/aead.c
index 3a6f3f52c7c..d9aa733db16 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -422,6 +422,22 @@ static struct crypto_alg *crypto_lookup_aead(const char *name, u32 type,
 	if (!alg->cra_aead.ivsize)
 		return alg;
 
+	crypto_mod_put(alg);
+	alg = crypto_alg_mod_lookup(name, type | CRYPTO_ALG_TESTED,
+				    mask & ~CRYPTO_ALG_TESTED);
+	if (IS_ERR(alg))
+		return alg;
+
+	if (alg->cra_type == &crypto_aead_type) {
+		if ((alg->cra_flags ^ type ^ ~mask) & CRYPTO_ALG_TESTED) {
+			crypto_mod_put(alg);
+			alg = ERR_PTR(-ENOENT);
+		}
+		return alg;
+	}
+
+	BUG_ON(!alg->cra_aead.ivsize);
+
 	return ERR_PTR(crypto_nivaead_default(alg, type, mask));
 }
diff --git a/crypto/algboss.c b/crypto/algboss.c
index 4601e4267c8..6906f92aeac 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -10,7 +10,7 @@
  *
  */
 
-#include <linux/crypto.h>
+#include <crypto/internal/aead.h>
 #include <linux/ctype.h>
 #include <linux/err.h>
 #include <linux/init.h>
@@ -206,8 +206,7 @@ static int cryptomgr_test(void *data)
 	u32 type = param->type;
 	int err = 0;
 
-	if (!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) &
-	      CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV))
+	if (type & CRYPTO_ALG_TESTED)
 		goto skiptest;
 
 	err = alg_test(param->driver, param->alg, type, CRYPTO_ALG_TESTED);
@@ -223,6 +222,7 @@ static int cryptomgr_schedule_test(struct crypto_alg *alg)
 {
 	struct task_struct *thread;
 	struct crypto_test_param *param;
+	u32 type;
 
 	if (!try_module_get(THIS_MODULE))
 		goto err;
@@ -233,7 +233,19 @@ static int cryptomgr_schedule_test(struct crypto_alg *alg)
 	memcpy(param->driver, alg->cra_driver_name, sizeof(param->driver));
 	memcpy(param->alg, alg->cra_name,
 	       sizeof(param->alg));
-	param->type = alg->cra_flags;
+	type = alg->cra_flags;
+
+	/* This piece of crap needs to disappear into per-type test hooks. */
+	if ((!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) &
+	       CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV) &&
+	     ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+	      CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
+	      alg->cra_ablkcipher.ivsize)) ||
+	    (!((type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) &&
+	     alg->cra_type == &crypto_nivaead_type && alg->cra_aead.ivsize))
+		type |= CRYPTO_ALG_TESTED;
+
+	param->type = type;
 
 	thread = kthread_run(cryptomgr_test, param, "cryptomgr_test");
 	if (IS_ERR(thread))
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
index 0fac8ffc2fb..d80ed4c1e00 100644
--- a/crypto/ansi_cprng.c
+++ b/crypto/ansi_cprng.c
@@ -132,9 +132,15 @@ static int _get_more_prng_bytes(struct prng_context *ctx)
 	 */
 	if (!memcmp(ctx->rand_data, ctx->last_rand_data,
 		    DEFAULT_BLK_SZ)) {
+		if (fips_enabled) {
+			panic("cprng %p Failed repetition check!\n",
+			      ctx);
+		}
+
 		printk(KERN_ERR
 		       "ctx %p Failed repetition check!\n",
 		       ctx);
+
 		ctx->flags |= PRNG_NEED_RESET;
 		return -EINVAL;
 	}
@@ -338,7 +344,16 @@ static int cprng_init(struct crypto_tfm *tfm)
 
 	spin_lock_init(&ctx->prng_lock);
 
-	return reset_prng_context(ctx, NULL, DEFAULT_PRNG_KSZ, NULL, NULL);
+	if (reset_prng_context(ctx, NULL, DEFAULT_PRNG_KSZ, NULL, NULL) < 0)
+		return -EINVAL;
+
+	/*
+	 * after allocation, we should always force the user to reset
+	 * so they don't inadvertently use the insecure default values
+	 * without specifying them intentially
+	 */
+	ctx->flags |= PRNG_NEED_RESET;
+	return 0;
 }
 
 static void cprng_exit(struct crypto_tfm *tfm)
diff --git a/crypto/api.c b/crypto/api.c
index 38a2bc02a98..314dab96840 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -255,7 +255,7 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
 	struct crypto_alg *larval;
 	int ok;
 
-	if (!(mask & CRYPTO_ALG_TESTED)) {
+	if (!((type | mask) & CRYPTO_ALG_TESTED)) {
 		type |= CRYPTO_ALG_TESTED;
 		mask |= CRYPTO_ALG_TESTED;
 	}
@@ -464,8 +464,8 @@ err:
 }
 EXPORT_SYMBOL_GPL(crypto_alloc_base);
 
-struct crypto_tfm *crypto_create_tfm(struct crypto_alg *alg,
-				     const struct crypto_type *frontend)
+void *crypto_create_tfm(struct crypto_alg *alg,
+			const struct crypto_type *frontend)
 {
 	char *mem;
 	struct crypto_tfm *tfm = NULL;
@@ -499,9 +499,9 @@ out_free_tfm:
 		crypto_shoot_alg(alg);
 	kfree(mem);
 out_err:
-	tfm = ERR_PTR(err);
+	mem = ERR_PTR(err);
 out:
-	return tfm;
+	return mem;
 }
 EXPORT_SYMBOL_GPL(crypto_create_tfm);
 
@@ -525,12 +525,11 @@ EXPORT_SYMBOL_GPL(crypto_create_tfm);
  *
  *	In case of error the return value is an error pointer.
  */
-struct crypto_tfm *crypto_alloc_tfm(const char *alg_name,
-				    const struct crypto_type *frontend,
-				    u32 type, u32 mask)
+void *crypto_alloc_tfm(const char *alg_name,
+		       const struct crypto_type *frontend, u32 type, u32 mask)
 {
 	struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask);
-	struct crypto_tfm *tfm;
+	void *tfm;
 	int err;
 
 	type &= frontend->maskclear;
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index d70a41c002d..90d26c91f4e 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -521,7 +521,7 @@ static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn,
 	int err;
 
 	type = crypto_skcipher_type(type);
-	mask = crypto_skcipher_mask(mask) | CRYPTO_ALG_GENIV;
+	mask = crypto_skcipher_mask(mask)| CRYPTO_ALG_GENIV;
 
 	alg = crypto_alg_mod_lookup(name, type, mask);
 	if (IS_ERR(alg))
diff --git a/crypto/chainiv.c b/crypto/chainiv.c
index 7c37a497b86..ba200b07449 100644
--- a/crypto/chainiv.c
+++ b/crypto/chainiv.c
@@ -15,6 +15,7 @@
 
 #include <crypto/internal/skcipher.h>
 #include <crypto/rng.h>
+#include <crypto/crypto_wq.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -133,7 +134,7 @@ static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx)
 			goto out;
 	}
 
-	queued = schedule_work(&ctx->postponed);
+	queued = queue_work(kcrypto_wq, &ctx->postponed);
 	BUG_ON(!queued);
 
 out:
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index d29e06b350f..d14b22658d7 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -12,30 +12,31 @@
 
 #include <crypto/algapi.h>
 #include <crypto/internal/hash.h>
+#include <crypto/cryptd.h>
+#include <crypto/crypto_wq.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
-#include <linux/kthread.h>
 #include <linux/list.h>
 #include <linux/module.h>
-#include <linux/mutex.h>
 #include <linux/scatterlist.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
-#include <linux/spinlock.h>
 
-#define CRYPTD_MAX_QLEN 100
+#define CRYPTD_MAX_CPU_QLEN 100
 
-struct cryptd_state {
-	spinlock_t lock;
-	struct mutex mutex;
+struct cryptd_cpu_queue {
 	struct crypto_queue queue;
-	struct task_struct *task;
+	struct work_struct work;
+};
+
+struct cryptd_queue {
+	struct cryptd_cpu_queue *cpu_queue;
 };
 
 struct cryptd_instance_ctx {
 	struct crypto_spawn spawn;
-	struct cryptd_state *state;
+	struct cryptd_queue *queue;
 };
 
 struct cryptd_blkcipher_ctx {
@@ -54,11 +55,85 @@ struct cryptd_hash_request_ctx {
 	crypto_completion_t complete;
 };
 
-static inline struct cryptd_state *cryptd_get_state(struct crypto_tfm *tfm)
+static void cryptd_queue_worker(struct work_struct *work);
+
+static int cryptd_init_queue(struct cryptd_queue *queue,
+			     unsigned int max_cpu_qlen)
+{
+	int cpu;
+	struct cryptd_cpu_queue *cpu_queue;
+
+	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
+	if (!queue->cpu_queue)
+		return -ENOMEM;
+	for_each_possible_cpu(cpu) {
+		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
+		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
+		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
+	}
+	return 0;
+}
+
+static void cryptd_fini_queue(struct cryptd_queue *queue)
+{
+	int cpu;
+	struct cryptd_cpu_queue *cpu_queue;
+
+	for_each_possible_cpu(cpu) {
+		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
+		BUG_ON(cpu_queue->queue.qlen);
+	}
+	free_percpu(queue->cpu_queue);
+}
+
+static int cryptd_enqueue_request(struct cryptd_queue *queue,
+				  struct crypto_async_request *request)
+{
+	int cpu, err;
+	struct cryptd_cpu_queue *cpu_queue;
+
+	cpu = get_cpu();
+	cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
+	err = crypto_enqueue_request(&cpu_queue->queue, request);
+	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
+	put_cpu();
+
+	return err;
+}
+
+/* Called in workqueue context, do one real cryption work (via
+ * req->complete) and reschedule itself if there are more work to
+ * do. */
+static void cryptd_queue_worker(struct work_struct *work)
+{
+	struct cryptd_cpu_queue *cpu_queue;
+	struct crypto_async_request *req, *backlog;
+
+	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
+	/* Only handle one request at a time to avoid hogging crypto
+	 * workqueue. preempt_disable/enable is used to prevent
+	 * being preempted by cryptd_enqueue_request() */
+	preempt_disable();
+	backlog = crypto_get_backlog(&cpu_queue->queue);
+	req = crypto_dequeue_request(&cpu_queue->queue);
+	preempt_enable();
+
+	if (!req)
+		return;
+
+	if (backlog)
+		backlog->complete(backlog, -EINPROGRESS);
+	req->complete(req, 0);
+
+	if (cpu_queue->queue.qlen)
+		queue_work(kcrypto_wq, &cpu_queue->work);
+}
+
+static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
 {
 	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
 	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
-	return ictx->state;
+	return ictx->queue;
 }
 
 static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
@@ -130,19 +205,13 @@ static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
 {
 	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
-	struct cryptd_state *state =
-		cryptd_get_state(crypto_ablkcipher_tfm(tfm));
-	int err;
+	struct cryptd_queue *queue;
 
+	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
 	rctx->complete = req->base.complete;
 	req->base.complete = complete;
 
-	spin_lock_bh(&state->lock);
-	err = ablkcipher_enqueue_request(&state->queue, req);
-	spin_unlock_bh(&state->lock);
-
-	wake_up_process(state->task);
-	return err;
+	return cryptd_enqueue_request(queue, &req->base);
 }
 
 static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
@@ -176,21 +245,12 @@ static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
 static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
 {
 	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct cryptd_state *state = cryptd_get_state(tfm);
-	int active;
-
-	mutex_lock(&state->mutex);
-	active = ablkcipher_tfm_in_queue(&state->queue,
-					 __crypto_ablkcipher_cast(tfm));
-	mutex_unlock(&state->mutex);
-
-	BUG_ON(active);
 
 	crypto_free_blkcipher(ctx->child);
 }
 
 static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
-						     struct cryptd_state *state)
+						     struct cryptd_queue *queue)
 {
 	struct crypto_instance *inst;
 	struct cryptd_instance_ctx *ctx;
@@ -213,7 +273,7 @@ static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
 	if (err)
 		goto out_free_inst;
 
-	ctx->state = state;
+	ctx->queue = queue;
 
 	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
 
@@ -231,7 +291,7 @@ out_free_inst:
 }
 
 static struct crypto_instance *cryptd_alloc_blkcipher(
-	struct rtattr **tb, struct cryptd_state *state)
+	struct rtattr **tb, struct cryptd_queue *queue)
 {
 	struct crypto_instance *inst;
 	struct crypto_alg *alg;
@@ -241,7 +301,7 @@ static struct crypto_instance *cryptd_alloc_blkcipher(
 	if (IS_ERR(alg))
 		return ERR_CAST(alg);
 
-	inst = cryptd_alloc_instance(alg, state);
+	inst = cryptd_alloc_instance(alg, queue);
 	if (IS_ERR(inst))
 		goto out_put_alg;
 
@@ -289,15 +349,6 @@ static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
 static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
 {
 	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct cryptd_state *state = cryptd_get_state(tfm);
-	int active;
-
-	mutex_lock(&state->mutex);
-	active = ahash_tfm_in_queue(&state->queue,
-				    __crypto_ahash_cast(tfm));
-	mutex_unlock(&state->mutex);
-
-	BUG_ON(active);
 
 	crypto_free_hash(ctx->child);
 }
@@ -323,19 +374,13 @@ static int cryptd_hash_enqueue(struct ahash_request *req,
 {
 	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct cryptd_state *state =
-		cryptd_get_state(crypto_ahash_tfm(tfm));
-	int err;
+	struct cryptd_queue *queue =
+		cryptd_get_queue(crypto_ahash_tfm(tfm));
 
 	rctx->complete = req->base.complete;
 	req->base.complete = complete;
 
-	spin_lock_bh(&state->lock);
-	err = ahash_enqueue_request(&state->queue, req);
-	spin_unlock_bh(&state->lock);
-
-	wake_up_process(state->task);
-	return err;
+	return cryptd_enqueue_request(queue, &req->base);
 }
 
 static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
@@ -468,7 +513,7 @@ static int cryptd_hash_digest_enqueue(struct ahash_request *req)
 }
 
 static struct crypto_instance *cryptd_alloc_hash(
-	struct rtattr **tb, struct cryptd_state *state)
+	struct rtattr **tb, struct cryptd_queue *queue)
 {
 	struct crypto_instance *inst;
 	struct crypto_alg *alg;
@@ -478,7 +523,7 @@ static struct crypto_instance *cryptd_alloc_hash(
 	if (IS_ERR(alg))
 		return ERR_PTR(PTR_ERR(alg));
 
-	inst = cryptd_alloc_instance(alg, state);
+	inst = cryptd_alloc_instance(alg, queue);
 	if (IS_ERR(inst))
 		goto out_put_alg;
 
@@ -502,7 +547,7 @@ out_put_alg:
 	return inst;
 }
 
-static struct cryptd_state state;
+static struct cryptd_queue queue;
 
 static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
 {
@@ -514,9 +559,9 @@ static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
 
 	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
 	case CRYPTO_ALG_TYPE_BLKCIPHER:
-		return cryptd_alloc_blkcipher(tb, &state);
+		return cryptd_alloc_blkcipher(tb, &queue);
 	case CRYPTO_ALG_TYPE_DIGEST:
-		return cryptd_alloc_hash(tb, &state);
+		return cryptd_alloc_hash(tb, &queue);
 	}
 
 	return ERR_PTR(-EINVAL);
@@ -537,82 +582,58 @@ static struct crypto_template cryptd_tmpl = {
 	.module = THIS_MODULE,
 };
 
-static inline int cryptd_create_thread(struct cryptd_state *state,
-				       int (*fn)(void *data), const char *name)
-{
-	spin_lock_init(&state->lock);
-	mutex_init(&state->mutex);
-	crypto_init_queue(&state->queue, CRYPTD_MAX_QLEN);
-
-	state->task = kthread_run(fn, state, name);
-	if (IS_ERR(state->task))
-		return PTR_ERR(state->task);
+struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
+						  u32 type, u32 mask)
+{
+	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
+	struct crypto_ablkcipher *tfm;
+
+	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
+		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
+		return ERR_PTR(-EINVAL);
+	tfm = crypto_alloc_ablkcipher(cryptd_alg_name, type, mask);
+	if (IS_ERR(tfm))
+		return ERR_CAST(tfm);
+	if (crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_module != THIS_MODULE) {
+		crypto_free_ablkcipher(tfm);
+		return ERR_PTR(-EINVAL);
+	}
 
-	return 0;
+	return __cryptd_ablkcipher_cast(tfm);
 }
+EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
 
-static inline void cryptd_stop_thread(struct cryptd_state *state)
+struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
 {
-	BUG_ON(state->queue.qlen);
-	kthread_stop(state->task);
+	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
+	return ctx->child;
 }
+EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);
 
-static int cryptd_thread(void *data)
+void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
 {
-	struct cryptd_state *state = data;
-	int stop;
-
-	current->flags |= PF_NOFREEZE;
-
-	do {
-		struct crypto_async_request *req, *backlog;
-
-		mutex_lock(&state->mutex);
-		__set_current_state(TASK_INTERRUPTIBLE);
-
-		spin_lock_bh(&state->lock);
-		backlog = crypto_get_backlog(&state->queue);
-		req = crypto_dequeue_request(&state->queue);
-		spin_unlock_bh(&state->lock);
-
-		stop = kthread_should_stop();
-
-		if (stop || req) {
-			__set_current_state(TASK_RUNNING);
-			if (req) {
-				if (backlog)
-					backlog->complete(backlog,
-							  -EINPROGRESS);
-				req->complete(req, 0);
-			}
-		}
-
-		mutex_unlock(&state->mutex);
-
-		schedule();
-	} while (!stop);
-
-	return 0;
+	crypto_free_ablkcipher(&tfm->base);
 }
+EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
 
 static int __init cryptd_init(void)
 {
 	int err;
 
-	err = cryptd_create_thread(&state, cryptd_thread, "cryptd");
+	err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
 	if (err)
 		return err;
 
 	err = crypto_register_template(&cryptd_tmpl);
 	if (err)
-		kthread_stop(state.task);
+		cryptd_fini_queue(&queue);
 
 	return err;
 }
 
 static void __exit cryptd_exit(void)
 {
-	cryptd_stop_thread(&state);
+	cryptd_fini_queue(&queue);
 	crypto_unregister_template(&cryptd_tmpl);
 }
diff --git a/crypto/crypto_wq.c b/crypto/crypto_wq.c
new file mode 100644
index 00000000000..fdcf6248f15
--- /dev/null
+++ b/crypto/crypto_wq.c
@@ -0,0 +1,38 @@
+/*
+ * Workqueue for crypto subsystem
+ *
+ * Copyright (c) 2009 Intel Corp.
+ *   Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <linux/workqueue.h>
+#include <crypto/algapi.h>
+#include <crypto/crypto_wq.h>
+
+struct workqueue_struct *kcrypto_wq;
+EXPORT_SYMBOL_GPL(kcrypto_wq);
+
+static int __init crypto_wq_init(void)
+{
+	kcrypto_wq = create_workqueue("crypto");
+	if (unlikely(!kcrypto_wq))
+		return -ENOMEM;
+	return 0;
+}
+
+static void __exit crypto_wq_exit(void)
+{
+	destroy_workqueue(kcrypto_wq);
+}
+
+module_init(crypto_wq_init);
+module_exit(crypto_wq_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Workqueue for crypto subsystem");
diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
index ecbeaa1f17e..a90d260528d 100644
--- a/crypto/gf128mul.c
+++ b/crypto/gf128mul.c
@@ -4,7 +4,7 @@
  * Copyright (c) 2006, Rik Snel <rsnel@cube.dyndns.org>
  *
  * Based on Dr Brian Gladman's (GPL'd) work published at
- * http://fp.gladman.plus.com/cryptography_technology/index.htm
+ * http://gladman.plushost.co.uk/oldsite/cryptography_technology/index.php
  * See the original copyright notice below.
  *
  * This program is free software; you can redistribute it and/or modify it
diff --git a/crypto/internal.h b/crypto/internal.h
index 3c19a27a756..fc76e1f37fc 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -109,8 +109,10 @@ void crypto_alg_tested(const char *name, int err);
 void crypto_shoot_alg(struct crypto_alg *alg);
 struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
 				      u32 mask);
-struct crypto_tfm *crypto_create_tfm(struct crypto_alg *alg,
-				     const struct crypto_type *frontend);
+void *crypto_create_tfm(struct crypto_alg *alg,
+			const struct crypto_type *frontend);
+void *crypto_alloc_tfm(const char *alg_name,
+		       const struct crypto_type *frontend, u32 type, u32 mask);
 
 int crypto_register_instance(struct crypto_template *tmpl,
 			     struct crypto_instance *inst);
diff --git a/crypto/pcompress.c b/crypto/pcompress.c
new file mode 100644
index 00000000000..ca9a4af91ef
--- /dev/null
+++ b/crypto/pcompress.c
@@ -0,0 +1,97 @@
+/*
+ * Cryptographic API.
+ *
+ * Partial (de)compression operations.
+ *
+ * Copyright 2008 Sony Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ * If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/crypto.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+
+#include <crypto/compress.h>
+
+#include "internal.h"
+
+
+static int crypto_pcomp_init(struct crypto_tfm *tfm, u32 type, u32 mask)
+{
+	return 0;
+}
+
+static unsigned int crypto_pcomp_extsize(struct crypto_alg *alg,
+					 const struct crypto_type *frontend)
+{
+	return alg->cra_ctxsize;
+}
+
+static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm,
+				 const struct crypto_type *frontend)
+{
+	return 0;
+}
+
+static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg)
+	__attribute__ ((unused));
+static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg)
+{
+	seq_printf(m, "type : pcomp\n");
+}
+
+static const struct crypto_type crypto_pcomp_type = {
+	.extsize = crypto_pcomp_extsize,
+	.init = crypto_pcomp_init,
+	.init_tfm = crypto_pcomp_init_tfm,
+#ifdef CONFIG_PROC_FS
+	.show = crypto_pcomp_show,
+#endif
+	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
+	.maskset = CRYPTO_ALG_TYPE_MASK,
+	.type = CRYPTO_ALG_TYPE_PCOMPRESS,
+	.tfmsize = offsetof(struct crypto_pcomp, base),
+};
+
+struct crypto_pcomp *crypto_alloc_pcomp(const char *alg_name, u32 type,
+					u32 mask)
+{
+	return crypto_alloc_tfm(alg_name, &crypto_pcomp_type, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_pcomp);
+
+int crypto_register_pcomp(struct pcomp_alg *alg)
+{
+	struct crypto_alg *base = &alg->base;
+
+	base->cra_type = &crypto_pcomp_type;
+	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+	base->cra_flags |= CRYPTO_ALG_TYPE_PCOMPRESS;
+
+	return crypto_register_alg(base);
+}
+EXPORT_SYMBOL_GPL(crypto_register_pcomp);
+
+int crypto_unregister_pcomp(struct pcomp_alg *alg)
+{
+	return crypto_unregister_alg(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_pcomp);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Partial (de)compression type");
+MODULE_AUTHOR("Sony Corporation");
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
index caa3542e6ce..6349d8339d3 100644
--- a/crypto/sha256_generic.c
+++ b/crypto/sha256_generic.c
@@ -2,7 +2,7 @@
  * Cryptographic API.
  *
  * SHA-256, as specified in
- * http://csrc.nist.gov/cryptval/shs/sha256-384-512.pdf
+ * http://csrc.nist.gov/groups/STM/cavp/documents/shs/sha256-384-512.pdf
  *
  * SHA-256 code by Jean-Luc Cooke <jlcooke@certainkey.com>.
  *
diff --git a/crypto/shash.c b/crypto/shash.c
index d5a2b619c55..7a659733f94 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -18,15 +18,10 @@
 #include <linux/slab.h>
 #include <linux/seq_file.h>
 
-static const struct crypto_type crypto_shash_type;
-
-static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm)
-{
-	return container_of(tfm, struct crypto_shash, base);
-}
-
 #include "internal.h"
 
+static const struct crypto_type crypto_shash_type;
+
 static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
 				  unsigned int keylen)
 {
@@ -282,8 +277,7 @@ static int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
 	if (!crypto_mod_get(calg))
 		return -EAGAIN;
 
-	shash = __crypto_shash_cast(crypto_create_tfm(
-		calg, &crypto_shash_type));
+	shash = crypto_create_tfm(calg, &crypto_shash_type);
 	if (IS_ERR(shash)) {
 		crypto_mod_put(calg);
 		return PTR_ERR(shash);
@@ -391,8 +385,7 @@ static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm)
 	if (!crypto_mod_get(calg))
 		return -EAGAIN;
 
-	shash = __crypto_shash_cast(crypto_create_tfm(
-		calg, &crypto_shash_type));
+	shash = crypto_create_tfm(calg, &crypto_shash_type);
 	if (IS_ERR(shash)) {
 		crypto_mod_put(calg);
 		return PTR_ERR(shash);
@@ -442,8 +435,6 @@ static unsigned int crypto_shash_ctxsize(struct crypto_alg *alg, u32 type,
 static int crypto_shash_init_tfm(struct crypto_tfm *tfm,
 				 const struct crypto_type *frontend)
 {
-	if (frontend->type != CRYPTO_ALG_TYPE_SHASH)
-		return -EINVAL;
 	return 0;
 }
 
@@ -482,8 +473,7 @@ static const struct crypto_type crypto_shash_type = {
 struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type,
 					u32 mask)
 {
-	return __crypto_shash_cast(
-		crypto_alloc_tfm(alg_name, &crypto_shash_type, type, mask));
+	return crypto_alloc_tfm(alg_name, &crypto_shash_type, type, mask);
 }
 EXPORT_SYMBOL_GPL(crypto_alloc_shash);
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 28a45a1e6f4..c3c9124209a 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -53,7 +53,7 @@ static char *check[] = {
 	"cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
 	"khazad", "wp512", "wp384", "wp256", "tnepres", "xeta",  "fcrypt",
 	"camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
-	"lzo", "cts", NULL
+	"lzo", "cts", "zlib", NULL
 };
 
 static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc,
@@ -661,6 +661,10 @@ static void do_test(int m)
 		tcrypt_test("ecb(seed)");
 		break;
 
+	case 44:
+		tcrypt_test("zlib");
+		break;
+
 	case 100:
 		tcrypt_test("hmac(md5)");
 		break;
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index a75f11ffb95..b50c3c6b17a 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -72,6 +72,13 @@ struct comp_test_suite {
 	} comp, decomp;
 };
 
+struct pcomp_test_suite {
+	struct {
+		struct pcomp_testvec *vecs;
+		unsigned int count;
+	} comp, decomp;
+};
+
 struct hash_test_suite {
 	struct hash_testvec *vecs;
 	unsigned int count;
@@ -86,6 +93,7 @@ struct alg_test_desc {
 		struct aead_test_suite aead;
 		struct cipher_test_suite cipher;
 		struct comp_test_suite comp;
+		struct pcomp_test_suite pcomp;
 		struct hash_test_suite hash;
 	} suite;
 };
@@ -898,6 +906,159 @@ out:
 	return ret;
 }
 
+static int test_pcomp(struct crypto_pcomp *tfm,
+		      struct pcomp_testvec *ctemplate,
+		      struct pcomp_testvec *dtemplate, int ctcount,
+		      int dtcount)
+{
+	const char *algo = crypto_tfm_alg_driver_name(crypto_pcomp_tfm(tfm));
+	unsigned int i;
+	char result[COMP_BUF_SIZE];
+	int error;
+
+	for (i = 0; i < ctcount; i++) {
+		struct comp_request req;
+
+		error = crypto_compress_setup(tfm, ctemplate[i].params,
+					      ctemplate[i].paramsize);
+		if (error) {
+			pr_err("alg: pcomp: compression setup failed on test "
+			       "%d for %s: error=%d\n", i + 1, algo, error);
+			return error;
+		}
+
+		error = crypto_compress_init(tfm);
+		if (error) {
+			pr_err("alg: pcomp: compression init failed on test "
+			       "%d for %s: error=%d\n", i + 1, algo, error);
+			return error;
+		}
+
+		memset(result, 0, sizeof(result));
+
+		req.next_in = ctemplate[i].input;
+		req.avail_in = ctemplate[i].inlen / 2;
+		req.next_out = result;
+		req.avail_out = ctemplate[i].outlen / 2;
+
+		error = crypto_compress_update(tfm, &req);
+		if (error && (error != -EAGAIN || req.avail_in)) {
+			pr_err("alg: pcomp: compression update failed on test "
+			       "%d for %s: error=%d\n", i + 1, algo, error);
+			return error;
+		}
+
+		/* Add remaining input data */
+		req.avail_in += (ctemplate[i].inlen + 1) / 2;
+
+		error = crypto_compress_update(tfm, &req);
+		if (error && (error != -EAGAIN || req.avail_in)) {
+			pr_err("alg: pcomp: compression update failed on test "
+			       "%d for %s: error=%d\n", i + 1, algo, error);
+			return error;
+		}
+
+		/* Provide remaining output space */
+		req.avail_out += COMP_BUF_SIZE - ctemplate[i].outlen / 2;
+
+		error = crypto_compress_final(tfm, &req);
+		if (error) {
+			pr_err("alg: pcomp: compression final failed on test "
+			       "%d for %s: error=%d\n", i + 1, algo, error);
+			return error;
+		}
+
+		if (COMP_BUF_SIZE - req.avail_out != ctemplate[i].outlen) {
+			pr_err("alg: comp: Compression test %d failed for %s: "
+			       "output len = %d (expected %d)\n", i + 1, algo,
+			       COMP_BUF_SIZE - req.avail_out,
+			       ctemplate[i].outlen);
+			return -EINVAL;
+		}
+
+		if (memcmp(result, ctemplate[i].output, ctemplate[i].outlen)) {
+			pr_err("alg: pcomp: Compression test %d failed for "
+			       "%s\n", i + 1, algo);
+			hexdump(result, ctemplate[i].outlen);
+			return -EINVAL;
+		}
+	}
+
+	for (i = 0; i < dtcount; i++) {
+		struct comp_request req;
+
+		error = crypto_decompress_setup(tfm, dtemplate[i].params,
+						dtemplate[i].paramsize);
+		if (error) {
+			pr_err("alg: pcomp: decompression setup failed on "
+			       "test %d for %s: error=%d\n", i + 1, algo,
+			       error);
+			return error;
+		}
+
+		error = crypto_decompress_init(tfm);
+		if (error) {
+			pr_err("alg: pcomp: decompression init failed on test "
+			       "%d for %s: error=%d\n", i + 1, algo, error);
+			return error;
+		}
+
+		memset(result, 0, sizeof(result));
+
+		req.next_in = dtemplate[i].input;
+		req.avail_in = dtemplate[i].inlen / 2;
+		req.next_out = result;
+		req.avail_out = dtemplate[i].outlen / 2;
+
+		error = crypto_decompress_update(tfm, &req);
+		if (error && (error != -EAGAIN || req.avail_in)) {
+			pr_err("alg: pcomp: decompression update failed on "
+			       "test %d for %s: error=%d\n", i + 1, algo,
+			       error);
+			return error;
+		}
+
+		/* Add remaining input data */
+		req.avail_in += (dtemplate[i].inlen + 1) / 2;
+
+		error = crypto_decompress_update(tfm, &req);
+		if (error && (error != -EAGAIN || req.avail_in)) {
+			pr_err("alg: pcomp: decompression update failed on "
+			       "test %d for %s: error=%d\n", i + 1, algo,
+			       error);
+			return error;
+		}
+
+		/* Provide remaining output space */
+		req.avail_out += COMP_BUF_SIZE - dtemplate[i].outlen / 2;
+
+		error = crypto_decompress_final(tfm, &req);
+		if (error && (error != -EAGAIN || req.avail_in)) {
+			pr_err("alg: pcomp: decompression final failed on "
+			       "test %d for %s: error=%d\n", i + 1, algo,
+			       error);
+			return error;
+		}
+
+		if (COMP_BUF_SIZE - req.avail_out != dtemplate[i].outlen) {
+			pr_err("alg: comp: Decompression test %d failed for "
+			       "%s: output len = %d (expected %d)\n", i + 1,
+			       algo, COMP_BUF_SIZE - req.avail_out,
+			       dtemplate[i].outlen);
+			return -EINVAL;
+		}
+
+		if (memcmp(result, dtemplate[i].output, dtemplate[i].outlen)) {
+			pr_err("alg: pcomp: Decompression test %d failed for "
+			       "%s\n", i + 1, algo);
+			hexdump(result, dtemplate[i].outlen);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
 static int alg_test_aead(const struct alg_test_desc *desc, const char *driver,
 			 u32 type, u32 mask)
 {
@@ -1007,6 +1168,28 @@ static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
 	return err;
 }
 
+static int alg_test_pcomp(const struct alg_test_desc *desc, const char *driver,
+			  u32 type, u32 mask)
+{
+	struct crypto_pcomp *tfm;
+	int err;
+
+	tfm = crypto_alloc_pcomp(driver, type, mask);
+	if (IS_ERR(tfm)) {
+		pr_err("alg: pcomp: Failed to load transform for %s: %ld\n",
+		       driver, PTR_ERR(tfm));
+		return PTR_ERR(tfm);
+	}
+
+	err = test_pcomp(tfm, desc->suite.pcomp.comp.vecs,
+			 desc->suite.pcomp.decomp.vecs,
+			 desc->suite.pcomp.comp.count,
+			 desc->suite.pcomp.decomp.count);
+
+	crypto_free_pcomp(tfm);
+	return err;
+}
+
 static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
 			 u32 type, u32 mask)
 {
@@ -1835,6 +2018,21 @@ static const struct alg_test_desc alg_test_descs[] = {
 			}
 		}
+	}, {
+		.alg = "zlib",
+		.test = alg_test_pcomp,
+		.suite = {
+			.pcomp = {
+				.comp = {
+					.vecs = zlib_comp_tv_template,
+					.count = ZLIB_COMP_TEST_VECTORS
+				},
+				.decomp = {
+					.vecs = zlib_decomp_tv_template,
+					.count = ZLIB_DECOMP_TEST_VECTORS
+				}
+			}
+		}
 	}
 };
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 132953e144d..526f00a9c72 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -15,6 +15,11 @@
 #ifndef _CRYPTO_TESTMGR_H
 #define _CRYPTO_TESTMGR_H
 
+#include <linux/netlink.h>
+#include <linux/zlib.h>
+
+#include <crypto/compress.h>
+
 #define MAX_DIGEST_SIZE		64
 #define MAX_TAP			8
 
@@ -8347,10 +8352,19 @@ struct comp_testvec {
 	char output[COMP_BUF_SIZE];
 };
 
+struct pcomp_testvec {
+	void *params;
+	unsigned int paramsize;
+	int inlen, outlen;
+	char input[COMP_BUF_SIZE];
+	char output[COMP_BUF_SIZE];
+};
+
 /*
  * Deflate test vectors (null-terminated strings).
  * Params: winbits=-11, Z_DEFAULT_COMPRESSION, MAX_MEM_LEVEL.
  */
+
 #define DEFLATE_COMP_TEST_VECTORS 2
 #define DEFLATE_DECOMP_TEST_VECTORS 2
 
@@ -8426,6 +8440,139 @@ static struct comp_testvec deflate_decomp_tv_template[] = {
 	},
 };
 
+#define ZLIB_COMP_TEST_VECTORS 2
+#define ZLIB_DECOMP_TEST_VECTORS 2
+
+static const struct {
+	struct nlattr nla;
+	int val;
+} deflate_comp_params[] = {
+	{
+		.nla = {
+			.nla_len = NLA_HDRLEN + sizeof(int),
+			.nla_type = ZLIB_COMP_LEVEL,
+		},
+		.val = Z_DEFAULT_COMPRESSION,
+	}, {
+		.nla = {
+			.nla_len = NLA_HDRLEN + sizeof(int),
+			.nla_type = ZLIB_COMP_METHOD,
+		},
+		.val = Z_DEFLATED,
+	}, {
+		.nla = {
+			.nla_len = NLA_HDRLEN + sizeof(int),
+			.nla_type = ZLIB_COMP_WINDOWBITS,
+		},
+		.val = -11,
+	}, {
+		.nla = {
+			.nla_len = NLA_HDRLEN + sizeof(int),
+			.nla_type = ZLIB_COMP_MEMLEVEL,
+		},
+		.val = MAX_MEM_LEVEL,
+	}, {
+		.nla = {
+			.nla_len = NLA_HDRLEN + sizeof(int),
+			.nla_type = ZLIB_COMP_STRATEGY,
+		},
+		.val = Z_DEFAULT_STRATEGY,
+	}
+};
+
+static const struct {
+	struct nlattr nla;
+	int val;
+} deflate_decomp_params[] = {
+	{
+		.nla = {
+			.nla_len = NLA_HDRLEN + sizeof(int),
+			.nla_type = ZLIB_DECOMP_WINDOWBITS,
+		},
+		.val = -11,
+	}
+};
+
+static struct pcomp_testvec zlib_comp_tv_template[] = {
+	{
+		.params = &deflate_comp_params,
+		.paramsize = sizeof(deflate_comp_params),
+		.inlen = 70,
+		.outlen = 38,
+		.input = "Join us now and share the software "
+			 "Join us now and share the software ",
+		.output = "\xf3\xca\xcf\xcc\x53\x28\x2d\x56"
+			  "\xc8\xcb\x2f\x57\x48\xcc\x4b\x51"
+			  "\x28\xce\x48\x2c\x4a\x55\x28\xc9"
+			  "\x48\x55\x28\xce\x4f\x2b\x29\x07"
+			  "\x71\xbc\x08\x2b\x01\x00",
+	}, {
+		.params = &deflate_comp_params,
+		.paramsize = sizeof(deflate_comp_params),
+		.inlen = 191,
+		.outlen = 122,
+		.input = "This document describes a compression method based on the DEFLATE"
+			 "compression algorithm.  This document defines the application of "
+			 "the DEFLATE algorithm to the IP Payload Compression Protocol.",
+		.output = "\x5d\x8d\x31\x0e\xc2\x30\x10\x04"
+			  "\xbf\xb2\x2f\xc8\x1f\x10\x04\x09"
+			  "\x89\xc2\x85\x3f\x70\xb1\x2f\xf8"
+			  "\x24\xdb\x67\xd9\x47\xc1\xef\x49"
+			  "\x68\x12\x51\xae\x76\x67\xd6\x27"
+			  "\x19\x88\x1a\xde\x85\xab\x21\xf2"
+			  "\x08\x5d\x16\x1e\x20\x04\x2d\xad"
+			  "\xf3\x18\xa2\x15\x85\x2d\x69\xc4"
+			  "\x42\x83\x23\xb6\x6c\x89\x71\x9b"
+			  "\xef\xcf\x8b\x9f\xcf\x33\xca\x2f"
+			  "\xed\x62\xa9\x4c\x80\xff\x13\xaf"
+			  "\x52\x37\xed\x0e\x52\x6b\x59\x02"
+			  "\xd9\x4e\xe8\x7a\x76\x1d\x02\x98"
+			  "\xfe\x8a\x87\x83\xa3\x4f\x56\x8a"
+			  "\xb8\x9e\x8e\x5c\x57\xd3\xa0\x79"
+			  "\xfa\x02",
+	},
+};
+
+static struct pcomp_testvec zlib_decomp_tv_template[] = {
+	{
+		.params = &deflate_decomp_params,
+		.paramsize = sizeof(deflate_decomp_params),
+		.inlen = 122,
+		.outlen = 191,
+		.input = "\x5d\x8d\x31\x0e\xc2\x30\x10\x04"
+			 "\xbf\xb2\x2f\xc8\x1f\x10\x04\x09"
+			 "\x89\xc2\x85\x3f\x70\xb1\x2f\xf8"
+			 "\x24\xdb\x67\xd9\x47\xc1\xef\x49"
+			 "\x68\x12\x51\xae\x76\x67\xd6\x27"
+			 "\x19\x88\x1a\xde\x85\xab\x21\xf2"
+			 "\x08\x5d\x16\x1e\x20\x04\x2d\xad"
+			 "\xf3\x18\xa2\x15\x85\x2d\x69\xc4"
+			 "\x42\x83\x23\xb6\x6c\x89\x71\x9b"
+			 "\xef\xcf\x8b\x9f\xcf\x33\xca\x2f"
+			 "\xed\x62\xa9\x4c\x80\xff\x13\xaf"
+			 "\x52\x37\xed\x0e\x52\x6b\x59\x02"
+			 "\xd9\x4e\xe8\x7a\x76\x1d\x02\x98"
+			 "\xfe\x8a\x87\x83\xa3\x4f\x56\x8a"
+			 "\xb8\x9e\x8e\x5c\x57\xd3\xa0\x79"
+			 "\xfa\x02",
+		.output = "This document describes a compression method based on the DEFLATE"
+			  "compression algorithm.  This document defines the application of "
+			  "the DEFLATE algorithm to the IP Payload Compression Protocol.",
+	}, {
+		.params = &deflate_decomp_params,
+		.paramsize = sizeof(deflate_decomp_params),
+		.inlen = 38,
+		.outlen = 70,
+		.input = "\xf3\xca\xcf\xcc\x53\x28\x2d\x56"
+			 "\xc8\xcb\x2f\x57\x48\xcc\x4b\x51"
+			 "\x28\xce\x48\x2c\x4a\x55\x28\xc9"
+			 "\x48\x55\x28\xce\x4f\x2b\x29\x07"
+			 "\x71\xbc\x08\x2b\x01\x00",
+		.output = "Join us now and share the software "
+			  "Join us now and share the software ",
+	},
+};
+
 /*
  * LZO test vectors (null-terminated strings).
  */
diff --git a/crypto/zlib.c b/crypto/zlib.c
new file mode 100644
index 00000000000..33609bab614
--- /dev/null
+++ b/crypto/zlib.c
@@ -0,0 +1,378 @@
+/*
+ * Cryptographic API.
+ *
+ * Zlib algorithm
+ *
+ * Copyright 2008 Sony Corporation
+ *
+ * Based on deflate.c, which is
+ * Copyright (c) 2003 James Morris <jmorris@intercode.com.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * FIXME: deflate transforms will require up to a total of about 436k of kernel
+ * memory on i386 (390k for compression, the rest for decompression), as the
+ * current zlib kernel code uses a worst case pre-allocation system by default.
+ * This needs to be fixed so that the amount of memory required is properly
+ * related to the winbits and memlevel parameters.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/zlib.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/net.h>
+#include <linux/slab.h>
+
+#include <crypto/internal/compress.h>
+
+#include <net/netlink.h>
+
+
+struct zlib_ctx {
+	struct z_stream_s comp_stream;
+	struct z_stream_s decomp_stream;
+	int decomp_windowBits;
+};
+
+
+static void zlib_comp_exit(struct zlib_ctx *ctx)
+{
+	struct z_stream_s *stream = &ctx->comp_stream;
+
+	if (stream->workspace) {
+		zlib_deflateEnd(stream);
+		vfree(stream->workspace);
+		stream->workspace = NULL;
+	}
+}
+
+static void zlib_decomp_exit(struct zlib_ctx *ctx)
+{
+	struct z_stream_s *stream = &ctx->decomp_stream;
+
+	if (stream->workspace) {
+		zlib_inflateEnd(stream);
+		kfree(stream->workspace);
+		stream->workspace = NULL;
+	}
+}
+
+static int zlib_init(struct crypto_tfm *tfm)
+{
+	return 0;
+}
+
+static void zlib_exit(struct crypto_tfm *tfm)
+{
+	struct zlib_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	zlib_comp_exit(ctx);
+	zlib_decomp_exit(ctx);
+}
+
+
+static int zlib_compress_setup(struct crypto_pcomp *tfm, void *params,
+			       unsigned int len)
+{
+	struct zlib_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
+	struct z_stream_s *stream = &ctx->comp_stream;
+	struct nlattr *tb[ZLIB_COMP_MAX + 1];
+	size_t workspacesize;
+	int ret;
+
+	ret = nla_parse(tb, ZLIB_COMP_MAX, params, len, NULL);
+	if (ret)
+		return ret;
+
+	zlib_comp_exit(ctx);
+
+	workspacesize = zlib_deflate_workspacesize();
+	stream->workspace = vmalloc(workspacesize);
+	if (!stream->workspace)
+		return -ENOMEM;
+
+	memset(stream->workspace, 0, workspacesize);
+	ret = zlib_deflateInit2(stream,
+				tb[ZLIB_COMP_LEVEL]
+				? nla_get_u32(tb[ZLIB_COMP_LEVEL])
+				: Z_DEFAULT_COMPRESSION,
+				tb[ZLIB_COMP_METHOD]
+				? nla_get_u32(tb[ZLIB_COMP_METHOD])
+				: Z_DEFLATED,
+				tb[ZLIB_COMP_WINDOWBITS]
+				? nla_get_u32(tb[ZLIB_COMP_WINDOWBITS])
+				: MAX_WBITS,
+				tb[ZLIB_COMP_MEMLEVEL]
+				? nla_get_u32(tb[ZLIB_COMP_MEMLEVEL])
+				: DEF_MEM_LEVEL,
+				tb[ZLIB_COMP_STRATEGY]
+				? nla_get_u32(tb[ZLIB_COMP_STRATEGY])
+				: Z_DEFAULT_STRATEGY);
+	if (ret != Z_OK) {
+		vfree(stream->workspace);
+		stream->workspace = NULL;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int zlib_compress_init(struct crypto_pcomp *tfm)
+{
+	int ret;
+	struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
+	struct z_stream_s *stream = &dctx->comp_stream;
+
+	ret = zlib_deflateReset(stream);
+	if (ret != Z_OK)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int zlib_compress_update(struct crypto_pcomp *tfm,
+				struct comp_request *req)
+{
+	int ret;
+	struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
+	struct z_stream_s *stream = &dctx->comp_stream;
+
+	pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out);
+	stream->next_in = req->next_in;
+	stream->avail_in = req->avail_in;
+	stream->next_out = req->next_out;
+	stream->avail_out = req->avail_out;
+
+	ret = zlib_deflate(stream, Z_NO_FLUSH);
+	switch (ret) {
+	case Z_OK:
+		break;
+
+	case Z_BUF_ERROR:
+		pr_debug("zlib_deflate could not make progress\n");
+		return -EAGAIN;
+
+	default:
+		pr_debug("zlib_deflate failed %d\n", ret);
+		return -EINVAL;
+	}
+
+	pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n",
+		 stream->avail_in, stream->avail_out,
+		 req->avail_in - stream->avail_in,
+		 req->avail_out - stream->avail_out);
+	req->next_in = stream->next_in;
+	req->avail_in = stream->avail_in;
+	req->next_out = stream->next_out;
+	req->avail_out = stream->avail_out;
+	return 0;
+}
+
+static int zlib_compress_final(struct crypto_pcomp *tfm,
+			       struct comp_request *req)
+{
+	int ret;
+	struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
+	struct z_stream_s *stream = &dctx->comp_stream;
+
+	pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out);
+	stream->next_in = req->next_in;
+	stream->avail_in = req->avail_in;
+	stream->next_out = req->next_out;
+	stream->avail_out = req->avail_out;
+
+	ret = zlib_deflate(stream, Z_FINISH);
+	if (ret != Z_STREAM_END) {
+		pr_debug("zlib_deflate failed %d\n", ret);
+		return -EINVAL;
+	}
+
+	pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n",
+		 stream->avail_in, stream->avail_out,
+		 req->avail_in - stream->avail_in,
+		 req->avail_out - stream->avail_out);
+	req->next_in = stream->next_in;
+	req->avail_in = stream->avail_in;
+	req->next_out = stream->next_out;
+	req->avail_out = stream->avail_out;
+	return 0;
+}
+
+
+static int zlib_decompress_setup(struct crypto_pcomp *tfm, void *params,
+				 unsigned int len)
+{
+	struct zlib_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
+	struct z_stream_s *stream = &ctx->decomp_stream;
+	struct nlattr *tb[ZLIB_DECOMP_MAX + 1];
+	int ret = 0;
+
+	ret = nla_parse(tb, ZLIB_DECOMP_MAX, params, len, NULL);
+	if (ret)
+		return ret;
+
+	zlib_decomp_exit(ctx);
+
+	ctx->decomp_windowBits = tb[ZLIB_DECOMP_WINDOWBITS]
+				 ? nla_get_u32(tb[ZLIB_DECOMP_WINDOWBITS])
+				 : DEF_WBITS;
+
+	stream->workspace = kzalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
+	if (!stream->workspace)
+		return -ENOMEM;
+
+	ret = zlib_inflateInit2(stream, ctx->decomp_windowBits);
+	if (ret != Z_OK) {
+		kfree(stream->workspace);
+		stream->workspace = NULL;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int zlib_decompress_init(struct crypto_pcomp *tfm)
+{
+	int ret;
+	struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
+	struct z_stream_s *stream = &dctx->decomp_stream;
+
+	ret = zlib_inflateReset(stream);
+	if (ret != Z_OK)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int zlib_decompress_update(struct crypto_pcomp *tfm,
+				  struct comp_request *req)
+{
+	int ret;
+	struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
+	struct z_stream_s *stream = &dctx->decomp_stream;
+
+	pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out);
+	stream->next_in = req->next_in;
+	stream->avail_in = req->avail_in;
+	stream->next_out = req->next_out;
+	stream->avail_out = req->avail_out;
+
+	ret = zlib_inflate(stream, Z_SYNC_FLUSH);
+	switch (ret) {
+	case Z_OK:
+	case Z_STREAM_END:
+		break;
+
+	case Z_BUF_ERROR:
+		pr_debug("zlib_inflate could not make progress\n");
+		return -EAGAIN;
+
+	default:
+		pr_debug("zlib_inflate failed %d\n", ret);
+		return -EINVAL;
+	}
+
+	pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n",
+		 stream->avail_in, stream->avail_out,
+		 req->avail_in - stream->avail_in,
+		 req->avail_out - stream->avail_out);
+	req->next_in = stream->next_in;
+	req->avail_in = stream->avail_in;
+	req->next_out = stream->next_out;
+	req->avail_out = stream->avail_out;
+	return 0;
+}
+
+static int zlib_decompress_final(struct crypto_pcomp *tfm,
+				 struct comp_request *req)
+{
+	int ret;
+	struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
+	struct z_stream_s *stream = &dctx->decomp_stream;
+
+	pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out);
+	stream->next_in = req->next_in;
+	stream->avail_in = req->avail_in;
+	stream->next_out = req->next_out;
+	stream->avail_out = req->avail_out;
+
+	if (dctx->decomp_windowBits < 0) {
+		ret = zlib_inflate(stream, Z_SYNC_FLUSH);
+		/*
+		 * Work around a bug in zlib, which sometimes wants to taste an
+		 * extra byte when being used in the (undocumented) raw deflate
+		 * mode. (From USAGI).
+		 */
+		if (ret == Z_OK && !stream->avail_in && stream->avail_out) {
+			const void *saved_next_in = stream->next_in;
+			u8 zerostuff = 0;
+
+			stream->next_in = &zerostuff;
+			stream->avail_in = 1;
+			ret = zlib_inflate(stream, Z_FINISH);
+			stream->next_in = saved_next_in;
+			stream->avail_in = 0;
+		}
+	} else
+		ret = zlib_inflate(stream, Z_FINISH);
+	if (ret != Z_STREAM_END) {
+		pr_debug("zlib_inflate failed %d\n", ret);
+		return -EINVAL;
+	}
+
+	pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n",
+		 stream->avail_in, stream->avail_out,
+		 req->avail_in - stream->avail_in,
+		 req->avail_out - stream->avail_out);
+	req->next_in = stream->next_in;
+	req->avail_in = stream->avail_in;
+	req->next_out = stream->next_out;
+	req->avail_out = stream->avail_out;
+	return 0;
+}
+
+
+static struct pcomp_alg zlib_alg = {
+	.compress_setup		= zlib_compress_setup,
+	.compress_init		= zlib_compress_init,
+	.compress_update	= zlib_compress_update,
+	.compress_final		= zlib_compress_final,
+	.decompress_setup	= zlib_decompress_setup,
+	.decompress_init	= zlib_decompress_init,
+	.decompress_update	= zlib_decompress_update,
+	.decompress_final	= zlib_decompress_final,
+
+	.base			= {
+		.cra_name	= "zlib",
+		.cra_flags	= CRYPTO_ALG_TYPE_PCOMPRESS,
+		.cra_ctxsize	= sizeof(struct zlib_ctx),
+		.cra_module	= THIS_MODULE,
+		.cra_init	= zlib_init,
+		.cra_exit	= zlib_exit,
+	}
+};
+
+static int __init zlib_mod_init(void)
+{
+	return crypto_register_pcomp(&zlib_alg);
+}
+
+static void __exit zlib_mod_fini(void)
+{
+	crypto_unregister_pcomp(&zlib_alg);
+}
+
+module_init(zlib_mod_init);
+module_exit(zlib_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Zlib Compression Algorithm");
+MODULE_AUTHOR("Sony Corporation");
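
cryptd_alloc_ablkcipher() and cryptd_ablkcipher_child(), exported in the cryptd.c changes above, let a driver (such as the AES-NI code that CRYPTO_AES_NI_INTEL enables) hand requests to an asynchronous "cryptd(...)" instance while still reaching the synchronous child transform. A hedged sketch of a caller; the algorithm name and the helper are examples only, not taken from this series:

    #include <linux/err.h>
    #include <crypto/cryptd.h>

    /* Illustrative only: wrap a synchronous blkcipher in cryptd. */
    static int demo_cryptd_user(void)
    {
            struct cryptd_ablkcipher *tfm;
            struct crypto_blkcipher *child;

            tfm = cryptd_alloc_ablkcipher("cbc(aes)", 0, 0); /* example name */
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            /* The child is the synchronous transform cryptd drives for us;
             * keys would typically be programmed through it. */
            child = cryptd_ablkcipher_child(tfm);
            (void)child;

            cryptd_free_ablkcipher(tfm);
            return 0;
    }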