Diffstat (limited to 'drivers')
-rw-r--r--	drivers/char/hw_random/Kconfig          |   2
-rw-r--r--	drivers/char/hw_random/omap-rng.c       |   2
-rw-r--r--	drivers/char/hw_random/timeriomem-rng.c |  26
-rw-r--r--	drivers/char/hw_random/via-rng.c        |  15
-rw-r--r--	drivers/crypto/Kconfig                  |   2
-rw-r--r--	drivers/crypto/hifn_795x.c              |   8
-rw-r--r--	drivers/crypto/padlock-aes.c            |  13
-rw-r--r--	drivers/crypto/talitos.c                | 713
8 files changed, 558 insertions, 223 deletions
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 5fab6470f4b..9c00440dcf8 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -88,7 +88,7 @@ config HW_RANDOM_N2RNG
 
 config HW_RANDOM_VIA
 	tristate "VIA HW Random Number Generator support"
-	depends on HW_RANDOM && X86_32
+	depends on HW_RANDOM && X86
 	default HW_RANDOM
 	---help---
 	  This driver provides kernel-side support for the Random Number
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 538313f9e7a..00dd3de1be5 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -89,7 +89,7 @@ static struct hwrng omap_rng_ops = {
 	.data_read	= omap_rng_data_read,
 };
 
-static int __init omap_rng_probe(struct platform_device *pdev)
+static int __devinit omap_rng_probe(struct platform_device *pdev)
 {
 	struct resource *res, *mem;
 	int ret;
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index dcd352ad0e7..a94e930575f 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -88,9 +88,9 @@ static struct hwrng timeriomem_rng_ops = {
 	.priv		= 0,
 };
 
-static int __init timeriomem_rng_probe(struct platform_device *pdev)
+static int __devinit timeriomem_rng_probe(struct platform_device *pdev)
 {
-	struct resource *res, *mem;
+	struct resource *res;
 	int ret;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -98,21 +98,12 @@ static int __init timeriomem_rng_probe(struct platform_device *pdev)
 	if (!res)
 		return -ENOENT;
 
-	mem = request_mem_region(res->start, res->end - res->start + 1,
-				 pdev->name);
-	if (mem == NULL)
-		return -EBUSY;
-
-	dev_set_drvdata(&pdev->dev, mem);
-
 	timeriomem_rng_data = pdev->dev.platform_data;
 
 	timeriomem_rng_data->address = ioremap(res->start,
 					       res->end - res->start + 1);
-	if (!timeriomem_rng_data->address) {
-		ret = -ENOMEM;
-		goto err_ioremap;
-	}
+	if (!timeriomem_rng_data->address)
+		return -EIO;
 
 	if (timeriomem_rng_data->period != 0
 	    && usecs_to_jiffies(timeriomem_rng_data->period) > 0) {
@@ -125,7 +116,7 @@ static int __init timeriomem_rng_probe(struct platform_device *pdev)
 
 	ret = hwrng_register(&timeriomem_rng_ops);
 	if (ret)
-		goto err_register;
+		goto failed;
 
 	dev_info(&pdev->dev, "32bits from 0x%p @ %dus\n",
 		 timeriomem_rng_data->address,
@@ -133,24 +124,19 @@ static int __init timeriomem_rng_probe(struct platform_device *pdev)
 
 	return 0;
 
-err_register:
+failed:
 	dev_err(&pdev->dev, "problem registering\n");
 	iounmap(timeriomem_rng_data->address);
-err_ioremap:
-	release_resource(mem);
 
 	return ret;
 }
 
 static int __devexit timeriomem_rng_remove(struct platform_device *pdev)
 {
-	struct resource *mem = dev_get_drvdata(&pdev->dev);
-
 	del_timer_sync(&timeriomem_rng_timer);
 	hwrng_unregister(&timeriomem_rng_ops);
 
 	iounmap(timeriomem_rng_data->address);
-	release_resource(mem);
 
 	return 0;
 }
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index 4e9573c1d39..794aacb715c 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -132,6 +132,19 @@ static int via_rng_init(struct hwrng *rng)
 	struct cpuinfo_x86 *c = &cpu_data(0);
 	u32 lo, hi, old_lo;
 
+	/* VIA Nano CPUs don't have the MSR_VIA_RNG anymore.  The RNG
+	 * is always enabled if CPUID rng_en is set.  There is no
+	 * RNG configuration like it used to be the case in this
+	 * register */
+	if ((c->x86 == 6) && (c->x86_model >= 0x0f)) {
+		if (!cpu_has_xstore_enabled) {
+			printk(KERN_ERR PFX "can't enable hardware RNG "
+				"if XSTORE is not enabled\n");
+			return -ENODEV;
+		}
+		return 0;
+	}
+
 	/* Control the RNG via MSR.  Tread lightly and pay very close
 	 * close attention to values written, as the reserved fields
 	 * are documented to be "undefined and unpredictable"; but it
@@ -205,5 +218,5 @@ static void __exit mod_exit(void)
 module_init(mod_init);
 module_exit(mod_exit);
 
-MODULE_DESCRIPTION("H/W RNG driver for VIA chipsets");
+MODULE_DESCRIPTION("H/W RNG driver for VIA CPU with PadLock");
 MODULE_LICENSE("GPL");
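For context on the via-rng hunk above: on VIA Nano (family 6, model >= 0x0f) the MSR-based configuration is gone, so the driver only checks that the xstore RNG is enabled. A minimal user-space sketch of the same check, assuming GCC's cpuid.h and VIA's documented Centaur leaf 0xC0000001 (EDX bit 2 = RNG present, bit 3 = RNG enabled); illustrative only, not driver code:

	/* probe-via-rng.c: hedged sketch, build with gcc on a VIA CPU */
	#include <cpuid.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int max_leaf, eax, ebx, ecx, edx;

		/* leaf 0xC0000000 reports the highest Centaur extended leaf */
		__cpuid(0xC0000000, max_leaf, ebx, ecx, edx);
		if (max_leaf < 0xC0000001) {
			puts("no Centaur feature leaf (not a VIA CPU?)");
			return 1;
		}

		__cpuid(0xC0000001, eax, ebx, ecx, edx);
		printf("xstore RNG present: %s, enabled: %s\n",
		       (edx & (1u << 2)) ? "yes" : "no",
		       (edx & (1u << 3)) ? "yes" : "no");
		return 0;
	}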
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 01afd758072..e748e55bd86 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -12,7 +12,7 @@ if CRYPTO_HW
 
 config CRYPTO_DEV_PADLOCK
 	tristate "Support for VIA PadLock ACE"
-	depends on X86_32 && !UML
+	depends on X86 && !UML
 	select CRYPTO_ALGAPI
 	help
 	  Some VIA processors come with an integrated crypto engine
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 2bef086fb34..5f753fc0873 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -2564,7 +2564,7 @@ static void hifn_tasklet_callback(unsigned long data)
 	hifn_process_queue(dev);
 }
 
-static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int __devinit hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	int err, i;
 	struct hifn_device *dev;
@@ -2696,7 +2696,7 @@ err_out_disable_pci_device:
 	return err;
 }
 
-static void hifn_remove(struct pci_dev *pdev)
+static void __devexit hifn_remove(struct pci_dev *pdev)
 {
 	int i;
 	struct hifn_device *dev;
@@ -2744,7 +2744,7 @@ static struct pci_driver hifn_pci_driver = {
 	.remove		= __devexit_p(hifn_remove),
 };
 
-static int __devinit hifn_init(void)
+static int __init hifn_init(void)
 {
 	unsigned int freq;
 	int err;
@@ -2789,7 +2789,7 @@ static int __devinit hifn_init(void)
 	return 0;
 }
 
-static void __devexit hifn_fini(void)
+static void __exit hifn_fini(void)
 {
 	pci_unregister_driver(&hifn_pci_driver);
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 856b3cc2558..87f92c39b5f 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -154,7 +154,11 @@ static inline void padlock_reset_key(struct cword *cword)
 	int cpu = raw_smp_processor_id();
 
 	if (cword != per_cpu(last_cword, cpu))
+#ifndef CONFIG_X86_64
 		asm volatile ("pushfl; popfl");
+#else
+		asm volatile ("pushfq; popfq");
+#endif
 }
 
 static inline void padlock_store_cword(struct cword *cword)
@@ -208,10 +212,19 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
 
 	asm volatile ("test $1, %%cl;"
 		      "je 1f;"
+#ifndef CONFIG_X86_64
 		      "lea -1(%%ecx), %%eax;"
 		      "mov $1, %%ecx;"
+#else
+		      "lea -1(%%rcx), %%rax;"
+		      "mov $1, %%rcx;"
+#endif
 		      ".byte 0xf3,0x0f,0xa7,0xc8;"	/* rep xcryptecb */
+#ifndef CONFIG_X86_64
 		      "mov %%eax, %%ecx;"
+#else
+		      "mov %%rax, %%rcx;"
+#endif
 		      "1:"
 		      ".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
 		      : "+S"(input), "+D"(output)
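For context on the padlock-aes hunks above: the 64-bit port mostly swaps register widths, since pushfl/popfl and the e-prefixed registers do not exist in 64-bit mode. A minimal sketch of the flag-reload idiom in both modes (illustrative, keyed off the compiler macro instead of CONFIG_X86_64 so it compiles standalone; padlock_reset_key above uses the reload to make the PadLock unit pick up new key material):

	/* eflags-reload.c: the pushf/popf trick, width-correct per mode */
	static inline void padlock_style_flag_reload(void)
	{
	#ifdef __x86_64__
		asm volatile("pushfq; popfq");	/* 64-bit spelling */
	#else
		asm volatile("pushfl; popfl");	/* 32-bit spelling */
	#endif
	}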
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index a3918c16b3d..c70775fd3ce 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -44,6 +44,8 @@
 #include <crypto/sha.h>
 #include <crypto/aead.h>
 #include <crypto/authenc.h>
+#include <crypto/skcipher.h>
+#include <crypto/scatterwalk.h>
 
 #include "talitos.h"
 
@@ -339,7 +341,8 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
 			status = error;
 
 		dma_unmap_single(dev, request->dma_desc,
-			sizeof(struct talitos_desc), DMA_BIDIRECTIONAL);
+				 sizeof(struct talitos_desc),
+				 DMA_BIDIRECTIONAL);
 
 		/* copy entries so we can call callback outside lock */
 		saved_req.desc = request->desc;
@@ -413,7 +416,8 @@ static struct talitos_desc *current_desc(struct device *dev, int ch)
 /*
  * user diagnostics; report root cause of error based on execution unit status
  */
-static void report_eu_error(struct device *dev, int ch, struct talitos_desc *desc)
+static void report_eu_error(struct device *dev, int ch,
+			    struct talitos_desc *desc)
 {
 	struct talitos_private *priv = dev_get_drvdata(dev);
 	int i;
@@ -684,8 +688,8 @@ struct talitos_ctx {
 	unsigned int authsize;
 };
 
-static int aead_authenc_setauthsize(struct crypto_aead *authenc,
-				    unsigned int authsize)
+static int aead_setauthsize(struct crypto_aead *authenc,
+			    unsigned int authsize)
 {
 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
 
@@ -694,8 +698,8 @@ static int aead_authenc_setauthsize(struct crypto_aead *authenc,
 	return 0;
 }
 
-static int aead_authenc_setkey(struct crypto_aead *authenc,
-			       const u8 *key, unsigned int keylen)
+static int aead_setkey(struct crypto_aead *authenc,
+		       const u8 *key, unsigned int keylen)
 {
 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
 	struct rtattr *rta = (void *)key;
@@ -740,7 +744,7 @@ badkey:
 }
 
 /*
- * ipsec_esp_edesc - s/w-extended ipsec_esp descriptor
+ * talitos_edesc - s/w-extended descriptor
  * @src_nents: number of segments in input scatterlist
  * @dst_nents: number of segments in output scatterlist
  * @dma_len: length of dma mapped link_tbl space
@@ -752,17 +756,67 @@ badkey:
  * is greater than 1, an integrity check value is concatenated to the end
  * of link_tbl data
  */
-struct ipsec_esp_edesc {
+struct talitos_edesc {
 	int src_nents;
 	int dst_nents;
+	int src_is_chained;
+	int dst_is_chained;
 	int dma_len;
 	dma_addr_t dma_link_tbl;
 	struct talitos_desc desc;
 	struct talitos_ptr link_tbl[0];
 };
 
+static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
+			  unsigned int nents, enum dma_data_direction dir,
+			  int chained)
+{
+	if (unlikely(chained))
+		while (sg) {
+			dma_map_sg(dev, sg, 1, dir);
+			sg = scatterwalk_sg_next(sg);
+		}
+	else
+		dma_map_sg(dev, sg, nents, dir);
+	return nents;
+}
+
+static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
+				   enum dma_data_direction dir)
+{
+	while (sg) {
+		dma_unmap_sg(dev, sg, 1, dir);
+		sg = scatterwalk_sg_next(sg);
+	}
+}
+
+static void talitos_sg_unmap(struct device *dev,
+			     struct talitos_edesc *edesc,
+			     struct scatterlist *src,
+			     struct scatterlist *dst)
+{
+	unsigned int src_nents = edesc->src_nents ? : 1;
+	unsigned int dst_nents = edesc->dst_nents ? : 1;
+
+	if (src != dst) {
+		if (edesc->src_is_chained)
+			talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
+		else
+			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
+
+		if (edesc->dst_is_chained)
+			talitos_unmap_sg_chain(dev, dst, DMA_FROM_DEVICE);
+		else
+			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+	} else
+		if (edesc->src_is_chained)
+			talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
+		else
+			dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
+}
+
 static void ipsec_esp_unmap(struct device *dev,
-			    struct ipsec_esp_edesc *edesc,
+			    struct talitos_edesc *edesc,
 			    struct aead_request *areq)
 {
 	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
@@ -772,15 +826,7 @@ static void ipsec_esp_unmap(struct device *dev,
 
 	dma_unmap_sg(dev, areq->assoc, 1, DMA_TO_DEVICE);
 
-	if (areq->src != areq->dst) {
-		dma_unmap_sg(dev, areq->src, edesc->src_nents ? : 1,
-			     DMA_TO_DEVICE);
-		dma_unmap_sg(dev, areq->dst, edesc->dst_nents ? : 1,
-			     DMA_FROM_DEVICE);
-	} else {
-		dma_unmap_sg(dev, areq->src, edesc->src_nents ? : 1,
-			     DMA_BIDIRECTIONAL);
-	}
+	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
 
 	if (edesc->dma_len)
 		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
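A side note on the `edesc->src_nents ? : 1` expressions above: this is the GNU C conditional with an omitted middle operand, not a typo. The driver stores 0 nents to mean "single segment", which must still be mapped and unmapped as one entry. Portable equivalent for clarity (hypothetical helper, not in the patch):

	static inline unsigned int nents_or_one(int nents)
	{
		/* GNU extension (nents ? : 1) spelled out portably */
		return nents ? nents : 1;
	}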
@@ -795,13 +841,14 @@ static void ipsec_esp_encrypt_done(struct device *dev,
 				   int err)
 {
 	struct aead_request *areq = context;
-	struct ipsec_esp_edesc *edesc =
-		 container_of(desc, struct ipsec_esp_edesc, desc);
 	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
+	struct talitos_edesc *edesc;
 	struct scatterlist *sg;
 	void *icvdata;
 
+	edesc = container_of(desc, struct talitos_edesc, desc);
+
 	ipsec_esp_unmap(dev, edesc, areq);
 
 	/* copy the generated ICV to dst */
@@ -819,17 +866,18 @@ static void ipsec_esp_encrypt_done(struct device *dev,
 }
 
 static void ipsec_esp_decrypt_swauth_done(struct device *dev,
-					  struct talitos_desc *desc, void *context,
-					  int err)
+					  struct talitos_desc *desc,
+					  void *context, int err)
 {
 	struct aead_request *req = context;
-	struct ipsec_esp_edesc *edesc =
-		 container_of(desc, struct ipsec_esp_edesc, desc);
 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
+	struct talitos_edesc *edesc;
 	struct scatterlist *sg;
 	void *icvdata;
 
+	edesc = container_of(desc, struct talitos_edesc, desc);
+
 	ipsec_esp_unmap(dev, edesc, req);
 
 	if (!err) {
@@ -851,20 +899,20 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
 }
 
 static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
-					  struct talitos_desc *desc, void *context,
-					  int err)
+					  struct talitos_desc *desc,
+					  void *context, int err)
 {
 	struct aead_request *req = context;
-	struct ipsec_esp_edesc *edesc =
-		 container_of(desc, struct ipsec_esp_edesc, desc);
+	struct talitos_edesc *edesc;
+
+	edesc = container_of(desc, struct talitos_edesc, desc);
 
 	ipsec_esp_unmap(dev, edesc, req);
 
 	/* check ICV auth status */
-	if (!err)
-		if ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
-		    DESC_HDR_LO_ICCR1_PASS)
-			err = -EBADMSG;
+	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
+		     DESC_HDR_LO_ICCR1_PASS))
+		err = -EBADMSG;
 
 	kfree(edesc);
 
@@ -886,7 +934,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
 		link_tbl_ptr->j_extent = 0;
 		link_tbl_ptr++;
 		cryptlen -= sg_dma_len(sg);
-		sg = sg_next(sg);
+		sg = scatterwalk_sg_next(sg);
 	}
 
 	/* adjust (decrease) last one (or two) entry's len to cryptlen */
@@ -910,7 +958,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
 /*
  * fill in and submit ipsec_esp descriptor
  */
-static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
+static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 		     u8 *giv, u64 seq,
 		     void (*callback) (struct device *dev,
 				       struct talitos_desc *desc,
@@ -952,32 +1000,31 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
 	desc->ptr[4].len = cpu_to_be16(cryptlen);
 	desc->ptr[4].j_extent = authsize;
 
-	if (areq->src == areq->dst)
-		sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ? : 1,
-				      DMA_BIDIRECTIONAL);
-	else
-		sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ? : 1,
-				      DMA_TO_DEVICE);
+	sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
+				  (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
+							   : DMA_TO_DEVICE,
+				  edesc->src_is_chained);
 
 	if (sg_count == 1) {
 		desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
 	} else {
 		sg_link_tbl_len = cryptlen;
 
-		if ((edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV) &&
-		    (edesc->desc.hdr & DESC_HDR_MODE0_ENCRYPT) == 0) {
+		if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
 			sg_link_tbl_len = cryptlen + authsize;
-		}
+
 		sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
 					  &edesc->link_tbl[0]);
 		if (sg_count > 1) {
 			desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
 			desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl);
-			dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
-						   edesc->dma_len, DMA_BIDIRECTIONAL);
+			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
+						   edesc->dma_len,
+						   DMA_BIDIRECTIONAL);
 		} else {
 			/* Only one segment now, so no link tbl needed */
-			desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
+			desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->
+								      src));
 		}
 	}
 
@@ -985,10 +1032,11 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
 	desc->ptr[5].len = cpu_to_be16(cryptlen);
 	desc->ptr[5].j_extent = authsize;
 
-	if (areq->src != areq->dst) {
-		sg_count = dma_map_sg(dev, areq->dst, edesc->dst_nents ? : 1,
-				      DMA_FROM_DEVICE);
-	}
+	if (areq->src != areq->dst)
+		sg_count = talitos_map_sg(dev, areq->dst,
+					  edesc->dst_nents ? : 1,
+					  DMA_FROM_DEVICE,
+					  edesc->dst_is_chained);
 
 	if (sg_count == 1) {
 		desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst));
@@ -1033,49 +1081,55 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
 	return ret;
 }
 
-
 /*
  * derive number of elements in scatterlist
  */
-static int sg_count(struct scatterlist *sg_list, int nbytes)
+static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained)
 {
 	struct scatterlist *sg = sg_list;
 	int sg_nents = 0;
 
-	while (nbytes) {
+	*chained = 0;
+	while (nbytes > 0) {
 		sg_nents++;
 		nbytes -= sg->length;
-		sg = sg_next(sg);
+		if (!sg_is_last(sg) && (sg + 1)->length == 0)
+			*chained = 1;
+		sg = scatterwalk_sg_next(sg);
 	}
 
 	return sg_nents;
}
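A note on the chain detection added to sg_count() above: sg_chain() turns the last slot of one scatterlist array into a link entry whose length stays 0, which is exactly what the `(sg + 1)->length == 0` peek keys on. A minimal kernel-style sketch of building such a list (illustrative; example_chain is not from the patch):

	#include <linux/scatterlist.h>

	/* Build a two-part chained scatterlist of the kind the new
	 * sg_count()/talitos_map_sg() handle.  part1 holds 3 data slots
	 * plus 1 link slot after sg_chain(). */
	static void example_chain(struct scatterlist part1[4],
				  struct scatterlist part2[8])
	{
		sg_init_table(part1, 4);
		sg_init_table(part2, 8);
		sg_chain(part1, 4, part2);	/* part1[3] now links to part2 */
	}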
 
 /*
- * allocate and map the ipsec_esp extended descriptor
+ * allocate and map the extended descriptor
  */
-static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
-						     int icv_stashing)
+static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
+						 struct scatterlist *src,
+						 struct scatterlist *dst,
+						 unsigned int cryptlen,
+						 unsigned int authsize,
+						 int icv_stashing,
+						 u32 cryptoflags)
 {
-	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
-	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
-	struct ipsec_esp_edesc *edesc;
+	struct talitos_edesc *edesc;
 	int src_nents, dst_nents, alloc_len, dma_len;
-	gfp_t flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+	int src_chained, dst_chained = 0;
+	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
 		      GFP_ATOMIC;
 
-	if (areq->cryptlen + ctx->authsize > TALITOS_MAX_DATA_LEN) {
-		dev_err(ctx->dev, "cryptlen exceeds h/w max limit\n");
+	if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) {
+		dev_err(dev, "length exceeds h/w max limit\n");
 		return ERR_PTR(-EINVAL);
 	}
 
-	src_nents = sg_count(areq->src, areq->cryptlen + ctx->authsize);
+	src_nents = sg_count(src, cryptlen + authsize, &src_chained);
 	src_nents = (src_nents == 1) ? 0 : src_nents;
 
-	if (areq->dst == areq->src) {
+	if (dst == src) {
 		dst_nents = src_nents;
 	} else {
-		dst_nents = sg_count(areq->dst, areq->cryptlen + ctx->authsize);
+		dst_nents = sg_count(dst, cryptlen + authsize, &dst_chained);
 		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
 	}
 
@@ -1084,39 +1138,52 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
 	 * allowing for two separate entries for ICV and generated ICV (+ 2),
 	 * and the ICV data itself
 	 */
-	alloc_len = sizeof(struct ipsec_esp_edesc);
+	alloc_len = sizeof(struct talitos_edesc);
 	if (src_nents || dst_nents) {
 		dma_len = (src_nents + dst_nents + 2) *
-				 sizeof(struct talitos_ptr) + ctx->authsize;
+				 sizeof(struct talitos_ptr) + authsize;
 		alloc_len += dma_len;
 	} else {
 		dma_len = 0;
-		alloc_len += icv_stashing ? ctx->authsize : 0;
+		alloc_len += icv_stashing ? authsize : 0;
 	}
 
 	edesc = kmalloc(alloc_len, GFP_DMA | flags);
 	if (!edesc) {
-		dev_err(ctx->dev, "could not allocate edescriptor\n");
+		dev_err(dev, "could not allocate edescriptor\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
 	edesc->src_nents = src_nents;
 	edesc->dst_nents = dst_nents;
+	edesc->src_is_chained = src_chained;
+	edesc->dst_is_chained = dst_chained;
 	edesc->dma_len = dma_len;
-	edesc->dma_link_tbl = dma_map_single(ctx->dev, &edesc->link_tbl[0],
+	edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
 					     edesc->dma_len, DMA_BIDIRECTIONAL);
 
 	return edesc;
 }
 
-static int aead_authenc_encrypt(struct aead_request *req)
+static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq,
+					      int icv_stashing)
+{
+	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
+	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
+
+	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
+				   areq->cryptlen, ctx->authsize, icv_stashing,
+				   areq->base.flags);
+}
+
+static int aead_encrypt(struct aead_request *req)
 {
 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
-	struct ipsec_esp_edesc *edesc;
+	struct talitos_edesc *edesc;
 
 	/* allocate extended descriptor */
-	edesc = ipsec_esp_edesc_alloc(req, 0);
+	edesc = aead_edesc_alloc(req, 0);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
@@ -1126,70 +1193,67 @@ static int aead_authenc_encrypt(struct aead_request *req)
 	return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_encrypt_done);
 }
 
-
-
-static int aead_authenc_decrypt(struct aead_request *req)
+static int aead_decrypt(struct aead_request *req)
 {
 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
 	unsigned int authsize = ctx->authsize;
 	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
-	struct ipsec_esp_edesc *edesc;
+	struct talitos_edesc *edesc;
 	struct scatterlist *sg;
 	void *icvdata;
 
 	req->cryptlen -= authsize;
 
 	/* allocate extended descriptor */
-	edesc = ipsec_esp_edesc_alloc(req, 1);
+	edesc = aead_edesc_alloc(req, 1);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
 	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
-	    (((!edesc->src_nents && !edesc->dst_nents) ||
-	      priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT))) {
+	    ((!edesc->src_nents && !edesc->dst_nents) ||
+	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
 
 		/* decrypt and check the ICV */
-		edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND |
+		edesc->desc.hdr = ctx->desc_hdr_template |
+				  DESC_HDR_DIR_INBOUND |
 				  DESC_HDR_MODE1_MDEU_CICV;
 
 		/* reset integrity check result bits */
 		edesc->desc.hdr_lo = 0;
 
-		return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_hwauth_done);
+		return ipsec_esp(edesc, req, NULL, 0,
+				 ipsec_esp_decrypt_hwauth_done);
 
-	} else {
-
-		/* Have to check the ICV with software */
+	}
 
-		edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
+	/* Have to check the ICV with software */
+	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
 
-		/* stash incoming ICV for later cmp with ICV generated by the h/w */
-		if (edesc->dma_len)
-			icvdata = &edesc->link_tbl[edesc->src_nents +
-						   edesc->dst_nents + 2];
-		else
-			icvdata = &edesc->link_tbl[0];
+	/* stash incoming ICV for later cmp with ICV generated by the h/w */
+	if (edesc->dma_len)
+		icvdata = &edesc->link_tbl[edesc->src_nents +
+					   edesc->dst_nents + 2];
+	else
+		icvdata = &edesc->link_tbl[0];
 
-		sg = sg_last(req->src, edesc->src_nents ? : 1);
+	sg = sg_last(req->src, edesc->src_nents ? : 1);
 
-		memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
-		       ctx->authsize);
+	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
+	       ctx->authsize);
 
-		return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_swauth_done);
-	}
+	return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_swauth_done);
 }
 
-static int aead_authenc_givencrypt(
-	struct aead_givcrypt_request *req)
+static int aead_givencrypt(struct aead_givcrypt_request *req)
 {
 	struct aead_request *areq = &req->areq;
 	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
-	struct ipsec_esp_edesc *edesc;
+	struct talitos_edesc *edesc;
 
 	/* allocate extended descriptor */
-	edesc = ipsec_esp_edesc_alloc(areq, 0);
+	edesc = aead_edesc_alloc(areq, 0);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
@@ -1204,31 +1268,228 @@ static int aead_authenc_givencrypt(
 			 ipsec_esp_encrypt_done);
 }
 
+static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
+			     const u8 *key, unsigned int keylen)
+{
+	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+	struct ablkcipher_alg *alg = crypto_ablkcipher_alg(cipher);
+
+	if (keylen > TALITOS_MAX_KEY_SIZE)
+		goto badkey;
+
+	if (keylen < alg->min_keysize || keylen > alg->max_keysize)
+		goto badkey;
+
+	memcpy(&ctx->key, key, keylen);
+	ctx->keylen = keylen;
+
+	return 0;
+
+badkey:
+	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	return -EINVAL;
+}
+
+static void common_nonsnoop_unmap(struct device *dev,
+				  struct talitos_edesc *edesc,
+				  struct ablkcipher_request *areq)
+{
+	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
+	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
+	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
+
+	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
+
+	if (edesc->dma_len)
+		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
+				 DMA_BIDIRECTIONAL);
+}
+
+static void ablkcipher_done(struct device *dev,
+			    struct talitos_desc *desc, void *context,
+			    int err)
+{
+	struct ablkcipher_request *areq = context;
+	struct talitos_edesc *edesc;
+
+	edesc = container_of(desc, struct talitos_edesc, desc);
+
+	common_nonsnoop_unmap(dev, edesc, areq);
+
+	kfree(edesc);
+
+	areq->base.complete(&areq->base, err);
+}
+
+static int common_nonsnoop(struct talitos_edesc *edesc,
+			   struct ablkcipher_request *areq,
+			   u8 *giv,
+			   void (*callback) (struct device *dev,
+					     struct talitos_desc *desc,
+					     void *context, int error))
+{
+	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+	struct device *dev = ctx->dev;
+	struct talitos_desc *desc = &edesc->desc;
+	unsigned int cryptlen = areq->nbytes;
+	unsigned int ivsize;
+	int sg_count, ret;
+
+	/* first DWORD empty */
+	desc->ptr[0].len = 0;
+	desc->ptr[0].ptr = 0;
+	desc->ptr[0].j_extent = 0;
+
+	/* cipher iv */
+	ivsize = crypto_ablkcipher_ivsize(cipher);
+	map_single_talitos_ptr(dev, &desc->ptr[1], ivsize, giv ?: areq->info, 0,
+			       DMA_TO_DEVICE);
+
+	/* cipher key */
+	map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
+			       (char *)&ctx->key, 0, DMA_TO_DEVICE);
+
+	/*
+	 * cipher in
+	 */
+	desc->ptr[3].len = cpu_to_be16(cryptlen);
+	desc->ptr[3].j_extent = 0;
+
+	sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
+				  (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
+							   : DMA_TO_DEVICE,
+				  edesc->src_is_chained);
+
+	if (sg_count == 1) {
+		desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->src));
+	} else {
+		sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
+					  &edesc->link_tbl[0]);
+		if (sg_count > 1) {
+			desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
+			desc->ptr[3].ptr = cpu_to_be32(edesc->dma_link_tbl);
+			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
+						   edesc->dma_len,
+						   DMA_BIDIRECTIONAL);
+		} else {
+			/* Only one segment now, so no link tbl needed */
+			desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->
+								      src));
+		}
+	}
+
+	/* cipher out */
+	desc->ptr[4].len = cpu_to_be16(cryptlen);
+	desc->ptr[4].j_extent = 0;
+
+	if (areq->src != areq->dst)
+		sg_count = talitos_map_sg(dev, areq->dst,
+					  edesc->dst_nents ? : 1,
+					  DMA_FROM_DEVICE,
+					  edesc->dst_is_chained);
+
+	if (sg_count == 1) {
+		desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->dst));
+	} else {
+		struct talitos_ptr *link_tbl_ptr =
+			&edesc->link_tbl[edesc->src_nents + 1];
+
+		desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
+		desc->ptr[4].ptr = cpu_to_be32((struct talitos_ptr *)
+					       edesc->dma_link_tbl +
+					       edesc->src_nents + 1);
+		sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
+					  link_tbl_ptr);
+		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
+					   edesc->dma_len, DMA_BIDIRECTIONAL);
+	}
+
+	/* iv out */
+	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, 0,
+			       DMA_FROM_DEVICE);
+
+	/* last DWORD empty */
+	desc->ptr[6].len = 0;
+	desc->ptr[6].ptr = 0;
+	desc->ptr[6].j_extent = 0;
+
+	ret = talitos_submit(dev, desc, callback, areq);
+	if (ret != -EINPROGRESS) {
+		common_nonsnoop_unmap(dev, edesc, areq);
+		kfree(edesc);
+	}
+	return ret;
+}
+
+static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
+						    areq)
+{
+	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, areq->nbytes,
+				   0, 0, areq->base.flags);
+}
+
+static int ablkcipher_encrypt(struct ablkcipher_request *areq)
+{
+	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+	struct talitos_edesc *edesc;
+
+	/* allocate extended descriptor */
+	edesc = ablkcipher_edesc_alloc(areq);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/* set encrypt */
+	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
+
+	return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
+}
+
+static int ablkcipher_decrypt(struct ablkcipher_request *areq)
+{
+	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+	struct talitos_edesc *edesc;
+
+	/* allocate extended descriptor */
+	edesc = ablkcipher_edesc_alloc(areq);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
+
+	return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
+}
+
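With the two "cbc(aes)" and "cbc(des3_ede)" entries registered below, the new path is reachable through the standard ablkcipher API of this era. A hedged usage sketch (illustrative; the example_* names are invented here, and "cbc(aes)" resolves to cbc-aes-talitos only when this driver's priority wins):

	#include <linux/crypto.h>
	#include <linux/completion.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>
	#include <linux/slab.h>

	struct example_result {
		struct completion completion;
		int err;
	};

	static void example_done(struct crypto_async_request *req, int err)
	{
		struct example_result *res = req->data;

		if (err == -EINPROGRESS)
			return;		/* backlogged, final callback follows */
		res->err = err;
		complete(&res->completion);
	}

	/* One in-place CBC-AES encryption of len bytes with a 128-bit key. */
	static int example_encrypt(u8 *buf, unsigned int len, u8 *key, u8 *iv)
	{
		struct crypto_ablkcipher *tfm;
		struct ablkcipher_request *req;
		struct example_result res;
		struct scatterlist sg;
		int ret;

		tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		ret = crypto_ablkcipher_setkey(tfm, key, 16);
		if (ret)
			goto out_tfm;

		req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out_tfm;
		}

		init_completion(&res.completion);
		ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
						example_done, &res);
		sg_init_one(&sg, buf, len);
		ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);

		ret = crypto_ablkcipher_encrypt(req);
		if (ret == -EINPROGRESS || ret == -EBUSY) {
			wait_for_completion(&res.completion);
			ret = res.err;
		}

		ablkcipher_request_free(req);
	out_tfm:
		crypto_free_ablkcipher(tfm);
		return ret;
	}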
 struct talitos_alg_template {
-	char name[CRYPTO_MAX_ALG_NAME];
-	char driver_name[CRYPTO_MAX_ALG_NAME];
-	unsigned int blocksize;
-	struct aead_alg aead;
-	struct device *dev;
+	struct crypto_alg alg;
 	__be32 desc_hdr_template;
 };
 
 static struct talitos_alg_template driver_algs[] = {
-	/* single-pass ipsec_esp descriptor */
+	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
 	{
-		.name = "authenc(hmac(sha1),cbc(aes))",
-		.driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
-		.blocksize = AES_BLOCK_SIZE,
-		.aead = {
-			.setkey = aead_authenc_setkey,
-			.setauthsize = aead_authenc_setauthsize,
-			.encrypt = aead_authenc_encrypt,
-			.decrypt = aead_authenc_decrypt,
-			.givencrypt = aead_authenc_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = AES_BLOCK_SIZE,
-			.maxauthsize = SHA1_DIGEST_SIZE,
-		},
+		.alg = {
+			.cra_name = "authenc(hmac(sha1),cbc(aes))",
+			.cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+			.cra_type = &crypto_aead_type,
+			.cra_aead = {
+				.setkey = aead_setkey,
+				.setauthsize = aead_setauthsize,
+				.encrypt = aead_encrypt,
+				.decrypt = aead_decrypt,
+				.givencrypt = aead_givencrypt,
+				.geniv = "<built-in>",
+				.ivsize = AES_BLOCK_SIZE,
+				.maxauthsize = SHA1_DIGEST_SIZE,
+			}
+		},
 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
 		                     DESC_HDR_SEL0_AESU |
 		                     DESC_HDR_MODE0_AESU_CBC |
@@ -1238,19 +1499,23 @@ static struct talitos_alg_template driver_algs[] = {
 		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
 	},
 	{
-		.name = "authenc(hmac(sha1),cbc(des3_ede))",
-		.driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
-		.blocksize = DES3_EDE_BLOCK_SIZE,
-		.aead = {
-			.setkey = aead_authenc_setkey,
-			.setauthsize = aead_authenc_setauthsize,
-			.encrypt = aead_authenc_encrypt,
-			.decrypt = aead_authenc_decrypt,
-			.givencrypt = aead_authenc_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = DES3_EDE_BLOCK_SIZE,
-			.maxauthsize = SHA1_DIGEST_SIZE,
-		},
+		.alg = {
+			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
+			.cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
+			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+			.cra_type = &crypto_aead_type,
+			.cra_aead = {
+				.setkey = aead_setkey,
+				.setauthsize = aead_setauthsize,
+				.encrypt = aead_encrypt,
+				.decrypt = aead_decrypt,
+				.givencrypt = aead_givencrypt,
+				.geniv = "<built-in>",
+				.ivsize = DES3_EDE_BLOCK_SIZE,
+				.maxauthsize = SHA1_DIGEST_SIZE,
+			}
+		},
 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
 		                     DESC_HDR_SEL0_DEU |
 		                     DESC_HDR_MODE0_DEU_CBC |
@@ -1261,19 +1526,23 @@ static struct talitos_alg_template driver_algs[] = {
 		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
 	},
 	{
-		.name = "authenc(hmac(sha256),cbc(aes))",
-		.driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
-		.blocksize = AES_BLOCK_SIZE,
-		.aead = {
-			.setkey = aead_authenc_setkey,
-			.setauthsize = aead_authenc_setauthsize,
-			.encrypt = aead_authenc_encrypt,
-			.decrypt = aead_authenc_decrypt,
-			.givencrypt = aead_authenc_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = AES_BLOCK_SIZE,
-			.maxauthsize = SHA256_DIGEST_SIZE,
-		},
+		.alg = {
+			.cra_name = "authenc(hmac(sha256),cbc(aes))",
+			.cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+			.cra_type = &crypto_aead_type,
+			.cra_aead = {
+				.setkey = aead_setkey,
+				.setauthsize = aead_setauthsize,
+				.encrypt = aead_encrypt,
+				.decrypt = aead_decrypt,
+				.givencrypt = aead_givencrypt,
+				.geniv = "<built-in>",
+				.ivsize = AES_BLOCK_SIZE,
+				.maxauthsize = SHA256_DIGEST_SIZE,
+			}
+		},
 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
 		                     DESC_HDR_SEL0_AESU |
 		                     DESC_HDR_MODE0_AESU_CBC |
@@ -1283,19 +1552,23 @@ static struct talitos_alg_template driver_algs[] = {
 		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
 	},
 	{
-		.name = "authenc(hmac(sha256),cbc(des3_ede))",
-		.driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
-		.blocksize = DES3_EDE_BLOCK_SIZE,
-		.aead = {
-			.setkey = aead_authenc_setkey,
-			.setauthsize = aead_authenc_setauthsize,
-			.encrypt = aead_authenc_encrypt,
-			.decrypt = aead_authenc_decrypt,
-			.givencrypt = aead_authenc_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = DES3_EDE_BLOCK_SIZE,
-			.maxauthsize = SHA256_DIGEST_SIZE,
-		},
+		.alg = {
+			.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
+			.cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
+			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+			.cra_type = &crypto_aead_type,
+			.cra_aead = {
+				.setkey = aead_setkey,
+				.setauthsize = aead_setauthsize,
+				.encrypt = aead_encrypt,
+				.decrypt = aead_decrypt,
+				.givencrypt = aead_givencrypt,
+				.geniv = "<built-in>",
+				.ivsize = DES3_EDE_BLOCK_SIZE,
+				.maxauthsize = SHA256_DIGEST_SIZE,
+			}
+		},
 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
 		                     DESC_HDR_SEL0_DEU |
 		                     DESC_HDR_MODE0_DEU_CBC |
@@ -1306,19 +1579,23 @@ static struct talitos_alg_template driver_algs[] = {
 		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
 	},
 	{
-		.name = "authenc(hmac(md5),cbc(aes))",
-		.driver_name = "authenc-hmac-md5-cbc-aes-talitos",
-		.blocksize = AES_BLOCK_SIZE,
-		.aead = {
-			.setkey = aead_authenc_setkey,
-			.setauthsize = aead_authenc_setauthsize,
-			.encrypt = aead_authenc_encrypt,
-			.decrypt = aead_authenc_decrypt,
-			.givencrypt = aead_authenc_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = AES_BLOCK_SIZE,
-			.maxauthsize = MD5_DIGEST_SIZE,
-		},
+		.alg = {
+			.cra_name = "authenc(hmac(md5),cbc(aes))",
+			.cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+			.cra_type = &crypto_aead_type,
+			.cra_aead = {
+				.setkey = aead_setkey,
+				.setauthsize = aead_setauthsize,
+				.encrypt = aead_encrypt,
+				.decrypt = aead_decrypt,
+				.givencrypt = aead_givencrypt,
+				.geniv = "<built-in>",
+				.ivsize = AES_BLOCK_SIZE,
+				.maxauthsize = MD5_DIGEST_SIZE,
+			}
+		},
 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
 		                     DESC_HDR_SEL0_AESU |
 		                     DESC_HDR_MODE0_AESU_CBC |
@@ -1328,19 +1605,23 @@ static struct talitos_alg_template driver_algs[] = {
 		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
 	},
 	{
-		.name = "authenc(hmac(md5),cbc(des3_ede))",
-		.driver_name = "authenc-hmac-md5-cbc-3des-talitos",
-		.blocksize = DES3_EDE_BLOCK_SIZE,
-		.aead = {
-			.setkey = aead_authenc_setkey,
-			.setauthsize = aead_authenc_setauthsize,
-			.encrypt = aead_authenc_encrypt,
-			.decrypt = aead_authenc_decrypt,
-			.givencrypt = aead_authenc_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = DES3_EDE_BLOCK_SIZE,
-			.maxauthsize = MD5_DIGEST_SIZE,
-		},
+		.alg = {
+			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+			.cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
+			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+			.cra_type = &crypto_aead_type,
+			.cra_aead = {
+				.setkey = aead_setkey,
+				.setauthsize = aead_setauthsize,
+				.encrypt = aead_encrypt,
+				.decrypt = aead_decrypt,
+				.givencrypt = aead_givencrypt,
+				.geniv = "<built-in>",
+				.ivsize = DES3_EDE_BLOCK_SIZE,
+				.maxauthsize = MD5_DIGEST_SIZE,
+			}
+		},
 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
 		                     DESC_HDR_SEL0_DEU |
 		                     DESC_HDR_MODE0_DEU_CBC |
@@ -1349,6 +1630,52 @@ static struct talitos_alg_template driver_algs[] = {
 		                     DESC_HDR_MODE1_MDEU_INIT |
 		                     DESC_HDR_MODE1_MDEU_PAD |
 		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
+	},
+	/* ABLKCIPHER algorithms. */
+	{
+		.alg = {
+			.cra_name = "cbc(aes)",
+			.cra_driver_name = "cbc-aes-talitos",
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+				     CRYPTO_ALG_ASYNC,
+			.cra_type = &crypto_ablkcipher_type,
+			.cra_ablkcipher = {
+				.setkey = ablkcipher_setkey,
+				.encrypt = ablkcipher_encrypt,
+				.decrypt = ablkcipher_decrypt,
+				.geniv = "eseqiv",
+				.min_keysize = AES_MIN_KEY_SIZE,
+				.max_keysize = AES_MAX_KEY_SIZE,
+				.ivsize = AES_BLOCK_SIZE,
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_AESU |
+				     DESC_HDR_MODE0_AESU_CBC,
+	},
+	{
+		.alg = {
+			.cra_name = "cbc(des3_ede)",
+			.cra_driver_name = "cbc-3des-talitos",
+			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+				     CRYPTO_ALG_ASYNC,
+			.cra_type = &crypto_ablkcipher_type,
+			.cra_ablkcipher = {
+				.setkey = ablkcipher_setkey,
+				.encrypt = ablkcipher_encrypt,
+				.decrypt = ablkcipher_decrypt,
+				.geniv = "eseqiv",
+				.min_keysize = DES3_EDE_KEY_SIZE,
+				.max_keysize = DES3_EDE_KEY_SIZE,
+				.ivsize = DES3_EDE_BLOCK_SIZE,
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_DEU |
+				     DESC_HDR_MODE0_DEU_CBC |
+				     DESC_HDR_MODE0_DEU_3DES,
 	}
 };
@@ -1362,12 +1689,14 @@ struct talitos_crypto_alg {
 static int talitos_cra_init(struct crypto_tfm *tfm)
 {
 	struct crypto_alg *alg = tfm->__crt_alg;
-	struct talitos_crypto_alg *talitos_alg =
-		 container_of(alg, struct talitos_crypto_alg, crypto_alg);
+	struct talitos_crypto_alg *talitos_alg;
 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
 
+	talitos_alg = container_of(alg, struct talitos_crypto_alg, crypto_alg);
+
 	/* update context with ptr to dev */
 	ctx->dev = talitos_alg->dev;
+
 	/* copy descriptor header template value */
 	ctx->desc_hdr_template = talitos_alg->desc_hdr_template;
@@ -1453,19 +1782,13 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
 		return ERR_PTR(-ENOMEM);
 
 	alg = &t_alg->crypto_alg;
+	*alg = template->alg;
 
-	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
-	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
-		 template->driver_name);
 	alg->cra_module = THIS_MODULE;
 	alg->cra_init = talitos_cra_init;
 	alg->cra_priority = TALITOS_CRA_PRIORITY;
-	alg->cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
-	alg->cra_blocksize = template->blocksize;
 	alg->cra_alignmask = 0;
-	alg->cra_type = &crypto_aead_type;
 	alg->cra_ctxsize = sizeof(struct talitos_ctx);
-	alg->cra_u.aead = template->aead;
 
 	t_alg->desc_hdr_template = template->desc_hdr_template;
 	t_alg->dev = dev;