Diffstat (limited to 'drivers')
-rw-r--r--  drivers/char/hw_random/Kconfig           |   2
-rw-r--r--  drivers/char/hw_random/omap-rng.c        |   2
-rw-r--r--  drivers/char/hw_random/timeriomem-rng.c  |  26
-rw-r--r--  drivers/char/hw_random/via-rng.c         |  15
-rw-r--r--  drivers/crypto/Kconfig                   |   2
-rw-r--r--  drivers/crypto/hifn_795x.c               |   8
-rw-r--r--  drivers/crypto/padlock-aes.c             |  13
-rw-r--r--  drivers/crypto/talitos.c                 | 713
-rw-r--r--  drivers/i2c/busses/Kconfig               |   2
-rw-r--r--  drivers/i2c/busses/i2c-bfin-twi.c        |  59
-rw-r--r--  drivers/i2c/busses/i2c-ocores.c          |   5
-rw-r--r--  drivers/i2c/busses/i2c-omap.c            |  39
-rw-r--r--  drivers/i2c/busses/i2c-s3c2410.c         |  48
-rw-r--r--  drivers/mmc/host/atmel-mci.c             |  12
14 files changed, 666 insertions, 280 deletions
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 5fab6470f4b..9c00440dcf8 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -88,7 +88,7 @@ config HW_RANDOM_N2RNG
config HW_RANDOM_VIA
tristate "VIA HW Random Number Generator support"
- depends on HW_RANDOM && X86_32
+ depends on HW_RANDOM && X86
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 538313f9e7a..00dd3de1be5 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -89,7 +89,7 @@ static struct hwrng omap_rng_ops = {
.data_read = omap_rng_data_read,
};
-static int __init omap_rng_probe(struct platform_device *pdev)
+static int __devinit omap_rng_probe(struct platform_device *pdev)
{
struct resource *res, *mem;
int ret;
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index dcd352ad0e7..a94e930575f 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -88,9 +88,9 @@ static struct hwrng timeriomem_rng_ops = {
.priv = 0,
};
-static int __init timeriomem_rng_probe(struct platform_device *pdev)
+static int __devinit timeriomem_rng_probe(struct platform_device *pdev)
{
- struct resource *res, *mem;
+ struct resource *res;
int ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -98,21 +98,12 @@ static int __init timeriomem_rng_probe(struct platform_device *pdev)
if (!res)
return -ENOENT;
- mem = request_mem_region(res->start, res->end - res->start + 1,
- pdev->name);
- if (mem == NULL)
- return -EBUSY;
-
- dev_set_drvdata(&pdev->dev, mem);
-
timeriomem_rng_data = pdev->dev.platform_data;
timeriomem_rng_data->address = ioremap(res->start,
res->end - res->start + 1);
- if (!timeriomem_rng_data->address) {
- ret = -ENOMEM;
- goto err_ioremap;
- }
+ if (!timeriomem_rng_data->address)
+ return -EIO;
if (timeriomem_rng_data->period != 0
&& usecs_to_jiffies(timeriomem_rng_data->period) > 0) {
@@ -125,7 +116,7 @@ static int __init timeriomem_rng_probe(struct platform_device *pdev)
ret = hwrng_register(&timeriomem_rng_ops);
if (ret)
- goto err_register;
+ goto failed;
dev_info(&pdev->dev, "32bits from 0x%p @ %dus\n",
timeriomem_rng_data->address,
@@ -133,24 +124,19 @@ static int __init timeriomem_rng_probe(struct platform_device *pdev)
return 0;
-err_register:
+failed:
dev_err(&pdev->dev, "problem registering\n");
iounmap(timeriomem_rng_data->address);
-err_ioremap:
- release_resource(mem);
return ret;
}
static int __devexit timeriomem_rng_remove(struct platform_device *pdev)
{
- struct resource *mem = dev_get_drvdata(&pdev->dev);
-
del_timer_sync(&timeriomem_rng_timer);
hwrng_unregister(&timeriomem_rng_ops);
iounmap(timeriomem_rng_data->address);
- release_resource(mem);
return 0;
}
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index 4e9573c1d39..794aacb715c 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -132,6 +132,19 @@ static int via_rng_init(struct hwrng *rng)
struct cpuinfo_x86 *c = &cpu_data(0);
u32 lo, hi, old_lo;
+ /* VIA Nano CPUs don't have the MSR_VIA_RNG anymore. The RNG
+ * is always enabled if CPUID rng_en is set. There is no
+ * RNG configuration like it used to be the case in this
+ * register */
+ if ((c->x86 == 6) && (c->x86_model >= 0x0f)) {
+ if (!cpu_has_xstore_enabled) {
+ printk(KERN_ERR PFX "can't enable hardware RNG "
+ "if XSTORE is not enabled\n");
+ return -ENODEV;
+ }
+ return 0;
+ }
+
/* Control the RNG via MSR. Tread lightly and pay very close
* close attention to values written, as the reserved fields
* are documented to be "undefined and unpredictable"; but it
@@ -205,5 +218,5 @@ static void __exit mod_exit(void)
module_init(mod_init);
module_exit(mod_exit);
-MODULE_DESCRIPTION("H/W RNG driver for VIA chipsets");
+MODULE_DESCRIPTION("H/W RNG driver for VIA CPU with PadLock");
MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 01afd758072..e748e55bd86 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -12,7 +12,7 @@ if CRYPTO_HW
config CRYPTO_DEV_PADLOCK
tristate "Support for VIA PadLock ACE"
- depends on X86_32 && !UML
+ depends on X86 && !UML
select CRYPTO_ALGAPI
help
Some VIA processors come with an integrated crypto engine
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 2bef086fb34..5f753fc0873 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -2564,7 +2564,7 @@ static void hifn_tasklet_callback(unsigned long data)
hifn_process_queue(dev);
}
-static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int __devinit hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int err, i;
struct hifn_device *dev;
@@ -2696,7 +2696,7 @@ err_out_disable_pci_device:
return err;
}
-static void hifn_remove(struct pci_dev *pdev)
+static void __devexit hifn_remove(struct pci_dev *pdev)
{
int i;
struct hifn_device *dev;
@@ -2744,7 +2744,7 @@ static struct pci_driver hifn_pci_driver = {
.remove = __devexit_p(hifn_remove),
};
-static int __devinit hifn_init(void)
+static int __init hifn_init(void)
{
unsigned int freq;
int err;
@@ -2789,7 +2789,7 @@ static int __devinit hifn_init(void)
return 0;
}
-static void __devexit hifn_fini(void)
+static void __exit hifn_fini(void)
{
pci_unregister_driver(&hifn_pci_driver);
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 856b3cc2558..87f92c39b5f 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -154,7 +154,11 @@ static inline void padlock_reset_key(struct cword *cword)
int cpu = raw_smp_processor_id();
if (cword != per_cpu(last_cword, cpu))
+#ifndef CONFIG_X86_64
asm volatile ("pushfl; popfl");
+#else
+ asm volatile ("pushfq; popfq");
+#endif
}
static inline void padlock_store_cword(struct cword *cword)
@@ -208,10 +212,19 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
asm volatile ("test $1, %%cl;"
"je 1f;"
+#ifndef CONFIG_X86_64
"lea -1(%%ecx), %%eax;"
"mov $1, %%ecx;"
+#else
+ "lea -1(%%rcx), %%rax;"
+ "mov $1, %%rcx;"
+#endif
".byte 0xf3,0x0f,0xa7,0xc8;" /* rep xcryptecb */
+#ifndef CONFIG_X86_64
"mov %%eax, %%ecx;"
+#else
+ "mov %%rax, %%rcx;"
+#endif
"1:"
".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
: "+S"(input), "+D"(output)
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index a3918c16b3d..c70775fd3ce 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -44,6 +44,8 @@
#include <crypto/sha.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
+#include <crypto/skcipher.h>
+#include <crypto/scatterwalk.h>
#include "talitos.h"
@@ -339,7 +341,8 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
status = error;
dma_unmap_single(dev, request->dma_desc,
- sizeof(struct talitos_desc), DMA_BIDIRECTIONAL);
+ sizeof(struct talitos_desc),
+ DMA_BIDIRECTIONAL);
/* copy entries so we can call callback outside lock */
saved_req.desc = request->desc;
@@ -413,7 +416,8 @@ static struct talitos_desc *current_desc(struct device *dev, int ch)
/*
* user diagnostics; report root cause of error based on execution unit status
*/
-static void report_eu_error(struct device *dev, int ch, struct talitos_desc *desc)
+static void report_eu_error(struct device *dev, int ch,
+ struct talitos_desc *desc)
{
struct talitos_private *priv = dev_get_drvdata(dev);
int i;
@@ -684,8 +688,8 @@ struct talitos_ctx {
unsigned int authsize;
};
-static int aead_authenc_setauthsize(struct crypto_aead *authenc,
- unsigned int authsize)
+static int aead_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
{
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
@@ -694,8 +698,8 @@ static int aead_authenc_setauthsize(struct crypto_aead *authenc,
return 0;
}
-static int aead_authenc_setkey(struct crypto_aead *authenc,
- const u8 *key, unsigned int keylen)
+static int aead_setkey(struct crypto_aead *authenc,
+ const u8 *key, unsigned int keylen)
{
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
struct rtattr *rta = (void *)key;
@@ -740,7 +744,7 @@ badkey:
}
/*
- * ipsec_esp_edesc - s/w-extended ipsec_esp descriptor
+ * talitos_edesc - s/w-extended descriptor
* @src_nents: number of segments in input scatterlist
* @dst_nents: number of segments in output scatterlist
* @dma_len: length of dma mapped link_tbl space
@@ -752,17 +756,67 @@ badkey:
* is greater than 1, an integrity check value is concatenated to the end
* of link_tbl data
*/
-struct ipsec_esp_edesc {
+struct talitos_edesc {
int src_nents;
int dst_nents;
+ int src_is_chained;
+ int dst_is_chained;
int dma_len;
dma_addr_t dma_link_tbl;
struct talitos_desc desc;
struct talitos_ptr link_tbl[0];
};
+static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
+ unsigned int nents, enum dma_data_direction dir,
+ int chained)
+{
+ if (unlikely(chained))
+ while (sg) {
+ dma_map_sg(dev, sg, 1, dir);
+ sg = scatterwalk_sg_next(sg);
+ }
+ else
+ dma_map_sg(dev, sg, nents, dir);
+ return nents;
+}
+
+static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
+ enum dma_data_direction dir)
+{
+ while (sg) {
+ dma_unmap_sg(dev, sg, 1, dir);
+ sg = scatterwalk_sg_next(sg);
+ }
+}
+
+static void talitos_sg_unmap(struct device *dev,
+ struct talitos_edesc *edesc,
+ struct scatterlist *src,
+ struct scatterlist *dst)
+{
+ unsigned int src_nents = edesc->src_nents ? : 1;
+ unsigned int dst_nents = edesc->dst_nents ? : 1;
+
+ if (src != dst) {
+ if (edesc->src_is_chained)
+ talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
+ else
+ dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
+
+ if (edesc->dst_is_chained)
+ talitos_unmap_sg_chain(dev, dst, DMA_FROM_DEVICE);
+ else
+ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+ } else
+ if (edesc->src_is_chained)
+ talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
+ else
+ dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
+}
+
static void ipsec_esp_unmap(struct device *dev,
- struct ipsec_esp_edesc *edesc,
+ struct talitos_edesc *edesc,
struct aead_request *areq)
{
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
@@ -772,15 +826,7 @@ static void ipsec_esp_unmap(struct device *dev,
dma_unmap_sg(dev, areq->assoc, 1, DMA_TO_DEVICE);
- if (areq->src != areq->dst) {
- dma_unmap_sg(dev, areq->src, edesc->src_nents ? : 1,
- DMA_TO_DEVICE);
- dma_unmap_sg(dev, areq->dst, edesc->dst_nents ? : 1,
- DMA_FROM_DEVICE);
- } else {
- dma_unmap_sg(dev, areq->src, edesc->src_nents ? : 1,
- DMA_BIDIRECTIONAL);
- }
+ talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
if (edesc->dma_len)
dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
@@ -795,13 +841,14 @@ static void ipsec_esp_encrypt_done(struct device *dev,
int err)
{
struct aead_request *areq = context;
- struct ipsec_esp_edesc *edesc =
- container_of(desc, struct ipsec_esp_edesc, desc);
struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
+ struct talitos_edesc *edesc;
struct scatterlist *sg;
void *icvdata;
+ edesc = container_of(desc, struct talitos_edesc, desc);
+
ipsec_esp_unmap(dev, edesc, areq);
/* copy the generated ICV to dst */
@@ -819,17 +866,18 @@ static void ipsec_esp_encrypt_done(struct device *dev,
}
static void ipsec_esp_decrypt_swauth_done(struct device *dev,
- struct talitos_desc *desc, void *context,
- int err)
+ struct talitos_desc *desc,
+ void *context, int err)
{
struct aead_request *req = context;
- struct ipsec_esp_edesc *edesc =
- container_of(desc, struct ipsec_esp_edesc, desc);
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
+ struct talitos_edesc *edesc;
struct scatterlist *sg;
void *icvdata;
+ edesc = container_of(desc, struct talitos_edesc, desc);
+
ipsec_esp_unmap(dev, edesc, req);
if (!err) {
@@ -851,20 +899,20 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
}
static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
- struct talitos_desc *desc, void *context,
- int err)
+ struct talitos_desc *desc,
+ void *context, int err)
{
struct aead_request *req = context;
- struct ipsec_esp_edesc *edesc =
- container_of(desc, struct ipsec_esp_edesc, desc);
+ struct talitos_edesc *edesc;
+
+ edesc = container_of(desc, struct talitos_edesc, desc);
ipsec_esp_unmap(dev, edesc, req);
/* check ICV auth status */
- if (!err)
- if ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
- DESC_HDR_LO_ICCR1_PASS)
- err = -EBADMSG;
+ if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
+ DESC_HDR_LO_ICCR1_PASS))
+ err = -EBADMSG;
kfree(edesc);
@@ -886,7 +934,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
link_tbl_ptr->j_extent = 0;
link_tbl_ptr++;
cryptlen -= sg_dma_len(sg);
- sg = sg_next(sg);
+ sg = scatterwalk_sg_next(sg);
}
/* adjust (decrease) last one (or two) entry's len to cryptlen */
@@ -910,7 +958,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
/*
* fill in and submit ipsec_esp descriptor
*/
-static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
+static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
u8 *giv, u64 seq,
void (*callback) (struct device *dev,
struct talitos_desc *desc,
@@ -952,32 +1000,31 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
desc->ptr[4].len = cpu_to_be16(cryptlen);
desc->ptr[4].j_extent = authsize;
- if (areq->src == areq->dst)
- sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ? : 1,
- DMA_BIDIRECTIONAL);
- else
- sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ? : 1,
- DMA_TO_DEVICE);
+ sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
+ (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
+ : DMA_TO_DEVICE,
+ edesc->src_is_chained);
if (sg_count == 1) {
desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
} else {
sg_link_tbl_len = cryptlen;
- if ((edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV) &&
- (edesc->desc.hdr & DESC_HDR_MODE0_ENCRYPT) == 0) {
+ if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
sg_link_tbl_len = cryptlen + authsize;
- }
+
sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
&edesc->link_tbl[0]);
if (sg_count > 1) {
desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl);
- dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
- edesc->dma_len, DMA_BIDIRECTIONAL);
+ dma_sync_single_for_device(dev, edesc->dma_link_tbl,
+ edesc->dma_len,
+ DMA_BIDIRECTIONAL);
} else {
/* Only one segment now, so no link tbl needed */
- desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
+ desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->
+ src));
}
}
@@ -985,10 +1032,11 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
desc->ptr[5].len = cpu_to_be16(cryptlen);
desc->ptr[5].j_extent = authsize;
- if (areq->src != areq->dst) {
- sg_count = dma_map_sg(dev, areq->dst, edesc->dst_nents ? : 1,
- DMA_FROM_DEVICE);
- }
+ if (areq->src != areq->dst)
+ sg_count = talitos_map_sg(dev, areq->dst,
+ edesc->dst_nents ? : 1,
+ DMA_FROM_DEVICE,
+ edesc->dst_is_chained);
if (sg_count == 1) {
desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst));
@@ -1033,49 +1081,55 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
return ret;
}
-
/*
* derive number of elements in scatterlist
*/
-static int sg_count(struct scatterlist *sg_list, int nbytes)
+static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained)
{
struct scatterlist *sg = sg_list;
int sg_nents = 0;
- while (nbytes) {
+ *chained = 0;
+ while (nbytes > 0) {
sg_nents++;
nbytes -= sg->length;
- sg = sg_next(sg);
+ if (!sg_is_last(sg) && (sg + 1)->length == 0)
+ *chained = 1;
+ sg = scatterwalk_sg_next(sg);
}
return sg_nents;
}
/*
- * allocate and map the ipsec_esp extended descriptor
+ * allocate and map the extended descriptor
*/
-static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
- int icv_stashing)
+static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
+ struct scatterlist *src,
+ struct scatterlist *dst,
+ unsigned int cryptlen,
+ unsigned int authsize,
+ int icv_stashing,
+ u32 cryptoflags)
{
- struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
- struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
- struct ipsec_esp_edesc *edesc;
+ struct talitos_edesc *edesc;
int src_nents, dst_nents, alloc_len, dma_len;
- gfp_t flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ int src_chained, dst_chained = 0;
+ gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
- if (areq->cryptlen + ctx->authsize > TALITOS_MAX_DATA_LEN) {
- dev_err(ctx->dev, "cryptlen exceeds h/w max limit\n");
+ if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) {
+ dev_err(dev, "length exceeds h/w max limit\n");
return ERR_PTR(-EINVAL);
}
- src_nents = sg_count(areq->src, areq->cryptlen + ctx->authsize);
+ src_nents = sg_count(src, cryptlen + authsize, &src_chained);
src_nents = (src_nents == 1) ? 0 : src_nents;
- if (areq->dst == areq->src) {
+ if (dst == src) {
dst_nents = src_nents;
} else {
- dst_nents = sg_count(areq->dst, areq->cryptlen + ctx->authsize);
+ dst_nents = sg_count(dst, cryptlen + authsize, &dst_chained);
dst_nents = (dst_nents == 1) ? 0 : dst_nents;
}
@@ -1084,39 +1138,52 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
* allowing for two separate entries for ICV and generated ICV (+ 2),
* and the ICV data itself
*/
- alloc_len = sizeof(struct ipsec_esp_edesc);
+ alloc_len = sizeof(struct talitos_edesc);
if (src_nents || dst_nents) {
dma_len = (src_nents + dst_nents + 2) *
- sizeof(struct talitos_ptr) + ctx->authsize;
+ sizeof(struct talitos_ptr) + authsize;
alloc_len += dma_len;
} else {
dma_len = 0;
- alloc_len += icv_stashing ? ctx->authsize : 0;
+ alloc_len += icv_stashing ? authsize : 0;
}
edesc = kmalloc(alloc_len, GFP_DMA | flags);
if (!edesc) {
- dev_err(ctx->dev, "could not allocate edescriptor\n");
+ dev_err(dev, "could not allocate edescriptor\n");
return ERR_PTR(-ENOMEM);
}
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
+ edesc->src_is_chained = src_chained;
+ edesc->dst_is_chained = dst_chained;
edesc->dma_len = dma_len;
- edesc->dma_link_tbl = dma_map_single(ctx->dev, &edesc->link_tbl[0],
+ edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
edesc->dma_len, DMA_BIDIRECTIONAL);
return edesc;
}
-static int aead_authenc_encrypt(struct aead_request *req)
+static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq,
+ int icv_stashing)
+{
+ struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
+
+ return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
+ areq->cryptlen, ctx->authsize, icv_stashing,
+ areq->base.flags);
+}
+
+static int aead_encrypt(struct aead_request *req)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
- struct ipsec_esp_edesc *edesc;
+ struct talitos_edesc *edesc;
/* allocate extended descriptor */
- edesc = ipsec_esp_edesc_alloc(req, 0);
+ edesc = aead_edesc_alloc(req, 0);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1126,70 +1193,67 @@ static int aead_authenc_encrypt(struct aead_request *req)
return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_encrypt_done);
}
-
-
-static int aead_authenc_decrypt(struct aead_request *req)
+static int aead_decrypt(struct aead_request *req)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
unsigned int authsize = ctx->authsize;
struct talitos_private *priv = dev_get_drvdata(ctx->dev);
- struct ipsec_esp_edesc *edesc;
+ struct talitos_edesc *edesc;
struct scatterlist *sg;
void *icvdata;
req->cryptlen -= authsize;
/* allocate extended descriptor */
- edesc = ipsec_esp_edesc_alloc(req, 1);
+ edesc = aead_edesc_alloc(req, 1);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
- (((!edesc->src_nents && !edesc->dst_nents) ||
- priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT))) {
+ ((!edesc->src_nents && !edesc->dst_nents) ||
+ priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
/* decrypt and check the ICV */
- edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND |
+ edesc->desc.hdr = ctx->desc_hdr_template |
+ DESC_HDR_DIR_INBOUND |
DESC_HDR_MODE1_MDEU_CICV;
/* reset integrity check result bits */
edesc->desc.hdr_lo = 0;
- return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_hwauth_done);
+ return ipsec_esp(edesc, req, NULL, 0,
+ ipsec_esp_decrypt_hwauth_done);
- } else {
-
- /* Have to check the ICV with software */
+ }
- edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
+ /* Have to check the ICV with software */
+ edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
- /* stash incoming ICV for later cmp with ICV generated by the h/w */
- if (edesc->dma_len)
- icvdata = &edesc->link_tbl[edesc->src_nents +
- edesc->dst_nents + 2];
- else
- icvdata = &edesc->link_tbl[0];
+ /* stash incoming ICV for later cmp with ICV generated by the h/w */
+ if (edesc->dma_len)
+ icvdata = &edesc->link_tbl[edesc->src_nents +
+ edesc->dst_nents + 2];
+ else
+ icvdata = &edesc->link_tbl[0];
- sg = sg_last(req->src, edesc->src_nents ? : 1);
+ sg = sg_last(req->src, edesc->src_nents ? : 1);
- memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
- ctx->authsize);
+ memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
+ ctx->authsize);
- return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_swauth_done);
- }
+ return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_swauth_done);
}
-static int aead_authenc_givencrypt(
- struct aead_givcrypt_request *req)
+static int aead_givencrypt(struct aead_givcrypt_request *req)
{
struct aead_request *areq = &req->areq;
struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
- struct ipsec_esp_edesc *edesc;
+ struct talitos_edesc *edesc;
/* allocate extended descriptor */
- edesc = ipsec_esp_edesc_alloc(areq, 0);
+ edesc = aead_edesc_alloc(areq, 0);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1204,31 +1268,228 @@ static int aead_authenc_givencrypt(
ipsec_esp_encrypt_done);
}
+static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct ablkcipher_alg *alg = crypto_ablkcipher_alg(cipher);
+
+ if (keylen > TALITOS_MAX_KEY_SIZE)
+ goto badkey;
+
+ if (keylen < alg->min_keysize || keylen > alg->max_keysize)
+ goto badkey;
+
+ memcpy(&ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ return 0;
+
+badkey:
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+}
+
+static void common_nonsnoop_unmap(struct device *dev,
+ struct talitos_edesc *edesc,
+ struct ablkcipher_request *areq)
+{
+ unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
+ unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
+ unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
+
+ talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
+
+ if (edesc->dma_len)
+ dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
+ DMA_BIDIRECTIONAL);
+}
+
+static void ablkcipher_done(struct device *dev,
+ struct talitos_desc *desc, void *context,
+ int err)
+{
+ struct ablkcipher_request *areq = context;
+ struct talitos_edesc *edesc;
+
+ edesc = container_of(desc, struct talitos_edesc, desc);
+
+ common_nonsnoop_unmap(dev, edesc, areq);
+
+ kfree(edesc);
+
+ areq->base.complete(&areq->base, err);
+}
+
+static int common_nonsnoop(struct talitos_edesc *edesc,
+ struct ablkcipher_request *areq,
+ u8 *giv,
+ void (*callback) (struct device *dev,
+ struct talitos_desc *desc,
+ void *context, int error))
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct device *dev = ctx->dev;
+ struct talitos_desc *desc = &edesc->desc;
+ unsigned int cryptlen = areq->nbytes;
+ unsigned int ivsize;
+ int sg_count, ret;
+
+ /* first DWORD empty */
+ desc->ptr[0].len = 0;
+ desc->ptr[0].ptr = 0;
+ desc->ptr[0].j_extent = 0;
+
+ /* cipher iv */
+ ivsize = crypto_ablkcipher_ivsize(cipher);
+ map_single_talitos_ptr(dev, &desc->ptr[1], ivsize, giv ?: areq->info, 0,
+ DMA_TO_DEVICE);
+
+ /* cipher key */
+ map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
+ (char *)&ctx->key, 0, DMA_TO_DEVICE);
+
+ /*
+ * cipher in
+ */
+ desc->ptr[3].len = cpu_to_be16(cryptlen);
+ desc->ptr[3].j_extent = 0;
+
+ sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
+ (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
+ : DMA_TO_DEVICE,
+ edesc->src_is_chained);
+
+ if (sg_count == 1) {
+ desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->src));
+ } else {
+ sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
+ &edesc->link_tbl[0]);
+ if (sg_count > 1) {
+ desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
+ desc->ptr[3].ptr = cpu_to_be32(edesc->dma_link_tbl);
+ dma_sync_single_for_device(dev, edesc->dma_link_tbl,
+ edesc->dma_len,
+ DMA_BIDIRECTIONAL);
+ } else {
+ /* Only one segment now, so no link tbl needed */
+ desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->
+ src));
+ }
+ }
+
+ /* cipher out */
+ desc->ptr[4].len = cpu_to_be16(cryptlen);
+ desc->ptr[4].j_extent = 0;
+
+ if (areq->src != areq->dst)
+ sg_count = talitos_map_sg(dev, areq->dst,
+ edesc->dst_nents ? : 1,
+ DMA_FROM_DEVICE,
+ edesc->dst_is_chained);
+
+ if (sg_count == 1) {
+ desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->dst));
+ } else {
+ struct talitos_ptr *link_tbl_ptr =
+ &edesc->link_tbl[edesc->src_nents + 1];
+
+ desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
+ desc->ptr[4].ptr = cpu_to_be32((struct talitos_ptr *)
+ edesc->dma_link_tbl +
+ edesc->src_nents + 1);
+ sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
+ link_tbl_ptr);
+ dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
+ edesc->dma_len, DMA_BIDIRECTIONAL);
+ }
+
+ /* iv out */
+ map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, 0,
+ DMA_FROM_DEVICE);
+
+ /* last DWORD empty */
+ desc->ptr[6].len = 0;
+ desc->ptr[6].ptr = 0;
+ desc->ptr[6].j_extent = 0;
+
+ ret = talitos_submit(dev, desc, callback, areq);
+ if (ret != -EINPROGRESS) {
+ common_nonsnoop_unmap(dev, edesc, areq);
+ kfree(edesc);
+ }
+ return ret;
+}
+
+static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
+ areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, areq->nbytes,
+ 0, 0, areq->base.flags);
+}
+
+static int ablkcipher_encrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct talitos_edesc *edesc;
+
+ /* allocate extended descriptor */
+ edesc = ablkcipher_edesc_alloc(areq);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ /* set encrypt */
+ edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
+
+ return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
+}
+
+static int ablkcipher_decrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct talitos_edesc *edesc;
+
+ /* allocate extended descriptor */
+ edesc = ablkcipher_edesc_alloc(areq);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
+
+ return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
+}
+
struct talitos_alg_template {
- char name[CRYPTO_MAX_ALG_NAME];
- char driver_name[CRYPTO_MAX_ALG_NAME];
- unsigned int blocksize;
- struct aead_alg aead;
- struct device *dev;
+ struct crypto_alg alg;
__be32 desc_hdr_template;
};
static struct talitos_alg_template driver_algs[] = {
- /* single-pass ipsec_esp descriptor */
+ /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
{
- .name = "authenc(hmac(sha1),cbc(aes))",
- .driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
- .blocksize = AES_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
- .geniv = "<built-in>",
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA1_DIGEST_SIZE,
- },
+ .alg = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_aead_type,
+ .cra_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ }
+ },
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_AESU |
DESC_HDR_MODE0_AESU_CBC |
@@ -1238,19 +1499,23 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_SHA1_HMAC,
},
{
- .name = "authenc(hmac(sha1),cbc(des3_ede))",
- .driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
- .blocksize = DES3_EDE_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
- .geniv = "<built-in>",
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .maxauthsize = SHA1_DIGEST_SIZE,
- },
+ .alg = {
+ .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_aead_type,
+ .cra_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ }
+ },
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
DESC_HDR_MODE0_DEU_CBC |
@@ -1261,19 +1526,23 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_SHA1_HMAC,
},
{
- .name = "authenc(hmac(sha256),cbc(aes))",
- .driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
- .blocksize = AES_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
- .geniv = "<built-in>",
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA256_DIGEST_SIZE,
- },
+ .alg = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_aead_type,
+ .cra_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ }
+ },
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_AESU |
DESC_HDR_MODE0_AESU_CBC |
@@ -1283,19 +1552,23 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_SHA256_HMAC,
},
{
- .name = "authenc(hmac(sha256),cbc(des3_ede))",
- .driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
- .blocksize = DES3_EDE_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
- .geniv = "<built-in>",
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .maxauthsize = SHA256_DIGEST_SIZE,
- },
+ .alg = {
+ .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_aead_type,
+ .cra_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ }
+ },
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
DESC_HDR_MODE0_DEU_CBC |
@@ -1306,19 +1579,23 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_SHA256_HMAC,
},
{
- .name = "authenc(hmac(md5),cbc(aes))",
- .driver_name = "authenc-hmac-md5-cbc-aes-talitos",
- .blocksize = AES_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
- .geniv = "<built-in>",
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = MD5_DIGEST_SIZE,
- },
+ .alg = {
+ .cra_name = "authenc(hmac(md5),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_aead_type,
+ .cra_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ }
+ },
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_AESU |
DESC_HDR_MODE0_AESU_CBC |
@@ -1328,19 +1605,23 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_MD5_HMAC,
},
{
- .name = "authenc(hmac(md5),cbc(des3_ede))",
- .driver_name = "authenc-hmac-md5-cbc-3des-talitos",
- .blocksize = DES3_EDE_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
- .geniv = "<built-in>",
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .maxauthsize = MD5_DIGEST_SIZE,
- },
+ .alg = {
+ .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_aead_type,
+ .cra_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ }
+ },
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
DESC_HDR_MODE0_DEU_CBC |
@@ -1349,6 +1630,52 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_INIT |
DESC_HDR_MODE1_MDEU_PAD |
DESC_HDR_MODE1_MDEU_MD5_HMAC,
+ },
+ /* ABLKCIPHER algorithms. */
+ {
+ .alg = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-talitos",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .geniv = "eseqiv",
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_AESU |
+ DESC_HDR_MODE0_AESU_CBC,
+ },
+ {
+ .alg = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc-3des-talitos",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .geniv = "eseqiv",
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_DEU |
+ DESC_HDR_MODE0_DEU_CBC |
+ DESC_HDR_MODE0_DEU_3DES,
}
};
@@ -1362,12 +1689,14 @@ struct talitos_crypto_alg {
static int talitos_cra_init(struct crypto_tfm *tfm)
{
struct crypto_alg *alg = tfm->__crt_alg;
- struct talitos_crypto_alg *talitos_alg =
- container_of(alg, struct talitos_crypto_alg, crypto_alg);
+ struct talitos_crypto_alg *talitos_alg;
struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+ talitos_alg = container_of(alg, struct talitos_crypto_alg, crypto_alg);
+
/* update context with ptr to dev */
ctx->dev = talitos_alg->dev;
+
/* copy descriptor header template value */
ctx->desc_hdr_template = talitos_alg->desc_hdr_template;
@@ -1453,19 +1782,13 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
return ERR_PTR(-ENOMEM);
alg = &t_alg->crypto_alg;
+ *alg = template->alg;
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- template->driver_name);
alg->cra_module = THIS_MODULE;
alg->cra_init = talitos_cra_init;
alg->cra_priority = TALITOS_CRA_PRIORITY;
- alg->cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
- alg->cra_blocksize = template->blocksize;
alg->cra_alignmask = 0;
- alg->cra_type = &crypto_aead_type;
alg->cra_ctxsize = sizeof(struct talitos_ctx);
- alg->cra_u.aead = template->aead;
t_alg->desc_hdr_template = template->desc_hdr_template;
t_alg->dev = dev;
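
For readers unfamiliar with chained scatterlists, here is a userspace sketch of the walk the new sg_count()/talitos_map_sg() helpers perform. struct seg and seg_next() are simplified stand-ins for struct scatterlist and scatterwalk_sg_next(), not the kernel types; a dedicated zero-length slot models the chain link.

#include <stddef.h>
#include <stdio.h>

struct seg {
	size_t length;
	struct seg *chain;	/* set on a dedicated link slot (length 0) */
	int is_last;
};

/* mirrors scatterwalk_sg_next(): step to the next slot, and if that slot
 * is a chain link, follow it transparently */
static struct seg *seg_next(struct seg *s)
{
	if (s->is_last)
		return NULL;
	s++;
	return s->chain ? s->chain : s;
}

/* mirrors the new sg_count(): count data entries covering nbytes and note
 * whether a chained list was encountered along the way */
static int count_segs(struct seg *s, long nbytes, int *chained)
{
	int nents = 0;

	*chained = 0;
	while (nbytes > 0 && s) {
		nents++;
		nbytes -= (long)s->length;
		if (!s->is_last && (s + 1)->length == 0)
			*chained = 1;
		s = seg_next(s);
	}
	return nents;
}

int main(void)
{
	struct seg tail[1] = { { .length = 64, .is_last = 1 } };
	struct seg head[2] = { { .length = 64 }, { .chain = tail } };
	int chained;

	printf("%d entries, chained=%d\n",
	       count_segs(head, 128, &chained), chained);
	return 0;
}
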
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index f1c6ca7e285..c8460fa9cfa 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -298,7 +298,7 @@ config I2C_BLACKFIN_TWI
config I2C_BLACKFIN_TWI_CLK_KHZ
int "Blackfin TWI I2C clock (kHz)"
depends on I2C_BLACKFIN_TWI
- range 10 400
+ range 21 400
default 50
help
The unit of the TWI clock is kHz.
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index fc548b3d002..26d8987e69b 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -104,9 +104,14 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface)
write_MASTER_CTL(iface,
read_MASTER_CTL(iface) | STOP);
else if (iface->cur_mode == TWI_I2C_MODE_REPEAT &&
- iface->cur_msg+1 < iface->msg_num)
- write_MASTER_CTL(iface,
- read_MASTER_CTL(iface) | RSTART);
+ iface->cur_msg + 1 < iface->msg_num) {
+ if (iface->pmsg[iface->cur_msg + 1].flags & I2C_M_RD)
+ write_MASTER_CTL(iface,
+ read_MASTER_CTL(iface) | RSTART | MDIR);
+ else
+ write_MASTER_CTL(iface,
+ (read_MASTER_CTL(iface) | RSTART) & ~MDIR);
+ }
SSYNC();
/* Clear status */
write_INT_STAT(iface, XMTSERV);
@@ -134,9 +139,13 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface)
read_MASTER_CTL(iface) | STOP);
SSYNC();
} else if (iface->cur_mode == TWI_I2C_MODE_REPEAT &&
- iface->cur_msg+1 < iface->msg_num) {
- write_MASTER_CTL(iface,
- read_MASTER_CTL(iface) | RSTART);
+ iface->cur_msg + 1 < iface->msg_num) {
+ if (iface->pmsg[iface->cur_msg + 1].flags & I2C_M_RD)
+ write_MASTER_CTL(iface,
+ read_MASTER_CTL(iface) | RSTART | MDIR);
+ else
+ write_MASTER_CTL(iface,
+ (read_MASTER_CTL(iface) | RSTART) & ~MDIR);
SSYNC();
}
/* Clear interrupt source */
@@ -196,8 +205,6 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface)
/* remove restart bit and enable master receive */
write_MASTER_CTL(iface,
read_MASTER_CTL(iface) & ~RSTART);
- write_MASTER_CTL(iface,
- read_MASTER_CTL(iface) | MEN | MDIR);
SSYNC();
} else if (iface->cur_mode == TWI_I2C_MODE_REPEAT &&
iface->cur_msg+1 < iface->msg_num) {
@@ -222,18 +229,19 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface)
}
if (iface->pmsg[iface->cur_msg].len <= 255)
- write_MASTER_CTL(iface,
- iface->pmsg[iface->cur_msg].len << 6);
+ write_MASTER_CTL(iface,
+ (read_MASTER_CTL(iface) &
+ (~(0xff << 6))) |
+ (iface->pmsg[iface->cur_msg].len << 6));
else {
- write_MASTER_CTL(iface, 0xff << 6);
+ write_MASTER_CTL(iface,
+ (read_MASTER_CTL(iface) |
+ (0xff << 6)));
iface->manual_stop = 1;
}
/* remove restart bit and enable master receive */
write_MASTER_CTL(iface,
read_MASTER_CTL(iface) & ~RSTART);
- write_MASTER_CTL(iface, read_MASTER_CTL(iface) |
- MEN | ((iface->read_write == I2C_SMBUS_READ) ?
- MDIR : 0));
SSYNC();
} else {
iface->result = 1;
@@ -441,6 +449,16 @@ int bfin_twi_smbus_xfer(struct i2c_adapter *adap, u16 addr,
}
iface->transPtr = data->block;
break;
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+ if (read_write == I2C_SMBUS_READ) {
+ iface->readNum = data->block[0];
+ iface->cur_mode = TWI_I2C_MODE_COMBINED;
+ } else {
+ iface->writeNum = data->block[0];
+ iface->cur_mode = TWI_I2C_MODE_STANDARDSUB;
+ }
+ iface->transPtr = (u8 *)&data->block[1];
+ break;
default:
return -1;
}
@@ -564,7 +582,7 @@ static u32 bfin_twi_functionality(struct i2c_adapter *adap)
return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_PROC_CALL |
- I2C_FUNC_I2C;
+ I2C_FUNC_I2C | I2C_FUNC_SMBUS_I2C_BLOCK;
}
static struct i2c_algorithm bfin_twi_algorithm = {
@@ -614,6 +632,7 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
struct i2c_adapter *p_adap;
struct resource *res;
int rc;
+ unsigned int clkhilow;
iface = kzalloc(sizeof(struct bfin_twi_iface), GFP_KERNEL);
if (!iface) {
@@ -675,10 +694,14 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
/* Set TWI internal clock as 10MHz */
write_CONTROL(iface, ((get_sclk() / 1024 / 1024 + 5) / 10) & 0x7F);
+ /*
+ * We will not end up with a CLKDIV=0 because no one will specify
+ * 20kHz SCL or less in Kconfig now. (5 * 1024 / 20 = 0x100)
+ */
+ clkhilow = 5 * 1024 / CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ;
+
/* Set Twi interface clock as specified */
- write_CLKDIV(iface, ((5*1024 / CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ)
- << 8) | ((5*1024 / CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ)
- & 0xFF));
+ write_CLKDIV(iface, (clkhilow << 8) | clkhilow);
/* Enable TWI */
write_CONTROL(iface, read_CONTROL(iface) | TWI_ENA);
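
A quick worked check of the CLKDIV arithmetic in the hunk above, and of the new 21 kHz Kconfig floor: CLKHI and CLKLO are 8-bit fields, so clkhilow must stay at or below 0xFF, which 20 kHz would violate.

#include <stdio.h>

int main(void)
{
	const unsigned int khz[] = { 400, 100, 50, 21, 20 };
	unsigned int i;

	for (i = 0; i < sizeof(khz) / sizeof(khz[0]); i++) {
		unsigned int clkhilow = 5 * 1024 / khz[i];	/* as in the driver */

		printf("%3u kHz -> clkhilow = %3u (0x%02x)%s\n",
		       khz[i], clkhilow, clkhilow,
		       clkhilow > 0xff ? "  <- would overflow the 8-bit field" : "");
	}
	return 0;
}
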
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index e5193bf7548..3542c6ba98f 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -216,6 +216,7 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
struct ocores_i2c_platform_data *pdata;
struct resource *res, *res2;
int ret;
+ int i;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
@@ -271,6 +272,10 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
goto add_adapter_failed;
}
+ /* add in known devices to the bus */
+ for (i = 0; i < pdata->num_devices; i++)
+ i2c_new_device(&i2c->adap, pdata->devices + i);
+
return 0;
add_adapter_failed:
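
The new loop relies on the board supplying a device list in its platform data. A hedged sketch of what a board file might pass follows; the slave chip, its address, and the regstep/clock_khz values are placeholders chosen for illustration, not taken from this patch.

static struct i2c_board_info ocores_i2c_devices[] = {
	{
		I2C_BOARD_INFO("ds1307", 0x68),		/* placeholder RTC slave */
	},
};

static struct ocores_i2c_platform_data myboard_i2c_data = {
	.regstep	= 4,				/* placeholder register spacing */
	.clock_khz	= 100,				/* placeholder bus clock */
	.devices	= ocores_i2c_devices,
	.num_devices	= ARRAY_SIZE(ocores_i2c_devices),
};
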
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index ece0125a1ee..c73475dd0fb 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -333,8 +333,18 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
if (cpu_is_omap2430() || cpu_is_omap34xx()) {
- /* HSI2C controller internal clk rate should be 19.2 Mhz */
- internal_clk = 19200;
+ /*
+ * HSI2C controller internal clk rate should be 19.2 Mhz for
+ * HS and for all modes on 2430. On 34xx we can use lower rate
+ * to get longer filter period for better noise suppression.
+ * The filter is iclk (fclk for HS) period.
+ */
+ if (dev->speed > 400 || cpu_is_omap_2430())
+ internal_clk = 19200;
+ else if (dev->speed > 100)
+ internal_clk = 9600;
+ else
+ internal_clk = 4000;
fclk_rate = clk_get_rate(dev->fclk) / 1000;
/* Compute prescaler divisor */
@@ -343,17 +353,28 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
/* If configured for High Speed */
if (dev->speed > 400) {
+ unsigned long scl;
+
/* For first phase of HS mode */
- fsscll = internal_clk / (400 * 2) - 6;
- fssclh = internal_clk / (400 * 2) - 6;
+ scl = internal_clk / 400;
+ fsscll = scl - (scl / 3) - 7;
+ fssclh = (scl / 3) - 5;
/* For second phase of HS mode */
- hsscll = fclk_rate / (dev->speed * 2) - 6;
- hssclh = fclk_rate / (dev->speed * 2) - 6;
+ scl = fclk_rate / dev->speed;
+ hsscll = scl - (scl / 3) - 7;
+ hssclh = (scl / 3) - 5;
+ } else if (dev->speed > 100) {
+ unsigned long scl;
+
+ /* Fast mode */
+ scl = internal_clk / dev->speed;
+ fsscll = scl - (scl / 3) - 7;
+ fssclh = (scl / 3) - 5;
} else {
- /* To handle F/S modes */
- fsscll = internal_clk / (dev->speed * 2) - 6;
- fssclh = internal_clk / (dev->speed * 2) - 6;
+ /* Standard mode */
+ fsscll = internal_clk / (dev->speed * 2) - 7;
+ fssclh = internal_clk / (dev->speed * 2) - 5;
}
scll = (hsscll << OMAP_I2C_SCLL_HSSCLL) | fsscll;
sclh = (hssclh << OMAP_I2C_SCLH_HSSCLH) | fssclh;
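
Worked numbers for the new divisor formulas above, using the internal clock rates the hunk selects (4000 kHz for standard mode, 9600 kHz for fast mode; the HS first phase applies the same fast-mode formula with 19200 kHz against a fixed 400 kHz). This is a standalone check, not driver code.

#include <stdio.h>

static void omap_fs_dividers(unsigned long internal_clk_khz, unsigned long speed_khz)
{
	unsigned long scl, scll, sclh;

	if (speed_khz > 100) {			/* fast mode: asymmetric low/high */
		scl = internal_clk_khz / speed_khz;
		scll = scl - (scl / 3) - 7;
		sclh = (scl / 3) - 5;
	} else {				/* standard mode: symmetric */
		scll = internal_clk_khz / (speed_khz * 2) - 7;
		sclh = internal_clk_khz / (speed_khz * 2) - 5;
	}
	printf("%4lu kHz with %5lu kHz iclk -> SCLL=%lu SCLH=%lu\n",
	       speed_khz, internal_clk_khz, scll, sclh);
}

int main(void)
{
	omap_fs_dividers(4000, 100);	/* standard mode */
	omap_fs_dividers(9600, 400);	/* fast mode     */
	return 0;
}
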
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 1691ef0f1ee..079a312d36f 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -51,6 +51,11 @@ enum s3c24xx_i2c_state {
STATE_STOP
};
+enum s3c24xx_i2c_type {
+ TYPE_S3C2410,
+ TYPE_S3C2440,
+};
+
struct s3c24xx_i2c {
spinlock_t lock;
wait_queue_head_t wait;
@@ -88,8 +93,10 @@ struct s3c24xx_i2c {
static inline int s3c24xx_i2c_is2440(struct s3c24xx_i2c *i2c)
{
struct platform_device *pdev = to_platform_device(i2c->dev);
+ enum s3c24xx_i2c_type type;
- return !strcmp(pdev->name, "s3c2440-i2c");
+ type = platform_get_device_id(pdev)->driver_data;
+ return type == TYPE_S3C2440;
}
/* s3c24xx_i2c_master_complete
@@ -969,52 +976,41 @@ static int s3c24xx_i2c_resume(struct platform_device *dev)
/* device driver for platform bus bits */
-static struct platform_driver s3c2410_i2c_driver = {
- .probe = s3c24xx_i2c_probe,
- .remove = s3c24xx_i2c_remove,
- .suspend_late = s3c24xx_i2c_suspend_late,
- .resume = s3c24xx_i2c_resume,
- .driver = {
- .owner = THIS_MODULE,
- .name = "s3c2410-i2c",
- },
+static struct platform_device_id s3c24xx_driver_ids[] = {
+ {
+ .name = "s3c2410-i2c",
+ .driver_data = TYPE_S3C2410,
+ }, {
+ .name = "s3c2440-i2c",
+ .driver_data = TYPE_S3C2440,
+ }, { },
};
+MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
-static struct platform_driver s3c2440_i2c_driver = {
+static struct platform_driver s3c24xx_i2c_driver = {
.probe = s3c24xx_i2c_probe,
.remove = s3c24xx_i2c_remove,
.suspend_late = s3c24xx_i2c_suspend_late,
.resume = s3c24xx_i2c_resume,
+ .id_table = s3c24xx_driver_ids,
.driver = {
.owner = THIS_MODULE,
- .name = "s3c2440-i2c",
+ .name = "s3c-i2c",
},
};
static int __init i2c_adap_s3c_init(void)
{
- int ret;
-
- ret = platform_driver_register(&s3c2410_i2c_driver);
- if (ret == 0) {
- ret = platform_driver_register(&s3c2440_i2c_driver);
- if (ret)
- platform_driver_unregister(&s3c2410_i2c_driver);
- }
-
- return ret;
+ return platform_driver_register(&s3c24xx_i2c_driver);
}
subsys_initcall(i2c_adap_s3c_init);
static void __exit i2c_adap_s3c_exit(void)
{
- platform_driver_unregister(&s3c2410_i2c_driver);
- platform_driver_unregister(&s3c2440_i2c_driver);
+ platform_driver_unregister(&s3c24xx_i2c_driver);
}
module_exit(i2c_adap_s3c_exit);
MODULE_DESCRIPTION("S3C24XX I2C Bus driver");
MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:s3c2410-i2c");
-MODULE_ALIAS("platform:s3c2440-i2c");
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index cf6a100bb38..7b603e4b41d 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -177,6 +177,7 @@ struct atmel_mci {
* available.
* @wp_pin: GPIO pin used for card write protect sending, or negative
* if not available.
+ * @detect_is_active_high: The state of the detect pin when it is active.
* @detect_timer: Timer used for debouncing @detect_pin interrupts.
*/
struct atmel_mci_slot {
@@ -196,6 +197,7 @@ struct atmel_mci_slot {
int detect_pin;
int wp_pin;
+ bool detect_is_active_high;
struct timer_list detect_timer;
};
@@ -924,7 +926,8 @@ static int atmci_get_cd(struct mmc_host *mmc)
struct atmel_mci_slot *slot = mmc_priv(mmc);
if (gpio_is_valid(slot->detect_pin)) {
- present = !gpio_get_value(slot->detect_pin);
+ present = !(gpio_get_value(slot->detect_pin) ^
+ slot->detect_is_active_high);
dev_dbg(&mmc->class_dev, "card is %spresent\n",
present ? "" : "not ");
}
@@ -1028,7 +1031,8 @@ static void atmci_detect_change(unsigned long data)
return;
enable_irq(gpio_to_irq(slot->detect_pin));
- present = !gpio_get_value(slot->detect_pin);
+ present = !(gpio_get_value(slot->detect_pin) ^
+ slot->detect_is_active_high);
present_old = test_bit(ATMCI_CARD_PRESENT, &slot->flags);
dev_vdbg(&slot->mmc->class_dev, "detect change: %d (was %d)\n",
@@ -1456,6 +1460,7 @@ static int __init atmci_init_slot(struct atmel_mci *host,
slot->host = host;
slot->detect_pin = slot_data->detect_pin;
slot->wp_pin = slot_data->wp_pin;
+ slot->detect_is_active_high = slot_data->detect_is_active_high;
slot->sdc_reg = sdc_reg;
mmc->ops = &atmci_ops;
@@ -1477,7 +1482,8 @@ static int __init atmci_init_slot(struct atmel_mci *host,
if (gpio_request(slot->detect_pin, "mmc_detect")) {
dev_dbg(&mmc->class_dev, "no detect pin available\n");
slot->detect_pin = -EBUSY;
- } else if (gpio_get_value(slot->detect_pin)) {
+ } else if (gpio_get_value(slot->detect_pin) ^
+ slot->detect_is_active_high) {
clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
}
}
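
The present = !(gpio_get_value(...) ^ detect_is_active_high) expression used throughout the hunks above covers both detect polarities; the full truth table, as a quick standalone check:

#include <stdio.h>

int main(void)
{
	int active_high, gpio;

	for (active_high = 0; active_high <= 1; active_high++)
		for (gpio = 0; gpio <= 1; gpio++)
			printf("detect_is_active_high=%d gpio=%d -> present=%d\n",
			       active_high, gpio, !(gpio ^ active_high));
	return 0;
}
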