From d1c8b0a7692e81b46550bcc493465ed10510cd33 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior
Date: Tue, 21 Apr 2009 14:14:37 +0800
Subject: crypto: padlock - Enable on x86_64

Almost everything stays the same, we need just to use the extended
registers on the 64bit variant.

Signed-off-by: Sebastian Andrzej Siewior
Signed-off-by: Herbert Xu
---
 drivers/crypto/padlock-aes.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)

(limited to 'drivers/crypto/padlock-aes.c')

diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 856b3cc2558..87f92c39b5f 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -154,7 +154,11 @@ static inline void padlock_reset_key(struct cword *cword)
 	int cpu = raw_smp_processor_id();
 
 	if (cword != per_cpu(last_cword, cpu))
+#ifndef CONFIG_X86_64
 		asm volatile ("pushfl; popfl");
+#else
+		asm volatile ("pushfq; popfq");
+#endif
 }
 
 static inline void padlock_store_cword(struct cword *cword)
@@ -208,10 +212,19 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
 
 	asm volatile ("test $1, %%cl;"
 		      "je 1f;"
+#ifndef CONFIG_X86_64
 		      "lea -1(%%ecx), %%eax;"
 		      "mov $1, %%ecx;"
+#else
+		      "lea -1(%%rcx), %%rax;"
+		      "mov $1, %%rcx;"
+#endif
 		      ".byte 0xf3,0x0f,0xa7,0xc8;"	/* rep xcryptecb */
+#ifndef CONFIG_X86_64
 		      "mov %%eax, %%ecx;"
+#else
+		      "mov %%rax, %%rcx;"
+#endif
 		      "1:"
 		      ".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
 		      : "+S"(input), "+D"(output)
--
cgit v1.2.3-70-g09d2
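The whole of the change above is a register-width split: pushfl/popfl cannot be assembled in 64-bit mode, where the flags push/pop is pushfq/popfq, and the block count moves through %rcx/%rax instead of %ecx/%eax. A minimal sketch of how the flags round-trip in padlock_reset_key() could be written once for both widths (hypothetical macro and helper names, not part of the patch, which keeps the explicit #ifndef CONFIG_X86_64 blocks):

    #ifdef CONFIG_X86_64
    #define PADLOCK_FLUSH_FLAGS "pushfq; popfq"   /* RFLAGS round-trip on x86_64 */
    #else
    #define PADLOCK_FLUSH_FLAGS "pushfl; popfl"   /* EFLAGS round-trip on i386 */
    #endif

    /*
     * Hypothetical helper: rewriting the flags register is what makes the
     * PadLock unit reload the key/control word, which is why
     * padlock_reset_key() performs it when the cached cword changes.
     */
    static inline void padlock_flush_key_context(void)
    {
            asm volatile (PADLOCK_FLUSH_FLAGS);
    }
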
From a76c1c23d0c33d98f2d9b36e76e7f71289fc8391 Mon Sep 17 00:00:00 2001
From: Chuck Ebbert
Date: Thu, 18 Jun 2009 19:24:10 +0800
Subject: crypto: padlock-aes - work around Nano CPU errata in ECB mode

The VIA Nano processor has a bug that makes it prefetch extra data
during encryption operations, causing spurious page faults. Extend
existing workarounds for ECB mode to copy the data to a temporary
buffer to avoid the problem.

Signed-off-by: Chuck Ebbert
Acked-by: Harald Welte
Signed-off-by: Herbert Xu
---
 drivers/crypto/padlock-aes.c | 81 +++++++++++++++++++++++++-------------------
 1 file changed, 46 insertions(+), 35 deletions(-)

(limited to 'drivers/crypto/padlock-aes.c')

diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 87f92c39b5f..e1d8776c697 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -18,9 +18,17 @@
 #include
 #include
 #include
+#include
 #include
 #include "padlock.h"
 
+/* number of data blocks actually fetched for each xcrypt insn */
+static unsigned int ecb_fetch_blocks = 2;
+static unsigned int cbc_fetch_blocks = 1;
+
+#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE)
+#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE)
+
 /* Control word. */
 struct cword {
 	unsigned int __attribute__ ((__packed__))
@@ -173,63 +181,59 @@ static inline void padlock_store_cword(struct cword *cword)
  */
 
 static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
-				  struct cword *control_word)
+				  struct cword *control_word, int count)
 {
 	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
 		      : "+S"(input), "+D"(output)
-		      : "d"(control_word), "b"(key), "c"(1));
+		      : "d"(control_word), "b"(key), "c"(count));
 }
 
-static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword)
+static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key,
+			   struct cword *cword, int count)
 {
-	u8 buf[AES_BLOCK_SIZE * 2 + PADLOCK_ALIGNMENT - 1];
+	/*
+	 * Padlock prefetches extra data so we must provide mapped input buffers.
+	 * Assume there are at least 16 bytes of stack already in use.
+	 */
+	u8 buf[AES_BLOCK_SIZE * 7 + PADLOCK_ALIGNMENT - 1];
 	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
 
-	memcpy(tmp, in, AES_BLOCK_SIZE);
-	padlock_xcrypt(tmp, out, key, cword);
+	memcpy(tmp, in, count * AES_BLOCK_SIZE);
+	padlock_xcrypt(tmp, out, key, cword, count);
 }
 
 static inline void aes_crypt(const u8 *in, u8 *out, u32 *key,
-			     struct cword *cword)
+			     struct cword *cword, int count)
 {
-	/* padlock_xcrypt requires at least two blocks of data. */
-	if (unlikely(!(((unsigned long)in ^ (PAGE_SIZE - AES_BLOCK_SIZE)) &
-		       (PAGE_SIZE - 1)))) {
-		aes_crypt_copy(in, out, key, cword);
+	/* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
+	 * We could avoid some copying here but it's probably not worth it.
+	 */
+	if (unlikely(((unsigned long)in & PAGE_SIZE) + ecb_fetch_bytes > PAGE_SIZE)) {
+		aes_crypt_copy(in, out, key, cword, count);
 		return;
 	}
 
-	padlock_xcrypt(in, out, key, cword);
+	padlock_xcrypt(in, out, key, cword, count);
 }
 
 static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
 				      void *control_word, u32 count)
 {
-	if (count == 1) {
-		aes_crypt(input, output, key, control_word);
+	u32 initial = count & (ecb_fetch_blocks - 1);
+
+	if (count < ecb_fetch_blocks) {
+		aes_crypt(input, output, key, control_word, count);
 		return;
 	}
 
-	asm volatile ("test $1, %%cl;"
-		      "je 1f;"
-#ifndef CONFIG_X86_64
-		      "lea -1(%%ecx), %%eax;"
-		      "mov $1, %%ecx;"
-#else
-		      "lea -1(%%rcx), %%rax;"
-		      "mov $1, %%rcx;"
-#endif
-		      ".byte 0xf3,0x0f,0xa7,0xc8;"	/* rep xcryptecb */
-#ifndef CONFIG_X86_64
-		      "mov %%eax, %%ecx;"
-#else
-		      "mov %%rax, %%rcx;"
-#endif
-		      "1:"
-		      ".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
+	if (initial)
+		asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
+			      : "+S"(input), "+D"(output)
+			      : "d"(control_word), "b"(key), "c"(initial));
+
+	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
 		      : "+S"(input), "+D"(output)
-		      : "d"(control_word), "b"(key), "c"(count)
-		      : "ax");
+		      : "d"(control_word), "b"(key), "c"(count - initial));
 }
 
 static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
@@ -249,7 +253,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 
 	padlock_reset_key(&ctx->cword.encrypt);
 	ts_state = irq_ts_save();
-	aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
+	aes_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
 	irq_ts_restore(ts_state);
 	padlock_store_cword(&ctx->cword.encrypt);
 }
@@ -261,7 +265,7 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 
 	padlock_reset_key(&ctx->cword.encrypt);
 	ts_state = irq_ts_save();
-	aes_crypt(in, out, ctx->D, &ctx->cword.decrypt);
+	aes_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
 	irq_ts_restore(ts_state);
 	padlock_store_cword(&ctx->cword.encrypt);
 }
@@ -454,6 +458,7 @@ static struct crypto_alg cbc_aes_alg = {
 static int __init padlock_init(void)
 {
 	int ret;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 
 	if (!cpu_has_xcrypt) {
 		printk(KERN_NOTICE PFX "VIA PadLock not detected.\n");
@@ -476,6 +481,12 @@ static int __init padlock_init(void)
 
 	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
 
+	if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
+		ecb_fetch_blocks = 8;
+		cbc_fetch_blocks = 4;	/* NOTE: notused */
+		printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
+	}
+
 out:
 	return ret;
 
--
cgit v1.2.3-70-g09d2
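The heart of the ECB workaround above is the page-boundary test in aes_crypt(): the engine may fetch up to ecb_fetch_bytes starting at the source pointer, so a block that sits close to the end of its page can make that prefetch touch the following, possibly unmapped, page and fault. A rough sketch of the intent, written with offset_in_page() for readability (hypothetical helper name; the committed test is spelled differently):

    #include <linux/mm.h>	/* offset_in_page(), PAGE_SIZE */

    /*
     * Sketch of the idea only, not the driver's literal expression: if the
     * hardware's fetch window starting at "in" would run past the end of
     * the page "in" lives in, the caller bounces the data through an
     * aligned on-stack buffer that is known to be fully mapped.
     */
    static inline bool padlock_fetch_may_cross_page(const void *in,
                                                    unsigned int fetch_bytes)
    {
            return offset_in_page(in) + fetch_bytes > PAGE_SIZE;
    }
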
From 8d8409f773af2cfd52e23e4b138a7d55a31182cd Mon Sep 17 00:00:00 2001
From: Chuck Ebbert
Date: Thu, 18 Jun 2009 19:31:09 +0800
Subject: crypto: padlock-aes - work around Nano CPU errata in CBC mode

Extend the previous workarounds for the prefetch bug to cover CBC mode
and clean up the code a bit.

Signed-off-by: Chuck Ebbert
Acked-by: Harald Welte
Signed-off-by: Herbert Xu
---
 drivers/crypto/padlock-aes.c | 83 ++++++++++++++++++++++++++++++++++----------
 1 file changed, 65 insertions(+), 18 deletions(-)

(limited to 'drivers/crypto/padlock-aes.c')

diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index e1d8776c697..a9952b1236b 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -22,11 +22,16 @@
 #include
 #include "padlock.h"
 
-/* number of data blocks actually fetched for each xcrypt insn */
+/*
+ * Number of data blocks actually fetched for each xcrypt insn.
+ * Processors with prefetch errata will fetch extra blocks.
+ */
 static unsigned int ecb_fetch_blocks = 2;
-static unsigned int cbc_fetch_blocks = 1;
-
+#define MAX_ECB_FETCH_BLOCKS (8)
 #define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE)
+
+static unsigned int cbc_fetch_blocks = 1;
+#define MAX_CBC_FETCH_BLOCKS (4)
 #define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE)
 
 /* Control word. */
@@ -180,7 +185,7 @@ static inline void padlock_store_cword(struct cword *cword)
  * should be used only inside the irq_ts_save/restore() context
  */
 
-static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
+static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
 				  struct cword *control_word, int count)
 {
 	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
@@ -188,32 +193,65 @@ static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
 		      : "d"(control_word), "b"(key), "c"(count));
 }
 
-static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key,
+static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key,
+				 u8 *iv, struct cword *control_word, int count)
+{
+	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
+		      : "+S" (input), "+D" (output), "+a" (iv)
+		      : "d" (control_word), "b" (key), "c" (count));
+	return iv;
+}
+
+static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key,
 			   struct cword *cword, int count)
 {
 	/*
 	 * Padlock prefetches extra data so we must provide mapped input buffers.
 	 * Assume there are at least 16 bytes of stack already in use.
 	 */
-	u8 buf[AES_BLOCK_SIZE * 7 + PADLOCK_ALIGNMENT - 1];
+	u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
 	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
 
 	memcpy(tmp, in, count * AES_BLOCK_SIZE);
-	padlock_xcrypt(tmp, out, key, cword, count);
+	rep_xcrypt_ecb(tmp, out, key, cword, count);
 }
 
-static inline void aes_crypt(const u8 *in, u8 *out, u32 *key,
+static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key,
+			  u8 *iv, struct cword *cword, int count)
+{
+	/*
+	 * Padlock prefetches extra data so we must provide mapped input buffers.
+	 * Assume there are at least 16 bytes of stack already in use.
+	 */
+	u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
+	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
+
+	memcpy(tmp, in, count * AES_BLOCK_SIZE);
+	return rep_xcrypt_cbc(tmp, out, key, iv, cword, count);
+}
+
+static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
 			     struct cword *cword, int count)
 {
 	/* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
 	 * We could avoid some copying here but it's probably not worth it.
 	 */
 	if (unlikely(((unsigned long)in & PAGE_SIZE) + ecb_fetch_bytes > PAGE_SIZE)) {
-		aes_crypt_copy(in, out, key, cword, count);
+		ecb_crypt_copy(in, out, key, cword, count);
 		return;
 	}
 
-	padlock_xcrypt(in, out, key, cword, count);
+	rep_xcrypt_ecb(in, out, key, cword, count);
+}
+
+static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key,
+			    u8 *iv, struct cword *cword, int count)
+{
+	/* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */
+	if (unlikely(((unsigned long)in & PAGE_SIZE) + cbc_fetch_bytes > PAGE_SIZE))
+		return cbc_crypt_copy(in, out, key, iv, cword, count);
+
+	return rep_xcrypt_cbc(in, out, key, iv, cword, count);
 }
 
 static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
@@ -222,7 +260,7 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
 	u32 initial = count & (ecb_fetch_blocks - 1);
 
 	if (count < ecb_fetch_blocks) {
-		aes_crypt(input, output, key, control_word, count);
+		ecb_crypt(input, output, key, control_word, count);
 		return;
 	}
 
@@ -239,10 +277,19 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
 static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
 				     u8 *iv, void *control_word, u32 count)
 {
-	/* rep xcryptcbc */
-	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"
+	u32 initial = count & (cbc_fetch_blocks - 1);
+
+	if (count < cbc_fetch_blocks)
+		return cbc_crypt(input, output, key, iv, control_word, count);
+
+	if (initial)
+		asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
+			      : "+S" (input), "+D" (output), "+a" (iv)
+			      : "d" (control_word), "b" (key), "c" (count));
+
+	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
 		      : "+S" (input), "+D" (output), "+a" (iv)
-		      : "d" (control_word), "b" (key), "c" (count));
+		      : "d" (control_word), "b" (key), "c" (count-initial));
 	return iv;
 }
 
@@ -253,7 +300,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 
 	padlock_reset_key(&ctx->cword.encrypt);
 	ts_state = irq_ts_save();
-	aes_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
+	ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
 	irq_ts_restore(ts_state);
 	padlock_store_cword(&ctx->cword.encrypt);
 }
@@ -265,7 +312,7 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 
 	padlock_reset_key(&ctx->cword.encrypt);
 	ts_state = irq_ts_save();
-	aes_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
+	ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
 	irq_ts_restore(ts_state);
 	padlock_store_cword(&ctx->cword.encrypt);
 }
@@ -482,8 +529,8 @@ static int __init padlock_init(void)
 	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
 
 	if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
-		ecb_fetch_blocks = 8;
-		cbc_fetch_blocks = 4;	/* NOTE: notused */
+		ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
+		cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
 		printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
 	}
 
--
cgit v1.2.3-70-g09d2
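A closing note on the chunking that padlock_xcrypt_ecb() and the reworked padlock_xcrypt_cbc() share: the block count is split into an odd-sized head of count & (fetch_blocks - 1) blocks and a remainder that is an exact multiple of the fetch size; handling the odd head in a separate first pass lets the final rep pass end on a fetch-size boundary, so the engine's trailing over-fetch stays within the caller's buffer. A small worked sketch of that arithmetic (illustrative helper, not part of the patches):

    #include <linux/types.h>

    /*
     * Illustration of the split, assuming fetch_blocks is a power of two
     * (1, 2, 4 or 8 in this driver). With fetch_blocks = 4 and count = 10:
     *   head = 10 & 3 = 2    -> first pass covers 2 blocks
     *   tail = 10 - 2 = 8    -> second pass covers 8 blocks, a multiple of 4
     */
    static inline void padlock_split_count(u32 count, u32 fetch_blocks,
                                           u32 *head, u32 *tail)
    {
            *head = count & (fetch_blocks - 1);
            *tail = count - *head;
    }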