Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/Kconfig                   |   1
-rw-r--r--  arch/s390/crypto/Makefile           |   1
-rw-r--r--  arch/s390/crypto/aes_s390.c         | 383
-rw-r--r--  arch/s390/crypto/crypt_s390.h       | 112
-rw-r--r--  arch/s390/crypto/des_check_key.c    | 132
-rw-r--r--  arch/s390/crypto/des_s390.c         | 370
-rw-r--r--  arch/s390/crypto/ghash_s390.c       | 162
-rw-r--r--  arch/s390/crypto/prng.c             |   4
-rw-r--r--  arch/s390/crypto/sha1_s390.c        |   2
-rw-r--r--  arch/s390/crypto/sha256_s390.c      |   2
-rw-r--r--  arch/s390/crypto/sha512_s390.c      |   2
-rw-r--r--  arch/s390/hypfs/hypfs.h             |   2
-rw-r--r--  arch/s390/include/asm/cacheflush.h  |   1
-rw-r--r--  arch/s390/include/asm/diag.h        |  17
-rw-r--r--  arch/s390/include/asm/ftrace.h      |   4
-rw-r--r--  arch/s390/include/asm/jump_label.h  |  37
-rw-r--r--  arch/s390/include/asm/mmu_context.h |   2
-rw-r--r--  arch/s390/kernel/Makefile           |   2
-rw-r--r--  arch/s390/kernel/diag.c             |  21
-rw-r--r--  arch/s390/kernel/dis.c              |   1
-rw-r--r--  arch/s390/kernel/entry.S            |   2
-rw-r--r--  arch/s390/kernel/entry64.S          |   2
-rw-r--r--  arch/s390/kernel/jump_label.c       |  59
-rw-r--r--  arch/s390/kernel/smp.c              |   6
-rw-r--r--  arch/s390/kvm/sie64a.S              |   4
-rw-r--r--  arch/s390/mm/cmm.c                  |   2
-rw-r--r--  arch/s390/mm/fault.c                |   6
-rw-r--r--  arch/s390/mm/pageattr.c             |  10
-rw-r--r--  arch/s390/oprofile/hwsampler.c      |  14
-rw-r--r--  arch/s390/oprofile/hwsampler.h      |   4
-rw-r--r--  arch/s390/oprofile/init.c           |   8
31 files changed, 1065 insertions, 310 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 2508a6f3158..4a7f14079e0 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -88,6 +88,7 @@ config S390
select HAVE_KERNEL_XZ
select HAVE_GET_USER_PAGES_FAST
select HAVE_ARCH_MUTEX_CPU_RELAX
+ select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
select ARCH_INLINE_SPIN_TRYLOCK
select ARCH_INLINE_SPIN_TRYLOCK_BH
select ARCH_INLINE_SPIN_LOCK
diff --git a/arch/s390/crypto/Makefile b/arch/s390/crypto/Makefile
index 1cf81d77c5a..7f0b7cda625 100644
--- a/arch/s390/crypto/Makefile
+++ b/arch/s390/crypto/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_CRYPTO_SHA512_S390) += sha512_s390.o sha_common.o
obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o
obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o
obj-$(CONFIG_S390_PRNG) += prng.o
+obj-$(CONFIG_CRYPTO_GHASH_S390) += ghash_s390.o
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 58f46734465..a9ce135893f 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -31,7 +31,8 @@
#define AES_KEYLEN_192 2
#define AES_KEYLEN_256 4
-static char keylen_flag = 0;
+static u8 *ctrblk;
+static char keylen_flag;
struct s390_aes_ctx {
u8 iv[AES_BLOCK_SIZE];
@@ -45,6 +46,24 @@ struct s390_aes_ctx {
} fallback;
};
+struct pcc_param {
+ u8 key[32];
+ u8 tweak[16];
+ u8 block[16];
+ u8 bit[16];
+ u8 xts[16];
+};
+
+struct s390_xts_ctx {
+ u8 key[32];
+ u8 xts_param[16];
+ struct pcc_param pcc;
+ long enc;
+ long dec;
+ int key_len;
+ struct crypto_blkcipher *fallback;
+};
+
/*
* Check if the key_len is supported by the HW.
* Returns 0 if it is, a positive number if it is not and software fallback is
@@ -504,15 +523,337 @@ static struct crypto_alg cbc_aes_alg = {
}
};
+static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int len)
+{
+ struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
+ unsigned int ret;
+
+ xts_ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+ xts_ctx->fallback->base.crt_flags |= (tfm->crt_flags &
+ CRYPTO_TFM_REQ_MASK);
+
+ ret = crypto_blkcipher_setkey(xts_ctx->fallback, key, len);
+ if (ret) {
+ tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+ tfm->crt_flags |= (xts_ctx->fallback->base.crt_flags &
+ CRYPTO_TFM_RES_MASK);
+ }
+ return ret;
+}
+
+static int xts_fallback_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_blkcipher *tfm;
+ unsigned int ret;
+
+ tfm = desc->tfm;
+ desc->tfm = xts_ctx->fallback;
+
+ ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
+
+ desc->tfm = tfm;
+ return ret;
+}
+
+static int xts_fallback_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_blkcipher *tfm;
+ unsigned int ret;
+
+ tfm = desc->tfm;
+ desc->tfm = xts_ctx->fallback;
+
+ ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
+
+ desc->tfm = tfm;
+ return ret;
+}
+
+static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
+ u32 *flags = &tfm->crt_flags;
+
+ switch (key_len) {
+ case 32:
+ xts_ctx->enc = KM_XTS_128_ENCRYPT;
+ xts_ctx->dec = KM_XTS_128_DECRYPT;
+ memcpy(xts_ctx->key + 16, in_key, 16);
+ memcpy(xts_ctx->pcc.key + 16, in_key + 16, 16);
+ break;
+ case 48:
+ xts_ctx->enc = 0;
+ xts_ctx->dec = 0;
+ xts_fallback_setkey(tfm, in_key, key_len);
+ break;
+ case 64:
+ xts_ctx->enc = KM_XTS_256_ENCRYPT;
+ xts_ctx->dec = KM_XTS_256_DECRYPT;
+ memcpy(xts_ctx->key, in_key, 32);
+ memcpy(xts_ctx->pcc.key, in_key + 32, 32);
+ break;
+ default:
+ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+ }
+ xts_ctx->key_len = key_len;
+ return 0;
+}
+
+static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
+ struct s390_xts_ctx *xts_ctx,
+ struct blkcipher_walk *walk)
+{
+ unsigned int offset = (xts_ctx->key_len >> 1) & 0x10;
+ int ret = blkcipher_walk_virt(desc, walk);
+ unsigned int nbytes = walk->nbytes;
+ unsigned int n;
+ u8 *in, *out;
+ void *param;
+
+ if (!nbytes)
+ goto out;
+
+ memset(xts_ctx->pcc.block, 0, sizeof(xts_ctx->pcc.block));
+ memset(xts_ctx->pcc.bit, 0, sizeof(xts_ctx->pcc.bit));
+ memset(xts_ctx->pcc.xts, 0, sizeof(xts_ctx->pcc.xts));
+ memcpy(xts_ctx->pcc.tweak, walk->iv, sizeof(xts_ctx->pcc.tweak));
+ param = xts_ctx->pcc.key + offset;
+ ret = crypt_s390_pcc(func, param);
+ BUG_ON(ret < 0);
+
+ memcpy(xts_ctx->xts_param, xts_ctx->pcc.xts, 16);
+ param = xts_ctx->key + offset;
+ do {
+ /* only use complete blocks */
+ n = nbytes & ~(AES_BLOCK_SIZE - 1);
+ out = walk->dst.virt.addr;
+ in = walk->src.virt.addr;
+
+ ret = crypt_s390_km(func, param, out, in, n);
+ BUG_ON(ret < 0 || ret != n);
+
+ nbytes &= AES_BLOCK_SIZE - 1;
+ ret = blkcipher_walk_done(desc, walk, nbytes);
+ } while ((nbytes = walk->nbytes));
+out:
+ return ret;
+}
+
+static int xts_aes_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+
+ if (unlikely(xts_ctx->key_len == 48))
+ return xts_fallback_encrypt(desc, dst, src, nbytes);
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ return xts_aes_crypt(desc, xts_ctx->enc, xts_ctx, &walk);
+}
+
+static int xts_aes_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+
+ if (unlikely(xts_ctx->key_len == 48))
+ return xts_fallback_decrypt(desc, dst, src, nbytes);
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ return xts_aes_crypt(desc, xts_ctx->dec, xts_ctx, &walk);
+}
+
+static int xts_fallback_init(struct crypto_tfm *tfm)
+{
+ const char *name = tfm->__crt_alg->cra_name;
+ struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
+
+ xts_ctx->fallback = crypto_alloc_blkcipher(name, 0,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+
+ if (IS_ERR(xts_ctx->fallback)) {
+ pr_err("Allocating XTS fallback algorithm %s failed\n",
+ name);
+ return PTR_ERR(xts_ctx->fallback);
+ }
+ return 0;
+}
+
+static void xts_fallback_exit(struct crypto_tfm *tfm)
+{
+ struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_blkcipher(xts_ctx->fallback);
+ xts_ctx->fallback = NULL;
+}
+
+static struct crypto_alg xts_aes_alg = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "xts-aes-s390",
+ .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct s390_xts_ctx),
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(xts_aes_alg.cra_list),
+ .cra_init = xts_fallback_init,
+ .cra_exit = xts_fallback_exit,
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = xts_aes_set_key,
+ .encrypt = xts_aes_encrypt,
+ .decrypt = xts_aes_decrypt,
+ }
+ }
+};
+
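For reference, a minimal in-kernel caller of the "xts(aes)" algorithm registered above might look like the sketch below. This is a hypothetical user of the blkcipher API of this kernel generation, not part of the patch; note that XTS keys are double-length, so a 64-byte key selects AES-256-XTS:

    /* Hypothetical caller sketch -- not part of this patch. */
    #include <linux/crypto.h>
    #include <linux/scatterlist.h>

    static int xts_aes_demo(const u8 *key, u8 *buf, unsigned int len)
    {
            struct crypto_blkcipher *tfm;
            struct blkcipher_desc desc;
            struct scatterlist sg;
            u8 iv[16] = { 0 };      /* tweak; a sector number in practice */
            int ret;

            tfm = crypto_alloc_blkcipher("xts(aes)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);
            desc.tfm = tfm;
            desc.flags = 0;
            /* 64 bytes = two AES-256 keys (data key + tweak key) */
            ret = crypto_blkcipher_setkey(tfm, key, 64);
            if (!ret) {
                    sg_init_one(&sg, buf, len);
                    crypto_blkcipher_set_iv(tfm, iv, sizeof(iv));
                    ret = crypto_blkcipher_encrypt(&desc, &sg, &sg, len);
            }
            crypto_free_blkcipher(tfm);
            return ret;
    }

A 48-byte (AES-192-XTS) key takes the software fallback path set up by xts_fallback_setkey() above, since the hardware only implements the 128- and 256-bit variants.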
+static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+
+ switch (key_len) {
+ case 16:
+ sctx->enc = KMCTR_AES_128_ENCRYPT;
+ sctx->dec = KMCTR_AES_128_DECRYPT;
+ break;
+ case 24:
+ sctx->enc = KMCTR_AES_192_ENCRYPT;
+ sctx->dec = KMCTR_AES_192_DECRYPT;
+ break;
+ case 32:
+ sctx->enc = KMCTR_AES_256_ENCRYPT;
+ sctx->dec = KMCTR_AES_256_DECRYPT;
+ break;
+ }
+
+ return aes_set_key(tfm, in_key, key_len);
+}
+
+static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
+ struct s390_aes_ctx *sctx, struct blkcipher_walk *walk)
+{
+ int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
+ unsigned int i, n, nbytes;
+ u8 buf[AES_BLOCK_SIZE];
+ u8 *out, *in;
+
+ if (!walk->nbytes)
+ return ret;
+
+ memcpy(ctrblk, walk->iv, AES_BLOCK_SIZE);
+ while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
+ out = walk->dst.virt.addr;
+ in = walk->src.virt.addr;
+ while (nbytes >= AES_BLOCK_SIZE) {
+ /* only use complete blocks, max. PAGE_SIZE */
+ n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
+ nbytes & ~(AES_BLOCK_SIZE - 1);
+ for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
+ memcpy(ctrblk + i, ctrblk + i - AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE);
+ crypto_inc(ctrblk + i, AES_BLOCK_SIZE);
+ }
+ ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk);
+ BUG_ON(ret < 0 || ret != n);
+ if (n > AES_BLOCK_SIZE)
+ memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE);
+ crypto_inc(ctrblk, AES_BLOCK_SIZE);
+ out += n;
+ in += n;
+ nbytes -= n;
+ }
+ ret = blkcipher_walk_done(desc, walk, nbytes);
+ }
+ /*
+ * final block may be < AES_BLOCK_SIZE, copy only nbytes
+ */
+ if (nbytes) {
+ out = walk->dst.virt.addr;
+ in = walk->src.virt.addr;
+ ret = crypt_s390_kmctr(func, sctx->key, buf, in,
+ AES_BLOCK_SIZE, ctrblk);
+ BUG_ON(ret < 0 || ret != AES_BLOCK_SIZE);
+ memcpy(out, buf, nbytes);
+ crypto_inc(ctrblk, AES_BLOCK_SIZE);
+ ret = blkcipher_walk_done(desc, walk, 0);
+ }
+ memcpy(walk->iv, ctrblk, AES_BLOCK_SIZE);
+ return ret;
+}
+
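The inner loop above precomputes up to a page of consecutive counter blocks so that a single KMCTR call can process many blocks at once. A minimal portable sketch of that fan-out (illustrative only; assumes 16-byte blocks and the big-endian increment semantics of crypto_inc()):

    #include <linux/string.h>
    #include <crypto/algapi.h>      /* crypto_inc() */

    /* Given the current counter in ctrblk[0..15], pre-generate
     * n_blocks consecutive counter values in one contiguous buffer. */
    static void fill_ctrblks(u8 *ctrblk, unsigned int n_blocks)
    {
            unsigned int i;

            for (i = 1; i < n_blocks; i++) {
                    memcpy(ctrblk + i * 16, ctrblk + (i - 1) * 16, 16);
                    crypto_inc(ctrblk + i * 16, 16); /* big-endian +1 */
            }
    }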
+static int ctr_aes_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ return ctr_aes_crypt(desc, sctx->enc, sctx, &walk);
+}
+
+static int ctr_aes_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ return ctr_aes_crypt(desc, sctx->dec, sctx, &walk);
+}
+
+static struct crypto_alg ctr_aes_alg = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-s390",
+ .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct s390_aes_ctx),
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(ctr_aes_alg.cra_list),
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ctr_aes_set_key,
+ .encrypt = ctr_aes_encrypt,
+ .decrypt = ctr_aes_decrypt,
+ }
+ }
+};
+
static int __init aes_s390_init(void)
{
int ret;
- if (crypt_s390_func_available(KM_AES_128_ENCRYPT))
+ if (crypt_s390_func_available(KM_AES_128_ENCRYPT, CRYPT_S390_MSA))
keylen_flag |= AES_KEYLEN_128;
- if (crypt_s390_func_available(KM_AES_192_ENCRYPT))
+ if (crypt_s390_func_available(KM_AES_192_ENCRYPT, CRYPT_S390_MSA))
keylen_flag |= AES_KEYLEN_192;
- if (crypt_s390_func_available(KM_AES_256_ENCRYPT))
+ if (crypt_s390_func_available(KM_AES_256_ENCRYPT, CRYPT_S390_MSA))
keylen_flag |= AES_KEYLEN_256;
if (!keylen_flag)
@@ -535,9 +876,40 @@ static int __init aes_s390_init(void)
if (ret)
goto cbc_aes_err;
+ if (crypt_s390_func_available(KM_XTS_128_ENCRYPT,
+ CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
+ crypt_s390_func_available(KM_XTS_256_ENCRYPT,
+ CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
+ ret = crypto_register_alg(&xts_aes_alg);
+ if (ret)
+ goto xts_aes_err;
+ }
+
+ if (crypt_s390_func_available(KMCTR_AES_128_ENCRYPT,
+ CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
+ crypt_s390_func_available(KMCTR_AES_192_ENCRYPT,
+ CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
+ crypt_s390_func_available(KMCTR_AES_256_ENCRYPT,
+ CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
+ ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
+ if (!ctrblk) {
+ ret = -ENOMEM;
+ goto ctr_aes_err;
+ }
+ ret = crypto_register_alg(&ctr_aes_alg);
+ if (ret) {
+ free_page((unsigned long) ctrblk);
+ goto ctr_aes_err;
+ }
+ }
+
out:
return ret;
+ctr_aes_err:
+ crypto_unregister_alg(&xts_aes_alg);
+xts_aes_err:
+ crypto_unregister_alg(&cbc_aes_alg);
cbc_aes_err:
crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
@@ -548,6 +920,9 @@ aes_err:
static void __exit aes_s390_fini(void)
{
+ crypto_unregister_alg(&ctr_aes_alg);
+ free_page((unsigned long) ctrblk);
+ crypto_unregister_alg(&xts_aes_alg);
crypto_unregister_alg(&cbc_aes_alg);
crypto_unregister_alg(&ecb_aes_alg);
crypto_unregister_alg(&aes_alg);
diff --git a/arch/s390/crypto/crypt_s390.h b/arch/s390/crypto/crypt_s390.h
index 7ee9a1b4ad9..49676771bd6 100644
--- a/arch/s390/crypto/crypt_s390.h
+++ b/arch/s390/crypto/crypt_s390.h
@@ -24,13 +24,18 @@
#define CRYPT_S390_PRIORITY 300
#define CRYPT_S390_COMPOSITE_PRIORITY 400
+#define CRYPT_S390_MSA 0x1
+#define CRYPT_S390_MSA3 0x2
+#define CRYPT_S390_MSA4 0x4
+
/* s390 cryptographic operations */
enum crypt_s390_operations {
CRYPT_S390_KM = 0x0100,
CRYPT_S390_KMC = 0x0200,
CRYPT_S390_KIMD = 0x0300,
CRYPT_S390_KLMD = 0x0400,
- CRYPT_S390_KMAC = 0x0500
+ CRYPT_S390_KMAC = 0x0500,
+ CRYPT_S390_KMCTR = 0x0600
};
/*
@@ -51,6 +56,10 @@ enum crypt_s390_km_func {
KM_AES_192_DECRYPT = CRYPT_S390_KM | 0x13 | 0x80,
KM_AES_256_ENCRYPT = CRYPT_S390_KM | 0x14,
KM_AES_256_DECRYPT = CRYPT_S390_KM | 0x14 | 0x80,
+ KM_XTS_128_ENCRYPT = CRYPT_S390_KM | 0x32,
+ KM_XTS_128_DECRYPT = CRYPT_S390_KM | 0x32 | 0x80,
+ KM_XTS_256_ENCRYPT = CRYPT_S390_KM | 0x34,
+ KM_XTS_256_DECRYPT = CRYPT_S390_KM | 0x34 | 0x80,
};
/*
@@ -75,6 +84,26 @@ enum crypt_s390_kmc_func {
};
/*
+ * function codes for KMCTR (CIPHER MESSAGE WITH COUNTER)
+ * instruction
+ */
+enum crypt_s390_kmctr_func {
+ KMCTR_QUERY = CRYPT_S390_KMCTR | 0x0,
+ KMCTR_DEA_ENCRYPT = CRYPT_S390_KMCTR | 0x1,
+ KMCTR_DEA_DECRYPT = CRYPT_S390_KMCTR | 0x1 | 0x80,
+ KMCTR_TDEA_128_ENCRYPT = CRYPT_S390_KMCTR | 0x2,
+ KMCTR_TDEA_128_DECRYPT = CRYPT_S390_KMCTR | 0x2 | 0x80,
+ KMCTR_TDEA_192_ENCRYPT = CRYPT_S390_KMCTR | 0x3,
+ KMCTR_TDEA_192_DECRYPT = CRYPT_S390_KMCTR | 0x3 | 0x80,
+ KMCTR_AES_128_ENCRYPT = CRYPT_S390_KMCTR | 0x12,
+ KMCTR_AES_128_DECRYPT = CRYPT_S390_KMCTR | 0x12 | 0x80,
+ KMCTR_AES_192_ENCRYPT = CRYPT_S390_KMCTR | 0x13,
+ KMCTR_AES_192_DECRYPT = CRYPT_S390_KMCTR | 0x13 | 0x80,
+ KMCTR_AES_256_ENCRYPT = CRYPT_S390_KMCTR | 0x14,
+ KMCTR_AES_256_DECRYPT = CRYPT_S390_KMCTR | 0x14 | 0x80,
+};
+
+/*
* function codes for KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST)
* instruction
*/
@@ -83,6 +112,7 @@ enum crypt_s390_kimd_func {
KIMD_SHA_1 = CRYPT_S390_KIMD | 1,
KIMD_SHA_256 = CRYPT_S390_KIMD | 2,
KIMD_SHA_512 = CRYPT_S390_KIMD | 3,
+ KIMD_GHASH = CRYPT_S390_KIMD | 65,
};
/*
@@ -284,6 +314,45 @@ static inline int crypt_s390_kmac(long func, void *param,
}
/**
+ * crypt_s390_kmctr:
+ * @func: the function code passed to KMCTR; see crypt_s390_kmctr_func
+ * @param: address of parameter block; see POP for details on each func
+ * @dest: address of destination memory area
+ * @src: address of source memory area
+ * @src_len: length of src operand in bytes
+ * @counter: address of counter value
+ *
+ * Executes the KMCTR (CIPHER MESSAGE WITH COUNTER) operation of the CPU.
+ *
+ * Returns -1 for failure, 0 for the query func, number of processed
+ * bytes for encryption/decryption funcs
+ */
+static inline int crypt_s390_kmctr(long func, void *param, u8 *dest,
+ const u8 *src, long src_len, u8 *counter)
+{
+ register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
+ register void *__param asm("1") = param;
+ register const u8 *__src asm("2") = src;
+ register long __src_len asm("3") = src_len;
+ register u8 *__dest asm("4") = dest;
+ register u8 *__ctr asm("6") = counter;
+ int ret = -1;
+
+ asm volatile(
+ "0: .insn rrf,0xb92d0000,%3,%1,%4,0 \n" /* KMCTR opcode */
+ "1: brc 1,0b \n" /* handle partial completion */
+ " la %0,0\n"
+ "2:\n"
+ EX_TABLE(0b,2b) EX_TABLE(1b,2b)
+ : "+d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest),
+ "+a" (__ctr)
+ : "d" (__func), "a" (__param) : "cc", "memory");
+ if (ret < 0)
+ return ret;
+ return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
+}
+
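To make the calling convention concrete, a hypothetical single-block AES-128 CTR encryption through this helper would be (sketch only; the real callers are the ctr_aes_crypt()/ctr_desall_crypt() routines):

    u8 key[16], ctr[16], in[16], out[16];
    int n;

    /* key, ctr and in are assumed to be filled in by the caller */
    n = crypt_s390_kmctr(KMCTR_AES_128_ENCRYPT, key, out, in, 16, ctr);
    /* n == 16 on success; the drivers above advance the counter
     * themselves with crypto_inc() between calls */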
+/**
* crypt_s390_func_available:
* @func: the function code of the specific function; 0 if op in general
*
@@ -291,13 +360,17 @@ static inline int crypt_s390_kmac(long func, void *param,
*
* Returns 1 if func available; 0 if func or op in general not available
*/
-static inline int crypt_s390_func_available(int func)
+static inline int crypt_s390_func_available(int func,
+ unsigned int facility_mask)
{
unsigned char status[16];
int ret;
- /* check if CPACF facility (bit 17) is available */
- if (!test_facility(17))
+ if (facility_mask & CRYPT_S390_MSA && !test_facility(17))
+ return 0;
+ if (facility_mask & CRYPT_S390_MSA3 && !test_facility(76))
+ return 0;
+ if (facility_mask & CRYPT_S390_MSA4 && !test_facility(77))
return 0;
switch (func & CRYPT_S390_OP_MASK) {
@@ -316,6 +389,10 @@ static inline int crypt_s390_func_available(int func)
case CRYPT_S390_KMAC:
ret = crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0);
break;
+ case CRYPT_S390_KMCTR:
+ ret = crypt_s390_kmctr(KMCTR_QUERY, &status, NULL, NULL, 0,
+ NULL);
+ break;
default:
return 0;
}
@@ -326,4 +403,31 @@ static inline int crypt_s390_func_available(int func)
return (status[func >> 3] & (0x80 >> (func & 7))) != 0;
}
+/**
+ * crypt_s390_pcc:
+ * @func: the function code passed to KM; see crypt_s390_km_func
+ * @param: address of parameter block; see POP for details on each func
+ *
+ * Executes the PCC (PERFORM CRYPTOGRAPHIC COMPUTATION) operation of the CPU.
+ *
+ * Returns -1 for failure, 0 for success.
+ */
+static inline int crypt_s390_pcc(long func, void *param)
+{
+ register long __func asm("0") = func & 0x7f; /* encrypt or decrypt */
+ register void *__param asm("1") = param;
+ int ret = -1;
+
+ asm volatile(
+ "0: .insn rre,0xb92c0000,0,0 \n" /* PCC opcode */
+ "1: brc 1,0b \n" /* handle partial completion */
+ " la %0,0\n"
+ "2:\n"
+ EX_TABLE(0b,2b) EX_TABLE(1b,2b)
+ : "+d" (ret)
+ : "d" (__func), "a" (__param) : "cc", "memory");
+ return ret;
+}
+
+
#endif /* _CRYPTO_ARCH_S390_CRYPT_S390_H */
diff --git a/arch/s390/crypto/des_check_key.c b/arch/s390/crypto/des_check_key.c
deleted file mode 100644
index 5706af26644..00000000000
--- a/arch/s390/crypto/des_check_key.c
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Function for checking keys for the DES and Triple DES Encryption
- * algorithms.
- *
- * Originally released as descore by Dana L. How <how@isl.stanford.edu>.
- * Modified by Raimar Falke <rf13@inf.tu-dresden.de> for the Linux-Kernel.
- * Derived from Cryptoapi and Nettle implementations, adapted for in-place
- * scatterlist interface. Changed LGPL to GPL per section 3 of the LGPL.
- *
- * s390 Version:
- * Copyright IBM Corp. 2003
- * Author(s): Thomas Spatzier
- * Jan Glauber (jan.glauber@de.ibm.com)
- *
- * Derived from "crypto/des.c"
- * Copyright (c) 1992 Dana L. How.
- * Copyright (c) Raimar Falke <rf13@inf.tu-dresden.de>
- * Copyright (c) Gisle Sælensminde <gisle@ii.uib.no>
- * Copyright (C) 2001 Niels Möller.
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- */
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/crypto.h>
-#include "crypto_des.h"
-
-#define ROR(d,c,o) ((d) = (d) >> (c) | (d) << (o))
-
-static const u8 parity[] = {
- 8,1,0,8,0,8,8,0,0,8,8,0,8,0,2,8,0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,3,
- 0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,8,0,0,8,0,8,8,0,0,8,8,0,8,0,0,8,
- 0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,8,0,0,8,0,8,8,0,0,8,8,0,8,0,0,8,
- 8,0,0,8,0,8,8,0,0,8,8,0,8,0,0,8,0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,
- 0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,8,0,0,8,0,8,8,0,0,8,8,0,8,0,0,8,
- 8,0,0,8,0,8,8,0,0,8,8,0,8,0,0,8,0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,
- 8,0,0,8,0,8,8,0,0,8,8,0,8,0,0,8,0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,
- 4,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,8,5,0,8,0,8,8,0,0,8,8,0,8,0,6,8,
-};
-
-/*
- * RFC2451: Weak key checks SHOULD be performed.
- */
-int
-crypto_des_check_key(const u8 *key, unsigned int keylen, u32 *flags)
-{
- u32 n, w;
-
- n = parity[key[0]]; n <<= 4;
- n |= parity[key[1]]; n <<= 4;
- n |= parity[key[2]]; n <<= 4;
- n |= parity[key[3]]; n <<= 4;
- n |= parity[key[4]]; n <<= 4;
- n |= parity[key[5]]; n <<= 4;
- n |= parity[key[6]]; n <<= 4;
- n |= parity[key[7]];
- w = 0x88888888L;
-
- if ((*flags & CRYPTO_TFM_REQ_WEAK_KEY)
- && !((n - (w >> 3)) & w)) { /* 1 in 10^10 keys passes this test */
- if (n < 0x41415151) {
- if (n < 0x31312121) {
- if (n < 0x14141515) {
- /* 01 01 01 01 01 01 01 01 */
- if (n == 0x11111111) goto weak;
- /* 01 1F 01 1F 01 0E 01 0E */
- if (n == 0x13131212) goto weak;
- } else {
- /* 01 E0 01 E0 01 F1 01 F1 */
- if (n == 0x14141515) goto weak;
- /* 01 FE 01 FE 01 FE 01 FE */
- if (n == 0x16161616) goto weak;
- }
- } else {
- if (n < 0x34342525) {
- /* 1F 01 1F 01 0E 01 0E 01 */
- if (n == 0x31312121) goto weak;
- /* 1F 1F 1F 1F 0E 0E 0E 0E (?) */
- if (n == 0x33332222) goto weak;
- } else {
- /* 1F E0 1F E0 0E F1 0E F1 */
- if (n == 0x34342525) goto weak;
- /* 1F FE 1F FE 0E FE 0E FE */
- if (n == 0x36362626) goto weak;
- }
- }
- } else {
- if (n < 0x61616161) {
- if (n < 0x44445555) {
- /* E0 01 E0 01 F1 01 F1 01 */
- if (n == 0x41415151) goto weak;
- /* E0 1F E0 1F F1 0E F1 0E */
- if (n == 0x43435252) goto weak;
- } else {
- /* E0 E0 E0 E0 F1 F1 F1 F1 (?) */
- if (n == 0x44445555) goto weak;
- /* E0 FE E0 FE F1 FE F1 FE */
- if (n == 0x46465656) goto weak;
- }
- } else {
- if (n < 0x64646565) {
- /* FE 01 FE 01 FE 01 FE 01 */
- if (n == 0x61616161) goto weak;
- /* FE 1F FE 1F FE 0E FE 0E */
- if (n == 0x63636262) goto weak;
- } else {
- /* FE E0 FE E0 FE F1 FE F1 */
- if (n == 0x64646565) goto weak;
- /* FE FE FE FE FE FE FE FE */
- if (n == 0x66666666) goto weak;
- }
- }
- }
- }
- return 0;
-weak:
- *flags |= CRYPTO_TFM_RES_WEAK_KEY;
- return -EINVAL;
-}
-
-EXPORT_SYMBOL(crypto_des_check_key);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Key Check function for DES & DES3 Cipher Algorithms");
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index cc542011839..a52bfd124d8 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -3,7 +3,7 @@
*
* s390 implementation of the DES Cipher Algorithm.
*
- * Copyright IBM Corp. 2003,2007
+ * Copyright IBM Corp. 2003,2011
* Author(s): Thomas Spatzier
* Jan Glauber (jan.glauber@de.ibm.com)
*
@@ -22,22 +22,19 @@
#include "crypt_s390.h"
-#define DES3_192_KEY_SIZE (3 * DES_KEY_SIZE)
+#define DES3_KEY_SIZE (3 * DES_KEY_SIZE)
-struct crypt_s390_des_ctx {
- u8 iv[DES_BLOCK_SIZE];
- u8 key[DES_KEY_SIZE];
-};
+static u8 *ctrblk;
-struct crypt_s390_des3_192_ctx {
+struct s390_des_ctx {
u8 iv[DES_BLOCK_SIZE];
- u8 key[DES3_192_KEY_SIZE];
+ u8 key[DES3_KEY_SIZE];
};
static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
+ unsigned int key_len)
{
- struct crypt_s390_des_ctx *dctx = crypto_tfm_ctx(tfm);
+ struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
u32 *flags = &tfm->crt_flags;
u32 tmp[DES_EXPKEY_WORDS];
@@ -47,22 +44,22 @@ static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
return -EINVAL;
}
- memcpy(dctx->key, key, keylen);
+ memcpy(ctx->key, key, key_len);
return 0;
}
static void des_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
- struct crypt_s390_des_ctx *dctx = crypto_tfm_ctx(tfm);
+ struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
- crypt_s390_km(KM_DEA_ENCRYPT, dctx->key, out, in, DES_BLOCK_SIZE);
+ crypt_s390_km(KM_DEA_ENCRYPT, ctx->key, out, in, DES_BLOCK_SIZE);
}
static void des_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
- struct crypt_s390_des_ctx *dctx = crypto_tfm_ctx(tfm);
+ struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
- crypt_s390_km(KM_DEA_DECRYPT, dctx->key, out, in, DES_BLOCK_SIZE);
+ crypt_s390_km(KM_DEA_DECRYPT, ctx->key, out, in, DES_BLOCK_SIZE);
}
static struct crypto_alg des_alg = {
@@ -71,7 +68,7 @@ static struct crypto_alg des_alg = {
.cra_priority = CRYPT_S390_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crypt_s390_des_ctx),
+ .cra_ctxsize = sizeof(struct s390_des_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(des_alg.cra_list),
.cra_u = {
@@ -86,7 +83,7 @@ static struct crypto_alg des_alg = {
};
static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
- void *param, struct blkcipher_walk *walk)
+ u8 *key, struct blkcipher_walk *walk)
{
int ret = blkcipher_walk_virt(desc, walk);
unsigned int nbytes;
@@ -97,7 +94,7 @@ static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
u8 *out = walk->dst.virt.addr;
u8 *in = walk->src.virt.addr;
- ret = crypt_s390_km(func, param, out, in, n);
+ ret = crypt_s390_km(func, key, out, in, n);
BUG_ON((ret < 0) || (ret != n));
nbytes &= DES_BLOCK_SIZE - 1;
@@ -108,7 +105,7 @@ static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
}
static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
- void *param, struct blkcipher_walk *walk)
+ u8 *iv, struct blkcipher_walk *walk)
{
int ret = blkcipher_walk_virt(desc, walk);
unsigned int nbytes = walk->nbytes;
@@ -116,20 +113,20 @@ static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
if (!nbytes)
goto out;
- memcpy(param, walk->iv, DES_BLOCK_SIZE);
+ memcpy(iv, walk->iv, DES_BLOCK_SIZE);
do {
/* only use complete blocks */
unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1);
u8 *out = walk->dst.virt.addr;
u8 *in = walk->src.virt.addr;
- ret = crypt_s390_kmc(func, param, out, in, n);
+ ret = crypt_s390_kmc(func, iv, out, in, n);
BUG_ON((ret < 0) || (ret != n));
nbytes &= DES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes);
} while ((nbytes = walk->nbytes));
- memcpy(walk->iv, param, DES_BLOCK_SIZE);
+ memcpy(walk->iv, iv, DES_BLOCK_SIZE);
out:
return ret;
@@ -139,22 +136,22 @@ static int ecb_des_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct crypt_s390_des_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+ struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_desall_crypt(desc, KM_DEA_ENCRYPT, sctx->key, &walk);
+ return ecb_desall_crypt(desc, KM_DEA_ENCRYPT, ctx->key, &walk);
}
static int ecb_des_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct crypt_s390_des_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+ struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_desall_crypt(desc, KM_DEA_DECRYPT, sctx->key, &walk);
+ return ecb_desall_crypt(desc, KM_DEA_DECRYPT, ctx->key, &walk);
}
static struct crypto_alg ecb_des_alg = {
@@ -163,7 +160,7 @@ static struct crypto_alg ecb_des_alg = {
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crypt_s390_des_ctx),
+ .cra_ctxsize = sizeof(struct s390_des_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(ecb_des_alg.cra_list),
@@ -182,22 +179,22 @@ static int cbc_des_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct crypt_s390_des_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+ struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, sctx->iv, &walk);
+ return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, ctx->iv, &walk);
}
static int cbc_des_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct crypt_s390_des_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+ struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, sctx->iv, &walk);
+ return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, ctx->iv, &walk);
}
static struct crypto_alg cbc_des_alg = {
@@ -206,7 +203,7 @@ static struct crypto_alg cbc_des_alg = {
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crypt_s390_des_ctx),
+ .cra_ctxsize = sizeof(struct s390_des_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(cbc_des_alg.cra_list),
@@ -235,10 +232,10 @@ static struct crypto_alg cbc_des_alg = {
* property.
*
*/
-static int des3_192_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
+static int des3_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int key_len)
{
- struct crypt_s390_des3_192_ctx *dctx = crypto_tfm_ctx(tfm);
+ struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
u32 *flags = &tfm->crt_flags;
if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
@@ -248,141 +245,276 @@ static int des3_192_setkey(struct crypto_tfm *tfm, const u8 *key,
*flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
- memcpy(dctx->key, key, keylen);
+ memcpy(ctx->key, key, key_len);
return 0;
}
-static void des3_192_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+static void des3_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
- struct crypt_s390_des3_192_ctx *dctx = crypto_tfm_ctx(tfm);
+ struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
- crypt_s390_km(KM_TDEA_192_ENCRYPT, dctx->key, dst, (void*)src,
- DES_BLOCK_SIZE);
+ crypt_s390_km(KM_TDEA_192_ENCRYPT, ctx->key, dst, src, DES_BLOCK_SIZE);
}
-static void des3_192_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+static void des3_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
- struct crypt_s390_des3_192_ctx *dctx = crypto_tfm_ctx(tfm);
+ struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
- crypt_s390_km(KM_TDEA_192_DECRYPT, dctx->key, dst, (void*)src,
- DES_BLOCK_SIZE);
+ crypt_s390_km(KM_TDEA_192_DECRYPT, ctx->key, dst, src, DES_BLOCK_SIZE);
}
-static struct crypto_alg des3_192_alg = {
+static struct crypto_alg des3_alg = {
.cra_name = "des3_ede",
.cra_driver_name = "des3_ede-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crypt_s390_des3_192_ctx),
+ .cra_ctxsize = sizeof(struct s390_des_ctx),
.cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(des3_192_alg.cra_list),
+ .cra_list = LIST_HEAD_INIT(des3_alg.cra_list),
.cra_u = {
.cipher = {
- .cia_min_keysize = DES3_192_KEY_SIZE,
- .cia_max_keysize = DES3_192_KEY_SIZE,
- .cia_setkey = des3_192_setkey,
- .cia_encrypt = des3_192_encrypt,
- .cia_decrypt = des3_192_decrypt,
+ .cia_min_keysize = DES3_KEY_SIZE,
+ .cia_max_keysize = DES3_KEY_SIZE,
+ .cia_setkey = des3_setkey,
+ .cia_encrypt = des3_encrypt,
+ .cia_decrypt = des3_decrypt,
}
}
};
-static int ecb_des3_192_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_des3_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
{
- struct crypt_s390_des3_192_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+ struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_desall_crypt(desc, KM_TDEA_192_ENCRYPT, sctx->key, &walk);
+ return ecb_desall_crypt(desc, KM_TDEA_192_ENCRYPT, ctx->key, &walk);
}
-static int ecb_des3_192_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_des3_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
{
- struct crypt_s390_des3_192_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+ struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_desall_crypt(desc, KM_TDEA_192_DECRYPT, sctx->key, &walk);
+ return ecb_desall_crypt(desc, KM_TDEA_192_DECRYPT, ctx->key, &walk);
}
-static struct crypto_alg ecb_des3_192_alg = {
+static struct crypto_alg ecb_des3_alg = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "ecb-des3_ede-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crypt_s390_des3_192_ctx),
+ .cra_ctxsize = sizeof(struct s390_des_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(
- ecb_des3_192_alg.cra_list),
+ ecb_des3_alg.cra_list),
.cra_u = {
.blkcipher = {
- .min_keysize = DES3_192_KEY_SIZE,
- .max_keysize = DES3_192_KEY_SIZE,
- .setkey = des3_192_setkey,
- .encrypt = ecb_des3_192_encrypt,
- .decrypt = ecb_des3_192_decrypt,
+ .min_keysize = DES3_KEY_SIZE,
+ .max_keysize = DES3_KEY_SIZE,
+ .setkey = des3_setkey,
+ .encrypt = ecb_des3_encrypt,
+ .decrypt = ecb_des3_decrypt,
}
}
};
-static int cbc_des3_192_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int cbc_des3_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
{
- struct crypt_s390_des3_192_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+ struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, sctx->iv, &walk);
+ return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, ctx->iv, &walk);
}
-static int cbc_des3_192_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int cbc_des3_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
{
- struct crypt_s390_des3_192_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+ struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, sctx->iv, &walk);
+ return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, ctx->iv, &walk);
}
-static struct crypto_alg cbc_des3_192_alg = {
+static struct crypto_alg cbc_des3_alg = {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "cbc-des3_ede-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crypt_s390_des3_192_ctx),
+ .cra_ctxsize = sizeof(struct s390_des_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(
- cbc_des3_192_alg.cra_list),
+ cbc_des3_alg.cra_list),
.cra_u = {
.blkcipher = {
- .min_keysize = DES3_192_KEY_SIZE,
- .max_keysize = DES3_192_KEY_SIZE,
+ .min_keysize = DES3_KEY_SIZE,
+ .max_keysize = DES3_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
- .setkey = des3_192_setkey,
- .encrypt = cbc_des3_192_encrypt,
- .decrypt = cbc_des3_192_decrypt,
+ .setkey = des3_setkey,
+ .encrypt = cbc_des3_encrypt,
+ .decrypt = cbc_des3_decrypt,
}
}
};
-static int des_s390_init(void)
+static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
+ struct s390_des_ctx *ctx, struct blkcipher_walk *walk)
+{
+ int ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
+ unsigned int i, n, nbytes;
+ u8 buf[DES_BLOCK_SIZE];
+ u8 *out, *in;
+
+ memcpy(ctrblk, walk->iv, DES_BLOCK_SIZE);
+ while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
+ out = walk->dst.virt.addr;
+ in = walk->src.virt.addr;
+ while (nbytes >= DES_BLOCK_SIZE) {
+ /* align to block size, max. PAGE_SIZE */
+ n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
+ nbytes & ~(DES_BLOCK_SIZE - 1);
+ for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
+ memcpy(ctrblk + i, ctrblk + i - DES_BLOCK_SIZE,
+ DES_BLOCK_SIZE);
+ crypto_inc(ctrblk + i, DES_BLOCK_SIZE);
+ }
+ ret = crypt_s390_kmctr(func, ctx->key, out, in, n, ctrblk);
+ BUG_ON((ret < 0) || (ret != n));
+ if (n > DES_BLOCK_SIZE)
+ memcpy(ctrblk, ctrblk + n - DES_BLOCK_SIZE,
+ DES_BLOCK_SIZE);
+ crypto_inc(ctrblk, DES_BLOCK_SIZE);
+ out += n;
+ in += n;
+ nbytes -= n;
+ }
+ ret = blkcipher_walk_done(desc, walk, nbytes);
+ }
+
+ /* final block may be < DES_BLOCK_SIZE, copy only nbytes */
+ if (nbytes) {
+ out = walk->dst.virt.addr;
+ in = walk->src.virt.addr;
+ ret = crypt_s390_kmctr(func, ctx->key, buf, in,
+ DES_BLOCK_SIZE, ctrblk);
+ BUG_ON(ret < 0 || ret != DES_BLOCK_SIZE);
+ memcpy(out, buf, nbytes);
+ crypto_inc(ctrblk, DES_BLOCK_SIZE);
+ ret = blkcipher_walk_done(desc, walk, 0);
+ }
+ memcpy(walk->iv, ctrblk, DES_BLOCK_SIZE);
+ return ret;
+}
+
+static int ctr_des_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ return ctr_desall_crypt(desc, KMCTR_DEA_ENCRYPT, ctx, &walk);
+}
+
+static int ctr_des_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ return ctr_desall_crypt(desc, KMCTR_DEA_DECRYPT, ctx, &walk);
+}
+
+static struct crypto_alg ctr_des_alg = {
+ .cra_name = "ctr(des)",
+ .cra_driver_name = "ctr-des-s390",
+ .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct s390_des_ctx),
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(ctr_des_alg.cra_list),
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = des_setkey,
+ .encrypt = ctr_des_encrypt,
+ .decrypt = ctr_des_decrypt,
+ }
+ }
+};
+
+static int ctr_des3_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ return ctr_desall_crypt(desc, KMCTR_TDEA_192_ENCRYPT, ctx, &walk);
+}
+
+static int ctr_des3_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ return ctr_desall_crypt(desc, KMCTR_TDEA_192_DECRYPT, ctx, &walk);
+}
+
+static struct crypto_alg ctr_des3_alg = {
+ .cra_name = "ctr(des3_ede)",
+ .cra_driver_name = "ctr-des3_ede-s390",
+ .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct s390_des_ctx),
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(ctr_des3_alg.cra_list),
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = DES3_KEY_SIZE,
+ .max_keysize = DES3_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = des3_setkey,
+ .encrypt = ctr_des3_encrypt,
+ .decrypt = ctr_des3_decrypt,
+ }
+ }
+};
+
+static int __init des_s390_init(void)
{
int ret;
- if (!crypt_s390_func_available(KM_DEA_ENCRYPT) ||
- !crypt_s390_func_available(KM_TDEA_192_ENCRYPT))
+ if (!crypt_s390_func_available(KM_DEA_ENCRYPT, CRYPT_S390_MSA) ||
+ !crypt_s390_func_available(KM_TDEA_192_ENCRYPT, CRYPT_S390_MSA))
return -EOPNOTSUPP;
ret = crypto_register_alg(&des_alg);
@@ -394,23 +526,46 @@ static int des_s390_init(void)
ret = crypto_register_alg(&cbc_des_alg);
if (ret)
goto cbc_des_err;
- ret = crypto_register_alg(&des3_192_alg);
+ ret = crypto_register_alg(&des3_alg);
if (ret)
- goto des3_192_err;
- ret = crypto_register_alg(&ecb_des3_192_alg);
+ goto des3_err;
+ ret = crypto_register_alg(&ecb_des3_alg);
if (ret)
- goto ecb_des3_192_err;
- ret = crypto_register_alg(&cbc_des3_192_alg);
+ goto ecb_des3_err;
+ ret = crypto_register_alg(&cbc_des3_alg);
if (ret)
- goto cbc_des3_192_err;
+ goto cbc_des3_err;
+
+ if (crypt_s390_func_available(KMCTR_DEA_ENCRYPT,
+ CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
+ crypt_s390_func_available(KMCTR_TDEA_192_ENCRYPT,
+ CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
+ ret = crypto_register_alg(&ctr_des_alg);
+ if (ret)
+ goto ctr_des_err;
+ ret = crypto_register_alg(&ctr_des3_alg);
+ if (ret)
+ goto ctr_des3_err;
+ ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
+ if (!ctrblk) {
+ ret = -ENOMEM;
+ goto ctr_mem_err;
+ }
+ }
out:
return ret;
-cbc_des3_192_err:
- crypto_unregister_alg(&ecb_des3_192_alg);
-ecb_des3_192_err:
- crypto_unregister_alg(&des3_192_alg);
-des3_192_err:
+ctr_mem_err:
+ crypto_unregister_alg(&ctr_des3_alg);
+ctr_des3_err:
+ crypto_unregister_alg(&ctr_des_alg);
+ctr_des_err:
+ crypto_unregister_alg(&cbc_des3_alg);
+cbc_des3_err:
+ crypto_unregister_alg(&ecb_des3_alg);
+ecb_des3_err:
+ crypto_unregister_alg(&des3_alg);
+des3_err:
crypto_unregister_alg(&cbc_des_alg);
cbc_des_err:
crypto_unregister_alg(&ecb_des_alg);
@@ -422,9 +577,14 @@ des_err:
static void __exit des_s390_exit(void)
{
- crypto_unregister_alg(&cbc_des3_192_alg);
- crypto_unregister_alg(&ecb_des3_192_alg);
- crypto_unregister_alg(&des3_192_alg);
+ if (ctrblk) {
+ crypto_unregister_alg(&ctr_des_alg);
+ crypto_unregister_alg(&ctr_des3_alg);
+ free_page((unsigned long) ctrblk);
+ }
+ crypto_unregister_alg(&cbc_des3_alg);
+ crypto_unregister_alg(&ecb_des3_alg);
+ crypto_unregister_alg(&des3_alg);
crypto_unregister_alg(&cbc_des_alg);
crypto_unregister_alg(&ecb_des_alg);
crypto_unregister_alg(&des_alg);
diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
new file mode 100644
index 00000000000..b1bd170f24b
--- /dev/null
+++ b/arch/s390/crypto/ghash_s390.c
@@ -0,0 +1,162 @@
+/*
+ * Cryptographic API.
+ *
+ * s390 implementation of the GHASH algorithm for GCM (Galois/Counter Mode).
+ *
+ * Copyright IBM Corp. 2011
+ * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
+ */
+
+#include <crypto/internal/hash.h>
+#include <linux/module.h>
+
+#include "crypt_s390.h"
+
+#define GHASH_BLOCK_SIZE 16
+#define GHASH_DIGEST_SIZE 16
+
+struct ghash_ctx {
+ u8 icv[16];
+ u8 key[16];
+};
+
+struct ghash_desc_ctx {
+ u8 buffer[GHASH_BLOCK_SIZE];
+ u32 bytes;
+};
+
+static int ghash_init(struct shash_desc *desc)
+{
+ struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ memset(dctx, 0, sizeof(*dctx));
+
+ return 0;
+}
+
+static int ghash_setkey(struct crypto_shash *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
+
+ if (keylen != GHASH_BLOCK_SIZE) {
+ crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ memcpy(ctx->key, key, GHASH_BLOCK_SIZE);
+ memset(ctx->icv, 0, GHASH_BLOCK_SIZE);
+
+ return 0;
+}
+
+static int ghash_update(struct shash_desc *desc,
+ const u8 *src, unsigned int srclen)
+{
+ struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+ struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
+ unsigned int n;
+ u8 *buf = dctx->buffer;
+ int ret;
+
+ if (dctx->bytes) {
+ u8 *pos = buf + (GHASH_BLOCK_SIZE - dctx->bytes);
+
+ n = min(srclen, dctx->bytes);
+ dctx->bytes -= n;
+ srclen -= n;
+
+ memcpy(pos, src, n);
+ src += n;
+
+ if (!dctx->bytes) {
+ ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf,
+ GHASH_BLOCK_SIZE);
+ BUG_ON(ret != GHASH_BLOCK_SIZE);
+ }
+ }
+
+ n = srclen & ~(GHASH_BLOCK_SIZE - 1);
+ if (n) {
+ ret = crypt_s390_kimd(KIMD_GHASH, ctx, src, n);
+ BUG_ON(ret != n);
+ src += n;
+ srclen -= n;
+ }
+
+ if (srclen) {
+ dctx->bytes = GHASH_BLOCK_SIZE - srclen;
+ memcpy(buf, src, srclen);
+ }
+
+ return 0;
+}
+
+static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
+{
+ u8 *buf = dctx->buffer;
+ int ret;
+
+ if (dctx->bytes) {
+ u8 *pos = buf + (GHASH_BLOCK_SIZE - dctx->bytes);
+
+ memset(pos, 0, dctx->bytes);
+
+ ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, GHASH_BLOCK_SIZE);
+ BUG_ON(ret != GHASH_BLOCK_SIZE);
+ }
+
+ dctx->bytes = 0;
+}
+
+static int ghash_final(struct shash_desc *desc, u8 *dst)
+{
+ struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+ struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
+
+ ghash_flush(ctx, dctx);
+ memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
+
+ return 0;
+}
+
+static struct shash_alg ghash_alg = {
+ .digestsize = GHASH_DIGEST_SIZE,
+ .init = ghash_init,
+ .update = ghash_update,
+ .final = ghash_final,
+ .setkey = ghash_setkey,
+ .descsize = sizeof(struct ghash_desc_ctx),
+ .base = {
+ .cra_name = "ghash",
+ .cra_driver_name = "ghash-s390",
+ .cra_priority = CRYPT_S390_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = GHASH_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ghash_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(ghash_alg.base.cra_list),
+ },
+};
+
+static int __init ghash_mod_init(void)
+{
+ if (!crypt_s390_func_available(KIMD_GHASH,
+ CRYPT_S390_MSA | CRYPT_S390_MSA4))
+ return -EOPNOTSUPP;
+
+ return crypto_register_shash(&ghash_alg);
+}
+
+static void __exit ghash_mod_exit(void)
+{
+ crypto_unregister_shash(&ghash_alg);
+}
+
+module_init(ghash_mod_init);
+module_exit(ghash_mod_exit);
+
+MODULE_ALIAS("ghash");
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("GHASH Message Digest Algorithm, s390 implementation");
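A hypothetical user of the registered "ghash" shash (not part of the patch; descsize-based descriptor allocation as required by the shash API of this kernel generation) might look like:

    #include <crypto/hash.h>
    #include <linux/err.h>
    #include <linux/slab.h>

    static int ghash_demo(const u8 key[16], const u8 *data,
                          unsigned int len, u8 out[16])
    {
            struct crypto_shash *tfm;
            struct shash_desc *desc;
            int ret;

            tfm = crypto_alloc_shash("ghash", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);
            ret = crypto_shash_setkey(tfm, key, 16);
            if (ret)
                    goto out_free;
            desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
                           GFP_KERNEL);
            if (!desc) {
                    ret = -ENOMEM;
                    goto out_free;
            }
            desc->tfm = tfm;
            desc->flags = 0;
            ret = crypto_shash_digest(desc, data, len, out);
            kfree(desc);
    out_free:
            crypto_free_shash(tfm);
            return ret;
    }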
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index 975e3ab13cb..0808fbf0f7d 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -76,7 +76,7 @@ static void prng_seed(int nbytes)
/* Add the entropy */
while (nbytes >= 8) {
- *((__u64 *)parm_block) ^= *((__u64 *)buf+i*8);
+ *((__u64 *)parm_block) ^= *((__u64 *)(buf+i));
prng_add_entropy();
i += 8;
nbytes -= 8;
@@ -166,7 +166,7 @@ static int __init prng_init(void)
int ret;
/* check if the CPU has a PRNG */
- if (!crypt_s390_func_available(KMC_PRNG))
+ if (!crypt_s390_func_available(KMC_PRNG, CRYPT_S390_MSA))
return -EOPNOTSUPP;
if (prng_chunk_size < 8)
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index f6de7826c97..e9868c6e0a0 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -90,7 +90,7 @@ static struct shash_alg alg = {
static int __init sha1_s390_init(void)
{
- if (!crypt_s390_func_available(KIMD_SHA_1))
+ if (!crypt_s390_func_available(KIMD_SHA_1, CRYPT_S390_MSA))
return -EOPNOTSUPP;
return crypto_register_shash(&alg);
}
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index 61a7db37212..5ed8d64fc2e 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -86,7 +86,7 @@ static struct shash_alg alg = {
static int sha256_s390_init(void)
{
- if (!crypt_s390_func_available(KIMD_SHA_256))
+ if (!crypt_s390_func_available(KIMD_SHA_256, CRYPT_S390_MSA))
return -EOPNOTSUPP;
return crypto_register_shash(&alg);
diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c
index 4bf73d0dc52..32a81383b69 100644
--- a/arch/s390/crypto/sha512_s390.c
+++ b/arch/s390/crypto/sha512_s390.c
@@ -132,7 +132,7 @@ static int __init init(void)
{
int ret;
- if (!crypt_s390_func_available(KIMD_SHA_512))
+ if (!crypt_s390_func_available(KIMD_SHA_512, CRYPT_S390_MSA))
return -EOPNOTSUPP;
if ((ret = crypto_register_shash(&sha512_alg)) < 0)
goto out;
diff --git a/arch/s390/hypfs/hypfs.h b/arch/s390/hypfs/hypfs.h
index 80c1526f2af..d9df5a060a8 100644
--- a/arch/s390/hypfs/hypfs.h
+++ b/arch/s390/hypfs/hypfs.h
@@ -47,7 +47,7 @@ struct hypfs_dbfs_data {
void *buf;
void *buf_free_ptr;
size_t size;
- struct hypfs_dbfs_file *dbfs_file;;
+ struct hypfs_dbfs_file *dbfs_file;
struct kref kref;
};
diff --git a/arch/s390/include/asm/cacheflush.h b/arch/s390/include/asm/cacheflush.h
index 43a5c78046d..3e20383d092 100644
--- a/arch/s390/include/asm/cacheflush.h
+++ b/arch/s390/include/asm/cacheflush.h
@@ -11,5 +11,6 @@ void kernel_map_pages(struct page *page, int numpages, int enable);
int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
+int set_memory_x(unsigned long addr, int numpages);
#endif /* _S390_CACHEFLUSH_H */
diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h
index 72b2e2f2d32..7e91c58072e 100644
--- a/arch/s390/include/asm/diag.h
+++ b/arch/s390/include/asm/diag.h
@@ -9,9 +9,22 @@
#define _ASM_S390_DIAG_H
/*
- * Diagnose 10: Release pages
+ * Diagnose 10: Release page range
*/
-extern void diag10(unsigned long addr);
+static inline void diag10_range(unsigned long start_pfn, unsigned long num_pfn)
+{
+ unsigned long start_addr, end_addr;
+
+ start_addr = start_pfn << PAGE_SHIFT;
+ end_addr = (start_pfn + num_pfn - 1) << PAGE_SHIFT;
+
+ asm volatile(
+ "0: diag %0,%1,0x10\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ EX_TABLE(1b, 1b)
+ : : "a" (start_addr), "a" (end_addr));
+}
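Callers now pass a pfn range instead of a single address; releasing one page, as the mm/cmm.c hunk below is converted to do, reduces to:

    diag10_range(addr >> PAGE_SHIFT, 1);    /* release the page at addr */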
/*
* Diagnose 14: Input spool file manipulation
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index 3c29be4836e..b7931faaef6 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -11,15 +11,13 @@ struct dyn_arch_ftrace { };
#ifdef CONFIG_64BIT
#define MCOUNT_INSN_SIZE 12
-#define MCOUNT_OFFSET 8
#else
#define MCOUNT_INSN_SIZE 20
-#define MCOUNT_OFFSET 4
#endif
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
- return addr - MCOUNT_OFFSET;
+ return addr;
}
#endif /* __ASSEMBLY__ */
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
new file mode 100644
index 00000000000..95a6cf2b5b6
--- /dev/null
+++ b/arch/s390/include/asm/jump_label.h
@@ -0,0 +1,37 @@
+#ifndef _ASM_S390_JUMP_LABEL_H
+#define _ASM_S390_JUMP_LABEL_H
+
+#include <linux/types.h>
+
+#define JUMP_LABEL_NOP_SIZE 6
+
+#ifdef CONFIG_64BIT
+#define ASM_PTR ".quad"
+#define ASM_ALIGN ".balign 8"
+#else
+#define ASM_PTR ".long"
+#define ASM_ALIGN ".balign 4"
+#endif
+
+static __always_inline bool arch_static_branch(struct jump_label_key *key)
+{
+ asm goto("0: brcl 0,0\n"
+ ".pushsection __jump_table, \"aw\"\n"
+ ASM_ALIGN "\n"
+ ASM_PTR " 0b, %l[label], %0\n"
+ ".popsection\n"
+ : : "X" (key) : : label);
+ return false;
+label:
+ return true;
+}
+
+typedef unsigned long jump_label_t;
+
+struct jump_entry {
+ jump_label_t code;
+ jump_label_t target;
+ jump_label_t key;
+};
+
+#endif
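A minimal sketch of how this wires into the generic static branch API of this kernel generation (hypothetical user; the key and function names are illustrative):

    #include <linux/jump_label.h>

    static struct jump_label_key tracing_key;   /* branch defaults to off */

    static void maybe_trace(void)
    {
            if (static_branch(&tracing_key))    /* brcl 0,0 until enabled */
                    do_expensive_tracing();     /* assumed helper */
    }

    static void tracing_on(void)
    {
            /* patches brcl 0,0 into a taken branch; on s390 this goes
             * through stop_machine(), see kernel/jump_label.c below */
            jump_label_inc(&tracing_key);
    }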
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index a6f0e7cc9cd..8c277caa8d3 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -23,7 +23,7 @@ static inline int init_new_context(struct task_struct *tsk,
#ifdef CONFIG_64BIT
mm->context.asce_bits |= _ASCE_TYPE_REGION3;
#endif
- if (current->mm->context.alloc_pgste) {
+ if (current->mm && current->mm->context.alloc_pgste) {
/*
* alloc_pgste indicates, that any NEW context will be created
* with extended page tables. The old context is unchanged. The
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 64230bc392f..5ff15dacb57 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -23,7 +23,7 @@ CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o \
processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o \
- vdso.o vtime.o sysinfo.o nmi.o sclp.o
+ vdso.o vtime.o sysinfo.o nmi.o sclp.o jump_label.o
obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index c032d11da8a..8237fc07ac7 100644
--- a/arch/s390/kernel/diag.c
+++ b/arch/s390/kernel/diag.c
@@ -9,27 +9,6 @@
#include <asm/diag.h>
/*
- * Diagnose 10: Release pages
- */
-void diag10(unsigned long addr)
-{
- if (addr >= 0x7ff00000)
- return;
- asm volatile(
-#ifdef CONFIG_64BIT
- " sam31\n"
- " diag %0,%0,0x10\n"
- "0: sam64\n"
-#else
- " diag %0,%0,0x10\n"
- "0:\n"
-#endif
- EX_TABLE(0b, 0b)
- : : "a" (addr));
-}
-EXPORT_SYMBOL(diag10);
-
-/*
* Diagnose 14: Input spool file manipulation
*/
int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode)
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index c83726c9fe0..3d4a78fc1ad 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -672,6 +672,7 @@ static struct insn opcode_b2[] = {
{ "rp", 0x77, INSTR_S_RD },
{ "stcke", 0x78, INSTR_S_RD },
{ "sacf", 0x79, INSTR_S_RD },
+ { "spp", 0x80, INSTR_S_RD },
{ "stsi", 0x7d, INSTR_S_RD },
{ "srnm", 0x99, INSTR_S_RD },
{ "stfpc", 0x9c, INSTR_S_RD },
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 648f64239a9..1b67fc6ebdc 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -836,7 +836,7 @@ restart_base:
stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on
basr %r14,0
l %r14,restart_addr-.(%r14)
- br %r14 # branch to start_secondary
+ basr %r14,%r14 # branch to start_secondary
restart_addr:
.long start_secondary
.align 8
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 9d3603d6c51..9fd86456349 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -841,7 +841,7 @@ restart_base:
mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1)
xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER
stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on
- jg start_secondary
+ brasl %r14,start_secondary
.align 8
restart_vtime:
.long 0x7fffffff,0xffffffff
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
new file mode 100644
index 00000000000..44cc06bedf7
--- /dev/null
+++ b/arch/s390/kernel/jump_label.c
@@ -0,0 +1,59 @@
+/*
+ * Jump label s390 support
+ *
+ * Copyright IBM Corp. 2011
+ * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/stop_machine.h>
+#include <linux/jump_label.h>
+#include <asm/ipl.h>
+
+#ifdef HAVE_JUMP_LABEL
+
+struct insn {
+ u16 opcode;
+ s32 offset;
+} __packed;
+
+struct insn_args {
+ unsigned long *target;
+ struct insn *insn;
+ ssize_t size;
+};
+
+static int __arch_jump_label_transform(void *data)
+{
+ struct insn_args *args = data;
+ int rc;
+
+ rc = probe_kernel_write(args->target, args->insn, args->size);
+ WARN_ON_ONCE(rc < 0);
+ return 0;
+}
+
+void arch_jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type)
+{
+ struct insn_args args;
+ struct insn insn;
+
+ if (type == JUMP_LABEL_ENABLE) {
+ /* brcl 15,offset */
+ insn.opcode = 0xc0f4;
+ insn.offset = (entry->target - entry->code) >> 1;
+ } else {
+ /* brcl 0,0 */
+ insn.opcode = 0xc004;
+ insn.offset = 0;
+ }
+
+ args.target = (void *) entry->code;
+ args.insn = &insn;
+ args.size = JUMP_LABEL_NOP_SIZE;
+
+ stop_machine(__arch_jump_label_transform, &args, NULL);
+}
+
+#endif
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 63a97db83f9..63c7d9ff220 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -165,12 +165,12 @@ static void do_ext_call_interrupt(unsigned int ext_int_code,
kstat_cpu(smp_processor_id()).irqs[EXTINT_IPI]++;
/*
* handle bit signal external calls
- *
- * For the ec_schedule signal we have to do nothing. All the work
- * is done automatically when we return from the interrupt.
*/
bits = xchg(&S390_lowcore.ext_call_fast, 0);
+ if (test_bit(ec_schedule, &bits))
+ scheduler_ipi();
+
if (test_bit(ec_call_function, &bits))
generic_smp_call_function_interrupt();
diff --git a/arch/s390/kvm/sie64a.S b/arch/s390/kvm/sie64a.S
index 7e9d30d567b..ab0e041ac54 100644
--- a/arch/s390/kvm/sie64a.S
+++ b/arch/s390/kvm/sie64a.S
@@ -48,10 +48,10 @@ sie_irq_handler:
tm __TI_flags+7(%r2),_TIF_EXIT_SIE
jz 0f
larl %r2,sie_exit # work pending, leave sie
- stg %r2,__LC_RETURN_PSW+8
+ stg %r2,SPI_PSW+8(0,%r15)
br %r14
0: larl %r2,sie_reenter # re-enter with guest id
- stg %r2,__LC_RETURN_PSW+8
+ stg %r2,SPI_PSW+8(0,%r15)
1: br %r14
/*
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index c66ffd8dbbb..1f1dba9dcf5 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -91,7 +91,7 @@ static long cmm_alloc_pages(long nr, long *counter,
} else
free_page((unsigned long) npa);
}
- diag10(addr);
+ diag10_range(addr >> PAGE_SHIFT, 1);
pa->pages[pa->index++] = addr;
(*counter)++;
spin_unlock(&cmm_lock);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 9217e332b11..ab988135e5c 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -543,7 +543,6 @@ static void pfault_interrupt(unsigned int ext_int_code,
struct task_struct *tsk;
__u16 subcode;
- kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++;
/*
* Get the external interruption subcode & pfault
* initial/completion signal bit. VM stores this
@@ -553,14 +552,15 @@ static void pfault_interrupt(unsigned int ext_int_code,
subcode = ext_int_code >> 16;
if ((subcode & 0xff00) != __SUBCODE_MASK)
return;
+ kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++;
/*
* Get the token (= address of the task structure of the affected task).
*/
#ifdef CONFIG_64BIT
- tsk = *(struct task_struct **) param64;
+ tsk = (struct task_struct *) param64;
#else
- tsk = *(struct task_struct **) param32;
+ tsk = (struct task_struct *) param32;
#endif
if (subcode & 0x0080) {
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 122ffbd08ce..f05edcc3bef 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -24,12 +24,13 @@ static void change_page_attr(unsigned long addr, int numpages,
WARN_ON_ONCE(1);
continue;
}
- ptep = pte_offset_kernel(pmdp, addr + i * PAGE_SIZE);
+ ptep = pte_offset_kernel(pmdp, addr);
pte = *ptep;
pte = set(pte);
- ptep_invalidate(&init_mm, addr + i * PAGE_SIZE, ptep);
+ ptep_invalidate(&init_mm, addr, ptep);
*ptep = pte;
+ addr += PAGE_SIZE;
}
}
@@ -53,3 +54,8 @@ int set_memory_nx(unsigned long addr, int numpages)
return 0;
}
EXPORT_SYMBOL_GPL(set_memory_nx);
+
+int set_memory_x(unsigned long addr, int numpages)
+{
+ return 0;
+}
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c
index 4952872d6f0..33cbd373cce 100644
--- a/arch/s390/oprofile/hwsampler.c
+++ b/arch/s390/oprofile/hwsampler.c
@@ -1021,20 +1021,14 @@ deallocate_exit:
return rc;
}
-long hwsampler_query_min_interval(void)
+unsigned long hwsampler_query_min_interval(void)
{
- if (min_sampler_rate)
- return min_sampler_rate;
- else
- return -EINVAL;
+ return min_sampler_rate;
}
-long hwsampler_query_max_interval(void)
+unsigned long hwsampler_query_max_interval(void)
{
- if (max_sampler_rate)
- return max_sampler_rate;
- else
- return -EINVAL;
+ return max_sampler_rate;
}
unsigned long hwsampler_get_sample_overflow_count(unsigned int cpu)
diff --git a/arch/s390/oprofile/hwsampler.h b/arch/s390/oprofile/hwsampler.h
index 8c72b59316b..1912f3bb190 100644
--- a/arch/s390/oprofile/hwsampler.h
+++ b/arch/s390/oprofile/hwsampler.h
@@ -102,8 +102,8 @@ int hwsampler_setup(void);
int hwsampler_shutdown(void);
int hwsampler_allocate(unsigned long sdbt, unsigned long sdb);
int hwsampler_deallocate(void);
-long hwsampler_query_min_interval(void);
-long hwsampler_query_max_interval(void);
+unsigned long hwsampler_query_min_interval(void);
+unsigned long hwsampler_query_max_interval(void);
int hwsampler_start_all(unsigned long interval);
int hwsampler_stop_all(void);
int hwsampler_deactivate(unsigned int cpu);
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index c63d7e58352..5995e9bc72d 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -145,15 +145,11 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops)
* create hwsampler files only if hwsampler_setup() succeeds.
*/
oprofile_min_interval = hwsampler_query_min_interval();
- if (oprofile_min_interval < 0) {
- oprofile_min_interval = 0;
+ if (oprofile_min_interval == 0)
return -ENODEV;
- }
oprofile_max_interval = hwsampler_query_max_interval();
- if (oprofile_max_interval < 0) {
- oprofile_max_interval = 0;
+ if (oprofile_max_interval == 0)
return -ENODEV;
- }
if (oprofile_timer_init(ops))
return -ENODEV;