From d311149337f93ae4de60a2f1c24a0d856089903f Mon Sep 17 00:00:00 2001 From: Marcelo Cerri Date: Fri, 2 Aug 2013 12:09:52 +0000 Subject: crypto: nx - fix limits to sg lists for SHA-2 The co-processor has several limits regarding the length of scatter/gather lists and the total number of bytes in it. These limits are available in the device tree, as following: - "ibm,max-sg-len": maximum number of bytes of each scatter/gather list. - "ibm,max-sync-cop": used for synchronous operations, it is an array of structures that contains information regarding the limits that must be considered for each mode and operation. The most important limits in it are: - The total number of bytes that a scatter/gather list can hold. - The maximum number of elements that a scatter/gather list can have. This patch updates the NX driver to perform several hyper calls if needed in order to always respect the length limits for scatter/gather lists. Reviewed-by: Fionnuala Gunter Reviewed-by: Joel Schopp Reviewed-by: Joy Latten Signed-off-by: Marcelo Cerri Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx-sha256.c | 110 ++++++++++++++++++++++++------------------ 1 file changed, 64 insertions(+), 46 deletions(-) (limited to 'drivers/crypto/nx/nx-sha256.c') diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c index 67024f2f0b7..254b01abef6 100644 --- a/drivers/crypto/nx/nx-sha256.c +++ b/drivers/crypto/nx/nx-sha256.c @@ -55,70 +55,86 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data, struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; struct nx_sg *in_sg; - u64 to_process, leftover; + u64 to_process, leftover, total; + u32 max_sg_len; int rc = 0; - if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { - /* we've hit the nx chip previously and we're updating again, - * so copy over the partial digest */ - memcpy(csbcpb->cpb.sha256.input_partial_digest, - csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE); - } - /* 2 cases for total data len: - * 1: <= SHA256_BLOCK_SIZE: copy into state, return 0 - * 2: > SHA256_BLOCK_SIZE: process X blocks, copy in leftover + * 1: < SHA256_BLOCK_SIZE: copy into state, return 0 + * 2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover */ - if (len + sctx->count < SHA256_BLOCK_SIZE) { + total = sctx->count + len; + if (total < SHA256_BLOCK_SIZE) { memcpy(sctx->buf + sctx->count, data, len); sctx->count += len; goto out; } - /* to_process: the SHA256_BLOCK_SIZE data chunk to process in this - * update */ - to_process = (sctx->count + len) & ~(SHA256_BLOCK_SIZE - 1); - leftover = (sctx->count + len) & (SHA256_BLOCK_SIZE - 1); - - if (sctx->count) { - in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf, - sctx->count, nx_ctx->ap->sglen); - in_sg = nx_build_sg_list(in_sg, (u8 *)data, + in_sg = nx_ctx->in_sg; + max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg), + nx_ctx->ap->sglen); + + do { + /* + * to_process: the SHA256_BLOCK_SIZE data chunk to process in + * this update. This value is also restricted by the sg list + * limits. 
+ */ + to_process = min_t(u64, total, nx_ctx->ap->databytelen); + to_process = min_t(u64, to_process, + NX_PAGE_SIZE * (max_sg_len - 1)); + to_process = to_process & ~(SHA256_BLOCK_SIZE - 1); + leftover = total - to_process; + + if (sctx->count) { + in_sg = nx_build_sg_list(nx_ctx->in_sg, + (u8 *) sctx->buf, + sctx->count, max_sg_len); + } + in_sg = nx_build_sg_list(in_sg, (u8 *) data, to_process - sctx->count, - nx_ctx->ap->sglen); - nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * - sizeof(struct nx_sg); - } else { - in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)data, - to_process, nx_ctx->ap->sglen); + max_sg_len); nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); - } - NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; + if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { + /* + * we've hit the nx chip previously and we're updating + * again, so copy over the partial digest. + */ + memcpy(csbcpb->cpb.sha256.input_partial_digest, + csbcpb->cpb.sha256.message_digest, + SHA256_DIGEST_SIZE); + } - if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { - rc = -EINVAL; - goto out; - } + NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; + if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { + rc = -EINVAL; + goto out; + } - rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, - desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); - if (rc) - goto out; + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; - atomic_inc(&(nx_ctx->stats->sha256_ops)); + atomic_inc(&(nx_ctx->stats->sha256_ops)); + csbcpb->cpb.sha256.message_bit_length += (u64) + (csbcpb->cpb.sha256.spbc * 8); + + /* everything after the first update is continuation */ + NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; + + total -= to_process; + data += to_process; + sctx->count = 0; + in_sg = nx_ctx->in_sg; + } while (leftover >= SHA256_BLOCK_SIZE); /* copy the leftover back into the state struct */ if (leftover) - memcpy(sctx->buf, data + len - leftover, leftover); + memcpy(sctx->buf, data, leftover); sctx->count = leftover; - - csbcpb->cpb.sha256.message_bit_length += (u64) - (csbcpb->cpb.sha256.spbc * 8); - - /* everything after the first update is continuation */ - NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; out: return rc; } @@ -129,8 +145,10 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out) struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; struct nx_sg *in_sg, *out_sg; + u32 max_sg_len; int rc; + max_sg_len = min_t(u32, nx_driver.of.max_sg_len, nx_ctx->ap->sglen); if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { /* we've hit the nx chip previously, now we're finalizing, @@ -146,9 +164,9 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out) csbcpb->cpb.sha256.message_bit_length += (u64)(sctx->count * 8); in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf, - sctx->count, nx_ctx->ap->sglen); + sctx->count, max_sg_len); out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA256_DIGEST_SIZE, - nx_ctx->ap->sglen); + max_sg_len); nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); -- cgit v1.2.3-70-g09d2 From c849163b80c05f4567b1adef5db7f377460f88cd Mon Sep 17 00:00:00 2001 From: Marcelo Cerri Date: Mon, 12 Aug 2013 18:49:37 -0300 Subject: crypto: nx - fix concurrency issue The NX driver uses the transformation context to store several fields containing data related to the state of the operations in progress. 
Since a single tfm can be used by different kernel threads at the same time, we need to protect the data stored into the context. This patch makes use of spin locks to protect the data where a race condition can happen. Reviewed-by: Fionnuala Gunter Reviewed-by: Joy Latten Signed-off-by: Marcelo Cerri Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx-aes-cbc.c | 10 ++++++++-- drivers/crypto/nx/nx-aes-ccm.c | 20 ++++++++++++++++---- drivers/crypto/nx/nx-aes-ctr.c | 10 ++++++++-- drivers/crypto/nx/nx-aes-ecb.c | 10 ++++++++-- drivers/crypto/nx/nx-aes-gcm.c | 4 ++++ drivers/crypto/nx/nx-aes-xcbc.c | 8 ++++++++ drivers/crypto/nx/nx-sha256.c | 16 ++++++++++++++++ drivers/crypto/nx/nx-sha512.c | 16 ++++++++++++++++ drivers/crypto/nx/nx.c | 4 ++-- drivers/crypto/nx/nx.h | 1 + 10 files changed, 87 insertions(+), 12 deletions(-) (limited to 'drivers/crypto/nx/nx-sha256.c') diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c index a2f99a910e4..7c0237dae02 100644 --- a/drivers/crypto/nx/nx-aes-cbc.c +++ b/drivers/crypto/nx/nx-aes-cbc.c @@ -70,10 +70,15 @@ static int cbc_aes_nx_crypt(struct blkcipher_desc *desc, { struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; + unsigned long irq_flags; int rc; - if (nbytes > nx_ctx->ap->databytelen) - return -EINVAL; + spin_lock_irqsave(&nx_ctx->lock, irq_flags); + + if (nbytes > nx_ctx->ap->databytelen) { + rc = -EINVAL; + goto out; + } if (enc) NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; @@ -100,6 +105,7 @@ static int cbc_aes_nx_crypt(struct blkcipher_desc *desc, atomic64_add(csbcpb->csb.processed_byte_count, &(nx_ctx->stats->aes_bytes)); out: + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c index ef5eae6d140..39d42245bc7 100644 --- a/drivers/crypto/nx/nx-aes-ccm.c +++ b/drivers/crypto/nx/nx-aes-ccm.c @@ -271,10 +271,15 @@ static int ccm_nx_decrypt(struct aead_request *req, unsigned int nbytes = req->cryptlen; unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req)); struct nx_ccm_priv *priv = &nx_ctx->priv.ccm; + unsigned long irq_flags; int rc = -1; - if (nbytes > nx_ctx->ap->databytelen) - return -EINVAL; + spin_lock_irqsave(&nx_ctx->lock, irq_flags); + + if (nbytes > nx_ctx->ap->databytelen) { + rc = -EINVAL; + goto out; + } nbytes -= authsize; @@ -308,6 +313,7 @@ static int ccm_nx_decrypt(struct aead_request *req, rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag, authsize) ? 
-EBADMSG : 0; out: + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } @@ -318,10 +324,15 @@ static int ccm_nx_encrypt(struct aead_request *req, struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; unsigned int nbytes = req->cryptlen; unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req)); + unsigned long irq_flags; int rc = -1; - if (nbytes > nx_ctx->ap->databytelen) - return -EINVAL; + spin_lock_irqsave(&nx_ctx->lock, irq_flags); + + if (nbytes > nx_ctx->ap->databytelen) { + rc = -EINVAL; + goto out; + } rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, csbcpb->cpb.aes_ccm.in_pat_or_b0); @@ -350,6 +361,7 @@ static int ccm_nx_encrypt(struct aead_request *req, req->dst, nbytes, authsize, SCATTERWALK_TO_SG); out: + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c index b6286f14680..762611b883c 100644 --- a/drivers/crypto/nx/nx-aes-ctr.c +++ b/drivers/crypto/nx/nx-aes-ctr.c @@ -88,10 +88,15 @@ static int ctr_aes_nx_crypt(struct blkcipher_desc *desc, { struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; + unsigned long irq_flags; int rc; - if (nbytes > nx_ctx->ap->databytelen) - return -EINVAL; + spin_lock_irqsave(&nx_ctx->lock, irq_flags); + + if (nbytes > nx_ctx->ap->databytelen) { + rc = -EINVAL; + goto out; + } rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes, csbcpb->cpb.aes_ctr.iv); @@ -112,6 +117,7 @@ static int ctr_aes_nx_crypt(struct blkcipher_desc *desc, atomic64_add(csbcpb->csb.processed_byte_count, &(nx_ctx->stats->aes_bytes)); out: + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c index 7bbc9a81da2..77dbe084ba4 100644 --- a/drivers/crypto/nx/nx-aes-ecb.c +++ b/drivers/crypto/nx/nx-aes-ecb.c @@ -70,10 +70,15 @@ static int ecb_aes_nx_crypt(struct blkcipher_desc *desc, { struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; + unsigned long irq_flags; int rc; - if (nbytes > nx_ctx->ap->databytelen) - return -EINVAL; + spin_lock_irqsave(&nx_ctx->lock, irq_flags); + + if (nbytes > nx_ctx->ap->databytelen) { + rc = -EINVAL; + goto out; + } if (enc) NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; @@ -98,6 +103,7 @@ static int ecb_aes_nx_crypt(struct blkcipher_desc *desc, atomic64_add(csbcpb->csb.processed_byte_count, &(nx_ctx->stats->aes_bytes)); out: + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c index 6cca6c392b0..df90d03afc1 100644 --- a/drivers/crypto/nx/nx-aes-gcm.c +++ b/drivers/crypto/nx/nx-aes-gcm.c @@ -166,8 +166,11 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc) struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; struct blkcipher_desc desc; unsigned int nbytes = req->cryptlen; + unsigned long irq_flags; int rc = -EINVAL; + spin_lock_irqsave(&nx_ctx->lock, irq_flags); + if (nbytes > nx_ctx->ap->databytelen) goto out; @@ -255,6 +258,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc) -EBADMSG : 0; } out: + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c index 93923e4628c..658da0fd3e1 100644 --- a/drivers/crypto/nx/nx-aes-xcbc.c +++ b/drivers/crypto/nx/nx-aes-xcbc.c @@ -89,8 +89,11 @@ static int nx_xcbc_update(struct shash_desc *desc, struct nx_csbcpb 
*csbcpb = nx_ctx->csbcpb; struct nx_sg *in_sg; u32 to_process, leftover; + unsigned long irq_flags; int rc = 0; + spin_lock_irqsave(&nx_ctx->lock, irq_flags); + if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { /* we've hit the nx chip previously and we're updating again, * so copy over the partial digest */ @@ -158,6 +161,7 @@ static int nx_xcbc_update(struct shash_desc *desc, /* everything after the first update is continuation */ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; out: + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } @@ -167,8 +171,11 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out) struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; struct nx_sg *in_sg, *out_sg; + unsigned long irq_flags; int rc = 0; + spin_lock_irqsave(&nx_ctx->lock, irq_flags); + if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { /* we've hit the nx chip previously, now we're finalizing, * so copy over the partial digest */ @@ -211,6 +218,7 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out) memcpy(out, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE); out: + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c index 254b01abef6..6547a7104bf 100644 --- a/drivers/crypto/nx/nx-sha256.c +++ b/drivers/crypto/nx/nx-sha256.c @@ -57,8 +57,11 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data, struct nx_sg *in_sg; u64 to_process, leftover, total; u32 max_sg_len; + unsigned long irq_flags; int rc = 0; + spin_lock_irqsave(&nx_ctx->lock, irq_flags); + /* 2 cases for total data len: * 1: < SHA256_BLOCK_SIZE: copy into state, return 0 * 2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover @@ -136,6 +139,7 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data, memcpy(sctx->buf, data, leftover); sctx->count = leftover; out: + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } @@ -146,8 +150,11 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out) struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; struct nx_sg *in_sg, *out_sg; u32 max_sg_len; + unsigned long irq_flags; int rc; + spin_lock_irqsave(&nx_ctx->lock, irq_flags); + max_sg_len = min_t(u32, nx_driver.of.max_sg_len, nx_ctx->ap->sglen); if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { @@ -186,6 +193,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out) &(nx_ctx->stats->sha256_bytes)); memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE); out: + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } @@ -195,6 +203,9 @@ static int nx_sha256_export(struct shash_desc *desc, void *out) struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; struct sha256_state *octx = out; + unsigned long irq_flags; + + spin_lock_irqsave(&nx_ctx->lock, irq_flags); octx->count = sctx->count + (csbcpb->cpb.sha256.message_bit_length / 8); @@ -217,6 +228,7 @@ static int nx_sha256_export(struct shash_desc *desc, void *out) octx->state[7] = SHA256_H7; } + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return 0; } @@ -226,6 +238,9 @@ static int nx_sha256_import(struct shash_desc *desc, const void *in) struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; const struct sha256_state *ictx = in; + unsigned long irq_flags; + + spin_lock_irqsave(&nx_ctx->lock, irq_flags); 
memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); @@ -240,6 +255,7 @@ static int nx_sha256_import(struct shash_desc *desc, const void *in) NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; } + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return 0; } diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c index 2d6d9135983..236e6afeab1 100644 --- a/drivers/crypto/nx/nx-sha512.c +++ b/drivers/crypto/nx/nx-sha512.c @@ -57,8 +57,11 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data, struct nx_sg *in_sg; u64 to_process, leftover, total, spbc_bits; u32 max_sg_len; + unsigned long irq_flags; int rc = 0; + spin_lock_irqsave(&nx_ctx->lock, irq_flags); + /* 2 cases for total data len: * 1: < SHA512_BLOCK_SIZE: copy into state, return 0 * 2: >= SHA512_BLOCK_SIZE: process X blocks, copy in leftover @@ -138,6 +141,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data, memcpy(sctx->buf, data, leftover); sctx->count[0] = leftover; out: + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } @@ -149,8 +153,11 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out) struct nx_sg *in_sg, *out_sg; u32 max_sg_len; u64 count0; + unsigned long irq_flags; int rc; + spin_lock_irqsave(&nx_ctx->lock, irq_flags); + max_sg_len = min_t(u32, nx_driver.of.max_sg_len, nx_ctx->ap->sglen); if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { @@ -193,6 +200,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out) memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE); out: + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } @@ -202,6 +210,9 @@ static int nx_sha512_export(struct shash_desc *desc, void *out) struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; struct sha512_state *octx = out; + unsigned long irq_flags; + + spin_lock_irqsave(&nx_ctx->lock, irq_flags); /* move message_bit_length (128 bits) into count and convert its value * to bytes */ @@ -233,6 +244,7 @@ static int nx_sha512_export(struct shash_desc *desc, void *out) octx->state[7] = SHA512_H7; } + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return 0; } @@ -242,6 +254,9 @@ static int nx_sha512_import(struct shash_desc *desc, const void *in) struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; const struct sha512_state *ictx = in; + unsigned long irq_flags; + + spin_lock_irqsave(&nx_ctx->lock, irq_flags); memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); sctx->count[0] = ictx->count[0] & 0x3f; @@ -259,6 +274,7 @@ static int nx_sha512_import(struct shash_desc *desc, const void *in) NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; } + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return 0; } diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c index ad07dc62b95..bdf4990f975 100644 --- a/drivers/crypto/nx/nx.c +++ b/drivers/crypto/nx/nx.c @@ -61,8 +61,7 @@ int nx_hcall_sync(struct nx_crypto_ctx *nx_ctx, do { rc = vio_h_cop_sync(viodev, op); - } while ((rc == -EBUSY && !may_sleep && retries--) || - (rc == -EBUSY && may_sleep && cond_resched())); + } while (rc == -EBUSY && !may_sleep && retries--); if (rc) { dev_dbg(&viodev->dev, "vio_h_cop_sync failed: rc: %d " @@ -251,6 +250,7 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx, */ void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function) { + spin_lock_init(&nx_ctx->lock); memset(nx_ctx->kmem, 0, nx_ctx->kmem_len); nx_ctx->csbcpb->csb.valid |= 
NX_CSB_VALID_BIT;
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index 3232b182dd2..14bb97f1c33 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -117,6 +117,7 @@ struct nx_ctr_priv {
 };
 
 struct nx_crypto_ctx {
+	spinlock_t lock;	  /* synchronize access to the context */
 	void *kmem;		  /* unaligned, kmalloc'd buffer */
 	size_t kmem_len;	  /* length of kmem */
 	struct nx_csbcpb *csbcpb; /* aligned page given to phyp @ hcall time */
-- cgit v1.2.3-70-g09d2

From 069fa0453f9dc86fd97dd5b3f5fda4724ed5ff69 Mon Sep 17 00:00:00 2001
From: Marcelo Cerri
Date: Thu, 29 Aug 2013 11:36:40 -0300
Subject: crypto: nx - fix SHA-2 for chunks bigger than block size

Each call to the co-processor, with the exception of the last call, needs
to send data that is a multiple of the block size. As a consequence, any
remaining data is kept in the internal NX context.

This patch fixes a bug in the driver that causes it to save incorrect data
into the context when the data is bigger than the block size.

Reviewed-by: Joy Latten
Signed-off-by: Marcelo Cerri
Signed-off-by: Herbert Xu
---
 drivers/crypto/nx/nx-sha256.c | 2 +-
 drivers/crypto/nx/nx-sha512.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'drivers/crypto/nx/nx-sha256.c')

diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index 6547a7104bf..da0b24a7633 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -129,7 +129,7 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
 		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
 		total -= to_process;
-		data += to_process;
+		data += to_process - sctx->count;
 		sctx->count = 0;
 		in_sg = nx_ctx->in_sg;
 	} while (leftover >= SHA256_BLOCK_SIZE);
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index 236e6afeab1..4ae5b0f221d 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -131,7 +131,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
 		total -= to_process;
-		data += to_process;
+		data += to_process - sctx->count[0];
 		sctx->count[0] = 0;
 		in_sg = nx_ctx->in_sg;
 	} while (leftover >= SHA512_BLOCK_SIZE);
-- cgit v1.2.3-70-g09d2
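
To make the chunking logic introduced by this series easier to follow, here is a
minimal, self-contained user-space sketch of the update path as it looks after the
first and last patches. It is not the driver code: BLOCK_SIZE, MAX_BYTES_PER_CALL,
struct sha_state, fake_hcall() and sha_update() are simplified stand-ins for
SHA256_BLOCK_SIZE, the databytelen/sg-list limits, struct sha256_state,
nx_hcall_sync() and nx_sha256_update(), and all scatter/gather, FDM-flag and
locking handling is omitted.

#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 64            /* stand-in for SHA256_BLOCK_SIZE */
#define MAX_BYTES_PER_CALL 4096  /* stand-in for the databytelen/sg-list limit */

struct sha_state {
	unsigned char buf[BLOCK_SIZE];  /* partial block carried between updates */
	size_t count;                   /* number of bytes currently buffered */
};

/* Stand-in for one synchronous call to the co-processor (nx_hcall_sync).
 * It only reports how many bytes would be handed to the engine. */
static void fake_hcall(size_t buffered, size_t direct)
{
	printf("hcall: %zu buffered + %zu direct = %zu bytes\n",
	       buffered, direct, buffered + direct);
}

static void sha_update(struct sha_state *sctx, const unsigned char *data, size_t len)
{
	size_t total = sctx->count + len;
	size_t to_process, leftover;

	/* Case 1: not enough for a full block yet -- just buffer the input. */
	if (total < BLOCK_SIZE) {
		memcpy(sctx->buf + sctx->count, data, len);
		sctx->count += len;
		return;
	}

	/* Case 2: issue as many calls as needed, each one limited to
	 * MAX_BYTES_PER_CALL and rounded down to a whole number of blocks. */
	do {
		to_process = total < MAX_BYTES_PER_CALL ? total : MAX_BYTES_PER_CALL;
		to_process &= ~((size_t)BLOCK_SIZE - 1);
		leftover = total - to_process;

		/* Previously buffered bytes go to the engine first, then new data. */
		fake_hcall(sctx->count, to_process - sctx->count);

		/* Advance past only the *new* bytes consumed by this call:
		 * sctx->count of them came from the internal buffer. */
		data += to_process - sctx->count;
		total -= to_process;
		sctx->count = 0;
	} while (leftover >= BLOCK_SIZE);

	/* Keep the remainder for the next update or the final call. */
	if (leftover)
		memcpy(sctx->buf, data, leftover);
	sctx->count = leftover;
}

int main(void)
{
	unsigned char msg[9000] = { 0 };
	struct sha_state sctx = { .count = 0 };

	sha_update(&sctx, msg, 100);   /* one 64-byte block sent, 36 bytes buffered */
	sha_update(&sctx, msg, 8900);  /* 36 + 8900 bytes -> three calls, 40 bytes buffered */
	printf("buffered for next call: %zu bytes\n", sctx.count);
	return 0;
}

With the pre-fix pointer arithmetic (data += to_process;), the second update would
skip sctx->count bytes of fresh input and later buffer stale bytes, which is the
bug addressed by the last patch. The concurrency patch in the middle of the series
simply serializes this whole path per context with spin_lock_irqsave(), so it does
not change the arithmetic shown here.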