| author    | Linus Torvalds <torvalds@linux-foundation.org> | 2013-09-07 14:31:18 -0700 |
|-----------|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-09-07 14:31:18 -0700 |
| commit    | 6be48f2940af9ea8d93c23a0dd8e322672c92efd (patch) | |
| tree      | 1bdc85a9d3fd0c19e108ea27a29a83ef2b44f5d0 /drivers/crypto/nx/nx-aes-ccm.c | |
| parent    | 0ffb01d9def22f1954e99529b7e4ded497b2e88b (diff) | |
| parent    | 68411521cc6055edc6274e03ab3210a5893533ba (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
"Here is the crypto update for 3.12:
- Added MODULE_SOFTDEP to allow pre-loading of modules.
- Reinstated crct10dif driver using the module softdep feature.
- Allow via rng driver to be auto-loaded.
- Split large input data when necessary in nx.
- Handle zero length messages correctly for GCM/XCBC in nx.
- Handle SHA-2 chunks bigger than block size properly in nx.
- Handle unaligned lengths in omap-aes.
- Added SHA384/SHA512 to omap-sham.
- Added OMAP5/AM43XX SHAM support.
- Added OMAP4 TRNG support.
- Misc fixes"
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (66 commits)
Reinstate "crypto: crct10dif - Wrap crc_t10dif function all to use crypto transform framework"
hwrng: via - Add MODULE_DEVICE_TABLE
crypto: fcrypt - Fix bitoperation for compilation with clang
crypto: nx - fix SHA-2 for chunks bigger than block size
crypto: nx - fix GCM for zero length messages
crypto: nx - fix XCBC for zero length messages
crypto: nx - fix limits to sg lists for AES-CCM
crypto: nx - fix limits to sg lists for AES-XCBC
crypto: nx - fix limits to sg lists for AES-GCM
crypto: nx - fix limits to sg lists for AES-CTR
crypto: nx - fix limits to sg lists for AES-CBC
crypto: nx - fix limits to sg lists for AES-ECB
crypto: nx - add offset to nx_build_sg_lists()
padata - Register hotcpu notifier after initialization
padata - share code between CPU_ONLINE and CPU_DOWN_FAILED, same to CPU_DOWN_PREPARE and CPU_UP_CANCELED
hwrng: omap - reorder OMAP TRNG driver code
crypto: omap-sham - correct dma burst size
crypto: omap-sham - Enable Polling mode if DMA fails
crypto: tegra-aes - bitwise vs logical and
crypto: sahara - checking the wrong variable
...
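The MODULE_SOFTDEP item in the pull summary above refers to the new module-info tag that lets a module ask modprobe to load another module before (or after) it is inserted. A minimal, hypothetical sketch of how a driver might declare such a soft dependency (the module names here are illustrative, not taken from this merge):

```c
#include <linux/init.h>
#include <linux/module.h>

static int __init softdep_demo_init(void)
{
	return 0;
}

static void __exit softdep_demo_exit(void)
{
}

module_init(softdep_demo_init);
module_exit(softdep_demo_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Hypothetical example of a soft module dependency");
/* Ask modprobe to load "crc32c" before inserting this module;
 * "post: ..." entries would be loaded afterwards. */
MODULE_SOFTDEP("pre: crc32c");
```

Per the summary, this soft-dependency mechanism is what allows the crct10dif driver to be reinstated without a hard symbol dependency on its helper module.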
Diffstat (limited to 'drivers/crypto/nx/nx-aes-ccm.c')
-rw-r--r-- | drivers/crypto/nx/nx-aes-ccm.c | 283 |
1 file changed, 214 insertions, 69 deletions
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index ef5eae6d140..5ecd4c2414a 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -179,13 +179,26 @@ static int generate_pat(u8 *iv,
         struct nx_sg *nx_insg = nx_ctx->in_sg;
         struct nx_sg *nx_outsg = nx_ctx->out_sg;
         unsigned int iauth_len = 0;
-        struct vio_pfo_op *op = NULL;
         u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL;
         int rc;
 
         /* zero the ctr value */
         memset(iv + 15 - iv[0], 0, iv[0] + 1);
 
+        /* page 78 of nx_wb.pdf has,
+         * Note: RFC3610 allows the AAD data to be up to 2^64 -1 bytes
+         * in length. If a full message is used, the AES CCA implementation
+         * restricts the maximum AAD length to 2^32 -1 bytes.
+         * If partial messages are used, the implementation supports
+         * 2^64 -1 bytes maximum AAD length.
+         *
+         * However, in the cryptoapi's aead_request structure,
+         * assoclen is an unsigned int, thus it cannot hold a length
+         * value greater than 2^32 - 1.
+         * Thus the AAD is further constrained by this and is never
+         * greater than 2^32.
+         */
+
         if (!req->assoclen) {
                 b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
         } else if (req->assoclen <= 14) {
@@ -195,7 +208,46 @@ static int generate_pat(u8 *iv,
                 b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
                 b1 = nx_ctx->priv.ccm.iauth_tag;
                 iauth_len = req->assoclen;
+        } else if (req->assoclen <= 65280) {
+                /* if associated data is less than (2^16 - 2^8), we construct
+                 * B1 differently and feed in the associated data to a CCA
+                 * operation */
+                b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
+                b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
+                iauth_len = 14;
+        } else {
+                b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
+                b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
+                iauth_len = 10;
+        }
 
+        /* generate B0 */
+        rc = generate_b0(iv, req->assoclen, authsize, nbytes, b0);
+        if (rc)
+                return rc;
+
+        /* generate B1:
+         * add control info for associated data
+         * RFC 3610 and NIST Special Publication 800-38C
+         */
+        if (b1) {
+                memset(b1, 0, 16);
+                if (req->assoclen <= 65280) {
+                        *(u16 *)b1 = (u16)req->assoclen;
+                        scatterwalk_map_and_copy(b1 + 2, req->assoc, 0,
+                                         iauth_len, SCATTERWALK_FROM_SG);
+                } else {
+                        *(u16 *)b1 = (u16)(0xfffe);
+                        *(u32 *)&b1[2] = (u32)req->assoclen;
+                        scatterwalk_map_and_copy(b1 + 6, req->assoc, 0,
+                                         iauth_len, SCATTERWALK_FROM_SG);
+                }
+        }
+
+        /* now copy any remaining AAD to scatterlist and call nx... */
+        if (!req->assoclen) {
+                return rc;
+        } else if (req->assoclen <= 14) {
                 nx_insg = nx_build_sg_list(nx_insg, b1, 16, nx_ctx->ap->sglen);
                 nx_outsg = nx_build_sg_list(nx_outsg, tmp, 16,
                                             nx_ctx->ap->sglen);
@@ -210,56 +262,74 @@ static int generate_pat(u8 *iv,
                 NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT;
                 NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE;
 
-                op = &nx_ctx->op;
                 result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac;
-        } else if (req->assoclen <= 65280) {
-                /* if associated data is less than (2^16 - 2^8), we construct
-                 * B1 differently and feed in the associated data to a CCA
-                 * operation */
-                b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
-                b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
-                iauth_len = 14;
-
-                /* remaining assoc data must have scatterlist built for it */
-                nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen,
-                                            req->assoc, iauth_len,
-                                            req->assoclen - iauth_len);
-                nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) *
-                                        sizeof(struct nx_sg);
+
+                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+                                   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+                if (rc)
+                        return rc;
+
+                atomic_inc(&(nx_ctx->stats->aes_ops));
+                atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
 
-                op = &nx_ctx->op_aead;
-                result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
         } else {
-                /* if associated data is less than (2^32), we construct B1
-                   differently yet again and feed in the associated data to a
-                   CCA operation */
-                pr_err("associated data len is %u bytes (returning -EINVAL)\n",
-                       req->assoclen);
-                rc = -EINVAL;
-        }
+                u32 max_sg_len;
+                unsigned int processed = 0, to_process;
+
+                /* page_limit: number of sg entries that fit on one page */
+                max_sg_len = min_t(u32,
+                                   nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+                                   nx_ctx->ap->sglen);
+
+                processed += iauth_len;
+
+                do {
+                        to_process = min_t(u32, req->assoclen - processed,
+                                           nx_ctx->ap->databytelen);
+                        to_process = min_t(u64, to_process,
+                                           NX_PAGE_SIZE * (max_sg_len - 1));
+
+                        if ((to_process + processed) < req->assoclen) {
+                                NX_CPB_FDM(nx_ctx->csbcpb_aead) |=
+                                        NX_FDM_INTERMEDIATE;
+                        } else {
+                                NX_CPB_FDM(nx_ctx->csbcpb_aead) &=
+                                        ~NX_FDM_INTERMEDIATE;
+                        }
+
+                        nx_insg = nx_walk_and_build(nx_ctx->in_sg,
+                                                    nx_ctx->ap->sglen,
+                                                    req->assoc, processed,
+                                                    to_process);
+
+                        nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) *
+                                                sizeof(struct nx_sg);
 
-        rc = generate_b0(iv, req->assoclen, authsize, nbytes, b0);
-        if (rc)
-                goto done;
+                        result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
 
-        if (b1) {
-                memset(b1, 0, 16);
-                *(u16 *)b1 = (u16)req->assoclen;
+                        rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
+                                           req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+                        if (rc)
+                                return rc;
 
-                scatterwalk_map_and_copy(b1 + 2, req->assoc, 0,
-                                         iauth_len, SCATTERWALK_FROM_SG);
+                        memcpy(nx_ctx->csbcpb_aead->cpb.aes_cca.b0,
+                               nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0,
+                               AES_BLOCK_SIZE);
 
-                rc = nx_hcall_sync(nx_ctx, op,
-                                   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
-                if (rc)
-                        goto done;
+                        NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION;
 
-                atomic_inc(&(nx_ctx->stats->aes_ops));
-                atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
+                        atomic_inc(&(nx_ctx->stats->aes_ops));
+                        atomic64_add(req->assoclen,
+                                     &(nx_ctx->stats->aes_bytes));
 
-                memcpy(out, result, AES_BLOCK_SIZE);
+                        processed += to_process;
+                } while (processed < req->assoclen);
+
+                result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
         }
-done:
+
+        memcpy(out, result, AES_BLOCK_SIZE);
+
         return rc;
 }
@@ -271,10 +341,12 @@ static int ccm_nx_decrypt(struct aead_request *req,
         unsigned int nbytes = req->cryptlen;
         unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
         struct nx_ccm_priv *priv = &nx_ctx->priv.ccm;
+        unsigned long irq_flags;
+        unsigned int processed = 0, to_process;
+        u32 max_sg_len;
         int rc = -1;
 
-        if (nbytes > nx_ctx->ap->databytelen)
-                return -EINVAL;
+        spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
         nbytes -= authsize;
 
@@ -288,26 +360,61 @@ static int ccm_nx_decrypt(struct aead_request *req,
         if (rc)
                 goto out;
 
-        rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, nbytes,
-                               csbcpb->cpb.aes_ccm.iv_or_ctr);
-        if (rc)
-                goto out;
+        /* page_limit: number of sg entries that fit on one page */
+        max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+                           nx_ctx->ap->sglen);
 
-        NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
-        NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_INTERMEDIATE;
+        do {
+
+                /* to_process: the AES_BLOCK_SIZE data chunk to process in this
+                 * update. This value is bound by sg list limits.
+                 */
+                to_process = min_t(u64, nbytes - processed,
+                                   nx_ctx->ap->databytelen);
+                to_process = min_t(u64, to_process,
+                                   NX_PAGE_SIZE * (max_sg_len - 1));
+
+                if ((to_process + processed) < nbytes)
+                        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+                else
+                        NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+
+                NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
+
+                rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
+                                       to_process, processed,
+                                       csbcpb->cpb.aes_ccm.iv_or_ctr);
+                if (rc)
+                        goto out;
 
-        rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                            req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
-        if (rc)
-                goto out;
+                if (rc)
+                        goto out;
 
-        atomic_inc(&(nx_ctx->stats->aes_ops));
-        atomic64_add(csbcpb->csb.processed_byte_count,
-                     &(nx_ctx->stats->aes_bytes));
+                /* for partial completion, copy following for next
+                 * entry into loop...
+                 */
+                memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
+                memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
+                       csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
+                memcpy(csbcpb->cpb.aes_ccm.in_s0,
+                       csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);
+
+                NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+
+                /* update stats */
+                atomic_inc(&(nx_ctx->stats->aes_ops));
+                atomic64_add(csbcpb->csb.processed_byte_count,
+                             &(nx_ctx->stats->aes_bytes));
+
+                processed += to_process;
+        } while (processed < nbytes);
 
         rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
                     authsize) ? -EBADMSG : 0;
 out:
+        spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
         return rc;
 }
@@ -318,38 +425,76 @@ static int ccm_nx_encrypt(struct aead_request *req,
         struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
         unsigned int nbytes = req->cryptlen;
         unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
+        unsigned long irq_flags;
+        unsigned int processed = 0, to_process;
+        u32 max_sg_len;
         int rc = -1;
 
-        if (nbytes > nx_ctx->ap->databytelen)
-                return -EINVAL;
+        spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
         rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
                           csbcpb->cpb.aes_ccm.in_pat_or_b0);
         if (rc)
                 goto out;
 
-        rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, nbytes,
-                               csbcpb->cpb.aes_ccm.iv_or_ctr);
-        if (rc)
-                goto out;
+        /* page_limit: number of sg entries that fit on one page */
+        max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+                           nx_ctx->ap->sglen);
+
+        do {
+                /* to process: the AES_BLOCK_SIZE data chunk to process in this
+                 * update. This value is bound by sg list limits.
+                 */
+                to_process = min_t(u64, nbytes - processed,
+                                   nx_ctx->ap->databytelen);
+                to_process = min_t(u64, to_process,
+                                   NX_PAGE_SIZE * (max_sg_len - 1));
+
+                if ((to_process + processed) < nbytes)
+                        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+                else
+                        NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+
+                NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
+
+                rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
+                                       to_process, processed,
+                                       csbcpb->cpb.aes_ccm.iv_or_ctr);
+                if (rc)
+                        goto out;
 
-        NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
-        NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+                                   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+                if (rc)
+                        goto out;
 
-        rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
-                           req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
-        if (rc)
-                goto out;
+                /* for partial completion, copy following for next
+                 * entry into loop...
+                 */
+                memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
+                memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
+                       csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
+                memcpy(csbcpb->cpb.aes_ccm.in_s0,
+                       csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);
 
-        atomic_inc(&(nx_ctx->stats->aes_ops));
-        atomic64_add(csbcpb->csb.processed_byte_count,
-                     &(nx_ctx->stats->aes_bytes));
+                NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+
+                /* update stats */
+                atomic_inc(&(nx_ctx->stats->aes_ops));
+                atomic64_add(csbcpb->csb.processed_byte_count,
+                             &(nx_ctx->stats->aes_bytes));
+
+                processed += to_process;
+
+        } while (processed < nbytes);
 
         /* copy out the auth tag */
         scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
                                  req->dst, nbytes, authsize,
                                  SCATTERWALK_TO_SG);
+
 out:
+        spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
         return rc;
 }
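For context on the generate_pat()/generate_b0() changes above: the B0 block fed to the hardware follows the CCM formatting rules of RFC 3610 / NIST SP 800-38C, i.e. a flags octet (AAD-present bit, encoded tag length M, encoded length-field size L), the nonce, and the message length in the trailing L octets. A standalone sketch of that layout, with illustrative names that are not the driver's own:

```c
#include <stdint.h>
#include <string.h>

/* Build a CCM B0 block per RFC 3610.
 * l:        size of the length field in octets (2..8); the nonce is 15 - l octets
 * tag_len:  MAC length M in octets (4..16, even)
 * have_aad: nonzero if associated data follows in B1
 * msg_len:  length of the payload to be encrypted and authenticated */
static void ccm_build_b0(uint8_t b0[16], const uint8_t *nonce,
                         unsigned int l, unsigned int tag_len,
                         int have_aad, uint64_t msg_len)
{
        unsigned int i;

        /* flags = Adata | M' << 3 | L', with M' = (M - 2) / 2 and L' = L - 1 */
        b0[0] = (have_aad ? 0x40 : 0x00) |
                (((tag_len - 2) / 2) << 3) |
                (l - 1);
        memcpy(&b0[1], nonce, 15 - l);
        /* message length, big endian, in the last l octets */
        for (i = 0; i < l; i++)
                b0[15 - i] = (uint8_t)(msg_len >> (8 * i));
}
```

The 0xfffe value written into B1 for associated data of 65280 bytes or more corresponds to the same specification's marker for the longer AAD-length encoding.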
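The pattern shared by the new loops in generate_pat(), ccm_nx_decrypt() and ccm_nx_encrypt() is to bound each pass by the coprocessor's per-operation byte limit and by the number of scatter/gather entries that fit in one page, and to mark every pass but the last as intermediate. A minimal, self-contained sketch of that bounding logic, with simplified names and an assumed 4 KiB sg page (not the kernel code itself):

```c
#include <stdio.h>
#include <stddef.h>

#define SG_PAGE_SIZE 4096u   /* assumption: one page worth of sg entries */

/* Clamp a chunk the way the driver's loops do: by the engine's
 * per-operation limit and by what one page of sg entries can map
 * (one entry is kept spare). */
static size_t bound_chunk(size_t remaining, size_t databytelen,
                          size_t max_sg_len)
{
        size_t to_process = remaining;

        if (to_process > databytelen)
                to_process = databytelen;
        if (to_process > (size_t)SG_PAGE_SIZE * (max_sg_len - 1))
                to_process = (size_t)SG_PAGE_SIZE * (max_sg_len - 1);
        return to_process;
}

int main(void)
{
        size_t nbytes = 1u << 20, processed = 0;

        while (processed < nbytes) {
                size_t chunk = bound_chunk(nbytes - processed,
                                           64 * 1024, 32);
                int last = (processed + chunk) >= nbytes;

                /* a real pass would set or clear the INTERMEDIATE flag
                 * here and mark continuation passes after the first one */
                printf("%s pass: %zu bytes\n",
                       last ? "final" : "intermediate", chunk);
                processed += chunk;
        }
        return 0;
}
```

Splitting the request this way is what allows the old `nbytes > nx_ctx->ap->databytelen` early return to go away: oversized requests are now handled across several h-calls instead of being rejected with -EINVAL.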