Diffstat (limited to 'crypto')
-rw-r--r--   crypto/af_alg.c           | 181
-rw-r--r--   crypto/algif_aead.c       |  38
-rw-r--r--   crypto/algif_hash.c       | 110
-rw-r--r--   crypto/algif_skcipher.c   |  10
4 files changed, 171 insertions(+), 168 deletions(-)
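Taken together, these changes drop the old af_alg_make_sg()/npages bookkeeping: each user buffer is now described by a struct sg_table that extract_iter_to_sg() fills straight from the message iterator, and a need_unpin flag records whether the extracted pages were pinned and so must be released with unpin_user_page() rather than put_page(). The fragment below is a condensed sketch of that pattern for orientation only, not code from the tree; the struct and the example_* helpers are invented for the example, and the real af_alg_sgl in include/crypto/if_alg.h differs in detail.

#include <linux/scatterlist.h>
#include <linux/uio.h>
#include <linux/mm.h>

#define EXAMPLE_MAX_PAGES 16            /* stand-in for ALG_MAX_PAGES */

/* Abridged stand-in for struct af_alg_sgl as used after this series. */
struct example_sgl {
        struct sg_table sgt;            /* sgt.sgl/sgt.nents describe the mapped pages */
        struct scatterlist sgl[EXAMPLE_MAX_PAGES + 1];  /* inline slots + chain entry */
        bool need_unpin;                /* pages were pinned during extraction */
};

/* Map up to maxlen bytes of the iterator into sgl; returns bytes mapped. */
static ssize_t example_map_iter(struct example_sgl *sgl, struct iov_iter *iter,
                                size_t maxlen)
{
        ssize_t n;

        sgl->sgt.sgl = sgl->sgl;
        sgl->sgt.nents = 0;
        sgl->sgt.orig_nents = 0;
        sg_init_table(sgl->sgt.sgl, EXAMPLE_MAX_PAGES);

        n = extract_iter_to_sg(iter, maxlen, &sgl->sgt, EXAMPLE_MAX_PAGES, 0);
        if (n <= 0)
                return n;       /* error or nothing extracted */

        sg_mark_end(sgl->sgt.sgl + sgl->sgt.nents - 1);
        /* Only user-backed iterators have their pages pinned. */
        sgl->need_unpin = iov_iter_extract_will_pin(iter);
        return n;
}

/* Release the pages again, mirroring the new af_alg_free_sg() logic. */
static void example_unmap(struct example_sgl *sgl)
{
        unsigned int i;

        if (sgl->need_unpin)
                for (i = 0; i < sgl->sgt.nents; i++)
                        unpin_user_page(sg_page(&sgl->sgt.sgl[i]));
        sgl->sgt.sgl = NULL;
}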
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 5f7252a5b7b4..7d4b6016b83d 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -531,50 +531,25 @@ static const struct net_proto_family alg_family = {
 	.owner = THIS_MODULE,
 };
 
-int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len)
-{
-	size_t off;
-	ssize_t n;
-	int npages, i;
-
-	n = iov_iter_get_pages2(iter, sgl->pages, len, ALG_MAX_PAGES, &off);
-	if (n < 0)
-		return n;
-
-	npages = DIV_ROUND_UP(off + n, PAGE_SIZE);
-	if (WARN_ON(npages == 0))
-		return -EINVAL;
-	/* Add one extra for linking */
-	sg_init_table(sgl->sg, npages + 1);
-
-	for (i = 0, len = n; i < npages; i++) {
-		int plen = min_t(int, len, PAGE_SIZE - off);
-
-		sg_set_page(sgl->sg + i, sgl->pages[i], plen, off);
-
-		off = 0;
-		len -= plen;
-	}
-	sg_mark_end(sgl->sg + npages - 1);
-	sgl->npages = npages;
-
-	return n;
-}
-EXPORT_SYMBOL_GPL(af_alg_make_sg);
-
 static void af_alg_link_sg(struct af_alg_sgl *sgl_prev,
 			   struct af_alg_sgl *sgl_new)
 {
-	sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
-	sg_chain(sgl_prev->sg, sgl_prev->npages + 1, sgl_new->sg);
+	sg_unmark_end(sgl_prev->sgt.sgl + sgl_prev->sgt.nents - 1);
+	sg_chain(sgl_prev->sgt.sgl, sgl_prev->sgt.nents + 1, sgl_new->sgt.sgl);
 }
 
 void af_alg_free_sg(struct af_alg_sgl *sgl)
 {
 	int i;
 
-	for (i = 0; i < sgl->npages; i++)
-		put_page(sgl->pages[i]);
+	if (sgl->sgt.sgl) {
+		if (sgl->need_unpin)
+			for (i = 0; i < sgl->sgt.nents; i++)
+				unpin_user_page(sg_page(&sgl->sgt.sgl[i]));
+		if (sgl->sgt.sgl != sgl->sgl)
+			kvfree(sgl->sgt.sgl);
+		sgl->sgt.sgl = NULL;
+	}
 }
 EXPORT_SYMBOL_GPL(af_alg_free_sg);
 
@@ -1015,7 +990,7 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
 	while (size) {
 		struct scatterlist *sg;
 		size_t len = size;
-		size_t plen;
+		ssize_t plen;
 
 		/* use the existing memory in an allocated page */
 		if (ctx->merge) {
@@ -1060,35 +1035,58 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
 		if (sgl->cur)
 			sg_unmark_end(sg + sgl->cur - 1);
 
-		do {
-			struct page *pg;
-			unsigned int i = sgl->cur;
-
-			plen = min_t(size_t, len, PAGE_SIZE);
-
-			pg = alloc_page(GFP_KERNEL);
-			if (!pg) {
-				err = -ENOMEM;
+		if (msg->msg_flags & MSG_SPLICE_PAGES) {
+			struct sg_table sgtable = {
+				.sgl		= sg,
+				.nents		= sgl->cur,
+				.orig_nents	= sgl->cur,
+			};
+
+			plen = extract_iter_to_sg(&msg->msg_iter, len, &sgtable,
+						  MAX_SGL_ENTS, 0);
+			if (plen < 0) {
+				err = plen;
 				goto unlock;
 			}
 
-			sg_assign_page(sg + i, pg);
-
-			err = memcpy_from_msg(page_address(sg_page(sg + i)),
-					      msg, plen);
-			if (err) {
-				__free_page(sg_page(sg + i));
-				sg_assign_page(sg + i, NULL);
-				goto unlock;
-			}
-
-			sg[i].length = plen;
+			for (; sgl->cur < sgtable.nents; sgl->cur++)
+				get_page(sg_page(&sg[sgl->cur]));
 			len -= plen;
 			ctx->used += plen;
 			copied += plen;
 			size -= plen;
-			sgl->cur++;
-		} while (len && sgl->cur < MAX_SGL_ENTS);
+		} else {
+			do {
+				struct page *pg;
+				unsigned int i = sgl->cur;
+
+				plen = min_t(size_t, len, PAGE_SIZE);
+
+				pg = alloc_page(GFP_KERNEL);
+				if (!pg) {
+					err = -ENOMEM;
+					goto unlock;
+				}
+
+				sg_assign_page(sg + i, pg);
+
+				err = memcpy_from_msg(
+					page_address(sg_page(sg + i)),
+					msg, plen);
+				if (err) {
+					__free_page(sg_page(sg + i));
+					sg_assign_page(sg + i, NULL);
+					goto unlock;
+				}
+
+				sg[i].length = plen;
+				len -= plen;
+				ctx->used += plen;
+				copied += plen;
+				size -= plen;
+				sgl->cur++;
+			} while (len && sgl->cur < MAX_SGL_ENTS);
+		}
 
 		if (!size)
 			sg_mark_end(sg + sgl->cur - 1);
@@ -1121,53 +1119,17 @@ EXPORT_SYMBOL_GPL(af_alg_sendmsg);
 ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
 			int offset, size_t size, int flags)
 {
-	struct sock *sk = sock->sk;
-	struct alg_sock *ask = alg_sk(sk);
-	struct af_alg_ctx *ctx = ask->private;
-	struct af_alg_tsgl *sgl;
-	int err = -EINVAL;
+	struct bio_vec bvec;
+	struct msghdr msg = {
+		.msg_flags = flags | MSG_SPLICE_PAGES,
+	};
 
 	if (flags & MSG_SENDPAGE_NOTLAST)
-		flags |= MSG_MORE;
-
-	lock_sock(sk);
-	if (!ctx->more && ctx->used)
-		goto unlock;
-
-	if (!size)
-		goto done;
-
-	if (!af_alg_writable(sk)) {
-		err = af_alg_wait_for_wmem(sk, flags);
-		if (err)
-			goto unlock;
-	}
-
-	err = af_alg_alloc_tsgl(sk);
-	if (err)
-		goto unlock;
-
-	ctx->merge = 0;
-	sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl, list);
-
-	if (sgl->cur)
-		sg_unmark_end(sgl->sg + sgl->cur - 1);
-
-	sg_mark_end(sgl->sg + sgl->cur);
-
-	get_page(page);
-	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
-	sgl->cur++;
-	ctx->used += size;
+		msg.msg_flags |= MSG_MORE;
 
-done:
-	ctx->more = flags & MSG_MORE;
-
-unlock:
-	af_alg_data_wakeup(sk);
-	release_sock(sk);
-
-	return err ?: size;
+	bvec_set_page(&bvec, page, size, offset);
+	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
+	return sock_sendmsg(sock, &msg);
 }
 EXPORT_SYMBOL_GPL(af_alg_sendpage);
 
@@ -1288,8 +1250,8 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
 
 	while (maxsize > len && msg_data_left(msg)) {
 		struct af_alg_rsgl *rsgl;
+		ssize_t err;
 		size_t seglen;
-		int err;
 
 		/* limit the amount of readable buffers */
 		if (!af_alg_readable(sk))
@@ -1306,16 +1268,23 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
 			return -ENOMEM;
 		}
 
-		rsgl->sgl.npages = 0;
+		rsgl->sgl.sgt.sgl = rsgl->sgl.sgl;
+		rsgl->sgl.sgt.nents = 0;
+		rsgl->sgl.sgt.orig_nents = 0;
 		list_add_tail(&rsgl->list, &areq->rsgl_list);
 
-		/* make one iovec available as scatterlist */
-		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
+		sg_init_table(rsgl->sgl.sgt.sgl, ALG_MAX_PAGES);
+		err = extract_iter_to_sg(&msg->msg_iter, seglen, &rsgl->sgl.sgt,
+					 ALG_MAX_PAGES, 0);
 		if (err < 0) {
 			rsgl->sg_num_bytes = 0;
 			return err;
 		}
 
+		sg_mark_end(rsgl->sgl.sgt.sgl + rsgl->sgl.sgt.nents - 1);
+		rsgl->sgl.need_unpin =
+			iov_iter_extract_will_pin(&msg->msg_iter);
+
 		/* chain the new scatterlist with previous one */
 		if (areq->last_rsgl)
 			af_alg_link_sg(&areq->last_rsgl->sgl, &rsgl->sgl);
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 42493b4d8ce4..35bfa283748d 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -9,8 +9,8 @@
  * The following concept of the memory management is used:
  *
  * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
- * filled by user space with the data submitted via sendpage/sendmsg. Filling
- * up the TX SGL does not cause a crypto operation -- the data will only be
+ * filled by user space with the data submitted via sendpage. Filling up
+ * the TX SGL does not cause a crypto operation -- the data will only be
  * tracked by the kernel. Upon receipt of one recvmsg call, the caller must
  * provide a buffer which is tracked with the RX SGL.
  *
@@ -113,19 +113,19 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
 	}
 
 	/*
-	 * Data length provided by caller via sendmsg/sendpage that has not
-	 * yet been processed.
+	 * Data length provided by caller via sendmsg that has not yet been
+	 * processed.
 	 */
 	used = ctx->used;
 
 	/*
-	 * Make sure sufficient data is present -- note, the same check is
-	 * also present in sendmsg/sendpage. The checks in sendpage/sendmsg
-	 * shall provide an information to the data sender that something is
-	 * wrong, but they are irrelevant to maintain the kernel integrity.
-	 * We need this check here too in case user space decides to not honor
-	 * the error message in sendmsg/sendpage and still call recvmsg. This
-	 * check here protects the kernel integrity.
+	 * Make sure sufficient data is present -- note, the same check is also
+	 * present in sendmsg. The checks in sendmsg shall provide an
+	 * information to the data sender that something is wrong, but they are
+	 * irrelevant to maintain the kernel integrity. We need this check
+	 * here too in case user space decides to not honor the error message
+	 * in sendmsg and still call recvmsg. This check here protects the
+	 * kernel integrity.
 	 */
 	if (!aead_sufficient_data(sk))
 		return -EINVAL;
@@ -210,7 +210,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
 	 */
 
 	/* Use the RX SGL as source (and destination) for crypto op. */
-	rsgl_src = areq->first_rsgl.sgl.sg;
+	rsgl_src = areq->first_rsgl.sgl.sgt.sgl;
 
 	if (ctx->enc) {
 		/*
@@ -224,7 +224,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
 		 *	RX SGL: AAD || PT || Tag
 		 */
 		err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
-					   areq->first_rsgl.sgl.sg, processed);
+					   areq->first_rsgl.sgl.sgt.sgl,
+					   processed);
 		if (err)
 			goto free;
 		af_alg_pull_tsgl(sk, processed, NULL, 0);
@@ -242,7 +243,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
 
 		/* Copy AAD || CT to RX SGL buffer for in-place operation. */
 		err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
-					   areq->first_rsgl.sgl.sg, outlen);
+					   areq->first_rsgl.sgl.sgt.sgl,
+					   outlen);
 		if (err)
 			goto free;
 
@@ -267,10 +269,10 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
 	if (usedpages) {
 		/* RX SGL present */
 		struct af_alg_sgl *sgl_prev = &areq->last_rsgl->sgl;
+		struct scatterlist *sg = sgl_prev->sgt.sgl;
 
-		sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
-		sg_chain(sgl_prev->sg, sgl_prev->npages + 1,
-			 areq->tsgl);
+		sg_unmark_end(sg + sgl_prev->sgt.nents - 1);
+		sg_chain(sg, sgl_prev->sgt.nents + 1, areq->tsgl);
 	} else
 		/* no RX SGL present (e.g. authentication only) */
 		rsgl_src = areq->tsgl;
@@ -278,7 +280,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
 
 	/* Initialize the crypto operation */
 	aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src,
-			       areq->first_rsgl.sgl.sg, used, ctx->iv);
+			       areq->first_rsgl.sgl.sgt.sgl, used, ctx->iv);
 	aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
 	aead_request_set_tfm(&areq->cra_u.aead_req, tfm);
 
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 63af72e19fa8..1a2d80c6c91a 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -63,70 +63,102 @@ static void hash_free_result(struct sock *sk, struct hash_ctx *ctx)
 static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
 			size_t ignored)
 {
-	int limit = ALG_MAX_PAGES * PAGE_SIZE;
 	struct sock *sk = sock->sk;
 	struct alg_sock *ask = alg_sk(sk);
 	struct hash_ctx *ctx = ask->private;
-	long copied = 0;
+	ssize_t copied = 0;
+	size_t len, max_pages, npages;
+	bool continuing = ctx->more, need_init = false;
 	int err;
 
-	if (limit > sk->sk_sndbuf)
-		limit = sk->sk_sndbuf;
+	max_pages = min_t(size_t, ALG_MAX_PAGES,
+			  DIV_ROUND_UP(sk->sk_sndbuf, PAGE_SIZE));
 
 	lock_sock(sk);
-	if (!ctx->more) {
+	if (!continuing) {
 		if ((msg->msg_flags & MSG_MORE))
 			hash_free_result(sk, ctx);
-
-		err = crypto_wait_req(crypto_ahash_init(&ctx->req), &ctx->wait);
-		if (err)
-			goto unlock;
+		need_init = true;
 	}
 
 	ctx->more = false;
 
 	while (msg_data_left(msg)) {
-		int len = msg_data_left(msg);
-
-		if (len > limit)
-			len = limit;
-
-		len = af_alg_make_sg(&ctx->sgl, &msg->msg_iter, len);
-		if (len < 0) {
-			err = copied ? 0 : len;
-			goto unlock;
+		ctx->sgl.sgt.sgl = ctx->sgl.sgl;
+		ctx->sgl.sgt.nents = 0;
+		ctx->sgl.sgt.orig_nents = 0;
+
+		err = -EIO;
+		npages = iov_iter_npages(&msg->msg_iter, max_pages);
+		if (npages == 0)
+			goto unlock_free;
+
+		if (npages > ARRAY_SIZE(ctx->sgl.sgl)) {
+			err = -ENOMEM;
+			ctx->sgl.sgt.sgl =
+				kvmalloc(array_size(npages,
+						    sizeof(*ctx->sgl.sgt.sgl)),
+					 GFP_KERNEL);
+			if (!ctx->sgl.sgt.sgl)
+				goto unlock_free;
 		}
+		sg_init_table(ctx->sgl.sgl, npages);
 
-		ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL, len);
+		ctx->sgl.need_unpin = iov_iter_extract_will_pin(&msg->msg_iter);
 
-		err = crypto_wait_req(crypto_ahash_update(&ctx->req),
-				      &ctx->wait);
-		af_alg_free_sg(&ctx->sgl);
-		if (err) {
-			iov_iter_revert(&msg->msg_iter, len);
-			goto unlock;
+		err = extract_iter_to_sg(&msg->msg_iter, LONG_MAX,
+					 &ctx->sgl.sgt, npages, 0);
+		if (err < 0)
+			goto unlock_free;
+		len = err;
+		sg_mark_end(ctx->sgl.sgt.sgl + ctx->sgl.sgt.nents - 1);
+
+		if (!msg_data_left(msg)) {
+			err = hash_alloc_result(sk, ctx);
+			if (err)
+				goto unlock_free;
 		}
 
-		copied += len;
-	}
+		ahash_request_set_crypt(&ctx->req, ctx->sgl.sgt.sgl,
+					ctx->result, len);
 
-	err = 0;
+		if (!msg_data_left(msg) && !continuing &&
+		    !(msg->msg_flags & MSG_MORE)) {
+			err = crypto_ahash_digest(&ctx->req);
+		} else {
+			if (need_init) {
+				err = crypto_wait_req(
+					crypto_ahash_init(&ctx->req),
+					&ctx->wait);
+				if (err)
+					goto unlock_free;
+				need_init = false;
+			}
+
+			if (msg_data_left(msg) || (msg->msg_flags & MSG_MORE))
+				err = crypto_ahash_update(&ctx->req);
+			else
+				err = crypto_ahash_finup(&ctx->req);
+			continuing = true;
+		}
 
-	ctx->more = msg->msg_flags & MSG_MORE;
-	if (!ctx->more) {
-		err = hash_alloc_result(sk, ctx);
+		err = crypto_wait_req(err, &ctx->wait);
 		if (err)
-			goto unlock;
+			goto unlock_free;
 
-		ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
-		err = crypto_wait_req(crypto_ahash_final(&ctx->req),
-				      &ctx->wait);
+		copied += len;
+		af_alg_free_sg(&ctx->sgl);
 	}
 
+	ctx->more = msg->msg_flags & MSG_MORE;
+	err = 0;
 unlock:
 	release_sock(sk);
+	return copied ?: err;
 
-	return err ?: copied;
+unlock_free:
+	af_alg_free_sg(&ctx->sgl);
+	goto unlock;
 }
 
 static ssize_t hash_sendpage(struct socket *sock, struct page *page,
@@ -141,8 +173,8 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
 		flags |= MSG_MORE;
 
 	lock_sock(sk);
-	sg_init_table(ctx->sgl.sg, 1);
-	sg_set_page(ctx->sgl.sg, page, size, offset);
+	sg_init_table(ctx->sgl.sgl, 1);
+	sg_set_page(ctx->sgl.sgl, page, size, offset);
 
 	if (!(flags & MSG_MORE)) {
 		err = hash_alloc_result(sk, ctx);
@@ -151,7 +183,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
 	} else if (!ctx->more)
 		hash_free_result(sk, ctx);
 
-	ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, ctx->result, size);
+	ahash_request_set_crypt(&ctx->req, ctx->sgl.sgl, ctx->result, size);
 
 	if (!(flags & MSG_MORE)) {
 		if (ctx->more)
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index ee8890ee8f33..b1f321b9f846 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -9,10 +9,10 @@
  * The following concept of the memory management is used:
  *
  * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
- * filled by user space with the data submitted via sendpage/sendmsg. Filling
- * up the TX SGL does not cause a crypto operation -- the data will only be
- * tracked by the kernel. Upon receipt of one recvmsg call, the caller must
- * provide a buffer which is tracked with the RX SGL.
+ * filled by user space with the data submitted via sendmsg. Filling up the TX
+ * SGL does not cause a crypto operation -- the data will only be tracked by
+ * the kernel. Upon receipt of one recvmsg call, the caller must provide a
+ * buffer which is tracked with the RX SGL.
  *
  * During the processing of the recvmsg operation, the cipher request is
  * allocated and prepared. As part of the recvmsg operation, the processed
@@ -105,7 +105,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
 	/* Initialize the crypto operation */
 	skcipher_request_set_tfm(&areq->cra_u.skcipher_req, tfm);
 	skcipher_request_set_crypt(&areq->cra_u.skcipher_req, areq->tsgl,
-				   areq->first_rsgl.sgl.sg, len, ctx->iv);
+				   areq->first_rsgl.sgl.sgt.sgl, len, ctx->iv);
 
 	if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
 		/* AIO operation */
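For context on the path these hunks sit on: hash_sendmsg() and the other algif entry points are driven from user space through an ordinary AF_ALG socket, so the MSG_MORE handling above is what a caller exercises when it streams data over several send()/sendmsg() calls. The program below is a minimal user-space sketch of that flow, not part of the patch; error checking is omitted and "sha256" is just an example algorithm name.

#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
        struct sockaddr_alg sa = {
                .salg_family = AF_ALG,
                .salg_type   = "hash",          /* handled by algif_hash */
                .salg_name   = "sha256",        /* example algorithm */
        };
        unsigned char digest[32];
        int tfmfd, opfd;
        ssize_t i, n;

        tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
        bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
        opfd = accept(tfmfd, NULL, 0);

        /* Two sends; MSG_MORE keeps the hash state open across them. */
        send(opfd, "hello ", 6, MSG_MORE);
        send(opfd, "world", 5, 0);

        /* Reading back returns the finished digest. */
        n = read(opfd, digest, sizeof(digest));
        for (i = 0; i < n; i++)
                printf("%02x", digest[i]);
        putchar('\n');

        close(opfd);
        close(tfmfd);
        return 0;
}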