Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
More iov_iter work from Al Viro.

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Commit f2683b743f
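The recurring pattern across this merge is the same in every file: stop walking msg->msg_iter.iov by hand (casting away const along the way) and use the iov_iter primitives, which track position themselves. A minimal sketch of the resulting shape, mirroring the new memcpy_from_msg() in the include/linux/skbuff.h hunk below; note that copy_from_iter() reports the byte count copied rather than 0/-EFAULT:

    /* Sketch of the conversion pattern; copy_from_iter() advances
     * msg->msg_iter as a side effect, so callers no longer pass offsets. */
    static int example_copy_from_msg(void *data, struct msghdr *msg, int len)
    {
        return copy_from_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
    }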
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -338,49 +338,31 @@ static const struct net_proto_family alg_family = {
     .owner = THIS_MODULE,
 };
 
-int af_alg_make_sg(struct af_alg_sgl *sgl, void __user *addr, int len,
-                   int write)
+int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len)
 {
-    unsigned long from = (unsigned long)addr;
-    unsigned long npages;
-    unsigned off;
-    int err;
-    int i;
+    size_t off;
+    ssize_t n;
+    int npages, i;
 
-    err = -EFAULT;
-    if (!access_ok(write ? VERIFY_READ : VERIFY_WRITE, addr, len))
-        goto out;
+    n = iov_iter_get_pages(iter, sgl->pages, len, ALG_MAX_PAGES, &off);
+    if (n < 0)
+        return n;
 
-    off = from & ~PAGE_MASK;
-    npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-    if (npages > ALG_MAX_PAGES)
-        npages = ALG_MAX_PAGES;
-
-    err = get_user_pages_fast(from, npages, write, sgl->pages);
-    if (err < 0)
-        goto out;
-
-    npages = err;
-    err = -EINVAL;
+    npages = PAGE_ALIGN(off + n);
     if (WARN_ON(npages == 0))
-        goto out;
-
-    err = 0;
+        return -EINVAL;
 
     sg_init_table(sgl->sg, npages);
 
-    for (i = 0; i < npages; i++) {
+    for (i = 0, len = n; i < npages; i++) {
         int plen = min_t(int, len, PAGE_SIZE - off);
 
         sg_set_page(sgl->sg + i, sgl->pages[i], plen, off);
 
         off = 0;
         len -= plen;
-        err += plen;
     }
-
-out:
-    return err;
+    return n;
 }
 EXPORT_SYMBOL_GPL(af_alg_make_sg);
 
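For context on the new af_alg_make_sg(): iov_iter_get_pages() pins user pages covering the front of the iterator and reports the byte count plus the offset into the first page, but it does not consume the iterator. Callers therefore advance it explicitly once the data has been processed, as the algif_hash hunk below does. An illustrative caller loop (a sketch assembled from the hunks here, not literal commit text; `limit` is the per-call cap):

    while (iov_iter_count(&msg->msg_iter)) {
        int len = min_t(int, iov_iter_count(&msg->msg_iter), limit);

        len = af_alg_make_sg(&ctx->sgl, &msg->msg_iter, len);
        if (len < 0)
            break;                      /* fault while pinning pages */
        /* ... hand ctx->sgl.sg to the crypto layer ... */
        af_alg_free_sg(&ctx->sgl);      /* drop the page references */
        iov_iter_advance(&msg->msg_iter, len);
    }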
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -41,8 +41,6 @@ static int hash_sendmsg(struct kiocb *unused, struct socket *sock,
     struct sock *sk = sock->sk;
     struct alg_sock *ask = alg_sk(sk);
     struct hash_ctx *ctx = ask->private;
-    unsigned long iovlen;
-    const struct iovec *iov;
     long copied = 0;
     int err;
 
@@ -58,37 +56,28 @@ static int hash_sendmsg(struct kiocb *unused, struct socket *sock,
 
     ctx->more = 0;
 
-    for (iov = msg->msg_iter.iov, iovlen = msg->msg_iter.nr_segs; iovlen > 0;
-         iovlen--, iov++) {
-        unsigned long seglen = iov->iov_len;
-        char __user *from = iov->iov_base;
+    while (iov_iter_count(&msg->msg_iter)) {
+        int len = iov_iter_count(&msg->msg_iter);
 
-        while (seglen) {
-            int len = min_t(unsigned long, seglen, limit);
-            int newlen;
+        if (len > limit)
+            len = limit;
 
-            newlen = af_alg_make_sg(&ctx->sgl, from, len, 0);
-            if (newlen < 0) {
-                err = copied ? 0 : newlen;
-                goto unlock;
-            }
+        len = af_alg_make_sg(&ctx->sgl, &msg->msg_iter, len);
+        if (len < 0) {
+            err = copied ? 0 : len;
+            goto unlock;
+        }
 
-            ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL,
-                                    newlen);
+        ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL, len);
 
-            err = af_alg_wait_for_completion(
-                crypto_ahash_update(&ctx->req),
-                &ctx->completion);
-
-            af_alg_free_sg(&ctx->sgl);
-
-            if (err)
-                goto unlock;
-
-            seglen -= newlen;
-            from += newlen;
-            copied += newlen;
-        }
+        err = af_alg_wait_for_completion(crypto_ahash_update(&ctx->req),
+                                         &ctx->completion);
+        af_alg_free_sg(&ctx->sgl);
+        if (err)
+            goto unlock;
+
+        copied += len;
+        iov_iter_advance(&msg->msg_iter, len);
     }
 
     err = 0;
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -426,67 +426,59 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
                             &ctx->req));
     struct skcipher_sg_list *sgl;
     struct scatterlist *sg;
-    unsigned long iovlen;
-    const struct iovec *iov;
     int err = -EAGAIN;
     int used;
     long copied = 0;
 
     lock_sock(sk);
-    for (iov = msg->msg_iter.iov, iovlen = msg->msg_iter.nr_segs; iovlen > 0;
-         iovlen--, iov++) {
-        unsigned long seglen = iov->iov_len;
-        char __user *from = iov->iov_base;
+    while (iov_iter_count(&msg->msg_iter)) {
+        sgl = list_first_entry(&ctx->tsgl,
+                               struct skcipher_sg_list, list);
+        sg = sgl->sg;
 
-        while (seglen) {
-            sgl = list_first_entry(&ctx->tsgl,
-                                   struct skcipher_sg_list, list);
-            sg = sgl->sg;
+        while (!sg->length)
+            sg++;
 
-            while (!sg->length)
-                sg++;
-
-            if (!ctx->used) {
-                err = skcipher_wait_for_data(sk, flags);
-                if (err)
-                    goto unlock;
-            }
-
-            used = min_t(unsigned long, ctx->used, seglen);
-
-            used = af_alg_make_sg(&ctx->rsgl, from, used, 1);
-            err = used;
-            if (err < 0)
-                goto unlock;
-
-            if (ctx->more || used < ctx->used)
-                used -= used % bs;
-
-            err = -EINVAL;
-            if (!used)
-                goto free;
-
-            ablkcipher_request_set_crypt(&ctx->req, sg,
-                                         ctx->rsgl.sg, used,
-                                         ctx->iv);
-
-            err = af_alg_wait_for_completion(
-                ctx->enc ?
-                    crypto_ablkcipher_encrypt(&ctx->req) :
-                    crypto_ablkcipher_decrypt(&ctx->req),
-                &ctx->completion);
-
-free:
-            af_alg_free_sg(&ctx->rsgl);
-
-            if (err)
-                goto unlock;
-
-            copied += used;
-            from += used;
-            seglen -= used;
-            skcipher_pull_sgl(sk, used);
+        used = ctx->used;
+        if (!used) {
+            err = skcipher_wait_for_data(sk, flags);
+            if (err)
+                goto unlock;
         }
+
+        used = min_t(unsigned long, used, iov_iter_count(&msg->msg_iter));
+
+        used = af_alg_make_sg(&ctx->rsgl, &msg->msg_iter, used);
+        err = used;
+        if (err < 0)
+            goto unlock;
+
+        if (ctx->more || used < ctx->used)
+            used -= used % bs;
+
+        err = -EINVAL;
+        if (!used)
+            goto free;
+
+        ablkcipher_request_set_crypt(&ctx->req, sg,
+                                     ctx->rsgl.sg, used,
+                                     ctx->iv);
+
+        err = af_alg_wait_for_completion(
+                ctx->enc ?
+                    crypto_ablkcipher_encrypt(&ctx->req) :
+                    crypto_ablkcipher_decrypt(&ctx->req),
+                &ctx->completion);
+
+free:
+        af_alg_free_sg(&ctx->rsgl);
+
+        if (err)
+            goto unlock;
+
+        copied += used;
+        skcipher_pull_sgl(sk, used);
+        iov_iter_advance(&msg->msg_iter, used);
     }
 
     err = 0;
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -370,12 +370,12 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
         to_copy = size - bytes_copied;
 
         if (is_iovec) {
-            struct iovec *iov = (struct iovec *)src;
+            struct msghdr *msg = (struct msghdr *)src;
             int err;
 
             /* The iovec will track bytes_copied internally. */
-            err = memcpy_fromiovec((u8 *)va + page_offset,
-                                   iov, to_copy);
+            err = memcpy_from_msg((u8 *)va + page_offset,
+                                  msg, to_copy);
             if (err != 0) {
                 if (kernel_if->host)
                     kunmap(kernel_if->u.h.page[page_index]);
@@ -580,7 +580,7 @@ static int qp_memcpy_from_queue(void *dest,
  */
 static int qp_memcpy_to_queue_iov(struct vmci_queue *queue,
                                   u64 queue_offset,
-                                  const void *src,
+                                  const void *msg,
                                   size_t src_offset, size_t size)
 {
 
@@ -588,7 +588,7 @@ static int qp_memcpy_to_queue_iov(struct vmci_queue *queue,
      * We ignore src_offset because src is really a struct iovec * and will
      * maintain offset internally.
      */
-    return __qp_memcpy_to_queue(queue, queue_offset, src, size, true);
+    return __qp_memcpy_to_queue(queue, queue_offset, msg, size, true);
 }
 
 /*
@@ -3223,13 +3223,13 @@ EXPORT_SYMBOL_GPL(vmci_qpair_peek);
  * of bytes enqueued or < 0 on error.
  */
 ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
-                          void *iov,
+                          struct msghdr *msg,
                           size_t iov_size,
                           int buf_type)
 {
     ssize_t result;
 
-    if (!qpair || !iov)
+    if (!qpair)
         return VMCI_ERROR_INVALID_ARGS;
 
     qp_lock(qpair);
@@ -3238,7 +3238,7 @@ ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
     result = qp_enqueue_locked(qpair->produce_q,
                                qpair->consume_q,
                                qpair->produce_q_size,
-                               iov, iov_size,
+                               msg, iov_size,
                                qp_memcpy_to_queue_iov);
 
     if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -84,10 +84,6 @@ struct vhost_net_ubuf_ref {
 
 struct vhost_net_virtqueue {
     struct vhost_virtqueue vq;
-    /* hdr is used to store the virtio header.
-     * Since each iovec has >= 1 byte length, we never need more than
-     * header length entries to store the header. */
-    struct iovec hdr[sizeof(struct virtio_net_hdr_mrg_rxbuf)];
     size_t vhost_hlen;
     size_t sock_hlen;
     /* vhost zerocopy support fields below: */
@@ -235,44 +231,6 @@ static bool vhost_sock_zcopy(struct socket *sock)
         sock_flag(sock->sk, SOCK_ZEROCOPY);
 }
 
-/* Pop first len bytes from iovec. Return number of segments used. */
-static int move_iovec_hdr(struct iovec *from, struct iovec *to,
-                          size_t len, int iov_count)
-{
-    int seg = 0;
-    size_t size;
-
-    while (len && seg < iov_count) {
-        size = min(from->iov_len, len);
-        to->iov_base = from->iov_base;
-        to->iov_len = size;
-        from->iov_len -= size;
-        from->iov_base += size;
-        len -= size;
-        ++from;
-        ++to;
-        ++seg;
-    }
-    return seg;
-}
-/* Copy iovec entries for len bytes from iovec. */
-static void copy_iovec_hdr(const struct iovec *from, struct iovec *to,
-                           size_t len, int iovcount)
-{
-    int seg = 0;
-    size_t size;
-
-    while (len && seg < iovcount) {
-        size = min(from->iov_len, len);
-        to->iov_base = from->iov_base;
-        to->iov_len = size;
-        len -= size;
-        ++from;
-        ++to;
-        ++seg;
-    }
-}
-
 /* In case of DMA done not in order in lower device driver for some reason.
  * upend_idx is used to track end of used idx, done_idx is used to track head
  * of used idx. Once lower device DMA done contiguously, we will signal KVM
@@ -336,7 +294,7 @@ static void handle_tx(struct vhost_net *net)
 {
     struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
     struct vhost_virtqueue *vq = &nvq->vq;
-    unsigned out, in, s;
+    unsigned out, in;
     int head;
     struct msghdr msg = {
         .msg_name = NULL,
@@ -395,16 +353,17 @@ static void handle_tx(struct vhost_net *net)
             break;
         }
         /* Skip header. TODO: support TSO. */
-        s = move_iovec_hdr(vq->iov, nvq->hdr, hdr_size, out);
         len = iov_length(vq->iov, out);
+        iov_iter_init(&msg.msg_iter, WRITE, vq->iov, out, len);
+        iov_iter_advance(&msg.msg_iter, hdr_size);
         /* Sanity check */
-        if (!len) {
+        if (!iov_iter_count(&msg.msg_iter)) {
             vq_err(vq, "Unexpected header len for TX: "
                    "%zd expected %zd\n",
-                   iov_length(nvq->hdr, s), hdr_size);
+                   len, hdr_size);
             break;
         }
+        len = iov_iter_count(&msg.msg_iter);
 
         zcopy_used = zcopy && len >= VHOST_GOODCOPY_LEN
                    && (nvq->upend_idx + 1) % UIO_MAXIOV !=
@@ -569,9 +528,9 @@ static void handle_rx(struct vhost_net *net)
         .msg_controllen = 0,
         .msg_flags = MSG_DONTWAIT,
     };
-    struct virtio_net_hdr_mrg_rxbuf hdr = {
-        .hdr.flags = 0,
-        .hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE
+    struct virtio_net_hdr hdr = {
+        .flags = 0,
+        .gso_type = VIRTIO_NET_HDR_GSO_NONE
     };
     size_t total_len = 0;
     int err, mergeable;
@@ -579,6 +538,7 @@ static void handle_rx(struct vhost_net *net)
     size_t vhost_hlen, sock_hlen;
     size_t vhost_len, sock_len;
     struct socket *sock;
+    struct iov_iter fixup;
 
     mutex_lock(&vq->mutex);
     sock = vq->private_data;
@@ -623,14 +583,19 @@ static void handle_rx(struct vhost_net *net)
             break;
         }
         /* We don't need to be notified again. */
-        if (unlikely((vhost_hlen)))
-            /* Skip header. TODO: support TSO. */
-            move_iovec_hdr(vq->iov, nvq->hdr, vhost_hlen, in);
-        else
-            /* Copy the header for use in VIRTIO_NET_F_MRG_RXBUF:
-             * needed because recvmsg can modify msg_iov. */
-            copy_iovec_hdr(vq->iov, nvq->hdr, sock_hlen, in);
-        iov_iter_init(&msg.msg_iter, READ, vq->iov, in, sock_len);
+        iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len);
+        fixup = msg.msg_iter;
+        if (unlikely((vhost_hlen))) {
+            /* We will supply the header ourselves
+             * TODO: support TSO.
+             */
+            iov_iter_advance(&msg.msg_iter, vhost_hlen);
+        } else {
+            /* It'll come from socket; we'll need to patch
+             * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
+             */
+            iov_iter_advance(&fixup, sizeof(hdr));
+        }
         err = sock->ops->recvmsg(NULL, sock, &msg,
                                  sock_len, MSG_DONTWAIT | MSG_TRUNC);
         /* Userspace might have consumed the packet meanwhile:
@@ -642,18 +607,18 @@ static void handle_rx(struct vhost_net *net)
             vhost_discard_vq_desc(vq, headcount);
             continue;
         }
         /* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
         if (unlikely(vhost_hlen) &&
-            memcpy_toiovecend(nvq->hdr, (unsigned char *)&hdr, 0,
-                              vhost_hlen)) {
+            copy_to_iter(&hdr, sizeof(hdr), &fixup) != sizeof(hdr)) {
             vq_err(vq, "Unable to write vnet_hdr at addr %p\n",
                    vq->iov->iov_base);
             break;
         }
-        /* TODO: Should check and handle checksum. */
+        /* Supply (or replace) ->num_buffers if VIRTIO_NET_F_MRG_RXBUF
+         * TODO: Should check and handle checksum.
+         */
         if (likely(mergeable) &&
-            memcpy_toiovecend(nvq->hdr, (unsigned char *)&headcount,
-                              offsetof(typeof(hdr), num_buffers),
-                              sizeof hdr.num_buffers)) {
+            copy_to_iter(&headcount, 2, &fixup) != 2) {
             vq_err(vq, "Failed num_buffers write");
             vhost_discard_vq_desc(vq, headcount);
             break;
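The handle_rx() change relies on struct iov_iter being plain value state: assigning it to a local makes a cheap snapshot of the current position. The fixup copy keeps pointing at the header area while msg.msg_iter is advanced past it and consumed by ->recvmsg(), so the virtio header (or just its num_buffers field) can be written back afterwards. In isolation (a sketch distilled from the hunks above, not the literal commit text):

    struct iov_iter fixup;

    iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len);
    fixup = msg.msg_iter;               /* remember where the header lives */
    iov_iter_advance(&msg.msg_iter, vhost_hlen);

    /* ... sock->ops->recvmsg() fills in the packet body ... */

    if (copy_to_iter(&hdr, sizeof(hdr), &fixup) != sizeof(hdr))
        vq_err(vq, "Unable to write vnet_hdr\n");   /* guest memory faulted */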
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1079,7 +1079,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
                 req_size, vq->iov[0].iov_len);
             break;
         }
-        ret = memcpy_fromiovecend(req, &vq->iov[0], 0, req_size);
+        ret = copy_from_user(req, vq->iov[0].iov_base, req_size);
         if (unlikely(ret)) {
             vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
             break;
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1125,6 +1125,7 @@ static int get_indirect(struct vhost_virtqueue *vq,
     struct vring_desc desc;
     unsigned int i = 0, count, found = 0;
     u32 len = vhost32_to_cpu(vq, indirect->len);
+    struct iov_iter from;
     int ret;
 
     /* Sanity check */
@@ -1142,6 +1143,7 @@ static int get_indirect(struct vhost_virtqueue *vq,
         vq_err(vq, "Translation failure %d in indirect.\n", ret);
         return ret;
     }
+    iov_iter_init(&from, READ, vq->indirect, ret, len);
 
     /* We will use the result as an address to read from, so most
      * architectures only need a compiler barrier here. */
@@ -1164,8 +1166,8 @@ static int get_indirect(struct vhost_virtqueue *vq,
                    i, count);
             return -EINVAL;
         }
-        if (unlikely(memcpy_fromiovec((unsigned char *)&desc,
-                                      vq->indirect, sizeof desc))) {
+        if (unlikely(copy_from_iter(&desc, sizeof(desc), &from) !=
+                     sizeof(desc))) {
             vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
                    i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
             return -EINVAL;
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -306,8 +306,8 @@ static int afs_send_pages(struct afs_call *call, struct msghdr *msg,
 
         _debug("- range %u-%u%s",
                offset, to, msg->msg_flags ? " [more]" : "");
-        iov_iter_init(&msg->msg_iter, WRITE,
-                      (struct iovec *) iov, 1, to - offset);
+        iov_iter_kvec(&msg->msg_iter, WRITE | ITER_KVEC,
+                      iov, 1, to - offset);
 
         /* have to change the state *before* sending the last
          * packet as RxRPC might give us the reply before it
@@ -384,7 +384,7 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
 
     msg.msg_name = NULL;
     msg.msg_namelen = 0;
-    iov_iter_init(&msg.msg_iter, WRITE, (struct iovec *)iov, 1,
+    iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 1,
                   call->request_size);
     msg.msg_control = NULL;
     msg.msg_controllen = 0;
@@ -770,7 +770,7 @@ static int afs_deliver_cm_op_id(struct afs_call *call, struct sk_buff *skb,
 void afs_send_empty_reply(struct afs_call *call)
 {
     struct msghdr msg;
-    struct iovec iov[1];
+    struct kvec iov[1];
 
     _enter("");
 
@@ -778,7 +778,7 @@ void afs_send_empty_reply(struct afs_call *call)
     iov[0].iov_len = 0;
     msg.msg_name = NULL;
     msg.msg_namelen = 0;
-    iov_iter_init(&msg.msg_iter, WRITE, iov, 0, 0); /* WTF? */
+    iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 0, 0); /* WTF? */
     msg.msg_control = NULL;
     msg.msg_controllen = 0;
     msg.msg_flags = 0;
@@ -805,7 +805,7 @@ void afs_send_empty_reply(struct afs_call *call)
 void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
 {
     struct msghdr msg;
-    struct iovec iov[1];
+    struct kvec iov[1];
     int n;
 
     _enter("");
 
@@ -814,7 +814,7 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
     iov[0].iov_len = len;
     msg.msg_name = NULL;
     msg.msg_namelen = 0;
-    iov_iter_init(&msg.msg_iter, WRITE, iov, 1, len);
+    iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 1, len);
     msg.msg_control = NULL;
     msg.msg_controllen = 0;
     msg.msg_flags = 0;
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -67,8 +67,7 @@ int af_alg_unregister_type(const struct af_alg_type *type);
 int af_alg_release(struct socket *sock);
 int af_alg_accept(struct sock *sk, struct socket *newsock);
 
-int af_alg_make_sg(struct af_alg_sgl *sgl, void __user *addr, int len,
-                   int write);
+int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len);
 void af_alg_free_sg(struct af_alg_sgl *sgl);
 
 int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con);
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2487,19 +2487,18 @@ static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
 }
 
 static inline int skb_add_data(struct sk_buff *skb,
-                               char __user *from, int copy)
+                               struct iov_iter *from, int copy)
 {
     const int off = skb->len;
 
     if (skb->ip_summed == CHECKSUM_NONE) {
-        int err = 0;
-        __wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
-                                              copy, 0, &err);
-        if (!err) {
+        __wsum csum = 0;
+        if (csum_and_copy_from_iter(skb_put(skb, copy), copy,
+                                    &csum, from) == copy) {
             skb->csum = csum_block_add(skb->csum, csum, off);
             return 0;
         }
-    } else if (!copy_from_user(skb_put(skb, copy), from, copy))
+    } else if (copy_from_iter(skb_put(skb, copy), copy, from) == copy)
         return 0;
 
     __skb_trim(skb, off);
@@ -2696,8 +2695,7 @@ int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
 
 static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
 {
-    /* XXX: stripping const */
-    return memcpy_fromiovec(data, (struct iovec *)msg->msg_iter.iov, len);
+    return copy_from_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
 }
 
 static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -318,13 +318,6 @@ struct ucred {
 /* IPX options */
 #define IPX_TYPE    1
 
-extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
-                                          struct iovec *iov,
-                                          int offset,
-                                          unsigned int len, __wsum *csump);
-extern unsigned long iov_pages(const struct iovec *iov, int offset,
-                               unsigned long nr_segs);
-
 extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
 extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -135,10 +135,4 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
 size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
 size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
 
-int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
-int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
-                        int offset, int len);
-int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
-                      int offset, int len);
-
 #endif
--- a/include/linux/vmw_vmci_api.h
+++ b/include/linux/vmw_vmci_api.h
@@ -74,7 +74,7 @@ ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
 ssize_t vmci_qpair_peek(struct vmci_qp *qpair, void *buf, size_t buf_size,
                         int mode);
 ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
-                          void *iov, size_t iov_size, int mode);
+                          struct msghdr *msg, size_t iov_size, int mode);
 ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
                           struct msghdr *msg, size_t iov_size, int mode);
 ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, struct msghdr *msg, size_t iov_size,
--- a/include/net/ping.h
+++ b/include/net/ping.h
@@ -59,7 +59,7 @@ extern struct pingv6_ops pingv6_ops;
 
 struct pingfakehdr {
     struct icmphdr icmph;
-    struct iovec *iov;
+    struct msghdr *msg;
     sa_family_t family;
     __wsum wcheck;
 };
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1803,27 +1803,25 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
 }
 
 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
-                                           char __user *from, char *to,
+                                           struct iov_iter *from, char *to,
                                            int copy, int offset)
 {
     if (skb->ip_summed == CHECKSUM_NONE) {
-        int err = 0;
-        __wsum csum = csum_and_copy_from_user(from, to, copy, 0, &err);
-        if (err)
-            return err;
+        __wsum csum = 0;
+        if (csum_and_copy_from_iter(to, copy, &csum, from) != copy)
+            return -EFAULT;
         skb->csum = csum_block_add(skb->csum, csum, offset);
     } else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
-        if (!access_ok(VERIFY_READ, from, copy) ||
-            __copy_from_user_nocache(to, from, copy))
+        if (copy_from_iter_nocache(to, copy, from) != copy)
             return -EFAULT;
-    } else if (copy_from_user(to, from, copy))
+    } else if (copy_from_iter(to, copy, from) != copy)
         return -EFAULT;
 
     return 0;
 }
 
 static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
-                                       char __user *from, int copy)
+                                       struct iov_iter *from, int copy)
 {
     int err, offset = skb->len;
 
@@ -1835,7 +1833,7 @@ static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
     return err;
 }
 
-static inline int skb_copy_to_page_nocache(struct sock *sk, char __user *from,
+static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *from,
                                            struct sk_buff *skb,
                                            struct page *page,
                                            int off, int copy)
--- a/include/net/udplite.h
+++ b/include/net/udplite.h
@@ -20,8 +20,7 @@ static __inline__ int udplite_getfrag(void *from, char *to, int offset,
                                       int len, int odd, struct sk_buff *skb)
 {
     struct msghdr *msg = from;
-    /* XXX: stripping const */
-    return memcpy_fromiovecend(to, (struct iovec *)msg->msg_iter.iov, offset, len);
+    return copy_from_iter(to, len, &msg->msg_iter) != len ? -EFAULT : 0;
 }
 
 /* Designate sk as UDP-Lite socket */
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -24,7 +24,7 @@ obj-y += lockref.o
 
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
      bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
-     gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \
+     gcd.o lcm.o list_sort.o uuid.o flex_array.o clz_ctz.o \
      bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
      percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o
 obj-y += string_helpers.o
--- a/lib/iovec.c
+++ /dev/null
@@ -1,87 +0,0 @@
-#include <linux/uaccess.h>
-#include <linux/export.h>
-#include <linux/uio.h>
-
-/*
- *	Copy iovec to kernel. Returns -EFAULT on error.
- *
- *	Note: this modifies the original iovec.
- */
-
-int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len)
-{
-    while (len > 0) {
-        if (iov->iov_len) {
-            int copy = min_t(unsigned int, len, iov->iov_len);
-            if (copy_from_user(kdata, iov->iov_base, copy))
-                return -EFAULT;
-            len -= copy;
-            kdata += copy;
-            iov->iov_base += copy;
-            iov->iov_len -= copy;
-        }
-        iov++;
-    }
-
-    return 0;
-}
-EXPORT_SYMBOL(memcpy_fromiovec);
-
-/*
- *	Copy kernel to iovec. Returns -EFAULT on error.
- */
-
-int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
-                      int offset, int len)
-{
-    int copy;
-    for (; len > 0; ++iov) {
-        /* Skip over the finished iovecs */
-        if (unlikely(offset >= iov->iov_len)) {
-            offset -= iov->iov_len;
-            continue;
-        }
-        copy = min_t(unsigned int, iov->iov_len - offset, len);
-        if (copy_to_user(iov->iov_base + offset, kdata, copy))
-            return -EFAULT;
-        offset = 0;
-        kdata += copy;
-        len -= copy;
-    }
-
-    return 0;
-}
-EXPORT_SYMBOL(memcpy_toiovecend);
-
-/*
- *	Copy iovec to kernel. Returns -EFAULT on error.
- */
-
-int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
-                        int offset, int len)
-{
-    /* No data? Done! */
-    if (len == 0)
-        return 0;
-
-    /* Skip over the finished iovecs */
-    while (offset >= iov->iov_len) {
-        offset -= iov->iov_len;
-        iov++;
-    }
-
-    while (len > 0) {
-        u8 __user *base = iov->iov_base + offset;
-        int copy = min_t(unsigned int, len, iov->iov_len - offset);
-
-        offset = 0;
-        if (copy_from_user(kdata, base, copy))
-            return -EFAULT;
-        len -= copy;
-        kdata += copy;
-        iov++;
-    }
-
-    return 0;
-}
-EXPORT_SYMBOL(memcpy_fromiovecend);
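Everything lib/iovec.c (and net/core/iovec.c below) provided now has a direct iov_iter equivalent, which is why both files can go. A rough correspondence, as a sketch rather than part of the commit; the iov_iter variants return the byte count copied and always advance the iterator, instead of returning 0/-EFAULT and mutating the iovec array:

    /* memcpy_fromiovec(k, iov, len)          -> copy_from_iter(k, len, &iter)
     * memcpy_fromiovecend(k, iov, off, len)  -> iov_iter_advance(&iter, off);
     *                                           copy_from_iter(k, len, &iter)
     * memcpy_toiovecend(iov, k, off, len)    -> copy_to_iter(k, len, &iter)
     * csum_partial_copy_fromiovecend(...)    -> csum_and_copy_from_iter(...)
     */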
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -2,7 +2,7 @@
 # Makefile for the Linux networking core.
 #
 
-obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \
+obj-y := sock.o request_sock.o skbuff.o datagram.o stream.o scm.o \
      gen_stats.o gen_estimator.o net_namespace.o secure_seq.o flow_dissector.o
 
 obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
--- a/net/core/iovec.c
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- *	iovec manipulation routines.
- *
- *
- *	This program is free software; you can redistribute it and/or
- *	modify it under the terms of the GNU General Public License
- *	as published by the Free Software Foundation; either version
- *	2 of the License, or (at your option) any later version.
- *
- *	Fixes:
- *		Andrew Lunn	:	Errors in iovec copying.
- *		Pedro Roque	:	Added memcpy_fromiovecend and
- *					csum_..._fromiovecend.
- *		Andi Kleen	:	fixed error handling for 2.1
- *		Alexey Kuznetsov:	2.1 optimisations
- *		Andi Kleen	:	Fix csum*fromiovecend for IPv6.
- */
-
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/net.h>
-#include <linux/in6.h>
-#include <asm/uaccess.h>
-#include <asm/byteorder.h>
-#include <net/checksum.h>
-#include <net/sock.h>
-
-/*
- *	And now for the all-in-one: copy and checksum from a user iovec
- *	directly to a datagram
- *	Calls to csum_partial but the last must be in 32 bit chunks
- *
- *	ip_build_xmit must ensure that when fragmenting only the last
- *	call to this function will be unaligned also.
- */
-int csum_partial_copy_fromiovecend(unsigned char *kdata, struct iovec *iov,
-                                   int offset, unsigned int len, __wsum *csump)
-{
-    __wsum csum = *csump;
-    int partial_cnt = 0, err = 0;
-
-    /* Skip over the finished iovecs */
-    while (offset >= iov->iov_len) {
-        offset -= iov->iov_len;
-        iov++;
-    }
-
-    while (len > 0) {
-        u8 __user *base = iov->iov_base + offset;
-        int copy = min_t(unsigned int, len, iov->iov_len - offset);
-
-        offset = 0;
-
-        /* There is a remnant from previous iov. */
-        if (partial_cnt) {
-            int par_len = 4 - partial_cnt;
-
-            /* iov component is too short ... */
-            if (par_len > copy) {
-                if (copy_from_user(kdata, base, copy))
-                    goto out_fault;
-                kdata += copy;
-                base += copy;
-                partial_cnt += copy;
-                len -= copy;
-                iov++;
-                if (len)
-                    continue;
-                *csump = csum_partial(kdata - partial_cnt,
-                                      partial_cnt, csum);
-                goto out;
-            }
-            if (copy_from_user(kdata, base, par_len))
-                goto out_fault;
-            csum = csum_partial(kdata - partial_cnt, 4, csum);
-            kdata += par_len;
-            base += par_len;
-            copy -= par_len;
-            len -= par_len;
-            partial_cnt = 0;
-        }
-
-        if (len > copy) {
-            partial_cnt = copy % 4;
-            if (partial_cnt) {
-                copy -= partial_cnt;
-                if (copy_from_user(kdata + copy, base + copy,
-                                   partial_cnt))
-                    goto out_fault;
-            }
-        }
-
-        if (copy) {
-            csum = csum_and_copy_from_user(base, kdata, copy,
-                                           csum, &err);
-            if (err)
-                goto out;
-        }
-        len -= copy + partial_cnt;
-        kdata += copy + partial_cnt;
-        iov++;
-    }
-    *csump = csum;
-out:
-    return err;
-
-out_fault:
-    err = -EFAULT;
-    goto out;
-}
-EXPORT_SYMBOL(csum_partial_copy_fromiovecend);
-
-unsigned long iov_pages(const struct iovec *iov, int offset,
-                        unsigned long nr_segs)
-{
-    unsigned long seg, base;
-    int pages = 0, len, size;
-
-    while (nr_segs && (offset >= iov->iov_len)) {
-        offset -= iov->iov_len;
-        ++iov;
-        --nr_segs;
-    }
-
-    for (seg = 0; seg < nr_segs; seg++) {
-        base = (unsigned long)iov[seg].iov_base + offset;
-        len = iov[seg].iov_len - offset;
-        size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
-        pages += size;
-        offset = 0;
-    }
-
-    return pages;
-}
-EXPORT_SYMBOL(iov_pages);
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -755,13 +755,11 @@ ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk
     struct msghdr *msg = from;
 
     if (skb->ip_summed == CHECKSUM_PARTIAL) {
-        /* XXX: stripping const */
-        if (memcpy_fromiovecend(to, (struct iovec *)msg->msg_iter.iov, offset, len) < 0)
+        if (copy_from_iter(to, len, &msg->msg_iter) != len)
             return -EFAULT;
     } else {
         __wsum csum = 0;
-        /* XXX: stripping const */
-        if (csum_partial_copy_fromiovecend(to, (struct iovec *)msg->msg_iter.iov, offset, len, &csum) < 0)
+        if (csum_and_copy_from_iter(to, len, &csum, &msg->msg_iter) != len)
             return -EFAULT;
         skb->csum = csum_block_add(skb->csum, csum, odd);
     }
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -599,18 +599,18 @@ int ping_getfrag(void *from, char *to,
     struct pingfakehdr *pfh = (struct pingfakehdr *)from;
 
     if (offset == 0) {
-        if (fraglen < sizeof(struct icmphdr))
+        fraglen -= sizeof(struct icmphdr);
+        if (fraglen < 0)
             BUG();
-        if (csum_partial_copy_fromiovecend(to + sizeof(struct icmphdr),
-                pfh->iov, 0, fraglen - sizeof(struct icmphdr),
-                &pfh->wcheck))
+        if (csum_and_copy_from_iter(to + sizeof(struct icmphdr),
+                fraglen, &pfh->wcheck,
+                &pfh->msg->msg_iter) != fraglen)
             return -EFAULT;
     } else if (offset < sizeof(struct icmphdr)) {
         BUG();
     } else {
-        if (csum_partial_copy_fromiovecend
-                (to, pfh->iov, offset - sizeof(struct icmphdr),
-                 fraglen, &pfh->wcheck))
+        if (csum_and_copy_from_iter(to, fraglen, &pfh->wcheck,
+                                    &pfh->msg->msg_iter) != fraglen)
             return -EFAULT;
     }
 
@@ -811,8 +811,7 @@ back_from_confirm:
     pfh.icmph.checksum = 0;
     pfh.icmph.un.echo.id = inet->inet_sport;
     pfh.icmph.un.echo.sequence = user_icmph.un.echo.sequence;
-    /* XXX: stripping const */
-    pfh.iov = (struct iovec *)msg->msg_iter.iov;
+    pfh.msg = msg;
     pfh.wcheck = 0;
     pfh.family = AF_INET;
 
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -337,7 +337,7 @@ int raw_rcv(struct sock *sk, struct sk_buff *skb)
 }
 
 static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
-                           void *from, size_t length,
+                           struct msghdr *msg, size_t length,
                            struct rtable **rtp,
                            unsigned int flags)
 {
@@ -382,7 +382,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
 
     skb->transport_header = skb->network_header;
     err = -EFAULT;
-    if (memcpy_fromiovecend((void *)iph, from, 0, length))
+    if (memcpy_from_msg(iph, msg, length))
         goto error_free;
 
     iphlen = iph->ihl * 4;
@@ -625,8 +625,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 back_from_confirm:
 
     if (inet->hdrincl)
-        /* XXX: stripping const */
-        err = raw_send_hdrinc(sk, &fl4, (struct iovec *)msg->msg_iter.iov, len,
+        err = raw_send_hdrinc(sk, &fl4, msg, len,
                               &rt, msg->msg_flags);
 
     else {
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1067,11 +1067,10 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
 int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                 size_t size)
 {
-    const struct iovec *iov;
     struct tcp_sock *tp = tcp_sk(sk);
     struct sk_buff *skb;
-    int iovlen, flags, err, copied = 0;
-    int mss_now = 0, size_goal, copied_syn = 0, offset = 0;
+    int flags, err, copied = 0;
+    int mss_now = 0, size_goal, copied_syn = 0;
     bool sg;
     long timeo;
 
@@ -1084,7 +1083,6 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
             goto out;
         else if (err)
             goto out_err;
-        offset = copied_syn;
     }
 
     timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
@@ -1118,8 +1116,6 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
     mss_now = tcp_send_mss(sk, &size_goal, flags);
 
     /* Ok commence sending. */
-    iovlen = msg->msg_iter.nr_segs;
-    iov = msg->msg_iter.iov;
     copied = 0;
 
     err = -EPIPE;
@@ -1128,151 +1124,134 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
     sg = !!(sk->sk_route_caps & NETIF_F_SG);
 
-    while (--iovlen >= 0) {
-        size_t seglen = iov->iov_len;
-        unsigned char __user *from = iov->iov_base;
-
-        iov++;
-        if (unlikely(offset > 0)) { /* Skip bytes copied in SYN */
-            if (offset >= seglen) {
-                offset -= seglen;
-                continue;
-            }
-            seglen -= offset;
-            from += offset;
-            offset = 0;
-        }
-
-        while (seglen > 0) {
-            int copy = 0;
-            int max = size_goal;
-
-            skb = tcp_write_queue_tail(sk);
-            if (tcp_send_head(sk)) {
-                if (skb->ip_summed == CHECKSUM_NONE)
-                    max = mss_now;
-                copy = max - skb->len;
-            }
-
-            if (copy <= 0) {
-new_segment:
-                /* Allocate new segment. If the interface is SG,
-                 * allocate skb fitting to single page.
-                 */
-                if (!sk_stream_memory_free(sk))
-                    goto wait_for_sndbuf;
-
-                skb = sk_stream_alloc_skb(sk,
-                                          select_size(sk, sg),
-                                          sk->sk_allocation);
-                if (!skb)
-                    goto wait_for_memory;
-
-                /*
-                 * Check whether we can use HW checksum.
-                 */
-                if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
-                    skb->ip_summed = CHECKSUM_PARTIAL;
-
-                skb_entail(sk, skb);
-                copy = size_goal;
-                max = size_goal;
-
-                /* All packets are restored as if they have
-                 * already been sent. skb_mstamp isn't set to
-                 * avoid wrong rtt estimation.
-                 */
-                if (tp->repair)
-                    TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED;
-            }
-
-            /* Try to append data to the end of skb. */
-            if (copy > seglen)
-                copy = seglen;
-
-            /* Where to copy to? */
-            if (skb_availroom(skb) > 0) {
-                /* We have some space in skb head. Superb! */
-                copy = min_t(int, copy, skb_availroom(skb));
-                err = skb_add_data_nocache(sk, skb, from, copy);
-                if (err)
-                    goto do_fault;
-            } else {
-                bool merge = true;
-                int i = skb_shinfo(skb)->nr_frags;
-                struct page_frag *pfrag = sk_page_frag(sk);
-
-                if (!sk_page_frag_refill(sk, pfrag))
-                    goto wait_for_memory;
-
-                if (!skb_can_coalesce(skb, i, pfrag->page,
-                                      pfrag->offset)) {
-                    if (i == MAX_SKB_FRAGS || !sg) {
-                        tcp_mark_push(tp, skb);
-                        goto new_segment;
-                    }
-                    merge = false;
-                }
-
-                copy = min_t(int, copy, pfrag->size - pfrag->offset);
-
-                if (!sk_wmem_schedule(sk, copy))
-                    goto wait_for_memory;
-
-                err = skb_copy_to_page_nocache(sk, from, skb,
-                                               pfrag->page,
-                                               pfrag->offset,
-                                               copy);
-                if (err)
-                    goto do_error;
-
-                /* Update the skb. */
-                if (merge) {
-                    skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
-                } else {
-                    skb_fill_page_desc(skb, i, pfrag->page,
-                                       pfrag->offset, copy);
-                    get_page(pfrag->page);
-                }
-                pfrag->offset += copy;
-            }
-
-            if (!copied)
-                TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
-
-            tp->write_seq += copy;
-            TCP_SKB_CB(skb)->end_seq += copy;
-            tcp_skb_pcount_set(skb, 0);
-
-            from += copy;
-            copied += copy;
-            if ((seglen -= copy) == 0 && iovlen == 0) {
-                tcp_tx_timestamp(sk, skb);
-                goto out;
-            }
-
-            if (skb->len < max || (flags & MSG_OOB) || unlikely(tp->repair))
-                continue;
-
-            if (forced_push(tp)) {
-                tcp_mark_push(tp, skb);
-                __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
-            } else if (skb == tcp_send_head(sk))
-                tcp_push_one(sk, mss_now);
-            continue;
-
-wait_for_sndbuf:
-            set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-wait_for_memory:
-            if (copied)
-                tcp_push(sk, flags & ~MSG_MORE, mss_now,
-                         TCP_NAGLE_PUSH, size_goal);
-
-            if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
-                goto do_error;
-
-            mss_now = tcp_send_mss(sk, &size_goal, flags);
-        }
-    }
+    while (iov_iter_count(&msg->msg_iter)) {
+        int copy = 0;
+        int max = size_goal;
+
+        skb = tcp_write_queue_tail(sk);
+        if (tcp_send_head(sk)) {
+            if (skb->ip_summed == CHECKSUM_NONE)
+                max = mss_now;
+            copy = max - skb->len;
+        }
+
+        if (copy <= 0) {
+new_segment:
+            /* Allocate new segment. If the interface is SG,
+             * allocate skb fitting to single page.
+             */
+            if (!sk_stream_memory_free(sk))
+                goto wait_for_sndbuf;
+
+            skb = sk_stream_alloc_skb(sk,
+                                      select_size(sk, sg),
+                                      sk->sk_allocation);
+            if (!skb)
+                goto wait_for_memory;
+
+            /*
+             * Check whether we can use HW checksum.
+             */
+            if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
+                skb->ip_summed = CHECKSUM_PARTIAL;
+
+            skb_entail(sk, skb);
+            copy = size_goal;
+            max = size_goal;
+
+            /* All packets are restored as if they have
+             * already been sent. skb_mstamp isn't set to
+             * avoid wrong rtt estimation.
+             */
+            if (tp->repair)
+                TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED;
+        }
+
+        /* Try to append data to the end of skb. */
+        if (copy > iov_iter_count(&msg->msg_iter))
+            copy = iov_iter_count(&msg->msg_iter);
+
+        /* Where to copy to? */
+        if (skb_availroom(skb) > 0) {
+            /* We have some space in skb head. Superb! */
+            copy = min_t(int, copy, skb_availroom(skb));
+            err = skb_add_data_nocache(sk, skb, &msg->msg_iter, copy);
+            if (err)
+                goto do_fault;
+        } else {
+            bool merge = true;
+            int i = skb_shinfo(skb)->nr_frags;
+            struct page_frag *pfrag = sk_page_frag(sk);
+
+            if (!sk_page_frag_refill(sk, pfrag))
+                goto wait_for_memory;
+
+            if (!skb_can_coalesce(skb, i, pfrag->page,
+                                  pfrag->offset)) {
+                if (i == MAX_SKB_FRAGS || !sg) {
+                    tcp_mark_push(tp, skb);
+                    goto new_segment;
+                }
+                merge = false;
+            }
+
+            copy = min_t(int, copy, pfrag->size - pfrag->offset);
+
+            if (!sk_wmem_schedule(sk, copy))
+                goto wait_for_memory;
+
+            err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
+                                           pfrag->page,
+                                           pfrag->offset,
+                                           copy);
+            if (err)
+                goto do_error;
+
+            /* Update the skb. */
+            if (merge) {
+                skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
+            } else {
+                skb_fill_page_desc(skb, i, pfrag->page,
+                                   pfrag->offset, copy);
+                get_page(pfrag->page);
+            }
+            pfrag->offset += copy;
+        }
+
+        if (!copied)
+            TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
+
+        tp->write_seq += copy;
+        TCP_SKB_CB(skb)->end_seq += copy;
+        tcp_skb_pcount_set(skb, 0);
+
+        copied += copy;
+        if (!iov_iter_count(&msg->msg_iter)) {
+            tcp_tx_timestamp(sk, skb);
+            goto out;
+        }
+
+        if (skb->len < max || (flags & MSG_OOB) || unlikely(tp->repair))
+            continue;
+
+        if (forced_push(tp)) {
+            tcp_mark_push(tp, skb);
+            __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
+        } else if (skb == tcp_send_head(sk))
+            tcp_push_one(sk, mss_now);
+        continue;
+
+wait_for_sndbuf:
+        set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+wait_for_memory:
+        if (copied)
+            tcp_push(sk, flags & ~MSG_MORE, mss_now,
+                     TCP_NAGLE_PUSH, size_goal);
+
+        if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
+            goto do_error;
+
+        mss_now = tcp_send_mss(sk, &size_goal, flags);
+    }
 
 out:
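Two things make the tcp_sendmsg() rewrite possible. First, the old "offset = copied_syn" dance disappears because tcp_sendmsg_fastopen() copies its SYN data out of the same msg_iter (see tcp_output.c below), which is left advanced past whatever was consumed. Second, each copy step goes straight from the iterator into the skb via the helpers changed in include/net/sock.h. The per-chunk step, reduced to a hedged sketch that mirrors the new skb_do_copy_data_nocache():

    /* Copy-and-checksum one chunk out of the iterator; not the literal
     * commit text, just the shape of the operation. */
    static int copy_chunk(struct sk_buff *skb, struct iov_iter *from,
                          char *to, int copy, int offset)
    {
        __wsum csum = 0;

        if (csum_and_copy_from_iter(to, copy, &csum, from) != copy)
            return -EFAULT;
        skb->csum = csum_block_add(skb->csum, csum, offset);
        return 0;
    }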
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -3055,7 +3055,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
 {
     struct tcp_sock *tp = tcp_sk(sk);
     struct tcp_fastopen_request *fo = tp->fastopen_req;
-    int syn_loss = 0, space, err = 0;
+    int syn_loss = 0, space, err = 0, copied;
     unsigned long last_syn_loss = 0;
     struct sk_buff *syn_data;
 
@@ -3093,11 +3093,16 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
         goto fallback;
     syn_data->ip_summed = CHECKSUM_PARTIAL;
     memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
-    if (unlikely(memcpy_fromiovecend(skb_put(syn_data, space),
-                                     fo->data->msg_iter.iov, 0, space))) {
+    copied = copy_from_iter(skb_put(syn_data, space), space,
+                            &fo->data->msg_iter);
+    if (unlikely(!copied)) {
         kfree_skb(syn_data);
         goto fallback;
     }
+    if (copied != space) {
+        skb_trim(syn_data, copied);
+        space = copied;
+    }
 
     /* No more data pending in inet_wait_for_connect() */
     if (space == fo->size)
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -163,8 +163,7 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
     pfh.icmph.checksum = 0;
     pfh.icmph.un.echo.id = inet->inet_sport;
     pfh.icmph.un.echo.sequence = user_icmph.icmp6_sequence;
-    /* XXX: stripping const */
-    pfh.iov = (struct iovec *)msg->msg_iter.iov;
+    pfh.msg = msg;
     pfh.wcheck = 0;
     pfh.family = AF_INET6;
 
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -609,7 +609,7 @@ out:
     return err;
 }
 
-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
+static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
                              struct flowi6 *fl6, struct dst_entry **dstp,
                              unsigned int flags)
 {
@@ -648,7 +648,7 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
     skb->ip_summed = CHECKSUM_NONE;
 
     skb->transport_header = skb->network_header;
-    err = memcpy_fromiovecend((void *)iph, from, 0, length);
+    err = memcpy_from_msg(iph, msg, length);
     if (err)
         goto error_fault;
 
@@ -886,8 +886,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
 
 back_from_confirm:
     if (inet->hdrincl)
-        /* XXX: stripping const */
-        err = rawv6_send_hdrinc(sk, (struct iovec *)msg->msg_iter.iov, len, &fl6, &dst, msg->msg_flags);
+        err = rawv6_send_hdrinc(sk, msg, len, &fl6, &dst, msg->msg_flags);
     else {
         lock_sock(sk);
         err = ip6_append_data(sk, raw6_getfrag, &rfv,
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2298,7 +2298,12 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
         goto out;
     }
 
+    /* It's a really convoluted way for userland to ask for mmaped
+     * sendmsg(), but that's what we've got...
+     */
     if (netlink_tx_is_mmaped(sk) &&
+        msg->msg_iter.type == ITER_IOVEC &&
+        msg->msg_iter.nr_segs == 1 &&
         msg->msg_iter.iov->iov_base == NULL) {
         err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
                                    &scm);
--- a/net/rxrpc/ar-output.c
+++ b/net/rxrpc/ar-output.c
@@ -232,10 +232,7 @@ int rxrpc_kernel_send_data(struct rxrpc_call *call, struct msghdr *msg,
          call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
         ret = -EPROTO; /* request phase complete for this client call */
     } else {
-        mm_segment_t oldfs = get_fs();
-        set_fs(KERNEL_DS);
         ret = rxrpc_send_data(NULL, call->socket, call, msg, len);
-        set_fs(oldfs);
     }
 
     release_sock(&call->socket->sk);
@@ -529,13 +526,11 @@ static int rxrpc_send_data(struct kiocb *iocb,
                            struct msghdr *msg, size_t len)
 {
     struct rxrpc_skb_priv *sp;
-    unsigned char __user *from;
     struct sk_buff *skb;
-    const struct iovec *iov;
     struct sock *sk = &rx->sk;
     long timeo;
     bool more;
-    int ret, ioc, segment, copied;
+    int ret, copied;
 
     timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
 
@@ -545,25 +540,17 @@ static int rxrpc_send_data(struct kiocb *iocb,
     if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
         return -EPIPE;
 
-    iov = msg->msg_iter.iov;
-    ioc = msg->msg_iter.nr_segs - 1;
-    from = iov->iov_base;
-    segment = iov->iov_len;
-    iov++;
     more = msg->msg_flags & MSG_MORE;
 
     skb = call->tx_pending;
     call->tx_pending = NULL;
 
     copied = 0;
-    do {
+    if (len > iov_iter_count(&msg->msg_iter))
+        len = iov_iter_count(&msg->msg_iter);
+    while (len) {
         int copy;
 
-        if (segment > len)
-            segment = len;
-
-        _debug("SEGMENT %d @%p", segment, from);
-
         if (!skb) {
             size_t size, chunk, max, space;
 
@@ -631,13 +618,13 @@ static int rxrpc_send_data(struct kiocb *iocb,
             /* append next segment of data to the current buffer */
             copy = skb_tailroom(skb);
             ASSERTCMP(copy, >, 0);
-            if (copy > segment)
-                copy = segment;
+            if (copy > len)
+                copy = len;
             if (copy > sp->remain)
                 copy = sp->remain;
 
             _debug("add");
-            ret = skb_add_data(skb, from, copy);
+            ret = skb_add_data(skb, &msg->msg_iter, copy);
             _debug("added");
             if (ret < 0)
                 goto efault;
@@ -646,18 +633,6 @@ static int rxrpc_send_data(struct kiocb *iocb,
         copied += copy;
 
         len -= copy;
-        segment -= copy;
-        from += copy;
-        while (segment == 0 && ioc > 0) {
-            from = iov->iov_base;
-            segment = iov->iov_len;
-            iov++;
-            ioc--;
-        }
-        if (len == 0) {
-            segment = 0;
-            ioc = 0;
-        }
 
         /* check for the far side aborting the call or a network error
          * occurring */
@@ -665,7 +640,7 @@ static int rxrpc_send_data(struct kiocb *iocb,
             goto call_aborted;
 
         /* add the packet to the send queue if it's now full */
-        if (sp->remain <= 0 || (segment == 0 && !more)) {
+        if (sp->remain <= 0 || (!len && !more)) {
             struct rxrpc_connection *conn = call->conn;
             uint32_t seq;
             size_t pad;
@@ -711,11 +686,10 @@ static int rxrpc_send_data(struct kiocb *iocb,
 
             memcpy(skb->head, &sp->hdr,
                    sizeof(struct rxrpc_header));
-            rxrpc_queue_packet(call, skb, segment == 0 && !more);
+            rxrpc_queue_packet(call, skb, !iov_iter_count(&msg->msg_iter) && !more);
             skb = NULL;
         }
-
-    } while (segment > 0);
+    }
 
 success:
     ret = copied;
--- a/net/socket.c
+++ b/net/socket.c
@@ -113,10 +113,8 @@ unsigned int sysctl_net_busy_read __read_mostly;
 unsigned int sysctl_net_busy_poll __read_mostly;
 #endif
 
-static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
-                             unsigned long nr_segs, loff_t pos);
-static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov,
-                              unsigned long nr_segs, loff_t pos);
+static ssize_t sock_read_iter(struct kiocb *iocb, struct iov_iter *to);
+static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from);
 static int sock_mmap(struct file *file, struct vm_area_struct *vma);
 
 static int sock_close(struct inode *inode, struct file *file);
@@ -142,8 +140,10 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
 static const struct file_operations socket_file_ops = {
     .owner =    THIS_MODULE,
     .llseek =   no_llseek,
-    .aio_read = sock_aio_read,
-    .aio_write = sock_aio_write,
+    .read =     new_sync_read,
+    .write =    new_sync_write,
+    .read_iter = sock_read_iter,
+    .write_iter = sock_write_iter,
     .poll =     sock_poll,
     .unlocked_ioctl = sock_ioctl,
 #ifdef CONFIG_COMPAT
@@ -845,63 +845,47 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
     return sock->ops->splice_read(sock, ppos, pipe, len, flags);
 }
 
-static ssize_t do_sock_read(struct msghdr *msg, struct kiocb *iocb,
-        struct file *file, const struct iovec *iov,
-        unsigned long nr_segs)
+static ssize_t sock_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
+    struct file *file = iocb->ki_filp;
     struct socket *sock = file->private_data;
+    struct msghdr msg = {.msg_iter = *to};
+    ssize_t res;
 
-    msg->msg_name = NULL;
-    msg->msg_namelen = 0;
-    msg->msg_control = NULL;
-    msg->msg_controllen = 0;
-    iov_iter_init(&msg->msg_iter, READ, iov, nr_segs, iocb->ki_nbytes);
-    msg->msg_flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0;
+    if (file->f_flags & O_NONBLOCK)
+        msg.msg_flags = MSG_DONTWAIT;
 
-    return __sock_recvmsg(iocb, sock, msg, iocb->ki_nbytes, msg->msg_flags);
-}
-
-static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
-                             unsigned long nr_segs, loff_t pos)
-{
-    struct msghdr msg;
-
-    if (pos != 0)
+    if (iocb->ki_pos != 0)
         return -ESPIPE;
 
     if (iocb->ki_nbytes == 0)   /* Match SYS5 behaviour */
         return 0;
 
-    return do_sock_read(&msg, iocb, iocb->ki_filp, iov, nr_segs);
+    res = __sock_recvmsg(iocb, sock, &msg,
+                         iocb->ki_nbytes, msg.msg_flags);
+    *to = msg.msg_iter;
+    return res;
 }
 
-static ssize_t do_sock_write(struct msghdr *msg, struct kiocb *iocb,
-        struct file *file, const struct iovec *iov,
-        unsigned long nr_segs)
+static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
+    struct file *file = iocb->ki_filp;
     struct socket *sock = file->private_data;
+    struct msghdr msg = {.msg_iter = *from};
+    ssize_t res;
 
-    msg->msg_name = NULL;
-    msg->msg_namelen = 0;
-    msg->msg_control = NULL;
-    msg->msg_controllen = 0;
-    iov_iter_init(&msg->msg_iter, WRITE, iov, nr_segs, iocb->ki_nbytes);
-    msg->msg_flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0;
-    if (sock->type == SOCK_SEQPACKET)
-        msg->msg_flags |= MSG_EOR;
-
-    return __sock_sendmsg(iocb, sock, msg, iocb->ki_nbytes);
-}
-
-static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov,
-                              unsigned long nr_segs, loff_t pos)
-{
-    struct msghdr msg;
-
-    if (pos != 0)
+    if (iocb->ki_pos != 0)
         return -ESPIPE;
 
-    return do_sock_write(&msg, iocb, iocb->ki_filp, iov, nr_segs);
+    if (file->f_flags & O_NONBLOCK)
+        msg.msg_flags = MSG_DONTWAIT;
+
+    if (sock->type == SOCK_SEQPACKET)
+        msg.msg_flags |= MSG_EOR;
+
+    res = __sock_sendmsg(iocb, sock, &msg, iocb->ki_nbytes);
+    *from = msg.msg_iter;
+    return res;
 }
 
 /*
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -189,7 +189,6 @@ err:
  * tipc_msg_build - create buffer chain containing specified header and data
  * @mhdr: Message header, to be prepended to data
  * @m: User message
- * @offset: Posision in iov to start copying from
  * @dsz: Total length of user data
  * @pktmax: Max packet size that can be used
  * @list: Buffer or chain of buffers to be returned to caller
@@ -221,8 +220,7 @@ int tipc_msg_build(struct net *net, struct tipc_msg *mhdr, struct msghdr *m,
         __skb_queue_tail(list, skb);
         skb_copy_to_linear_data(skb, mhdr, mhsz);
         pktpos = skb->data + mhsz;
-        if (!dsz || !memcpy_fromiovecend(pktpos, m->msg_iter.iov, offset,
-                                         dsz))
+        if (copy_from_iter(pktpos, dsz, &m->msg_iter) == dsz)
             return dsz;
         rc = -EFAULT;
         goto error;
@@ -252,12 +250,11 @@ int tipc_msg_build(struct net *net, struct tipc_msg *mhdr, struct msghdr *m,
         if (drem < pktrem)
             pktrem = drem;
 
-        if (memcpy_fromiovecend(pktpos, m->msg_iter.iov, offset, pktrem)) {
+        if (copy_from_iter(pktpos, pktrem, &m->msg_iter) != pktrem) {
             rc = -EFAULT;
             goto error;
         }
         drem -= pktrem;
-        offset += pktrem;
 
         if (!drem)
             break;
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -733,6 +733,7 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
     struct net *net = sock_net(sk);
     struct tipc_msg *mhdr = &tipc_sk(sk)->phdr;
     struct sk_buff_head head;
+    struct iov_iter save = msg->msg_iter;
     uint mtu;
     int rc;
 
@@ -758,8 +759,10 @@ new_mtu:
         rc = dsz;
         break;
     }
-    if (rc == -EMSGSIZE)
+    if (rc == -EMSGSIZE) {
+        msg->msg_iter = save;
         goto new_mtu;
+    }
     if (rc != -ELINKCONG)
         break;
     tipc_sk(sk)->link_cong = 1;
@@ -895,6 +898,7 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
     struct sk_buff_head head;
     struct sk_buff *skb;
     struct tipc_name_seq *seq = &dest->addr.nameseq;
+    struct iov_iter save;
     u32 mtu;
     long timeo;
     int rc;
@@ -963,6 +967,7 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
         msg_set_hdr_sz(mhdr, BASIC_H_SIZE);
     }
 
+    save = m->msg_iter;
 new_mtu:
     mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
     __skb_queue_head_init(&head);
@@ -980,8 +985,10 @@ new_mtu:
             rc = dsz;
             break;
         }
-        if (rc == -EMSGSIZE)
+        if (rc == -EMSGSIZE) {
+            m->msg_iter = save;
             goto new_mtu;
+        }
         if (rc != -ELINKCONG)
             break;
         tsk->link_cong = 1;
@@ -1052,6 +1059,7 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
     long timeo;
     u32 dnode;
     uint mtu, send, sent = 0;
+    struct iov_iter save;
 
     /* Handle implied connection establishment */
     if (unlikely(dest)) {
@@ -1078,6 +1086,7 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
     dnode = tsk_peer_node(tsk);
 
 next:
+    save = m->msg_iter;
     mtu = tsk->max_pkt;
     send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
     __skb_queue_head_init(&head);
@@ -1097,6 +1106,7 @@ next:
         if (rc == -EMSGSIZE) {
             tsk->max_pkt = tipc_node_get_mtu(net, dnode,
                                              portid);
+            m->msg_iter = save;
             goto next;
         }
         if (rc != -ELINKCONG)
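Because the copy primitives consume the iterator, any retry path must snapshot and restore it; that is all the tipc changes above do. The idiom in isolation, where attempt_to_send() is a hypothetical stand-in for the build-and-send step:

    struct iov_iter save = msg->msg_iter;   /* plain assignment is enough */
    int rc;

    do {
        rc = attempt_to_send(msg);          /* consumes msg->msg_iter */
        if (rc == -EMSGSIZE)
            msg->msg_iter = save;           /* rewind before retrying */
    } while (rc == -EMSGSIZE);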
|
@ -1850,8 +1850,7 @@ static ssize_t vmci_transport_stream_enqueue(
|
|||
struct msghdr *msg,
|
||||
size_t len)
|
||||
{
|
||||
/* XXX: stripping const */
|
||||
return vmci_qpair_enquev(vmci_trans(vsk)->qpair, (struct iovec *)msg->msg_iter.iov, len, 0);
|
||||
return vmci_qpair_enquev(vmci_trans(vsk)->qpair, msg, len, 0);
|
||||
}
|
||||
|
||||
static s64 vmci_transport_stream_has_data(struct vsock_sock *vsk)
|
||||
|
|