xen/pvcalls: implement poll command
For active sockets, check the indexes and use the inflight_conn_req
waitqueue to wait.

For passive sockets, if an accept is outstanding
(PVCALLS_FLAG_ACCEPT_INFLIGHT), check whether it has been answered by
looking at bedata->rsp[req_id]. If so, return POLLIN. Otherwise use the
inflight_accept_req waitqueue.

If no accepts are inflight, send PVCALLS_POLL to the backend. If we have
outstanding POLL requests awaiting a response, use the inflight_req
waitqueue: inflight_req is woken when a new response is received; on
wakeup we check whether the POLL response has arrived by looking at the
PVCALLS_FLAG_POLL_RET flag. We set the flag from
pvcalls_front_event_handler if the response was for a POLL command.

In pvcalls_front_event_handler, get the struct sock_mapping from the
poll id (we previously converted the struct sock_mapping pointer to a
uintptr_t and used it as the id).

Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
CC: boris.ostrovsky@oracle.com
CC: jgross@suse.com
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Parent: ae0d04052e
Commit: 5842c83596
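The commit message stores the struct sock_mapping pointer in the 64-bit poll id and recovers it in the event handler. The stand-in below is a minimal sketch, not the real xen_pvcalls_request layout from the Xen interface headers; the structure name and the command value are illustrative. It shows just the fields the POLL round trip relies on:

#include <stdint.h>
#include <stdio.h>

struct sock_mapping;                     /* opaque here */

enum { EXAMPLE_PVCALLS_POLL = 0 };       /* placeholder, not the real command value */

struct poll_request_view {               /* simplified stand-in */
	uint32_t req_id;        /* slot in bedata->rsp[], picked by get_request() */
	uint32_t cmd;           /* PVCALLS_POLL */
	uint64_t id;            /* (uintptr_t)map, echoed back in the response */
};

int main(void)
{
	struct sock_mapping *map = (struct sock_mapping *)(uintptr_t)0x1234;
	struct poll_request_view req = {
		.req_id = 0,
		.cmd = EXAMPLE_PVCALLS_POLL,
		.id = (uintptr_t)map,
	};
	/* The event handler performs the inverse cast on the response id. */
	struct sock_mapping *recovered = (struct sock_mapping *)(uintptr_t)req.id;

	printf("pointer round trip ok: %d\n", recovered == map);
	return 0;
}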
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -83,6 +83,8 @@ struct sock_mapping {
 		 * Only one poll operation can be inflight for a given socket.
 		 */
 #define PVCALLS_FLAG_ACCEPT_INFLIGHT 0
+#define PVCALLS_FLAG_POLL_INFLIGHT   1
+#define PVCALLS_FLAG_POLL_RET        2
 			uint8_t flags;
 			uint32_t inflight_req_id;
 			struct sock_mapping *accept_map;
@@ -154,15 +156,32 @@ again:
 		rsp = RING_GET_RESPONSE(&bedata->ring, bedata->ring.rsp_cons);
 
 		req_id = rsp->req_id;
-		dst = (uint8_t *)&bedata->rsp[req_id] + sizeof(rsp->req_id);
-		src = (uint8_t *)rsp + sizeof(rsp->req_id);
-		memcpy(dst, src, sizeof(*rsp) - sizeof(rsp->req_id));
-		/*
-		 * First copy the rest of the data, then req_id. It is
-		 * paired with the barrier when accessing bedata->rsp.
-		 */
-		smp_wmb();
-		bedata->rsp[req_id].req_id = rsp->req_id;
+		if (rsp->cmd == PVCALLS_POLL) {
+			struct sock_mapping *map = (struct sock_mapping *)(uintptr_t)
+						   rsp->u.poll.id;
+
+			clear_bit(PVCALLS_FLAG_POLL_INFLIGHT,
+				  (void *)&map->passive.flags);
+			/*
+			 * clear INFLIGHT, then set RET. It pairs with
+			 * the checks at the beginning of
+			 * pvcalls_front_poll_passive.
+			 */
+			smp_wmb();
+			set_bit(PVCALLS_FLAG_POLL_RET,
+				(void *)&map->passive.flags);
+		} else {
+			dst = (uint8_t *)&bedata->rsp[req_id] +
+				sizeof(rsp->req_id);
+			src = (uint8_t *)rsp + sizeof(rsp->req_id);
+			memcpy(dst, src, sizeof(*rsp) - sizeof(rsp->req_id));
+			/*
+			 * First copy the rest of the data, then req_id. It is
+			 * paired with the barrier when accessing bedata->rsp.
+			 */
+			smp_wmb();
+			bedata->rsp[req_id].req_id = req_id;
+		}
 
 		done = 1;
 		bedata->ring.rsp_cons++;
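The PVCALLS_POLL branch above clears INFLIGHT, issues smp_wmb(), then sets RET; it pairs with the test_and_clear_bit()/test_and_set_bit() sequence at the top of pvcalls_front_poll_passive() added in the next hunk. Below is a minimal userspace model of that flag handshake, assuming C11 atomics as stand-ins for the kernel bitops and barrier; the bit positions and names are illustrative, not the driver code itself:

#include <stdatomic.h>
#include <stdio.h>

#define FLAG_POLL_INFLIGHT	(1u << 1)	/* illustrative bit positions */
#define FLAG_POLL_RET		(1u << 2)

static _Atomic unsigned int flags;

/* Models the PVCALLS_POLL branch of the event handler above. */
static void poll_response_arrived(void)
{
	atomic_fetch_and_explicit(&flags, ~FLAG_POLL_INFLIGHT,
				  memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* stands in for smp_wmb() */
	atomic_fetch_or_explicit(&flags, FLAG_POLL_RET,
				 memory_order_relaxed);
}

/* Models the flag checks at the top of pvcalls_front_poll_passive(). */
static int poll_passive_flags(void)
{
	unsigned int old;

	/* First consume RET: a previously issued POLL has completed. */
	old = atomic_fetch_and_explicit(&flags, ~FLAG_POLL_RET,
					memory_order_acquire);
	if (old & FLAG_POLL_RET)
		return 1;			/* would return POLLIN */

	/* Then try to become the single in-flight POLL request. */
	old = atomic_fetch_or_explicit(&flags, FLAG_POLL_INFLIGHT,
				       memory_order_acquire);
	if (old & FLAG_POLL_INFLIGHT)
		return 0;			/* another POLL is pending: just wait */

	return 0;				/* would send PVCALLS_POLL here */
}

int main(void)
{
	printf("first poll:  %d\n", poll_passive_flags());	/* 0, sends request */
	poll_response_arrived();
	printf("second poll: %d\n", poll_passive_flags());	/* 1, RET was set */
	return 0;
}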
@@ -846,6 +865,113 @@ received:
 	return ret;
 }
 
+static unsigned int pvcalls_front_poll_passive(struct file *file,
+					       struct pvcalls_bedata *bedata,
+					       struct sock_mapping *map,
+					       poll_table *wait)
+{
+	int notify, req_id, ret;
+	struct xen_pvcalls_request *req;
+
+	if (test_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
+		     (void *)&map->passive.flags)) {
+		uint32_t req_id = READ_ONCE(map->passive.inflight_req_id);
+
+		if (req_id != PVCALLS_INVALID_ID &&
+		    READ_ONCE(bedata->rsp[req_id].req_id) == req_id)
+			return POLLIN | POLLRDNORM;
+
+		poll_wait(file, &map->passive.inflight_accept_req, wait);
+		return 0;
+	}
+
+	if (test_and_clear_bit(PVCALLS_FLAG_POLL_RET,
+			       (void *)&map->passive.flags))
+		return POLLIN | POLLRDNORM;
+
+	/*
+	 * First check RET, then INFLIGHT. No barriers necessary to
+	 * ensure execution ordering because of the conditional
+	 * instructions creating control dependencies.
+	 */
+
+	if (test_and_set_bit(PVCALLS_FLAG_POLL_INFLIGHT,
+			     (void *)&map->passive.flags)) {
+		poll_wait(file, &bedata->inflight_req, wait);
+		return 0;
+	}
+
+	spin_lock(&bedata->socket_lock);
+	ret = get_request(bedata, &req_id);
+	if (ret < 0) {
+		spin_unlock(&bedata->socket_lock);
+		return ret;
+	}
+	req = RING_GET_REQUEST(&bedata->ring, req_id);
+	req->req_id = req_id;
+	req->cmd = PVCALLS_POLL;
+	req->u.poll.id = (uintptr_t) map;
+
+	bedata->ring.req_prod_pvt++;
+	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
+	spin_unlock(&bedata->socket_lock);
+	if (notify)
+		notify_remote_via_irq(bedata->irq);
+
+	poll_wait(file, &bedata->inflight_req, wait);
+	return 0;
+}
+
+static unsigned int pvcalls_front_poll_active(struct file *file,
+					      struct pvcalls_bedata *bedata,
+					      struct sock_mapping *map,
+					      poll_table *wait)
+{
+	unsigned int mask = 0;
+	int32_t in_error, out_error;
+	struct pvcalls_data_intf *intf = map->active.ring;
+
+	out_error = intf->out_error;
+	in_error = intf->in_error;
+
+	poll_wait(file, &map->active.inflight_conn_req, wait);
+	if (pvcalls_front_write_todo(map))
+		mask |= POLLOUT | POLLWRNORM;
+	if (pvcalls_front_read_todo(map))
+		mask |= POLLIN | POLLRDNORM;
+	if (in_error != 0 || out_error != 0)
+		mask |= POLLERR;
+
+	return mask;
+}
+
+unsigned int pvcalls_front_poll(struct file *file, struct socket *sock,
+			       poll_table *wait)
+{
+	struct pvcalls_bedata *bedata;
+	struct sock_mapping *map;
+	int ret;
+
+	pvcalls_enter();
+	if (!pvcalls_front_dev) {
+		pvcalls_exit();
+		return POLLNVAL;
+	}
+	bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+
+	map = (struct sock_mapping *) sock->sk->sk_send_head;
+	if (!map) {
+		pvcalls_exit();
+		return POLLNVAL;
+	}
+	if (map->active_socket)
+		ret = pvcalls_front_poll_active(file, bedata, map, wait);
+	else
+		ret = pvcalls_front_poll_passive(file, bedata, map, wait);
+	pvcalls_exit();
+	return ret;
+}
+
 static const struct xenbus_device_id pvcalls_front_ids[] = {
 	{ "pvcalls" },
 	{ "" }
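For active sockets the mask above is derived from the data-ring indexes: pvcalls_front_read_todo() reports unconsumed bytes on the in ring and pvcalls_front_write_todo() reports free space on the out ring. The sketch below is a rough userspace model of that index arithmetic only; the real helpers also take the in/out error fields into account, and the type and function names here are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t ring_idx_t;	/* free-running producer/consumer indexes */

/* Bytes queued between producer and consumer; index wrap-around is harmless. */
static ring_idx_t queued(ring_idx_t prod, ring_idx_t cons)
{
	return prod - cons;
}

/* Readable: the in ring holds bytes the frontend has not consumed yet. */
static bool read_todo(ring_idx_t in_prod, ring_idx_t in_cons)
{
	return queued(in_prod, in_cons) != 0;
}

/* Writable: the out ring still has free space for the frontend to fill. */
static bool write_todo(ring_idx_t out_prod, ring_idx_t out_cons,
		       ring_idx_t ring_size)
{
	return queued(out_prod, out_cons) < ring_size;
}

int main(void)
{
	/* Example: 16-byte ring, 4 bytes pending to read, out ring completely full. */
	printf("read_todo:  %d\n", read_todo(4, 0));		/* 1 */
	printf("write_todo: %d\n", write_todo(16, 0, 16));	/* 0: no space left */
	return 0;
}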
--- a/drivers/xen/pvcalls-front.h
+++ b/drivers/xen/pvcalls-front.h
@@ -20,5 +20,8 @@ int pvcalls_front_recvmsg(struct socket *sock,
 			  struct msghdr *msg,
 			  size_t len,
 			  int flags);
+unsigned int pvcalls_front_poll(struct file *file,
+				struct socket *sock,
+				poll_table *wait);
 
 #endif
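pvcalls_front_poll() is declared here for whichever layer ends up driving the frontend; the patch itself adds no caller. Purely as an illustration (pvcalls_sock_poll and pvcalls_stream_ops are hypothetical, not part of the driver), a socket-level poll hook could simply forward to it:

#include <linux/net.h>
#include <linux/poll.h>
#include "pvcalls-front.h"

/* Hypothetical forwarding hook; not part of this patch. */
static unsigned int pvcalls_sock_poll(struct file *file, struct socket *sock,
				      poll_table *wait)
{
	/* Delegate to the frontend's active/passive dispatch. */
	return pvcalls_front_poll(file, sock, wait);
}

/* Illustrative wiring into a proto_ops table; other handlers omitted. */
static const struct proto_ops pvcalls_stream_ops = {
	.family	= PF_INET,
	.poll	= pvcalls_sock_poll,
};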