vmd(8): simplify vcpu logic, removing uart & vionet reads

Remove legacy state handling on the ns8250 and virtio network devices
originally put in place before using libevent for async device
events. The vcpu thread doesn't need to process device data as it is
handled by the libevent thread.

This has the benefit of simplifying some of the message passing
between threads introduced to the ns8250 uart since both the vcpu
and libevent threads were processing read events.

No functional change intended. Tested by many, including abieber@,
weerd@, Mischa Peters, and Matthias Schmidt. (Thanks.)

OK mlarkin@
This commit is contained in:
dv 2021-07-16 16:21:22 +00:00
parent 9ce0cde1d1
commit 39d683862c
5 changed files with 17 additions and 102 deletions

View File

@@ -1,4 +1,4 @@
/* $OpenBSD: ns8250.c,v 1.31 2021/06/16 16:55:02 dv Exp $ */
/* $OpenBSD: ns8250.c,v 1.32 2021/07/16 16:21:22 dv Exp $ */
/*
* Copyright (c) 2016 Mike Larkin <mlarkin@openbsd.org>
*
@@ -36,14 +36,10 @@
extern char *__progname;
struct ns8250_dev com1_dev;
/* Flags to distinguish calling threads to com_rcv */
#define NS8250_DEV_THREAD 0
#define NS8250_CPU_THREAD 1
static struct vm_dev_pipe dev_pipe;
static void com_rcv_event(int, short, void *);
static void com_rcv(struct ns8250_dev *, uint32_t, uint32_t, int);
static void com_rcv(struct ns8250_dev *, uint32_t, uint32_t);
/*
* ns8250_pipe_dispatch
@@ -58,11 +54,6 @@ ns8250_pipe_dispatch(int fd, short event, void *arg)
msg = vm_pipe_recv(&dev_pipe);
switch(msg) {
case NS8250_ZERO_READ:
log_debug("%s: resetting events after zero byte read", __func__);
event_del(&com1_dev.event);
event_add(&com1_dev.wake, NULL);
break;
case NS8250_RATELIMIT:
evtimer_add(&com1_dev.rate, &com1_dev.rate_tv);
break;
@@ -110,7 +101,6 @@ ns8250_init(int fd, uint32_t vmid)
com1_dev.fd = fd;
com1_dev.irq = 4;
com1_dev.portid = NS8250_COM1;
com1_dev.rcv_pending = 0;
com1_dev.vmid = vmid;
com1_dev.byte_out = 0;
com1_dev.regs.divlo = 1;
@@ -163,17 +153,8 @@ com_rcv_event(int fd, short kind, void *arg)
return;
}
/*
* We already have other data pending to be received. The data that
* has become available now will be moved to the com port later by
* the vcpu.
*/
if (!com1_dev.rcv_pending) {
if (com1_dev.regs.lsr & LSR_RXRDY)
com1_dev.rcv_pending = 1;
else
com_rcv(&com1_dev, (uintptr_t)arg, 0, NS8250_DEV_THREAD);
}
if ((com1_dev.regs.lsr & LSR_RXRDY) == 0)
com_rcv(&com1_dev, (uintptr_t)arg, 0);
/* If pending interrupt, inject */
if ((com1_dev.regs.iir & IIR_NOPEND) == 0) {
@@ -216,7 +197,7 @@ com_rcv_handle_break(struct ns8250_dev *com, uint8_t cmd)
* Must be called with the mutex of the com device acquired
*/
static void
com_rcv(struct ns8250_dev *com, uint32_t vm_id, uint32_t vcpu_id, int thread)
com_rcv(struct ns8250_dev *com, uint32_t vm_id, uint32_t vcpu_id)
{
char buf[2];
ssize_t sz;
@@ -236,13 +217,9 @@ com_rcv(struct ns8250_dev *com, uint32_t vm_id, uint32_t vcpu_id, int thread)
if (errno != EAGAIN)
log_warn("unexpected read error on com device");
} else if (sz == 0) {
if (thread == NS8250_DEV_THREAD) {
event_del(&com->event);
event_add(&com->wake, NULL);
} else {
/* Called by vcpu thread, use pipe for event changes */
vm_pipe_send(&dev_pipe, NS8250_ZERO_READ);
}
/* Zero read typically occurs on a disconnect */
event_del(&com->event);
event_add(&com->wake, NULL);
return;
} else if (sz != 1 && sz != 2)
log_warnx("unexpected read return value %zd on com device", sz);
@@ -258,8 +235,6 @@ com_rcv(struct ns8250_dev *com, uint32_t vm_id, uint32_t vcpu_id, int thread)
com->regs.iir &= ~IIR_NOPEND;
}
}
com->rcv_pending = fd_hasdata(com->fd);
}
/*
@@ -337,9 +312,6 @@ vcpu_process_com_data(struct vm_exit *vei, uint32_t vm_id, uint32_t vcpu_id)
*/
if (com1_dev.regs.iir == 0x0)
com1_dev.regs.iir = 0x1;
if (com1_dev.rcv_pending)
com_rcv(&com1_dev, vm_id, vcpu_id, NS8250_CPU_THREAD);
}
/* If pending interrupt, make sure it gets injected */
@@ -688,7 +660,6 @@ ns8250_restore(int fd, int con_fd, uint32_t vmid)
com1_dev.fd = con_fd;
com1_dev.irq = 4;
com1_dev.portid = NS8250_COM1;
com1_dev.rcv_pending = 0;
com1_dev.vmid = vmid;
com1_dev.byte_out = 0;
com1_dev.regs.divlo = 1;

View File

@@ -1,4 +1,4 @@
/* $OpenBSD: ns8250.h,v 1.9 2019/12/11 06:45:16 pd Exp $ */
/* $OpenBSD: ns8250.h,v 1.10 2021/07/16 16:21:22 dv Exp $ */
/*
* Copyright (c) 2016 Mike Larkin <mlarkin@openbsd.org>
*
@@ -69,7 +69,6 @@ struct ns8250_dev {
enum ns8250_portid portid;
int fd;
int irq;
int rcv_pending;
uint32_t vmid;
uint64_t byte_out;
uint32_t baudrate;

View File

@@ -1,4 +1,4 @@
/* $OpenBSD: virtio.c,v 1.91 2021/06/21 02:38:18 dv Exp $ */
/* $OpenBSD: virtio.c,v 1.92 2021/07/16 16:21:22 dv Exp $ */
/*
* Copyright (c) 2015 Mike Larkin <mlarkin@openbsd.org>
@@ -1254,12 +1254,12 @@ static int
vionet_rx(struct vionet_dev *dev)
{
char buf[PAGE_SIZE];
int hasdata, num_enq = 0, spc = 0;
int num_enq = 0, spc = 0;
struct ether_header *eh;
ssize_t sz;
do {
sz = read(dev->fd, buf, sizeof buf);
sz = read(dev->fd, buf, sizeof(buf));
if (sz == -1) {
/*
* If we get EAGAIN, No data is currently available.
@@ -1270,21 +1270,17 @@ vionet_rx(struct vionet_dev *dev)
"device");
} else if (sz > 0) {
eh = (struct ether_header *)buf;
if (!dev->lockedmac || sz < ETHER_HDR_LEN ||
if (!dev->lockedmac ||
ETHER_IS_MULTICAST(eh->ether_dhost) ||
memcmp(eh->ether_dhost, dev->mac,
sizeof(eh->ether_dhost)) == 0)
num_enq += vionet_enq_rx(dev, buf, sz, &spc);
} else if (sz == 0) {
log_debug("process_rx: no data");
hasdata = 0;
break;
}
} while (spc > 0 && sz > 0);
hasdata = fd_hasdata(dev->fd);
} while (spc && hasdata);
dev->rx_pending = hasdata;
return (num_enq);
}
@@ -1301,16 +1297,6 @@ vionet_rx_event(int fd, short kind, void *arg)
mutex_lock(&dev->mutex);
/*
* We already have other data pending to be received. The data that
* has become available now will be enqueued to the vionet_dev
* later.
*/
if (dev->rx_pending) {
mutex_unlock(&dev->mutex);
return;
}
if (vionet_rx(dev) > 0) {
/* XXX: vcpu_id */
vcpu_assert_pic_irq(dev->vm_id, 0, dev->irq);
@@ -1319,40 +1305,6 @@ vionet_rx_event(int fd, short kind, void *arg)
mutex_unlock(&dev->mutex);
}
/*
* vionet_process_rx
*
* Processes any remaining pending receivable data for a vionet device.
* Called on VCPU exit. Although we poll on the tap file descriptor of
* a vionet_dev in a separate thread, this function still needs to be
* called on VCPU exit: it can happen that not all data fits into the
* receive queue of the vionet_dev immediately. So any outstanding data
* is handled here.
*
* Parameters:
* vm_id: VM ID of the VM for which to process vionet events
*/
void
vionet_process_rx(uint32_t vm_id)
{
int i;
for (i = 0 ; i < nr_vionet; i++) {
mutex_lock(&vionet[i].mutex);
if (!vionet[i].rx_added) {
mutex_unlock(&vionet[i].mutex);
continue;
}
if (vionet[i].rx_pending) {
if (vionet_rx(&vionet[i])) {
vcpu_assert_pic_irq(vm_id, 0, vionet[i].irq);
}
}
mutex_unlock(&vionet[i].mutex);
}
}
/*
* Must be called with dev->mutex acquired.
*/
@@ -1383,7 +1335,6 @@ vionet_notify_rx(struct vionet_dev *dev)
/* Compute offset into avail ring */
avail = (struct vring_avail *)(vr + dev->vq[RXQ].vq_availoffset);
dev->rx_added = 1;
dev->vq[RXQ].notified_avail = avail->idx - 1;
free(vr);
@@ -1937,7 +1888,6 @@ virtio_init(struct vmd_vm *vm, int child_cdrom,
vionet[i].vq[TXQ].last_avail = 0;
vionet[i].vq[TXQ].notified_avail = 0;
vionet[i].fd = child_taps[i];
vionet[i].rx_pending = 0;
vionet[i].vm_id = vcp->vcp_id;
vionet[i].vm_vmid = vm->vm_vmid;
vionet[i].irq = pci_get_dev_irq(id);
@@ -2222,7 +2172,6 @@ vionet_restore(int fd, struct vmd_vm *vm, int *child_taps)
return (-1);
}
vionet[i].fd = child_taps[i];
vionet[i].rx_pending = 0;
vionet[i].vm_id = vcp->vcp_id;
vionet[i].vm_vmid = vm->vm_vmid;
vionet[i].irq = pci_get_dev_irq(vionet[i].pci_id);

View File

@@ -1,4 +1,4 @@
/* $OpenBSD: virtio.h,v 1.40 2021/06/21 02:38:18 dv Exp $ */
/* $OpenBSD: virtio.h,v 1.41 2021/07/16 16:21:22 dv Exp $ */
/*
* Copyright (c) 2015 Mike Larkin <mlarkin@openbsd.org>
@@ -217,8 +217,7 @@ struct vionet_dev {
struct virtio_vq_info vq[VIRTIO_MAX_QUEUES];
int fd, rx_added;
int rx_pending;
int fd;
uint32_t vm_id;
uint32_t vm_vmid;
int irq;

View File

@@ -1,4 +1,4 @@
/* $OpenBSD: vm.c,v 1.63 2021/06/16 16:55:02 dv Exp $ */
/* $OpenBSD: vm.c,v 1.64 2021/07/16 16:21:22 dv Exp $ */
/*
* Copyright (c) 2015 Mike Larkin <mlarkin@openbsd.org>
@@ -1711,9 +1711,6 @@ vcpu_exit(struct vm_run_params *vrp)
__progname, vrp->vrp_exit_reason);
}
/* Process any pending traffic */
vionet_process_rx(vrp->vrp_vm_id);
vrp->vrp_continue = 1;
return (0);