/*-
 * Copyright (c) 2013  Chris Torek <torek @ torek net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/usr.sbin/bhyve/virtio.c 266592 2014-05-23 19:06:35Z jhb $");

#include <sys/param.h>
#include <sys/uio.h>

#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <pthread.h>

#include "bhyverun.h"
#include "pci_emul.h"
#include "virtio.h"

/*
 * Functions for dealing with generalized "virtual devices" as
 * defined by <https://www.google.com/#output=search&q=virtio+spec>
 */

/*
 * In case we decide to relax the "virtio softc comes at the
 * front of virtio-based device softc" constraint, let's use
 * this to convert.
 */
#define DEV_SOFTC(vs) ((void *)(vs))

/*
 * Link a virtio_softc to its constants, the device softc, and
 * the PCI emulation.
 */
void
vi_softc_linkup(struct virtio_softc *vs, struct virtio_consts *vc,
		void *dev_softc, struct pci_devinst *pi,
		struct vqueue_info *queues)
{
	int i;

	/* vs and dev_softc addresses must match */
	assert((void *)vs == dev_softc);
	vs->vs_vc = vc;
	vs->vs_pi = pi;
	pi->pi_arg = vs;

	vs->vs_queues = queues;
	for (i = 0; i < vc->vc_nvq; i++) {
		queues[i].vq_vs = vs;
		queues[i].vq_num = i;
	}
}
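
/*
 * Illustrative sketch only: how a device emulation might call
 * vi_softc_linkup() from its init routine.  The "vtfoo" names and
 * VTFOO_MAXQ are hypothetical stand-ins for a real pci_virtio_*
 * device.  Note the layout constraint checked by the assert above:
 * the virtio_softc must be the first member, so that the two softc
 * addresses coincide.
 *
 *	struct pci_vtfoo_softc {
 *		struct virtio_softc vfs_vs;	(must be first)
 *		struct vqueue_info vfs_queues[VTFOO_MAXQ];
 *		(device-private state follows)
 *	};
 *
 *	struct pci_vtfoo_softc *sc;
 *
 *	sc = calloc(1, sizeof(struct pci_vtfoo_softc));
 *	vi_softc_linkup(&sc->vfs_vs, &vtfoo_vi_consts, sc, pi,
 *	    sc->vfs_queues);
 */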

/*
 * Reset device (device-wide).  This erases all queues, i.e.,
 * all the queues become invalid (though we don't wipe out the
 * internal pointers, we just clear the VQ_ALLOC flag).
 *
 * It resets negotiated features to "none".
 *
 * If MSI-X is enabled, this also resets all the vectors to NO_VECTOR.
 */
void
vi_reset_dev(struct virtio_softc *vs)
{
	struct vqueue_info *vq;
	int i, nvq;

	nvq = vs->vs_vc->vc_nvq;
	for (vq = vs->vs_queues, i = 0; i < nvq; vq++, i++) {
		vq->vq_flags = 0;
		vq->vq_last_avail = 0;
		vq->vq_pfn = 0;
		vq->vq_msix_idx = VIRTIO_MSI_NO_VECTOR;
	}
	vs->vs_negotiated_caps = 0;
	vs->vs_curq = 0;
	/* vs->vs_status = 0; -- redundant */
	vs->vs_isr = 0;
	vs->vs_msix_cfg_idx = VIRTIO_MSI_NO_VECTOR;
}

/*
 * Set I/O BAR (usually 0) to map virtio config registers.
 */
void
vi_set_io_bar(struct virtio_softc *vs, int barnum)
{
	size_t size;

	/*
	 * ??? should we use CFG0 if MSI-X is disabled?
	 * Existing code did not...
	 */
	size = VTCFG_R_CFG1 + vs->vs_vc->vc_cfgsize;
	pci_emul_alloc_bar(vs->vs_pi, barnum, PCIBAR_IO, size);
}

/*
 * Initialize MSI-X vector capabilities if we're to use MSI-X,
 * or MSI capabilities if not.
 *
 * We assume we want one MSI-X vector per queue, here, plus one
 * for the config vector.
 */
int
vi_intr_init(struct virtio_softc *vs, int barnum, int use_msix)
{
	int nvec;

	if (use_msix) {
		vs->vs_flags |= VIRTIO_USE_MSIX;
		vi_reset_dev(vs); /* set all vectors to NO_VECTOR */
		nvec = vs->vs_vc->vc_nvq + 1;
		if (pci_emul_add_msixcap(vs->vs_pi, nvec, barnum))
			return (1);
	} else {
		vs->vs_flags &= ~VIRTIO_USE_MSIX;
		/* Only 1 MSI vector for bhyve */
		pci_emul_add_msicap(vs->vs_pi, 1);
	}
	return (0);
}
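
/*
 * For example (illustrative numbers): a hypothetical device with
 * vc_nvq == 3 asks for nvec = 3 + 1 = 4 MSI-X vectors: one per
 * queue plus one for configuration-change interrupts.
 */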

/*
 * Initialize the currently-selected virtio queue (vs->vs_curq).
 * The guest just gave us a page frame number, from which we can
 * calculate the addresses of the queue.
 */
void
vi_vq_init(struct virtio_softc *vs, uint32_t pfn)
{
	struct vqueue_info *vq;
	uint64_t phys;
	size_t size;
	char *base;

	vq = &vs->vs_queues[vs->vs_curq];
	vq->vq_pfn = pfn;
	phys = (uint64_t)pfn << VRING_PFN;
	size = vring_size(vq->vq_qsize);
	base = paddr_guest2host(vs->vs_pi->pi_vmctx, phys, size);

	/* First page(s) are descriptors... */
	vq->vq_desc = (struct virtio_desc *)base;
	base += vq->vq_qsize * sizeof(struct virtio_desc);

	/* ... immediately followed by "avail" ring (entirely uint16_t's) */
	vq->vq_avail = (struct vring_avail *)base;
	base += (2 + vq->vq_qsize + 1) * sizeof(uint16_t);

	/* Then it's rounded up to the next page... */
	base = (char *)roundup2((uintptr_t)base, VRING_ALIGN);

	/* ... and the last page(s) are the used ring. */
	vq->vq_used = (struct vring_used *)base;

	/* Mark queue as allocated, and start at 0 when we use it. */
	vq->vq_flags = VQ_ALLOC;
	vq->vq_last_avail = 0;
}
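
/*
 * Worked example of the layout above, assuming a 256-entry queue,
 * 16-byte descriptors, and the usual 4096-byte VRING_ALIGN:
 *
 *	descriptor table: 256 * 16 = 4096 bytes, at base + 0
 *	avail ring:       (2 + 256 + 1) * 2 = 518 bytes, at base + 4096
 *	(round base + 4614 up to the next 4096-byte boundary)
 *	used ring:        starts at base + 8192
 *
 * vring_size() accounts for the same arithmetic when we map the
 * guest region above.
 */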

/*
 * Helper inline for vq_getchain(): record the i'th "real"
 * descriptor.
 */
static inline void
_vq_record(int i, volatile struct virtio_desc *vd, struct vmctx *ctx,
	   struct iovec *iov, int n_iov, uint16_t *flags)
{

	if (i >= n_iov)
		return;
	iov[i].iov_base = paddr_guest2host(ctx, vd->vd_addr, vd->vd_len);
	iov[i].iov_len = vd->vd_len;
	if (flags != NULL)
		flags[i] = vd->vd_flags;
}

#define	VQ_MAX_DESCRIPTORS	512	/* see below */

/*
 * Examine the chain of descriptors starting at the "next one" to
 * make sure that they describe a sensible request.  If so, return
 * the number of "real" descriptors that would be needed/used in
 * acting on this request.  This may be smaller than the number of
 * available descriptors, e.g., if there are two available but
 * they are two separate requests, this just returns 1.  Or, it
 * may be larger: if there are indirect descriptors involved,
 * there may only be one descriptor available but it may be an
 * indirect pointing to eight more.  We return 8 in this case,
 * i.e., we do not count the indirect descriptors, only the "real"
 * ones.
 *
 * Basically, this vets the vd_flags and vd_next fields of each
 * descriptor and tells you how many are involved.  Since some may
 * be indirect, this also needs the vmctx (in the pci_devinst
 * at vs->vs_pi) so that it can find indirect descriptors.
 *
 * As we process each descriptor, we copy and adjust it (guest to
 * host address wise, also using the vmctx) into the given iov[]
 * array (of the given size).  If the array overflows, we stop
 * placing values into the array but keep processing descriptors,
 * up to VQ_MAX_DESCRIPTORS, before giving up and returning -1.
 * So you, the caller, must not assume that iov[] is as big as the
 * return value (you can process the same thing twice to allocate
 * a larger iov array if needed, or supply a zero length to find
 * out how much space is needed).
 *
 * If you want to verify the WRITE flag on each descriptor, pass a
 * non-NULL "flags" pointer to an array of "uint16_t" of the same size
 * as n_iov and we'll copy each vd_flags field after unwinding any
 * indirects.
 *
 * If some descriptor(s) are invalid, this prints a diagnostic message
 * and returns -1.  If no descriptors are ready now it simply returns 0.
 *
 * You are assumed to have done a vq_ring_ready() if needed (note
 * that vq_has_descs() does one).
 */
int
vq_getchain(struct vqueue_info *vq,
	    struct iovec *iov, int n_iov, uint16_t *flags)
{
	int i;
	u_int ndesc, n_indir;
	u_int idx, head, next;
	volatile struct virtio_desc *vdir, *vindir, *vp;
	struct vmctx *ctx;
	struct virtio_softc *vs;
	const char *name;

	vs = vq->vq_vs;
	name = vs->vs_vc->vc_name;

	/*
	 * Note: it's the responsibility of the guest not to
	 * update vq->vq_avail->va_idx until all of the descriptors
	 * the guest has written are valid (including all their
	 * vd_next fields and vd_flags).
	 *
	 * Compute (va_idx - last_avail) in integers mod 2**16.  This is
	 * the number of descriptors the guest has made available
	 * since the last time we updated vq->vq_last_avail.
	 *
	 * We just need to do the subtraction as an unsigned int,
	 * then trim off excess bits.
	 */
	idx = vq->vq_last_avail;
	ndesc = (uint16_t)((u_int)vq->vq_avail->va_idx - idx);
	if (ndesc == 0)
		return (0);
	if (ndesc > vq->vq_qsize) {
		/* XXX need better way to diagnose issues */
		fprintf(stderr,
		    "%s: ndesc (%u) out of range, driver confused?\r\n",
		    name, (u_int)ndesc);
		return (-1);
	}

	/*
	 * Now count/parse "involved" descriptors starting from
	 * the head of the chain.
	 *
	 * To prevent loops, we could be more complicated and
	 * check whether we're re-visiting a previously visited
	 * index, but we just abort if the count gets excessive.
	 */
	ctx = vs->vs_pi->pi_vmctx;
	head = vq->vq_avail->va_ring[idx & (vq->vq_qsize - 1)];
	next = head;
	for (i = 0; i < VQ_MAX_DESCRIPTORS; next = vdir->vd_next) {
		if (next >= vq->vq_qsize) {
			fprintf(stderr,
			    "%s: descriptor index %u out of range, "
			    "driver confused?\r\n",
			    name, next);
			return (-1);
		}
		vdir = &vq->vq_desc[next];
		if ((vdir->vd_flags & VRING_DESC_F_INDIRECT) == 0) {
			_vq_record(i, vdir, ctx, iov, n_iov, flags);
			i++;
		} else if ((vs->vs_negotiated_caps &
		    VIRTIO_RING_F_INDIRECT_DESC) == 0) {
			fprintf(stderr,
			    "%s: descriptor has forbidden INDIRECT flag, "
			    "driver confused?\r\n",
			    name);
			return (-1);
		} else {
			n_indir = vdir->vd_len / 16;
			if ((vdir->vd_len & 0xf) || n_indir == 0) {
				fprintf(stderr,
				    "%s: invalid indir len 0x%x, "
				    "driver confused?\r\n",
				    name, (u_int)vdir->vd_len);
				return (-1);
			}
			vindir = paddr_guest2host(ctx,
			    vdir->vd_addr, vdir->vd_len);
			/*
			 * Indirects start at the 0th, then follow
			 * their own embedded "next"s until those run
			 * out.  Each one's indirect flag must be off
			 * (we don't really have to check, could just
			 * ignore errors...).
			 */
			next = 0;
			for (;;) {
				vp = &vindir[next];
				if (vp->vd_flags & VRING_DESC_F_INDIRECT) {
					fprintf(stderr,
					    "%s: indirect desc has INDIR flag,"
					    " driver confused?\r\n",
					    name);
					return (-1);
				}
				_vq_record(i, vp, ctx, iov, n_iov, flags);
				if (++i > VQ_MAX_DESCRIPTORS)
					goto loopy;
				if ((vp->vd_flags & VRING_DESC_F_NEXT) == 0)
					break;
				next = vp->vd_next;
				if (next >= n_indir) {
					fprintf(stderr,
					    "%s: invalid next %u >= %u, "
					    "driver confused?\r\n",
					    name, (u_int)next, n_indir);
					return (-1);
				}
			}
		}
		if ((vdir->vd_flags & VRING_DESC_F_NEXT) == 0)
			return (i);
	}
loopy:
	fprintf(stderr,
	    "%s: descriptor loop? count > %d - driver confused?\r\n",
	    name, i);
	return (-1);
}
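
/*
 * Illustrative caller sketch.  As described above, a caller that
 * wants to check the direction of each descriptor passes a flags
 * array and tests VRING_DESC_F_WRITE; the array bound (8 here) is
 * arbitrary for the example:
 *
 *	struct iovec iov[8];
 *	uint16_t flags[8];
 *	int i, n;
 *
 *	n = vq_getchain(vq, iov, 8, flags);
 *	for (i = 0; i < n && i < 8; i++) {
 *		if (flags[i] & VRING_DESC_F_WRITE)
 *			(descriptor is writable by the device)
 *		else
 *			(descriptor is read-only for the device)
 *	}
 *
 * Remember that n may exceed the array size; per the comment above,
 * the caller must clamp, or retry with a larger array.
 */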

/*
 * Return the currently-first request chain to the guest, setting
 * its I/O length to the provided value.
 *
 * (This chain is the one you handled when you called vq_getchain()
 * and used its positive return value.)
 */
void
vq_relchain(struct vqueue_info *vq, uint32_t iolen)
{
	uint16_t head, uidx, mask;
	volatile struct vring_used *vuh;
	volatile struct virtio_used *vue;

	/*
	 * Notes:
	 *  - mask is N-1 where N is a power of 2 so computes x % N
	 *  - vuh points to the "used" data shared with guest
	 *  - vue points to the "used" ring entry we want to update
	 *  - head is the same value we compute in vq_getchain().
	 *
	 * (I apologize for the two fields named vu_idx; the
	 * virtio spec calls the one that vue points to, "id"...)
	 */
	mask = vq->vq_qsize - 1;
	vuh = vq->vq_used;
	head = vq->vq_avail->va_ring[vq->vq_last_avail++ & mask];

	uidx = vuh->vu_idx;
	vue = &vuh->vu_ring[uidx++ & mask];
	vue->vu_idx = head; /* ie, vue->id = head */
	vue->vu_tlen = iolen;
	vuh->vu_idx = uidx;
}
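
/*
 * A typical queue-notify handler, sketched for illustration (the
 * VTFOO_MAXSEGS bound and the iolen computation are hypothetical,
 * device-specific details): peel off chains with vq_getchain(),
 * complete each one in order with vq_relchain(), then call
 * vq_endchains() once to raise any needed interrupt:
 *
 *	while (vq_has_descs(vq)) {
 *		n = vq_getchain(vq, iov, VTFOO_MAXSEGS, NULL);
 *		if (n <= 0)
 *			break;
 *		(perform the I/O described by iov[0 .. n-1])
 *		vq_relchain(vq, iolen);
 *	}
 *	vq_endchains(vq, 1);
 */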

/*
 * The device has finished processing "available" chains and calling
 * vq_relchain on each one.  If the device used all the available
 * chains, used_all_avail should be set.
 *
 * If the "used" index moved we may need to inform the guest, i.e.,
 * deliver an interrupt.  Even if the used index did NOT move we
 * may need to deliver an interrupt, if the avail ring is empty and
 * we are supposed to interrupt on empty.
 *
 * Note that used_all_avail is provided by the caller because it's
 * a snapshot of the ring state when the caller decided to finish
 * processing -- it's possible that descriptors became available
 * after that point.  (It's also typically a constant 1/True as
 * well.)
 */
void
vq_endchains(struct vqueue_info *vq, int used_all_avail)
{
	struct virtio_softc *vs;
	uint16_t event_idx, new_idx, old_idx;
	int intr;

	/*
	 * Interrupt generation: if we're using EVENT_IDX,
	 * interrupt if we've crossed the event threshold.
	 * Otherwise interrupt is generated if we added "used" entries,
	 * but suppressed by VRING_AVAIL_F_NO_INTERRUPT.
	 *
	 * In any case, though, if NOTIFY_ON_EMPTY is set and the
	 * entire avail was processed, we need to interrupt always.
	 */
	vs = vq->vq_vs;
	new_idx = vq->vq_used->vu_idx;
	old_idx = vq->vq_save_used;
	if (used_all_avail &&
	    (vs->vs_negotiated_caps & VIRTIO_F_NOTIFY_ON_EMPTY))
		intr = 1;
	else if (vs->vs_flags & VIRTIO_EVENT_IDX) {
		event_idx = VQ_USED_EVENT_IDX(vq);
		/*
		 * This calculation is per docs and the kernel
		 * (see src/sys/dev/virtio/virtio_ring.h).
		 */
		intr = (uint16_t)(new_idx - event_idx - 1) <
			(uint16_t)(new_idx - old_idx);
	} else {
		intr = new_idx != old_idx &&
		    !(vq->vq_avail->va_flags & VRING_AVAIL_F_NO_INTERRUPT);
	}
	if (intr)
		vq_interrupt(vs, vq);
}
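
/*
 * Worked example of the EVENT_IDX test, with illustrative values:
 * suppose old_idx = 10, new_idx = 13, and the guest set the event
 * index to 11.  Then
 *
 *	(uint16_t)(13 - 11 - 1) = 1  <  (uint16_t)(13 - 10) = 3
 *
 * so we interrupt: the used index crossed event_idx while we were
 * adding entries 10..12.  With event_idx = 14 instead, the left
 * side is (uint16_t)(13 - 14 - 1) = 0xfffe, which is not less than
 * 3, so the interrupt is suppressed.
 */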

/* Note: these are in sorted order to make for a fast search */
static struct config_reg {
	uint16_t	cr_offset;	/* register offset */
	uint8_t		cr_size;	/* size (bytes) */
	uint8_t		cr_ro;		/* true => reg is read only */
	const char	*cr_name;	/* name of reg */
} config_regs[] = {
	{ VTCFG_R_HOSTCAP,	4, 1, "HOSTCAP" },
	{ VTCFG_R_GUESTCAP,	4, 0, "GUESTCAP" },
	{ VTCFG_R_PFN,		4, 0, "PFN" },
	{ VTCFG_R_QNUM,		2, 1, "QNUM" },
	{ VTCFG_R_QSEL,		2, 0, "QSEL" },
	{ VTCFG_R_QNOTIFY,	2, 0, "QNOTIFY" },
	{ VTCFG_R_STATUS,	1, 0, "STATUS" },
	{ VTCFG_R_ISR,		1, 0, "ISR" },
	{ VTCFG_R_CFGVEC,	2, 0, "CFGVEC" },
	{ VTCFG_R_QVEC,		2, 0, "QVEC" },
};

static inline struct config_reg *
vi_find_cr(int offset)
{
	u_int hi, lo, mid;
	struct config_reg *cr;

	lo = 0;
	hi = sizeof(config_regs) / sizeof(*config_regs) - 1;
	while (hi >= lo) {
		mid = (hi + lo) >> 1;
		cr = &config_regs[mid];
		if (cr->cr_offset == offset)
			return (cr);
		if (cr->cr_offset < offset)
			lo = mid + 1;
		else
			hi = mid - 1;
	}
	return (NULL);
}

/*
 * Handle PCI BAR register reads.
 * If it's to the MSI-X info, do that.
 * If it's part of the virtio standard stuff, do that.
 * Otherwise dispatch to the actual driver.
 */
uint64_t
vi_pci_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
	    int baridx, uint64_t offset, int size)
{
	struct virtio_softc *vs = pi->pi_arg;
	struct virtio_consts *vc;
	struct config_reg *cr;
	uint64_t virtio_config_size, max;
	const char *name;
	uint32_t newoff;
	uint32_t value;
	int error;

	if (vs->vs_flags & VIRTIO_USE_MSIX) {
		if (baridx == pci_msix_table_bar(pi) ||
		    baridx == pci_msix_pba_bar(pi)) {
			return (pci_emul_msix_tread(pi, offset, size));
		}
	}

	/* XXX probably should do something better than just assert() */
	assert(baridx == 0);

	if (vs->vs_mtx)
		pthread_mutex_lock(vs->vs_mtx);

	vc = vs->vs_vc;
	name = vc->vc_name;
	value = size == 1 ? 0xff : size == 2 ? 0xffff : 0xffffffff;

	if (size != 1 && size != 2 && size != 4)
		goto bad;

	if (pci_msix_enabled(pi))
		virtio_config_size = VTCFG_R_CFG1;
	else
		virtio_config_size = VTCFG_R_CFG0;

	if (offset >= virtio_config_size) {
		/*
		 * Subtract off the standard size (including MSI-X
		 * registers if enabled) and dispatch to underlying driver.
		 * If that fails, fall into general code.
		 */
		newoff = offset - virtio_config_size;
		max = vc->vc_cfgsize ? vc->vc_cfgsize : 0x100000000;
		if (newoff + size > max)
			goto bad;
		error = (*vc->vc_cfgread)(DEV_SOFTC(vs), newoff, size, &value);
		if (!error)
			goto done;
	}

bad:
	cr = vi_find_cr(offset);
	if (cr == NULL || cr->cr_size != size) {
		if (cr != NULL) {
			/* offset must be OK, so size must be bad */
			fprintf(stderr,
			    "%s: read from %s: bad size %d\r\n",
			    name, cr->cr_name, size);
		} else {
			fprintf(stderr,
			    "%s: read from bad offset/size %jd/%d\r\n",
			    name, (uintmax_t)offset, size);
		}
		goto done;
	}

	switch (offset) {
	case VTCFG_R_HOSTCAP:
		value = vc->vc_hv_caps;
		break;
	case VTCFG_R_GUESTCAP:
		value = vs->vs_negotiated_caps;
		break;
	case VTCFG_R_PFN:
		if (vs->vs_curq < vc->vc_nvq)
			value = vs->vs_queues[vs->vs_curq].vq_pfn;
		break;
	case VTCFG_R_QNUM:
		value = vs->vs_curq < vc->vc_nvq ?
		    vs->vs_queues[vs->vs_curq].vq_qsize : 0;
		break;
	case VTCFG_R_QSEL:
		value = vs->vs_curq;
		break;
	case VTCFG_R_QNOTIFY:
		value = 0;	/* XXX */
		break;
	case VTCFG_R_STATUS:
		value = vs->vs_status;
		break;
	case VTCFG_R_ISR:
		value = vs->vs_isr;
		vs->vs_isr = 0;		/* a read clears this flag */
		break;
	case VTCFG_R_CFGVEC:
		value = vs->vs_msix_cfg_idx;
		break;
	case VTCFG_R_QVEC:
		value = vs->vs_curq < vc->vc_nvq ?
		    vs->vs_queues[vs->vs_curq].vq_msix_idx :
		    VIRTIO_MSI_NO_VECTOR;
		break;
	}
done:
	if (vs->vs_mtx)
		pthread_mutex_unlock(vs->vs_mtx);
	return (value);
}
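
/*
 * Offset dispatch example (assuming the legacy register layout in
 * virtio.h, where VTCFG_R_CFG0 == 20 and VTCFG_R_CFG1 == 24): with
 * MSI-X enabled the standard registers end at offset 23, so a
 * 1-byte read at BAR offset 24 yields newoff = 0, the first byte
 * of the device-specific config handed to vc_cfgread().  Without
 * MSI-X the device-specific config starts at offset 20 instead.
 */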

/*
 * Handle PCI BAR register writes.
 * If it's to the MSI-X info, do that.
 * If it's part of the virtio standard stuff, do that.
 * Otherwise dispatch to the actual driver.
 */
void
vi_pci_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
	     int baridx, uint64_t offset, int size, uint64_t value)
{
	struct virtio_softc *vs = pi->pi_arg;
	struct vqueue_info *vq;
	struct virtio_consts *vc;
	struct config_reg *cr;
	uint64_t virtio_config_size, max;
	const char *name;
	uint32_t newoff;
	int error;

	if (vs->vs_flags & VIRTIO_USE_MSIX) {
		if (baridx == pci_msix_table_bar(pi) ||
		    baridx == pci_msix_pba_bar(pi)) {
			pci_emul_msix_twrite(pi, offset, size, value);
			return;
		}
	}

	/* XXX probably should do something better than just assert() */
	assert(baridx == 0);

	if (vs->vs_mtx)
		pthread_mutex_lock(vs->vs_mtx);

	vc = vs->vs_vc;
	name = vc->vc_name;

	if (size != 1 && size != 2 && size != 4)
		goto bad;

	if (pci_msix_enabled(pi))
		virtio_config_size = VTCFG_R_CFG1;
	else
		virtio_config_size = VTCFG_R_CFG0;

	if (offset >= virtio_config_size) {
		/*
		 * Subtract off the standard size (including MSI-X
		 * registers if enabled) and dispatch to underlying driver.
		 */
		newoff = offset - virtio_config_size;
		max = vc->vc_cfgsize ? vc->vc_cfgsize : 0x100000000;
		if (newoff + size > max)
			goto bad;
		error = (*vc->vc_cfgwrite)(DEV_SOFTC(vs), newoff, size, value);
		if (!error)
			goto done;
	}

bad:
	cr = vi_find_cr(offset);
	if (cr == NULL || cr->cr_size != size || cr->cr_ro) {
		if (cr != NULL) {
			/* offset must be OK, wrong size and/or reg is R/O */
			if (cr->cr_size != size)
				fprintf(stderr,
				    "%s: write to %s: bad size %d\r\n",
				    name, cr->cr_name, size);
			if (cr->cr_ro)
				fprintf(stderr,
				    "%s: write to read-only reg %s\r\n",
				    name, cr->cr_name);
		} else {
			fprintf(stderr,
			    "%s: write to bad offset/size %jd/%d\r\n",
			    name, (uintmax_t)offset, size);
		}
		goto done;
	}

	switch (offset) {
	case VTCFG_R_GUESTCAP:
		vs->vs_negotiated_caps = value & vc->vc_hv_caps;
		break;
	case VTCFG_R_PFN:
		if (vs->vs_curq >= vc->vc_nvq)
			goto bad_qindex;
		vi_vq_init(vs, value);
		break;
	case VTCFG_R_QSEL:
		/*
		 * Note that the guest is allowed to select an
		 * invalid queue; we just need to return a QNUM
		 * of 0 while the bad queue is selected.
		 */
		vs->vs_curq = value;
		break;
	case VTCFG_R_QNOTIFY:
		if (value >= vc->vc_nvq) {
			fprintf(stderr, "%s: queue %d notify out of range\r\n",
			    name, (int)value);
			goto done;
		}
		vq = &vs->vs_queues[value];
		if (vq->vq_notify)
			(*vq->vq_notify)(DEV_SOFTC(vs), vq);
		else if (vc->vc_qnotify)
			(*vc->vc_qnotify)(DEV_SOFTC(vs), vq);
		else
			fprintf(stderr,
			    "%s: qnotify queue %d: missing vq/vc notify\r\n",
			    name, (int)value);
		break;
	case VTCFG_R_STATUS:
		vs->vs_status = value;
		if (value == 0)
			(*vc->vc_reset)(DEV_SOFTC(vs));
		break;
	case VTCFG_R_CFGVEC:
		vs->vs_msix_cfg_idx = value;
		break;
	case VTCFG_R_QVEC:
		if (vs->vs_curq >= vc->vc_nvq)
			goto bad_qindex;
		vq = &vs->vs_queues[vs->vs_curq];
		vq->vq_msix_idx = value;
		break;
	}
	goto done;

bad_qindex:
	fprintf(stderr,
	    "%s: write config reg %s: curq %d >= max %d\r\n",
	    name, cr->cr_name, vs->vs_curq, vc->vc_nvq);
done:
	if (vs->vs_mtx)
		pthread_mutex_unlock(vs->vs_mtx);
}