pci_virtio_net.c revision 244159
/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/linker_set.h>
#include <sys/select.h>
#include <sys/uio.h>
#include <sys/ioctl.h>

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <assert.h>
#include <md5.h>
#include <pthread.h>

#include "fbsdrun.h"
#include "pci_emul.h"
#include "mevent.h"
#include "virtio.h"

#define VTNET_RINGSZ	256

#define VTNET_MAXSEGS	32

/*
 * PCI config-space register offsets
 */
#define VTNET_R_CFG0	       20
#define VTNET_R_CFG1	       21
#define VTNET_R_CFG2	       22
#define VTNET_R_CFG3	       23
#define VTNET_R_CFG4	       24
#define VTNET_R_CFG5	       25
#define VTNET_R_CFG6	       26
#define VTNET_R_CFG7	       27
#define VTNET_R_MAX	       27

#define VTNET_REGSZ		(VTNET_R_MAX + 1)
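
/*
 * CFG0..CFG5 hold the 6-byte MAC address and CFG6..CFG7 the 16-bit
 * link status word; see the CFG cases in pci_vtnet_read() and
 * pci_vtnet_write() below.
 */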

/*
 * Host capabilities
 */
#define VTNET_S_HOSTCAPS      \
  ( 0x00000020 |	/* host supplies MAC */ \
    0x00008000 |	/* host can merge Rx buffers */ \
    0x00010000 )	/* config status available */
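
/*
 * These values correspond to the standard virtio-net feature bits
 * VIRTIO_NET_F_MAC (bit 5), VIRTIO_NET_F_MRG_RXBUF (bit 15) and
 * VIRTIO_NET_F_STATUS (bit 16).
 */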

/*
 * Queue definitions.
 */
#define VTNET_RXQ	0
#define VTNET_TXQ	1
#define VTNET_CTLQ	2

#define VTNET_MAXQ	3

struct vring_hqueue {
	/* Internal state */
	uint16_t	hq_size;
	uint16_t	hq_cur_aidx;		/* trails behind 'avail_idx' */

	/* Host-context pointers to the queue */
	struct virtio_desc *hq_dtable;
	uint16_t	*hq_avail_flags;
	uint16_t	*hq_avail_idx;		/* monotonically increasing */
	uint16_t	*hq_avail_ring;

	uint16_t	*hq_used_flags;
	uint16_t	*hq_used_idx;		/* monotonically increasing */
	struct virtio_used *hq_used_ring;
};

/*
 * Fixed-size network receive header
 */
struct virtio_net_rxhdr {
	uint8_t		vrh_flags;
	uint8_t		vrh_gso_type;
	uint16_t	vrh_hdr_len;
	uint16_t	vrh_gso_size;
	uint16_t	vrh_csum_start;
	uint16_t	vrh_csum_offset;
	uint16_t	vrh_bufs;
} __packed;
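
/*
 * This is the 12-byte virtio-net header that includes the num_buffers
 * field (vrh_bufs). The code assumes the guest accepts the
 * merged-rx-buffers feature offered in VTNET_S_HOSTCAPS, since that
 * field is only present when the feature is negotiated.
 */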

/*
 * Debug printf
 */
static int pci_vtnet_debug;
#define DPRINTF(params) if (pci_vtnet_debug) printf params
#define WPRINTF(params) printf params

/*
 * Per-device softc
 */
struct pci_vtnet_softc {
	struct pci_devinst *vsc_pi;
	pthread_mutex_t vsc_mtx;
	struct mevent	*vsc_mevp;

	int		vsc_curq;
	int		vsc_status;
	int		vsc_isr;
	int		vsc_tapfd;
	int		vsc_rx_ready;
	int		vsc_rxpend;

	uint32_t	vsc_features;
	uint8_t		vsc_macaddr[6];

	uint64_t	vsc_pfn[VTNET_MAXQ];
	struct	vring_hqueue vsc_hq[VTNET_MAXQ];
};

/*
 * Return the number of available descriptors in the vring taking care
 * of the 16-bit index wraparound.
 */
static int
hq_num_avail(struct vring_hqueue *hq)
{
	int ndesc;

	if (*hq->hq_avail_idx >= hq->hq_cur_aidx)
		ndesc = *hq->hq_avail_idx - hq->hq_cur_aidx;
	else
		ndesc = UINT16_MAX - hq->hq_cur_aidx + *hq->hq_avail_idx + 1;

	assert(ndesc >= 0 && ndesc <= hq->hq_size);

	return (ndesc);
}
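
/*
 * A hypothetical worked example of the wraparound branch above: if the
 * guest's avail index has wrapped to 2 while hq_cur_aidx is still
 * 65534, then ndesc = 65535 - 65534 + 2 + 1 = 4, covering avail
 * indices 65534, 65535, 0 and 1.
 */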

static uint16_t
pci_vtnet_qsize(int qnum)
{
	/* XXX no ctl queue currently */
	if (qnum == VTNET_CTLQ) {
		return (0);
	}

	/* XXX fixed currently. Maybe different for tx/rx/ctl */
	return (VTNET_RINGSZ);
}

static void
pci_vtnet_update_status(struct pci_vtnet_softc *sc, uint32_t value)
{
	if (value == 0) {
		DPRINTF(("vtnet: device reset requested !\n"));
	}

	sc->vsc_status = value;
}

/*
 * Called to send a buffer chain out to the tap device
 */
static void
pci_vtnet_tap_tx(struct pci_vtnet_softc *sc, struct iovec *iov, int iovcnt,
		 int len)
{
	char pad[60];

	if (sc->vsc_tapfd == -1)
		return;

	/*
	 * If the length is less than 60 bytes, pad out to that length
	 * by appending an extra zeroed segment to the iov. The caller
	 * guarantees that one extra iov slot is always available.
	 */
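	/*
	 * (60 bytes is the minimum Ethernet frame size, ETHER_MIN_LEN (64),
	 *  less the 4-byte CRC, which is not part of the data handed to
	 *  the tap device.)
	 */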
	if (len < 60) {
		memset(pad, 0, 60 - len);
		iov[iovcnt].iov_base = pad;
		iov[iovcnt].iov_len = 60 - len;
		iovcnt++;
	}
	(void) writev(sc->vsc_tapfd, iov, iovcnt);
}

/*
 *  Called when there is read activity on the tap file descriptor.
 * Each buffer posted by the guest is assumed to be able to contain
 * an entire ethernet frame + rx header.
 *  MP note: the dummybuf is only used for discarding frames, so there
 * is no need for it to be per-vtnet or locked.
 */
static uint8_t dummybuf[2048];
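
/*
 * Each guest rx buffer is therefore expected to be laid out as
 *
 *	[ struct virtio_net_rxhdr (12 bytes) ][ ethernet frame data ]
 *
 * with the frame read from the tap device directly after the header.
 */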

static void
pci_vtnet_tap_rx(struct pci_vtnet_softc *sc)
{
	struct virtio_desc *vd;
	struct virtio_used *vu;
	struct vring_hqueue *hq;
	struct virtio_net_rxhdr *vrx;
	uint8_t *buf;
	int i;
	int len;
	int ndescs;
	int didx, uidx, aidx;	/* descriptor, avail and used index */

	/*
	 * Should never be called without a valid tap fd
	 */
	assert(sc->vsc_tapfd != -1);

	/*
	 * But, will be called when the rx ring hasn't yet
	 * been set up.
	 */
	if (sc->vsc_rx_ready == 0) {
		/*
		 * Drop the packet and try later.
		 */
		(void) read(sc->vsc_tapfd, dummybuf, sizeof(dummybuf));
		return;
	}

	/*
	 * Calculate the number of available rx buffers
	 */
	hq = &sc->vsc_hq[VTNET_RXQ];

	ndescs = hq_num_avail(hq);

	if (ndescs == 0) {
		/*
		 * Need to wait for a guest queue notification
		 * before reading again
		 */
		if (sc->vsc_rxpend == 0) {
			WPRINTF(("vtnet: no rx descriptors !\n"));
			sc->vsc_rxpend = 1;
		}

		/*
		 * Drop the packet and try later
		 */
		(void) read(sc->vsc_tapfd, dummybuf, sizeof(dummybuf));
		return;
	}

	aidx = hq->hq_cur_aidx;
	uidx = *hq->hq_used_idx;
	for (i = 0; i < ndescs; i++) {
		/*
		 * 'aidx' indexes into an array of descriptor indexes
		 */
		didx = hq->hq_avail_ring[aidx % hq->hq_size];
		assert(didx >= 0 && didx < hq->hq_size);

		vd = &hq->hq_dtable[didx];

		/*
		 * Get a pointer to the rx header, and use the
		 * data immediately following it for the packet buffer.
		 */
		vrx = (struct virtio_net_rxhdr *)paddr_guest2host(vd->vd_addr);
		buf = (uint8_t *)(vrx + 1);

		len = read(sc->vsc_tapfd, buf,
			   vd->vd_len - sizeof(struct virtio_net_rxhdr));

		if (len < 0 && errno == EWOULDBLOCK) {
			break;
		}

		/*
		 * The only valid field in the rx packet header is the
		 * number of buffers, which is always 1 without TSO
		 * support.
		 */
		memset(vrx, 0, sizeof(struct virtio_net_rxhdr));
		vrx->vrh_bufs = 1;

		/*
		 * Write this descriptor into the used ring
		 */
		vu = &hq->hq_used_ring[uidx % hq->hq_size];
		vu->vu_idx = didx;
		vu->vu_tlen = len + sizeof(struct virtio_net_rxhdr);
		uidx++;
		aidx++;
	}

	/*
	 * Update the used pointer, and signal an interrupt if allowed
	 */
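	/*
	 * (VRING_AVAIL_F_NO_INTERRUPT is a hint the guest sets in the
	 *  avail flags when it is polling and does not want a
	 *  per-packet interrupt.)
	 */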
	*hq->hq_used_idx = uidx;
	hq->hq_cur_aidx = aidx;

	if ((*hq->hq_avail_flags & VRING_AVAIL_F_NO_INTERRUPT) == 0) {
		sc->vsc_isr |= 1;
		pci_generate_msi(sc->vsc_pi, 0);
	}
}

static void
pci_vtnet_tap_callback(int fd, enum ev_type type, void *param)
{
	struct pci_vtnet_softc *sc = param;

	pthread_mutex_lock(&sc->vsc_mtx);
	pci_vtnet_tap_rx(sc);
	pthread_mutex_unlock(&sc->vsc_mtx);
}

static void
pci_vtnet_ping_rxq(struct pci_vtnet_softc *sc)
{
	/*
	 * A qnotify means that the rx process can now begin
	 */
	if (sc->vsc_rx_ready == 0) {
		sc->vsc_rx_ready = 1;
	}

	/*
	 * If the rx queue was empty, attempt to receive a
	 * packet that was previously blocked due to no rx bufs
	 * available
	 */
	if (sc->vsc_rxpend) {
		WPRINTF(("vtnet: rx resumed\n\r"));
		sc->vsc_rxpend = 0;
		pci_vtnet_tap_rx(sc);
	}
}

static void
pci_vtnet_proctx(struct pci_vtnet_softc *sc, struct vring_hqueue *hq)
{
	struct iovec iov[VTNET_MAXSEGS + 1];
	struct virtio_desc *vd;
	struct virtio_used *vu;
	int i;
	int plen;
	int tlen;
	int uidx, aidx, didx;

	uidx = *hq->hq_used_idx;
	aidx = hq->hq_cur_aidx;
	didx = hq->hq_avail_ring[aidx % hq->hq_size];
	assert(didx >= 0 && didx < hq->hq_size);

	vd = &hq->hq_dtable[didx];

	/*
	 * Run through the chain of descriptors, ignoring the
	 * first header descriptor. However, include the header
	 * length in the total length that will be put into the
	 * used queue.
	 */
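	/*
	 * (The header can be skipped because no checksum or TSO offload
	 *  features are offered in VTNET_S_HOSTCAPS, so it carries
	 *  nothing the host needs to look at.)
	 */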
	tlen = vd->vd_len;
	vd = &hq->hq_dtable[vd->vd_next];

	for (i = 0, plen = 0;
	     i < VTNET_MAXSEGS;
	     i++, vd = &hq->hq_dtable[vd->vd_next]) {
		iov[i].iov_base = paddr_guest2host(vd->vd_addr);
		iov[i].iov_len = vd->vd_len;
		plen += vd->vd_len;
		tlen += vd->vd_len;

		if ((vd->vd_flags & VRING_DESC_F_NEXT) == 0)
			break;
	}
	assert(i < VTNET_MAXSEGS);

	DPRINTF(("virtio: packet send, %d bytes, %d segs\n\r", plen, i + 1));
	pci_vtnet_tap_tx(sc, iov, i + 1, plen);

	/*
	 * Return this chain back to the host
	 */
	vu = &hq->hq_used_ring[uidx % hq->hq_size];
	vu->vu_idx = didx;
	vu->vu_tlen = tlen;
	hq->hq_cur_aidx = aidx + 1;
	*hq->hq_used_idx = uidx + 1;

	/*
	 * Generate an interrupt if able
	 */
	if ((*hq->hq_avail_flags & VRING_AVAIL_F_NO_INTERRUPT) == 0) {
		sc->vsc_isr |= 1;
		pci_generate_msi(sc->vsc_pi, 0);
	}
}

static void
pci_vtnet_ping_txq(struct pci_vtnet_softc *sc)
{
	struct vring_hqueue *hq = &sc->vsc_hq[VTNET_TXQ];
	int i;
	int ndescs;

	/*
	 * Calculate number of ring entries to process
	 */
	ndescs = hq_num_avail(hq);

	if (ndescs == 0)
		return;

	/*
	 * Run through all the available descriptor chains, gathering
	 * each chain into iovecs and transmitting it as one packet
	 */
	for (i = 0; i < ndescs; i++)
		pci_vtnet_proctx(sc, hq);
}

static void
pci_vtnet_ping_ctlq(struct pci_vtnet_softc *sc)
{

	DPRINTF(("vtnet: control qnotify!\n\r"));
}

static void
pci_vtnet_ring_init(struct pci_vtnet_softc *sc, uint64_t pfn)
{
	struct vring_hqueue *hq;
	int qnum = sc->vsc_curq;

	assert(qnum < VTNET_MAXQ);

	sc->vsc_pfn[qnum] = pfn << VRING_PFN;

	/*
	 * Set up host pointers to the various parts of the
	 * queue
	 */
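	/*
	 * Legacy virtio ring layout in guest memory: the avail ring
	 * immediately follows the descriptor table, and the used ring
	 * starts at the next VRING_ALIGN boundary after the avail ring:
	 *
	 *	struct virtio_desc	dtable[size]
	 *	uint16_t		avail_flags, avail_idx,
	 *				avail_ring[size]
	 *	...pad to VRING_ALIGN...
	 *	uint16_t		used_flags, used_idx
	 *	struct virtio_used	used_ring[size]
	 */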
	hq = &sc->vsc_hq[qnum];
	hq->hq_size = pci_vtnet_qsize(qnum);

	hq->hq_dtable = paddr_guest2host(pfn << VRING_PFN);
	hq->hq_avail_flags = (uint16_t *)(hq->hq_dtable + hq->hq_size);
	hq->hq_avail_idx = hq->hq_avail_flags + 1;
	hq->hq_avail_ring = hq->hq_avail_flags + 2;
	hq->hq_used_flags = (uint16_t *)roundup2(
	    (uintptr_t)(hq->hq_avail_ring + hq->hq_size), VRING_ALIGN);
	hq->hq_used_idx = hq->hq_used_flags + 1;
	hq->hq_used_ring = (struct virtio_used *)(hq->hq_used_flags + 2);

	/*
	 * Initialize queue indexes
	 */
	hq->hq_cur_aidx = 0;
}

static int
pci_vtnet_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
{
	MD5_CTX mdctx;
	unsigned char digest[16];
	char nstr[80];
	struct pci_vtnet_softc *sc;

	/*
	 * Access to guest memory is required. Fail if
	 * memory not mapped
	 */
	if (paddr_guest2host(0) == NULL)
		return (1);

	sc = malloc(sizeof(struct pci_vtnet_softc));
	memset(sc, 0, sizeof(struct pci_vtnet_softc));

	pi->pi_arg = sc;
	sc->vsc_pi = pi;

	pthread_mutex_init(&sc->vsc_mtx, NULL);

	/*
	 * Attempt to open the tap device
	 */
	sc->vsc_tapfd = -1;
	if (opts != NULL) {
		char tbuf[80];

		strcpy(tbuf, "/dev/");
		strlcat(tbuf, opts, sizeof(tbuf));

		sc->vsc_tapfd = open(tbuf, O_RDWR);
		if (sc->vsc_tapfd == -1) {
			WPRINTF(("open of tap device %s failed\n", tbuf));
		} else {
			/*
			 * Set non-blocking and register for read
			 * notifications with the event loop
			 */
			int opt = 1;
			if (ioctl(sc->vsc_tapfd, FIONBIO, &opt) < 0) {
				WPRINTF(("tap device O_NONBLOCK failed\n"));
				close(sc->vsc_tapfd);
				sc->vsc_tapfd = -1;
			} else {
				sc->vsc_mevp = mevent_add(sc->vsc_tapfd,
							  EVF_READ,
							  pci_vtnet_tap_callback,
							  sc);
				if (sc->vsc_mevp == NULL) {
					WPRINTF(("Could not register event\n"));
					close(sc->vsc_tapfd);
					sc->vsc_tapfd = -1;
				}
			}
		}
	}

	/*
	 * The MAC address is the standard NetApp OUI of 00-a0-98,
	 * followed by an MD5 of the vm name. The slot/func number is
	 * prepended to this for slots other than 1:0, so that
	 * a bootloader can netboot from the equivalent of slot 1.
	 */
	if (pi->pi_slot == 1 && pi->pi_func == 0) {
		strlcpy(nstr, vmname, sizeof(nstr));
	} else {
		snprintf(nstr, sizeof(nstr), "%d-%d-%s", pi->pi_slot,
		    pi->pi_func, vmname);
	}

	MD5Init(&mdctx);
	MD5Update(&mdctx, nstr, strlen(nstr));
	MD5Final(digest, &mdctx);

	sc->vsc_macaddr[0] = 0x00;
	sc->vsc_macaddr[1] = 0xa0;
	sc->vsc_macaddr[2] = 0x98;
	sc->vsc_macaddr[3] = digest[0];
	sc->vsc_macaddr[4] = digest[1];
	sc->vsc_macaddr[5] = digest[2];

	/* initialize config space */
	pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_NET);
	pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_NETWORK);
	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_TYPE_NET);
	pci_emul_add_msicap(pi, 1);
	pci_emul_alloc_bar(pi, 0, PCIBAR_IO, VTNET_REGSZ);

	return (0);
}

/*
 * Function pointer array to handle queue notifications
 */
static void (*pci_vtnet_qnotify[VTNET_MAXQ])(struct pci_vtnet_softc *) = {
	pci_vtnet_ping_rxq,
	pci_vtnet_ping_txq,
	pci_vtnet_ping_ctlq
};
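
/*
 * The array is indexed by the queue number the guest writes to the
 * QNOTIFY register; see the VTCFG_R_QNOTIFY case in pci_vtnet_write().
 */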

static void
pci_vtnet_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
		int baridx, uint64_t offset, int size, uint64_t value)
{
	struct pci_vtnet_softc *sc = pi->pi_arg;
	void *ptr;

	assert(baridx == 0);

	if (offset + size > VTNET_REGSZ) {
		DPRINTF(("vtnet_write: 2big, offset %ld size %d\n",
			 offset, size));
		return;
	}

	pthread_mutex_lock(&sc->vsc_mtx);

	switch (offset) {
	case VTCFG_R_GUESTCAP:
		assert(size == 4);
		sc->vsc_features = value & VTNET_S_HOSTCAPS;
		break;
	case VTCFG_R_PFN:
		assert(size == 4);
		pci_vtnet_ring_init(sc, value);
		break;
	case VTCFG_R_QSEL:
		assert(size == 2);
		assert(value < VTNET_MAXQ);
		sc->vsc_curq = value;
		break;
	case VTCFG_R_QNOTIFY:
		assert(size == 2);
		assert(value < VTNET_MAXQ);
		(*pci_vtnet_qnotify[value])(sc);
		break;
	case VTCFG_R_STATUS:
		assert(size == 1);
		pci_vtnet_update_status(sc, value);
		break;
	case VTNET_R_CFG0:
	case VTNET_R_CFG1:
	case VTNET_R_CFG2:
	case VTNET_R_CFG3:
	case VTNET_R_CFG4:
	case VTNET_R_CFG5:
		assert((size + offset) <= (VTNET_R_CFG5 + 1));
		ptr = &sc->vsc_macaddr[offset - VTNET_R_CFG0];
		/*
		 * The driver is allowed to change the MAC address
		 */
		if (size == 1) {
			*(uint8_t *) ptr = value;
		} else if (size == 2) {
			*(uint16_t *) ptr = value;
		} else {
			*(uint32_t *) ptr = value;
		}
		break;
	case VTCFG_R_HOSTCAP:
	case VTCFG_R_QNUM:
	case VTCFG_R_ISR:
	case VTNET_R_CFG6:
	case VTNET_R_CFG7:
		DPRINTF(("vtnet: write to readonly reg %ld\n\r", offset));
		break;
	default:
		DPRINTF(("vtnet: unknown i/o write offset %ld\n\r", offset));
		value = 0;
		break;
	}

	pthread_mutex_unlock(&sc->vsc_mtx);
}

static uint64_t
pci_vtnet_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
	       int baridx, uint64_t offset, int size)
{
	struct pci_vtnet_softc *sc = pi->pi_arg;
	void *ptr;
	uint64_t value;

	assert(baridx == 0);

	if (offset + size > VTNET_REGSZ) {
		DPRINTF(("vtnet_read: 2big, offset %ld size %d\n",
			 offset, size));
		return (0);
	}

	pthread_mutex_lock(&sc->vsc_mtx);

	switch (offset) {
	case VTCFG_R_HOSTCAP:
		assert(size == 4);
		value = VTNET_S_HOSTCAPS;
		break;
	case VTCFG_R_GUESTCAP:
		assert(size == 4);
		value = sc->vsc_features; /* XXX never read ? */
		break;
	case VTCFG_R_PFN:
		assert(size == 4);
		value = sc->vsc_pfn[sc->vsc_curq] >> VRING_PFN;
		break;
	case VTCFG_R_QNUM:
		assert(size == 2);
		value = pci_vtnet_qsize(sc->vsc_curq);
		break;
	case VTCFG_R_QSEL:
		assert(size == 2);
		value = sc->vsc_curq;  /* XXX never read ? */
		break;
	case VTCFG_R_QNOTIFY:
		assert(size == 2);
		value = sc->vsc_curq;  /* XXX never read ? */
		break;
	case VTCFG_R_STATUS:
		assert(size == 1);
		value = sc->vsc_status;
		break;
	case VTCFG_R_ISR:
		assert(size == 1);
		value = sc->vsc_isr;
		sc->vsc_isr = 0;     /* a read clears this flag */
		break;
	case VTNET_R_CFG0:
	case VTNET_R_CFG1:
	case VTNET_R_CFG2:
	case VTNET_R_CFG3:
	case VTNET_R_CFG4:
	case VTNET_R_CFG5:
		assert((size + offset) <= (VTNET_R_CFG5 + 1));
		ptr = &sc->vsc_macaddr[offset - VTNET_R_CFG0];
		if (size == 1) {
			value = *(uint8_t *) ptr;
		} else if (size == 2) {
			value = *(uint16_t *) ptr;
		} else {
			value = *(uint32_t *) ptr;
		}
		break;
	case VTNET_R_CFG6:
		assert(size != 4);
		value = 0x01; /* XXX link always up */
		break;
	case VTNET_R_CFG7:
		assert(size == 1);
		value = 0; /* XXX link status in LSB */
		break;
	default:
		DPRINTF(("vtnet: unknown i/o read offset %ld\n\r", offset));
		value = 0;
		break;
	}

	pthread_mutex_unlock(&sc->vsc_mtx);

	return (value);
}

struct pci_devemu pci_de_vnet = {
	.pe_emu =	"virtio-net",
	.pe_init =	pci_vtnet_init,
	.pe_barwrite =	pci_vtnet_write,
	.pe_barread =	pci_vtnet_read
};
PCI_EMUL_SET(pci_de_vnet);