/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/linker_set.h>
#include <sys/_iovec.h>
#include <sys/mman.h>

#include <x86/psl.h>

#include <machine/vmm.h>
#include <machine/vmm_instruction_emul.h>
#include <vmmapi.h>

#include <stdio.h>
#include <string.h>
#include <assert.h>

#include "bhyverun.h"
#include "config.h"
#include "inout.h"

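/*
 * Linker set of statically declared I/O port handlers.  init_inout() walks
 * this set and installs each entry into the dispatch table below.
 */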
SET_DECLARE(inout_port_set, struct inout_port);

#define	MAX_IOPORTS	(1 << 16)

#define	VERIFY_IOPORT(port, size) \
	assert((port) >= 0 && (size) > 0 && ((port) + (size)) <= MAX_IOPORTS)

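/*
 * Per-port dispatch table, indexed by I/O port number.  Each entry records
 * the handler, its argument and the IOPORT_F_* flags for that port.
 */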
static struct {
	const char	*name;
	int		flags;
	inout_func_t	handler;
	void		*arg;
} inout_handlers[MAX_IOPORTS];

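/*
 * Fallback handler for ports that no device model has claimed: reads return
 * all-ones and writes are silently discarded.
 */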
static int
default_inout(struct vmctx *ctx __unused, int in,
    int port __unused, int bytes, uint32_t *eax, void *arg __unused)
{
	if (in) {
		switch (bytes) {
		case 4:
			*eax = 0xffffffff;
			break;
		case 2:
			*eax = 0xffff;
			break;
		case 1:
			*eax = 0xff;
			break;
		}
	}

	return (0);
}

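/*
 * Install default_inout() on every port in [start, start + size), tagging
 * the range with IOPORT_F_DEFAULT so a device model may later claim it.
 */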
static void
register_default_iohandler(int start, int size)
{
	struct inout_port iop;

	VERIFY_IOPORT(start, size);

	bzero(&iop, sizeof(iop));
	iop.name = "default";
	iop.port = start;
	iop.size = size;
	iop.flags = IOPORT_F_INOUT | IOPORT_F_DEFAULT;
	iop.handler = default_inout;

	register_inout(&iop);
}

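/*
 * Handle an I/O port vmexit by dispatching it to the handler registered for
 * the port.  String instructions (INS/OUTS) are emulated up to 16 iterations
 * at a time, with the guest's index and count registers updated and the
 * instruction restarted if more iterations remain; plain IN/OUT passes the
 * masked %eax value through the handler.  Returns 0 on success and a
 * non-zero value if the access could not be handled.
 */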
int
emulate_inout(struct vmctx *ctx, struct vcpu *vcpu, struct vm_exit *vmexit)
{
	int addrsize, bytes, flags, in, port, prot, rep;
	uint32_t eax, val;
	inout_func_t handler;
	void *arg;
	int error, fault, retval;
	enum vm_reg_name idxreg;
	uint64_t gla, index, iterations, count;
	struct vm_inout_str *vis;
	struct iovec iov[2];

	bytes = vmexit->u.inout.bytes;
	in = vmexit->u.inout.in;
	port = vmexit->u.inout.port;

	assert(port < MAX_IOPORTS);
	assert(bytes == 1 || bytes == 2 || bytes == 4);

	handler = inout_handlers[port].handler;

	if (handler == default_inout &&
	    get_config_bool_default("x86.strictio", false))
		return (-1);

	flags = inout_handlers[port].flags;
	arg = inout_handlers[port].arg;

	if (in) {
		if (!(flags & IOPORT_F_IN))
			return (-1);
	} else {
		if (!(flags & IOPORT_F_OUT))
			return (-1);
	}

	retval = 0;
	if (vmexit->u.inout.string) {
		vis = &vmexit->u.inout_str;
		rep = vis->inout.rep;
		addrsize = vis->addrsize;
		prot = in ? PROT_WRITE : PROT_READ;
		assert(addrsize == 2 || addrsize == 4 || addrsize == 8);

		/* Index register */
		idxreg = in ? VM_REG_GUEST_RDI : VM_REG_GUEST_RSI;
		index = vis->index & vie_size2mask(addrsize);

		/* Count register */
		count = vis->count & vie_size2mask(addrsize);

		/* Limit number of back-to-back in/out emulations to 16 */
		iterations = MIN(count, 16);
		while (iterations > 0) {
			assert(retval == 0);
			if (vie_calculate_gla(vis->paging.cpu_mode,
			    vis->seg_name, &vis->seg_desc, index, bytes,
			    addrsize, prot, &gla)) {
				vm_inject_gp(vcpu);
				break;
			}

			error = vm_copy_setup(vcpu, &vis->paging, gla,
			    bytes, prot, iov, nitems(iov), &fault);
			if (error) {
				retval = -1;  /* Unrecoverable error */
				break;
			} else if (fault) {
				retval = 0;  /* Resume guest to handle fault */
				break;
			}

			if (vie_alignment_check(vis->paging.cpl, bytes,
			    vis->cr0, vis->rflags, gla)) {
				vm_inject_ac(vcpu, 0);
				break;
			}

			val = 0;
			if (!in)
				vm_copyin(iov, &val, bytes);

			retval = handler(ctx, in, port, bytes, &val, arg);
			if (retval != 0)
				break;

			if (in)
				vm_copyout(&val, iov, bytes);

			/* Update index */
			if (vis->rflags & PSL_D)
				index -= bytes;
			else
				index += bytes;

			count--;
			iterations--;
		}

		/* Update index register */
		error = vie_update_register(vcpu, idxreg, index, addrsize);
		assert(error == 0);

		/*
		 * Update count register only if the instruction had a repeat
		 * prefix.
		 */
		if (rep) {
			error = vie_update_register(vcpu, VM_REG_GUEST_RCX,
			    count, addrsize);
			assert(error == 0);
		}

		/* Restart the instruction if more iterations remain */
		if (retval == 0 && count != 0) {
			error = vm_restart_instruction(vcpu);
			assert(error == 0);
		}
	} else {
		eax = vmexit->u.inout.eax;
		val = eax & vie_size2mask(bytes);
		retval = handler(ctx, in, port, bytes, &val, arg);
		if (retval == 0 && in) {
			eax &= ~vie_size2mask(bytes);
			eax |= val & vie_size2mask(bytes);
			error = vm_set_register(vcpu, VM_REG_GUEST_RAX,
			    eax);
			assert(error == 0);
		}
	}
	return (retval);
}

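/*
 * Give every port the default handler, then bind the statically declared
 * handlers collected in the inout_port_set linker set.
 */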
void
init_inout(void)
{
	struct inout_port **iopp, *iop;

	/*
	 * Set up the default handler for all ports
	 */
	register_default_iohandler(0, MAX_IOPORTS);

	/*
	 * Overwrite with specified handlers
	 */
	SET_FOREACH(iopp, inout_port_set) {
		iop = *iopp;
		assert(iop->port < MAX_IOPORTS);
		inout_handlers[iop->port].name = iop->name;
		inout_handlers[iop->port].flags = iop->flags;
		inout_handlers[iop->port].handler = iop->handler;
		inout_handlers[iop->port].arg = NULL;
	}
}

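/*
 * Attach a handler to the port range described by 'iop'.  Fails with -1 if
 * any port in the range is already owned by a non-default handler.
 *
 * Illustrative use only (the names and port macro are hypothetical); a
 * caller fills in a struct inout_port much like register_default_iohandler()
 * above:
 *
 *	struct inout_port iop;
 *
 *	bzero(&iop, sizeof(iop));
 *	iop.name = "mydev";
 *	iop.port = MYDEV_IOPORT;
 *	iop.size = 4;
 *	iop.flags = IOPORT_F_INOUT;
 *	iop.handler = mydev_handler;
 *	iop.arg = sc;
 *	error = register_inout(&iop);
 */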
int
register_inout(struct inout_port *iop)
{
	int i;

	VERIFY_IOPORT(iop->port, iop->size);

	/*
	 * Verify that the new registration is not overwriting an already
	 * allocated i/o range.
	 */
	if ((iop->flags & IOPORT_F_DEFAULT) == 0) {
		for (i = iop->port; i < iop->port + iop->size; i++) {
			if ((inout_handlers[i].flags & IOPORT_F_DEFAULT) == 0)
				return (-1);
		}
	}

	for (i = iop->port; i < iop->port + iop->size; i++) {
		inout_handlers[i].name = iop->name;
		inout_handlers[i].flags = iop->flags;
		inout_handlers[i].handler = iop->handler;
		inout_handlers[i].arg = iop->arg;
	}

	return (0);
}

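/*
 * Detach the handler previously registered for 'iop' and restore the
 * default handler over its port range.
 */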
int
unregister_inout(struct inout_port *iop)
{

	VERIFY_IOPORT(iop->port, iop->size);
	assert(inout_handlers[iop->port].name == iop->name);

	register_default_iohandler(iop->port, iop->size);

	return (0);
}