svm_msr.c revision 284900
/*-
 * Copyright (c) 2014, Neel Natu (neel@freebsd.org)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/amd/svm_msr.c 284900 2015-06-28 03:22:26Z neel $");

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/systm.h>

#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/vmm.h>

#include "svm.h"
#include "vmcb.h"
#include "svm_softc.h"
#include "svm_msr.h"

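/*
 * MSR_AMDK8_IPM (the "Interrupt Pending Message" MSR emulated below) is
 * defined here as a fallback in case <machine/specialreg.h> does not
 * provide it.
 */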
#ifndef MSR_AMDK8_IPM
#define	MSR_AMDK8_IPM	0xc0010055
#endif

enum {
	IDX_MSR_LSTAR,
	IDX_MSR_CSTAR,
	IDX_MSR_STAR,
	IDX_MSR_SF_MASK,
	HOST_MSR_NUM		/* must be the last enumeration */
};

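/*
 * Host values of the SYSCALL-related MSRs indexed above; they are read
 * once in svm_msr_init() and written back after a guest run in
 * svm_msr_guest_exit().
 */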
static uint64_t host_msrs[HOST_MSR_NUM];

void
svm_msr_init(void)
{
	/*
	 * It is safe to cache the values of the following MSRs because they
	 * don't change based on curcpu, curproc or curthread.
	 */
	host_msrs[IDX_MSR_LSTAR] = rdmsr(MSR_LSTAR);
	host_msrs[IDX_MSR_CSTAR] = rdmsr(MSR_CSTAR);
	host_msrs[IDX_MSR_STAR] = rdmsr(MSR_STAR);
	host_msrs[IDX_MSR_SF_MASK] = rdmsr(MSR_SF_MASK);
}

void
svm_msr_guest_init(struct svm_softc *sc, int vcpu)
{
	/*
	 * All the MSRs accessible to the guest are either saved/restored by
	 * hardware on every #VMEXIT/VMRUN (e.g., G_PAT) or are saved/restored
	 * by VMSAVE/VMLOAD (e.g., MSR_GSBASE).
	 *
	 * There are no guest MSRs that are saved/restored "by hand" so nothing
	 * more to do here.
	 */
	return;
}

void
svm_msr_guest_enter(struct svm_softc *sc, int vcpu)
{
	/*
	 * Save host MSRs (if any) and restore guest MSRs (if any).
	 */
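	/*
	 * Nothing is done here at present: the host values of the SYSCALL
	 * MSRs were cached in svm_msr_init() and are restored in
	 * svm_msr_guest_exit(), and no guest MSRs are loaded by hand
	 * (see svm_msr_guest_init()).
	 */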
}

void
svm_msr_guest_exit(struct svm_softc *sc, int vcpu)
{
	/*
	 * Save guest MSRs (if any) and restore host MSRs.
	 */
	wrmsr(MSR_LSTAR, host_msrs[IDX_MSR_LSTAR]);
	wrmsr(MSR_CSTAR, host_msrs[IDX_MSR_CSTAR]);
	wrmsr(MSR_STAR, host_msrs[IDX_MSR_STAR]);
	wrmsr(MSR_SF_MASK, host_msrs[IDX_MSR_SF_MASK]);

	/* MSR_KGSBASE will be restored on the way back to userspace */
}

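/*
 * MSRs that are intercepted but not otherwise virtualized: machine-check
 * (MCG_CAP/MCG_STATUS), the MTRR registers, SYSCFG and MSR_AMDK8_IPM all
 * read as zero here.  Reads of any other MSR return EINVAL so the caller
 * can decide how to handle the access.
 */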
int
svm_rdmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t *result,
    bool *retu)
{
	int error = 0;

	switch (num) {
	case MSR_MCG_CAP:
	case MSR_MCG_STATUS:
		*result = 0;
		break;
	case MSR_MTRRcap:
	case MSR_MTRRdefType:
	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 8:
	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
	case MSR_MTRR64kBase:
	case MSR_SYSCFG:
		*result = 0;
		break;
	case MSR_AMDK8_IPM:
		*result = 0;
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

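/*
 * Writes to the same set of MSRs are silently ignored, with one exception:
 * MSR_MTRRcap is read-only, so a guest write injects #GP.  Writes to any
 * other MSR return EINVAL.
 */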
int
svm_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val, bool *retu)
{
	int error = 0;

	switch (num) {
	case MSR_MCG_CAP:
	case MSR_MCG_STATUS:
		break;		/* ignore writes */
	case MSR_MTRRcap:
		vm_inject_gp(sc->vm, vcpu);
		break;
	case MSR_MTRRdefType:
	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 8:
	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
	case MSR_MTRR64kBase:
	case MSR_SYSCFG:
		break;		/* Ignore writes */
	case MSR_AMDK8_IPM:
		/*
		 * Ignore writes to the "Interrupt Pending Message" MSR.
		 */
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}
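
/*
 * Illustrative sketch (not part of the original file): the #VMEXIT handler
 * that intercepts MSR accesses is expected to call into the functions above
 * roughly as follows.  How 'num' and 'val' are pulled out of the guest's
 * ecx/edx:eax, and what is done on EINVAL or when 'retu' is set, are
 * assumptions for illustration rather than code taken from svm.c.
 *
 *	bool retu = false;
 *	uint64_t result;
 *	int error;
 *
 *	if (guest_is_writing)
 *		error = svm_wrmsr(sc, vcpu, num, val, &retu);
 *	else
 *		error = svm_rdmsr(sc, vcpu, num, &result, &retu);
 *
 * On success with 'retu' clear, the access was fully handled here (for a
 * read, 'result' would be copied back into the guest's edx:eax).  On EINVAL,
 * or when a handler sets 'retu', the access would typically be forwarded to
 * the userspace part of the hypervisor for further emulation.
 */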