/*-
 * Copyright (c) 2014, Neel Natu (neel@freebsd.org)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/amd/svm_msr.c 276403 2014-12-30 08:24:14Z neel $");

#include <sys/types.h>
#include <sys/errno.h>

#include <machine/cpufunc.h>
#include <machine/specialreg.h>

#include "svm_msr.h"

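/*
 * Fall back to a local definition of the "Interrupt Pending Message"
 * MSR if <machine/specialreg.h> does not provide one.
 */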
#ifndef MSR_AMDK8_IPM
#define	MSR_AMDK8_IPM	0xc0010055
#endif

enum {
	IDX_MSR_LSTAR,
	IDX_MSR_CSTAR,
	IDX_MSR_STAR,
	IDX_MSR_SF_MASK,
	HOST_MSR_NUM		/* must be the last enumeration */
};

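/*
 * Host values of the MSRs enumerated above. They are cached once at
 * initialization and restored "by hand" on the way out of the guest.
 */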
static uint64_t host_msrs[HOST_MSR_NUM];

void
svm_msr_init(void)
{
	/*
	 * It is safe to cache the values of the following MSRs because they
	 * don't change based on curcpu, curproc or curthread.
	 */
	host_msrs[IDX_MSR_LSTAR] = rdmsr(MSR_LSTAR);
	host_msrs[IDX_MSR_CSTAR] = rdmsr(MSR_CSTAR);
	host_msrs[IDX_MSR_STAR] = rdmsr(MSR_STAR);
	host_msrs[IDX_MSR_SF_MASK] = rdmsr(MSR_SF_MASK);
}

void
svm_msr_guest_init(struct svm_softc *sc, int vcpu)
{
	/*
	 * All the MSRs accessible to the guest are either saved/restored by
	 * hardware on every #VMEXIT/VMRUN (e.g., G_PAT) or are saved/restored
	 * by VMSAVE/VMLOAD (e.g., MSR_GSBASE).
	 *
	 * There are no guest MSRs that are saved/restored "by hand" so nothing
	 * more to do here.
	 */
	return;
}

void
svm_msr_guest_enter(struct svm_softc *sc, int vcpu)
{
	/*
	 * Save host MSRs (if any) and restore guest MSRs (if any).
	 */
}

void
svm_msr_guest_exit(struct svm_softc *sc, int vcpu)
{
	/*
	 * Save guest MSRs (if any) and restore host MSRs.
	 */
	wrmsr(MSR_LSTAR, host_msrs[IDX_MSR_LSTAR]);
	wrmsr(MSR_CSTAR, host_msrs[IDX_MSR_CSTAR]);
	wrmsr(MSR_STAR, host_msrs[IDX_MSR_STAR]);
	wrmsr(MSR_SF_MASK, host_msrs[IDX_MSR_SF_MASK]);

	/* MSR_KGSBASE will be restored on the way back to userspace */
}

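/*
 * Emulate a guest rdmsr. Reads of MSR_AMDK8_IPM return 0; reads of any
 * other MSR fail with EINVAL. '*retu' is left alone here; a handler
 * would set it if the access had to be completed in userspace.
 */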
int
svm_rdmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t *result,
    bool *retu)
{
	int error = 0;

	switch (num) {
	case MSR_AMDK8_IPM:
		*result = 0;
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

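/*
 * Emulate a guest wrmsr. Writes to the "Interrupt Pending Message" MSR
 * are silently dropped; writes to any other MSR fail with EINVAL.
 */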
int
svm_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val, bool *retu)
{
	int error = 0;

	switch (num) {
	case MSR_AMDK8_IPM:
		/*
		 * Ignore writes to the "Interrupt Pending Message" MSR.
		 */
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}