/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __KVM_HOST_H
#define __KVM_HOST_H


#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/stat.h>
#include <linux/bug.h>
#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <linux/irqbypass.h>
#include <linux/rcuwait.h>
#include <linux/refcount.h>
#include <linux/nospec.h>
#include <linux/notifier.h>
#include <linux/ftrace.h>
#include <linux/hashtable.h>
#include <linux/instrumentation.h>
#include <linux/interval_tree.h>
#include <linux/rbtree.h>
#include <linux/xarray.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>
#include <linux/kvm_dirty_ring.h>

#ifndef KVM_MAX_VCPU_IDS
#define KVM_MAX_VCPU_IDS KVM_MAX_VCPUS
#endif

/*
 * Bits 16 ~ 31 of kvm_userspace_memory_region::flags are used internally by
 * KVM; the other bits are visible to userspace and are defined in
 * include/uapi/linux/kvm.h.
 */
#define KVM_MEMSLOT_INVALID	(1UL << 16)

/*
 * Bit 63 of the memslot generation number is an "update in-progress flag",
 * e.g. is temporarily set for the duration of kvm_swap_active_memslots().
 * This flag effectively creates a unique generation number that is used to
 * mark cached memslot data, e.g. MMIO accesses, as potentially being stale,
 * i.e. may (or may not) have come from the previous memslots generation.
 *
 * This is necessary because the actual memslots update is not atomic with
 * respect to the generation number update. Updating the generation number
 * first would allow a vCPU to cache a spte from the old memslots using the
 * new generation number, and updating the generation number after switching
 * to the new memslots would allow cache hits using the old generation number
 * to reference the defunct memslots.
 *
 * This mechanism is used to prevent getting hits in KVM's caches while a
 * memslot update is in-progress, and to prevent cache hits *after* updating
 * the actual generation number against accesses that were inserted into the
 * cache *before* the memslots were updated.
 */
#define KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS	BIT_ULL(63)

/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS	2

#ifndef KVM_MAX_NR_ADDRESS_SPACES
#define KVM_MAX_NR_ADDRESS_SPACES	1
#endif

/*
 * For a normal pfn, the highest 12 bits should be zero, so we can use
 * bits 62 ~ 52 to indicate an error pfn, and bit 63 to indicate a
 * noslot pfn.
 */
#define KVM_PFN_ERR_MASK	(0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK	(0xfffULL << 52)
#define KVM_PFN_NOSLOT		(0x1ULL << 63)

#define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 2)
#define KVM_PFN_ERR_SIGPENDING	(KVM_PFN_ERR_MASK + 3)

/*
 * Error pfns indicate that the gfn is in a slot, but translating it to a
 * pfn on the host failed.
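 *
 * For example, an illustrative (not prescriptive) caller pattern, using
 * gfn_to_pfn() declared later in this header and the helper defined below:
 *
 *	kvm_pfn_t pfn = gfn_to_pfn(kvm, gfn);
 *
 *	if (is_error_pfn(pfn))
 *		return -EFAULT;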
 */
static inline bool is_error_pfn(kvm_pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_MASK);
}

/*
 * KVM_PFN_ERR_SIGPENDING indicates that fetching the PFN was interrupted
 * by a pending signal. Note, the signal may or may not be fatal.
 */
static inline bool is_sigpending_pfn(kvm_pfn_t pfn)
{
	return pfn == KVM_PFN_ERR_SIGPENDING;
}

/*
 * error_noslot pfns indicate that the gfn cannot be translated to a pfn:
 * either the gfn is not in any slot, or translating it to a pfn failed.
 */
static inline bool is_error_noslot_pfn(kvm_pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* A noslot pfn indicates that the gfn is not in any slot. */
static inline bool is_noslot_pfn(kvm_pfn_t pfn)
{
	return pfn == KVM_PFN_NOSLOT;
}

/*
 * Architectures whose KVM_HVA_ERR_BAD is something other than PAGE_OFFSET
 * (e.g. s390) provide their own defines and kvm_is_error_hva().
 */
#ifndef KVM_HVA_ERR_BAD

#define KVM_HVA_ERR_BAD		(PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD	(PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return addr >= PAGE_OFFSET;
}

#endif

static inline bool kvm_is_error_gpa(gpa_t gpa)
{
	return gpa == INVALID_GPA;
}

#define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
	return IS_ERR(page);
}

#define KVM_REQUEST_MASK	GENMASK(7,0)
#define KVM_REQUEST_NO_WAKEUP	BIT(8)
#define KVM_REQUEST_WAIT	BIT(9)
#define KVM_REQUEST_NO_ACTION	BIT(10)
/*
 * Architecture-independent vcpu->requests bit members
 * Bits 4-7 are reserved for more arch-independent bits.
 */
#define KVM_REQ_TLB_FLUSH		(0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_VM_DEAD			(1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_UNBLOCK			2
#define KVM_REQ_DIRTY_RING_SOFT_FULL	3
#define KVM_REQUEST_ARCH_BASE		8

/*
 * KVM_REQ_OUTSIDE_GUEST_MODE exists purely as a way to force the vCPU to
 * OUTSIDE_GUEST_MODE. KVM_REQ_OUTSIDE_GUEST_MODE differs from a vCPU "kick"
 * in that it ensures the vCPU has reached OUTSIDE_GUEST_MODE before continuing
 * on. A kick only guarantees that the vCPU is on its way out, e.g. a previous
 * kick may have set vcpu->mode to EXITING_GUEST_MODE, and so there's no
 * guarantee the vCPU received an IPI and has actually exited guest mode.
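 *
 * As an illustration (not a prescription), a caller that must ensure no vCPU
 * is still running in guest mode before touching some per-VM state could do:
 *
 *	kvm_make_all_cpus_request(kvm, KVM_REQ_OUTSIDE_GUEST_MODE);
 *
 * which, because of KVM_REQUEST_WAIT, does not return until every vCPU that
 * was in guest mode has been kicked out to OUTSIDE_GUEST_MODE.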
184 */ 185#define KVM_REQ_OUTSIDE_GUEST_MODE (KVM_REQUEST_NO_ACTION | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) 186 187#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \ 188 BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \ 189 (unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \ 190}) 191#define KVM_ARCH_REQ(nr) KVM_ARCH_REQ_FLAGS(nr, 0) 192 193bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, 194 unsigned long *vcpu_bitmap); 195bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req); 196 197#define KVM_USERSPACE_IRQ_SOURCE_ID 0 198#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 199 200extern struct mutex kvm_lock; 201extern struct list_head vm_list; 202 203struct kvm_io_range { 204 gpa_t addr; 205 int len; 206 struct kvm_io_device *dev; 207}; 208 209#define NR_IOBUS_DEVS 1000 210 211struct kvm_io_bus { 212 int dev_count; 213 int ioeventfd_count; 214 struct kvm_io_range range[]; 215}; 216 217enum kvm_bus { 218 KVM_MMIO_BUS, 219 KVM_PIO_BUS, 220 KVM_VIRTIO_CCW_NOTIFY_BUS, 221 KVM_FAST_MMIO_BUS, 222 KVM_NR_BUSES 223}; 224 225int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 226 int len, const void *val); 227int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, 228 gpa_t addr, int len, const void *val, long cookie); 229int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 230 int len, void *val); 231int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 232 int len, struct kvm_io_device *dev); 233int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, 234 struct kvm_io_device *dev); 235struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, 236 gpa_t addr); 237 238#ifdef CONFIG_KVM_ASYNC_PF 239struct kvm_async_pf { 240 struct work_struct work; 241 struct list_head link; 242 struct list_head queue; 243 struct kvm_vcpu *vcpu; 244 gpa_t cr2_or_gpa; 245 unsigned long addr; 246 struct kvm_arch_async_pf arch; 247 bool wakeup_all; 248 bool notpresent_injected; 249}; 250 251void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu); 252void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu); 253bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, 254 unsigned long hva, struct kvm_arch_async_pf *arch); 255int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu); 256#endif 257 258#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER 259union kvm_mmu_notifier_arg { 260 unsigned long attributes; 261}; 262 263struct kvm_gfn_range { 264 struct kvm_memory_slot *slot; 265 gfn_t start; 266 gfn_t end; 267 union kvm_mmu_notifier_arg arg; 268 bool may_block; 269}; 270bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range); 271bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range); 272bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range); 273#endif 274 275enum { 276 OUTSIDE_GUEST_MODE, 277 IN_GUEST_MODE, 278 EXITING_GUEST_MODE, 279 READING_SHADOW_PAGE_TABLES, 280}; 281 282#define KVM_UNMAPPED_PAGE ((void *) 0x500 + POISON_POINTER_DELTA) 283 284struct kvm_host_map { 285 /* 286 * Only valid if the 'pfn' is managed by the host kernel (i.e. There is 287 * a 'struct page' for it. When using mem= kernel parameter some memory 288 * can be used as guest memory but they are not managed by host 289 * kernel). 290 * If 'pfn' is not managed by the host kernel, this field is 291 * initialized to KVM_UNMAPPED_PAGE. 
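	 *
	 * An illustrative sketch (not the only valid pattern) of the mapping
	 * API built around this struct, using kvm_vcpu_map()/kvm_vcpu_unmap()
	 * declared later in this header:
	 *
	 *	struct kvm_host_map map;
	 *
	 *	if (kvm_vcpu_map(vcpu, gpa, &map))
	 *		return -EFAULT;
	 *	... access the page through map.hva ...
	 *	kvm_vcpu_unmap(vcpu, &map, true);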
	 */
	struct page *page;
	void *hva;
	kvm_pfn_t pfn;
	kvm_pfn_t gfn;
};

/*
 * Used to check if the mapping is valid or not. Never use 'kvm_host_map'
 * directly to check for that.
 */
static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
{
	return !!map->hva;
}

static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop)
{
	return single_task_running() && !need_resched() && ktime_before(cur, stop);
}

/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
	gpa_t gpa;
	void *data;
	unsigned len;
};

struct kvm_vcpu {
	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int cpu;
	int vcpu_id; /* id given by userspace at creation */
	int vcpu_idx; /* index into kvm->vcpu_array */
	int ____srcu_idx; /* Don't use this directly. You've been warned. */
#ifdef CONFIG_PROVE_RCU
	int srcu_depth;
#endif
	int mode;
	u64 requests;
	unsigned long guest_debug;

	struct mutex mutex;
	struct kvm_run *run;

#ifndef __KVM_HAVE_ARCH_WQP
	struct rcuwait wait;
#endif
	struct pid __rcu *pid;
	int sigset_active;
	sigset_t sigset;
	unsigned int halt_poll_ns;
	bool valid_wakeup;

#ifdef CONFIG_HAS_IOMEM
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_cur_fragment;
	int mmio_nr_fragments;
	struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
	struct {
		u32 queued;
		struct list_head queue;
		struct list_head done;
		spinlock_t lock;
	} async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
	/*
	 * CPU relax intercept or pause-loop-exit optimization.
	 * in_spin_loop: set when a vcpu does a pause loop exit or has its
	 * cpu relax intercepted.
	 * dy_eligible: indicates whether vcpu is eligible for directed yield.
	 */
	struct {
		bool in_spin_loop;
		bool dy_eligible;
	} spin_loop;
#endif
	bool preempted;
	bool ready;
	struct kvm_vcpu_arch arch;
	struct kvm_vcpu_stat stat;
	char stats_id[KVM_STATS_NAME_SIZE];
	struct kvm_dirty_ring dirty_ring;

	/*
	 * The most recently used memslot by this vCPU and the slots generation
	 * for which it is valid.
	 * No wraparound protection is needed since generations won't overflow in
	 * thousands of years, even assuming 1M memslot operations per second.
	 */
	struct kvm_memory_slot *last_used_slot;
	u64 last_used_slot_gen;
};

/*
 * Start accounting time towards a guest.
 * Must be called before entering guest context.
 */
static __always_inline void guest_timing_enter_irqoff(void)
{
	/*
	 * This is running in ioctl context so it's safe to assume that it's
	 * the stime pending cputime to flush.
	 */
	instrumentation_begin();
	vtime_account_guest_enter();
	instrumentation_end();
}

/*
 * Enter guest context and enter an RCU extended quiescent state.
 *
 * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
 * unsafe to use any code which may directly or indirectly use RCU, tracing
 * (including IRQ flag tracing), or lockdep. All code in this period must be
 * non-instrumentable.
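 *
 * Taken together with the timing helpers, a simplified, illustrative arch
 * entry/exit sequence (not lifted from any particular architecture) looks
 * roughly like:
 *
 *	guest_timing_enter_irqoff();
 *	guest_state_enter_irqoff();
 *	... run the guest ...
 *	guest_state_exit_irqoff();
 *	guest_timing_exit_irqoff();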
 */
static __always_inline void guest_context_enter_irqoff(void)
{
	/*
	 * KVM does not hold any references to RCU-protected data when it
	 * switches the CPU into guest mode. In fact, switching to guest mode
	 * is very similar to exiting to userspace from RCU's point of view.
	 * In addition, the CPU may stay in guest mode for quite a long time
	 * (up to one time slice). Let's treat guest mode as a quiescent
	 * state, just like we do with user-mode execution.
	 */
	if (!context_tracking_guest_enter()) {
		instrumentation_begin();
		rcu_virt_note_context_switch();
		instrumentation_end();
	}
}

/*
 * Deprecated. Architectures should move to guest_timing_enter_irqoff() and
 * guest_state_enter_irqoff().
 */
static __always_inline void guest_enter_irqoff(void)
{
	guest_timing_enter_irqoff();
	guest_context_enter_irqoff();
}

/**
 * guest_state_enter_irqoff - Fixup state when entering a guest
 *
 * Entry to a guest will enable interrupts, but the kernel state is interrupts
 * disabled when this is invoked. Also tell RCU about it.
 *
 * 1) Trace interrupts on state
 * 2) Invoke context tracking if enabled to adjust RCU state
 * 3) Tell lockdep that interrupts are enabled
 *
 * Invoked from architecture specific code before entering a guest.
 * Must be called with interrupts disabled and the caller must be
 * non-instrumentable.
 * The caller has to invoke guest_timing_enter_irqoff() before this.
 *
 * Note: this is analogous to exit_to_user_mode().
 */
static __always_inline void guest_state_enter_irqoff(void)
{
	instrumentation_begin();
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare();
	instrumentation_end();

	guest_context_enter_irqoff();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

/*
 * Exit guest context and exit an RCU extended quiescent state.
 *
 * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
 * unsafe to use any code which may directly or indirectly use RCU, tracing
 * (including IRQ flag tracing), or lockdep. All code in this period must be
 * non-instrumentable.
 */
static __always_inline void guest_context_exit_irqoff(void)
{
	context_tracking_guest_exit();
}

/*
 * Stop accounting time towards a guest.
 * Must be called after exiting guest context.
 */
static __always_inline void guest_timing_exit_irqoff(void)
{
	instrumentation_begin();
	/* Flush the guest cputime we spent on the guest */
	vtime_account_guest_exit();
	instrumentation_end();
}

/*
 * Deprecated. Architectures should move to guest_state_exit_irqoff() and
 * guest_timing_exit_irqoff().
 */
static __always_inline void guest_exit_irqoff(void)
{
	guest_context_exit_irqoff();
	guest_timing_exit_irqoff();
}

static inline void guest_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	guest_exit_irqoff();
	local_irq_restore(flags);
}

/**
 * guest_state_exit_irqoff - Establish state when returning from guest mode
 *
 * Entry from a guest disables interrupts, but guest mode is traced as
 * interrupts enabled. Also with NO_HZ_FULL RCU might be idle.
 *
 * 1) Tell lockdep that interrupts are disabled
 * 2) Invoke context tracking if enabled to reactivate RCU
 * 3) Trace interrupts off state
 *
 * Invoked from architecture specific code after exiting a guest.
531 * Must be invoked with interrupts disabled and the caller must be 532 * non-instrumentable. 533 * The caller has to invoke guest_timing_exit_irqoff() after this. 534 * 535 * Note: this is analogous to enter_from_user_mode(). 536 */ 537static __always_inline void guest_state_exit_irqoff(void) 538{ 539 lockdep_hardirqs_off(CALLER_ADDR0); 540 guest_context_exit_irqoff(); 541 542 instrumentation_begin(); 543 trace_hardirqs_off_finish(); 544 instrumentation_end(); 545} 546 547static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu) 548{ 549 /* 550 * The memory barrier ensures a previous write to vcpu->requests cannot 551 * be reordered with the read of vcpu->mode. It pairs with the general 552 * memory barrier following the write of vcpu->mode in VCPU RUN. 553 */ 554 smp_mb__before_atomic(); 555 return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE); 556} 557 558/* 559 * Some of the bitops functions do not support too long bitmaps. 560 * This number must be determined not to exceed such limits. 561 */ 562#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1) 563 564/* 565 * Since at idle each memslot belongs to two memslot sets it has to contain 566 * two embedded nodes for each data structure that it forms a part of. 567 * 568 * Two memslot sets (one active and one inactive) are necessary so the VM 569 * continues to run on one memslot set while the other is being modified. 570 * 571 * These two memslot sets normally point to the same set of memslots. 572 * They can, however, be desynchronized when performing a memslot management 573 * operation by replacing the memslot to be modified by its copy. 574 * After the operation is complete, both memslot sets once again point to 575 * the same, common set of memslot data. 576 * 577 * The memslots themselves are independent of each other so they can be 578 * individually added or deleted. 
579 */ 580struct kvm_memory_slot { 581 struct hlist_node id_node[2]; 582 struct interval_tree_node hva_node[2]; 583 struct rb_node gfn_node[2]; 584 gfn_t base_gfn; 585 unsigned long npages; 586 unsigned long *dirty_bitmap; 587 struct kvm_arch_memory_slot arch; 588 unsigned long userspace_addr; 589 u32 flags; 590 short id; 591 u16 as_id; 592 593#ifdef CONFIG_KVM_PRIVATE_MEM 594 struct { 595 struct file __rcu *file; 596 pgoff_t pgoff; 597 } gmem; 598#endif 599}; 600 601static inline bool kvm_slot_can_be_private(const struct kvm_memory_slot *slot) 602{ 603 return slot && (slot->flags & KVM_MEM_GUEST_MEMFD); 604} 605 606static inline bool kvm_slot_dirty_track_enabled(const struct kvm_memory_slot *slot) 607{ 608 return slot->flags & KVM_MEM_LOG_DIRTY_PAGES; 609} 610 611static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot) 612{ 613 return ALIGN(memslot->npages, BITS_PER_LONG) / 8; 614} 615 616static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot) 617{ 618 unsigned long len = kvm_dirty_bitmap_bytes(memslot); 619 620 return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap); 621} 622 623#ifndef KVM_DIRTY_LOG_MANUAL_CAPS 624#define KVM_DIRTY_LOG_MANUAL_CAPS KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE 625#endif 626 627struct kvm_s390_adapter_int { 628 u64 ind_addr; 629 u64 summary_addr; 630 u64 ind_offset; 631 u32 summary_offset; 632 u32 adapter_id; 633}; 634 635struct kvm_hv_sint { 636 u32 vcpu; 637 u32 sint; 638}; 639 640struct kvm_xen_evtchn { 641 u32 port; 642 u32 vcpu_id; 643 int vcpu_idx; 644 u32 priority; 645}; 646 647struct kvm_kernel_irq_routing_entry { 648 u32 gsi; 649 u32 type; 650 int (*set)(struct kvm_kernel_irq_routing_entry *e, 651 struct kvm *kvm, int irq_source_id, int level, 652 bool line_status); 653 union { 654 struct { 655 unsigned irqchip; 656 unsigned pin; 657 } irqchip; 658 struct { 659 u32 address_lo; 660 u32 address_hi; 661 u32 data; 662 u32 flags; 663 u32 devid; 664 } msi; 665 struct kvm_s390_adapter_int adapter; 666 struct kvm_hv_sint hv_sint; 667 struct kvm_xen_evtchn xen_evtchn; 668 }; 669 struct hlist_node link; 670}; 671 672#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 673struct kvm_irq_routing_table { 674 int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS]; 675 u32 nr_rt_entries; 676 /* 677 * Array indexed by gsi. Each entry contains list of irq chips 678 * the gsi is connected to. 679 */ 680 struct hlist_head map[] __counted_by(nr_rt_entries); 681}; 682#endif 683 684bool kvm_arch_irqchip_in_kernel(struct kvm *kvm); 685 686#ifndef KVM_INTERNAL_MEM_SLOTS 687#define KVM_INTERNAL_MEM_SLOTS 0 688#endif 689 690#define KVM_MEM_SLOTS_NUM SHRT_MAX 691#define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_INTERNAL_MEM_SLOTS) 692 693#if KVM_MAX_NR_ADDRESS_SPACES == 1 694static inline int kvm_arch_nr_memslot_as_ids(struct kvm *kvm) 695{ 696 return KVM_MAX_NR_ADDRESS_SPACES; 697} 698 699static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu) 700{ 701 return 0; 702} 703#endif 704 705/* 706 * Arch code must define kvm_arch_has_private_mem if support for private memory 707 * is enabled. 708 */ 709#if !defined(kvm_arch_has_private_mem) && !IS_ENABLED(CONFIG_KVM_PRIVATE_MEM) 710static inline bool kvm_arch_has_private_mem(struct kvm *kvm) 711{ 712 return false; 713} 714#endif 715 716struct kvm_memslots { 717 u64 generation; 718 atomic_long_t last_used_slot; 719 struct rb_root_cached hva_tree; 720 struct rb_root gfn_tree; 721 /* 722 * The mapping table from slot id to memslot. 
723 * 724 * 7-bit bucket count matches the size of the old id to index array for 725 * 512 slots, while giving good performance with this slot count. 726 * Higher bucket counts bring only small performance improvements but 727 * always result in higher memory usage (even for lower memslot counts). 728 */ 729 DECLARE_HASHTABLE(id_hash, 7); 730 int node_idx; 731}; 732 733struct kvm { 734#ifdef KVM_HAVE_MMU_RWLOCK 735 rwlock_t mmu_lock; 736#else 737 spinlock_t mmu_lock; 738#endif /* KVM_HAVE_MMU_RWLOCK */ 739 740 struct mutex slots_lock; 741 742 /* 743 * Protects the arch-specific fields of struct kvm_memory_slots in 744 * use by the VM. To be used under the slots_lock (above) or in a 745 * kvm->srcu critical section where acquiring the slots_lock would 746 * lead to deadlock with the synchronize_srcu in 747 * kvm_swap_active_memslots(). 748 */ 749 struct mutex slots_arch_lock; 750 struct mm_struct *mm; /* userspace tied to this vm */ 751 unsigned long nr_memslot_pages; 752 /* The two memslot sets - active and inactive (per address space) */ 753 struct kvm_memslots __memslots[KVM_MAX_NR_ADDRESS_SPACES][2]; 754 /* The current active memslot set for each address space */ 755 struct kvm_memslots __rcu *memslots[KVM_MAX_NR_ADDRESS_SPACES]; 756 struct xarray vcpu_array; 757 /* 758 * Protected by slots_lock, but can be read outside if an 759 * incorrect answer is acceptable. 760 */ 761 atomic_t nr_memslots_dirty_logging; 762 763 /* Used to wait for completion of MMU notifiers. */ 764 spinlock_t mn_invalidate_lock; 765 unsigned long mn_active_invalidate_count; 766 struct rcuwait mn_memslots_update_rcuwait; 767 768 /* For management / invalidation of gfn_to_pfn_caches */ 769 spinlock_t gpc_lock; 770 struct list_head gpc_list; 771 772 /* 773 * created_vcpus is protected by kvm->lock, and is incremented 774 * at the beginning of KVM_CREATE_VCPU. online_vcpus is only 775 * incremented after storing the kvm_vcpu pointer in vcpus, 776 * and is accessed atomically. 777 */ 778 atomic_t online_vcpus; 779 int max_vcpus; 780 int created_vcpus; 781 int last_boosted_vcpu; 782 struct list_head vm_list; 783 struct mutex lock; 784 struct kvm_io_bus __rcu *buses[KVM_NR_BUSES]; 785#ifdef CONFIG_HAVE_KVM_IRQCHIP 786 struct { 787 spinlock_t lock; 788 struct list_head items; 789 /* resampler_list update side is protected by resampler_lock. */ 790 struct list_head resampler_list; 791 struct mutex resampler_lock; 792 } irqfds; 793#endif 794 struct list_head ioeventfds; 795 struct kvm_vm_stat stat; 796 struct kvm_arch arch; 797 refcount_t users_count; 798#ifdef CONFIG_KVM_MMIO 799 struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; 800 spinlock_t ring_lock; 801 struct list_head coalesced_zones; 802#endif 803 804 struct mutex irq_lock; 805#ifdef CONFIG_HAVE_KVM_IRQCHIP 806 /* 807 * Update side is protected by irq_lock. 
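	 * Readers are expected to dereference it under kvm->irq_srcu, e.g.
	 * (illustrative sketch only):
	 *
	 *	int idx = srcu_read_lock(&kvm->irq_srcu);
	 *	struct kvm_irq_routing_table *irq_rt =
	 *		srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
	 *	...
	 *	srcu_read_unlock(&kvm->irq_srcu, idx);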
	 */
	struct kvm_irq_routing_table __rcu *irq_routing;

	struct hlist_head irq_ack_notifier_list;
#endif

#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_invalidate_seq;
	long mmu_invalidate_in_progress;
	gfn_t mmu_invalidate_range_start;
	gfn_t mmu_invalidate_range_end;
#endif
	struct list_head devices;
	u64 manual_dirty_log_protect;
	struct dentry *debugfs_dentry;
	struct kvm_stat_data **debugfs_stat_data;
	struct srcu_struct srcu;
	struct srcu_struct irq_srcu;
	pid_t userspace_pid;
	bool override_halt_poll_ns;
	unsigned int max_halt_poll_ns;
	u32 dirty_ring_size;
	bool dirty_ring_with_bitmap;
	bool vm_bugged;
	bool vm_dead;

#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
	struct notifier_block pm_notifier;
#endif
#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
	/* Protected by slots_lock (for writes) and RCU (for reads) */
	struct xarray mem_attr_array;
#endif
	char stats_id[KVM_STATS_NAME_SIZE];
};

#define kvm_err(fmt, ...) \
	pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
	pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
	pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug_ratelimited(fmt, ...) \
	pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \
			     ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
	pr_err_ratelimited("kvm [%i]: " fmt, \
			   task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...) \
	kvm_pr_unimpl("vcpu%i, guest rIP: 0x%lx " fmt, \
		      (vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__)

#define vcpu_debug(vcpu, fmt, ...) \
	kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
#define vcpu_debug_ratelimited(vcpu, fmt, ...) \
	kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id, \
			      ## __VA_ARGS__)
#define vcpu_err(vcpu, fmt, ...) \
	kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline void kvm_vm_dead(struct kvm *kvm)
{
	kvm->vm_dead = true;
	kvm_make_all_cpus_request(kvm, KVM_REQ_VM_DEAD);
}

static inline void kvm_vm_bugged(struct kvm *kvm)
{
	kvm->vm_bugged = true;
	kvm_vm_dead(kvm);
}


#define KVM_BUG(cond, kvm, fmt...)				\
({								\
	bool __ret = !!(cond);					\
								\
	if (WARN_ONCE(__ret && !(kvm)->vm_bugged, fmt))		\
		kvm_vm_bugged(kvm);				\
	unlikely(__ret);					\
})

#define KVM_BUG_ON(cond, kvm)					\
({								\
	bool __ret = !!(cond);					\
								\
	if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged))		\
		kvm_vm_bugged(kvm);				\
	unlikely(__ret);					\
})

/*
 * Note, "data corruption" refers to corruption of host kernel data structures,
 * not guest data. Guest data corruption, suspected or confirmed, that is tied
 * to and contained within a single VM should *never* BUG() and potentially
 * panic the host, i.e. use this variant of KVM_BUG() if and only if a KVM data
 * structure is corrupted and that corruption can have a cascading effect on
 * other parts of the host and/or on other VMs.
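 *
 * Illustrative (hypothetical) usage, where 'bad' stands for some consistency
 * check on a KVM-internal structure:
 *
 *	if (KVM_BUG_ON_DATA_CORRUPTION(bad, kvm))
 *		return -EIO;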
909 */ 910#define KVM_BUG_ON_DATA_CORRUPTION(cond, kvm) \ 911({ \ 912 bool __ret = !!(cond); \ 913 \ 914 if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) \ 915 BUG_ON(__ret); \ 916 else if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \ 917 kvm_vm_bugged(kvm); \ 918 unlikely(__ret); \ 919}) 920 921static inline void kvm_vcpu_srcu_read_lock(struct kvm_vcpu *vcpu) 922{ 923#ifdef CONFIG_PROVE_RCU 924 WARN_ONCE(vcpu->srcu_depth++, 925 "KVM: Illegal vCPU srcu_idx LOCK, depth=%d", vcpu->srcu_depth - 1); 926#endif 927 vcpu->____srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 928} 929 930static inline void kvm_vcpu_srcu_read_unlock(struct kvm_vcpu *vcpu) 931{ 932 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->____srcu_idx); 933 934#ifdef CONFIG_PROVE_RCU 935 WARN_ONCE(--vcpu->srcu_depth, 936 "KVM: Illegal vCPU srcu_idx UNLOCK, depth=%d", vcpu->srcu_depth); 937#endif 938} 939 940static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm) 941{ 942 return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET); 943} 944 945static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx) 946{ 947 return srcu_dereference_check(kvm->buses[idx], &kvm->srcu, 948 lockdep_is_held(&kvm->slots_lock) || 949 !refcount_read(&kvm->users_count)); 950} 951 952static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) 953{ 954 int num_vcpus = atomic_read(&kvm->online_vcpus); 955 i = array_index_nospec(i, num_vcpus); 956 957 /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu. */ 958 smp_rmb(); 959 return xa_load(&kvm->vcpu_array, i); 960} 961 962#define kvm_for_each_vcpu(idx, vcpup, kvm) \ 963 xa_for_each_range(&kvm->vcpu_array, idx, vcpup, 0, \ 964 (atomic_read(&kvm->online_vcpus) - 1)) 965 966static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id) 967{ 968 struct kvm_vcpu *vcpu = NULL; 969 unsigned long i; 970 971 if (id < 0) 972 return NULL; 973 if (id < KVM_MAX_VCPUS) 974 vcpu = kvm_get_vcpu(kvm, id); 975 if (vcpu && vcpu->vcpu_id == id) 976 return vcpu; 977 kvm_for_each_vcpu(i, vcpu, kvm) 978 if (vcpu->vcpu_id == id) 979 return vcpu; 980 return NULL; 981} 982 983void kvm_destroy_vcpus(struct kvm *kvm); 984 985void vcpu_load(struct kvm_vcpu *vcpu); 986void vcpu_put(struct kvm_vcpu *vcpu); 987 988#ifdef __KVM_HAVE_IOAPIC 989void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm); 990void kvm_arch_post_irq_routing_update(struct kvm *kvm); 991#else 992static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm) 993{ 994} 995static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm) 996{ 997} 998#endif 999 1000#ifdef CONFIG_HAVE_KVM_IRQCHIP 1001int kvm_irqfd_init(void); 1002void kvm_irqfd_exit(void); 1003#else 1004static inline int kvm_irqfd_init(void) 1005{ 1006 return 0; 1007} 1008 1009static inline void kvm_irqfd_exit(void) 1010{ 1011} 1012#endif 1013int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module); 1014void kvm_exit(void); 1015 1016void kvm_get_kvm(struct kvm *kvm); 1017bool kvm_get_kvm_safe(struct kvm *kvm); 1018void kvm_put_kvm(struct kvm *kvm); 1019bool file_is_kvm(struct file *file); 1020void kvm_put_kvm_no_destroy(struct kvm *kvm); 1021 1022static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id) 1023{ 1024 as_id = array_index_nospec(as_id, KVM_MAX_NR_ADDRESS_SPACES); 1025 return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu, 1026 lockdep_is_held(&kvm->slots_lock) || 1027 !refcount_read(&kvm->users_count)); 1028} 1029 1030static inline struct kvm_memslots 
*kvm_memslots(struct kvm *kvm) 1031{ 1032 return __kvm_memslots(kvm, 0); 1033} 1034 1035static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu) 1036{ 1037 int as_id = kvm_arch_vcpu_memslots_id(vcpu); 1038 1039 return __kvm_memslots(vcpu->kvm, as_id); 1040} 1041 1042static inline bool kvm_memslots_empty(struct kvm_memslots *slots) 1043{ 1044 return RB_EMPTY_ROOT(&slots->gfn_tree); 1045} 1046 1047bool kvm_are_all_memslots_empty(struct kvm *kvm); 1048 1049#define kvm_for_each_memslot(memslot, bkt, slots) \ 1050 hash_for_each(slots->id_hash, bkt, memslot, id_node[slots->node_idx]) \ 1051 if (WARN_ON_ONCE(!memslot->npages)) { \ 1052 } else 1053 1054static inline 1055struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id) 1056{ 1057 struct kvm_memory_slot *slot; 1058 int idx = slots->node_idx; 1059 1060 hash_for_each_possible(slots->id_hash, slot, id_node[idx], id) { 1061 if (slot->id == id) 1062 return slot; 1063 } 1064 1065 return NULL; 1066} 1067 1068/* Iterator used for walking memslots that overlap a gfn range. */ 1069struct kvm_memslot_iter { 1070 struct kvm_memslots *slots; 1071 struct rb_node *node; 1072 struct kvm_memory_slot *slot; 1073}; 1074 1075static inline void kvm_memslot_iter_next(struct kvm_memslot_iter *iter) 1076{ 1077 iter->node = rb_next(iter->node); 1078 if (!iter->node) 1079 return; 1080 1081 iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[iter->slots->node_idx]); 1082} 1083 1084static inline void kvm_memslot_iter_start(struct kvm_memslot_iter *iter, 1085 struct kvm_memslots *slots, 1086 gfn_t start) 1087{ 1088 int idx = slots->node_idx; 1089 struct rb_node *tmp; 1090 struct kvm_memory_slot *slot; 1091 1092 iter->slots = slots; 1093 1094 /* 1095 * Find the so called "upper bound" of a key - the first node that has 1096 * its key strictly greater than the searched one (the start gfn in our case). 1097 */ 1098 iter->node = NULL; 1099 for (tmp = slots->gfn_tree.rb_node; tmp; ) { 1100 slot = container_of(tmp, struct kvm_memory_slot, gfn_node[idx]); 1101 if (start < slot->base_gfn) { 1102 iter->node = tmp; 1103 tmp = tmp->rb_left; 1104 } else { 1105 tmp = tmp->rb_right; 1106 } 1107 } 1108 1109 /* 1110 * Find the slot with the lowest gfn that can possibly intersect with 1111 * the range, so we'll ideally have slot start <= range start 1112 */ 1113 if (iter->node) { 1114 /* 1115 * A NULL previous node means that the very first slot 1116 * already has a higher start gfn. 1117 * In this case slot start > range start. 1118 */ 1119 tmp = rb_prev(iter->node); 1120 if (tmp) 1121 iter->node = tmp; 1122 } else { 1123 /* a NULL node below means no slots */ 1124 iter->node = rb_last(&slots->gfn_tree); 1125 } 1126 1127 if (iter->node) { 1128 iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[idx]); 1129 1130 /* 1131 * It is possible in the slot start < range start case that the 1132 * found slot ends before or at range start (slot end <= range start) 1133 * and so it does not overlap the requested range. 1134 * 1135 * In such non-overlapping case the next slot (if it exists) will 1136 * already have slot start > range start, otherwise the logic above 1137 * would have found it instead of the current slot. 
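		 *
		 * Illustrative example with a hypothetical layout: given slots
		 * covering gfns [0x0, 0x10) and [0x20, 0x30) and a start of
		 * 0x15, the upper-bound search above lands on the [0x20, 0x30)
		 * slot, rb_prev() steps back to [0x0, 0x10), and because that
		 * slot ends at 0x10, i.e. before the requested start, the
		 * check below advances the iterator to [0x20, 0x30) again.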
1138 */ 1139 if (iter->slot->base_gfn + iter->slot->npages <= start) 1140 kvm_memslot_iter_next(iter); 1141 } 1142} 1143 1144static inline bool kvm_memslot_iter_is_valid(struct kvm_memslot_iter *iter, gfn_t end) 1145{ 1146 if (!iter->node) 1147 return false; 1148 1149 /* 1150 * If this slot starts beyond or at the end of the range so does 1151 * every next one 1152 */ 1153 return iter->slot->base_gfn < end; 1154} 1155 1156/* Iterate over each memslot at least partially intersecting [start, end) range */ 1157#define kvm_for_each_memslot_in_gfn_range(iter, slots, start, end) \ 1158 for (kvm_memslot_iter_start(iter, slots, start); \ 1159 kvm_memslot_iter_is_valid(iter, end); \ 1160 kvm_memslot_iter_next(iter)) 1161 1162/* 1163 * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations: 1164 * - create a new memory slot 1165 * - delete an existing memory slot 1166 * - modify an existing memory slot 1167 * -- move it in the guest physical memory space 1168 * -- just change its flags 1169 * 1170 * Since flags can be changed by some of these operations, the following 1171 * differentiation is the best we can do for __kvm_set_memory_region(): 1172 */ 1173enum kvm_mr_change { 1174 KVM_MR_CREATE, 1175 KVM_MR_DELETE, 1176 KVM_MR_MOVE, 1177 KVM_MR_FLAGS_ONLY, 1178}; 1179 1180int kvm_set_memory_region(struct kvm *kvm, 1181 const struct kvm_userspace_memory_region2 *mem); 1182int __kvm_set_memory_region(struct kvm *kvm, 1183 const struct kvm_userspace_memory_region2 *mem); 1184void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot); 1185void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen); 1186int kvm_arch_prepare_memory_region(struct kvm *kvm, 1187 const struct kvm_memory_slot *old, 1188 struct kvm_memory_slot *new, 1189 enum kvm_mr_change change); 1190void kvm_arch_commit_memory_region(struct kvm *kvm, 1191 struct kvm_memory_slot *old, 1192 const struct kvm_memory_slot *new, 1193 enum kvm_mr_change change); 1194/* flush all memory translations */ 1195void kvm_arch_flush_shadow_all(struct kvm *kvm); 1196/* flush memory translations pointing to 'slot' */ 1197void kvm_arch_flush_shadow_memslot(struct kvm *kvm, 1198 struct kvm_memory_slot *slot); 1199 1200int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn, 1201 struct page **pages, int nr_pages); 1202 1203struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); 1204unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); 1205unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable); 1206unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn); 1207unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn, 1208 bool *writable); 1209void kvm_release_page_clean(struct page *page); 1210void kvm_release_page_dirty(struct page *page); 1211 1212kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn); 1213kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, 1214 bool *writable); 1215kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn); 1216kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn); 1217kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn, 1218 bool atomic, bool interruptible, bool *async, 1219 bool write_fault, bool *writable, hva_t *hva); 1220 1221void kvm_release_pfn_clean(kvm_pfn_t pfn); 1222void kvm_release_pfn_dirty(kvm_pfn_t pfn); 1223void kvm_set_pfn_dirty(kvm_pfn_t pfn); 1224void kvm_set_pfn_accessed(kvm_pfn_t pfn); 1225 1226void kvm_release_pfn(kvm_pfn_t pfn, bool 
dirty); 1227int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, 1228 int len); 1229int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len); 1230int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 1231 void *data, unsigned long len); 1232int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 1233 void *data, unsigned int offset, 1234 unsigned long len); 1235int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data, 1236 int offset, int len); 1237int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, 1238 unsigned long len); 1239int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 1240 void *data, unsigned long len); 1241int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 1242 void *data, unsigned int offset, 1243 unsigned long len); 1244int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 1245 gpa_t gpa, unsigned long len); 1246 1247#define __kvm_get_guest(kvm, gfn, offset, v) \ 1248({ \ 1249 unsigned long __addr = gfn_to_hva(kvm, gfn); \ 1250 typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \ 1251 int __ret = -EFAULT; \ 1252 \ 1253 if (!kvm_is_error_hva(__addr)) \ 1254 __ret = get_user(v, __uaddr); \ 1255 __ret; \ 1256}) 1257 1258#define kvm_get_guest(kvm, gpa, v) \ 1259({ \ 1260 gpa_t __gpa = gpa; \ 1261 struct kvm *__kvm = kvm; \ 1262 \ 1263 __kvm_get_guest(__kvm, __gpa >> PAGE_SHIFT, \ 1264 offset_in_page(__gpa), v); \ 1265}) 1266 1267#define __kvm_put_guest(kvm, gfn, offset, v) \ 1268({ \ 1269 unsigned long __addr = gfn_to_hva(kvm, gfn); \ 1270 typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \ 1271 int __ret = -EFAULT; \ 1272 \ 1273 if (!kvm_is_error_hva(__addr)) \ 1274 __ret = put_user(v, __uaddr); \ 1275 if (!__ret) \ 1276 mark_page_dirty(kvm, gfn); \ 1277 __ret; \ 1278}) 1279 1280#define kvm_put_guest(kvm, gpa, v) \ 1281({ \ 1282 gpa_t __gpa = gpa; \ 1283 struct kvm *__kvm = kvm; \ 1284 \ 1285 __kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT, \ 1286 offset_in_page(__gpa), v); \ 1287}) 1288 1289int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); 1290struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); 1291bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn); 1292bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn); 1293unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn); 1294void mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn); 1295void mark_page_dirty(struct kvm *kvm, gfn_t gfn); 1296 1297struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu); 1298struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn); 1299kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn); 1300kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn); 1301int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map); 1302void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty); 1303unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn); 1304unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable); 1305int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset, 1306 int len); 1307int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, 1308 unsigned long len); 1309int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, 
void *data,
			unsigned long len);
int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
			      int offset, int len);
int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
			 unsigned long len);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);

/**
 * kvm_gpc_init - initialize gfn_to_pfn_cache.
 *
 * @gpc: struct gfn_to_pfn_cache object.
 * @kvm: pointer to kvm instance.
 *
 * This sets up a gfn_to_pfn_cache by initializing locks and assigning the
 * immutable attributes. Note, the cache must be zero-allocated (or zeroed by
 * the caller before init).
 */
void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm);

/**
 * kvm_gpc_activate - prepare a cached kernel mapping and HPA for a given guest
 *                    physical address.
 *
 * @gpc: struct gfn_to_pfn_cache object.
 * @gpa: guest physical address to map.
 * @len: sanity check; the range being accessed must fit a single page.
 *
 * @return: 0 for success.
 *          -EINVAL for a mapping which would cross a page boundary.
 *          -EFAULT for an untranslatable guest physical address.
 *
 * This primes a gfn_to_pfn_cache and links it into the @gpc->kvm's list for
 * invalidations to be processed. Callers are required to use kvm_gpc_check()
 * to ensure that the cache is valid before accessing the target page.
 */
int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len);

/**
 * kvm_gpc_activate_hva - prepare a cached kernel mapping and HPA for a given HVA.
 *
 * @gpc: struct gfn_to_pfn_cache object.
 * @hva: userspace virtual address to map.
 * @len: sanity check; the range being accessed must fit a single page.
 *
 * @return: 0 for success.
 *          -EINVAL for a mapping which would cross a page boundary.
 *          -EFAULT for an untranslatable host virtual address.
 *
 * The semantics of this function are the same as those of kvm_gpc_activate(). It
 * merely bypasses a layer of address translation.
 */
int kvm_gpc_activate_hva(struct gfn_to_pfn_cache *gpc, unsigned long hva, unsigned long len);

/**
 * kvm_gpc_check - check validity of a gfn_to_pfn_cache.
 *
 * @gpc: struct gfn_to_pfn_cache object.
 * @len: sanity check; the range being accessed must fit a single page.
 *
 * @return: %true if the cache is still valid and the address matches.
 *          %false if the cache is not valid.
 *
 * Callers outside IN_GUEST_MODE context should hold a read lock on @gpc->lock
 * while calling this function, and then continue to hold the lock until the
 * access is complete.
 *
 * Callers in IN_GUEST_MODE may do so without locking, although they should
 * still hold a read lock on kvm->srcu for the memslot checks.
 */
bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len);

/**
 * kvm_gpc_refresh - update a previously initialized cache.
 *
 * @gpc: struct gfn_to_pfn_cache object.
 * @len: sanity check; the range being accessed must fit a single page.
 *
 * @return: 0 for success.
 *          -EINVAL for a mapping which would cross a page boundary.
 *          -EFAULT for an untranslatable guest physical address.
 *
 * This will attempt to refresh a gfn_to_pfn_cache.
Note that a successful 1392 * return from this function does not mean the page can be immediately 1393 * accessed because it may have raced with an invalidation. Callers must 1394 * still lock and check the cache status, as this function does not return 1395 * with the lock still held to permit access. 1396 */ 1397int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len); 1398 1399/** 1400 * kvm_gpc_deactivate - deactivate and unlink a gfn_to_pfn_cache. 1401 * 1402 * @gpc: struct gfn_to_pfn_cache object. 1403 * 1404 * This removes a cache from the VM's list to be processed on MMU notifier 1405 * invocation. 1406 */ 1407void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc); 1408 1409static inline bool kvm_gpc_is_gpa_active(struct gfn_to_pfn_cache *gpc) 1410{ 1411 return gpc->active && !kvm_is_error_gpa(gpc->gpa); 1412} 1413 1414static inline bool kvm_gpc_is_hva_active(struct gfn_to_pfn_cache *gpc) 1415{ 1416 return gpc->active && kvm_is_error_gpa(gpc->gpa); 1417} 1418 1419void kvm_sigset_activate(struct kvm_vcpu *vcpu); 1420void kvm_sigset_deactivate(struct kvm_vcpu *vcpu); 1421 1422void kvm_vcpu_halt(struct kvm_vcpu *vcpu); 1423bool kvm_vcpu_block(struct kvm_vcpu *vcpu); 1424void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu); 1425void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu); 1426bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu); 1427void kvm_vcpu_kick(struct kvm_vcpu *vcpu); 1428int kvm_vcpu_yield_to(struct kvm_vcpu *target); 1429void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool yield_to_kernel_mode); 1430 1431void kvm_flush_remote_tlbs(struct kvm *kvm); 1432void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages); 1433void kvm_flush_remote_tlbs_memslot(struct kvm *kvm, 1434 const struct kvm_memory_slot *memslot); 1435 1436#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 1437int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min); 1438int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min); 1439int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc); 1440void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc); 1441void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc); 1442#endif 1443 1444void kvm_mmu_invalidate_begin(struct kvm *kvm); 1445void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end); 1446void kvm_mmu_invalidate_end(struct kvm *kvm); 1447bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range); 1448 1449long kvm_arch_dev_ioctl(struct file *filp, 1450 unsigned int ioctl, unsigned long arg); 1451long kvm_arch_vcpu_ioctl(struct file *filp, 1452 unsigned int ioctl, unsigned long arg); 1453vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf); 1454 1455int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext); 1456 1457void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, 1458 struct kvm_memory_slot *slot, 1459 gfn_t gfn_offset, 1460 unsigned long mask); 1461void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot); 1462 1463#ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 1464int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log); 1465int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, 1466 int *is_dirty, struct kvm_memory_slot **memslot); 1467#endif 1468 1469int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level, 1470 bool line_status); 1471int kvm_vm_ioctl_enable_cap(struct kvm *kvm, 1472 struct kvm_enable_cap *cap); 1473int 
kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg); 1474long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl, 1475 unsigned long arg); 1476 1477int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); 1478int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); 1479 1480int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, 1481 struct kvm_translation *tr); 1482 1483int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs); 1484int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs); 1485int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, 1486 struct kvm_sregs *sregs); 1487int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, 1488 struct kvm_sregs *sregs); 1489int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, 1490 struct kvm_mp_state *mp_state); 1491int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, 1492 struct kvm_mp_state *mp_state); 1493int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, 1494 struct kvm_guest_debug *dbg); 1495int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu); 1496 1497void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu); 1498 1499void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu); 1500void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu); 1501int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id); 1502int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu); 1503void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu); 1504void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu); 1505 1506#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER 1507int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state); 1508#endif 1509 1510#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS 1511void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry); 1512#else 1513static inline void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) {} 1514#endif 1515 1516#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING 1517int kvm_arch_hardware_enable(void); 1518void kvm_arch_hardware_disable(void); 1519#endif 1520int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu); 1521bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu); 1522int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu); 1523bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu); 1524bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu); 1525bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu); 1526int kvm_arch_post_init_vm(struct kvm *kvm); 1527void kvm_arch_pre_destroy_vm(struct kvm *kvm); 1528void kvm_arch_create_vm_debugfs(struct kvm *kvm); 1529 1530#ifndef __KVM_HAVE_ARCH_VM_ALLOC 1531/* 1532 * All architectures that want to use vzalloc currently also 1533 * need their own kvm_arch_alloc_vm implementation. 
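 *
 * As a hedged sketch (not taken from any particular architecture), such an
 * arch override could look like:
 *
 *	#define __KVM_HAVE_ARCH_VM_ALLOC
 *	static inline struct kvm *kvm_arch_alloc_vm(void)
 *	{
 *		return vzalloc(sizeof(struct kvm));
 *	}
 *
 * __kvm_arch_free_vm() below uses kvfree(), which handles both kmalloc'ed and
 * vmalloc'ed memory, so the default free path still works for such an
 * allocation.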
1534 */ 1535static inline struct kvm *kvm_arch_alloc_vm(void) 1536{ 1537 return kzalloc(sizeof(struct kvm), GFP_KERNEL_ACCOUNT); 1538} 1539#endif 1540 1541static inline void __kvm_arch_free_vm(struct kvm *kvm) 1542{ 1543 kvfree(kvm); 1544} 1545 1546#ifndef __KVM_HAVE_ARCH_VM_FREE 1547static inline void kvm_arch_free_vm(struct kvm *kvm) 1548{ 1549 __kvm_arch_free_vm(kvm); 1550} 1551#endif 1552 1553#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS 1554static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm) 1555{ 1556 return -ENOTSUPP; 1557} 1558#else 1559int kvm_arch_flush_remote_tlbs(struct kvm *kvm); 1560#endif 1561 1562#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE 1563static inline int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, 1564 gfn_t gfn, u64 nr_pages) 1565{ 1566 return -EOPNOTSUPP; 1567} 1568#else 1569int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages); 1570#endif 1571 1572#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA 1573void kvm_arch_register_noncoherent_dma(struct kvm *kvm); 1574void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm); 1575bool kvm_arch_has_noncoherent_dma(struct kvm *kvm); 1576#else 1577static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm) 1578{ 1579} 1580 1581static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm) 1582{ 1583} 1584 1585static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm) 1586{ 1587 return false; 1588} 1589#endif 1590#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE 1591void kvm_arch_start_assignment(struct kvm *kvm); 1592void kvm_arch_end_assignment(struct kvm *kvm); 1593bool kvm_arch_has_assigned_device(struct kvm *kvm); 1594#else 1595static inline void kvm_arch_start_assignment(struct kvm *kvm) 1596{ 1597} 1598 1599static inline void kvm_arch_end_assignment(struct kvm *kvm) 1600{ 1601} 1602 1603static __always_inline bool kvm_arch_has_assigned_device(struct kvm *kvm) 1604{ 1605 return false; 1606} 1607#endif 1608 1609static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu) 1610{ 1611#ifdef __KVM_HAVE_ARCH_WQP 1612 return vcpu->arch.waitp; 1613#else 1614 return &vcpu->wait; 1615#endif 1616} 1617 1618/* 1619 * Wake a vCPU if necessary, but don't do any stats/metadata updates. Returns 1620 * true if the vCPU was blocking and was awakened, false otherwise. 1621 */ 1622static inline bool __kvm_vcpu_wake_up(struct kvm_vcpu *vcpu) 1623{ 1624 return !!rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu)); 1625} 1626 1627static inline bool kvm_vcpu_is_blocking(struct kvm_vcpu *vcpu) 1628{ 1629 return rcuwait_active(kvm_arch_vcpu_get_wait(vcpu)); 1630} 1631 1632#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED 1633/* 1634 * returns true if the virtual interrupt controller is initialized and 1635 * ready to accept virtual IRQ. On some architectures the virtual interrupt 1636 * controller is dynamically instantiated and this is not always true. 
1637 */ 1638bool kvm_arch_intc_initialized(struct kvm *kvm); 1639#else 1640static inline bool kvm_arch_intc_initialized(struct kvm *kvm) 1641{ 1642 return true; 1643} 1644#endif 1645 1646#ifdef CONFIG_GUEST_PERF_EVENTS 1647unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu); 1648 1649void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void)); 1650void kvm_unregister_perf_callbacks(void); 1651#else 1652static inline void kvm_register_perf_callbacks(void *ign) {} 1653static inline void kvm_unregister_perf_callbacks(void) {} 1654#endif /* CONFIG_GUEST_PERF_EVENTS */ 1655 1656int kvm_arch_init_vm(struct kvm *kvm, unsigned long type); 1657void kvm_arch_destroy_vm(struct kvm *kvm); 1658void kvm_arch_sync_events(struct kvm *kvm); 1659 1660int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); 1661 1662struct page *kvm_pfn_to_refcounted_page(kvm_pfn_t pfn); 1663bool kvm_is_zone_device_page(struct page *page); 1664 1665struct kvm_irq_ack_notifier { 1666 struct hlist_node link; 1667 unsigned gsi; 1668 void (*irq_acked)(struct kvm_irq_ack_notifier *kian); 1669}; 1670 1671int kvm_irq_map_gsi(struct kvm *kvm, 1672 struct kvm_kernel_irq_routing_entry *entries, int gsi); 1673int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin); 1674 1675int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, 1676 bool line_status); 1677int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm, 1678 int irq_source_id, int level, bool line_status); 1679int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e, 1680 struct kvm *kvm, int irq_source_id, 1681 int level, bool line_status); 1682bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin); 1683void kvm_notify_acked_gsi(struct kvm *kvm, int gsi); 1684void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin); 1685void kvm_register_irq_ack_notifier(struct kvm *kvm, 1686 struct kvm_irq_ack_notifier *kian); 1687void kvm_unregister_irq_ack_notifier(struct kvm *kvm, 1688 struct kvm_irq_ack_notifier *kian); 1689int kvm_request_irq_source_id(struct kvm *kvm); 1690void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); 1691bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args); 1692 1693/* 1694 * Returns a pointer to the memslot if it contains gfn. 1695 * Otherwise returns NULL. 1696 */ 1697static inline struct kvm_memory_slot * 1698try_get_memslot(struct kvm_memory_slot *slot, gfn_t gfn) 1699{ 1700 if (!slot) 1701 return NULL; 1702 1703 if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages) 1704 return slot; 1705 else 1706 return NULL; 1707} 1708 1709/* 1710 * Returns a pointer to the memslot that contains gfn. Otherwise returns NULL. 1711 * 1712 * With "approx" set returns the memslot also when the address falls 1713 * in a hole. In that case one of the memslots bordering the hole is 1714 * returned. 1715 */ 1716static inline struct kvm_memory_slot * 1717search_memslots(struct kvm_memslots *slots, gfn_t gfn, bool approx) 1718{ 1719 struct kvm_memory_slot *slot; 1720 struct rb_node *node; 1721 int idx = slots->node_idx; 1722 1723 slot = NULL; 1724 for (node = slots->gfn_tree.rb_node; node; ) { 1725 slot = container_of(node, struct kvm_memory_slot, gfn_node[idx]); 1726 if (gfn >= slot->base_gfn) { 1727 if (gfn < slot->base_gfn + slot->npages) 1728 return slot; 1729 node = node->rb_right; 1730 } else 1731 node = node->rb_left; 1732 } 1733 1734 return approx ? 

static inline struct kvm_memory_slot *
____gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn, bool approx)
{
	struct kvm_memory_slot *slot;

	slot = (struct kvm_memory_slot *)atomic_long_read(&slots->last_used_slot);
	slot = try_get_memslot(slot, gfn);
	if (slot)
		return slot;

	slot = search_memslots(slots, gfn, approx);
	if (slot) {
		atomic_long_set(&slots->last_used_slot, (unsigned long)slot);
		return slot;
	}

	return NULL;
}

/*
 * __gfn_to_memslot() and its descendants are here to allow arch code to inline
 * the lookups in hot paths.  gfn_to_memslot() itself isn't here as an inline
 * because that would bloat other code too much.
 */
static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
	return ____gfn_to_memslot(slots, gfn, false);
}

static inline unsigned long
__gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
{
	/*
	 * The index was checked originally in search_memslots.  To avoid
	 * that a malicious guest builds a Spectre gadget out of e.g. page
	 * table walks, do not let the processor speculate loads outside
	 * the guest's registered memslots.
	 */
	unsigned long offset = gfn - slot->base_gfn;
	offset = array_index_nospec(offset, slot->npages);
	return slot->userspace_addr + offset * PAGE_SIZE;
}

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

	return slot->base_gfn + gfn_offset;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}

static inline bool kvm_is_gpa_in_memslot(struct kvm *kvm, gpa_t gpa)
{
	unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));

	return !kvm_is_error_hva(hva);
}

static inline void kvm_gpc_mark_dirty_in_slot(struct gfn_to_pfn_cache *gpc)
{
	lockdep_assert_held(&gpc->lock);

	if (!gpc->memslot)
		return;

	mark_page_dirty_in_slot(gpc->kvm, gpc->memslot, gpa_to_gfn(gpc->gpa));
}

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stat_data {
	struct kvm *kvm;
	const struct _kvm_stats_desc *desc;
	enum kvm_stat_kind kind;
};

struct _kvm_stats_desc {
	struct kvm_stats_desc desc;
	char name[KVM_STATS_NAME_SIZE];
};

#define STATS_DESC_COMMON(type, unit, base, exp, sz, bsz)		\
	.flags = type | unit | base |					\
		 BUILD_BUG_ON_ZERO(type & ~KVM_STATS_TYPE_MASK) |	\
		 BUILD_BUG_ON_ZERO(unit & ~KVM_STATS_UNIT_MASK) |	\
		 BUILD_BUG_ON_ZERO(base & ~KVM_STATS_BASE_MASK),	\
	.exponent = exp,						\
	.size = sz,							\
	.bucket_size = bsz

#define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz)	\
	{								\
		{							\
			STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
			.offset = offsetof(struct kvm_vm_stat, generic.stat) \
		},							\
		.name = #stat,						\
	}
#define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz)	\
	{								\
		{							\
			STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
			.offset = offsetof(struct kvm_vcpu_stat, generic.stat) \
		},							\
		.name = #stat,						\
	}
#define VM_STATS_DESC(stat, type, unit, base, exp, sz, bsz)		\
	{								\
		{							\
			STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
			.offset = offsetof(struct kvm_vm_stat, stat)	\
		},							\
		.name = #stat,						\
	}
#define VCPU_STATS_DESC(stat, type, unit, base, exp, sz, bsz)		\
	{								\
		{							\
			STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
			.offset = offsetof(struct kvm_vcpu_stat, stat)	\
		},							\
		.name = #stat,						\
	}
/* SCOPE: VM, VM_GENERIC, VCPU, VCPU_GENERIC */
#define STATS_DESC(SCOPE, stat, type, unit, base, exp, sz, bsz)		\
	SCOPE##_STATS_DESC(stat, type, unit, base, exp, sz, bsz)

#define STATS_DESC_CUMULATIVE(SCOPE, name, unit, base, exponent)	\
	STATS_DESC(SCOPE, name, KVM_STATS_TYPE_CUMULATIVE,		\
		   unit, base, exponent, 1, 0)
#define STATS_DESC_INSTANT(SCOPE, name, unit, base, exponent)		\
	STATS_DESC(SCOPE, name, KVM_STATS_TYPE_INSTANT,		\
		   unit, base, exponent, 1, 0)
#define STATS_DESC_PEAK(SCOPE, name, unit, base, exponent)		\
	STATS_DESC(SCOPE, name, KVM_STATS_TYPE_PEAK,			\
		   unit, base, exponent, 1, 0)
#define STATS_DESC_LINEAR_HIST(SCOPE, name, unit, base, exponent, sz, bsz) \
	STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LINEAR_HIST,		\
		   unit, base, exponent, sz, bsz)
#define STATS_DESC_LOG_HIST(SCOPE, name, unit, base, exponent, sz)	\
	STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LOG_HIST,		\
		   unit, base, exponent, sz, 0)

/* Cumulative counter, read/write */
#define STATS_DESC_COUNTER(SCOPE, name)					\
	STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_NONE,	\
		KVM_STATS_BASE_POW10, 0)
/* Instantaneous counter, read only */
#define STATS_DESC_ICOUNTER(SCOPE, name)				\
	STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_NONE,		\
		KVM_STATS_BASE_POW10, 0)
/* Peak counter, read/write */
#define STATS_DESC_PCOUNTER(SCOPE, name)				\
	STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_NONE,		\
		KVM_STATS_BASE_POW10, 0)

/* Instantaneous boolean value, read only */
#define STATS_DESC_IBOOLEAN(SCOPE, name)				\
	STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_BOOLEAN,	\
		KVM_STATS_BASE_POW10, 0)
/* Peak (sticky) boolean value, read/write */
#define STATS_DESC_PBOOLEAN(SCOPE, name)				\
	STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_BOOLEAN,		\
		KVM_STATS_BASE_POW10, 0)

/* Cumulative time in nanoseconds */
#define STATS_DESC_TIME_NSEC(SCOPE, name)				\
	STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_SECONDS,	\
		KVM_STATS_BASE_POW10, -9)
/* Linear histogram for time in nanoseconds */
#define STATS_DESC_LINHIST_TIME_NSEC(SCOPE, name, sz, bsz)		\
	STATS_DESC_LINEAR_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS,	\
		KVM_STATS_BASE_POW10, -9, sz, bsz)
/* Logarithmic histogram for time in nanoseconds */
#define STATS_DESC_LOGHIST_TIME_NSEC(SCOPE, name, sz)			\
	STATS_DESC_LOG_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS,	\
		KVM_STATS_BASE_POW10, -9, sz)

#define KVM_GENERIC_VM_STATS()						\
	STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush),		\
	STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush_requests)

#define KVM_GENERIC_VCPU_STATS()					\
	STATS_DESC_COUNTER(VCPU_GENERIC, halt_successful_poll),	\
	STATS_DESC_COUNTER(VCPU_GENERIC, halt_attempted_poll),		\
	STATS_DESC_COUNTER(VCPU_GENERIC, halt_poll_invalid),		\
	STATS_DESC_COUNTER(VCPU_GENERIC, halt_wakeup),			\
	STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_success_ns),	\
	STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_ns),		\
	STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_wait_ns),		\
	STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_success_hist, \
			HALT_POLL_HIST_COUNT),				\
	STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_hist, \
			HALT_POLL_HIST_COUNT),				\
	STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_wait_hist,	\
			HALT_POLL_HIST_COUNT),				\
	STATS_DESC_IBOOLEAN(VCPU_GENERIC, blocking)
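
/*
 * Illustrative sketch (the arch-specific stat names below are made up): an
 * architecture typically builds its descriptor tables from the helpers above,
 * e.g.:
 *
 *	const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
 *		KVM_GENERIC_VCPU_STATS(),
 *		STATS_DESC_COUNTER(VCPU, exits),
 *		STATS_DESC_TIME_NSEC(VCPU, some_latency_ns),
 *	};
 *
 * The VM/VCPU scopes compute offsets against the arch's kvm_vm_stat or
 * kvm_vcpu_stat directly, while the *_GENERIC scopes target the embedded
 * generic substruct.
 */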

extern struct dentry *kvm_debugfs_dir;

ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header,
		       const struct _kvm_stats_desc *desc,
		       void *stats, size_t size_stats,
		       char __user *user_buffer, size_t size, loff_t *offset);

/**
 * kvm_stats_linear_hist_update() - Update bucket value for linear histogram
 * statistics data.
 *
 * @data: start address of the stats data
 * @size: the number of buckets of the stats data
 * @value: the new value used to update the linear histogram's bucket
 * @bucket_size: the size (width) of a bucket
 */
static inline void kvm_stats_linear_hist_update(u64 *data, size_t size,
						u64 value, size_t bucket_size)
{
	size_t index = div64_u64(value, bucket_size);

	index = min(index, size - 1);
	++data[index];
}

/**
 * kvm_stats_log_hist_update() - Update bucket value for logarithmic histogram
 * statistics data.
 *
 * @data: start address of the stats data
 * @size: the number of buckets of the stats data
 * @value: the new value used to update the logarithmic histogram's bucket
 */
static inline void kvm_stats_log_hist_update(u64 *data, size_t size, u64 value)
{
	size_t index = fls64(value);

	index = min(index, size - 1);
	++data[index];
}

#define KVM_STATS_LINEAR_HIST_UPDATE(array, value, bsize)		\
	kvm_stats_linear_hist_update(array, ARRAY_SIZE(array), value, bsize)
#define KVM_STATS_LOG_HIST_UPDATE(array, value)				\
	kvm_stats_log_hist_update(array, ARRAY_SIZE(array), value)
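
/*
 * Worked example (illustrative only): with bucket_size == 1000, a linear
 * histogram update for value 2500 increments bucket 2500 / 1000 = 2, and
 * values beyond the last bucket are clamped into it (index = size - 1).  A
 * logarithmic histogram update for value 2500 increments bucket
 * fls64(2500) = 12, i.e. bucket N counts values in [2^(N-1), 2^N), with
 * bucket 0 counting only the value 0.
 */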

extern const struct kvm_stats_header kvm_vm_stats_header;
extern const struct _kvm_stats_desc kvm_vm_stats_desc[];
extern const struct kvm_stats_header kvm_vcpu_stats_header;
extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[];

#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq)
{
	if (unlikely(kvm->mmu_invalidate_in_progress))
		return 1;
	/*
	 * Ensure the read of mmu_invalidate_in_progress happens before
	 * the read of mmu_invalidate_seq.  This interacts with the
	 * smp_wmb() in mmu_notifier_invalidate_range_end to make sure
	 * that the caller either sees the old (non-zero) value of
	 * mmu_invalidate_in_progress or the new (incremented) value of
	 * mmu_invalidate_seq.
	 *
	 * PowerPC Book3s HV KVM calls this under a per-page lock rather
	 * than under kvm->mmu_lock, for scalability, so can't rely on
	 * kvm->mmu_lock to keep things ordered.
	 */
	smp_rmb();
	if (kvm->mmu_invalidate_seq != mmu_seq)
		return 1;
	return 0;
}

static inline int mmu_invalidate_retry_gfn(struct kvm *kvm,
					   unsigned long mmu_seq,
					   gfn_t gfn)
{
	lockdep_assert_held(&kvm->mmu_lock);
	/*
	 * If mmu_invalidate_in_progress is non-zero, then the range maintained
	 * by kvm_mmu_notifier_invalidate_range_start contains all addresses
	 * that might be being invalidated.  Note that it may include some false
	 * positives, due to shortcuts when handling concurrent invalidations.
	 */
	if (unlikely(kvm->mmu_invalidate_in_progress)) {
		/*
		 * Dropping mmu_lock after bumping mmu_invalidate_in_progress
		 * but before updating the range is a KVM bug.
		 */
		if (WARN_ON_ONCE(kvm->mmu_invalidate_range_start == INVALID_GPA ||
				 kvm->mmu_invalidate_range_end == INVALID_GPA))
			return 1;

		if (gfn >= kvm->mmu_invalidate_range_start &&
		    gfn < kvm->mmu_invalidate_range_end)
			return 1;
	}

	if (kvm->mmu_invalidate_seq != mmu_seq)
		return 1;
	return 0;
}
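
/*
 * Illustrative sketch (not a complete fault handler) of how the retry check
 * is meant to be used when installing a translation:
 *
 *	mmu_seq = kvm->mmu_invalidate_seq;
 *	smp_rmb();
 *	pfn = <translate gfn, may fault and sleep>;
 *	spin_lock(&kvm->mmu_lock);	(write_lock() where mmu_lock is an rwlock)
 *	if (mmu_invalidate_retry(kvm, mmu_seq))
 *		<bail out and retry, an invalidation may have raced with us>;
 *	<install the mapping>;
 *	spin_unlock(&kvm->mmu_lock);
 */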

/*
 * This lockless version of the range-based retry check *must* be paired with a
 * call to the locked version after acquiring mmu_lock, i.e. this is safe to
 * use only as a pre-check to avoid contending mmu_lock.  This version *will*
 * get false negatives and false positives.
 */
static inline bool mmu_invalidate_retry_gfn_unsafe(struct kvm *kvm,
						   unsigned long mmu_seq,
						   gfn_t gfn)
{
	/*
	 * Use READ_ONCE() to ensure the in-progress flag and sequence counter
	 * are always read from memory, e.g. so that checking for retry in a
	 * loop won't result in an infinite retry loop.  Don't force loads for
	 * start+end, as the key to avoiding infinite retry loops is observing
	 * the 1=>0 transition of in-progress, i.e. getting false negatives
	 * due to stale start+end values is acceptable.
	 */
	if (unlikely(READ_ONCE(kvm->mmu_invalidate_in_progress)) &&
	    gfn >= kvm->mmu_invalidate_range_start &&
	    gfn < kvm->mmu_invalidate_range_end)
		return true;

	return READ_ONCE(kvm->mmu_invalidate_seq) != mmu_seq;
}
#endif

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

#define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */

bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *entries,
			unsigned nr,
			unsigned flags);
int kvm_set_routing_entry(struct kvm *kvm,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#ifdef CONFIG_HAVE_KVM_IRQCHIP
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
bool kvm_notify_irqfd_resampler(struct kvm *kvm,
				unsigned int irqchip,
				unsigned int pin);
void kvm_irq_routing_update(struct kvm *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

static inline bool kvm_notify_irqfd_resampler(struct kvm *kvm,
					      unsigned int irqchip,
					      unsigned int pin)
{
	return false;
}
#endif /* CONFIG_HAVE_KVM_IRQCHIP */

void kvm_arch_irq_routing_update(struct kvm *kvm);

static inline void __kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	/*
	 * Ensure the rest of the request is published to kvm_check_request's
	 * caller.  Paired with the smp_mb__after_atomic in kvm_check_request.
	 */
	smp_wmb();
	set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static __always_inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	/*
	 * Requests that don't require vCPU action should never be logged in
	 * vcpu->requests.  The vCPU won't clear the request, so it will stay
	 * logged indefinitely and prevent the vCPU from entering the guest.
	 */
	BUILD_BUG_ON(!__builtin_constant_p(req) ||
		     (req & KVM_REQUEST_NO_ACTION));

	__kvm_make_request(req, vcpu);
}

static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
{
	return READ_ONCE(vcpu->requests);
}

static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
{
	return test_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu)
{
	clear_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
	if (kvm_test_request(req, vcpu)) {
		kvm_clear_request(req, vcpu);

		/*
		 * Ensure the rest of the request is visible to kvm_check_request's
		 * caller.  Paired with the smp_wmb in kvm_make_request.
		 */
		smp_mb__after_atomic();
		return true;
	} else {
		return false;
	}
}
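
/*
 * Illustrative only: the intended pairing is a producer that sets the request
 * and kicks the vCPU, and the vCPU run loop that consumes it, e.g.:
 *
 *	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *
 * and, in arch code on the way into the guest (handler name made up here):
 *
 *	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
 *		arch_flush_guest_tlb(vcpu);
 *
 * The smp_wmb()/smp_mb__after_atomic() pair above guarantees that data
 * written before making the request is visible once the check succeeds.
 */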

#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
extern bool kvm_rebooting;
#endif

extern unsigned int halt_poll_ns;
extern unsigned int halt_poll_ns_grow;
extern unsigned int halt_poll_ns_grow_start;
extern unsigned int halt_poll_ns_shrink;

struct kvm_device {
	const struct kvm_device_ops *ops;
	struct kvm *kvm;
	void *private;
	struct list_head vm_node;
};

/* create, destroy, and name are mandatory */
struct kvm_device_ops {
	const char *name;

	/*
	 * create is called holding kvm->lock and any operations not suitable
	 * to do while holding the lock should be deferred to init (see
	 * below).
	 */
	int (*create)(struct kvm_device *dev, u32 type);

	/*
	 * init is called after create if create is successful and is called
	 * outside of holding kvm->lock.
	 */
	void (*init)(struct kvm_device *dev);

	/*
	 * Destroy is responsible for freeing dev.
	 *
	 * Destroy may be called before or after destructors are called
	 * on emulated I/O regions, depending on whether a reference is
	 * held by a vcpu or other kvm component that gets destroyed
	 * after the emulated I/O.
	 */
	void (*destroy)(struct kvm_device *dev);

	/*
	 * Release is an alternative method to free the device.  It is
	 * called when the device file descriptor is closed.  Once
	 * release is called, the destroy method will not be called
	 * anymore as the device is removed from the device list of
	 * the VM.  kvm->lock is held.
	 */
	void (*release)(struct kvm_device *dev);

	int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
		      unsigned long arg);
	int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma);
};

struct kvm_device *kvm_device_from_filp(struct file *filp);
int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type);
void kvm_unregister_device_ops(u32 type);

extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
extern struct kvm_device_ops kvm_arm_vgic_v3_ops;
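
/*
 * Illustrative only: an architecture typically registers a device type once
 * during init, roughly as the vGIC does:
 *
 *	kvm_register_device_ops(&kvm_arm_vgic_v3_ops, KVM_DEV_TYPE_ARM_VGIC_V3);
 *
 * Userspace then instantiates the device with the KVM_CREATE_DEVICE ioctl,
 * which calls ->create() under kvm->lock and ->init() outside of it, and the
 * resulting file descriptor is what the attr/ioctl/mmap callbacks operate on.
 */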

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.in_spin_loop = val;
}
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.dy_eligible = val;
}

#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline bool kvm_is_visible_memslot(struct kvm_memory_slot *memslot)
{
	return (memslot && memslot->id < KVM_USER_MEM_SLOTS &&
		!(memslot->flags & KVM_MEMSLOT_INVALID));
}

struct kvm_vcpu *kvm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
bool kvm_arch_has_irq_bypass(void);
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
				     struct irq_bypass_producer *);
void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
				      struct irq_bypass_producer *);
void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
				  uint32_t guest_irq, bool set);
bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *,
				  struct kvm_kernel_irq_routing_entry *);
#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */

#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
/* If we wake up during the poll time, was it a successful poll? */
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
	return vcpu->valid_wakeup;
}

#else
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
	return true;
}
#endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */

#ifdef CONFIG_HAVE_KVM_NO_POLL
/* Callback that tells if we must not poll */
bool kvm_arch_no_poll(struct kvm_vcpu *vcpu);
#else
static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{
	return false;
}
#endif /* CONFIG_HAVE_KVM_NO_POLL */

#ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL
long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg);
#else
static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
					     unsigned int ioctl,
					     unsigned long arg)
{
	return -ENOIOCTLCMD;
}
#endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */

void kvm_arch_guest_memory_reclaimed(struct kvm *kvm);

#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
#else
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
	return 0;
}
#endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */

typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data);

int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
				uintptr_t data, const char *name,
				struct task_struct **thread_ptr);

#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
{
	vcpu->run->exit_reason = KVM_EXIT_INTR;
	vcpu->stat.signal_exits++;
}
#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */

/*
 * If more than one page is being (un)accounted, @virt must be the address of
 * the first page of a block of pages that were allocated together (i.e.
 * accounted together).
 *
 * kvm_account_pgtable_pages() is thread-safe because mod_lruvec_page_state()
 * is thread-safe.
 */
static inline void kvm_account_pgtable_pages(void *virt, int nr)
{
	mod_lruvec_page_state(virt_to_page(virt), NR_SECONDARY_PAGETABLE, nr);
}

/*
 * This defines how many reserved entries we want to keep before we
 * kick the vCPU out to userspace to avoid the dirty ring filling up.  This
 * value can be tuned higher if e.g. PML is enabled on the host.
 */
#define KVM_DIRTY_RING_RSVD_ENTRIES 64

/* Max number of entries allowed for each kvm dirty ring */
#define KVM_DIRTY_RING_MAX_ENTRIES 65536

static inline void kvm_prepare_memory_fault_exit(struct kvm_vcpu *vcpu,
						 gpa_t gpa, gpa_t size,
						 bool is_write, bool is_exec,
						 bool is_private)
{
	vcpu->run->exit_reason = KVM_EXIT_MEMORY_FAULT;
	vcpu->run->memory_fault.gpa = gpa;
	vcpu->run->memory_fault.size = size;

	/* RWX flags are not (yet) defined or communicated to userspace. */
	vcpu->run->memory_fault.flags = 0;
	if (is_private)
		vcpu->run->memory_fault.flags |= KVM_MEMORY_EXIT_FLAG_PRIVATE;
}
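
/*
 * Illustrative only: a fault handler that cannot resolve a guest access
 * typically fills in the exit and then bails out to userspace, e.g.:
 *
 *	kvm_prepare_memory_fault_exit(vcpu, gpa, PAGE_SIZE, write, exec, priv);
 *	return -EFAULT;
 */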

#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
static inline unsigned long kvm_get_memory_attributes(struct kvm *kvm, gfn_t gfn)
{
	return xa_to_value(xa_load(&kvm->mem_attr_array, gfn));
}

bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
				     unsigned long attrs);
bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
					struct kvm_gfn_range *range);
bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
					 struct kvm_gfn_range *range);

static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
{
	return IS_ENABLED(CONFIG_KVM_PRIVATE_MEM) &&
	       kvm_get_memory_attributes(kvm, gfn) & KVM_MEMORY_ATTRIBUTE_PRIVATE;
}
#else
static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
{
	return false;
}
#endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */

#ifdef CONFIG_KVM_PRIVATE_MEM
int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
		     gfn_t gfn, kvm_pfn_t *pfn, int *max_order);
#else
static inline int kvm_gmem_get_pfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn,
				   kvm_pfn_t *pfn, int *max_order)
{
	KVM_BUG_ON(1, kvm);
	return -EIO;
}
#endif /* CONFIG_KVM_PRIVATE_MEM */

#endif