Lines Matching defs:cache

123 /* make pte_list_desc fit well in cache lines */
133 * cache locality.
945 static int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte,
954 desc = kvm_mmu_memory_cache_alloc(cache);
970 desc = kvm_mmu_memory_cache_alloc(cache);
1601 struct kvm_mmu_memory_cache *cache,
1614 rmap_count = pte_list_add(cache, spte, rmap_head);
1627 struct kvm_mmu_memory_cache *cache = &vcpu->arch.mmu_pte_list_desc_cache;
1629 __rmap_add(vcpu->kvm, cache, slot, spte, gfn, access);
1713 static void mmu_page_add_parent_pte(struct kvm_mmu_memory_cache *cache,
1719 pte_list_add(cache, parent_pte, &sp->parent_ptes);
2390 struct kvm_mmu_memory_cache *cache, u64 *sptep,
2409 mmu_page_add_parent_pte(cache, sp, sptep);
3256 * Do not cache the mmio info caused by writing the readonly gfn
4070 * A nested guest cannot use the MMIO cache if it is using nested
4071 * page tables, because cr2 is a nGPA while the cache stores GPAs.
4407 * MMIO SPTE. That way the cache doesn't need to be purged
4691 * and insert the current root as the MRU in the cache.
4708 * The swaps end up rotating the cache like this:
4728 * of the cache becomes invalid, and true is returned.
6474 static inline bool need_topup(struct kvm_mmu_memory_cache *cache, int min)
6476 return kvm_mmu_memory_cache_nr_free_objects(cache) < min;
6505 * doesn't need to allocate a list. Buffer the cache by the default
6559 struct kvm_mmu_memory_cache *cache = &kvm->arch.split_desc_cache;
6594 __rmap_add(kvm, cache, slot, sptep, gfn, sp->role.access);
6597 __link_shadow_page(kvm, cache, huge_sptep, sp, flush);
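
The matches above cluster around struct kvm_mmu_memory_cache: callers top up a per-vCPU cache (or the per-VM split_desc_cache, line 6559) while sleeping is still allowed, then pull objects out of it with kvm_mmu_memory_cache_alloc() under mmu_lock, where allocation must not fail (see the pte_list_add() hits at lines 954/970 and the need_topup() check at line 6476). Below is a minimal sketch of that two-phase pattern using the generic KVM memory-cache helpers kvm_mmu_memory_cache_nr_free_objects(), kvm_mmu_topup_memory_cache() and kvm_mmu_memory_cache_alloc(); the function name and the threshold of 4 are hypothetical, and the real code splits the two phases across separate functions rather than one call path.

#include <linux/kvm_host.h>

/*
 * Illustrative (hypothetical) helper compressing the topup-then-alloc
 * pattern into one place.  In mmu.c the topup runs before mmu_lock is
 * taken and the alloc happens with the lock held.
 */
static void *example_cache_get(struct kvm_mmu_memory_cache *cache)
{
        const int min = 4;      /* hypothetical minimum fill level */

        /* Sleepable phase: make sure at least @min objects are buffered. */
        if (kvm_mmu_memory_cache_nr_free_objects(cache) < min &&
            kvm_mmu_topup_memory_cache(cache, min))
                return NULL;

        /*
         * Atomic phase: popping an object cannot fail here because the
         * cache was topped up above.
         */
        return kvm_mmu_memory_cache_alloc(cache);
}

In mmu.c itself the split is explicit: the cache is topped up before mmu_lock is taken, and pte_list_add()/__rmap_add() then draw pte_list_desc objects from it with the lock held, which is why the hits above thread a struct kvm_mmu_memory_cache *cache argument through __rmap_add(), mmu_page_add_parent_pte() and __link_shadow_page().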