/* SPDX-License-Identifier: GPL-2.0 */
/*
 * x86 KFENCE support.
 *
 * Copyright (C) 2020, Google LLC.
 */

#ifndef _ASM_X86_KFENCE_H
#define _ASM_X86_KFENCE_H

#ifndef MODULE
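/* KFENCE is a built-in feature; none of these helpers is available to modules. */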

#include <linux/bug.h>
#include <linux/kfence.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>

/*
 * Force 4K pages for __kfence_pool: kfence_protect_page() toggles the
 * present bit of individual PTEs, which only works if every pool page
 * is mapped at PG_LEVEL_4K.
 */
static inline bool arch_kfence_init_pool(void)
{
	unsigned long addr;

	for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
	     addr += PAGE_SIZE) {
		unsigned int level;

		/* An unmapped hole in the pool is fatal for KFENCE init. */
		if (!lookup_address(addr, &level))
			return false;

		/* Split huge mappings so each pool page gets its own PTE. */
		if (level != PG_LEVEL_4K)
			set_memory_4k(addr, 1);
	}

	return true;
}
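
/*
 * Illustrative only: the generic KFENCE core (mm/kfence/core.c) is expected
 * to call this once while initializing the pool, roughly as follows; the
 * exact call site is an assumption here, not part of this header:
 *
 *	if (!arch_kfence_init_pool())
 *		goto err;	// pool unusable; KFENCE stays disabled
 */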

/* Protect the given page and flush TLB. */
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	unsigned int level;
	pte_t *pte = lookup_address(addr, &level);

	if (WARN_ON(!pte || level != PG_LEVEL_4K))
		return false;

	/*
	 * We need to avoid IPIs, as we may get KFENCE allocations or faults
	 * with interrupts disabled. Therefore, the below is best-effort, and
	 * does not flush TLBs on all CPUs. We can tolerate some inaccuracy;
	 * lazy fault handling takes care of faults after the page is PRESENT.
	 */

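	/*
	 * Clearing _PAGE_PRESENT turns every subsequent access to this page
	 * into a page fault, which is how KFENCE traps out-of-bounds and
	 * use-after-free accesses; setting it makes the page usable again.
	 */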
	if (protect)
		set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
	else
		set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));

	/*
	 * Flush this CPU's TLB, assuming whoever did the allocation/free is
	 * likely to continue running on this CPU.
	 */
	preempt_disable();
	flush_tlb_one_kernel(addr);
	preempt_enable();
	return true;
}
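
/*
 * Note on the fault side (an assumption about the generic wiring, for
 * context only): once a page is protected, the x86 #PF handler is expected
 * to hand KFENCE-pool addresses to kfence_handle_page_fault() from
 * <linux/kfence.h> so the access is reported rather than treated as a
 * fatal kernel fault.
 */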

#endif /* !MODULE */

#endif /* _ASM_X86_KFENCE_H */