/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_MMU_H
#define __ASM_MMU_H

#include <asm/cputype.h>

#define MMCF_AARCH32	0x1	/* mm context flag for AArch32 executables */
#define USER_ASID_BIT	48
#define USER_ASID_FLAG	(UL(1) << USER_ASID_BIT)
#define TTBR_ASID_MASK	(UL(0xffff) << 48)

#ifndef __ASSEMBLY__

#include <linux/refcount.h>
#include <asm/cpufeature.h>

typedef struct {
	atomic64_t	id;		/* ASID (low bits) plus rollover generation */
#ifdef CONFIG_COMPAT
	void		*sigpage;	/* AArch32 signal return trampoline page */
#endif
	refcount_t	pinned;		/* pins keeping the ASID stable across rollover */
	void		*vdso;		/* base of the mapped vDSO */
	unsigned long	flags;		/* MMCF_* flags, e.g. MMCF_AARCH32 */
} mm_context_t;

/*
 * We use atomic64_read() here because the ASID for an 'mm_struct' can
 * be reallocated when scheduling one of its threads following a
 * rollover event (see new_context() and flush_context()). In this case,
 * a concurrent TLBI (e.g. via try_to_unmap_one() and ptep_clear_flush())
 * may use a stale ASID. This is fine in principle as the new ASID is
 * guaranteed to be clean in the TLB, but the TLBI routines have to take
 * care to handle the following race:
 *
 *    CPU 0                    CPU 1                          CPU 2
 *
 *    // ptep_clear_flush(mm)
 *    xchg_relaxed(pte, 0)
 *    DSB ISHST
 *    old = ASID(mm)
 *         |                                                  <rollover>
 *         |                   new = new_context(mm)
 *         \-----------------> atomic_set(mm->context.id, new)
 *                             cpu_switch_mm(mm)
 *                             // Hardware walk of pte using new ASID
 *    TLBI(old)
 *
 * In this scenario, the barrier on CPU 0 and the dependency on CPU 1
 * ensure that the page-table walker on CPU 1 *must* see the invalid PTE
 * written by CPU 0.
 */
#define ASID(mm)	(atomic64_read(&(mm)->context.id) & 0xffff)
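
/*
 * Illustrative sketch only (not part of this header): the CPU 0 column in
 * the diagram above corresponds to the ordering a flush routine must
 * follow. The helper name below is hypothetical; the real routines live
 * in asm/tlbflush.h and are driven from ptep_clear_flush() and friends.
 *
 *	static void example_flush_user_pte(struct mm_struct *mm,
 *					   unsigned long uaddr, pte_t *ptep)
 *	{
 *		unsigned long asid;
 *
 *		set_pte(ptep, __pte(0));	// 1. clear the PTE
 *		dsb(ishst);			// 2. order the PTE write before the TLBI
 *		asid = ASID(mm);		// 3. sample the ASID, possibly stale
 *		// 4. TLBI by 'asid' (e.g. via __tlbi()); safe even if a
 *		//    rollover has since moved mm to a new, clean ASID.
 *	}
 */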

static inline bool arm64_kernel_unmapped_at_el0(void)
{
	return alternative_has_cap_unlikely(ARM64_UNMAP_KERNEL_AT_EL0);
}
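
/*
 * Illustrative sketch only (not part of this header): when the kernel is
 * unmapped at EL0 (KPTI), user space runs under a second ASID, so the TLB
 * invalidation helpers in asm/tlbflush.h additionally target the user
 * ASID, roughly along these lines ('arg' being the already-encoded TLBI
 * operand carrying the VA and the kernel ASID):
 *
 *	if (arm64_kernel_unmapped_at_el0())
 *		__tlbi(vale1is, arg | USER_ASID_FLAG);
 */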

extern void arm64_memblock_init(void);
extern void paging_init(void);
extern void bootmem_init(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
extern void create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
				   phys_addr_t size, pgprot_t prot);
extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only);
extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
extern void mark_linear_text_alias_ro(void);
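
/*
 * Illustrative sketch only (not part of this header): create_pgd_mapping()
 * installs a mapping into a caller-supplied mm (for example, the EFI code
 * maps UEFI runtime regions into efi_mm). A simplified, hypothetical use,
 * with phys_base/virt_base/region_size standing in for real values:
 *
 *	// Map one region read/write, forcing page granularity.
 *	create_pgd_mapping(&efi_mm, phys_base, virt_base, region_size,
 *			   PAGE_KERNEL, true);
 *
 * create_mapping_noalloc() is the early-boot variant that must not
 * allocate new page tables.
 */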

/*
 * This check is triggered during early boot, before the cpufeature
 * framework is initialised. Checking the status on the local CPU allows
 * the boot CPU to detect the need for non-global mappings and thus avoid
 * a pagetable re-write after all the CPUs are booted. This check will
 * run anyway on each individual CPU, allowing us to reach a consistent
 * state once the SMP CPUs are up and thus make the switch to non-global
 * mappings if required.
 */
static inline bool kaslr_requires_kpti(void)
{
	/*
	 * E0PD does a similar job to KPTI so can be used instead
	 * where available.
	 */
	if (IS_ENABLED(CONFIG_ARM64_E0PD)) {
		u64 mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
		if (cpuid_feature_extract_unsigned_field(mmfr2,
						ID_AA64MMFR2_EL1_E0PD_SHIFT))
			return false;
	}

	/*
	 * Systems affected by Cavium erratum 27456 are incompatible
	 * with KPTI.
	 */
	if (IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
		extern const struct midr_range cavium_erratum_27456_cpus[];

		if (is_midr_in_range_list(read_cpuid_id(),
					  cavium_erratum_27456_cpus))
			return false;
	}

	return true;
}
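
/*
 * Illustrative sketch only (not part of this header): early boot code
 * combines this predicate with the KASLR state when deciding whether the
 * kernel mappings must be non-global from the start (e.g. whether
 * arm64_use_ng_mappings should be set), roughly:
 *
 *	bool need_ng_mappings = kaslr_enabled() && kaslr_requires_kpti();
 */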

#define INIT_MM_CONTEXT(name)	\
	.pgd = swapper_pg_dir,
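
/*
 * INIT_MM_CONTEXT() is expanded by the generic init_mm definition in
 * mm/init-mm.c, roughly:
 *
 *	struct mm_struct init_mm = {
 *		...
 *		INIT_MM_CONTEXT(init_mm)
 *	};
 *
 * On arm64 it points the initial mm at the swapper_pg_dir page tables.
 */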

#endif	/* !__ASSEMBLY__ */
#endif	/* __ASM_MMU_H */