/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PARISC_MMU_CONTEXT_H
#define __PARISC_MMU_CONTEXT_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <asm-generic/mm_hooks.h>

/* on PA-RISC, we actually have enough contexts to justify an allocator
 * for them.  prumpf */

extern unsigned long alloc_sid(void);
extern void free_sid(unsigned long);

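/*
 * Allocate a fresh space id for a newly created address space.  The mm
 * must not be shared with anyone else yet, hence the mm_users check.
 */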
#define init_new_context init_new_context
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_users) != 1);

	mm->context.space_id = alloc_sid();
	return 0;
}

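/* Return the space id to the allocator when the address space is torn down. */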
#define destroy_context destroy_context
static inline void
destroy_context(struct mm_struct *mm)
{
	free_sid(mm->context.space_id);
	mm->context.space_id = 0;
}

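/*
 * Derive the protection id that matches a space id.  The low bit of a
 * protection id register is the write-disable bit, hence the extra shift.
 */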
static inline unsigned long __space_to_prot(mm_context_t context)
{
#if SPACEID_SHIFT == 0
	return context.space_id << 1;
#else
	return context.space_id >> (SPACEID_SHIFT - 1);
#endif
}

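/*
 * Make the context live: put the space id in the space register used for
 * user accesses and the matching protection id in control register 8.
 */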
static inline void load_context(mm_context_t context)
{
	mtsp(context.space_id, SR_USER);
	mtctl(__space_to_prot(context), 8);
}

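/*
 * Switch address spaces with interrupts already disabled: point the TLB
 * fault handlers at the new page directory (cr25) and load the new
 * space/protection ids.
 */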
static inline void switch_mm_irqs_off(struct mm_struct *prev,
		struct mm_struct *next, struct task_struct *tsk)
{
	if (prev != next) {
#ifdef CONFIG_TLB_PTLOCK
		/* put physical address of page_table_lock in cr28 (tr4)
		   for TLB faults */
		spinlock_t *pgd_lock = &next->page_table_lock;
		mtctl(__pa(__ldcw_align(&pgd_lock->rlock.raw_lock)), 28);
#endif
		mtctl(__pa(next->pgd), 25);
		load_context(next->context);
	}
}

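/*
 * Regular switch_mm() entry point: disable interrupts around the register
 * updates so a TLB fault never observes a half-switched context.
 */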
static inline void switch_mm(struct mm_struct *prev,
		struct mm_struct *next, struct task_struct *tsk)
{
	unsigned long flags;

	if (prev == next)
		return;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}
#define switch_mm_irqs_off switch_mm_irqs_off

#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	/*
	 * Activate_mm is our one chance to allocate a space id
	 * for a new mm created in the exec path. There's also
	 * some lazy tlb stuff, which is currently dead code, but
	 * we only allocate a space id if one hasn't been allocated
	 * already, so we should be OK.
	 */

	BUG_ON(next == &init_mm); /* Should never happen */

	if (next->context.space_id == 0)
		next->context.space_id = alloc_sid();

	switch_mm(prev, next, current);
}

#include <asm-generic/mmu_context.h>

#endif