/*
 * Copyright 2021, Haiku, Inc. All rights reserved.
 * Distributed under the terms of the MIT License.
 */
#ifndef _SYSTEM_ARCH_RISCV64_DEFS_H
#define _SYSTEM_ARCH_RISCV64_DEFS_H


#include <SupportDefs.h>

#define B_ALWAYS_INLINE __attribute__((always_inline)) inline


#ifdef __cplusplus

enum {
	modeU = 0,
	modeS = 1,
	modeM = 3,
};

// values of the fs and xs extension state fields
enum {
	extStatusOff     = 0,
	extStatusInitial = 1,
	extStatusClean   = 2,
	extStatusDirty   = 3,
};

union MstatusReg {
	struct {
		uint64 ie:      4; // interrupt enable
		uint64 pie:     4; // previous interrupt enable
		uint64 spp:     1; // previous mode (supervisor)
		uint64 unused1: 2;
		uint64 mpp:     2; // previous mode (machine)
		uint64 fs:      2; // FPU status
		uint64 xs:      2; // extensions status
		uint64 mprv:    1; // modify privilege
		uint64 sum:     1; // permit supervisor user memory access
		uint64 mxr:     1; // make executable readable
		uint64 tvm:     1; // trap virtual memory
		uint64 tw:      1; // timeout wait (trap WFI)
		uint64 tsr:     1; // trap SRET
		uint64 unused2: 9;
		uint64 uxl:     2; // U-mode XLEN
		uint64 sxl:     2; // S-mode XLEN
		uint64 unused3: 27;
		uint64 sd:      1; // status dirty
	};
	uint64 val;
};

union SstatusReg {
	struct {
		uint64 ie:      2; // interrupt enable
		uint64 unused1: 2;
		uint64 pie:     2; // previous interrupt enable
		uint64 unused2: 2;
		uint64 spp:     1; // previous mode (supervisor)
		uint64 unused3: 4;
		uint64 fs:      2; // FPU status
		uint64 xs:      2; // extensions status
		uint64 unused4: 1;
		uint64 sum:     1; // permit supervisor user memory access
		uint64 mxr:     1; // make executable readable
		uint64 unused5: 12;
		uint64 uxl:     2; // U-mode XLEN
		uint64 unused6: 29;
		uint64 sd:      1; // status dirty
	};
	uint64 val;
};
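
// Example (illustrative sketch, not part of this header's API): these
// unions let a raw CSR value be decoded and edited by field instead of
// with hand-written shifts and masks. Using the Sstatus()/SetSstatus()
// accessors generated further below:
//
//	SstatusReg status;
//	status.val = Sstatus();
//	if (status.sum == 0) {
//		status.sum = 1; // permit supervisor access to user pages
//		SetSstatus(status.val);
//	}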

enum {
	softInt    = 0,
	uSoftInt   = softInt + modeU,
	sSoftInt   = softInt + modeS,
	mSoftInt   = softInt + modeM,
	timerInt   = 4,
	uTimerInt  = timerInt + modeU,
	sTimerInt  = timerInt + modeS,
	mTimerInt  = timerInt + modeM,
	externInt  = 8,
	uExternInt = externInt + modeU,
	sExternInt = externInt + modeS,
	mExternInt = externInt + modeM,
};
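
// These values are both interrupt cause numbers and bit positions in
// the mie/mip and sie/sip CSRs. For example, enabling the supervisor
// timer interrupt through the SetBitsSie() accessor generated below
// (illustrative sketch only):
//
//	SetBitsSie(1 << sTimerInt); // sTimerInt == timerInt + modeS == 5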

enum {
	causeInterrupt        = 1ULL << 63, // the remaining bits hold the interrupt number
	causeExecMisalign     = 0,
	causeExecAccessFault  = 1,
	causeIllegalInst      = 2,
	causeBreakpoint       = 3,
	causeLoadMisalign     = 4,
	causeLoadAccessFault  = 5,
	causeStoreMisalign    = 6,
	causeStoreAccessFault = 7,
	causeECall            = 8,
	causeUEcall           = causeECall + modeU,
	causeSEcall           = causeECall + modeS,
	causeMEcall           = causeECall + modeM,
	causeExecPageFault    = 12,
	causeLoadPageFault    = 13,
	causeStorePageFault   = 15,
};
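
// Example (illustrative sketch): a trap handler separates interrupts
// from exceptions via the top bit of the cause CSR, read through the
// Scause() accessor generated below:
//
//	uint64 cause = Scause();
//	if ((cause & causeInterrupt) != 0) {
//		uint64 irq = cause & ~causeInterrupt; // e.g. sTimerInt
//	} else if (cause == causeStorePageFault) {
//		// the faulting address is in Stval()
//	}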

// physical memory protection
enum {
	pmpR = 0,
	pmpW = 1,
	pmpX = 2,
};

enum {
	// naturally aligned power of two
	pmpMatchNapot = 3 << 3,
};

enum {
	pageBits = 12,
	pteCount = 512,
	pteIdxBits = 9,
};
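
// These constants fit together: a page is 1 << pageBits == 4096 bytes
// and holds pteCount == 512 eight-byte entries, so each translation
// level consumes pteIdxBits == 9 bits of the virtual address.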

union Pte {
	struct {
		uint64 isValid:    1;
		uint64 isRead:     1;
		uint64 isWrite:    1;
		uint64 isExec:     1;
		uint64 isUser:     1;
		uint64 isGlobal:   1;
		uint64 isAccessed: 1;
		uint64 isDirty:    1;

		uint64 rsw:        2;
		uint64 ppn:       44;
		uint64 reserved:  10;
	};
	uint64 val;
};
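
// Example (illustrative sketch): building a kernel read/write leaf
// entry for a page at the hypothetical physical address `physAdr`.
// The ppn field is the physical address shifted right by pageBits:
//
//	Pte pte;
//	pte.val = 0;
//	pte.isValid = 1;
//	pte.isRead = 1;
//	pte.isWrite = 1;
//	pte.isAccessed = 1; // preset A/D to avoid update faults
//	pte.isDirty = 1;
//	pte.ppn = physAdr >> pageBits;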

enum {
	satpModeBare =  0,
	satpModeSv39 =  8,
	satpModeSv48 =  9,
	satpModeSv57 = 10,
	satpModeSv64 = 11,
};

union SatpReg {
	struct {
		uint64 ppn:  44;
		uint64 asid: 16;
		uint64 mode:  4;
	};
	uint64 val;
};
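
// Example (illustrative sketch): switching to an Sv39 page table whose
// root sits at the hypothetical physical address `pageTablePhys`, with
// ASID 0, using accessors defined below:
//
//	SatpReg satp;
//	satp.val = 0;
//	satp.ppn = pageTablePhys >> pageBits;
//	satp.asid = 0;
//	satp.mode = satpModeSv39;
//	SetSatp(satp.val);
//	FlushTlbAll(); // discard translations from the old table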

static B_ALWAYS_INLINE uint64 VirtAdrPte(uint64 virtAdr, uint32 level)
{
	return (virtAdr >> (pageBits + pteIdxBits * level)) % (1 << pteIdxBits);
}
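
// Worked example: Sv39 translates in three levels, so a table walk for
// a virtual address `adr` uses VirtAdrPte(adr, 2) as the root-table
// index (bits 38..30), VirtAdrPte(adr, 1) for the middle level
// (bits 29..21) and VirtAdrPte(adr, 0) for the leaf (bits 20..12).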

static B_ALWAYS_INLINE uint64 VirtAdrOfs(uint64 virtAdr)
{
	return virtAdr % (1 << pageBits);
}

#define CSR_REG_MACRO(Name, value) \
	static B_ALWAYS_INLINE uint64 Name() { \
		uint64 x; asm volatile("csrr %0, " #value : "=r" (x)); return x;} \
	static B_ALWAYS_INLINE void Set##Name(uint64 x) { \
		asm volatile("csrw " #value ", %0" : : "r" (x));} \
	static B_ALWAYS_INLINE void SetBits##Name(uint64 x) { \
		asm volatile("csrs " #value ", %0" : : "r" (x));} \
	static B_ALWAYS_INLINE void ClearBits##Name(uint64 x) { \
		asm volatile("csrc " #value ", %0" : : "r" (x));} \
	static B_ALWAYS_INLINE uint64 GetAndSetBits##Name(uint64 x) { \
		uint64 res; \
		asm volatile("csrrs %0, " #value ", %1" : "=r" (res) : "r" (x)); \
		return res; \
	} \
	static B_ALWAYS_INLINE uint64 GetAndClearBits##Name(uint64 x) { \
		uint64 res; \
		asm volatile("csrrc %0, " #value ", %1" : "=r" (res) : "r" (x)); \
		return res; \
	} \

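// Each CSR_REG_MACRO(Name, csr) invocation below generates inline
// accessors for one CSR: a getter Name() plus Set##Name,
// SetBits##Name, ClearBits##Name, GetAndSetBits##Name and
// GetAndClearBits##Name, wrapping the csrr, csrw, csrs, csrc, csrrs
// and csrrc instructions. CSR_REG_MACRO(Satp, satp), for instance,
// yields Satp() and SetSatp().
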
// CPU core ID
CSR_REG_MACRO(Mhartid, mhartid)

// status register
CSR_REG_MACRO(Mstatus, mstatus)
CSR_REG_MACRO(Sstatus, sstatus)

// exception program counter
CSR_REG_MACRO(Mepc, mepc)
CSR_REG_MACRO(Sepc, sepc)

// interrupt pending
CSR_REG_MACRO(Mip, mip)
CSR_REG_MACRO(Sip, sip)

// interrupt enable
CSR_REG_MACRO(Mie, mie)
CSR_REG_MACRO(Sie, sie)

// exception delegation
CSR_REG_MACRO(Medeleg, medeleg)
// interrupt delegation
CSR_REG_MACRO(Mideleg, mideleg)

// trap vector base address; the low 2 bits select the trap mode
CSR_REG_MACRO(Mtvec, mtvec)
CSR_REG_MACRO(Stvec, stvec)

// address translation and protection (pointer to page table and flags)
CSR_REG_MACRO(Satp, satp)

// scratch register
CSR_REG_MACRO(Mscratch, mscratch)
CSR_REG_MACRO(Sscratch, sscratch)

// trap cause
CSR_REG_MACRO(Mcause, mcause)
CSR_REG_MACRO(Scause, scause)

// trap value
CSR_REG_MACRO(Mtval, mtval)
CSR_REG_MACRO(Stval, stval)

// machine-mode counter enable
CSR_REG_MACRO(Mcounteren, mcounteren)

// cycle counter
CSR_REG_MACRO(CpuMcycle, mcycle)
CSR_REG_MACRO(CpuCycle, cycle)
// monotonic timer
CSR_REG_MACRO(CpuTime, time)

// physical memory protection
CSR_REG_MACRO(Pmpaddr0, pmpaddr0)
CSR_REG_MACRO(Pmpcfg0, pmpcfg0)
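
// Example (illustrative sketch): early M-mode firmware commonly opens
// the whole physical address space to S/U-mode with one NAPOT region
// before dropping to supervisor mode:
//
//	SetPmpaddr0(0x3fffffffffffffULL); // address bits 55:2 all set
//	SetPmpcfg0((1 << pmpR) | (1 << pmpW) | (1 << pmpX) | pmpMatchNapot);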

// flush the TLB
static B_ALWAYS_INLINE void FlushTlbAll() {
	asm volatile("sfence.vma" : : : "memory");}
static B_ALWAYS_INLINE void FlushTlbPage(uint64 x) {
	asm volatile("sfence.vma %0" : : "r" (x) : "memory");}
static B_ALWAYS_INLINE void FlushTlbAllAsid(uint64 asid) {
	asm volatile("sfence.vma x0, %0" : : "r" (asid) : "memory");}
static B_ALWAYS_INLINE void FlushTlbPageAsid(uint64 page, uint64 asid) {
	asm volatile("sfence.vma %0, %1" : : "r" (page), "r" (asid) : "memory");}
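
// Example (illustrative sketch): after editing a single mapping, only
// the stale translation needs to be discarded. For a hypothetical
// virtual address `virtAdr` in the address space with ASID `asid`:
//
//	FlushTlbPageAsid(virtAdr, asid);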

// flush instruction cache
static B_ALWAYS_INLINE void FenceI() {
	asm volatile("fence.i" : : : "memory");}

static B_ALWAYS_INLINE uint64 Sp() {
	uint64 x; asm volatile("mv %0, sp" : "=r" (x)); return x;}
static B_ALWAYS_INLINE void SetSp(uint64 x) {
	asm volatile("mv sp, %0" : : "r" (x));}
static B_ALWAYS_INLINE uint64 Fp() {
	uint64 x; asm volatile("mv %0, fp" : "=r" (x)); return x;}
static B_ALWAYS_INLINE void SetFp(uint64 x) {
	asm volatile("mv fp, %0" : : "r" (x));}
static B_ALWAYS_INLINE uint64 Tp() {
	uint64 x; asm volatile("mv %0, tp" : "=r" (x)); return x;}
static B_ALWAYS_INLINE void SetTp(uint64 x) {
	asm volatile("mv tp, %0" : : "r" (x));}
static B_ALWAYS_INLINE uint64 Ra() {
	uint64 x; asm volatile("mv %0, ra" : "=r" (x)); return x;}
static B_ALWAYS_INLINE void SetRa(uint64 x) {
	asm volatile("mv ra, %0" : : "r" (x));}

static B_ALWAYS_INLINE void Ecall() {asm volatile("ecall");}

// Wait for an interrupt; reduces CPU load while idle.
static B_ALWAYS_INLINE void Wfi() {asm volatile("wfi");}

static B_ALWAYS_INLINE void Mret() {asm volatile("mret");}
static B_ALWAYS_INLINE void Sret() {asm volatile("sret");}

#endif // __cplusplus


#define SPINLOCK_PAUSE()	do {} while (false)


#endif	/* _SYSTEM_ARCH_RISCV64_DEFS_H */