/*
 * Copyright 2022-2023, Haiku Inc. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2009, Wischert, johanneswi@gmail.com.
 * All rights reserved. Distributed under the terms of the MIT License.
 *
 * Copyright 2003, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

#include <arch/arm/arch_cpu.h>
#include <arch/arm/arch_cpu_defs.h>

#include <asm_defs.h>

#include "asm_offsets.h"
#include "syscall_numbers.h"

.text


/* NOTE: the I bit in cpsr (bit 7) is *set* to disable IRQs and cleared to
   enable them. */
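/* For reference (ARM Architecture Reference Manual), the CPSR bits touched
 * in this file:
 *   bits 0-4  processor mode (CPSR_MODE_MASK)
 *   bit 5     T, Thumb state
 *   bit 6     F, FIQ disable
 *   bit 7     I, IRQ disable
 */

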
/* void arch_int_enable_interrupts(void) */
FUNCTION(arch_int_enable_interrupts):
	mrs	r0, cpsr
	bic	r0, r0, #(1<<7)		/* clear the I bit to enable IRQs */
	msr	cpsr_c, r0
	bx	lr
FUNCTION_END(arch_int_enable_interrupts)


/* int arch_int_disable_interrupts(void) */
FUNCTION(arch_int_disable_interrupts):
	mrs	r0, cpsr
	orr	r1, r0, #(1<<7)		/* set the I bit to disable IRQs */
	msr	cpsr_c, r1
	bx	lr			/* return the previous CPSR in r0 */
FUNCTION_END(arch_int_disable_interrupts)


/* void arch_int_restore_interrupts(int oldState) */
FUNCTION(arch_int_restore_interrupts):
	mrs	r1, cpsr
	and	r0, r0, #(1<<7)		/* keep only the I bit of the old state */
	bic	r1, r1, #(1<<7)
	orr	r1, r1, r0
	msr	cpsr_c, r1
	bx	lr
FUNCTION_END(arch_int_restore_interrupts)

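/* Typical usage pattern on the C side (a sketch):
 *
 *	int state = arch_int_disable_interrupts();
 *	// ... critical section ...
 *	arch_int_restore_interrupts(state);
 */
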

/* bool arch_int_are_interrupts_enabled(void) */
FUNCTION(arch_int_are_interrupts_enabled):
	mrs	r0, cpsr
	and	r0, r0, #(1<<7)		/* read the I bit */
	cmp	r0, #0
	moveq	r0, #1			/* I bit clear: IRQs are enabled */
	movne	r0, #0			/* I bit set: IRQs are disabled */
	bx	lr
FUNCTION_END(arch_int_are_interrupts_enabled)


/* void arm_context_switch(struct arch_thread* oldState,
	struct arch_thread* newState); */
FUNCTION(arm_context_switch):
	stmfd	sp!, { r0-r12, lr }	/* save the current register context */
	str	sp, [r0]		/* store the old thread's stack pointer */
	ldr	sp, [r1]		/* switch to the new thread's stack */
	ldmfd	sp!, { r0-r12, lr }	/* restore its register context */
	bx	lr
FUNCTION_END(arm_context_switch)

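/* Note: arm_context_switch() assumes the saved stack pointer lives at
 * offset 0 of struct arch_thread (see "str sp, [r0]" above); a generated
 * asm offset (as used with CPU_ENT_fault_handler below) would make this
 * robust against layout changes.
 */
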

/* void arm_save_fpu(struct arch_fpu_context* context); */
FUNCTION(arm_save_fpu):
	fstmiad		r0!, {d0-d15}
	fstmiad		r0!, {d16-d31}	/* requires VFPv3 with 32 d-registers */
	vmrs		r1, fpscr
	str			r1, [r0]
	bx			lr
FUNCTION_END(arm_save_fpu)


/* void arm_restore_fpu(struct arch_fpu_context* context); */
FUNCTION(arm_restore_fpu):
	fldmiad		r0!, {d0-d15}
	fldmiad		r0!, {d16-d31}
	ldr			r1, [r0]
	vmsr		fpscr, r1
	bx			lr
FUNCTION_END(arm_restore_fpu)

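/* arm_save_fpu()/arm_restore_fpu() assume an arch_fpu_context layout of the
 * 32 VFP double registers followed by the FPSCR word: 32 * 8 + 4 = 260 bytes.
 */
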

/* status_t arch_cpu_user_memcpy(void *to, const void *from, size_t size, addr_t *faultHandler) */
FUNCTION(_arch_cpu_user_memcpy):
	stmfd	sp!, { r4-r6, lr }

	ldr	r6, [r3]	/* save the previous fault handler */
	ldr	r4, =.L_user_memcpy_error
	str	r4, [r3]	/* set fault handler */

	movs	r4, r2, lsr #2	/* size / 4 */
	beq	2f		/* less than one word to copy */
1:
	ldr	r5, [r1]
	str	r5, [r0]
	add	r1, r1, #4
	add	r0, r0, #4
	subs	r4, r4, #1
	bne	1b
2:
	ands	r4, r2, #3	/* size % 4 */
	beq	4f
3:
	ldrb	r5, [r1]
	strb	r5, [r0]
	add	r1, r1, #1
	add	r0, r0, #1
	subs	r4, r4, #1
	bne	3b
4:
	str	r6, [r3]	/* restore fault handler */
	mov	r0, #0
	ldmfd	sp!, { r4-r6, pc }

.L_user_memcpy_error:
	str	r6, [r3]	/* restore fault handler */
	mov	r0, #-1

	ldmfd	sp!, { r4-r6, pc }
FUNCTION_END(_arch_cpu_user_memcpy)
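
/* The fault-handler contract used above: *faultHandler holds the address the
 * page fault code resumes at when a fault occurs while touching user memory.
 * A sketch of a caller, assuming the handler slot lives in the current
 * thread (the exact field name is an assumption here):
 *
 *	status_t status = _arch_cpu_user_memcpy(to, from, size,
 *		&thread->fault_handler);
 *	// 0 on success, or -1 if a fault was taken during the copy
 */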

/* status_t arch_cpu_user_memset(void *to, char c, size_t count, addr_t *faultHandler) */
FUNCTION(_arch_cpu_user_memset):
	stmfd	sp!, { r4-r5, lr }

	ldr	r5, [r3]	/* save the previous fault handler */
	ldr	r4, =.L_user_memset_error
	str	r4, [r3]	/* set fault handler */

	/* replicate the fill byte into all four bytes of r1,
	   e.g. c = 0xcc: 0xcc -> 0xcccc -> 0xcccccccc */
	and	r1, r1, #0xff
	orr	r1, r1, r1, lsl #8
	orr	r1, r1, r1, lsl #16

	movs	r4, r2, lsr #2	/* count / 4 */
	beq	2f		/* less than one word to set */
1:
	str	r1, [r0]
	add	r0, r0, #4
	subs	r4, r4, #1
	bne	1b
2:
	ands	r4, r2, #3	/* count % 4 */
	beq	4f
3:
	strb	r1, [r0]
	add	r0, r0, #1
	subs	r4, r4, #1
	bne	3b
4:
	mov	r0, #0
	str	r5, [r3]	/* restore fault handler */

	ldmfd	sp!, { r4-r5, pc }

.L_user_memset_error:
	mov	r0, #-1
	str	r5, [r3]	/* restore fault handler */

	ldmfd	sp!, { r4-r5, pc }
FUNCTION_END(_arch_cpu_user_memset)

/* ssize_t arch_cpu_user_strlcpy(void *to, const void *from, size_t size, addr_t *faultHandler) */
FUNCTION(_arch_cpu_user_strlcpy):
	stmfd	sp!, { r4-r6, lr }
	ldr	r5, [r3]	/* save the previous fault handler */
	ldr	r4, =.L_user_strlcpy_error
	str	r4, [r3]	/* set fault handler */
	mov	r6, #0
	cmp	r2, #0		/* size == 0: don't write anything */
	beq	3f
	sub	r2, r2, #1	/* reserve room for the NUL terminator */
1:
	cmp	r6, r2		/* reached max length? */
	bge	2f
	ldrb	r4, [r1, r6]
	strb	r4, [r0, r6]
	cmp	r4, #0
	beq	3f		/* NUL copied; r6 is the string length */
	add	r6, r6, #1
	b	1b
2:
	mov	r4, #0
	strb	r4, [r0, r6]	/* NUL-terminate the truncated copy */
3:
	mov	r0, r6		/* return length (bytes copied, without NUL) */
	str	r5, [r3]	/* restore fault handler */

	ldmfd	sp!, { r4-r6, pc }

.L_user_strlcpy_error:
	mov	r0, #-1
	str	r5, [r3]	/* restore fault handler */

	ldmfd	sp!, { r4-r6, pc }
FUNCTION_END(_arch_cpu_user_strlcpy)


/*!	\fn void arch_debug_call_with_fault_handler(cpu_ent* cpu,
		jmp_buf jumpBuffer, void (*function)(void*), void* parameter)

	Called by debug_call_with_fault_handler() to do the dirty work of setting
	the fault handler and calling the function. If the function causes a page
	fault, arch_debug_call_with_fault_handler() calls longjmp() with the
	given \a jumpBuffer. Otherwise it returns normally.

	debug_call_with_fault_handler() has already saved the CPU's fault_handler
	and fault_handler_stack_pointer and will reset them later, so
	arch_debug_call_with_fault_handler() doesn't need to care about them.

	\param cpu The \c cpu_ent for the current CPU.
	\param jumpBuffer Buffer to be used for longjmp().
	\param function The function to be called.
	\param parameter The parameter to be passed to the function to be called.
*/
FUNCTION(arch_debug_call_with_fault_handler):
	stmfd	sp!, { r1, r4, lr }

	// Set fault handler address, and fault handler stack pointer address. We
	// don't need to save the previous values, since that's done by the caller.
	ldr	r4, =1f
	str	r4, [r0, #CPU_ENT_fault_handler]
	str	sp, [r0, #CPU_ENT_fault_handler_stack_pointer]

	// call the function
	mov	r0, r3
	blx	r2

	// regular return
	ldmfd	sp!, { r1, r4, pc }

	// fault -- return via longjmp(jumpBuffer, 1)
1:
	ldmfd	sp!, { r0, r4, lr }	// restore jumpBuffer in r0 (was r1)
	mov	r1, #1
	b	longjmp
FUNCTION_END(arch_debug_call_with_fault_handler)
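
// Conceptual caller-side pattern (a sketch of what
// debug_call_with_fault_handler() does around this routine):
//
//	jmp_buf jumpBuffer;
//	if (setjmp(jumpBuffer) == 0)
//		arch_debug_call_with_fault_handler(cpu, jumpBuffer, function,
//			parameter);
//	else
//		; // 'function' faulted and we got here via longjmp()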


FUNCTION(arch_return_to_userland):
	// set SPSR to user mode, IRQ enabled, FIQ disabled
	mrs		ip, cpsr
	bic		ip, ip, #(CPSR_MODE_MASK | CPSR_T | CPSR_F | CPSR_I)
	orr		ip, ip, #(CPSR_MODE_USR | CPSR_F)
	msr		spsr, ip

	// use system mode to load user mode SP and LR
	ldr		r4, [r0, #IFRAME_usr_sp]
	ldr		r5, [r0, #IFRAME_usr_lr]
	mrs		ip, cpsr
	bic		ip, ip, #(CPSR_MODE_MASK)
	orr		ip, ip, #(CPSR_MODE_SYS)
	msr		cpsr_c, ip
	mov		sp, r4
	mov		lr, r5
	bic		ip, ip, #(CPSR_MODE_MASK)
	orr		ip, ip, #(CPSR_MODE_SVC)
	msr		cpsr_c, ip

	// load user mode entry point in LR
	ldr		lr, [r0, #IFRAME_pc]

	// load general purpose registers from the iframe
	mov		sp, r0
	add		sp, sp, #4
	ldmfd	sp!, { r0-r12 }

	// jump to user mode entry point; movs pc, lr also copies SPSR to CPSR
	movs	pc, lr
FUNCTION_END(arch_return_to_userland)


FUNCTION(arch_user_thread_exit):
	svc		SYSCALL_EXIT_THREAD
	bx		lr		// not reached: the syscall does not return
FUNCTION_END(arch_user_thread_exit)