1/*
2 * Copyright 2023 Haiku, Inc. All rights reserved.
3 * Distributed under the terms of the MIT License.
4 *
5 * Authors:
6 *		Adrien Destugues, pulkomandy@pulkomandy.tk
7 *
8 * Corresponds to:
9 *		headers/os/drivers/KernelExport.h rev 57477
10 */
11
12/*!
13	\file KernelExport.h
14	\ingroup drivers
15	\ingroup kernel
16	\brief Interfaces for driver code running in kernel space.
17*/
18
19/*!
20	\fn cpu_status disable_interrupts(void)
21	\ingroup interrupts
22	\brief Disable interrupts.
23
24	Drivers can disable interrupts in order to set up the interrupt handler for a device without
25	being interrupted, or as a simple way to implement critical sections.
26
27	Interrupts should be kept disabled for as short a time as possible, and re-enabled using
28	\ref restore_interrupts.
29
30	\return The previous state of interrupts, which should be passed to \ref restore_interrupts
31*/
32
33/*!
34	\fn void restore_interrupts(cpu_status status)
35	\ingroup interrupts
36	\brief Restore interrupts to the previous state.
37
38	If interrupts were already disabled before the matching call to \ref disable_interrupts, do
39	nothing. Otherwise, enable interrupts again.
40*/
41
42/*!
43	\fn void acquire_spinlock(spinlock* lock)
44	\ingroup spinlocks
45	\brief Busy wait until the lock is acquired.
46
47	Wait until the lock is acquired. Note that this keeps the thread running on the CPU, and does
48	not release the CPU for other threads to run.
49
50	If the spinlock does not become available quickly enough, calls panic().
51*/
52
53/*!
54	\fn void release_spinlock(spinlock* lock)
55	\ingroup spinlocks
56	\brief Release a previously acquired spinlock.
57
58	This allows another CPU that is busy waiting on the spinlock to acquire it.
59*/
60
61/*!
62	\fn bool try_acquire_write_spinlock(rw_spinlock* lock)
63	\ingroup rw_spinlocks
64	\brief Acquire a rw_spinlock for writing, if available.
65
66	Check if no other thread is holding the lock, and in that case, acquire it immediately and
67	return true. Otherwise, return false. There is no wait for the rw_spinlock to become available.
68
69	Interrupts must be disabled, and recursive locking is not allowed.
70*/
71
72/*!
73	\fn void acquire_write_spinlock(rw_spinlock* lock)
74	\ingroup rw_spinlocks
75	\brief Wait for and acquire a write lock on an rw_spinlock
76
77	Repeatedly try to acquire a write lock until it works.
78	If this fails for too long, call panic().
79*/
80
81/*!
82	\fn void release_write_spinlock(rw_spinlock* lock)
83	\ingroup rw_spinlocks
84	\brief Release a previously acquired write lock on an rw_spinlock.
85*/
86
87/*!
88	\fn bool try_acquire_read_spinlock(rw_spinlock* lock)
89	\ingroup rw_spinlocks
90	\brief Acquire a rw_spinlock for reading, if available.
91
92	If the rw_spinlock is not currently write locked, add a read lock on it and return true.
93	Otherwise, return false.
94
95	There can be multiple readers at the same time on an rw_spinlock, but there can be only one
96	writer.
97*/
98
99/*!
100	\fn void acquire_read_spinlock(rw_spinlock* lock)
101	\ingroup rw_spinlocks
102	\brief Busy wait until a rw_spinlock can be read locked.
103
104	Loop until there are no writers holding the lock, then acquire a read lock.
105*/
106
107/*!
108	\fn void release_read_spinlock(rw_spinlock* lock)
109	\ingroup rw_spinlocks
110	\brief Release a read lock on a rw_spinlock
111*/
112
113/*!
114	\fn bool try_acquire_write_seqlock(seqlock* lock)
115	\ingroup seqlocks
116	\brief Acquire a seqlock for writing, without waiting.
117
118	A seqlock is similar to an rw_spinlock in that it can be locked separately for reading and
119	writing. However, it avoids writer starvation problems (when there are always reads being
120	done and a writer can never acquire the write lock).
121
122	To achieve this, the readers are not actually locked. Instead, they are allowed to read the
123	protected resource even while it is being written. The writer increments a counter whenever
124	it acquires and releases the lock. When releasing a read lock, a reader can use this counter
125	to compare against the value when it acquired its read lock. If the counter changed, that
126	means there was a concurrent write access, and the read data is invalid. The reader can
127	try to acquire a read lock again and read the updated value of the data.
128*/
129
130/*!
131	\fn void acquire_write_seqlock(seqlock* lock)
132	\ingroup seqlocks
133	\brief Busy wait for a seqlock and acquire it for writing.
134
135	Wait for all other writers to release the lock, then acquire it.
136
137	This increments the counter after acquiring the lock.
138*/
139
140/*!
141	\fn void release_write_seqlock(seqlock* lock)
142	\ingroup seqlocks
143	\brief Release a write lock on a seqlock.
144
145	This increments the counter before releasing the lock.
146*/
147
148/*!
149	\fn uint32 acquire_read_seqlock(seqlock* lock)
150	\ingroup seqlocks
151	\brief Prepare for read access to data protected by a seqlock
152
153	\return The counter value of the seqlock, at the time of locking, to be passed to
154		\ref release_read_seqlock.
155*/
156
157/*!
158	\fn bool release_read_seqlock(seqlock* lock, uint32 count)
159	\ingroup seqlocks
160	\brief Release a read lock and check if the read operation was successful.
161
162	\param count The lock count value returned by the corresponding \ref acquire_read_seqlock
163	\return true if there was no write access while the lock was read-locked, false otherwise.
164*/
165
166/*!
167	\fn status_t install_io_interrupt_handler(long interrupt_number, interrupt_handler handler, void *data, ulong flags)
168	\ingroup interrupts
169	\param flags a bit combination of:
170		- B_NO_LOCK_VECTOR to allow multiple interrupt handlers to run in parallel on different CPUs
171		- B_NO_HANDLED_INFO to disable tracking of whether the handler has handled the interrupt or not
172		- B_NO_ENABLE_COUNTER do not enable or disable interrupts for this handler
173	\brief Install an interrupt handler for legacy interrupts
174
175	For hardware that is hardwired to a specific interrupt pin on the interrupt controller.
176	The interrupt handler will be called when the interrupt is triggered, and the \ref data passed
177	as a parameter.
178
179	If flags does not contain B_NO_ENABLE_COUNTER, the interrupt will be activated when the first
180	handler is registered and deactivated when the last handler is removed. Handlers with
181	B_NO_ENABLE_COUNTER will not affect interrupt enabling or disabling: registering one never
182	enables the interrupt, and the interrupt can be disabled even if B_NO_ENABLE_COUNTER handlers
183	are still registered.
184*/
185
186/*!
187	\fn status_t remove_io_interrupt_handler(long interrupt_number, interrupt_handler handler, void	*data)
188	\ingroup interrupts
189	\brief Remove a previously registered interrupt handler
190*/
191
192/*!
193	\fn status_t add_timer(timer *t, timer_hook hook, bigtime_t period, int32 flags)
194	\ingroup timers
195	\brief Schedule a timer to call the \ref hook function periodically or at a specified time.
196
197	\param flags If B_ONE_SHOT_ABSOLUTE_TIMER, use the period as a date when the hook should be
198		called. Otherwise, use it as a period to call the hook repeatedly.
199	\param flags If B_TIMER_USE_TIMER_STRUCT_TIMES, use the period defined by \ref t instead of
200		\ref period.
201*/
202
203/*!
204	\fn bool cancel_timer(timer *t)
205	\ingroup timers
206	\brief Stop a timer scheduled by \ref add_timer.
207*/
208
209/*!
210	\fn thread_id spawn_kernel_thread(thread_func function, const char *name, int32 priority, void *arg)
211	\ingroup threads
212	\brief Start a kernel thread.
213
214	Similar to \ref spawn_thread, but the thread will run in the kernel team.
215*/
216
217/*!
218	\fn int send_signal_etc(pid_t thread, uint signal, uint32 flags)
219	\ingroup threads
220	\brief Send a signal to a thread.
221*/
222
223/*!
224	\fn status_t lock_memory_etc(team_id team, void *buffer, size_t numBytes, uint32 flags)
225	\ingroup memory
226	\brief Lock memory pages into RAM.
227
228	Lock a memory area and prevent accesses from other parts of the system.
229	This establishes the following:
230	- The memory is mapped into physical RAM (moved out of swap or committed if needed)
231	- No other thread can lock an overlapping memory range
232
233	This is used for example during DMA transfers, to make sure the DMA can operate on memory
234	that will not be accessed by the CPU or other devices.
235*/
236
237/*!
238	\fn status_t lock_memory(void *buffer, size_t numBytes, uint32 flags)
239	\ingroup memory
240	\brief Lock memory from the current team into RAM.
241*/
242
243/*!
244	\fn status_t unlock_memory_etc(team_id team, void *address, size_t numBytes, uint32 flags)
245	\ingroup memory
246	\brief Unlock memory previously locked by \ref lock_memory_etc
247*/
248
249/*!
250	\fn status_t unlock_memory(void *buffer, size_t numBytes, uint32 flags)
251	\ingroup memory
252	\brief Unlock memory previously locked by \ref lock_memory.
253*/
254
255/*!
256	\fn status_t get_memory_map_etc(team_id team, const void *address, size_t numBytes, physical_entry *table, uint32* _numEntries)
257	\ingroup memory
258	\brief Determine the physical addresses corresponding to a virtual memory range.
259*/
260
261/*!
262	\fn int32 get_memory_map(const void *buffer, size_t size, physical_entry *table, int32 numEntries)
263	\brief Get memory map for the current team.
264*/
265
266/*!
267	\fn area_id map_physical_memory(const char *areaName, phys_addr_t physicalAddress, size_t size, uint32 flags, uint32 protection, void **_mappedAddress)
268	\brief Create an area that allows access to a specific range of physical memory.
269
270	This can be used to map memory-mapped hardware to allow accessing it. The area can then be
271	used by a driver, or its id sent to userspace for direct hardware access from userspace.
272*/
273
274/*!
275	\fn void dprintf(const char *format, ...)
276	\ingroup debugging
277	\brief Print a message to the kernel log if dprintf is enabled.
278*/
279
280/*!
281	\fn void dvprintf(const char *format, va_list args)
282	\ingroup debugging
283	\brief Print a message to the kernel log if dprintf is enabled.
284*/
285
286/*!
287	\fn void kprintf(const char *fmt, ...)
288	\ingroup debugging
289	\brief Print a message to the kernel log unconditionally.
290*/
291
292/*!
293	\fn void dump_block(const char *buffer, int size, const char *prefix)
294	\brief Dump a memory buffer as hexadecimal values to the kernel log.
295*/
296
297/*!
298	\fn bool set_dprintf_enabled(bool new_state)
299	\ingroup debugging
300	\brief Enable dprintf log messages.
301
302	dprintf is used for debugging. It can be disabled to reduce the amount of logs from the kernel
303	and drivers, which will also speed up the system in some cases. However, this makes debugging
304	hardware and driver problems a lot more difficult.
305*/
306
307/*!
308	\fn void panic(const char *format, ...)
309	\ingroup debugging
310	\brief Enter the kernel debugger with the passed (formatted) message.
311*/
312
313/*!
314	\fn void kernel_debugger(const char *message)
315	\ingroup debugging
316	\brief Enter the kernel debugger with the passed message.
317*/
318
319/*!
320	\fn uint64 parse_expression(const char *string)
321	\ingroup debugging
322	\brief Parse an expression and return its value.
323
324	Expressions can contain numbers in various bases and simple arithmetic operations, as well as
325	kernel debugger variables. This function is used to parse kernel debugger command arguments.
326*/
327
328/*!
329	\fn int add_debugger_command(const char *name, debugger_command_hook hook, const char *help)
330	\ingroup debugging
331	\brief Add a command to the kernel debugger.
332
333	Drivers can add extra commands to the kernel debugger to ease investigation and debugging of
334	the driver and hardware. The commands accept a typical argc/argv command line.
335*/
336
337/*!
338	\fn int remove_debugger_command(const char *name, debugger_command_hook hook)
339	\ingroup debugging
340	\brief Remove a debugger command previously installed by \ref add_debugger_command.
341
342	It is important to remove the commands from a driver or module before it is unloaded, to avoid
343	having commands that point to code that doesn't exist anymore.
344*/
345
346/*!
347	\fn void spin(bigtime_t microseconds)
348	\brief Busy loop for the given time.
349
350	Some I/O operations may take a short while to complete. When the expected delay is less than
351	a few hundred microseconds, it is not worth blocking the thread and calling the scheduler. In
352	these situations, a busy loop is a better compromise, and the driver can continue its IO
353	accesses in a reasonable time and without too many reschedulings.
354*/
355
356/*!
357	\fn status_t register_kernel_daemon(daemon_hook hook, void *arg, int frequency)
358	\brief Register a function to be called periodically.
359*/
360
361/*!
362	\fn status_t unregister_kernel_daemon(daemon_hook hook, void *arg)
363	\brief Stop calling a daemon function.
364*/
365
366/*!
367	\fn void call_all_cpus(void (*func)(void *, int), void *cookie)
368	\brief Execute a function on all CPUs.
369*/
370
371/*!
372	\fn void call_all_cpus_sync(void (*func)(void *, int), void *cookie)
373	\brief Execute a function on all CPUs, and wait for all of them to complete it before
374		returning.
375*/
376
377/*!
378	\fn void memory_read_barrier(void)
379	\brief Execute a memory read barrier.
380
381	Some CPU and cache architectures do not automatically ensure consistency between the CPU cache,
382	the instruction ordering, and the memory. A barrier makes sure every read that should be
383	executed before the barrier will be complete before any more memory access operations can be
384	done.
385*/
386
387/*!
388	\fn void memory_write_barrier(void)
389	\brief Execute a memory write barrier.
390
391	Some CPU and cache architectures do not automatically ensure consistency between the CPU cache,
392	the instruction ordering, and the memory. A barrier makes sure every write that should be
393	executed before the barrier will be complete before any more memory access operations can be
394	done.
395*/
396
397/*!
398	\fn status_t user_memcpy(void *to, const void *from, size_t size)
399	\ingroup memory
400	\brief Copy memory between userspace and kernelspace.
401
402	There are protections in place to avoid the kernel accidentally reading or writing to userspace
403	memory. As a result, every access to userspace memory must be done with user_memcpy,
404	user_strlcpy or user_memset.
405
406	For example, the buffers for a read, write or ioctl operation are handled in this way.
407*/
408
409/*!
410	\fn ssize_t user_strlcpy(char *to, const char *from, size_t size)
411	\brief Copy a string between userspace and kernel space.
412
413	Similar to strlcpy, but one of the source and destination must be in kernel space, and the
414	other must be in userspace.
415*/
416
417/*!
418	\fn status_t user_memset(void *start, char c, size_t count)
419	\brief Set userspace memory.
420
421	Set memory to a specific byte value in the current userspace team.
422*/
423