/*
 * Copyright 2004-2016, Haiku, Inc.
 * Distributed under the terms of the MIT License.
 *
 * Thread definition and structures
 */
#ifndef _KERNEL_THREAD_TYPES_H
#define _KERNEL_THREAD_TYPES_H


#ifndef _ASSEMBLER

#include <pthread.h>

#include <arch/thread_types.h>
#include <condition_variable.h>
#include <heap.h>
#include <ksignal.h>
#include <lock.h>
#include <smp.h>
#include <thread_defs.h>
#include <timer.h>
#include <UserTimer.h>
#include <user_debugger.h>
#include <util/DoublyLinkedList.h>
#include <util/KernelReferenceable.h>
#include <util/list.h>

#include <SupportDefs.h>


enum additional_thread_state {
	THREAD_STATE_FREE_ON_RESCHED = 7, // free the thread structure upon reschedule
//	THREAD_STATE_BIRTH	// thread is being created
};

#define THREAD_MIN_SET_PRIORITY				B_LOWEST_ACTIVE_PRIORITY
#define THREAD_MAX_SET_PRIORITY				B_REAL_TIME_PRIORITY

enum team_state {
	TEAM_STATE_NORMAL,		// normal state
	TEAM_STATE_BIRTH,		// being constructed
	TEAM_STATE_SHUTDOWN,	// still lives, but is going down
	TEAM_STATE_DEATH		// only the Team object still exists, threads are
							// gone
};

#define	TEAM_FLAG_EXEC_DONE	0x01
	// team has executed exec*()
#define	TEAM_FLAG_DUMP_CORE	0x02
	// a core dump is in progress

typedef enum job_control_state {
	JOB_CONTROL_STATE_NONE,
	JOB_CONTROL_STATE_STOPPED,
	JOB_CONTROL_STATE_CONTINUED,
	JOB_CONTROL_STATE_DEAD
} job_control_state;


struct cpu_ent;
struct image;					// defined in image.c
struct io_context;
struct realtime_sem_context;	// defined in realtime_sem.cpp
struct select_info;
struct user_thread;				// defined in libroot/user_thread.h
struct VMAddressSpace;
struct user_mutex_context;		// defined in user_mutex.cpp
struct xsi_sem_context;			// defined in xsi_semaphore.cpp

namespace Scheduler {
	struct ThreadData;
}

namespace BKernel {
	struct Team;
	struct Thread;
	struct ProcessGroup;
}


struct thread_death_entry {
	struct list_link	link;
	thread_id			thread;
	status_t			status;
};

struct team_loading_info {
	ConditionVariable	condition;
	status_t			result;		// the result of the loading
};

struct team_watcher {
	struct list_link	link;
	void				(*hook)(team_id team, void *data);
	void				*data;
};


#define MAX_DEAD_CHILDREN	32
	// this is a soft limit for the number of child death entries in a team


struct job_control_entry : DoublyLinkedListLinkImpl<job_control_entry> {
	job_control_state	state;		// current team job control state
	thread_id			thread;		// main thread ID == team ID
	uint16				signal;		// signal causing the current state
	bool				has_group_ref;
	uid_t				signaling_user;

	// valid while state != JOB_CONTROL_STATE_DEAD
	BKernel::Team*		team;

	// valid when state == JOB_CONTROL_STATE_DEAD
	pid_t				group_id;
	status_t			status;
	uint16				reason;		// reason for the team's demise, one of the
									// CLD_* values defined in <signal.h>
	bigtime_t			user_time;
	bigtime_t			kernel_time;

	job_control_entry();
	~job_control_entry();

	void InitDeadState();

	job_control_entry& operator=(const job_control_entry& other);
};

typedef DoublyLinkedList<job_control_entry> JobControlEntryList;

struct team_job_control_children {
	JobControlEntryList		entries;
};

struct team_dead_children : team_job_control_children {
	ConditionVariable	condition_variable;
	uint32				count;
	bigtime_t			kernel_time;
	bigtime_t			user_time;
};


struct team_death_entry {
	int32				remaining_threads;
	ConditionVariable	condition;
};


struct free_user_thread {
	struct free_user_thread*	next;
	struct user_thread*			thread;
};


class AssociatedDataOwner;

class AssociatedData : public BReferenceable,
	public DoublyLinkedListLinkImpl<AssociatedData> {
public:
								AssociatedData();
	virtual						~AssociatedData();

			AssociatedDataOwner* Owner() const
									{ return fOwner; }
			void				SetOwner(AssociatedDataOwner* owner)
									{ fOwner = owner; }

	virtual	void				OwnerDeleted(AssociatedDataOwner* owner);

private:
			AssociatedDataOwner* fOwner;
};


class AssociatedDataOwner {
public:
								AssociatedDataOwner();
								~AssociatedDataOwner();

			bool				AddData(AssociatedData* data);
			bool				RemoveData(AssociatedData* data);

			void				PrepareForDeletion();

private:
			typedef DoublyLinkedList<AssociatedData> DataList;

private:

			mutex				fLock;
			DataList			fList;
};


typedef int32 (*thread_entry_func)(thread_func, void *);


namespace BKernel {


struct GroupsArray : KernelReferenceable {
	int		count;
	gid_t	groups[];
};


template<typename IDType>
struct TeamThreadIteratorEntry
	: DoublyLinkedListLinkImpl<TeamThreadIteratorEntry<IDType> > {
	typedef IDType	id_type;
	typedef TeamThreadIteratorEntry<id_type> iterator_type;

	id_type	id;			// -1 for iterator entries, >= 0 for actual elements
	bool	visible;	// the entry is publicly visible
};


struct Team : TeamThreadIteratorEntry<team_id>, KernelReferenceable,
		AssociatedDataOwner {
	DoublyLinkedListLink<Team>	global_list_link;
	Team			*hash_next;		// next in hash
	Team			*siblings_next;	// next in parent's list; protected by
									// parent's fLock
	Team			*parent;		// write-protected by both parent (if any)
									// and this team's fLock
	Team			*children;		// protected by this team's fLock;
									// adding/removing a child also requires the
									// child's fLock
	Team			*group_next;	// protected by the group's lock

	int64			serial_number;	// immutable after adding team to hash

	// process group info -- write-protected by both the group's lock, the
	// team's lock, and the team's parent's lock
	pid_t			group_id;
	pid_t			session_id;
	ProcessGroup	*group;

	int				num_threads;	// number of threads in this team
	int				state;			// current team state, see above
	int32			flags;
	struct io_context *io_context;
	struct user_mutex_context *user_mutex_context;
	struct realtime_sem_context	*realtime_sem_context;
	struct xsi_sem_context *xsi_sem_context;
	struct team_death_entry *death_entry;	// protected by fLock
	struct list		dead_threads;

	// protected by the team's fLock
	team_dead_children dead_children;
	team_job_control_children stopped_children;
	team_job_control_children continued_children;

	// protected by the parent team's fLock
	struct job_control_entry* job_control_entry;

	VMAddressSpace	*address_space;
	Thread			*main_thread;	// protected by fLock, immutable
									// after first set
	Thread			*thread_list;	// protected by fLock, signal_lock and
									// gThreadCreationLock
	struct team_loading_info *loading_info;	// protected by fLock
	struct list		image_list;		// protected by sImageMutex
	struct list		watcher_list;
	struct list		sem_list;		// protected by sSemsSpinlock
	struct list		port_list;		// protected by sPortsLock
	struct arch_team arch_info;

	addr_t			user_data;
	area_id			user_data_area;
	size_t			user_data_size;
	size_t			used_user_data;
	struct free_user_thread* free_user_threads;

	void*			commpage_address;

	struct team_debug_info debug_info;

	bigtime_t		start_time;

	// protected by time_lock
	bigtime_t		dead_threads_kernel_time;
	bigtime_t		dead_threads_user_time;
	bigtime_t		cpu_clock_offset;
	spinlock		time_lock;

	// user group information; protected by fLock
	uid_t			saved_set_uid;
	uid_t			real_uid;
	uid_t			effective_uid;
	gid_t			saved_set_gid;
	gid_t			real_gid;
	gid_t			effective_gid;
	BReference<GroupsArray> supplementary_groups;

	// Exit status information. Set when the first terminal event occurs,
	// immutable afterwards. Protected by fLock.
	struct {
		uint16		reason;			// reason for the team's demise, one of the
									// CLD_* values defined in <signal.h>
		uint16		signal;			// signal killing the team
		uid_t		signaling_user;	// real UID of the signal sender
		status_t	status;			// exit status, if normal team exit
		bool		initialized;	// true when the state has been initialized
	} exit;

	spinlock		signal_lock;

public:
								~Team();

	static	Team*				Create(team_id id, const char* name,
									bool kernel);
	static	Team*				Get(team_id id);
	static	Team*				GetAndLock(team_id id);

			bool				Lock()
									{ mutex_lock(&fLock); return true; }
			bool				TryLock()
									{ return mutex_trylock(&fLock) == B_OK; }
			void				Unlock()
									{ mutex_unlock(&fLock); }

			void				UnlockAndReleaseReference()
									{ Unlock(); ReleaseReference(); }

			void				LockTeamAndParent(bool dontLockParentIfKernel);
			void				UnlockTeamAndParent();
			void				LockTeamAndProcessGroup();
			void				UnlockTeamAndProcessGroup();
			void				LockTeamParentAndProcessGroup();
			void				UnlockTeamParentAndProcessGroup();
			void				LockProcessGroup()
									{ LockTeamAndProcessGroup(); Unlock(); }

			const char*			Name() const	{ return fName; }
			void				SetName(const char* name);

			const char*			Args() const	{ return fArgs; }
			void				SetArgs(const char* args);
			void				SetArgs(const char* path,
									const char* const* otherArgs,
									int otherArgCount);

			BKernel::QueuedSignalsCounter* QueuedSignalsCounter() const
									{ return fQueuedSignalsCounter; }
			sigset_t			PendingSignals() const
									{ return fPendingSignals.AllSignals(); }

			void				AddPendingSignal(int signal)
									{ fPendingSignals.AddSignal(signal); }
			void				AddPendingSignal(Signal* signal)
									{ fPendingSignals.AddSignal(signal); }
			void				RemovePendingSignal(int signal)
									{ fPendingSignals.RemoveSignal(signal); }
			void				RemovePendingSignal(Signal* signal)
									{ fPendingSignals.RemoveSignal(signal); }
			void				RemovePendingSignals(sigset_t mask)
									{ fPendingSignals.RemoveSignals(mask); }
			void				ResetSignalsOnExec();

	inline	int32				HighestPendingSignalPriority(
									sigset_t nonBlocked) const;
	inline	Signal*				DequeuePendingSignal(sigset_t nonBlocked,
									Signal& buffer);

			struct sigaction&	SignalActionFor(int32 signal)
									{ return fSignalActions[signal - 1]; }
			void				InheritSignalActions(Team* parent);

			// user timers -- protected by fLock
			UserTimer*			UserTimerFor(int32 id) const
									{ return fUserTimers.TimerFor(id); }
			status_t			AddUserTimer(UserTimer* timer);
			void				RemoveUserTimer(UserTimer* timer);
			void				DeleteUserTimers(bool userDefinedOnly);

			bool				CheckAddUserDefinedTimer();
			void				UserDefinedTimersRemoved(int32 count);

			void				UserTimerActivated(TeamTimeUserTimer* timer)
									{ fCPUTimeUserTimers.Add(timer); }
			void				UserTimerActivated(TeamUserTimeUserTimer* timer)
									{ fUserTimeUserTimers.Add(timer); }
			void				UserTimerDeactivated(TeamTimeUserTimer* timer)
									{ fCPUTimeUserTimers.Remove(timer); }
			void				UserTimerDeactivated(
									TeamUserTimeUserTimer* timer)
									{ fUserTimeUserTimers.Remove(timer); }
			void				DeactivateCPUTimeUserTimers();
									// both total and user CPU timers
			bool				HasActiveCPUTimeUserTimers() const
									{ return !fCPUTimeUserTimers.IsEmpty(); }
			bool				HasActiveUserTimeUserTimers() const
									{ return !fUserTimeUserTimers.IsEmpty(); }
			TeamTimeUserTimerList::ConstIterator
									CPUTimeUserTimerIterator() const
									{ return fCPUTimeUserTimers.GetIterator(); }
	inline	TeamUserTimeUserTimerList::ConstIterator
									UserTimeUserTimerIterator() const;

			bigtime_t			CPUTime(bool ignoreCurrentRun,
									Thread* lockedThread = NULL) const;
			bigtime_t			UserCPUTime() const;

			ConditionVariable*	CoreDumpCondition() const
									{ return fCoreDumpCondition; }
			void				SetCoreDumpCondition(
									ConditionVariable* condition)
									{ fCoreDumpCondition = condition; }
private:
								Team(team_id id, bool kernel);

private:
			mutex				fLock;
			char				fName[B_OS_NAME_LENGTH];
			char				fArgs[64];
									// contents for the team_info::args field

			BKernel::QueuedSignalsCounter* fQueuedSignalsCounter;
			BKernel::PendingSignals	fPendingSignals;
									// protected by signal_lock
			struct sigaction 	fSignalActions[MAX_SIGNAL_NUMBER];
									// indexed signal - 1, protected by fLock

			UserTimerList		fUserTimers;			// protected by fLock
			TeamTimeUserTimerList fCPUTimeUserTimers;
									// protected by scheduler lock
			TeamUserTimeUserTimerList fUserTimeUserTimers;
			int32				fUserDefinedTimerCount;	// accessed atomically

			ConditionVariable*	fCoreDumpCondition;
									// protected by fLock
};
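
// Typical locking sketch (illustrative only; "someTeamID" is a hypothetical
// placeholder, not part of this header): a caller usually obtains a
// referenced and locked Team via GetAndLock() and drops both with
// UnlockAndReleaseReference() when done.
//
//	Team* team = Team::GetAndLock(someTeamID);
//	if (team != NULL) {
//		// ... inspect or modify fields protected by the team's fLock ...
//		team->UnlockAndReleaseReference();
//	}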


struct Thread : TeamThreadIteratorEntry<thread_id>, KernelReferenceable {
	int32			flags;			// summary of events relevant in interrupt
									// handlers (signals pending, user debugging
									// enabled, etc.)
	int64			serial_number;	// immutable after adding thread to hash
	Thread			*hash_next;		// protected by thread hash lock
	Thread			*team_next;		// protected by team lock and fLock
	char			name[B_OS_NAME_LENGTH];	// protected by fLock
	bool			going_to_suspend;	// protected by scheduler lock
	int32			priority;		// protected by scheduler lock
	int32			io_priority;	// protected by fLock
	int32			state;			// protected by scheduler lock
	struct cpu_ent	*cpu;			// protected by scheduler lock
	struct cpu_ent	*previous_cpu;	// protected by scheduler lock
	int32			pinned_to_cpu;	// only accessed by this thread or in the
									// scheduler, when the thread is not running
	spinlock		scheduler_lock;

	sigset_t		sig_block_mask;	// protected by team->signal_lock,
									// only modified by the thread itself
	sigset_t		sigsuspend_original_unblocked_mask;
		// non-0 after a return from _user_sigsuspend(), containing the inverted
		// original signal mask, reset in handle_signals(); only accessed by
		// this thread
	sigset_t		old_sig_block_mask;
		// the old sig_block_mask to be restored when returning to userland
		// when THREAD_FLAGS_OLD_SIGMASK is set

	ucontext_t*		user_signal_context;	// only accessed by this thread
	addr_t			signal_stack_base;		// only accessed by this thread
	size_t			signal_stack_size;		// only accessed by this thread
	bool			signal_stack_enabled;	// only accessed by this thread

	bool			in_kernel;		// protected by time_lock, only written by
									// this thread
	bool			has_yielded;	// protected by scheduler lock
	Scheduler::ThreadData*	scheduler_data; // protected by scheduler lock

	struct user_thread*	user_thread;	// write-protected by fLock, only
										// modified by the thread itself and
										// thus freely readable by it

	void			(*cancel_function)(int);

	struct {
		uint8		parameters[SYSCALL_RESTART_PARAMETER_SIZE];
	} syscall_restart;

	struct {
		status_t	status;				// current wait status
		uint32		flags;				// interruptible flags
		uint32		type;				// type of the object waited on
		const void*	object;				// pointer to the object waited on
		timer		unblock_timer;		// timer for block with timeout
	} wait;

	struct {
		sem_id		write_sem;	// acquired by writers before writing
		sem_id		read_sem;	// released by writers after writing, acquired
								// by this thread when reading
		thread_id	sender;
		int32		code;
		size_t		size;
		void*		buffer;
	} msg;	// write_sem/read_sem are protected by fLock when accessed by
			// others, the other fields are protected by write_sem/read_sem

	void			(*fault_handler)(void);
	jmp_buf			fault_handler_state;
	int32			page_faults_allowed;
		/* this field may only stay in debug builds in the future */

	BKernel::Team	*team;	// protected by team lock, thread lock, scheduler
							// lock, team_lock
	rw_spinlock		team_lock;

	struct {
		sem_id		sem;		// immutable after thread creation
		status_t	status;		// accessed only by this thread
		struct list	waiters;	// protected by fLock
	} exit;

	struct select_info *select_infos;	// protected by fLock

	struct thread_debug_info debug_info;

	// stack
	area_id			kernel_stack_area;	// immutable after thread creation
	addr_t			kernel_stack_base;	// immutable after thread creation
	addr_t			kernel_stack_top;	// immutable after thread creation
	area_id			user_stack_area;	// protected by thread lock
	addr_t			user_stack_base;	// protected by thread lock
	size_t			user_stack_size;	// protected by thread lock

	addr_t			user_local_storage;
		// usually allocated at the safe side of the stack
	int				kernel_errno;
		// kernel "errno" differs from its userspace alter ego

	// user_time, kernel_time, and last_time are only written by the thread
	// itself, so they can be read by the thread without lock. Holding the
	// scheduler lock and checking that the thread does not run also guarantees
	// that the times will not change.
	spinlock		time_lock;
	bigtime_t		user_time;			// protected by time_lock
	bigtime_t		kernel_time;		// protected by time_lock
	bigtime_t		last_time;			// protected by time_lock
	bigtime_t		cpu_clock_offset;	// protected by time_lock

	void			(*post_interrupt_callback)(void*);
	void*			post_interrupt_data;

#if KDEBUG_RW_LOCK_DEBUG
	rw_lock*		held_read_locks[64] = {}; // only modified by this thread
#endif

	// architecture dependent section
	struct arch_thread arch_info;

public:
								Thread() {}
									// dummy for the idle threads
								Thread(const char *name, thread_id threadID,
									struct cpu_ent *cpu);
								~Thread();

	static	status_t			Create(const char* name, Thread*& _thread);

	static	Thread*				Get(thread_id id);
	static	Thread*				GetAndLock(thread_id id);
	static	Thread*				GetDebug(thread_id id);
									// in kernel debugger only

	static	bool				IsAlive(thread_id id);

			void*				operator new(size_t size);
			void*				operator new(size_t, void* pointer);
			void				operator delete(void* pointer, size_t size);

			status_t			Init(bool idleThread);

			bool				Lock()
									{ mutex_lock(&fLock); return true; }
			bool				TryLock()
									{ return mutex_trylock(&fLock) == B_OK; }
			void				Unlock()
									{ mutex_unlock(&fLock); }

			void				UnlockAndReleaseReference()
									{ Unlock(); ReleaseReference(); }

			bool				IsAlive() const;

			bool				IsRunning() const
									{ return cpu != NULL; }
									// scheduler lock must be held

			sigset_t			ThreadPendingSignals() const
									{ return fPendingSignals.AllSignals(); }
	inline	sigset_t			AllPendingSignals() const;
			void				AddPendingSignal(int signal)
									{ fPendingSignals.AddSignal(signal); }
			void				AddPendingSignal(Signal* signal)
									{ fPendingSignals.AddSignal(signal); }
			void				RemovePendingSignal(int signal)
									{ fPendingSignals.RemoveSignal(signal); }
			void				RemovePendingSignal(Signal* signal)
									{ fPendingSignals.RemoveSignal(signal); }
			void				RemovePendingSignals(sigset_t mask)
									{ fPendingSignals.RemoveSignals(mask); }
			void				ResetSignalsOnExec();

	inline	int32				HighestPendingSignalPriority(
									sigset_t nonBlocked) const;
	inline	Signal*				DequeuePendingSignal(sigset_t nonBlocked,
									Signal& buffer);

			// user timers -- protected by fLock
			UserTimer*			UserTimerFor(int32 id) const
									{ return fUserTimers.TimerFor(id); }
			status_t			AddUserTimer(UserTimer* timer);
			void				RemoveUserTimer(UserTimer* timer);
			void				DeleteUserTimers(bool userDefinedOnly);

			void				UserTimerActivated(ThreadTimeUserTimer* timer)
									{ fCPUTimeUserTimers.Add(timer); }
			void				UserTimerDeactivated(ThreadTimeUserTimer* timer)
									{ fCPUTimeUserTimers.Remove(timer); }
			void				DeactivateCPUTimeUserTimers();
			bool				HasActiveCPUTimeUserTimers() const
									{ return !fCPUTimeUserTimers.IsEmpty(); }
			ThreadTimeUserTimerList::ConstIterator
									CPUTimeUserTimerIterator() const
									{ return fCPUTimeUserTimers.GetIterator(); }

	inline	bigtime_t			CPUTime(bool ignoreCurrentRun) const;

private:
			mutex				fLock;

			BKernel::PendingSignals	fPendingSignals;
									// protected by team->signal_lock

			UserTimerList		fUserTimers;			// protected by fLock
			ThreadTimeUserTimerList fCPUTimeUserTimers;
									// protected by time_lock
};


struct ProcessSession : BReferenceable {
	pid_t				id;
	void*				controlling_tty;
	pid_t				foreground_group;

public:
								ProcessSession(pid_t id);
								~ProcessSession();

			bool				Lock()
									{ mutex_lock(&fLock); return true; }
			bool				TryLock()
									{ return mutex_trylock(&fLock) == B_OK; }
			void				Unlock()
									{ mutex_unlock(&fLock); }

private:
			mutex				fLock;
};


struct ProcessGroup : KernelReferenceable {
	struct ProcessGroup *next;		// next in hash
	pid_t				id;
	BKernel::Team		*teams;

public:
								ProcessGroup(pid_t id);
								~ProcessGroup();

	static	ProcessGroup*		Get(pid_t id);

			bool				Lock()
									{ mutex_lock(&fLock); return true; }
			bool				TryLock()
									{ return mutex_trylock(&fLock) == B_OK; }
			void				Unlock()
									{ mutex_unlock(&fLock); }

			ProcessSession*		Session() const
									{ return fSession; }
			void				Publish(ProcessSession* session);
			void				PublishLocked(ProcessSession* session);

			bool				IsOrphaned() const;

			void				ScheduleOrphanedCheck();
			void				UnsetOrphanedCheck();

public:
			SinglyLinkedListLink<ProcessGroup> fOrphanedCheckListLink;

private:
			mutex				fLock;
			ProcessSession*		fSession;
			bool				fInOrphanedCheckList;	// protected by
														// sOrphanedCheckLock
};

typedef SinglyLinkedList<ProcessGroup,
	SinglyLinkedListMemberGetLink<ProcessGroup,
		&ProcessGroup::fOrphanedCheckListLink> > ProcessGroupList;


/*!	\brief Allows iterating through all teams.
*/
struct TeamListIterator {
								TeamListIterator();
								~TeamListIterator();

			Team*				Next();

private:
			TeamThreadIteratorEntry<team_id> fEntry;
};
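
// Minimal usage sketch, assuming these iterators follow the usual kernel
// pattern of Next() returning a referenced object (NULL when the list is
// exhausted), so the caller releases each reference:
//
//	TeamListIterator iterator;
//	while (Team* team = iterator.Next()) {
//		// ... work with the team ...
//		team->ReleaseReference();
//	}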


/*!	\brief Allows iterating through all threads.
*/
struct ThreadListIterator {
								ThreadListIterator();
								~ThreadListIterator();

			Thread*				Next();

private:
			TeamThreadIteratorEntry<thread_id> fEntry;
};


inline int32
Team::HighestPendingSignalPriority(sigset_t nonBlocked) const
{
	return fPendingSignals.HighestSignalPriority(nonBlocked);
}


inline Signal*
Team::DequeuePendingSignal(sigset_t nonBlocked, Signal& buffer)
{
	return fPendingSignals.DequeueSignal(nonBlocked, buffer);
}


inline TeamUserTimeUserTimerList::ConstIterator
Team::UserTimeUserTimerIterator() const
{
	return fUserTimeUserTimers.GetIterator();
}


inline sigset_t
Thread::AllPendingSignals() const
{
	return fPendingSignals.AllSignals() | team->PendingSignals();
}


inline int32
Thread::HighestPendingSignalPriority(sigset_t nonBlocked) const
{
	return fPendingSignals.HighestSignalPriority(nonBlocked);
}


inline Signal*
Thread::DequeuePendingSignal(sigset_t nonBlocked, Signal& buffer)
{
	return fPendingSignals.DequeueSignal(nonBlocked, buffer);
}


/*!	Returns the thread's current total CPU time (kernel + user + offset).

	The caller must hold \c time_lock.

	\param ignoreCurrentRun If \c true and the thread is currently running,
		don't add the time since the last time \c last_time was updated. Should
		be used in "thread unscheduled" scheduler callbacks, since although the
		thread is still running at that time, its time has already been stopped.
	\return The thread's current total CPU time.
*/
inline bigtime_t
Thread::CPUTime(bool ignoreCurrentRun) const
{
	bigtime_t time = user_time + kernel_time + cpu_clock_offset;

	// If currently running, also add the time since the last check, unless
	// requested otherwise.
	if (!ignoreCurrentRun && last_time != 0)
		time += system_time() - last_time;

	return time;
}
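
// Caller-side sketch (illustrative, not part of this header; "thread" is a
// hypothetical Thread*): CPUTime() expects the thread's time_lock to be
// held, e.g. via a spinlock locker:
//
//	InterruptsSpinLocker timeLocker(thread->time_lock);
//	bigtime_t cpuTime = thread->CPUTime(false);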


}	// namespace BKernel

using BKernel::Team;
using BKernel::TeamListIterator;
using BKernel::Thread;
using BKernel::ThreadListIterator;
using BKernel::ProcessSession;
using BKernel::ProcessGroup;
using BKernel::ProcessGroupList;


#endif	// !_ASSEMBLER


// bits for the thread::flags field
#define	THREAD_FLAGS_SIGNALS_PENDING		0x0001
	// unblocked signals are pending (computed flag for optimization purposes)
#define	THREAD_FLAGS_DEBUG_THREAD			0x0002
	// forces the thread into the debugger as soon as possible (set by
	// debug_thread())
#define	THREAD_FLAGS_SINGLE_STEP			0x0004
	// indicates that the thread is in single-step mode (in userland)
#define	THREAD_FLAGS_DEBUGGER_INSTALLED		0x0008
	// a debugger is installed for the current team (computed flag for
	// optimization purposes)
#define	THREAD_FLAGS_BREAKPOINTS_DEFINED	0x0010
	// hardware breakpoints are defined for the current team (computed flag for
	// optimization purposes)
#define	THREAD_FLAGS_BREAKPOINTS_INSTALLED	0x0020
	// breakpoints are currently installed for the thread (i.e. the hardware is
	// actually set up to trigger debug events for them)
#define	THREAD_FLAGS_64_BIT_SYSCALL_RETURN	0x0040
	// set by 64 bit return value syscalls
#define	THREAD_FLAGS_RESTART_SYSCALL		0x0080
	// set by handle_signals(), if the current syscall shall be restarted
#define	THREAD_FLAGS_DONT_RESTART_SYSCALL	0x0100
	// explicitly disables automatic syscall restarts (e.g. resume_thread())
#define	THREAD_FLAGS_ALWAYS_RESTART_SYSCALL	0x0200
	// force syscall restart, even if a signal handler without SA_RESTART was
	// invoked (e.g. sigwait())
#define	THREAD_FLAGS_SYSCALL_RESTARTED		0x0400
	// the current syscall has been restarted
#define	THREAD_FLAGS_SYSCALL				0x0800
	// the thread is currently in a syscall; set/reset only for certain
	// functions (e.g. ioctl()) to allow inner functions to discriminate
	// whether e.g. parameters were passed from userland or kernel
#define	THREAD_FLAGS_TRAP_FOR_CORE_DUMP		0x1000
	// core dump in progress; the thread shall not exit the kernel to userland,
	// but shall invoke core_dump_trap_thread() instead.
#ifdef _COMPAT_MODE
#define	THREAD_FLAGS_COMPAT_MODE			0x2000
	// the thread runs a compatibility mode (for instance IA32 on x86_64).
#endif
#define	THREAD_FLAGS_OLD_SIGMASK			0x4000
	// the thread has an old sigmask to be restored

#endif	/* _KERNEL_THREAD_TYPES_H */