/*
 * Copyright (C) 2005 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
 * Licensed under the GPL
 */

#include "linux/kernel.h"
#include "linux/sched.h"
#include "linux/slab.h"
#include "linux/types.h"
#include "asm/uaccess.h"
#include "asm/ptrace.h"
#include "asm/segment.h"
#include "asm/smp.h"
#include "asm/desc.h"
#include "choose-mode.h"
#include "kern.h"
#include "kern_util.h"
#include "mode_kern.h"
#include "os.h"
#include "mode.h"

#ifdef CONFIG_MODE_SKAS
#include "skas.h"
#endif

/*
 * host_supports_tls starts at -1 so that, if needed, we can detect when it
 * is still uninitialized.
 *
 * Both variables are set once in an initcall and unchanged thereafter.
 */
static int host_supports_tls = -1;
int host_gdt_entry_tls_min;

#ifdef CONFIG_MODE_SKAS
int do_set_thread_area_skas(struct user_desc *info)
{
	int ret;
	u32 cpu;

	cpu = get_cpu();
	ret = os_set_thread_area(info, userspace_pid[cpu]);
	put_cpu();
	return ret;
}

int do_get_thread_area_skas(struct user_desc *info)
{
	int ret;
	u32 cpu;

	cpu = get_cpu();
	ret = os_get_thread_area(info, userspace_pid[cpu]);
	put_cpu();
	return ret;
}
#endif

static int get_free_idx(struct task_struct *task)
{
	struct thread_struct *t = &task->thread;
	int idx;

	if (!t->arch.tls_array)
		return GDT_ENTRY_TLS_MIN;

	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
		if (!t->arch.tls_array[idx].present)
			return idx + GDT_ENTRY_TLS_MIN;
	return -ESRCH;
}

static inline void clear_user_desc(struct user_desc *info)
{
	/* Postcondition: LDT_empty(info) returns true. */
	memset(info, 0, sizeof(*info));

	/* See LDT_empty() or the i386 sys_get_thread_area() code: together
	 * with the memset above, this indeed produces an empty user_desc.
	 */
	info->read_exec_only = 1;
	info->seg_not_present = 1;
}
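
/* For reference (not a definition used in this file, just to make the
 * postcondition concrete): the i386 LDT_empty() check treats a descriptor as
 * empty roughly when
 *
 *	base_addr == 0 && limit == 0 && contents == 0 &&
 *	read_exec_only == 1 && seg_32bit == 0 && limit_in_pages == 0 &&
 *	seg_not_present == 1 && useable == 0
 *
 * so zeroing the struct and then setting the two inverted flags above is
 * sufficient.
 */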

#define O_FORCE 1

static int load_TLS(int flags, struct task_struct *to)
{
	int ret = 0;
	int idx;

	/* GDT_ENTRY_TLS_MAX is the last valid slot, so iterate inclusively
	 * to cover all GDT_ENTRY_TLS_ENTRIES entries. */
	for (idx = GDT_ENTRY_TLS_MIN; idx <= GDT_ENTRY_TLS_MAX; idx++) {
		struct uml_tls_struct *curr =
			&to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN];

		/* If the entry is not present and hasn't been flushed yet,
		 * it gets cleared here and then flushed to the host, which
		 * will clear the slot. */
		if (!curr->present) {
			if (!curr->flushed) {
				clear_user_desc(&curr->tls);
				curr->tls.entry_number = idx;
			} else {
				WARN_ON(!LDT_empty(&curr->tls));
				continue;
			}
		}

		if (!(flags & O_FORCE) && curr->flushed)
			continue;

		ret = do_set_thread_area(&curr->tls);
		if (ret)
			goto out;

		curr->flushed = 1;
	}
out:
	return ret;
}
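
/* Call-site note: arch_switch_tls_skas() below passes O_FORCE, since in SKAS
 * the host process is shared by all threads of an ->mm, so every present
 * entry must be rewritten on each switch; arch_switch_tls_tt() passes 0 and
 * relies on the ->flushed bookkeeping to skip entries the host process
 * already has.
 */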

/* Check whether we need to flush TLS entries for the new process, i.e.
 * whether any descriptor still needs to be flushed to the host.
 */
static inline int needs_TLS_update(struct task_struct *task)
{
	int i;
	int ret = 0;

	for (i = GDT_ENTRY_TLS_MIN; i <= GDT_ENTRY_TLS_MAX; i++) {
		struct uml_tls_struct *curr =
			&task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];

		/* We can't test curr->present: we may need to clear a
		 * descriptor which previously had a value. */
		if (curr->flushed)
			continue;
		ret = 1;
		break;
	}
	return ret;
}

/* On a newly forked process, the TLS descriptors haven't yet been flushed, so
 * we mark them as such here and let the first switch_to do the job.
 */
void clear_flushed_tls(struct task_struct *task)
{
	int i;

	for (i = GDT_ENTRY_TLS_MIN; i <= GDT_ENTRY_TLS_MAX; i++) {
		struct uml_tls_struct *curr =
			&task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];

		/* It is still correct to do this for a non-present entry: if
		 * it wasn't present on the host, it will stay as flushed as
		 * it was. */
		if (!curr->present)
			continue;

		curr->flushed = 0;
	}
}

/* In SKAS0 mode, currently, multiple guest threads sharing the same ->mm have
 * a common host process, so this is needed in SKAS0 too.
 *
 * However, if each thread had a different host process (and this was discussed
 * for SMP support), this wouldn't be needed.
 *
 * It will also not need to be used when (and if) we add support for the host
 * SKAS patch. */

int arch_switch_tls_skas(struct task_struct *from, struct task_struct *to)
{
	if (!host_supports_tls)
		return 0;

	/* We have no need whatsoever to switch TLS for kernel threads;
	 * beyond that, doing so would also mean calling os_set_thread_area()
	 * with userspace_pid[cpu] == 0, which gives an error. */
	if (likely(to->mm))
		return load_TLS(O_FORCE, to);

	return 0;
}

int arch_switch_tls_tt(struct task_struct *from, struct task_struct *to)
{
	if (!host_supports_tls)
		return 0;

	if (needs_TLS_update(to))
		return load_TLS(0, to);

	return 0;
}

static int set_tls_entry(struct task_struct *task, struct user_desc *info,
			 int idx, int flushed)
{
	struct thread_struct *t = &task->thread;

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls = *info;
	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present = 1;
	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed = flushed;

	return 0;
}

int arch_copy_tls(struct task_struct *new)
{
	struct user_desc info;
	int idx, ret = -EFAULT;

	if (copy_from_user(&info,
			   (void __user *) UPT_ESI(&new->thread.regs.regs),
			   sizeof(info)))
		goto out;

	ret = -EINVAL;
	if (LDT_empty(&info))
		goto out;

	idx = info.entry_number;

	ret = set_tls_entry(new, &info, idx, 0);
out:
	return ret;
}

static int get_tls_entry(struct task_struct *task, struct user_desc *info,
			 int idx)
{
	struct thread_struct *t = &task->thread;

	if (!t->arch.tls_array)
		goto clear;

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	if (!t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present)
		goto clear;

	*info = t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls;

out:
	/* Temporary debugging check, to make sure that things have been
	 * flushed. This could be triggered if load_TLS() failed.
	 */
	if (unlikely(task == current &&
		     !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
		printk(KERN_ERR "get_tls_entry: task with pid %d got here "
				"without flushed TLS.\n", current->pid);
	}

	return 0;
clear:
	/* When the TLS entry has not been set, the values read back by the
	 * user from the tls_array are 0 (because it is cleared at boot, see
	 * arch/i386/kernel/head.S:cpu_gdt_table). Emulate that.
	 */
	clear_user_desc(info);
	info->entry_number = idx;
	goto out;
}
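
/* Illustration of the "clear" path above: reading back a slot that was never
 * set (say entry GDT_ENTRY_TLS_MIN; values shown are what the caller would
 * observe) yields the empty descriptor built by clear_user_desc():
 *
 *	info.entry_number    == GDT_ENTRY_TLS_MIN
 *	info.base_addr       == 0
 *	info.limit           == 0
 *	info.read_exec_only  == 1
 *	info.seg_not_present == 1
 *
 * which matches what i386 reads out of its zeroed boot-time GDT.
 */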

asmlinkage int sys_set_thread_area(struct user_desc __user *user_desc)
{
	struct user_desc info;
	int idx, ret;

	if (!host_supports_tls)
		return -ENOSYS;

	if (copy_from_user(&info, user_desc, sizeof(info)))
		return -EFAULT;

	idx = info.entry_number;

	if (idx == -1) {
		idx = get_free_idx(current);
		if (idx < 0)
			return idx;
		info.entry_number = idx;
		/* Tell the user which slot we chose for him. */
		if (put_user(idx, &user_desc->entry_number))
			return -EFAULT;
	}

	ret = CHOOSE_MODE_PROC(do_set_thread_area_tt, do_set_thread_area_skas,
			       &info);
	if (ret)
		return ret;
	return set_tls_entry(current, &info, idx, 1);
}
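
/* Userspace usage sketch (illustrative only, not part of this file): a
 * caller would typically go through syscall(2), passing entry_number == -1
 * to let the kernel pick a free slot and write the choice back.  `tls_block'
 * and `use_slot' are hypothetical caller-supplied names:
 *
 *	struct user_desc desc;
 *	memset(&desc, 0, sizeof(desc));
 *	desc.entry_number = -1;			// kernel chooses the slot
 *	desc.base_addr = (unsigned long) tls_block;
 *	desc.limit = 0xfffff;
 *	desc.seg_32bit = 1;
 *	desc.limit_in_pages = 1;
 *	desc.useable = 1;
 *	if (syscall(SYS_set_thread_area, &desc) == 0)
 *		use_slot(desc.entry_number);
 */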

/*
 * Perform set_thread_area on behalf of the traced child.
 * Note: error handling is not done on the deferred load, and this differs
 * from i386. However, the only possible errors are caused by bugs.
 */
int ptrace_set_thread_area(struct task_struct *child, int idx,
		struct user_desc __user *user_desc)
{
	struct user_desc info;

	if (!host_supports_tls)
		return -EIO;

	if (copy_from_user(&info, user_desc, sizeof(info)))
		return -EFAULT;

	return set_tls_entry(child, &info, idx, 0);
}
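
/* A tracer reaches the function above through the i386-style ptrace request;
 * sketch, assuming a stopped tracee and a valid TLS index `idx':
 *
 *	struct user_desc desc;
 *	fill_in_desc(&desc, idx);	// hypothetical helper
 *	ptrace(PTRACE_SET_THREAD_AREA, child_pid, idx, &desc);
 */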

asmlinkage int sys_get_thread_area(struct user_desc __user *user_desc)
{
	struct user_desc info;
	int idx, ret;

	if (!host_supports_tls)
		return -ENOSYS;

	if (get_user(idx, &user_desc->entry_number))
		return -EFAULT;

	ret = get_tls_entry(current, &info, idx);
	if (ret < 0)
		goto out;

	if (copy_to_user(user_desc, &info, sizeof(info)))
		ret = -EFAULT;

out:
	return ret;
}
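
/* Readback sketch (userspace, illustrative only): the caller fills in just
 * entry_number and the kernel overwrites the rest:
 *
 *	struct user_desc desc;
 *	memset(&desc, 0, sizeof(desc));
 *	desc.entry_number = 6;		// first TLS slot on i386
 *	if (syscall(SYS_get_thread_area, &desc) == 0)
 *		printf("base %#x\n", desc.base_addr);
 */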

/*
 * Perform get_thread_area on behalf of the traced child.
 */
int ptrace_get_thread_area(struct task_struct *child, int idx,
		struct user_desc __user *user_desc)
{
	struct user_desc info;
	int ret;

	if (!host_supports_tls)
		return -EIO;

	ret = get_tls_entry(child, &info, idx);
	if (ret < 0)
		goto out;

	if (copy_to_user(user_desc, &info, sizeof(info)))
		ret = -EFAULT;
out:
	return ret;
}

static int __init __setup_host_supports_tls(void)
{
	check_host_supports_tls(&host_supports_tls, &host_gdt_entry_tls_min);
	if (host_supports_tls) {
		printk(KERN_INFO "Host TLS support detected\n");
		printk(KERN_INFO "Detected host type: ");
		switch (host_gdt_entry_tls_min) {
		case GDT_ENTRY_TLS_MIN_I386:
			printk("i386\n");
			break;
		case GDT_ENTRY_TLS_MIN_X86_64:
			printk("x86_64\n");
			break;
		}
	} else
		printk(KERN_ERR "Host TLS support NOT detected! "
				"TLS support inside UML will not work\n");
	return 0;
}
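
/* On a TLS-capable i386 host, the messages above show up in the boot log as:
 *
 *	Host TLS support detected
 *	Detected host type: i386
 */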

__initcall(__setup_host_supports_tls);