1/*
2 * IA32 helper functions
3 *
4 * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
5 * Copyright (C) 2000 Asit K. Mallick <asit.k.mallick@intel.com>
6 * Copyright (C) 2001-2002 Hewlett-Packard Co
7 *	David Mosberger-Tang <davidm@hpl.hp.com>
8 *
9 * 06/16/00	A. Mallick	added csd/ssd/tssd for ia32 thread context
10 * 02/19/01	D. Mosberger	dropped tssd; it's not needed
11 * 09/14/01	D. Mosberger	fixed memory management for gdt/tss page
12 * 09/29/01	D. Mosberger	added ia32_load_segment_descriptors()
13 */
14
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <linux/mm.h>
18#include <linux/personality.h>
19#include <linux/sched.h>
20
21#include <asm/page.h>
22#include <asm/pgtable.h>
23#include <asm/system.h>
24#include <asm/processor.h>
25#include <asm/ia32.h>
26
extern void die_if_kernel (char *str, struct pt_regs *regs, long err);

/* Execution domain registered for IA-32 (PER_LINUX32) personalities.  */
struct exec_domain ia32_exec_domain;
/*
 * Pages backing the IA-32 GDT and TSS: two IA-32-sized pages, rounded up
 * to native PAGE_SIZE units.  Presumably mapped into the IA-32 process
 * address space — confirm against the users of ia32_shared_page.
 */
struct page *ia32_shared_page[(2*IA32_PAGE_SIZE + PAGE_SIZE - 1)/PAGE_SIZE];
/* Kernel virtual address of the IA-32 GDT (first shared page).  */
unsigned long *ia32_gdt;
32
33static unsigned long
34load_desc (u16 selector)
35{
36	unsigned long *table, limit, index;
37
38	if (!selector)
39		return 0;
40	if (selector & IA32_SEGSEL_TI) {
41		table = (unsigned long *) IA32_LDT_OFFSET;
42		limit = IA32_LDT_ENTRIES;
43	} else {
44		table = ia32_gdt;
45		limit = IA32_PAGE_SIZE / sizeof(ia32_gdt[0]);
46	}
47	index = selector >> IA32_SEGSEL_INDEX_SHIFT;
48	if (index >= limit)
49		return 0;
50	return IA32_SEG_UNSCRAMBLE(table[index]);
51}
52
53void
54ia32_load_segment_descriptors (struct task_struct *task)
55{
56	struct pt_regs *regs = ia64_task_regs(task);
57
58	/* Setup the segment descriptors */
59	regs->r24 = load_desc(regs->r16 >> 16);		/* ESD */
60	regs->r27 = load_desc(regs->r16 >>  0);		/* DSD */
61	regs->r28 = load_desc(regs->r16 >> 32);		/* FSD */
62	regs->r29 = load_desc(regs->r16 >> 48);		/* GSD */
63	task->thread.csd = load_desc(regs->r17 >>  0);	/* CSD */
64	task->thread.ssd = load_desc(regs->r17 >> 16);	/* SSD */
65}
66
/*
 * Save task T's IA-32 related application registers into its thread
 * structure and restore the IA-64 values of the two kernel registers
 * (I/O base, TSSD) that ia32_load_state() replaced.
 */
void
ia32_save_state (struct task_struct *t)
{
	unsigned long eflag, fsr, fcr, fir, fdr, csd, ssd;

	/* Output operand order below must match the assignment order that
	   follows: %0=eflag ... %6=ssd.  */
	asm ("mov %0=ar.eflag;"
	     "mov %1=ar.fsr;"
	     "mov %2=ar.fcr;"
	     "mov %3=ar.fir;"
	     "mov %4=ar.fdr;"
	     "mov %5=ar.csd;"
	     "mov %6=ar.ssd;"
	     : "=r"(eflag), "=r"(fsr), "=r"(fcr), "=r"(fir), "=r"(fdr), "=r"(csd), "=r"(ssd));
	t->thread.eflag = eflag;
	t->thread.fsr = fsr;
	t->thread.fcr = fcr;
	t->thread.fir = fir;
	t->thread.fdr = fdr;
	t->thread.csd = csd;
	t->thread.ssd = ssd;
	/* Put back the native values stashed by ia32_load_state().  */
	ia64_set_kr(IA64_KR_IO_BASE, t->thread.old_iob);
	ia64_set_kr(IA64_KR_TSSD, t->thread.old_k1);
}
90
91void
92ia32_load_state (struct task_struct *t)
93{
94	unsigned long eflag, fsr, fcr, fir, fdr, csd, ssd, tssd;
95	struct pt_regs *regs = ia64_task_regs(t);
96	int nr = smp_processor_id();	/* LDT and TSS depend on CPU number: */
97
98	nr = smp_processor_id();
99
100	eflag = t->thread.eflag;
101	fsr = t->thread.fsr;
102	fcr = t->thread.fcr;
103	fir = t->thread.fir;
104	fdr = t->thread.fdr;
105	csd = t->thread.csd;
106	ssd = t->thread.ssd;
107	tssd = load_desc(_TSS(nr));					/* TSSD */
108
109	asm volatile ("mov ar.eflag=%0;"
110		      "mov ar.fsr=%1;"
111		      "mov ar.fcr=%2;"
112		      "mov ar.fir=%3;"
113		      "mov ar.fdr=%4;"
114		      "mov ar.csd=%5;"
115		      "mov ar.ssd=%6;"
116		      :: "r"(eflag), "r"(fsr), "r"(fcr), "r"(fir), "r"(fdr), "r"(csd), "r"(ssd));
117	current->thread.old_iob = ia64_get_kr(IA64_KR_IO_BASE);
118	current->thread.old_k1 = ia64_get_kr(IA64_KR_TSSD);
119	ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);
120	ia64_set_kr(IA64_KR_TSSD, tssd);
121
122	regs->r17 = (_TSS(nr) << 48) | (_LDT(nr) << 32) | (__u32) regs->r17;
123	regs->r30 = load_desc(_LDT(nr));				/* LDTD */
124}
125
126/*
127 * Setup IA32 GDT and TSS
128 */
129void
130ia32_gdt_init (void)
131{
132	unsigned long *tss;
133	unsigned long ldt_size;
134	int nr;
135
136	ia32_shared_page[0] = alloc_page(GFP_KERNEL);
137	ia32_gdt = page_address(ia32_shared_page[0]);
138	tss = ia32_gdt + IA32_PAGE_SIZE/sizeof(ia32_gdt[0]);
139
140	if (IA32_PAGE_SIZE == PAGE_SIZE) {
141		ia32_shared_page[1] = alloc_page(GFP_KERNEL);
142		tss = page_address(ia32_shared_page[1]);
143	}
144
145	/* CS descriptor in IA-32 (scrambled) format */
146	ia32_gdt[__USER_CS >> 3] = IA32_SEG_DESCRIPTOR(0, (IA32_PAGE_OFFSET-1) >> IA32_PAGE_SHIFT,
147						       0xb, 1, 3, 1, 1, 1, 1);
148
149	/* DS descriptor in IA-32 (scrambled) format */
150	ia32_gdt[__USER_DS >> 3] = IA32_SEG_DESCRIPTOR(0, (IA32_PAGE_OFFSET-1) >> IA32_PAGE_SHIFT,
151						       0x3, 1, 3, 1, 1, 1, 1);
152
153	/* We never change the TSS and LDT descriptors, so we can share them across all CPUs.  */
154	ldt_size = PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE);
155	for (nr = 0; nr < NR_CPUS; ++nr) {
156		ia32_gdt[_TSS(nr) >> IA32_SEGSEL_INDEX_SHIFT]
157			= IA32_SEG_DESCRIPTOR(IA32_TSS_OFFSET, 235,
158					      0xb, 0, 3, 1, 1, 1, 0);
159		ia32_gdt[_LDT(nr) >> IA32_SEGSEL_INDEX_SHIFT]
160			= IA32_SEG_DESCRIPTOR(IA32_LDT_OFFSET, ldt_size - 1,
161					      0x2, 0, 3, 1, 1, 1, 0);
162	}
163}
164
165/*
166 * Handle bad IA32 interrupt via syscall
167 */
168void
169ia32_bad_interrupt (unsigned long int_num, struct pt_regs *regs)
170{
171	siginfo_t siginfo;
172
173	die_if_kernel("Bad IA-32 interrupt", regs, int_num);
174
175	siginfo.si_signo = SIGTRAP;
176	siginfo.si_errno = int_num;
177	siginfo.si_flags = 0;
178	siginfo.si_isr = 0;
179	siginfo.si_addr = 0;
180	siginfo.si_imm = 0;
181	siginfo.si_code = TRAP_BRKPT;
182	force_sig_info(SIGTRAP, &siginfo, current);
183}
184
185static int __init
186ia32_init (void)
187{
188	ia32_exec_domain.name = "Linux/x86";
189	ia32_exec_domain.handler = NULL;
190	ia32_exec_domain.pers_low = PER_LINUX32;
191	ia32_exec_domain.pers_high = PER_LINUX32;
192	ia32_exec_domain.signal_map = default_exec_domain.signal_map;
193	ia32_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
194	register_exec_domain(&ia32_exec_domain);
195	return 0;
196}
197
/* Run ia32_init() automatically during kernel initialization.  */
__initcall(ia32_init);