1/* sun3_pgalloc.h --
2 * reorganization around 2.3.39, routines moved from sun3_pgtable.h
3 *
4 * moved 1/26/2000 Sam Creasey
5 */
6
7#ifndef _SUN3_PGALLOC_H
8#define _SUN3_PGALLOC_H
9
/* Pagetable caches: sun3 keeps none, so all quicklists are empty and
 * the cache size is permanently zero. */
//todo: should implement for at least ptes. --m
#define pgd_quicklist ((unsigned long *) 0)
#define pmd_quicklist ((unsigned long *) 0)
#define pte_quicklist ((unsigned long *) 0)
#define pgtable_cache_size (0L)
16
17/* Allocation and deallocation of various flavours of pagetables. */
/* pmd quicklist stubs: with no pagetable cache (see above) the "fast"
 * free paths consume nothing (return 0) and the "fast" alloc never has
 * a cached pmd to hand out (returns NULL). */
extern inline int free_pmd_fast (pmd_t *pmdp) { return 0; }
extern inline int free_pmd_slow (pmd_t *pmdp) { return 0; }
extern inline pmd_t *get_pmd_fast (void) { return (pmd_t *) 0; }
21
//todo: implement the following properly.
/* pte quicklist stubs: the "fast" alloc always misses (NULL) and the
 * "fast" free discards nothing; the slow paths go straight to the real
 * pte_alloc/pte_free. */
#define get_pte_fast() ((pte_t *) 0)
#define get_pte_slow pte_alloc
#define free_pte_fast(pte)
#define free_pte_slow pte_free

/* Added to the physical address installed in a kernel pmd entry below;
 * 0 here -- presumably sun3 needs no extra protection bits in the pmd.
 * TODO(review): confirm. */
#define _KERNPG_TABLE 0
30
31extern inline void pte_free_kernel(pte_t * pte)
32{
33        free_page((unsigned long) pte);
34}
35
/* printk format string (defined elsewhere) used below to report a
 * corrupt pmd entry. */
extern const char bad_pmd_string[];
37
/* Return a pointer to the pte mapping 'address' within the kernel
 * pagetable rooted at 'pmd', allocating a fresh pte page if the pmd is
 * still empty.  Returns NULL on allocation failure or on a corrupt
 * pmd; in both cases the pmd is pointed at BAD_PAGETABLE rather than
 * being left empty. */
extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
{
        /* Reduce the address to the pte index within one table. */
        address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
        if (pmd_none(*pmd)) {
                pte_t * page = (pte_t *) get_free_page(GFP_KERNEL);
                /* Re-check: GFP_KERNEL may sleep, so another path may
                 * have populated this pmd in the meantime. */
                if (pmd_none(*pmd)) {
                        if (page) {
                                pmd_val(*pmd) = _KERNPG_TABLE + __pa(page);
                                return page + address;
                        }
                        /* Out of memory: install the shared bad
                         * pagetable instead of leaving the pmd empty. */
                        pmd_val(*pmd) = _KERNPG_TABLE + __pa((unsigned long)BAD_PAGETABLE);
                        return NULL;
                }
                /* Lost the race -- someone else filled the pmd, so
                 * discard the page we allocated (free_page(0) is ok). */
                free_page((unsigned long) page);
        }
        if (pmd_bad(*pmd)) {
                /* Corrupt entry: report it and neutralize the pmd. */
                printk(bad_pmd_string, pmd_val(*pmd));
		printk("at kernel pgd off %08x\n", (unsigned int)pmd);
                pmd_val(*pmd) = _KERNPG_TABLE + __pa((unsigned long)BAD_PAGETABLE);
                return NULL;
        }
        return (pte_t *) __pmd_page(*pmd) + address;
}
61
62/*
63 * allocating and freeing a pmd is trivial: the 1-entry pmd is
64 * inside the pgd, so has no extra memory associated with it.
65 */
/* Nothing to free: the 1-entry pmd lives inside the pgd (see the
 * comment above).  The clear is left commented out here, unlike
 * pmd_free() below -- NOTE(review): confirm the asymmetry is wanted. */
extern inline void pmd_free_kernel(pmd_t * pmd)
{
//        pmd_val(*pmd) = 0;
}
70
/* The 1-entry pmd is folded into the pgd, so "allocating" one just
 * reinterprets the pgd slot as a pmd.  'address' is unused. */
extern inline pmd_t * pmd_alloc_kernel(pgd_t * pgd, unsigned long address)
{
        return (pmd_t *) pgd;
}
75
/* A standalone pmd must never be allocated on sun3 (it is folded into
 * the pgd): BUG() if anything tries.  The distinct bogus return values
 * (1 vs 2) presumably tell the two paths apart in a crash dump. */
#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm,address)       ({ BUG(); ((pmd_t *)2); })
78
79extern inline void pte_free(pte_t * pte)
80{
81        free_page((unsigned long) pte);
82}
83
84static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
85{
86	unsigned long page = __get_free_page(GFP_KERNEL);
87
88	if (!page)
89		return NULL;
90
91	memset((void *)page, 0, PAGE_SIZE);
92//	pmd_val(*pmd) = SUN3_PMD_MAGIC + __pa(page);
93/*	pmd_val(*pmd) = __pa(page); */
94	return (pte_t *) (page);
95}
96
/* No pte cache, so the "fast" path is just the normal allocation. */
#define pte_alloc_one_fast(mm,addr) pte_alloc_one(mm,addr)

/* Point a pmd entry at a pte table: store the table's physical
 * address (no extra bits -- cf. _KERNPG_TABLE above being 0). */
#define pmd_populate(mm, pmd, pte) (pmd_val(*pmd) = __pa((unsigned long)pte))
100
101/*
102 * allocating and freeing a pmd is trivial: the 1-entry pmd is
103 * inside the pgd, so has no extra memory associated with it.
104 */
/* "Free" a pmd by clearing the (pgd-resident) entry; no memory is
 * released, since the 1-entry pmd lives inside the pgd. */
extern inline void pmd_free(pmd_t * pmd)
{
        pmd_val(*pmd) = 0;
}
109
110extern inline void pgd_free(pgd_t * pgd)
111{
112        free_page((unsigned long) pgd);
113}
114
115extern inline pgd_t * pgd_alloc(struct mm_struct *mm)
116{
117     pgd_t *new_pgd;
118
119     new_pgd = (pgd_t *)get_free_page(GFP_KERNEL);
120     memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
121     memset(new_pgd, 0, (PAGE_OFFSET >> PGDIR_SHIFT));
122     return new_pgd;
123}
124
/* pmds are folded into the pgd on sun3, so a pgd entry must never be
 * populated with a separate pmd: BUG() if anything tries. */
#define pgd_populate(mm, pmd, pte) BUG()


/* Pagetable-cache shrinker hook (defined elsewhere); the caches here
 * are all empty, see the quicklist defines above. */
extern int do_check_pgt_cache(int, int);
129
/* Intentionally empty on sun3 -- presumably no per-process pgd copies
 * need updating when a kernel pgd entry changes.  TODO(review): confirm. */
extern inline void set_pgdir(unsigned long address, pgd_t entry)
{
}
133
/* Reserved PMEGs. */
/* Per-PMEG bookkeeping (defined elsewhere), indexed by PMEG number:
 * whether it is reserved/allocated, the virtual address it maps, and
 * the context that owns it.  Updated by the flush routines below. */
extern char sun3_reserved_pmeg[SUN3_PMEGS_NUM];
extern unsigned long pmeg_vaddr[SUN3_PMEGS_NUM];
extern unsigned char pmeg_alloc[SUN3_PMEGS_NUM];
extern unsigned char pmeg_ctx[SUN3_PMEGS_NUM];
139
140/* Flush all userspace mappings one by one...  (why no flush command,
141   sun?) */
/* Invalidate every user segment mapping in every context, then reset
 * the per-PMEG bookkeeping.  The previously active context is restored
 * before the bookkeeping pass. */
static inline void flush_tlb_all(void)
{
       unsigned long addr;
       unsigned char ctx, oldctx;

       oldctx = sun3_get_context();
       /* For each user segment, invalidate its segment-map entry in
	  each of the 8 contexts (presumably the hardware context count
	  -- confirm).  Segment-map access is context-relative, hence
	  the sun3_put_context() inside the loop. */
       for(addr = 0x00000000; addr < TASK_SIZE; addr += SUN3_PMEG_SIZE) {
	       for(ctx = 0; ctx < 8; ctx++) {
		       sun3_put_context(ctx);
		       sun3_put_segmap(addr, SUN3_INVALID_PMEG);
	       }
       }

       sun3_put_context(oldctx);
       /* erase all of the userspace pmeg maps, we've clobbered them
	  all anyway */
       for(addr = 0; addr < SUN3_INVALID_PMEG; addr++) {
	       if(pmeg_alloc[addr] == 1) {
		       pmeg_alloc[addr] = 0;
		       pmeg_ctx[addr] = 0;
		       pmeg_vaddr[addr] = 0;
	       }
       }

}
167
168/* Clear user TLB entries within the context named in mm */
/* Invalidate all user segment mappings in mm's context and release
 * their PMEG bookkeeping.  The previously active context is restored
 * afterwards. */
static inline void flush_tlb_mm (struct mm_struct *mm)
{
     unsigned char oldctx;
     unsigned char seg;
     unsigned long i;

     /* Segment-map access is context-relative: switch to the target
	context, flush, then switch back. */
     oldctx = sun3_get_context();
     sun3_put_context(mm->context);

     for(i = 0; i < TASK_SIZE; i += SUN3_PMEG_SIZE) {
	     seg = sun3_get_segmap(i);
	     if(seg == SUN3_INVALID_PMEG)
		     continue;

	     sun3_put_segmap(i, SUN3_INVALID_PMEG);
	     pmeg_alloc[seg] = 0;
	     pmeg_ctx[seg] = 0;
	     pmeg_vaddr[seg] = 0;
     }

     sun3_put_context(oldctx);

}
192
193/* Flush a single TLB page. In this case, we're limited to flushing a
194   single PMEG */
/* Invalidate the mapping of a single page in vma's address space.  The
 * segment map has no per-page granularity, so the entire PMEG (segment)
 * containing 'addr' is dropped and its bookkeeping released.  The
 * previously active context is restored afterwards. */
static inline void flush_tlb_page (struct vm_area_struct *vma,
				   unsigned long addr)
{
	unsigned char oldctx;
	unsigned char i;	/* PMEG number of the flushed segment */

	oldctx = sun3_get_context();
	sun3_put_context(vma->vm_mm->context);
	/* Round down to the segment base; the whole PMEG goes away. */
	addr &= ~SUN3_PMEG_MASK;
	if((i = sun3_get_segmap(addr)) != SUN3_INVALID_PMEG)
	{
		pmeg_alloc[i] = 0;
		pmeg_ctx[i] = 0;
		pmeg_vaddr[i] = 0;
		sun3_put_segmap (addr,  SUN3_INVALID_PMEG);
	}
	sun3_put_context(oldctx);

}
214/* Flush a range of pages from TLB. */
215
216static inline void flush_tlb_range (struct mm_struct *mm,
217		      unsigned long start, unsigned long end)
218{
219	unsigned char seg, oldctx;
220
221	start &= ~SUN3_PMEG_MASK;
222
223	oldctx = sun3_get_context();
224	sun3_put_context(mm->context);
225
226	while(start < end)
227	{
228		if((seg = sun3_get_segmap(start)) == SUN3_INVALID_PMEG)
229		     goto next;
230		if(pmeg_ctx[seg] == mm->context) {
231			pmeg_alloc[seg] = 0;
232			pmeg_ctx[seg] = 0;
233			pmeg_vaddr[seg] = 0;
234		}
235		sun3_put_segmap(start, SUN3_INVALID_PMEG);
236	next:
237		start += SUN3_PMEG_SIZE;
238	}
239}
240
241/* Flush kernel page from TLB. */
242static inline void flush_tlb_kernel_page (unsigned long addr)
243{
244	sun3_put_segmap (addr & ~(SUN3_PMEG_SIZE - 1), SUN3_INVALID_PMEG);
245}
246
/* Intentionally empty -- presumably sun3 needs no flush when pagetable
 * pages themselves are torn down.  TODO(review): confirm. */
extern inline void flush_tlb_pgtables(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
}
251
#endif /* _SUN3_PGALLOC_H */
253