#ifndef _MOTOROLA_PGALLOC_H
#define _MOTOROLA_PGALLOC_H
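
/*
 * Page table allocation and ATC (TLB) flushing for the Motorola MMUs
 * (68851/68030 and the on-chip MMUs of the 68040/68060).  Freed
 * tables are kept on quicklists so they can be handed out again
 * without going back to the page allocator.
 */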

extern struct pgtable_cache_struct {
	unsigned long *pmd_cache;
	unsigned long *pte_cache;
/* This counts in units of pointer tables, of which there can be eight per page. */
	unsigned long pgtable_cache_sz;
} quicklists;

#define pgd_quicklist ((unsigned long *)0)
#define pmd_quicklist (quicklists.pmd_cache)
#define pte_quicklist (quicklists.pte_cache)
/* This isn't accurate because of fragmentation of allocated pages for
   pointer tables, but that should not be a problem. */
#define pgtable_cache_size ((quicklists.pgtable_cache_sz+7)/8)
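/* (pgtable_cache_sz counts pointer tables; dividing by eight, rounding
   up, converts that to whole pages for the generic pgt cache code.) */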

extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset);
extern pmd_t *get_pmd_slow(pgd_t *pgd, unsigned long offset);

extern pmd_t *get_pointer_table(void);
extern int free_pointer_table(pmd_t *);

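/*
 * Flush the ATC entry for a single kernel page.  The '040/'060 pflush
 * takes its address space from the DFC register, which set_fs() loads
 * on m68k, so switch to KERNEL_DS around it to make sure the kernel
 * mapping is the one that gets flushed.
 */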
extern inline void flush_tlb_kernel_page(unsigned long addr)
{
	if (CPU_IS_040_OR_060) {
		mm_segment_t old_fs = get_fs();

		set_fs(KERNEL_DS);
		__asm__ __volatile__(".chip 68040\n\t"
				     "pflush (%0)\n\t"
				     ".chip 68k"
				     : : "a" (addr));
		set_fs(old_fs);
	} else
		__asm__ __volatile__("pflush #4,#4,(%0)" : : "a" (addr));
}

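/*
 * Quicklist fast paths.  Free tables are chained through their first
 * longword; a full pte page counts as eight pointer-table units in
 * pgtable_cache_sz, a single pointer table as one.
 */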
extern inline pte_t *get_pte_fast(void)
{
	unsigned long *ret;

	ret = pte_quicklist;
	if (ret) {
		pte_quicklist = (unsigned long *)*ret;
		ret[0] = 0;
		quicklists.pgtable_cache_sz -= 8;
	}
	return (pte_t *)ret;
}
#define pte_alloc_one_fast(mm,addr) get_pte_fast()

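/*
 * Slow path: take a fresh page for a pte table.  The '040/'060 table
 * walker fetches descriptors straight from memory without looking at
 * the data cache, so the cleared page is pushed out to RAM and mapped
 * noncacheable before it is used.
 */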
static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte;

	pte = (pte_t *)__get_free_page(GFP_KERNEL);
	if (pte) {
		clear_page(pte);
		__flush_page_to_ram((unsigned long)pte);
		flush_tlb_kernel_page((unsigned long)pte);
		nocache_page((unsigned long)pte);
	}

	return pte;
}

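/* Pointer (pmd) tables are only 512 bytes, so get_pointer_table()
   sub-allocates them eight to a page. */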
extern inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	return get_pointer_table();
}

extern inline void free_pte_fast(pte_t *pte)
{
	*(unsigned long *)pte = (unsigned long)pte_quicklist;
	pte_quicklist = (unsigned long *)pte;
	quicklists.pgtable_cache_sz += 8;
}

extern inline void free_pte_slow(pte_t *pte)
{
	cache_page((unsigned long)pte);
	free_page((unsigned long)pte);
}

extern inline pmd_t *get_pmd_fast(void)
{
	unsigned long *ret;

	ret = pmd_quicklist;
	if (ret) {
		pmd_quicklist = (unsigned long *)*ret;
		ret[0] = 0;
		quicklists.pgtable_cache_sz--;
	}
	return (pmd_t *)ret;
}
#define pmd_alloc_one_fast(mm,addr) get_pmd_fast()

extern inline void free_pmd_fast(pmd_t *pmd)
{
	*(unsigned long *)pmd = (unsigned long)pmd_quicklist;
	pmd_quicklist = (unsigned long *)pmd;
	quicklists.pgtable_cache_sz++;
}

extern inline int free_pmd_slow(pmd_t *pmd)
{
	return free_pointer_table(pmd);
}

/* The pgd cache is folded into the pmd cache, so these are dummy routines. */
extern inline pgd_t *get_pgd_fast(void)
{
	return (pgd_t *)0;
}

extern inline void free_pgd_fast(pgd_t *pgd)
{
}

extern inline void free_pgd_slow(pgd_t *pgd)
{
}

extern void __bad_pte(pmd_t *pmd);
extern void __bad_pmd(pgd_t *pgd);

extern inline void pte_free(pte_t *pte)
{
	free_pte_fast(pte);
}

extern inline void pmd_free(pmd_t *pmd)
{
	free_pmd_fast(pmd);
}

extern inline void pte_free_kernel(pte_t *pte)
{
	free_pte_fast(pte);
}

extern inline pte_t *pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
	return pte_alloc(&init_mm, pmd, address);
}

extern inline void pmd_free_kernel(pmd_t *pmd)
{
	free_pmd_fast(pmd);
}

extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
{
	return pmd_alloc(&init_mm, pgd, address);
}

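/* A pgd table is just another pointer table, so it is allocated and
   freed through the pmd quicklist and the pointer-table pool. */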
extern inline void pgd_free(pgd_t *pgd)
{
	free_pmd_fast((pmd_t *)pgd);
}

extern inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)get_pmd_fast();

	if (!pgd)
		pgd = (pgd_t *)get_pointer_table();
	return pgd;
}

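/* Hook a pte table into a pmd entry and a pmd table into a pgd entry;
   the mm argument is unused on m68k. */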
#define pmd_populate(MM, PMD, PTE)	pmd_set(PMD, PTE)
#define pgd_populate(MM, PGD, PMD)	pgd_set(PGD, PMD)

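/* Trim the page table caches back towards the low watermark when they
   have grown past the high one; returns the number of pages freed. */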
extern int do_check_pgt_cache(int, int);

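/* Kernel mappings hang off a separate supervisor root pointer on m68k,
   so they never need to be propagated into per-process pgds and
   set_pgdir() has nothing to do. */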
extern inline void set_pgdir(unsigned long address, pgd_t entry)
{
}

/*
 * Flush all user-space ATC entries.
 */
static inline void __flush_tlb(void)
{
	if (CPU_IS_040_OR_060)
		__asm__ __volatile__(".chip 68040\n\t"
				     "pflushan\n\t"
				     ".chip 68k");
	else
		__asm__ __volatile__("pflush #0,#4");
}

static inline void __flush_tlb040_one(unsigned long addr)
{
	__asm__ __volatile__(".chip 68040\n\t"
			     "pflush (%0)\n\t"
			     ".chip 68k"
			     : : "a" (addr));
}

static inline void __flush_tlb_one(unsigned long addr)
{
	if (CPU_IS_040_OR_060)
		__flush_tlb040_one(addr);
	else
		__asm__ __volatile__("pflush #0,#4,(%0)" : : "a" (addr));
}

#define flush_tlb() __flush_tlb()

/*
 * Flush all ATC entries (both kernel and user-space).
 */
static inline void flush_tlb_all(void)
{
	if (CPU_IS_040_OR_060)
		__asm__ __volatile__(".chip 68040\n\t"
				     "pflusha\n\t"
				     ".chip 68k");
	else
		__asm__ __volatile__("pflusha");
}

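/*
 * ATC entries carry no address-space ID and the user entries are
 * flushed again at context-switch time, so an mm other than the
 * current one can safely be left alone here.
 */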
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->mm)
		__flush_tlb();
}

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	if (vma->vm_mm == current->mm)
		__flush_tlb_one(addr);
}

static inline void flush_tlb_range(struct mm_struct *mm,
				   unsigned long start, unsigned long end)
{
	if (mm == current->mm)
		__flush_tlb();
}

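/* The ATC caches final translations only, not the intermediate table
   descriptors, so tearing down page tables needs no extra flush. */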
extern inline void flush_tlb_pgtables(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
}

#endif /* _MOTOROLA_PGALLOC_H */