/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2016 Matthew Macy (mmacy@mattmacy.io)
 * Copyright (c) 2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/sched.h>

#include <machine/bus.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/vm_extern.h>

#include <vm/uma.h>
#include <vm/uma_int.h>

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/fs.h>
#include <linux/shmem_fs.h>

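/*
 * Fill out the memory fields of the Linux "struct sysinfo".  Only the
 * total RAM, total high memory (reported as zero here) and memory unit
 * size are provided.
 */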
void
si_meminfo(struct sysinfo *si)
{
	si->totalram = physmem;
	si->totalhigh = 0;
	si->mem_unit = PAGE_SIZE;
}

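/*
 * Return the kernel virtual address of a page, mirroring the Linux
 * page_address() API.  Pages belonging to kernel_object are mapped at a
 * fixed offset in the kernel map; other pages are resolved through the
 * direct map when one exists, and NULL is returned otherwise.
 */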
void *
linux_page_address(struct page *page)
{

	if (page->object != kernel_object) {
		return (PMAP_HAS_DMAP ?
		    ((void *)(uintptr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(page))) :
		    NULL);
	}
	return ((void *)(uintptr_t)(VM_MIN_KERNEL_ADDRESS +
	    IDX_TO_OFF(page->pindex)));
}

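/*
 * Allocate a physically contiguous run of 1 << order pages, as the
 * Linux alloc_pages() API does.  On systems with a direct map the pages
 * come straight from the page allocator, and a failed contiguous
 * allocation is retried once after reclaim for M_WAITOK callers.
 * Without a direct map the pages are backed by a kernel virtual mapping
 * obtained from linux_alloc_kmem().
 */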
vm_page_t
linux_alloc_pages(gfp_t flags, unsigned int order)
{
	vm_page_t page;

	if (PMAP_HAS_DMAP) {
		unsigned long npages = 1UL << order;
		int req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_NORMAL;

		if ((flags & M_ZERO) != 0)
			req |= VM_ALLOC_ZERO;
		if (order == 0 && (flags & GFP_DMA32) == 0) {
			page = vm_page_alloc(NULL, 0, req);
			if (page == NULL)
				return (NULL);
		} else {
			vm_paddr_t pmax = (flags & GFP_DMA32) ?
			    BUS_SPACE_MAXADDR_32BIT : BUS_SPACE_MAXADDR;
		retry:
			page = vm_page_alloc_contig(NULL, 0, req,
			    npages, 0, pmax, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);

			if (page == NULL) {
				if (flags & M_WAITOK) {
					if (!vm_page_reclaim_contig(req,
					    npages, 0, pmax, PAGE_SIZE, 0)) {
						vm_wait(NULL);
					}
					flags &= ~M_WAITOK;
					goto retry;
				}
				return (NULL);
			}
		}
		if (flags & M_ZERO) {
			unsigned long x;

			for (x = 0; x != npages; x++) {
				vm_page_t pgo = page + x;

				if ((pgo->flags & PG_ZERO) == 0)
					pmap_zero_page(pgo);
			}
		}
	} else {
		vm_offset_t vaddr;

		vaddr = linux_alloc_kmem(flags, order);
		if (vaddr == 0)
			return (NULL);

		page = PHYS_TO_VM_PAGE(vtophys((void *)vaddr));

		KASSERT(vaddr == (vm_offset_t)page_address(page),
		    ("Page address mismatch"));
	}

	return (page);
}

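/*
 * Free a run of 1 << order pages obtained from linux_alloc_pages(),
 * either by unwiring and freeing each page or, on systems without a
 * direct map, by releasing the backing kernel virtual mapping.
 */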
void
linux_free_pages(vm_page_t page, unsigned int order)
{
	if (PMAP_HAS_DMAP) {
		unsigned long npages = 1UL << order;
		unsigned long x;

		for (x = 0; x != npages; x++) {
			vm_page_t pgo = page + x;

			if (vm_page_unwire_noq(pgo))
				vm_page_free(pgo);
		}
	} else {
		vm_offset_t vaddr;

		vaddr = (vm_offset_t)page_address(page);

		linux_free_kmem(vaddr, order);
	}
}

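/*
 * Allocate 1 << order pages of wired kernel virtual memory.  GFP_DMA32
 * requests are satisfied with a physically contiguous allocation below
 * 4GB so the memory remains reachable by 32-bit DMA engines.
 */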
vm_offset_t
linux_alloc_kmem(gfp_t flags, unsigned int order)
{
	size_t size = ((size_t)PAGE_SIZE) << order;
	vm_offset_t addr;

	if ((flags & GFP_DMA32) == 0) {
		addr = kmem_malloc(size, flags & GFP_NATIVE_MASK);
	} else {
		addr = kmem_alloc_contig(size, flags & GFP_NATIVE_MASK, 0,
		    BUS_SPACE_MAXADDR_32BIT, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
	}
	return (addr);
}

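/*
 * Release kernel virtual memory obtained from linux_alloc_kmem().
 */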
void
linux_free_kmem(vm_offset_t addr, unsigned int order)
{
	size_t size = ((size_t)PAGE_SIZE) << order;

	kmem_free(addr, size);
}

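/*
 * Common helper for the get_user_pages*() entry points: fault in and
 * hold the user pages backing the requested range.  Returns nr_pages on
 * success or -EFAULT if the range could not be held.
 */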
static int
linux_get_user_pages_internal(vm_map_t map, unsigned long start, int nr_pages,
    int write, struct page **pages)
{
	vm_prot_t prot;
	size_t len;
	int count;

	prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
	len = ptoa((vm_offset_t)nr_pages);
	count = vm_fault_quick_hold_pages(map, start, len, prot, pages, nr_pages);
	return (count == -1 ? -EFAULT : nr_pages);
}

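/*
 * Fast path of get_user_pages(): hold only those pages that are already
 * resident and mapped with sufficient protection, taking no page faults
 * and never sleeping.  Returns the number of pages held, which may be
 * less than nr_pages.
 */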
int
__get_user_pages_fast(unsigned long start, int nr_pages, int write,
    struct page **pages)
{
	vm_map_t map;
	vm_page_t *mp;
	vm_offset_t va;
	vm_offset_t end;
	vm_prot_t prot;
	int count;

	if (nr_pages == 0 || in_interrupt())
		return (0);

	MPASS(pages != NULL);
	map = &curthread->td_proc->p_vmspace->vm_map;
	end = start + ptoa((vm_offset_t)nr_pages);
	if (!vm_map_range_valid(map, start, end))
		return (-EINVAL);
	prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
	for (count = 0, mp = pages, va = start; va < end;
	    mp++, va += PAGE_SIZE, count++) {
		*mp = pmap_extract_and_hold(map->pmap, va, prot);
		if (*mp == NULL)
			break;

		if ((prot & VM_PROT_WRITE) != 0 &&
		    (*mp)->dirty != VM_PAGE_BITS_ALL) {
			/*
			 * Explicitly dirty the physical page.  Otherwise, the
			 * caller's changes may go unnoticed because they are
			 * performed through an unmanaged mapping or by a DMA
			 * operation.
			 *
			 * The object lock is not held here.
			 * See vm_page_clear_dirty_mask().
			 */
			vm_page_dirty(*mp);
		}
	}
	return (count);
}

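/*
 * Hold user pages from another task's address space.  The "mm" and
 * "vmas" arguments exist for Linux API compatibility and are ignored.
 */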
long
get_user_pages_remote(struct task_struct *task, struct mm_struct *mm,
    unsigned long start, unsigned long nr_pages, int gup_flags,
    struct page **pages, struct vm_area_struct **vmas)
{
	vm_map_t map;

	map = &task->task_thread->td_proc->p_vmspace->vm_map;
	return (linux_get_user_pages_internal(map, start, nr_pages,
	    !!(gup_flags & FOLL_WRITE), pages));
}

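/*
 * Hold user pages from the current process' address space; the "vmas"
 * argument is ignored.
 */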
long
get_user_pages(unsigned long start, unsigned long nr_pages, int gup_flags,
    struct page **pages, struct vm_area_struct **vmas)
{
	vm_map_t map;

	map = &curthread->td_proc->p_vmspace->vm_map;
	return (linux_get_user_pages_internal(map, start, nr_pages,
	    !!(gup_flags & FOLL_WRITE), pages));
}

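/*
 * Approximate the Linux is_vmalloc_addr() test by checking whether the
 * address is backed by an UMA slab, as is the case for kernel memory
 * obtained through UMA/malloc(9).
 */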
int
is_vmalloc_addr(const void *addr)
{
	return (vtoslab((vm_offset_t)addr & ~UMA_SLAB_MASK) != NULL);
}