/*-
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and code derived from software contributed to
 * Berkeley by William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Utah $Hdr: mem.c 1.13 89/10/08$
 *	from: @(#)mem.c	7.2 (Berkeley) 5/9/91
 *	from: FreeBSD: src/sys/i386/i386/mem.c,v 1.94 2001/09/26
 *
 * $FreeBSD: head/sys/sparc64/sparc64/mem.c 113238 2003-04-08 06:35:09Z jake $
 */

/*
 * Memory special file
 *
 * NOTE: other architectures support mmap()'ing the mem and kmem devices; this
 * might cause illegal aliases to be created for the locked kernel page(s), so
 * it is not implemented.
 */

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/systm.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>

#include <machine/cache.h>
#include <machine/md_var.h>
#include <machine/pmap.h>
#include <machine/tlb.h>
#include <machine/upa.h>

static dev_t memdev, kmemdev;

static	d_open_t	mmopen;
static	d_close_t	mmclose;
static	d_read_t	mmrw;

#define CDEV_MAJOR 2
static struct cdevsw mem_cdevsw = {
	.d_open =	mmopen,
	.d_close =	mmclose,
	.d_read =	mmrw,
	.d_write =	mmrw,
	.d_name =	"mem",
	.d_maj =	CDEV_MAJOR,
	.d_flags =	D_MEM,
};

static int
mmclose(dev_t dev, int flags, int fmt, struct thread *td)
{

	return (0);
}

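/*
 * Only minor 0 (/dev/mem) and minor 1 (/dev/kmem) exist; opening either
 * one for writing is refused once the securelevel has been raised.
 */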
static int
mmopen(dev_t dev, int flags, int fmt, struct thread *td)
{
	int error;

	switch (minor(dev)) {
	case 0:
	case 1:
		if (flags & FWRITE) {
			error = securelevel_gt(td->td_ucred, 0);
			if (error != 0)
				return (error);
		}
		break;
	default:
		return (ENXIO);
	}
	return (0);
}

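/*
 * Handle read(2)/write(2) on /dev/mem and /dev/kmem.  Minor 0 transfers
 * physical memory, one page at a time, through a temporary or direct
 * kernel mapping; minor 1 transfers kernel virtual memory at the given
 * offset.
 */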
/*ARGSUSED*/
static int
mmrw(dev_t dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	vm_offset_t eva;
	vm_offset_t off;
	vm_offset_t ova;
	vm_offset_t va;
	vm_prot_t prot;
	vm_paddr_t pa;
	vm_size_t cnt;
	vm_page_t m;
	int color;
	int error;
	int i;

	cnt = 0;
	error = 0;
	ova = 0;

	GIANT_REQUIRED;

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor(dev)) {
		case 0:
			/* mem (physical memory) */
			pa = uio->uio_offset & ~PAGE_MASK;
			if (!is_physical_memory(pa)) {
				error = EFAULT;
				break;
			}

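			/*
			 * Transfer at most one page per iteration; cnt is
			 * clamped so that neither the physical page nor the
			 * page holding the user buffer is crossed.
			 */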
			off = uio->uio_offset & PAGE_MASK;
			cnt = PAGE_SIZE - ((vm_offset_t)iov->iov_base &
			    PAGE_MASK);
			cnt = min(cnt, PAGE_SIZE - off);
			cnt = min(cnt, iov->iov_len);

			m = NULL;
			for (i = 0; phys_avail[i] != 0; i += 2) {
				if (pa >= phys_avail[i] &&
				    pa < phys_avail[i + 1]) {
					m = PHYS_TO_VM_PAGE(pa);
					break;
				}
			}

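			/*
			 * Pages within a phys_avail range are managed by
			 * the VM system and may have existing mappings.
			 * Map such a page temporarily at a scratch address
			 * of the same D-cache color so that no illegal
			 * alias is created; a page without a known color
			 * (-1) is mapped at the start of the scratch area.
			 * All other physical addresses are accessed via
			 * the direct mapping.
			 */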
			if (m != NULL) {
				if (ova == 0) {
					ova = kmem_alloc_wait(kernel_map,
					    PAGE_SIZE * DCACHE_COLORS);
				}
				if ((color = m->md.color) == -1)
					va = ova;
				else
					va = ova + color * PAGE_SIZE;
				pmap_qenter(va, &m, 1);
				error = uiomove((void *)(va + off), cnt,
				    uio);
				pmap_qremove(va, 1);
			} else {
				va = TLB_PHYS_TO_DIRECT(pa);
				error = uiomove((void *)(va + off), cnt,
				    uio);
			}
			break;
		case 1:
			/* kmem (kernel memory) */
			va = trunc_page(uio->uio_offset);
			eva = round_page(uio->uio_offset + iov->iov_len);

			/*
			 * Make sure that all of the pages are currently
			 * resident so we don't create any zero fill pages.
			 */
			for (; va < eva; va += PAGE_SIZE)
				if (pmap_kextract(va) == 0)
					return (EFAULT);

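			/*
			 * Addresses below the start of the direct-mapped
			 * region must additionally pass a kernacc() check
			 * for the access that is being requested.
			 */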
			prot = (uio->uio_rw == UIO_READ) ? VM_PROT_READ :
			    VM_PROT_WRITE;
			va = uio->uio_offset;
			if (va < VM_MIN_DIRECT_ADDRESS &&
			    kernacc((void *)va, iov->iov_len, prot) == FALSE)
				return (EFAULT);

			error = uiomove((void *)va, iov->iov_len, uio);
			break;
		default:
			return (ENODEV);
		}
	}
	if (ova != 0)
		kmem_free_wakeup(kernel_map, ova, PAGE_SIZE * DCACHE_COLORS);
	return (error);
}

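/*
 * Module event handler: create /dev/mem and /dev/kmem on load and remove
 * them again on unload.
 */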
static int
mem_modevent(module_t mod, int type, void *data)
{
	switch(type) {
	case MOD_LOAD:
		if (bootverbose)
			printf("mem: <memory & I/O>\n");

		memdev = make_dev(&mem_cdevsw, 0, UID_ROOT, GID_KMEM,
			0640, "mem");
		kmemdev = make_dev(&mem_cdevsw, 1, UID_ROOT, GID_KMEM,
			0640, "kmem");
		return 0;

	case MOD_UNLOAD:
		destroy_dev(memdev);
		destroy_dev(kmemdev);
		return 0;

	case MOD_SHUTDOWN:
		return 0;

	default:
		return EOPNOTSUPP;
	}
}

DEV_MODULE(mem, mem_modevent, NULL);