/* mem.c revision 111815 */
1/*-
2 * Copyright (c) 1988 University of Utah.
3 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to Berkeley by
7 * the Systems Programming Group of the University of Utah Computer
8 * Science Department, and code derived from software contributed to
9 * Berkeley by William Jolitz.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 *    notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 *    notice, this list of conditions and the following disclaimer in the
18 *    documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 *    must display the following acknowledgement:
21 *	This product includes software developed by the University of
22 *	California, Berkeley and its contributors.
23 * 4. Neither the name of the University nor the names of its contributors
24 *    may be used to endorse or promote products derived from this software
25 *    without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
28 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
30 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
31 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * SUCH DAMAGE.
38 *
39 *	from: Utah $Hdr: mem.c 1.13 89/10/08$
40 *	from: @(#)mem.c	7.2 (Berkeley) 5/9/91
41 *	from: FreeBSD: src/sys/i386/i386/mem.c,v 1.94 2001/09/26
42 *
43 * $FreeBSD: head/sys/sparc64/sparc64/mem.c 111815 2003-03-03 12:15:54Z phk $
44 */
45
46/*
47 * Memory special file
48 *
49 * NOTE: other architectures support mmap()'ing the mem and kmem devices; this
50 * might cause illegal aliases to be created for the locked kernel page(s), so
51 * it is not implemented.
52 */
53
54#include <sys/param.h>
55#include <sys/conf.h>
56#include <sys/fcntl.h>
57#include <sys/kernel.h>
58#include <sys/lock.h>
59#include <sys/mutex.h>
60#include <sys/proc.h>
61#include <sys/signalvar.h>
62#include <sys/systm.h>
63#include <sys/uio.h>
64
65#include <vm/vm.h>
66#include <vm/vm_param.h>
67#include <vm/pmap.h>
68#include <vm/vm_extern.h>
69
70#include <machine/cache.h>
71#include <machine/pmap.h>
72#include <machine/upa.h>
73
/* Device nodes for /dev/mem (minor 0) and /dev/kmem (minor 1). */
static dev_t memdev, kmemdev;

static	d_open_t	mmopen;
static	d_close_t	mmclose;
static	d_read_t	mmrw;

/* Character device major number for the memory devices. */
#define CDEV_MAJOR 2
static struct cdevsw mem_cdevsw = {
	.d_open =	mmopen,
	.d_close =	mmclose,
	.d_read =	mmrw,
	.d_write =	mmrw,	/* mmrw handles both reads and writes */
	.d_name =	"mem",
	.d_maj =	CDEV_MAJOR,
	.d_flags =	D_MEM,
};
90
/*
 * Close routine for the memory devices.  No per-open state is kept,
 * so there is nothing to release; always succeeds.
 */
static int
mmclose(dev_t dev, int flags, int fmt, struct thread *td)
{

	return (0);
}
97
98static int
99mmopen(dev_t dev, int flags, int fmt, struct thread *td)
100{
101	int error;
102
103	switch (minor(dev)) {
104	case 0:
105	case 1:
106		if (flags & FWRITE) {
107			error = securelevel_gt(td->td_proc->p_ucred, 0);
108			if (error != 0)
109				return (error);
110		}
111		break;
112	default:
113		return (ENXIO);
114	}
115	return (0);
116}
117
118#define	IOSTART		UPA_MEMSTART
119
120/*ARGSUSED*/
121static int
122mmrw(dev_t dev, struct uio *uio, int flags)
123{
124	struct iovec *iov;
125	int error = 0;
126	vm_offset_t addr, eaddr, o, v = 0;
127	vm_prot_t prot;
128	vm_size_t c = 0;
129	u_long asi;
130	char *buf = NULL;
131
132	GIANT_REQUIRED;
133
134	while (uio->uio_resid > 0 && error == 0) {
135		iov = uio->uio_iov;
136		if (iov->iov_len == 0) {
137			uio->uio_iov++;
138			uio->uio_iovcnt--;
139			if (uio->uio_iovcnt < 0)
140				panic("mmrw");
141			continue;
142		}
143		switch (minor(dev)) {
144		case 0:
145			/* mem (physical memory) */
146			if (buf == NULL) {
147				buf = malloc(PAGE_SIZE, M_DEVBUF, M_WAITOK);
148				if (buf == NULL) {
149					error = ENOMEM;
150					break;
151				}
152			}
153			v = uio->uio_offset;
154			asi = ASI_PHYS_USE_EC;
155			/* Access device memory noncacheable. */
156			if (v >= IOSTART)
157				asi = ASI_PHYS_BYPASS_EC_WITH_EBIT;
158			o = v & PAGE_MASK;
159			c = ulmin(iov->iov_len, PAGE_SIZE - o);
160			/*
161			 * This double copy could be avoided, at the cost of
162			 * inlining a version of uiomove. Since this is not
163			 * performance-critical, it is probably not worth it.
164			 */
165			if (uio->uio_rw == UIO_READ)
166				ascopyfrom(asi, v, buf, c);
167			error = uiomove(buf, c, uio);
168			if (error == 0 && uio->uio_rw == UIO_WRITE)
169				ascopyto(buf, asi, v, c);
170			/*
171			 * If a write was evil enough to change kernel code,
172			 * I$ must be flushed. Also, D$ must be flushed if there
173			 * is a chance that there is a cacheable mapping to
174			 * avoid working with stale data.
175			 */
176			if (v < IOSTART && uio->uio_rw == UIO_WRITE) {
177				icache_inval_phys(v, v + c);
178				dcache_inval_phys(v, v + c);
179			}
180			break;
181		case 1:
182			/* kmem (kernel memory) */
183			c = iov->iov_len;
184
185			/*
186			 * Make sure that all of the pages are currently resident so
187			 * that we don't create any zero-fill pages.
188			 */
189			addr = trunc_page(uio->uio_offset);
190			eaddr = round_page(uio->uio_offset + c);
191
192			for (; addr < eaddr; addr += PAGE_SIZE)
193				if (pmap_extract(kernel_pmap, addr) == 0)
194					return EFAULT;
195
196			prot = (uio->uio_rw == UIO_READ) ? VM_PROT_READ :
197			    VM_PROT_WRITE;
198			v = uio->uio_offset;
199			if (v < VM_MIN_DIRECT_ADDRESS &&
200			    kernacc((caddr_t)v, c, prot) == FALSE)
201				return (EFAULT);
202			error = uiomove((caddr_t)v, c, uio);
203			if (uio->uio_rw == UIO_WRITE)
204				icache_flush(v, v + c);
205		}
206	}
207	if (buf != NULL)
208		free(buf, M_DEVBUF);
209	return (error);
210}
211
212static int
213mem_modevent(module_t mod, int type, void *data)
214{
215	switch(type) {
216	case MOD_LOAD:
217		if (bootverbose)
218			printf("mem: <memory & I/O>\n");
219
220		memdev = make_dev(&mem_cdevsw, 0, UID_ROOT, GID_KMEM,
221			0640, "mem");
222		kmemdev = make_dev(&mem_cdevsw, 1, UID_ROOT, GID_KMEM,
223			0640, "kmem");
224		return 0;
225
226	case MOD_UNLOAD:
227		destroy_dev(memdev);
228		destroy_dev(kmemdev);
229		return 0;
230
231	case MOD_SHUTDOWN:
232		return 0;
233
234	default:
235		return EOPNOTSUPP;
236	}
237}
238
/* Register this driver as a kernel module with the above event handler. */
DEV_MODULE(mem, mem_modevent, NULL);
240