/*-
 * Copyright (c) 2014 Gleb Smirnoff <glebius@FreeBSD.org>
 * Copyright (c) 2003, 2005 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>

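/*
 * Default pool size: a 512-buffer floor plus 16 buffers per unit of
 * maxusers.  The value may be overridden at boot with the
 * kern.ipc.nsfbufs tunable (fetched in sf_buf_init() below).
 */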
#ifndef NSFBUFS
#define	NSFBUFS		(512 + maxusers * 16)
#endif

static int nsfbufs;
static int nsfbufspeak;
static int nsfbufsused;

SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufs, CTLFLAG_RDTUN, &nsfbufs, 0,
    "Maximum number of sendfile(2) sf_bufs available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufspeak, CTLFLAG_RD, &nsfbufspeak, 0,
    "Number of sendfile(2) sf_bufs at peak usage");
SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufsused, CTLFLAG_RD, &nsfbufsused, 0,
    "Number of sendfile(2) sf_bufs in use");

static void	sf_buf_init(void *arg);
SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL);

LIST_HEAD(sf_head, sf_buf);

/*
 * A hash table of active sendfile(2) buffers
 */
static struct sf_head *sf_buf_active;
static u_long sf_buf_hashmask;

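/*
 * Hash a page by its index within vm_page_array.  hashinit(9) returns a
 * power-of-two number of buckets, so masking with sf_buf_hashmask picks
 * a bucket directly.
 */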
#define	SF_BUF_HASH(m)	(((m) - vm_page_array) & sf_buf_hashmask)

static TAILQ_HEAD(, sf_buf) sf_buf_freelist;
static u_int	sf_buf_alloc_want;

/*
 * A lock used to synchronize access to the hash table and free list
 */
static struct mtx sf_buf_lock;

/*
 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
 */
static void
sf_buf_init(void *arg)
{
	struct sf_buf *sf_bufs;
	vm_offset_t sf_base;
	int i;

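	/*
	 * On platforms with a direct map covering all of physical memory
	 * (PMAP_HAS_DMAP), every page is permanently mapped, so no sf_buf
	 * pool is needed.
	 */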
	if (PMAP_HAS_DMAP)
		return;

	nsfbufs = NSFBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);

	sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
	TAILQ_INIT(&sf_buf_freelist);
	sf_base = kva_alloc(nsfbufs * PAGE_SIZE);
	sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
	    M_WAITOK | M_ZERO);
	for (i = 0; i < nsfbufs; i++) {
		sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
	}
	sf_buf_alloc_want = 0;
	mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
}
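
/*
 * Example (hypothetical value): the pool size can be raised at boot by
 * setting the tunable in loader.conf(5):
 *
 *	kern.ipc.nsfbufs="65536"
 */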

/*
 * Get an sf_buf from the freelist.  May block if none are available.
 */
struct sf_buf *
sf_buf_alloc(struct vm_page *m, int flags)
{
	struct sf_head *hash_list;
	struct sf_buf *sf;
	int error;

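	/*
	 * With a direct map, the page itself serves as the sf_buf handle;
	 * sf_buf_kva() resolves it through the direct map, so no pool
	 * entry is consumed.
	 */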
	if (PMAP_HAS_DMAP)
		return ((struct sf_buf *)m);

	KASSERT(curthread->td_pinned > 0 || (flags & SFB_CPUPRIVATE) == 0,
	    ("sf_buf_alloc(SFB_CPUPRIVATE): curthread not pinned"));
	hash_list = &sf_buf_active[SF_BUF_HASH(m)];
	mtx_lock(&sf_buf_lock);
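	/*
	 * If the page already has an active sf_buf, reuse its mapping and
	 * just take another reference.
	 */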
	LIST_FOREACH(sf, hash_list, list_entry) {
		if (sf->m == m) {
			sf->ref_count++;
			if (sf->ref_count == 1) {
				TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
				nsfbufsused++;
				nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
			}
#if defined(SMP) && defined(SFBUF_CPUSET)
			sf_buf_shootdown(sf, flags);
#endif
			goto done;
		}
	}
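	/*
	 * Otherwise take a buffer from the free list, sleeping until one
	 * becomes available unless SFB_NOWAIT was given.
	 */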
	while ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) {
		if (flags & SFB_NOWAIT)
			goto done;
		sf_buf_alloc_want++;
		SFSTAT_INC(sf_allocwait);
		error = msleep(&sf_buf_freelist, &sf_buf_lock,
		    (flags & SFB_CATCH) ? PCATCH | PVM : PVM, "sfbufa", 0);
		sf_buf_alloc_want--;

		/*
		 * If we got a signal, don't risk going back to sleep.
		 */
		if (error)
			goto done;
	}
	TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
	if (sf->m != NULL)
		LIST_REMOVE(sf, list_entry);
	LIST_INSERT_HEAD(hash_list, sf, list_entry);
	sf->ref_count = 1;
	sf->m = m;
	nsfbufsused++;
	nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
	sf_buf_map(sf, flags);
done:
	mtx_unlock(&sf_buf_lock);
	return (sf);
}
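
/*
 * A minimal usage sketch (illustrative only; error handling elided): a
 * consumer maps a wired page, obtains its kernel virtual address with
 * sf_buf_kva(9), and drops the mapping when done:
 *
 *	struct sf_buf *sf;
 *
 *	sf = sf_buf_alloc(m, SFB_NOWAIT);
 *	if (sf != NULL) {
 *		bcopy(src, (void *)sf_buf_kva(sf), PAGE_SIZE);
 *		sf_buf_free(sf);
 *	}
 */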

/*
 * Remove a reference from the given sf_buf, adding it to the free list
 * when its reference count reaches zero.  A freed sf_buf, however,
 * retains its virtual-to-physical mapping until it is recycled or
 * reactivated by sf_buf_alloc(9).
 */
void
sf_buf_free(struct sf_buf *sf)
{

	if (PMAP_HAS_DMAP)
		return;

	mtx_lock(&sf_buf_lock);
	sf->ref_count--;
	if (sf->ref_count == 0) {
		TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry);
		nsfbufsused--;
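		/*
		 * If the MD code tore down the mapping, the buffer no
		 * longer caches a translation for the page; unhook it
		 * from the hash table.
		 */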
		if (sf_buf_unmap(sf)) {
			sf->m = NULL;
			LIST_REMOVE(sf, list_entry);
		}
		if (sf_buf_alloc_want > 0)
			wakeup(&sf_buf_freelist);
	}
	mtx_unlock(&sf_buf_lock);
}

void
sf_buf_ref(struct sf_buf *sf)
{

	if (PMAP_HAS_DMAP)
		return;

	mtx_lock(&sf_buf_lock);
	KASSERT(sf->ref_count > 0, ("%s: sf %p not allocated", __func__, sf));
	sf->ref_count++;
	mtx_unlock(&sf_buf_lock);
}

#ifdef SFBUF_PROCESS_PAGE
/*
 * Run the callback function on the sf_buf, if any, that holds the given
 * page.
 */
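/*
 * Note: the callback runs with sf_buf_lock held, so it must not sleep
 * or call back into the sf_buf allocator.
 */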
boolean_t
sf_buf_process_page(vm_page_t m, void (*cb)(struct sf_buf *))
{
	struct sf_head *hash_list;
	struct sf_buf *sf;

	hash_list = &sf_buf_active[SF_BUF_HASH(m)];
	mtx_lock(&sf_buf_lock);
	LIST_FOREACH(sf, hash_list, list_entry) {
		if (sf->m == m) {
			cb(sf);
			mtx_unlock(&sf_buf_lock);
			return (TRUE);
		}
	}
	mtx_unlock(&sf_buf_lock);
	return (FALSE);
}
#endif	/* SFBUF_PROCESS_PAGE */