/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _SYS_SOCKETVAR_H_
#define _SYS_SOCKETVAR_H_

/*
 * Socket generation count type.  Also used in xinpcb, xtcpcb, xunpcb.
 */
typedef uint64_t so_gen_t;

#if defined(_KERNEL) || defined(_WANT_SOCKET)
#include <sys/queue.h>			/* for TAILQ macros */
#include <sys/selinfo.h>		/* for struct selinfo */
#include <sys/_lock.h>
#include <sys/_mutex.h>
#include <sys/osd.h>
#include <sys/_sx.h>
#include <sys/sockbuf.h>
#ifdef _KERNEL
#include <sys/caprights.h>
#include <sys/sockopt.h>
#endif

struct vnet;

/*
 * Kernel structure per socket.
 * Contains send and receive buffer queues,
 * handle on protocol and pointer to protocol
 * private data and error information.
 */

/* Upcall invoked on socket events for kernel sockets; returns SU_* (below). */
typedef	int so_upcall_t(struct socket *, void *, int);
/* Optional destructor invoked when a socket is torn down. */
typedef	void so_dtor_t(struct socket *);

struct socket;

/* Which accept queue (if any) a subsidiary socket currently sits on. */
enum socket_qstate {
	SQ_NONE = 0,
	SQ_INCOMP = 0x0800,	/* on sol_incomp */
	SQ_COMP = 0x1000,	/* on sol_comp */
};

/*-
 * Locking key to struct socket:
 * (a) constant after allocation, no locking required.
 * (b) locked by SOCK_LOCK(so).
 * (cr) locked by SOCK_RECVBUF_LOCK(so)
 * (cs) locked by SOCK_SENDBUF_LOCK(so)
 * (e) locked by SOLISTEN_LOCK() of corresponding listening socket.
 * (f) not locked since integer reads/writes are atomic.
 * (g) used only as a sleep/wakeup address, no value.
 * (h) locked by global mutex so_global_mtx.
 * (k) locked by KTLS workqueue mutex
 */
TAILQ_HEAD(accept_queue, socket);
struct socket {
	struct mtx	so_lock;
	volatile u_int	so_count;	/* (b / refcount) */
	struct selinfo	so_rdsel;	/* (b/cr) for so_rcv/so_comp */
	struct selinfo	so_wrsel;	/* (b/cs) for so_snd */
	int	so_options;		/* (b) from socket call, see socket.h */
	short	so_type;		/* (a) generic type, see socket.h */
	short	so_state;		/* (b) internal state flags SS_* */
	void	*so_pcb;		/* protocol control block */
	struct vnet	*so_vnet;	/* (a) network stack instance */
	struct protosw	*so_proto;	/* (a) protocol handle */
	short	so_linger;		/* time to linger close(2) */
	short	so_timeo;		/* (g) connection timeout */
	u_short	so_error;		/* (f) error affecting connection */
	u_short	so_rerror;		/* (f) error affecting connection */
	struct sigio	*so_sigio;	/* [sg] information for async I/O or
					   out of band data (SIGURG) */
	struct ucred	*so_cred;	/* (a) user credentials */
	struct label	*so_label;	/* (b) MAC label for socket */
	/* NB: generation count must not be first. */
	so_gen_t so_gencnt;		/* (h) generation count */
	void	*so_emuldata;		/* (b) private data for emulators */
	so_dtor_t *so_dtor;		/* (b) optional destructor */
	struct osd	osd;		/* Object Specific extensions */
	/*
	 * so_fibnum, so_user_cookie and friends can be used to attach
	 * some user-specified metadata to a socket, which then can be
	 * used by the kernel for various actions.
	 * so_user_cookie is used by ipfw/dummynet.
	 */
	int so_fibnum;		/* routing domain for this socket */
	uint32_t so_user_cookie;

	int so_ts_clock;	/* type of the clock used for timestamps */
	uint32_t so_max_pacing_rate;	/* (f) TX rate limit in bytes/s */

	/*
	 * Mutexes to prevent interleaving of socket I/O.  These have to be
	 * outside of the socket buffers in order to interlock with listen(2).
	 */
	struct sx so_snd_sx __aligned(CACHE_LINE_SIZE);
	struct mtx so_snd_mtx;

	struct sx so_rcv_sx __aligned(CACHE_LINE_SIZE);
	struct mtx so_rcv_mtx;

	/*
	 * The two anonymous structs below overlay each other: a socket is
	 * either a dataflow socket or a listening socket, never both at
	 * once (see SOLISTENING(), keyed on SO_ACCEPTCONN in so_options).
	 */
	union {
		/* Regular (data flow) socket. */
		struct {
			/* (cr, cs) Receive and send buffers. */
			struct sockbuf		so_rcv, so_snd;

			/* (e) Our place on accept queue. */
			TAILQ_ENTRY(socket)	so_list;
			struct socket	*so_listen;	/* (b) */
			enum socket_qstate so_qstate;	/* (b) */
			/* (b) cached MAC label for peer */
			struct label	*so_peerlabel;
			u_long	so_oobmark;	/* chars to oob mark */

			/* (k) Our place on KTLS RX work queue. */
			STAILQ_ENTRY(socket)	so_ktls_rx_list;
		};
		/*
		 * Listening socket, where accepts occur, is so_listen in all
		 * subsidiary sockets.  If so_listen is NULL, socket is not
		 * related to an accept.  For a listening socket itself
		 * sol_incomp queues partially completed connections, while
		 * sol_comp is a queue of connections ready to be accepted.
		 * If a connection is aborted and it has so_listen set, then
		 * it has to be pulled out of either sol_incomp or sol_comp.
		 * We allow connections to queue up based on current queue
		 * lengths and limit on number of queued connections for this
		 * socket.
		 */
		struct {
			/* (e) queue of partial unaccepted connections */
			struct accept_queue	sol_incomp;
			/* (e) queue of complete unaccepted connections */
			struct accept_queue	sol_comp;
			u_int	sol_qlen;	/* (e) sol_comp length */
			u_int	sol_incqlen;	/* (e) sol_incomp length */
			u_int	sol_qlimit;	/* (e) queue limit */

			/* accept_filter(9) optional data */
			struct accept_filter	*sol_accept_filter;
			void	*sol_accept_filter_arg;	/* saved filter args */
			char	*sol_accept_filter_str;	/* saved user args */

			/* Optional upcall, for kernel socket. */
			so_upcall_t	*sol_upcall;	/* (e) */
			void		*sol_upcallarg;	/* (e) */

			/* Socket buffer parameters, to be copied to
			 * dataflow sockets, accepted from this one. */
			int	sol_sbrcv_lowat;
			int	sol_sbsnd_lowat;
			u_int	sol_sbrcv_hiwat;
			u_int	sol_sbsnd_hiwat;
			short	sol_sbrcv_flags;
			short	sol_sbsnd_flags;
			sbintime_t sol_sbrcv_timeo;
			sbintime_t sol_sbsnd_timeo;

			/* Information tracking listen queue overflows. */
			struct timeval sol_lastover;	/* (e) */
			int	sol_overcount;		/* (e) */
		};
	};
};
#endif	/* defined(_KERNEL) || defined(_WANT_SOCKET) */

/*
 * Socket state bits.
 *
 * Historically, these bits were all kept in the so_state field.
 * They are now split into separate, lock-specific fields.
 * so_state maintains basic socket state protected by the socket lock.
 * so_qstate holds information about the socket accept queues.
 * Each socket buffer also has a state field holding information
 * relevant to that socket buffer (can't send, rcv).
 * Many fields will be read without locks to improve performance and avoid
 * lock order issues.  However, this approach must be used with caution.
 */
#define	SS_ISCONNECTED		0x0002	/* socket connected to a peer */
#define	SS_ISCONNECTING		0x0004	/* in process of connecting to peer */
#define	SS_ISDISCONNECTING	0x0008	/* in process of disconnecting */
#define	SS_NBIO			0x0100	/* non-blocking ops */
#define	SS_ASYNC		0x0200	/* async i/o notify */
/* was SS_ISCONFIRMING	0x0400 */
#define	SS_ISDISCONNECTED	0x2000	/* socket disconnected from peer */

#ifdef _KERNEL

/* Per-socket mutex: protects the (b)-keyed fields of struct socket. */
#define	SOCK_MTX(so)		(&(so)->so_lock)
#define	SOCK_LOCK(so)		mtx_lock(&(so)->so_lock)
#define	SOCK_OWNED(so)		mtx_owned(&(so)->so_lock)
#define	SOCK_UNLOCK(so)		mtx_unlock(&(so)->so_lock)
#define	SOCK_LOCK_ASSERT(so)	mtx_assert(&(so)->so_lock, MA_OWNED)
#define	SOCK_UNLOCK_ASSERT(so)	mtx_assert(&(so)->so_lock, MA_NOTOWNED)

/*
 * Listening-socket variants of the lock macros: same underlying so_lock,
 * but they additionally assert that the socket really is listening, since
 * only then is the sol_* overlay of the union valid.
 */
#define	SOLISTENING(sol)	(((sol)->so_options & SO_ACCEPTCONN) != 0)
#define	SOLISTEN_LOCK(sol)	do {					\
	mtx_lock(&(sol)->so_lock);					\
	KASSERT(SOLISTENING(sol),					\
	    ("%s: %p not listening", __func__, (sol)));			\
} while (0)
#define	SOLISTEN_TRYLOCK(sol)	mtx_trylock(&(sol)->so_lock)
#define	SOLISTEN_UNLOCK(sol)	do {					\
	KASSERT(SOLISTENING(sol),					\
	    ("%s: %p not listening", __func__, (sol)));			\
	mtx_unlock(&(sol)->so_lock);					\
} while (0)
#define	SOLISTEN_LOCK_ASSERT(sol)	do {				\
	mtx_assert(&(sol)->so_lock, MA_OWNED);				\
	KASSERT(SOLISTENING(sol),					\
	    ("%s: %p not listening", __func__, (sol)));			\
} while (0)
#define	SOLISTEN_UNLOCK_ASSERT(sol)	do {				\
	mtx_assert(&(sol)->so_lock, MA_NOTOWNED);			\
	KASSERT(SOLISTENING(sol),					\
	    ("%s: %p not listening", __func__, (sol)));			\
} while (0)

/*
 * Socket buffer locks.  These are strongly preferred over SOCKBUF_LOCK(sb)
 * macros, as we are moving towards protocol specific socket buffers.
 */
#define	SOCK_RECVBUF_MTX(so)						\
	(&(so)->so_rcv_mtx)
#define	SOCK_RECVBUF_LOCK(so)						\
	mtx_lock(SOCK_RECVBUF_MTX(so))
#define	SOCK_RECVBUF_UNLOCK(so)						\
	mtx_unlock(SOCK_RECVBUF_MTX(so))
#define	SOCK_RECVBUF_LOCK_ASSERT(so)					\
	mtx_assert(SOCK_RECVBUF_MTX(so), MA_OWNED)
#define	SOCK_RECVBUF_UNLOCK_ASSERT(so)					\
	mtx_assert(SOCK_RECVBUF_MTX(so), MA_NOTOWNED)

#define	SOCK_SENDBUF_MTX(so)						\
	(&(so)->so_snd_mtx)
#define	SOCK_SENDBUF_LOCK(so)						\
	mtx_lock(SOCK_SENDBUF_MTX(so))
#define	SOCK_SENDBUF_UNLOCK(so)						\
	mtx_unlock(SOCK_SENDBUF_MTX(so))
#define	SOCK_SENDBUF_LOCK_ASSERT(so)					\
	mtx_assert(SOCK_SENDBUF_MTX(so), MA_OWNED)
#define	SOCK_SENDBUF_UNLOCK_ASSERT(so)					\
	mtx_assert(SOCK_SENDBUF_MTX(so), MA_NOTOWNED)

/* Direction-parameterized variants: 'which' selects recv vs. send buffer. */
#define	SOCK_BUF_LOCK(so, which)					\
	mtx_lock(soeventmtx(so, which))
#define	SOCK_BUF_UNLOCK(so, which)					\
	mtx_unlock(soeventmtx(so, which))
#define	SOCK_BUF_LOCK_ASSERT(so, which)					\
	mtx_assert(soeventmtx(so, which), MA_OWNED)
#define	SOCK_BUF_UNLOCK_ASSERT(so, which)				\
	mtx_assert(soeventmtx(so, which), MA_NOTOWNED)

/* Map an sb_which selector to the corresponding socket buffer. */
static inline struct sockbuf *
sobuf(struct socket *so, const sb_which which)
{
	return (which == SO_RCV ? &so->so_rcv : &so->so_snd);
}

/* Map an sb_which selector to the corresponding socket buffer mutex. */
static inline struct mtx *
soeventmtx(struct socket *so, const sb_which which)
{
	return (which == SO_RCV ? SOCK_RECVBUF_MTX(so) : SOCK_SENDBUF_MTX(so));
}

/*
 * Macros for sockets and socket buffering.
 */

/*
 * Flags to soiolock().
 */
#define	SBL_WAIT	0x00000001	/* Wait if not immediately available. */
#define	SBL_NOINTR	0x00000002	/* Force non-interruptible sleep. */
#define	SBL_VALID	(SBL_WAIT | SBL_NOINTR)

/* MSG_DONTWAIT in the caller's flags suppresses sleeping for the I/O lock. */
#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? 0 : SBL_WAIT)

/*
 * Sleepable I/O serialization locks: prevent interleaving of concurrent
 * send(2)/recv(2) calls on the same socket (see so_snd_sx/so_rcv_sx).
 */
#define	SOCK_IO_SEND_LOCK(so, flags)					\
	soiolock((so), &(so)->so_snd_sx, (flags))
#define	SOCK_IO_SEND_UNLOCK(so)						\
	soiounlock(&(so)->so_snd_sx)
#define	SOCK_IO_SEND_OWNED(so)	sx_xlocked(&(so)->so_snd_sx)
#define	SOCK_IO_RECV_LOCK(so, flags)					\
	soiolock((so), &(so)->so_rcv_sx, (flags))
#define	SOCK_IO_RECV_UNLOCK(so)						\
	soiounlock(&(so)->so_rcv_sx)
#define	SOCK_IO_RECV_OWNED(so)	sx_xlocked(&(so)->so_rcv_sx)

/* do we have to send all at once on a socket? */
#define	sosendallatonce(so)						\
	((so)->so_proto->pr_flags & PR_ATOMIC)

/* can we read something from so? */
#define	soreadabledata(so)						\
	(sbavail(&(so)->so_rcv) >= (so)->so_rcv.sb_lowat ||		\
	    (so)->so_error || (so)->so_rerror)
#define	soreadable(so)							\
	(soreadabledata(so) || ((so)->so_rcv.sb_state & SBS_CANTRCVMORE))

/* can we write something to so? */
#define	sowriteable(so)							\
	((sbspace(&(so)->so_snd) >= (so)->so_snd.sb_lowat &&		\
	    (((so)->so_state&SS_ISCONNECTED) ||				\
	    ((so)->so_proto->pr_flags&PR_CONNREQUIRED)==0)) ||		\
	    ((so)->so_snd.sb_state & SBS_CANTSENDMORE) ||		\
	    (so)->so_error)

/*
 * soref()/sorele() ref-count the socket structure.
 * soref() may be called without owning socket lock, but in that case a
 * caller must own something that holds socket, and so_count must be not 0.
 * Note that you must still explicitly close the socket, but the last ref
 * count will free the structure.
 */
#define	soref(so)	refcount_acquire(&(so)->so_count)
#define	sorele(so) do {							\
	SOCK_UNLOCK_ASSERT(so);						\
	if (!refcount_release_if_not_last(&(so)->so_count)) {		\
		SOCK_LOCK(so);						\
		sorele_locked(so);					\
	}								\
} while (0)

/*
 * In sorwakeup() and sowwakeup(), acquire the socket buffer lock to
 * avoid a non-atomic test-and-wakeup.  However, sowakeup is
 * responsible for releasing the lock if it is called.  We unlock only
 * if we don't call into sowakeup.  If any code is introduced that
 * directly invokes the underlying sowakeup() primitives, it must
 * maintain the same semantics.
 */
#define	sorwakeup(so) do {						\
	SOCK_RECVBUF_LOCK(so);						\
	sorwakeup_locked(so);						\
} while (0)

#define	sowwakeup(so) do {						\
	SOCK_SENDBUF_LOCK(so);						\
	sowwakeup_locked(so);						\
} while (0)

/*
 * Registration record for an accept filter; see accept_filter(9) and
 * ACCEPT_FILTER_DEFINE() below for how instances are declared.
 */
struct accept_filter {
	char	accf_name[16];
	int	(*accf_callback)
		(struct socket *so, void *arg, int waitflag);
	void *	(*accf_create)
		(struct socket *so, char *arg);
	void	(*accf_destroy)
		(struct socket *so);
	SLIST_ENTRY(accept_filter) accf_next;
};

/*
 * Convenience macro declaring an accept filter together with the module
 * glue (moduledata_t, DECLARE_MODULE, MODULE_VERSION) needed to load it.
 */
#define	ACCEPT_FILTER_DEFINE(modname, filtname, cb, create, destroy, ver) \
	static struct accept_filter modname##_filter = {		\
		.accf_name = filtname,					\
		.accf_callback = cb,					\
		.accf_create = create,					\
		.accf_destroy = destroy,				\
	};								\
	static moduledata_t modname##_mod = {				\
		.name = __XSTRING(modname),				\
		.evhand = accept_filt_generic_mod_event,		\
		.priv = &modname##_filter,				\
	};								\
	DECLARE_MODULE(modname, modname##_mod, SI_SUB_DRIVERS,		\
	    SI_ORDER_MIDDLE);						\
	MODULE_VERSION(modname, ver)

#ifdef MALLOC_DECLARE
MALLOC_DECLARE(M_ACCF);
MALLOC_DECLARE(M_PCB);
MALLOC_DECLARE(M_SONAME);
#endif

/*
 * Socket specific helper hook point identifiers
 * Do not leave holes in the sequence, hook registration is a loop.
 */
#define	HHOOK_SOCKET_OPT		0
#define	HHOOK_SOCKET_CREATE		1
#define	HHOOK_SOCKET_RCV		2
#define	HHOOK_SOCKET_SND		3
#define	HHOOK_FILT_SOREAD		4
#define	HHOOK_FILT_SOWRITE		5
#define	HHOOK_SOCKET_CLOSE		6
#define	HHOOK_SOCKET_NEWCONN		7
#define	HHOOK_SOCKET_LAST		HHOOK_SOCKET_NEWCONN

/* Argument bundle passed to socket helper hooks. */
struct socket_hhook_data {
	struct socket	*so;
	struct mbuf	*m;
	void		*hctx;		/* hook point specific data */
	int		status;
};

extern int	maxsockets;
extern u_long	sb_max;
extern so_gen_t so_gencnt;

struct file;
struct filecaps;
struct filedesc;
struct mbuf;
struct sockaddr;
struct ucred;
struct uio;
enum shutdown_how;

/* Return values for socket upcalls. */
#define	SU_OK		0
#define	SU_ISCONNECTED	1

/*
 * From uipc_socket and friends
 */
int	getsockaddr(struct sockaddr **namp, const struct sockaddr *uaddr,
	    size_t len);
int	getsock_cap(struct thread *td, int fd, cap_rights_t *rightsp,
	    struct file **fpp, struct filecaps *havecaps);
int	getsock(struct thread *td, int fd, cap_rights_t *rightsp,
	    struct file **fpp);
void	soabort(struct socket *so);
int	soaccept(struct socket *so, struct sockaddr *sa);
int	sopeeraddr(struct socket *so, struct sockaddr *sa);
int	sosockaddr(struct socket *so, struct sockaddr *sa);
void	soaio_enqueue(struct task *task);
void	soaio_rcv(void *context, int pending);
void	soaio_snd(void *context, int pending);
int	socheckuid(struct socket *so, uid_t uid);
int	sobind(struct socket *so, struct sockaddr *nam, struct thread *td);
int	sobindat(int fd, struct socket *so, struct sockaddr *nam,
	    struct thread *td);
int	soclose(struct socket *so);
int	soconnect(struct socket *so, struct sockaddr *nam, struct thread *td);
int	soconnectat(int fd, struct socket *so, struct sockaddr *nam,
	    struct thread *td);
int	soconnect2(struct socket *so1, struct socket *so2);
int	socreate(int dom, struct socket **aso, int type, int proto,
	    struct ucred *cred, struct thread *td);
int	sodisconnect(struct socket *so);
void	sodtor_set(struct socket *, so_dtor_t *);
struct	sockaddr *sodupsockaddr(const struct sockaddr *sa, int mflags);
void	sohasoutofband(struct socket *so);
int	solisten(struct socket *so, int backlog, struct thread *td);
void	solisten_proto(struct socket *so, int backlog);
void	solisten_proto_abort(struct socket *so);
int	solisten_proto_check(struct socket *so);
bool	solisten_enqueue(struct socket *, int);
int	solisten_dequeue(struct socket *, struct socket **, int);
struct socket *
	solisten_clone(struct socket *);
struct socket *
	sonewconn(struct socket *head, int connstatus);
struct socket *
	sopeeloff(struct socket *);
int	sopoll(struct socket *so, int events, struct ucred *active_cred,
	    struct thread *td);
int	sopoll_generic(struct socket *so, int events,
	    struct ucred *active_cred, struct thread *td);
int	soreceive(struct socket *so, struct sockaddr **paddr, struct uio *uio,
	    struct mbuf **mp0, struct mbuf **controlp, int *flagsp);
int	soreceive_stream(struct socket *so, struct sockaddr **paddr,
	    struct uio *uio, struct mbuf **mp0, struct mbuf **controlp,
	    int *flagsp);
int	soreceive_dgram(struct socket *so, struct sockaddr **paddr,
	    struct uio *uio, struct mbuf **mp0, struct mbuf **controlp,
	    int *flagsp);
int	soreceive_generic(struct socket *so, struct sockaddr **paddr,
	    struct uio *uio, struct mbuf **mp0, struct mbuf **controlp,
	    int *flagsp);
void	sorele_locked(struct socket *so);
void	sodealloc(struct socket *);
int	soreserve(struct socket *so, u_long sndcc, u_long rcvcc);
void	sorflush(struct socket *so);
int	sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
	    struct mbuf *top, struct mbuf *control, int flags,
	    struct thread *td);
int	sousrsend(struct socket *so, struct sockaddr *addr, struct uio *uio,
	    struct mbuf *control, int flags, struct proc *);
int	sosend_dgram(struct socket *so, struct sockaddr *addr,
	    struct uio *uio, struct mbuf *top, struct mbuf *control,
	    int flags, struct thread *td);
int	sosend_generic(struct socket *so, struct sockaddr *addr,
	    struct uio *uio, struct mbuf *top, struct mbuf *control,
	    int flags, struct thread *td);
int	soshutdown(struct socket *so, enum shutdown_how);
void	soupcall_clear(struct socket *, sb_which);
void	soupcall_set(struct socket *, sb_which, so_upcall_t, void *);
void	solisten_upcall_set(struct socket *, so_upcall_t, void *);
void	sorwakeup_locked(struct socket *);
void	sowwakeup_locked(struct socket *);
void	sowakeup_aio(struct socket *, sb_which);
void	solisten_wakeup(struct socket *);
int	selsocket(struct socket *so, int events, struct timeval *tv,
	    struct thread *td);
void	soisconnected(struct socket *so);
void	soisconnecting(struct socket *so);
void	soisdisconnected(struct socket *so);
void	soisdisconnecting(struct socket *so);
void	socantrcvmore(struct socket *so);
void	socantrcvmore_locked(struct socket *so);
void	socantsendmore(struct socket *so);
void	socantsendmore_locked(struct socket *so);
void	soroverflow(struct socket *so);
void	soroverflow_locked(struct socket *so);
int	soiolock(struct socket *so, struct sx *sx, int flags);
void	soiounlock(struct sx *sx);

/*
 * Accept filter functions (duh).
 */
int	accept_filt_add(struct accept_filter *filt);
int	accept_filt_del(char *name);
struct	accept_filter *accept_filt_get(char *name);
#ifdef ACCEPT_FILTER_MOD
#ifdef SYSCTL_DECL
SYSCTL_DECL(_net_inet_accf);
#endif
int	accept_filt_generic_mod_event(module_t mod, int event, void *data);
#endif

#endif /* _KERNEL */

/*
 * Structure to export socket from kernel to utilities, via sysctl(3).
 */
struct xsocket {
	ksize_t		xso_len;	/* length of this structure */
	kvaddr_t	xso_so;		/* kernel address of struct socket */
	kvaddr_t	so_pcb;		/* kernel address of struct inpcb */
	uint64_t	so_oobmark;
	/* so_spare64/so_spare32 look reserved for future ABI growth —
	 * NOTE(review): confirm before repurposing. */
	int64_t		so_spare64[8];
	int32_t		xso_protocol;
	int32_t		xso_family;
	uint32_t	so_qlen;
	uint32_t	so_incqlen;
	uint32_t	so_qlimit;
	pid_t		so_pgid;
	uid_t		so_uid;
	int32_t		so_spare32[8];
	int16_t		so_type;
	int16_t		so_options;
	int16_t		so_linger;
	int16_t		so_state;
	int16_t		so_timeo;
	uint16_t	so_error;
	/* Fixed-width snapshot of a socket buffer, one each for rcv/snd. */
	struct xsockbuf {
		uint32_t	sb_cc;
		uint32_t	sb_hiwat;
		uint32_t	sb_mbcnt;
		uint32_t	sb_spare0;	/* was sb_mcnt */
		uint32_t	sb_spare1;	/* was sb_ccnt */
		uint32_t	sb_mbmax;
		int32_t		sb_lowat;
		int32_t		sb_timeo;
		int16_t		sb_flags;
	} so_rcv, so_snd;
};

#ifdef _KERNEL
/* Fill an xsocket/xsockbuf snapshot from the live kernel structures. */
void	sotoxsocket(struct socket *so, struct xsocket *xso);
void	sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb);
#endif

/*
 * Socket buffer state bits.  Exported via libprocstat(3).
 */
#define	SBS_CANTSENDMORE	0x0010	/* can't send more data to peer */
#define	SBS_CANTRCVMORE		0x0020	/* can't receive more data from peer */
#define	SBS_RCVATMARK		0x0040	/* at mark on input */

#endif /* !_SYS_SOCKETVAR_H_ */