/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)socketvar.h	8.3 (Berkeley) 2/19/95
 */

#ifndef _SYS_SOCKETVAR_H_
#define _SYS_SOCKETVAR_H_

/*
 * Socket generation count type.  Also used in xinpcb, xtcpcb, xunpcb.
 */
typedef uint64_t so_gen_t;

#if defined(_KERNEL) || defined(_WANT_SOCKET)
#include <sys/queue.h>			/* for TAILQ macros */
#include <sys/selinfo.h>		/* for struct selinfo */
#include <sys/_lock.h>
#include <sys/_mutex.h>
#include <sys/osd.h>
#include <sys/_sx.h>
#include <sys/sockbuf.h>
#include <sys/_task.h>
#ifdef _KERNEL
#include <sys/caprights.h>
#include <sys/sockopt.h>
#else
#include <stdbool.h>
#endif

struct vnet;

/*
 * Kernel structure per socket.
 * Contains send and receive buffer queues,
 * handle on protocol and pointer to protocol
 * private data and error information.
 */
/* Upcall invoked on socket events; returns one of the SU_* codes below. */
typedef	int so_upcall_t(struct socket *, void *, int);
/* Optional per-socket destructor, see sodtor_set(). */
typedef	void so_dtor_t(struct socket *);

struct socket;

/* Which accept queue, if any, a socket currently sits on. */
enum socket_qstate {
	SQ_NONE = 0,
	SQ_INCOMP = 0x0800,	/* on sol_incomp */
	SQ_COMP = 0x1000,	/* on sol_comp */
};


/*
 * State for a socket splice: data arriving on 'src' is forwarded to
 * 'dst' in the kernel (see so_splice_dispatch() and the isspliced()/
 * issplicedback() tests below).
 */
struct so_splice {
	struct socket	*src;
	struct socket	*dst;
	off_t		max;		/* maximum bytes to splice, or -1 */
	struct mtx	mtx;
	unsigned int	wq_index;
	enum so_splice_state {
		SPLICE_IDLE,		/* waiting for work to arrive */
		SPLICE_QUEUED,		/* a wakeup has queued some work */
		SPLICE_RUNNING,		/* currently transferring data */
		SPLICE_CLOSING,		/* waiting for work to drain */
		SPLICE_CLOSED,		/* unsplicing, terminal state */
		SPLICE_EXCEPTION,	/* I/O error or limit, implicit unsplice */
	} state;
	struct timeout_task timeout;
	STAILQ_ENTRY(so_splice) next;
};

/*-
 * Locking key to struct socket:
 * (a) constant after allocation, no locking required.
 * (b) locked by SOCK_LOCK(so).
 * (cr) locked by SOCK_RECVBUF_LOCK(so)
 * (cs) locked by SOCK_SENDBUF_LOCK(so)
 * (e) locked by SOLISTEN_LOCK() of corresponding listening socket.
 * (f) not locked since integer reads/writes are atomic.
 * (g) used only as a sleep/wakeup address, no value.
 * (h) locked by global mutex so_global_mtx.
 * (ir,is) locked by recv or send I/O locks.
 * (k) locked by KTLS workqueue mutex
 */
TAILQ_HEAD(accept_queue, socket);
struct socket {
	struct mtx so_lock;
	volatile u_int so_count;	/* (b / refcount) */
	struct selinfo so_rdsel;	/* (b/cr) for so_rcv/so_comp */
	struct selinfo so_wrsel;	/* (b/cs) for so_snd */
	int so_options;			/* (b) from socket call, see socket.h */
	short so_type;			/* (a) generic type, see socket.h */
	short so_state;			/* (b) internal state flags SS_* */
	void *so_pcb;			/* protocol control block */
	struct vnet *so_vnet;		/* (a) network stack instance */
	struct protosw *so_proto;	/* (a) protocol handle */
	short so_linger;		/* time to linger close(2) */
	short so_timeo;			/* (g) connection timeout */
	u_short so_error;		/* (f) error affecting connection */
	u_short so_rerror;		/* (f) error affecting connection */
	struct sigio *so_sigio;		/* [sg] information for async I/O or
					   out of band data (SIGURG) */
	struct ucred *so_cred;		/* (a) user credentials */
	struct label *so_label;		/* (b) MAC label for socket */
	/* NB: generation count must not be first. */
	so_gen_t so_gencnt;		/* (h) generation count */
	void *so_emuldata;		/* (b) private data for emulators */
	so_dtor_t *so_dtor;		/* (b) optional destructor */
	struct osd osd;			/* Object Specific extensions */
	/*
	 * so_fibnum, so_user_cookie and friends can be used to attach
	 * some user-specified metadata to a socket, which then can be
	 * used by the kernel for various actions.
	 * so_user_cookie is used by ipfw/dummynet.
	 */
	int so_fibnum;			/* routing domain for this socket */
	uint32_t so_user_cookie;

	int so_ts_clock;		/* type of the clock used for timestamps */
	uint32_t so_max_pacing_rate;	/* (f) TX rate limit in bytes/s */
	struct so_splice *so_splice;	/* (b) splice state for sink */
	struct so_splice *so_splice_back; /* (b) splice state for source */
	off_t so_splice_sent;		/* (ir) splice bytes sent so far */

	/*
	 * Mutexes to prevent interleaving of socket I/O.  These have to be
	 * outside of the socket buffers in order to interlock with listen(2).
	 */
	struct sx so_snd_sx __aligned(CACHE_LINE_SIZE);
	struct mtx so_snd_mtx;

	struct sx so_rcv_sx __aligned(CACHE_LINE_SIZE);
	struct mtx so_rcv_mtx;

	union {
		/* Regular (data flow) socket. */
		struct {
			/* (cr, cs) Receive and send buffers. */
			struct sockbuf		so_rcv, so_snd;

			/* (e) Our place on accept queue. */
			TAILQ_ENTRY(socket)	so_list;
			struct socket		*so_listen;	/* (b) */
			enum socket_qstate so_qstate;		/* (b) */
			/* (b) cached MAC label for peer */
			struct label		*so_peerlabel;
			u_long	so_oobmark;	/* chars to oob mark */

			/* (k) Our place on KTLS RX work queue. */
			STAILQ_ENTRY(socket)	so_ktls_rx_list;
		};
		/*
		 * Listening socket, where accepts occur, is so_listen in all
		 * subsidiary sockets.  If so_listen is NULL, socket is not
		 * related to an accept.  For a listening socket itself
		 * sol_incomp queues partially completed connections, while
		 * sol_comp is a queue of connections ready to be accepted.
		 * If a connection is aborted and it has so_listen set, then
		 * it has to be pulled out of either sol_incomp or sol_comp.
		 * We allow connections to queue up based on current queue
		 * lengths and limit on number of queued connections for this
		 * socket.
		 */
		struct {
			/* (e) queue of partial unaccepted connections */
			struct accept_queue	sol_incomp;
			/* (e) queue of complete unaccepted connections */
			struct accept_queue	sol_comp;
			u_int	sol_qlen;	/* (e) sol_comp length */
			u_int	sol_incqlen;	/* (e) sol_incomp length */
			u_int	sol_qlimit;	/* (e) queue limit */

			/* accept_filter(9) optional data */
			struct accept_filter	*sol_accept_filter;
			void	*sol_accept_filter_arg;	/* saved filter args */
			char	*sol_accept_filter_str;	/* saved user args */

			/* Optional upcall, for kernel socket. */
			so_upcall_t	*sol_upcall;	/* (e) */
			void		*sol_upcallarg;	/* (e) */

			/*
			 * Socket buffer parameters, to be copied to
			 * dataflow sockets, accepted from this one.
			 */
			int	sol_sbrcv_lowat;
			int	sol_sbsnd_lowat;
			u_int	sol_sbrcv_hiwat;
			u_int	sol_sbsnd_hiwat;
			short	sol_sbrcv_flags;
			short	sol_sbsnd_flags;
			sbintime_t sol_sbrcv_timeo;
			sbintime_t sol_sbsnd_timeo;

			/* Information tracking listen queue overflows. */
			struct timeval	sol_lastover;	/* (e) */
			int		sol_overcount;	/* (e) */
		};
	};
};
#endif	/* defined(_KERNEL) || defined(_WANT_SOCKET) */

/*
 * Socket state bits.
 *
 * Historically, these bits were all kept in the so_state field.
 * They are now split into separate, lock-specific fields.
 * so_state maintains basic socket state protected by the socket lock.
 * so_qstate holds information about the socket accept queues.
 * Each socket buffer also has a state field holding information
 * relevant to that socket buffer (can't send, rcv).
 * Many fields will be read without locks to improve performance and avoid
 * lock order issues.  However, this approach must be used with caution.
 */
#define	SS_ISCONNECTED		0x0002	/* socket connected to a peer */
#define	SS_ISCONNECTING		0x0004	/* in process of connecting to peer */
#define	SS_ISDISCONNECTING	0x0008	/* in process of disconnecting */
#define	SS_NBIO			0x0100	/* non-blocking ops */
#define	SS_ASYNC		0x0200	/* async i/o notify */
#define	SS_ISCONFIRMING		0x0400	/* deciding to accept connection req */
#define	SS_ISDISCONNECTED	0x2000	/* socket disconnected from peer */

#ifdef _KERNEL

/* Per-socket mutex protecting the (b) fields of struct socket. */
#define	SOCK_MTX(so)		(&(so)->so_lock)
#define	SOCK_LOCK(so)		mtx_lock(&(so)->so_lock)
#define	SOCK_OWNED(so)		mtx_owned(&(so)->so_lock)
#define	SOCK_UNLOCK(so)		mtx_unlock(&(so)->so_lock)
#define	SOCK_LOCK_ASSERT(so)	mtx_assert(&(so)->so_lock, MA_OWNED)
#define	SOCK_UNLOCK_ASSERT(so)	mtx_assert(&(so)->so_lock, MA_NOTOWNED)

/*
 * Listening-socket variants of the lock macros: same underlying so_lock,
 * but they also assert that the socket really is in listening state.
 */
#define	SOLISTENING(sol)	(((sol)->so_options & SO_ACCEPTCONN) != 0)
#define	SOLISTEN_LOCK(sol)	do {					\
	mtx_lock(&(sol)->so_lock);					\
	KASSERT(SOLISTENING(sol),					\
	    ("%s: %p not listening", __func__, (sol)));			\
} while (0)
#define	SOLISTEN_TRYLOCK(sol)	mtx_trylock(&(sol)->so_lock)
#define	SOLISTEN_UNLOCK(sol)	do {					\
	KASSERT(SOLISTENING(sol),					\
	    ("%s: %p not listening", __func__, (sol)));			\
	mtx_unlock(&(sol)->so_lock);					\
} while (0)
#define	SOLISTEN_LOCK_ASSERT(sol)	do {				\
	mtx_assert(&(sol)->so_lock, MA_OWNED);				\
	KASSERT(SOLISTENING(sol),					\
	    ("%s: %p not listening", __func__, (sol)));			\
} while (0)
#define	SOLISTEN_UNLOCK_ASSERT(sol)	do {				\
	mtx_assert(&(sol)->so_lock, MA_NOTOWNED);			\
	KASSERT(SOLISTENING(sol),					\
	    ("%s: %p not listening", __func__, (sol)));			\
} while (0)

/*
 * Socket buffer locks.  These are strongly preferred over SOCKBUF_LOCK(sb)
 * macros, as we are moving towards protocol specific socket buffers.
 */
#define	SOCK_RECVBUF_MTX(so)						\
	(&(so)->so_rcv_mtx)
#define	SOCK_RECVBUF_LOCK(so)						\
	mtx_lock(SOCK_RECVBUF_MTX(so))
#define	SOCK_RECVBUF_UNLOCK(so)						\
	mtx_unlock(SOCK_RECVBUF_MTX(so))
#define	SOCK_RECVBUF_LOCK_ASSERT(so)					\
	mtx_assert(SOCK_RECVBUF_MTX(so), MA_OWNED)
#define	SOCK_RECVBUF_UNLOCK_ASSERT(so)					\
	mtx_assert(SOCK_RECVBUF_MTX(so), MA_NOTOWNED)

#define	SOCK_SENDBUF_MTX(so)						\
	(&(so)->so_snd_mtx)
#define	SOCK_SENDBUF_LOCK(so)						\
	mtx_lock(SOCK_SENDBUF_MTX(so))
#define	SOCK_SENDBUF_UNLOCK(so)						\
	mtx_unlock(SOCK_SENDBUF_MTX(so))
#define	SOCK_SENDBUF_LOCK_ASSERT(so)					\
	mtx_assert(SOCK_SENDBUF_MTX(so), MA_OWNED)
#define	SOCK_SENDBUF_UNLOCK_ASSERT(so)					\
	mtx_assert(SOCK_SENDBUF_MTX(so), MA_NOTOWNED)

/* Generic variants selecting the buffer via an sb_which argument. */
#define	SOCK_BUF_LOCK(so, which)					\
	mtx_lock(soeventmtx(so, which))
#define	SOCK_BUF_UNLOCK(so, which)					\
	mtx_unlock(soeventmtx(so, which))
#define	SOCK_BUF_LOCK_ASSERT(so, which)					\
	mtx_assert(soeventmtx(so, which), MA_OWNED)
#define	SOCK_BUF_UNLOCK_ASSERT(so, which)				\
	mtx_assert(soeventmtx(so, which), MA_NOTOWNED)

312static inline struct sockbuf *
313sobuf(struct socket *so, const sb_which which)
314{
315 return (which == SO_RCV ? &so->so_rcv : &so->so_snd);
316}
317
318static inline struct mtx *
319soeventmtx(struct socket *so, const sb_which which)
320{
321 return (which == SO_RCV ? SOCK_RECVBUF_MTX(so) : SOCK_SENDBUF_MTX(so));
322}
323
/*
 * Macros for sockets and socket buffering.
 */

/*
 * isspliced(so): 'so' is the source side of an active splice (received
 * data is diverted to so_splice->dst).  issplicedback(so): 'so' is the
 * sink side.  The macro argument is parenthesized so that expansion is
 * safe for any expression callers pass in.
 */
#define	isspliced(so)		((so)->so_splice != NULL &&		\
				    (so)->so_splice->src != NULL)
#define	issplicedback(so)	((so)->so_splice_back != NULL &&	\
				    (so)->so_splice_back->dst != NULL)
/*
 * Flags to soiolock().
 */
#define	SBL_WAIT	0x00000001	/* Wait if not immediately available. */
#define	SBL_NOINTR	0x00000002	/* Force non-interruptible sleep. */
#define	SBL_VALID	(SBL_WAIT | SBL_NOINTR)

/* Translate MSG_DONTWAIT into the matching soiolock() flag. */
#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? 0 : SBL_WAIT)

/*
 * I/O serialization locks (sx) preventing interleaved send/receive on a
 * socket; see the so_snd_sx/so_rcv_sx members of struct socket.
 */
#define	SOCK_IO_SEND_LOCK(so, flags)					\
	soiolock((so), &(so)->so_snd_sx, (flags))
#define	SOCK_IO_SEND_UNLOCK(so)						\
	soiounlock(&(so)->so_snd_sx)
#define	SOCK_IO_SEND_OWNED(so)	sx_xlocked(&(so)->so_snd_sx)
#define	SOCK_IO_SEND_ASSERT_LOCKED(so)					\
	sx_assert(&(so)->so_snd_sx, SA_XLOCKED)
#define	SOCK_IO_RECV_LOCK(so, flags)					\
	soiolock((so), &(so)->so_rcv_sx, (flags))
#define	SOCK_IO_RECV_UNLOCK(so)						\
	soiounlock(&(so)->so_rcv_sx)
#define	SOCK_IO_RECV_OWNED(so)	sx_xlocked(&(so)->so_rcv_sx)
#define	SOCK_IO_RECV_ASSERT_LOCKED(so)					\
	sx_assert(&(so)->so_rcv_sx, SA_XLOCKED)

/* do we have to send all at once on a socket? */
#define	sosendallatonce(so)						\
	((so)->so_proto->pr_flags & PR_ATOMIC)

/* can we read something from so? */
#define	soreadabledata(so)						\
	(sbavail(&(so)->so_rcv) >= (so)->so_rcv.sb_lowat ||		\
	    (so)->so_error || (so)->so_rerror)
#define	_soreadable(so)							\
	(soreadabledata(so) || ((so)->so_rcv.sb_state & SBS_CANTRCVMORE))

368static inline bool
369soreadable(struct socket *so)
370{
371 if (isspliced(so))
372 return (false);
373 return (_soreadable(so));
374}
375
/* can we write something to so? */
#define	sowriteable(so)							\
	((sbspace(&(so)->so_snd) >= (so)->so_snd.sb_lowat &&		\
	    (((so)->so_state&SS_ISCONNECTED) ||				\
	    ((so)->so_proto->pr_flags&PR_CONNREQUIRED)==0)) ||		\
	    ((so)->so_snd.sb_state & SBS_CANTSENDMORE) ||		\
	    (so)->so_error)

/*
 * soref()/sorele() ref-count the socket structure.
 * soref() may be called without owning socket lock, but in that case a
 * caller must own something that holds socket, and so_count must be not 0.
 * Note that you must still explicitly close the socket, but the last ref
 * count will free the structure.
 */
#define	soref(so)	refcount_acquire(&(so)->so_count)
#define	sorele(so) do {							\
	SOCK_UNLOCK_ASSERT(so);						\
	if (!refcount_release_if_not_last(&(so)->so_count)) {		\
		SOCK_LOCK(so);						\
		sorele_locked(so);					\
	}								\
} while (0)

/*
 * In sorwakeup() and sowwakeup(), acquire the socket buffer lock to
 * avoid a non-atomic test-and-wakeup.  However, sowakeup is
 * responsible for releasing the lock if it is called.  We unlock only
 * if we don't call into sowakeup.  If any code is introduced that
 * directly invokes the underlying sowakeup() primitives, it must
 * maintain the same semantics.
 */
#define	sorwakeup(so) do {						\
	SOCK_RECVBUF_LOCK(so);						\
	sorwakeup_locked(so);						\
} while (0)

#define	sowwakeup(so) do {						\
	SOCK_SENDBUF_LOCK(so);						\
	sowwakeup_locked(so);						\
} while (0)

/*
 * Registered accept filter, see accept_filter(9).  Callbacks run on a
 * listening socket's pending connections before they become acceptable.
 */
struct accept_filter {
	char	accf_name[16];
	int	(*accf_callback)
		(struct socket *so, void *arg, int waitflag);
	void	*(*accf_create)
		(struct socket *so, char *arg);
	void	(*accf_destroy)
		(struct socket *so);
	SLIST_ENTRY(accept_filter) accf_next;
};

/*
 * Declare and register an accept filter as a kernel module in one step.
 */
#define	ACCEPT_FILTER_DEFINE(modname, filtname, cb, create, destroy, ver) \
	static struct accept_filter modname##_filter = {		\
		.accf_name = filtname,					\
		.accf_callback = cb,					\
		.accf_create = create,					\
		.accf_destroy = destroy,				\
	};								\
	static moduledata_t modname##_mod = {				\
		.name = __XSTRING(modname),				\
		.evhand = accept_filt_generic_mod_event,		\
		.priv = &modname##_filter,				\
	};								\
	DECLARE_MODULE(modname, modname##_mod, SI_SUB_DRIVERS,		\
	    SI_ORDER_MIDDLE);						\
	MODULE_VERSION(modname, ver)

#ifdef MALLOC_DECLARE
MALLOC_DECLARE(M_ACCF);
MALLOC_DECLARE(M_PCB);
MALLOC_DECLARE(M_SONAME);
#endif

/*
 * Socket specific helper hook point identifiers
 * Do not leave holes in the sequence, hook registration is a loop.
 */
#define	HHOOK_SOCKET_OPT		0
#define	HHOOK_SOCKET_CREATE		1
#define	HHOOK_SOCKET_RCV		2
#define	HHOOK_SOCKET_SND		3
#define	HHOOK_FILT_SOREAD		4
#define	HHOOK_FILT_SOWRITE		5
#define	HHOOK_SOCKET_CLOSE		6
#define	HHOOK_SOCKET_LAST		HHOOK_SOCKET_CLOSE

/* Argument bundle passed to the HHOOK_SOCKET_* helper hooks above. */
struct socket_hhook_data {
	struct socket	*so;
	struct mbuf	*m;
	void		*hctx;		/* hook point specific data */
	int		status;
};

extern int	maxsockets;
extern u_long	sb_max;
extern so_gen_t so_gencnt;

struct file;
struct filecaps;
struct filedesc;
struct mbuf;
struct sockaddr;
struct ucred;
struct uio;

/* Return values for socket upcalls. */
#define	SU_OK		0
#define	SU_ISCONNECTED	1

/*
 * From uipc_socket and friends
 */
int	getsockaddr(struct sockaddr **namp, const struct sockaddr *uaddr,
	    size_t len);
int	getsock_cap(struct thread *td, int fd, cap_rights_t *rightsp,
	    struct file **fpp, struct filecaps *havecaps);
int	getsock(struct thread *td, int fd, cap_rights_t *rightsp,
	    struct file **fpp);
void	soabort(struct socket *so);
int	soaccept(struct socket *so, struct sockaddr **nam);
void	soaio_enqueue(struct task *task);
void	soaio_rcv(void *context, int pending);
void	soaio_snd(void *context, int pending);
int	socheckuid(struct socket *so, uid_t uid);
int	sobind(struct socket *so, struct sockaddr *nam, struct thread *td);
int	sobindat(int fd, struct socket *so, struct sockaddr *nam,
	    struct thread *td);
int	soclose(struct socket *so);
int	soconnect(struct socket *so, struct sockaddr *nam, struct thread *td);
int	soconnectat(int fd, struct socket *so, struct sockaddr *nam,
	    struct thread *td);
int	soconnect2(struct socket *so1, struct socket *so2);
int	socreate(int dom, struct socket **aso, int type, int proto,
	    struct ucred *cred, struct thread *td);
int	sodisconnect(struct socket *so);
void	sodtor_set(struct socket *, so_dtor_t *);
struct	sockaddr *sodupsockaddr(const struct sockaddr *sa, int mflags);
void	sohasoutofband(struct socket *so);
int	solisten(struct socket *so, int backlog, struct thread *td);
void	solisten_proto(struct socket *so, int backlog);
void	solisten_proto_abort(struct socket *so);
int	solisten_proto_check(struct socket *so);
bool	solisten_enqueue(struct socket *, int);
int	solisten_dequeue(struct socket *, struct socket **, int);
struct socket *
	solisten_clone(struct socket *);
struct socket *
	sonewconn(struct socket *head, int connstatus);
struct socket *
	sopeeloff(struct socket *);
int	sopoll(struct socket *so, int events, struct ucred *active_cred,
	    struct thread *td);
int	sopoll_generic(struct socket *so, int events,
	    struct ucred *active_cred, struct thread *td);
int	soreceive(struct socket *so, struct sockaddr **paddr, struct uio *uio,
	    struct mbuf **mp0, struct mbuf **controlp, int *flagsp);
int	soreceive_stream(struct socket *so, struct sockaddr **paddr,
	    struct uio *uio, struct mbuf **mp0, struct mbuf **controlp,
	    int *flagsp);
int	soreceive_dgram(struct socket *so, struct sockaddr **paddr,
	    struct uio *uio, struct mbuf **mp0, struct mbuf **controlp,
	    int *flagsp);
int	soreceive_generic(struct socket *so, struct sockaddr **paddr,
	    struct uio *uio, struct mbuf **mp0, struct mbuf **controlp,
	    int *flagsp);
void	sorele_locked(struct socket *so);
void	sodealloc(struct socket *);
int	soreserve(struct socket *so, u_long sndcc, u_long rcvcc);
void	sorflush(struct socket *so);
int	sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
	    struct mbuf *top, struct mbuf *control, int flags,
	    struct thread *td);
int	sousrsend(struct socket *so, struct sockaddr *addr, struct uio *uio,
	    struct mbuf *control, int flags, struct proc *);
int	sosend_dgram(struct socket *so, struct sockaddr *addr,
	    struct uio *uio, struct mbuf *top, struct mbuf *control,
	    int flags, struct thread *td);
int	sosend_generic(struct socket *so, struct sockaddr *addr,
	    struct uio *uio, struct mbuf *top, struct mbuf *control,
	    int flags, struct thread *td);
int	soshutdown(struct socket *so, int how);
void	soupcall_clear(struct socket *, sb_which);
void	soupcall_set(struct socket *, sb_which, so_upcall_t, void *);
void	solisten_upcall_set(struct socket *, so_upcall_t, void *);
void	sorwakeup_locked(struct socket *);
void	sowwakeup_locked(struct socket *);
void	sowakeup_aio(struct socket *, sb_which);
void	solisten_wakeup(struct socket *);
int	selsocket(struct socket *so, int events, struct timeval *tv,
	    struct thread *td);
void	soisconnected(struct socket *so);
void	soisconnecting(struct socket *so);
void	soisdisconnected(struct socket *so);
void	soisdisconnecting(struct socket *so);
void	socantrcvmore(struct socket *so);
void	socantrcvmore_locked(struct socket *so);
void	socantsendmore(struct socket *so);
void	socantsendmore_locked(struct socket *so);
void	soroverflow(struct socket *so);
void	soroverflow_locked(struct socket *so);
int	soiolock(struct socket *so, struct sx *sx, int flags);
void	soiounlock(struct sx *sx);

/*
 * Socket splicing routines.
 */
void	so_splice_dispatch(struct so_splice *sp);

/*
 * Accept filter functions (duh).
 */
int	accept_filt_add(struct accept_filter *filt);
int	accept_filt_del(char *name);
struct	accept_filter *accept_filt_get(char *name);
#ifdef ACCEPT_FILTER_MOD
#ifdef SYSCTL_DECL
SYSCTL_DECL(_net_inet_accf);
#endif
int	accept_filt_generic_mod_event(module_t mod, int event, void *data);
#endif

#endif /* _KERNEL */

/*
 * Structure to export socket from kernel to utilities, via sysctl(3).
 * NB: this is an ABI visible to userland; field sizes and order must not
 * change incompatibly (spare fields exist for future growth).
 */
struct xsocket {
	ksize_t		xso_len;	/* length of this structure */
	kvaddr_t	xso_so;		/* kernel address of struct socket */
	kvaddr_t	so_pcb;		/* kernel address of struct inpcb */
	uint64_t	so_oobmark;
	kvaddr_t	so_splice_so;	/* kernel address of spliced socket */
	int64_t		so_spare64[7];
	int32_t		xso_protocol;
	int32_t		xso_family;
	uint32_t	so_qlen;
	uint32_t	so_incqlen;
	uint32_t	so_qlimit;
	pid_t		so_pgid;
	uid_t		so_uid;
	int32_t		so_spare32[8];
	int16_t		so_type;
	int16_t		so_options;
	int16_t		so_linger;
	int16_t		so_state;
	int16_t		so_timeo;
	uint16_t	so_error;
	struct xsockbuf {
		uint32_t	sb_cc;
		uint32_t	sb_hiwat;
		uint32_t	sb_mbcnt;
		uint32_t	sb_spare0;	/* was sb_mcnt */
		uint32_t	sb_spare1;	/* was sb_ccnt */
		uint32_t	sb_mbmax;
		int32_t		sb_lowat;
		int32_t		sb_timeo;
		int16_t		sb_flags;
	} so_rcv, so_snd;
};

#ifdef _KERNEL
/* Fill an xsocket/xsockbuf export structure from the kernel object. */
void	sotoxsocket(struct socket *so, struct xsocket *xso);
void	sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb);
#endif

/*
 * Socket buffer state bits.  Exported via libprocstat(3).
 */
#define	SBS_CANTSENDMORE	0x0010	/* can't send more data to peer */
#define	SBS_CANTRCVMORE		0x0020	/* can't receive more data from peer */
#define	SBS_RCVATMARK		0x0040	/* at mark on input */

#endif /* !_SYS_SOCKETVAR_H_ */
650#endif /* !_SYS_SOCKETVAR_H_ */