master
1/*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1986, 1989, 1991, 1993
5 * The Regents of the University of California. All rights reserved.
6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)proc.h 8.15 (Berkeley) 5/19/95
37 */
38
39#ifndef _SYS_PROC_H_
40#define _SYS_PROC_H_
41
42#include <sys/callout.h> /* For struct callout. */
43#include <sys/event.h> /* For struct klist. */
44#ifdef _KERNEL
45#include <sys/_eventhandler.h>
46#endif
47#include <sys/condvar.h>
48#ifndef _KERNEL
49#include <sys/filedesc.h>
50#endif
51#include <sys/queue.h>
52#include <sys/_lock.h>
53#include <sys/lock_profile.h>
54#include <sys/_mutex.h>
55#include <sys/osd.h>
56#include <sys/priority.h>
57#include <sys/rtprio.h> /* XXX. */
58#include <sys/runq.h>
59#include <sys/resource.h>
60#include <sys/sigio.h>
61#include <sys/signal.h>
62#include <sys/signalvar.h>
63#ifndef _KERNEL
64#include <sys/time.h> /* For structs itimerval, timeval. */
65#else
66#include <sys/pcpu.h>
67#include <sys/systm.h>
68#endif
69#include <sys/ucontext.h>
70#include <sys/ucred.h>
71#include <sys/types.h>
72#include <sys/_domainset.h>
73
74#include <machine/proc.h> /* Machine-dependent proc substruct. */
75#ifdef _KERNEL
76#include <machine/cpu.h>
77#endif
78
79/*
80 * One structure allocated per session.
81 *
82 * List of locks
83 * (m) locked by s_mtx mtx
84 * (e) locked by proctree_lock sx
85 * (c) const until freeing
86 */
87struct session {
88 u_int s_count; /* Ref cnt; pgrps in session - atomic. */
89 struct proc *s_leader; /* (m + e) Session leader. */
90 struct vnode *s_ttyvp; /* (m) Vnode of controlling tty. */
91 struct cdev_priv *s_ttydp; /* (m) Device of controlling tty. */
92 struct tty *s_ttyp; /* (e) Controlling tty. */
93 pid_t s_sid; /* (c) Session ID. */
94 /* (m) Setlogin() name: */
95 char s_login[roundup(MAXLOGNAME, sizeof(long))];
96 struct mtx s_mtx; /* Mutex to protect members. */
97};
98
99/*
100 * One structure allocated per process group.
101 *
102 * List of locks
103 * (m) locked by pg_mtx mtx
104 * (e) locked by proctree_lock sx
105 * (c) const until freeing
106 */
107struct pgrp {
108 LIST_ENTRY(pgrp) pg_hash; /* (e) Hash chain. */
109 LIST_HEAD(, proc) pg_members; /* (m + e) Pointer to pgrp members. */
110 struct session *pg_session; /* (c) Pointer to session. */
111 struct sigiolst pg_sigiolst; /* (m) List of sigio sources. */
112 pid_t pg_id; /* (c) Process group id. */
113 struct mtx pg_mtx; /* Mutex to protect members */
114 int pg_flags; /* (m) PGRP_ flags */
115 struct sx pg_killsx; /* Mutual exclusion between group member
116 * fork() and killpg() */
117};
118
119#define PGRP_ORPHANED 0x00000001 /* Group is orphaned */
120
121/*
122 * pargs, used to hold a copy of the command line, if it had a sane length.
123 */
124struct pargs {
125 u_int ar_ref; /* Reference count. */
126 u_int ar_length; /* Length. */
127 u_char ar_args[1]; /* Arguments. */
128};
129
130/*-
131 * Description of a process.
132 *
133 * This structure contains the information needed to manage a thread of
134 * control, known in UN*X as a process; it has references to substructures
135 * containing descriptions of things that the process uses, but may share
136 * with related processes. The process structure and the substructures
137 * are always addressable except for those marked "(CPU)" below,
138 * which might be addressable only on a processor on which the process
139 * is running.
140 *
141 * Below is a key of locks used to protect each member of struct proc. The
142 * lock is indicated by a reference to a specific character in parens in the
143 * associated comment.
144 * * - not yet protected
145 * a - only touched by curproc or parent during fork/wait
146 * b - created at fork, never changes
147 * (exception aiods switch vmspaces, but they are also
148 * marked 'P_SYSTEM' so hopefully it will be left alone)
149 * c - locked by proc mtx
150 * d - locked by allproc_lock lock
151 * e - locked by proctree_lock lock
152 * f - session mtx
153 * g - process group mtx
154 * h - callout_lock mtx
155 * i - by curproc or the master session mtx
156 * j - locked by proc slock
157 * k - only accessed by curthread
158 * k*- only accessed by curthread and from an interrupt
159 * kx- only accessed by curthread and by debugger
160 * l - the attaching proc or attaching proc parent
161 * n - not locked, lazy
162 * o - ktrace lock
163 * q - td_contested lock
164 * r - p_peers lock
165 * s - see sleepq_switch(), sleeping_on_old_rtc(), and sleep(9)
166 * t - thread lock
167 * u - process stat lock
168 * w - process timer lock
169 * x - created at fork, only changes during single threading in exec
170 * y - created at first aio, doesn't change until exit or exec at which
171 * point we are single-threaded and only curthread changes it
172 *
173 * If the locking key specifies two identifiers (for example, p_pptr) then
174 * either lock is sufficient for read access, but both locks must be held
175 * for write access.
176 */
177struct cpuset;
178struct filecaps;
179struct filemon;
180struct kaioinfo;
181struct kaudit_record;
182struct kcov_info;
183struct kdtrace_proc;
184struct kdtrace_thread;
185struct kmsan_td;
186struct kq_timer_cb_data;
187struct mqueue_notifier;
188struct p_sched;
189struct proc;
190struct procdesc;
191struct racct;
192struct sbuf;
193struct sleepqueue;
194struct socket;
195struct td_sched;
196struct thread;
197struct trapframe;
198struct turnstile;
199struct vm_map;
200struct vm_map_entry;
201struct epoch_tracker;
202
203struct syscall_args {
204 u_int code;
205 u_int original_code;
206 struct sysent *callp;
207 register_t args[8];
208};
209
210/*
211 * XXX: Does this belong in resource.h or resourcevar.h instead?
212 * Resource usage extension. The times in rusage structs in the kernel are
213 * never up to date. The actual times are kept as runtimes and tick counts
214 * (with control info in the "previous" times), and are converted when
215 * userland asks for rusage info. Backwards compatibility prevents putting
216 * this directly in the user-visible rusage struct.
217 *
218 * Locking for p_rux: (cu) means (u) for p_rux and (c) for p_crux.
219 * Locking for td_rux: (t) for all fields.
220 */
221struct rusage_ext {
222 uint64_t rux_runtime; /* (cu) Real time. */
223 uint64_t rux_uticks; /* (cu) Statclock hits in user mode. */
224 uint64_t rux_sticks; /* (cu) Statclock hits in sys mode. */
225 uint64_t rux_iticks; /* (cu) Statclock hits in intr mode. */
226 uint64_t rux_uu; /* (c) Previous user time in usec. */
227 uint64_t rux_su; /* (c) Previous sys time in usec. */
228 uint64_t rux_tu; /* (c) Previous total time in usec. */
229};
230
231/*
232 * Kernel runnable context (thread).
233 * This is what is put to sleep and reactivated.
234 * Thread context. Processes may have multiple threads.
235 */
236struct thread {
237 struct mtx *volatile td_lock; /* replaces sched lock */
238 struct proc *td_proc; /* (*) Associated process. */
239 TAILQ_ENTRY(thread) td_plist; /* (*) All threads in this proc. */
240 TAILQ_ENTRY(thread) td_runq; /* (t) Run queue. */
241 union {
242 TAILQ_ENTRY(thread) td_slpq; /* (t) Sleep queue. */
243 struct thread *td_zombie; /* Zombie list linkage */
244 };
245 TAILQ_ENTRY(thread) td_lockq; /* (t) Lock queue. */
246 LIST_ENTRY(thread) td_hash; /* (d) Hash chain. */
247 struct cpuset *td_cpuset; /* (t) CPU affinity mask. */
248 struct domainset_ref td_domain; /* (a) NUMA policy */
249 struct seltd *td_sel; /* Select queue/channel. */
250 struct sleepqueue *td_sleepqueue; /* (k) Associated sleep queue. */
251 struct turnstile *td_turnstile; /* (k) Associated turnstile. */
252 struct rl_q_entry *td_rlqe; /* (k) Associated range lock entry. */
253 struct umtx_q *td_umtxq; /* (c?) Link for when we're blocked. */
254 lwpid_t td_tid; /* (b) Thread ID. */
255 sigqueue_t td_sigqueue; /* (c) Sigs arrived, not delivered. */
256#define td_siglist td_sigqueue.sq_signals
257 u_char td_lend_user_pri; /* (t) Lend user pri. */
258 u_char td_allocdomain; /* (b) NUMA domain backing this struct thread. */
259 u_char td_base_ithread_pri; /* (t) Base ithread pri */
260 struct kmsan_td *td_kmsan; /* (k) KMSAN state */
261
262/* Cleared during fork1(), thread_create(), or kthread_add(). */
263#define td_startzero td_flags
264 int td_flags; /* (t) TDF_* flags. */
265 int td_ast; /* (t) TDA_* indicators */
266 int td_inhibitors; /* (t) Why can not run. */
267 int td_pflags; /* (k) Private thread (TDP_*) flags. */
268 int td_pflags2; /* (k) Private thread (TDP2_*) flags. */
269 int td_dupfd; /* (k) Ret value from fdopen. XXX */
270 int td_sqqueue; /* (t) Sleepqueue queue blocked on. */
271 const void *td_wchan; /* (t) Sleep address. */
272 const char *td_wmesg; /* (t) Reason for sleep. */
273 volatile u_char td_owepreempt; /* (k*) Preempt on last critical_exit */
274 u_char td_tsqueue; /* (t) Turnstile queue blocked on. */
275 u_char td_stopsched; /* (k) Scheduler stopped. */
276 int td_locks; /* (k) Debug: count of non-spin locks */
277 int td_rw_rlocks; /* (k) Count of rwlock read locks. */
278 int td_sx_slocks; /* (k) Count of sx shared locks. */
279 int td_lk_slocks; /* (k) Count of lockmgr shared locks. */
280 struct turnstile *td_blocked; /* (t) Lock thread is blocked on. */
281 const char *td_lockname; /* (t) Name of lock blocked on. */
282 LIST_HEAD(, turnstile) td_contested; /* (q) Contested locks. */
283 struct lock_list_entry *td_sleeplocks; /* (k) Held sleep locks. */
284 int td_intr_nesting_level; /* (k) Interrupt recursion. */
285 int td_pinned; /* (k) Temporary cpu pin count. */
286 struct ucred *td_realucred; /* (k) Reference to credentials. */
287 struct ucred *td_ucred; /* (k) Used credentials, temporarily switchable. */
288 struct plimit *td_limit; /* (k) Resource limits. */
289 int td_slptick; /* (t) Time at sleep. */
290 int td_blktick; /* (t) Time spent blocked. */
291 int td_swvoltick; /* (t) Time at last SW_VOL switch. */
292 int td_swinvoltick; /* (t) Time at last SW_INVOL switch. */
293 u_int td_cow; /* (*) Number of copy-on-write faults */
294 struct rusage td_ru; /* (t) rusage information. */
295 struct rusage_ext td_rux; /* (t) Internal rusage information. */
296 uint64_t td_incruntime; /* (t) Cpu ticks to transfer to proc. */
297 uint64_t td_runtime; /* (t) How many cpu ticks we've run. */
298 u_int td_pticks; /* (t) Statclock hits for profiling */
299 u_int td_sticks; /* (t) Statclock hits in system mode. */
300 u_int td_iticks; /* (t) Statclock hits in intr mode. */
301 u_int td_uticks; /* (t) Statclock hits in user mode. */
302 int td_intrval; /* (t) Return value for sleepq. */
303 sigset_t td_oldsigmask; /* (k) Saved mask from pre sigpause. */
304 volatile u_int td_generation; /* (k) For detection of preemption */
305 stack_t td_sigstk; /* (k) Stack ptr and on-stack flag. */
306 int td_xsig; /* (c) Signal for ptrace */
307 u_long td_profil_addr; /* (k) Temporary addr until AST. */
308 u_int td_profil_ticks; /* (k) Temporary ticks until AST. */
309 char td_name[MAXCOMLEN + 1]; /* (*) Thread name. */
310 struct file *td_fpop; /* (k) file referencing cdev under op */
311 int td_dbgflags; /* (c) Userland debugger flags */
312 siginfo_t td_si; /* (c) For debugger or core file */
313 int td_ng_outbound; /* (k) Thread entered ng from above. */
314 struct osd td_osd; /* (k) Object specific data. */
315 struct vm_map_entry *td_map_def_user; /* (k) Deferred entries. */
316 pid_t td_dbg_forked; /* (c) Child pid for debugger. */
317 struct vnode *td_vp_reserved;/* (k) Preallocated vnode. */
318 u_int td_no_sleeping; /* (k) Sleeping disabled count. */
319 void *td_su; /* (k) FFS SU private */
320 sbintime_t td_sleeptimo; /* (t) Sleep timeout. */
321 int td_rtcgen; /* (s) rtc_generation of abs. sleep */
322 int td_errno; /* (k) Error from last syscall. */
323 size_t td_vslock_sz; /* (k) amount of vslock-ed space */
324 struct kcov_info *td_kcov_info; /* (*) Kernel code coverage data */
325 long td_ucredref; /* (k) references on td_realucred */
326#define td_endzero td_sigmask
327
328/* Copied during fork1(), thread_create(), or kthread_add(). */
329#define td_startcopy td_endzero
330 sigset_t td_sigmask; /* (c) Current signal mask. */
331 u_char td_rqindex; /* (t) Run queue index. */
332 u_char td_base_pri; /* (t) Thread base kernel priority. */
333 u_char td_priority; /* (t) Thread active priority. */
334 u_char td_pri_class; /* (t) Scheduling class. */
335 u_char td_user_pri; /* (t) User pri from estcpu and nice. */
336 u_char td_base_user_pri; /* (t) Base user pri */
337 uintptr_t td_rb_list; /* (k) Robust list head. */
338 uintptr_t td_rbp_list; /* (k) Robust priv list head. */
339 uintptr_t td_rb_inact; /* (k) Current in-action mutex loc. */
340 struct syscall_args td_sa; /* (kx) Syscall parameters. Copied on
341 fork for child tracing. */
342 void *td_sigblock_ptr; /* (k) uptr for fast sigblock. */
343 uint32_t td_sigblock_val; /* (k) fast sigblock value read at
344 td_sigblock_ptr on kern entry */
345#define td_endcopy td_pcb
346
347/*
348 * Fields that must be manually set in fork1(), thread_create(), kthread_add(),
349 * or already have been set in the allocator, constructor, etc.
350 */
351 struct pcb *td_pcb; /* (k) Kernel VA of pcb and kstack. */
352 enum td_states {
353 TDS_INACTIVE = 0x0,
354 TDS_INHIBITED,
355 TDS_CAN_RUN,
356 TDS_RUNQ,
357 TDS_RUNNING
358 } td_state; /* (t) thread state */
359 /* Note: td_state must be accessed using TD_{GET,SET}_STATE(). */
360 union {
361 syscallarg_t tdu_retval[2];
362 off_t tdu_off;
363 } td_uretoff; /* (k) Syscall aux returns. */
364#define td_retval td_uretoff.tdu_retval
365 u_int td_cowgen; /* (k) Generation of COW pointers. */
366 /* LP64 hole */
367 struct callout td_slpcallout; /* (h) Callout for sleep. */
368 struct trapframe *td_frame; /* (k) */
369 vm_offset_t td_kstack; /* (a) Kernel VA of kstack. */
370 int td_kstack_pages; /* (a) Size of the kstack. */
371 volatile u_int td_critnest; /* (k*) Critical section nest level. */
372 struct mdthread td_md; /* (k) Any machine-dependent fields. */
373 struct kaudit_record *td_ar; /* (k) Active audit record, if any. */
374 struct lpohead td_lprof[2]; /* (a) lock profiling objects. */
375 struct kdtrace_thread *td_dtrace; /* (*) DTrace-specific data. */
376 struct vnet *td_vnet; /* (k) Effective vnet. */
377 const char *td_vnet_lpush; /* (k) Debugging vnet push / pop. */
378 struct trapframe *td_intr_frame;/* (k) Frame of the current irq */
379 struct proc *td_rfppwait_p; /* (k) The vforked child */
380 struct vm_page **td_ma; /* (k) uio pages held */
381 int td_ma_cnt; /* (k) size of *td_ma */
382 /* LP64 hole */
383 void *td_emuldata; /* Emulator state data */
384 int td_lastcpu; /* (t) Last cpu we were on. */
385 int td_oncpu; /* (t) Which cpu we are on. */
386 void *td_lkpi_task; /* LinuxKPI task struct pointer */
387 int td_pmcpend;
388 void *td_remotereq; /* (c) dbg remote request. */
389 off_t td_ktr_io_lim; /* (k) limit for ktrace file size */
390#ifdef EPOCH_TRACE
391 SLIST_HEAD(, epoch_tracker) td_epochs;
392#endif
393};
394
395struct thread0_storage {
396 struct thread t0st_thread;
397 uint64_t t0st_sched[10];
398};
399
400struct mtx *thread_lock_block(struct thread *);
401void thread_lock_block_wait(struct thread *);
402void thread_lock_set(struct thread *, struct mtx *);
403void thread_lock_unblock(struct thread *, struct mtx *);
404#define THREAD_LOCK_ASSERT(td, type) \
405 mtx_assert((td)->td_lock, (type))
406
407#define THREAD_LOCK_BLOCKED_ASSERT(td, type) \
408do { \
409 struct mtx *__m = (td)->td_lock; \
410 if (__m != &blocked_lock) \
411 mtx_assert(__m, (type)); \
412} while (0)
413
414#ifdef INVARIANTS
415#define THREAD_LOCKPTR_ASSERT(td, lock) \
416do { \
417 struct mtx *__m; \
418 __m = (td)->td_lock; \
419 KASSERT(__m == (lock), \
420 ("Thread %p lock %p does not match %p", td, __m, (lock))); \
421} while (0)
422
423#define THREAD_LOCKPTR_BLOCKED_ASSERT(td, lock) \
424do { \
425 struct mtx *__m; \
426 __m = (td)->td_lock; \
427 KASSERT(__m == (lock) || __m == &blocked_lock, \
428 ("Thread %p lock %p does not match %p", td, __m, (lock))); \
429} while (0)
430
431#define TD_LOCKS_INC(td) ((td)->td_locks++)
432#define TD_LOCKS_DEC(td) do { \
433 KASSERT(SCHEDULER_STOPPED_TD(td) || (td)->td_locks > 0, \
434 ("Thread %p owns no locks", (td))); \
435 (td)->td_locks--; \
436} while (0)
437#else
438#define THREAD_LOCKPTR_ASSERT(td, lock)
439#define THREAD_LOCKPTR_BLOCKED_ASSERT(td, lock)
440
441#define TD_LOCKS_INC(td)
442#define TD_LOCKS_DEC(td)
443#endif
444
445/*
446 * Flags kept in td_flags:
447 * To change these you MUST have the scheduler lock.
448 */
449#define TDF_BORROWING 0x00000001 /* Thread is borrowing pri from another. */
450#define TDF_INPANIC 0x00000002 /* Caused a panic, let it drive crashdump. */
451#define TDF_INMEM 0x00000004 /* Thread's stack is in memory. */
452#define TDF_SINTR 0x00000008 /* Sleep is interruptible. */
453#define TDF_TIMEOUT 0x00000010 /* Timing out during sleep. */
454#define TDF_IDLETD 0x00000020 /* This is a per-CPU idle thread. */
455#define TDF_CANSWAP 0x00000040 /* Thread can be swapped. */
456#define TDF_SIGWAIT 0x00000080 /* Ignore ignored signals */
457#define TDF_KTH_SUSP 0x00000100 /* kthread is suspended */
458#define TDF_ALLPROCSUSP 0x00000200 /* suspended by SINGLE_ALLPROC */
459#define TDF_BOUNDARY 0x00000400 /* Thread suspended at user boundary */
460#define TDF_UNUSED1 0x00000800 /* Available */
461#define TDF_UNUSED2 0x00001000 /* Available */
462#define TDF_SBDRY 0x00002000 /* Stop only on usermode boundary. */
463#define TDF_UPIBLOCKED 0x00004000 /* Thread blocked on user PI mutex. */
464#define TDF_UNUSED3 0x00008000 /* Available */
465#define TDF_UNUSED4 0x00010000 /* Available */
466#define TDF_UNUSED5 0x00020000 /* Available */
467#define TDF_NOLOAD 0x00040000 /* Ignore during load avg calculations. */
468#define TDF_SERESTART 0x00080000 /* ERESTART on stop attempts. */
469#define TDF_THRWAKEUP 0x00100000 /* Libthr thread must not suspend itself. */
470#define TDF_SEINTR 0x00200000 /* EINTR on stop attempts. */
471#define TDF_SWAPINREQ 0x00400000 /* Swapin request due to wakeup. */
472#define TDF_UNUSED6 0x00800000 /* Available */
473#define TDF_SCHED0 0x01000000 /* Reserved for scheduler private use */
474#define TDF_SCHED1 0x02000000 /* Reserved for scheduler private use */
475#define TDF_SCHED2 0x04000000 /* Reserved for scheduler private use */
476#define TDF_SCHED3 0x08000000 /* Reserved for scheduler private use */
477#define TDF_UNUSED7 0x10000000 /* Available */
478#define TDF_UNUSED8 0x20000000 /* Available */
479#define TDF_UNUSED9 0x40000000 /* Available */
480#define TDF_UNUSED10 0x80000000 /* Available */
481
482enum {
483 TDA_AST = 0, /* Special: call all non-flagged AST handlers */
484 TDA_OWEUPC,
485 TDA_HWPMC,
486 TDA_VFORK,
487 TDA_ALRM,
488 TDA_PROF,
489 TDA_MAC,
490 TDA_SCHED,
491 TDA_UFS,
492 TDA_GEOM,
493 TDA_KQUEUE,
494 TDA_RACCT,
495 TDA_MOD1, /* For third party use, before signals are */
496 TAD_MOD2, /* processed .. */
497 TDA_SIG,
498 TDA_KTRACE,
499 TDA_SUSPEND,
500 TDA_SIGSUSPEND,
501 TDA_MOD3, /* .. and after */
502 TAD_MOD4,
503 TDA_MAX,
504};
505#define TDAI(tda) (1U << (tda))
506#define td_ast_pending(td, tda) ((td->td_ast & TDAI(tda)) != 0)
507
508/* Userland debug flags */
509#define TDB_SUSPEND 0x00000001 /* Thread is suspended by debugger */
510#define TDB_XSIG 0x00000002 /* Thread is exchanging signal under trace */
511#define TDB_USERWR 0x00000004 /* Debugger modified memory or registers */
512#define TDB_SCE 0x00000008 /* Thread performs syscall enter */
513#define TDB_SCX 0x00000010 /* Thread performs syscall exit */
514#define TDB_EXEC 0x00000020 /* TDB_SCX from exec(2) family */
515#define TDB_FORK 0x00000040 /* TDB_SCX from fork(2) that created new
516 process */
517#define TDB_STOPATFORK 0x00000080 /* Stop at the return from fork (child
518 only) */
519#define TDB_CHILD 0x00000100 /* New child indicator for ptrace() */
520#define TDB_BORN 0x00000200 /* New LWP indicator for ptrace() */
521#define TDB_EXIT 0x00000400 /* Exiting LWP indicator for ptrace() */
522#define TDB_VFORK 0x00000800 /* vfork indicator for ptrace() */
523#define TDB_FSTP 0x00001000 /* The thread is PT_ATTACH leader */
524#define TDB_STEP 0x00002000 /* (x86) PSL_T set for PT_STEP */
525#define TDB_SSWITCH 0x00004000 /* Suspended in ptracestop */
526#define TDB_BOUNDARY 0x00008000 /* ptracestop() at boundary */
527#define TDB_COREDUMPREQ 0x00010000 /* Coredump request */
528#define TDB_SCREMOTEREQ 0x00020000 /* Remote syscall request */
529
530/*
531 * "Private" flags kept in td_pflags:
532 * These are only written by curthread and thus need no locking.
533 */
534#define TDP_OLDMASK 0x00000001 /* Need to restore mask after suspend. */
535#define TDP_INKTR 0x00000002 /* Thread is currently in KTR code. */
536#define TDP_INKTRACE 0x00000004 /* Thread is currently in KTRACE code. */
537#define TDP_BUFNEED 0x00000008 /* Do not recurse into the buf flush */
538#define TDP_COWINPROGRESS 0x00000010 /* Snapshot copy-on-write in progress. */
539#define TDP_ALTSTACK 0x00000020 /* Have alternate signal stack. */
540#define TDP_DEADLKTREAT 0x00000040 /* Lock acquisition - deadlock treatment. */
541#define TDP_NOFAULTING 0x00000080 /* Do not handle page faults. */
542#define TDP_SIGFASTBLOCK 0x00000100 /* Fast sigblock active */
543#define TDP_OWEUPC 0x00000200 /* Call addupc() at next AST. */
544#define TDP_ITHREAD 0x00000400 /* Thread is an interrupt thread. */
545#define TDP_SYNCIO 0x00000800 /* Local override, disable async i/o. */
546#define TDP_SCHED1 0x00001000 /* Reserved for scheduler private use */
547#define TDP_SCHED2 0x00002000 /* Reserved for scheduler private use */
548#define TDP_SCHED3 0x00004000 /* Reserved for scheduler private use */
549#define TDP_SCHED4 0x00008000 /* Reserved for scheduler private use */
550#define TDP_GEOM 0x00010000 /* Settle GEOM before finishing syscall */
551#define TDP_SOFTDEP 0x00020000 /* Stuck processing softdep worklist */
552#define TDP_NORUNNINGBUF 0x00040000 /* Ignore runningbufspace check */
553#define TDP_WAKEUP 0x00080000 /* Don't sleep in umtx cond_wait */
554#define TDP_INBDFLUSH 0x00100000 /* Already in BO_BDFLUSH, do not recurse */
555#define TDP_KTHREAD 0x00200000 /* This is an official kernel thread */
556#define TDP_CALLCHAIN 0x00400000 /* Capture thread's callchain */
557#define TDP_IGNSUSP 0x00800000 /* Permission to ignore the MNTK_SUSPEND* */
558#define TDP_AUDITREC 0x01000000 /* Audit record pending on thread */
559#define TDP_RFPPWAIT 0x02000000 /* Handle RFPPWAIT on syscall exit */
560#define TDP_RESETSPUR 0x04000000 /* Reset spurious page fault history. */
561#define TDP_NERRNO 0x08000000 /* Last errno is already in td_errno */
562#define TDP_UIOHELD 0x10000000 /* Current uio has pages held in td_ma */
563#define TDP_INTCPCALLOUT 0x20000000 /* used by netinet/tcp_timer.c */
564#define TDP_EXECVMSPC 0x40000000 /* Execve destroyed old vmspace */
565#define TDP_SIGFASTPENDING 0x80000000 /* Pending signal due to sigfastblock */
566
567#define TDP2_SBPAGES 0x00000001 /* Owns sbusy on some pages */
568#define TDP2_COMPAT32RB 0x00000002 /* compat32 ABI for robust lists */
569#define TDP2_ACCT 0x00000004 /* Doing accounting */
570
571/*
572 * Reasons that the current thread can not be run yet.
573 * More than one may apply.
574 */
575#define TDI_SUSPENDED 0x0001 /* On suspension queue. */
576#define TDI_SLEEPING 0x0002 /* Actually asleep! (tricky). */
577#define TDI_SWAPPED 0x0004 /* Stack not in mem. Bad juju if run. */
578#define TDI_LOCK 0x0008 /* Stopped on a lock. */
579#define TDI_IWAIT 0x0010 /* Awaiting interrupt. */
580
581#define TD_IS_SLEEPING(td) ((td)->td_inhibitors & TDI_SLEEPING)
582#define TD_ON_SLEEPQ(td) ((td)->td_wchan != NULL)
583#define TD_IS_SUSPENDED(td) ((td)->td_inhibitors & TDI_SUSPENDED)
584#define TD_IS_SWAPPED(td) ((td)->td_inhibitors & TDI_SWAPPED)
585#define TD_ON_LOCK(td) ((td)->td_inhibitors & TDI_LOCK)
586#define TD_AWAITING_INTR(td) ((td)->td_inhibitors & TDI_IWAIT)
587#ifdef _KERNEL
588#define TD_GET_STATE(td) atomic_load_int(&(td)->td_state)
589#else
590#define TD_GET_STATE(td) ((td)->td_state)
591#endif
592#define TD_IS_RUNNING(td) (TD_GET_STATE(td) == TDS_RUNNING)
593#define TD_ON_RUNQ(td) (TD_GET_STATE(td) == TDS_RUNQ)
594#define TD_CAN_RUN(td) (TD_GET_STATE(td) == TDS_CAN_RUN)
595#define TD_IS_INHIBITED(td) (TD_GET_STATE(td) == TDS_INHIBITED)
596#define TD_ON_UPILOCK(td) ((td)->td_flags & TDF_UPIBLOCKED)
597#define TD_IS_IDLETHREAD(td) ((td)->td_flags & TDF_IDLETD)
598
599#define TD_CAN_ABORT(td) (TD_ON_SLEEPQ((td)) && \
600 ((td)->td_flags & TDF_SINTR) != 0)
601
602#define KTDSTATE(td) \
603 (((td)->td_inhibitors & TDI_SLEEPING) != 0 ? "sleep" : \
604 ((td)->td_inhibitors & TDI_SUSPENDED) != 0 ? "suspended" : \
605 ((td)->td_inhibitors & TDI_SWAPPED) != 0 ? "swapped" : \
606 ((td)->td_inhibitors & TDI_LOCK) != 0 ? "blocked" : \
607 ((td)->td_inhibitors & TDI_IWAIT) != 0 ? "iwait" : "yielding")
608
609#define TD_SET_INHIB(td, inhib) do { \
610 TD_SET_STATE(td, TDS_INHIBITED); \
611 (td)->td_inhibitors |= (inhib); \
612} while (0)
613
614#define TD_CLR_INHIB(td, inhib) do { \
615 if (((td)->td_inhibitors & (inhib)) && \
616 (((td)->td_inhibitors &= ~(inhib)) == 0)) \
617 TD_SET_STATE(td, TDS_CAN_RUN); \
618} while (0)
619
620#define TD_SET_SLEEPING(td) TD_SET_INHIB((td), TDI_SLEEPING)
621#define TD_SET_SWAPPED(td) TD_SET_INHIB((td), TDI_SWAPPED)
622#define TD_SET_LOCK(td) TD_SET_INHIB((td), TDI_LOCK)
623#define TD_SET_SUSPENDED(td) TD_SET_INHIB((td), TDI_SUSPENDED)
624#define TD_SET_IWAIT(td) TD_SET_INHIB((td), TDI_IWAIT)
625#define TD_SET_EXITING(td) TD_SET_INHIB((td), TDI_EXITING)
626
627#define TD_CLR_SLEEPING(td) TD_CLR_INHIB((td), TDI_SLEEPING)
628#define TD_CLR_SWAPPED(td) TD_CLR_INHIB((td), TDI_SWAPPED)
629#define TD_CLR_LOCK(td) TD_CLR_INHIB((td), TDI_LOCK)
630#define TD_CLR_SUSPENDED(td) TD_CLR_INHIB((td), TDI_SUSPENDED)
631#define TD_CLR_IWAIT(td) TD_CLR_INHIB((td), TDI_IWAIT)
632
633#ifdef _KERNEL
634#define TD_SET_STATE(td, state) atomic_store_int(&(td)->td_state, state)
635#else
636#define TD_SET_STATE(td, state) (td)->td_state = state
637#endif
638#define TD_SET_RUNNING(td) TD_SET_STATE(td, TDS_RUNNING)
639#define TD_SET_RUNQ(td) TD_SET_STATE(td, TDS_RUNQ)
640#define TD_SET_CAN_RUN(td) TD_SET_STATE(td, TDS_CAN_RUN)
641
642
643#define TD_SBDRY_INTR(td) \
644 (((td)->td_flags & (TDF_SEINTR | TDF_SERESTART)) != 0)
645#define TD_SBDRY_ERRNO(td) \
646 (((td)->td_flags & TDF_SEINTR) != 0 ? EINTR : ERESTART)
647
648/*
649 * Process structure.
650 */
651struct proc {
652 LIST_ENTRY(proc) p_list; /* (d) List of all processes. */
653 TAILQ_HEAD(, thread) p_threads; /* (c) all threads. */
654 struct mtx p_slock; /* process spin lock */
655 struct ucred *p_ucred; /* (c) Process owner's identity. */
656 struct filedesc *p_fd; /* (b) Open files. */
657 struct filedesc_to_leader *p_fdtol; /* (b) Tracking node */
658 struct pwddesc *p_pd; /* (b) Cwd, chroot, jail, umask */
659 struct pstats *p_stats; /* (b) Accounting/statistics (CPU). */
660 struct plimit *p_limit; /* (c) Resource limits. */
661 struct callout p_limco; /* (c) Limit callout handle */
662 struct sigacts *p_sigacts; /* (x) Signal actions, state (CPU). */
663
664 int p_flag; /* (c) P_* flags. */
665 int p_flag2; /* (c) P2_* flags. */
666 enum p_states {
667 PRS_NEW = 0, /* In creation */
668 PRS_NORMAL, /* threads can be run. */
669 PRS_ZOMBIE
670 } p_state; /* (j/c) Process status. */
671 pid_t p_pid; /* (b) Process identifier. */
672 LIST_ENTRY(proc) p_hash; /* (d) Hash chain. */
673 LIST_ENTRY(proc) p_pglist; /* (g + e) List of processes in pgrp. */
674 struct proc *p_pptr; /* (c + e) Pointer to parent process. */
675 LIST_ENTRY(proc) p_sibling; /* (e) List of sibling processes. */
676 LIST_HEAD(, proc) p_children; /* (e) Pointer to list of children. */
677 struct proc *p_reaper; /* (e) My reaper. */
678 LIST_HEAD(, proc) p_reaplist; /* (e) List of my descendants
679 (if I am reaper). */
680 LIST_ENTRY(proc) p_reapsibling; /* (e) List of siblings - descendants of
681 the same reaper. */
682 struct mtx p_mtx; /* (n) Lock for this struct. */
683 struct mtx p_statmtx; /* Lock for the stats */
684 struct mtx p_itimmtx; /* Lock for the virt/prof timers */
685 struct mtx p_profmtx; /* Lock for the profiling */
686 struct ksiginfo *p_ksi; /* Locked by parent proc lock */
687 sigqueue_t p_sigqueue; /* (c) Sigs not delivered to a td. */
688#define p_siglist p_sigqueue.sq_signals
689 pid_t p_oppid; /* (c + e) Real parent pid. */
690
691/* The following fields are all zeroed upon creation in fork. */
692#define p_startzero p_vmspace
693 struct vmspace *p_vmspace; /* (b) Address space. */
694 u_int p_swtick; /* (c) Tick when swapped in or out. */
695 u_int p_cowgen; /* (c) Generation of COW pointers. */
696 struct itimerval p_realtimer; /* (c) Alarm timer. */
697 struct rusage p_ru; /* (a) Exit information. */
698 struct rusage_ext p_rux; /* (cu) Internal resource usage. */
699 struct rusage_ext p_crux; /* (c) Internal child resource usage. */
700 int p_profthreads; /* (c) Num threads in addupc_task. */
701 volatile int p_exitthreads; /* (j) Number of threads exiting */
702 int p_traceflag; /* (o) Kernel trace points. */
703 struct ktr_io_params *p_ktrioparms; /* (c + o) Params for ktrace. */
704 struct vnode *p_textvp; /* (b) Vnode of executable. */
705 struct vnode *p_textdvp; /* (b) Dir containing textvp. */
706 char *p_binname; /* (b) Binary hardlink name. */
707 u_int p_lock; /* (c) Proclock (prevent swap) count. */
708 struct sigiolst p_sigiolst; /* (c) List of sigio sources. */
709 int p_sigparent; /* (c) Signal to parent on exit. */
710 int p_sig; /* (n) For core dump/debugger XXX. */
711 u_int p_ptevents; /* (c + e) ptrace() event mask. */
712 struct kaioinfo *p_aioinfo; /* (y) ASYNC I/O info. */
713 struct thread *p_singlethread;/* (c + j) If single threading this is it */
714 int p_suspcount; /* (j) Num threads in suspended mode. */
715 struct thread *p_xthread; /* (c) Trap thread */
716 int p_boundary_count;/* (j) Num threads at user boundary */
717 int p_pendingcnt; /* how many signals are pending */
718 struct itimers *p_itimers; /* (c) POSIX interval timers. */
719 struct procdesc *p_procdesc; /* (e) Process descriptor, if any. */
720 u_int p_treeflag; /* (e) P_TREE flags */
721 int p_pendingexits; /* (c) Count of pending thread exits. */
722 struct filemon *p_filemon; /* (c) filemon-specific data. */
723 int p_pdeathsig; /* (c) Signal from parent on exit. */
724/* End area that is zeroed on creation. */
725#define p_endzero p_magic
726
727/* The following fields are all copied upon creation in fork. */
728#define p_startcopy p_endzero
729 u_int p_magic; /* (b) Magic number. */
730 int p_osrel; /* (x) osreldate for the
731 binary (from ELF note, if any) */
732 uint32_t p_fctl0; /* (x) ABI feature control, ELF note */
733 char p_comm[MAXCOMLEN + 1]; /* (x) Process name. */
734 struct sysentvec *p_sysent; /* (b) Syscall dispatch info. */
735 struct pargs *p_args; /* (c) Process arguments. */
736 rlim_t p_cpulimit; /* (c) Current CPU limit in seconds. */
737 signed char p_nice; /* (c) Process "nice" value. */
738 int p_fibnum; /* in this routing domain XXX MRT */
739 pid_t p_reapsubtree; /* (e) Pid of the direct child of the
740 reaper which spawned
741 our subtree. */
742 uint64_t p_elf_flags; /* (x) ELF flags */
743 void *p_elf_brandinfo; /* (x) Elf_Brandinfo, NULL for
744 non ELF binaries. */
745 sbintime_t p_umtx_min_timeout;
746/* End area that is copied on creation. */
747#define p_endcopy p_xexit
748
749 u_int p_xexit; /* (c) Exit code. */
750 u_int p_xsig; /* (c) Stop/kill sig. */
751 struct pgrp *p_pgrp; /* (c + e) Pointer to process group. */
752 struct knlist *p_klist; /* (c) Knotes attached to this proc. */
753 int p_numthreads; /* (c) Number of threads. */
754 struct mdproc p_md; /* Any machine-dependent fields. */
755 struct callout p_itcallout; /* (h + c) Interval timer callout. */
756 u_short p_acflag; /* (c) Accounting flags. */
757 struct proc *p_peers; /* (r) */
758 struct proc *p_leader; /* (b) */
759 void *p_emuldata; /* (c) Emulator state data. */
760 struct label *p_label; /* (*) Proc (not subject) MAC label. */
761 STAILQ_HEAD(, ktr_request) p_ktr; /* (o) KTR event queue. */
762 LIST_HEAD(, mqueue_notifier) p_mqnotifier; /* (c) mqueue notifiers.*/
763 struct kdtrace_proc *p_dtrace; /* (*) DTrace-specific data. */
764 struct cv p_pwait; /* (*) wait cv for exit/exec. */
765 uint64_t p_prev_runtime; /* (c) Resource usage accounting. */
766 struct racct *p_racct; /* (b) Resource accounting. */
767 int p_throttled; /* (c) Flag for racct pcpu throttling */
768 /*
769 * An orphan is the child that has been re-parented to the
770 * debugger as a result of attaching to it. Need to keep
771 * track of them for parent to be able to collect the exit
772 * status of what used to be children.
773 */
774 LIST_ENTRY(proc) p_orphan; /* (e) List of orphan processes. */
775 LIST_HEAD(, proc) p_orphans; /* (e) Pointer to list of orphans. */
776
777 TAILQ_HEAD(, kq_timer_cb_data) p_kqtim_stop; /* (c) */
778 LIST_ENTRY(proc) p_jaillist; /* (d) Jail process linkage. */
779};
780
781#define p_session p_pgrp->pg_session
782#define p_pgid p_pgrp->pg_id
783
784#define NOCPU (-1) /* For when we aren't on a CPU. */
785#define NOCPU_OLD (255)
786#define MAXCPU_OLD (254)
787
788#define PROC_SLOCK(p) mtx_lock_spin(&(p)->p_slock)
789#define PROC_SUNLOCK(p) mtx_unlock_spin(&(p)->p_slock)
790#define PROC_SLOCK_ASSERT(p, type) mtx_assert(&(p)->p_slock, (type))
791
792#define PROC_STATLOCK(p) mtx_lock_spin(&(p)->p_statmtx)
793#define PROC_STATUNLOCK(p) mtx_unlock_spin(&(p)->p_statmtx)
794#define PROC_STATLOCK_ASSERT(p, type) mtx_assert(&(p)->p_statmtx, (type))
795
796#define PROC_ITIMLOCK(p) mtx_lock_spin(&(p)->p_itimmtx)
797#define PROC_ITIMUNLOCK(p) mtx_unlock_spin(&(p)->p_itimmtx)
798#define PROC_ITIMLOCK_ASSERT(p, type) mtx_assert(&(p)->p_itimmtx, (type))
799
800#define PROC_PROFLOCK(p) mtx_lock_spin(&(p)->p_profmtx)
801#define PROC_PROFUNLOCK(p) mtx_unlock_spin(&(p)->p_profmtx)
802#define PROC_PROFLOCK_ASSERT(p, type) mtx_assert(&(p)->p_profmtx, (type))
803
804/* These flags are kept in p_flag. */
805#define P_ADVLOCK 0x00000001 /* Process may hold a POSIX advisory
806 lock. */
807#define P_CONTROLT 0x00000002 /* Has a controlling terminal. */
808#define P_KPROC 0x00000004 /* Kernel process. */
809#define P_UNUSED3 0x00000008 /* --available-- */
810#define P_PPWAIT 0x00000010 /* Parent is waiting for child to
811 exec/exit. */
812#define P_PROFIL 0x00000020 /* Has started profiling. */
813#define P_STOPPROF 0x00000040 /* Has thread requesting to stop
814 profiling. */
815#define P_HADTHREADS 0x00000080 /* Has had threads (no cleanup
816 shortcuts) */
817#define P_SUGID 0x00000100 /* Had set id privileges since last
818 exec. */
819#define P_SYSTEM 0x00000200 /* System proc: no sigs, stats or
820 swapping. */
821#define P_SINGLE_EXIT 0x00000400 /* Threads suspending should exit,
822 not wait. */
823#define P_TRACED 0x00000800 /* Debugged process being traced. */
824#define P_WAITED 0x00001000 /* Someone is waiting for us. */
825#define P_WEXIT 0x00002000 /* Working on exiting. */
826#define P_EXEC 0x00004000 /* Process called exec. */
827#define P_WKILLED 0x00008000 /* Killed, go to kernel/user boundary
828 ASAP. */
829#define P_CONTINUED 0x00010000 /* Proc has continued from a stopped
830 state. */
831#define P_STOPPED_SIG 0x00020000 /* Stopped due to SIGSTOP/SIGTSTP. */
832#define P_STOPPED_TRACE 0x00040000 /* Stopped because of tracing. */
833#define P_STOPPED_SINGLE 0x00080000 /* Only 1 thread can continue (not to
834 user). */
835#define P_PROTECTED 0x00100000 /* Do not kill on memory overcommit. */
836#define P_SIGEVENT 0x00200000 /* Process pending signals changed. */
837#define P_SINGLE_BOUNDARY 0x00400000 /* Threads should suspend at user
838 boundary. */
839#define P_HWPMC 0x00800000 /* Process is using HWPMCs */
840#define P_JAILED 0x01000000 /* Process is in jail. */
841#define P_TOTAL_STOP 0x02000000 /* Stopped in stop_all_proc. */
842#define P_INEXEC 0x04000000 /* Process is in execve(). */
843#define P_STATCHILD 0x08000000 /* Child process stopped or exited. */
844#define P_INMEM 0x10000000 /* Loaded into memory. */
845#define P_SWAPPINGOUT 0x20000000 /* Process is being swapped out. */
846#define P_SWAPPINGIN 0x40000000 /* Process is being swapped in. */
847#define P_PPTRACE 0x80000000 /* PT_TRACEME by vforked child. */
848
849#define P_STOPPED (P_STOPPED_SIG|P_STOPPED_SINGLE|P_STOPPED_TRACE)
850#define P_SHOULDSTOP(p) ((p)->p_flag & P_STOPPED)
851#define P_KILLED(p) ((p)->p_flag & P_WKILLED)
852
853/* These flags are kept in p_flag2. */
854#define P2_INHERIT_PROTECTED 0x00000001 /* New children get
855 P_PROTECTED. */
856#define P2_NOTRACE 0x00000002 /* No ptrace(2) attach or
857 coredumps. */
858#define P2_NOTRACE_EXEC 0x00000004 /* Keep P2_NOPTRACE on
859 exec(2). */
860#define P2_AST_SU 0x00000008 /* Handles SU ast for
861 kthreads. */
862#define P2_PTRACE_FSTP 0x00000010 /* SIGSTOP from PT_ATTACH not
863 yet handled. */
864#define P2_TRAPCAP 0x00000020 /* SIGTRAP on ENOTCAPABLE */
865#define P2_ASLR_ENABLE 0x00000040 /* Force enable ASLR. */
866#define P2_ASLR_DISABLE 0x00000080 /* Force disable ASLR. */
867#define P2_ASLR_IGNSTART 0x00000100 /* Enable ASLR to consume sbrk
868 area. */
869#define P2_PROTMAX_ENABLE 0x00000200 /* Force enable implied
870 PROT_MAX. */
871#define P2_PROTMAX_DISABLE 0x00000400 /* Force disable implied
872 PROT_MAX. */
873#define P2_STKGAP_DISABLE 0x00000800 /* Disable stack gap for
874 MAP_STACK */
875#define P2_STKGAP_DISABLE_EXEC 0x00001000 /* Stack gap disabled
876 after exec */
877#define P2_ITSTOPPED 0x00002000
878#define P2_PTRACEREQ 0x00004000 /* Active ptrace req */
879#define P2_NO_NEW_PRIVS 0x00008000 /* Ignore setuid */
880#define P2_WXORX_DISABLE 0x00010000 /* WX mappings enabled */
881#define P2_WXORX_ENABLE_EXEC 0x00020000 /* WXORX enabled after exec */
882#define P2_WEXIT 0x00040000 /* exit just started, no
883 external thread_single() is
884 permitted */
885#define P2_REAPKILLED 0x00080000
886#define P2_MEMBAR_PRIVE 0x00100000 /* membar private expedited
887 registered */
888#define P2_MEMBAR_PRIVE_SYNCORE 0x00200000 /* membar private expedited
889 sync core registered */
890#define P2_MEMBAR_GLOBE 0x00400000 /* membar global expedited
891 registered */
892
893/* Flags protected by proctree_lock, kept in p_treeflags. */
894#define P_TREE_ORPHANED 0x00000001 /* Reparented, on orphan list */
895#define P_TREE_FIRST_ORPHAN 0x00000002 /* First element of orphan
896 list */
897#define P_TREE_REAPER 0x00000004 /* Reaper of subtree */
898#define P_TREE_GRPEXITED 0x00000008 /* exit1() done with job ctl */
899
900/*
901 * These were process status values (p_stat), now they are only used in
902 * legacy conversion code.
903 */
904#define SIDL 1 /* Process being created by fork. */
905#define SRUN 2 /* Currently runnable. */
906#define SSLEEP 3 /* Sleeping on an address. */
907#define SSTOP 4 /* Process debugging or suspension. */
908#define SZOMB 5 /* Awaiting collection by parent. */
909#define SWAIT 6 /* Waiting for interrupt. */
910#define SLOCK 7 /* Blocked on a lock. */
911
912#define P_MAGIC 0xbeefface
913
914#ifdef _KERNEL
915
916/* Types and flags for mi_switch(9). */
917#define SW_TYPE_MASK 0xff /* First 8 bits are switch type */
918#define SWT_OWEPREEMPT 1 /* Switching due to owepreempt. */
919#define SWT_TURNSTILE 2 /* Turnstile contention. */
920#define SWT_SLEEPQ 3 /* Sleepq wait. */
921#define SWT_RELINQUISH 4 /* yield call. */
922#define SWT_NEEDRESCHED 5 /* NEEDRESCHED was set. */
923#define SWT_IDLE 6 /* Switching from the idle thread. */
924#define SWT_IWAIT 7 /* Waiting for interrupts. */
925#define SWT_SUSPEND 8 /* Thread suspended. */
926#define SWT_REMOTEPREEMPT 9 /* Remote processor preempted. */
927#define SWT_REMOTEWAKEIDLE 10 /* Remote processor preempted idle. */
928#define SWT_BIND 11 /* Thread bound to a new CPU. */
929#define SWT_COUNT 12 /* Number of switch types. */
930/* Flags */
931#define SW_VOL 0x0100 /* Voluntary switch. */
932#define SW_INVOL 0x0200 /* Involuntary switch. */
933#define SW_PREEMPT 0x0400 /* The invol switch is a preemption */
934
935/* How values for thread_single(). */
936#define SINGLE_NO_EXIT 0
937#define SINGLE_EXIT 1
938#define SINGLE_BOUNDARY 2
939#define SINGLE_ALLPROC 3
940
941#ifdef MALLOC_DECLARE
942MALLOC_DECLARE(M_PARGS);
943MALLOC_DECLARE(M_SESSION);
944MALLOC_DECLARE(M_SUBPROC);
945#endif
946
947#define FOREACH_PROC_IN_SYSTEM(p) \
948 LIST_FOREACH((p), &allproc, p_list)
949#define FOREACH_THREAD_IN_PROC(p, td) \
950 TAILQ_FOREACH((td), &(p)->p_threads, td_plist)
951
952#define FIRST_THREAD_IN_PROC(p) TAILQ_FIRST(&(p)->p_threads)
953
954/*
955 * We use process IDs <= pid_max <= PID_MAX; PID_MAX + 1 must also fit
956 * in a pid_t, as it is used to represent "no process group".
957 */
958#define PID_MAX 99999
959#define NO_PID 100000
960#define THREAD0_TID NO_PID
961extern pid_t pid_max;
962
963#define SESS_LEADER(p) ((p)->p_session->s_leader == (p))
964
965/* Lock and unlock a process. */
966#define PROC_LOCK(p) mtx_lock(&(p)->p_mtx)
967#define PROC_TRYLOCK(p) mtx_trylock(&(p)->p_mtx)
968#define PROC_UNLOCK(p) mtx_unlock(&(p)->p_mtx)
969#define PROC_LOCKED(p) mtx_owned(&(p)->p_mtx)
970#define PROC_WAIT_UNLOCKED(p) mtx_wait_unlocked(&(p)->p_mtx)
971#define PROC_LOCK_ASSERT(p, type) mtx_assert(&(p)->p_mtx, (type))
972
973/* Lock and unlock a process group. */
974#define PGRP_LOCK(pg) mtx_lock(&(pg)->pg_mtx)
975#define PGRP_UNLOCK(pg) mtx_unlock(&(pg)->pg_mtx)
976#define PGRP_LOCKED(pg) mtx_owned(&(pg)->pg_mtx)
977#define PGRP_LOCK_ASSERT(pg, type) mtx_assert(&(pg)->pg_mtx, (type))
978
979#define PGRP_LOCK_PGSIGNAL(pg) do { \
980 if ((pg) != NULL) \
981 PGRP_LOCK(pg); \
982} while (0)
983#define PGRP_UNLOCK_PGSIGNAL(pg) do { \
984 if ((pg) != NULL) \
985 PGRP_UNLOCK(pg); \
986} while (0)
987
988/* Lock and unlock a session. */
989#define SESS_LOCK(s) mtx_lock(&(s)->s_mtx)
990#define SESS_UNLOCK(s) mtx_unlock(&(s)->s_mtx)
991#define SESS_LOCKED(s) mtx_owned(&(s)->s_mtx)
992#define SESS_LOCK_ASSERT(s, type) mtx_assert(&(s)->s_mtx, (type))
993
994/*
995 * Non-zero p_lock ensures that:
996 * - exit1() is not performed until p_lock reaches zero;
997 * - the process' threads stack are not swapped out if they are currently
998 * not (P_INMEM).
999 *
1000 * PHOLD() asserts that the process (except the current process) is
1001 * not exiting, increments p_lock and swaps threads stacks into memory,
1002 * if needed.
1003 * _PHOLD() is same as PHOLD(), it takes the process locked.
1004 * _PHOLD_LITE() also takes the process locked, but comparing with
1005 * _PHOLD(), it only guarantees that exit1() is not executed,
1006 * faultin() is not called.
1007 */
1008#define PHOLD(p) do { \
1009 PROC_LOCK(p); \
1010 _PHOLD(p); \
1011 PROC_UNLOCK(p); \
1012} while (0)
1013#define _PHOLD(p) do { \
1014 PROC_LOCK_ASSERT((p), MA_OWNED); \
1015 KASSERT(!((p)->p_flag & P_WEXIT) || (p) == curproc, \
1016 ("PHOLD of exiting process %p", p)); \
1017 (p)->p_lock++; \
1018 if (((p)->p_flag & P_INMEM) == 0) \
1019 faultin((p)); \
1020} while (0)
1021#define _PHOLD_LITE(p) do { \
1022 PROC_LOCK_ASSERT((p), MA_OWNED); \
1023 KASSERT(!((p)->p_flag & P_WEXIT) || (p) == curproc, \
1024 ("PHOLD of exiting process %p", p)); \
1025 (p)->p_lock++; \
1026} while (0)
1027#define PROC_ASSERT_HELD(p) do { \
1028 KASSERT((p)->p_lock > 0, ("process %p not held", p)); \
1029} while (0)
1030
1031#define PRELE(p) do { \
1032 PROC_LOCK((p)); \
1033 _PRELE((p)); \
1034 PROC_UNLOCK((p)); \
1035} while (0)
1036#define _PRELE(p) do { \
1037 PROC_LOCK_ASSERT((p), MA_OWNED); \
1038 PROC_ASSERT_HELD(p); \
1039 (--(p)->p_lock); \
1040 if (((p)->p_flag & P_WEXIT) && (p)->p_lock == 0) \
1041 wakeup(&(p)->p_lock); \
1042} while (0)
1043#define PROC_ASSERT_NOT_HELD(p) do { \
1044 KASSERT((p)->p_lock == 0, ("process %p held", p)); \
1045} while (0)
1046
1047#define PROC_UPDATE_COW(p) do { \
1048 struct proc *_p = (p); \
1049 PROC_LOCK_ASSERT((_p), MA_OWNED); \
1050 atomic_store_int(&_p->p_cowgen, _p->p_cowgen + 1); \
1051} while (0)
1052
1053#define PROC_COW_CHANGECOUNT(td, p) ({ \
1054 struct thread *_td = (td); \
1055 struct proc *_p = (p); \
1056 MPASS(_td == curthread); \
1057 PROC_LOCK_ASSERT(_p, MA_OWNED); \
1058 _p->p_cowgen - _td->td_cowgen; \
1059})
1060
1061/* Check whether a thread is safe to be swapped out. */
1062#define thread_safetoswapout(td) ((td)->td_flags & TDF_CANSWAP)
1063
1064/* Control whether or not it is safe for curthread to sleep. */
1065#define THREAD_NO_SLEEPING() do { \
1066 curthread->td_no_sleeping++; \
1067 MPASS(curthread->td_no_sleeping > 0); \
1068} while (0)
1069
1070#define THREAD_SLEEPING_OK() do { \
1071 MPASS(curthread->td_no_sleeping > 0); \
1072 curthread->td_no_sleeping--; \
1073} while (0)
1074
1075#define THREAD_CAN_SLEEP() ((curthread)->td_no_sleeping == 0)
1076
1077#define PIDHASH(pid) (&pidhashtbl[(pid) & pidhash])
1078#define PIDHASHLOCK(pid) (&pidhashtbl_lock[((pid) & pidhashlock)])
1079extern LIST_HEAD(pidhashhead, proc) *pidhashtbl;
1080extern struct sx *pidhashtbl_lock;
1081extern u_long pidhash;
1082extern u_long pidhashlock;
1083
1084#define PGRPHASH(pgid) (&pgrphashtbl[(pgid) & pgrphash])
1085extern LIST_HEAD(pgrphashhead, pgrp) *pgrphashtbl;
1086extern u_long pgrphash;
1087
1088extern struct sx allproc_lock;
1089extern int allproc_gen;
1090extern struct sx proctree_lock;
1091extern struct mtx ppeers_lock;
1092extern struct mtx procid_lock;
1093extern struct proc proc0; /* Process slot for swapper. */
1094extern struct thread0_storage thread0_st; /* Primary thread in proc0. */
1095#define thread0 (thread0_st.t0st_thread)
1096extern struct vmspace vmspace0; /* VM space for proc0. */
1097extern int hogticks; /* Limit on kernel cpu hogs. */
1098extern int lastpid;
1099extern int nprocs, maxproc; /* Current and max number of procs. */
1100extern int maxprocperuid; /* Max procs per uid. */
1101extern u_long ps_arg_cache_limit;
1102
1103LIST_HEAD(proclist, proc);
1104TAILQ_HEAD(procqueue, proc);
1105TAILQ_HEAD(threadqueue, thread);
1106extern struct proclist allproc; /* List of all processes. */
1107extern struct proc *initproc, *pageproc; /* Process slots for init, pager. */
1108
1109extern struct uma_zone *proc_zone;
1110extern struct uma_zone *pgrp_zone;
1111
1112struct proc *pfind(pid_t); /* Find process by id. */
1113struct proc *pfind_any(pid_t); /* Find (zombie) process by id. */
1114struct proc *pfind_any_locked(pid_t pid); /* Find process by id, locked. */
1115struct pgrp *pgfind(pid_t); /* Find process group by id. */
1116void pidhash_slockall(void); /* Shared lock all pid hash lists. */
1117void pidhash_sunlockall(void); /* Shared unlock all pid hash lists. */
1118
1119struct fork_req {
1120 int fr_flags;
1121 int fr_pages;
1122 int *fr_pidp;
1123 struct proc **fr_procp;
1124 int *fr_pd_fd;
1125 int fr_pd_flags;
1126 struct filecaps *fr_pd_fcaps;
1127 int fr_flags2;
1128#define FR2_DROPSIG_CAUGHT 0x00000001 /* Drop caught non-DFL signals */
1129#define FR2_SHARE_PATHS 0x00000002 /* Invert sense of RFFDG for paths */
1130#define FR2_KPROC 0x00000004 /* Create a kernel process */
1131};
1132
1133/*
1134 * pget() flags.
1135 */
1136#define PGET_HOLD 0x00001 /* Hold the process. */
1137#define PGET_CANSEE 0x00002 /* Check against p_cansee(). */
1138#define PGET_CANDEBUG 0x00004 /* Check against p_candebug(). */
1139#define PGET_ISCURRENT 0x00008 /* Check that the found process is current. */
1140#define PGET_NOTWEXIT 0x00010 /* Check that the process is not in P_WEXIT. */
1141#define PGET_NOTINEXEC 0x00020 /* Check that the process is not in P_INEXEC. */
1142#define PGET_NOTID 0x00040 /* Do not assume tid if pid > PID_MAX. */
1143
1144#define PGET_WANTREAD (PGET_HOLD | PGET_CANDEBUG | PGET_NOTWEXIT)
1145
1146int pget(pid_t pid, int flags, struct proc **pp);
1147
1148/* ast_register() flags */
1149#define ASTR_ASTF_REQUIRED 0x0001 /* td_ast TDAI(TDA_X) flag set is
1150 required for call */
1151#define ASTR_TDP 0x0002 /* td_pflags flag set is required */
1152#define ASTR_KCLEAR 0x0004 /* call me on ast_kclear() */
1153#define ASTR_UNCOND 0x0008 /* call me always */
1154
1155void ast(struct trapframe *framep);
1156void ast_kclear(struct thread *td);
1157void ast_register(int ast, int ast_flags, int tdp,
1158 void (*f)(struct thread *td, int asts));
1159void ast_deregister(int tda);
1160void ast_sched_locked(struct thread *td, int tda);
1161void ast_sched_mask(struct thread *td, int ast);
1162void ast_sched(struct thread *td, int tda);
1163void ast_unsched_locked(struct thread *td, int tda);
1164
1165struct thread *choosethread(void);
1166int cr_bsd_visible(struct ucred *u1, struct ucred *u2);
1167int cr_cansee(struct ucred *u1, struct ucred *u2);
1168int cr_canseesocket(struct ucred *cred, struct socket *so);
1169int cr_cansignal(struct ucred *cred, struct proc *proc, int signum);
1170int enterpgrp(struct proc *p, pid_t pgid, struct pgrp *pgrp,
1171 struct session *sess);
1172int enterthispgrp(struct proc *p, struct pgrp *pgrp);
1173void faultin(struct proc *p);
1174int fork1(struct thread *, struct fork_req *);
1175void fork_exit(void (*)(void *, struct trapframe *), void *,
1176 struct trapframe *);
1177void fork_return(struct thread *, struct trapframe *);
1178int inferior(struct proc *p);
1179void itimer_proc_continue(struct proc *p);
1180void kqtimer_proc_continue(struct proc *p);
1181void kern_proc_vmmap_resident(struct vm_map *map, struct vm_map_entry *entry,
1182 int *resident_count, bool *super);
1183void kern_yield(int);
1184void kick_proc0(void);
1185void killjobc(void);
1186int leavepgrp(struct proc *p);
1187int maybe_preempt(struct thread *td);
1188void maybe_yield(void);
1189void mi_switch(int flags);
1190int p_candebug(struct thread *td, struct proc *p);
1191int p_cansee(struct thread *td, struct proc *p);
1192int p_cansched(struct thread *td, struct proc *p);
1193int p_cansignal(struct thread *td, struct proc *p, int signum);
1194int p_canwait(struct thread *td, struct proc *p);
1195struct pargs *pargs_alloc(int len);
1196void pargs_drop(struct pargs *pa);
1197void pargs_hold(struct pargs *pa);
1198void proc_add_orphan(struct proc *child, struct proc *parent);
1199int proc_get_binpath(struct proc *p, char *binname, char **fullpath,
1200 char **freepath);
1201int proc_getargv(struct thread *td, struct proc *p, struct sbuf *sb);
1202int proc_getauxv(struct thread *td, struct proc *p, struct sbuf *sb);
1203int proc_getenvv(struct thread *td, struct proc *p, struct sbuf *sb);
1204void procinit(void);
1205int proc_iterate(int (*cb)(struct proc *, void *), void *cbarg);
1206void proc_linkup0(struct proc *p, struct thread *td);
1207void proc_linkup(struct proc *p, struct thread *td);
1208struct proc *proc_realparent(struct proc *child);
1209void proc_reap(struct thread *td, struct proc *p, int *status, int options);
1210void proc_reparent(struct proc *child, struct proc *newparent, bool set_oppid);
1211void proc_set_p2_wexit(struct proc *p);
1212void proc_set_traced(struct proc *p, bool stop);
1213void proc_wkilled(struct proc *p);
1214struct pstats *pstats_alloc(void);
1215void pstats_fork(struct pstats *src, struct pstats *dst);
1216void pstats_free(struct pstats *ps);
1217void proc_clear_orphan(struct proc *p);
1218void reaper_abandon_children(struct proc *p, bool exiting);
1219int securelevel_ge(struct ucred *cr, int level);
1220int securelevel_gt(struct ucred *cr, int level);
1221void sess_hold(struct session *);
1222void sess_release(struct session *);
1223int setrunnable(struct thread *, int);
1224void setsugid(struct proc *p);
1225bool should_yield(void);
1226int sigonstack(size_t sp);
1227void stopevent(struct proc *, u_int, u_int);
1228struct thread *tdfind(lwpid_t, pid_t);
1229void threadinit(void);
1230void tidhash_add(struct thread *);
1231void tidhash_remove(struct thread *);
1232void cpu_idle(int);
1233int cpu_idle_wakeup(int);
1234extern void (*cpu_idle_hook)(sbintime_t); /* Hook to machdep CPU idler. */
1235void cpu_switch(struct thread *, struct thread *, struct mtx *);
1236void cpu_sync_core(void);
1237void cpu_throw(struct thread *, struct thread *) __dead2;
1238bool curproc_sigkilled(void);
1239void userret(struct thread *, struct trapframe *);
1240
1241void cpu_exit(struct thread *);
1242void exit1(struct thread *, int, int) __dead2;
1243void cpu_copy_thread(struct thread *td, struct thread *td0);
1244bool cpu_exec_vmspace_reuse(struct proc *p, struct vm_map *map);
1245int cpu_fetch_syscall_args(struct thread *td);
1246void cpu_fork(struct thread *, struct proc *, struct thread *, int);
1247void cpu_fork_kthread_handler(struct thread *, void (*)(void *), void *);
1248int cpu_procctl(struct thread *td, int idtype, id_t id, int com,
1249 void *data);
1250void cpu_set_syscall_retval(struct thread *, int);
1251int cpu_set_upcall(struct thread *, void (*)(void *), void *,
1252 stack_t *);
1253int cpu_set_user_tls(struct thread *, void *tls_base);
1254void cpu_thread_alloc(struct thread *);
1255void cpu_thread_clean(struct thread *);
1256void cpu_thread_exit(struct thread *);
1257void cpu_thread_free(struct thread *);
1258void cpu_thread_swapin(struct thread *);
1259void cpu_thread_swapout(struct thread *);
1260struct thread *thread_alloc(int pages);
1261int thread_alloc_stack(struct thread *, int pages);
1262int thread_check_susp(struct thread *td, bool sleep);
1263void thread_cow_get_proc(struct thread *newtd, struct proc *p);
1264void thread_cow_get(struct thread *newtd, struct thread *td);
1265void thread_cow_free(struct thread *td);
1266void thread_cow_update(struct thread *td);
1267void thread_cow_synced(struct thread *td);
1268int thread_create(struct thread *td, struct rtprio *rtp,
1269 int (*initialize_thread)(struct thread *, void *), void *thunk);
1270void thread_exit(void) __dead2;
1271void thread_free(struct thread *td);
1272void thread_link(struct thread *td, struct proc *p);
1273void thread_reap_barrier(void);
1274int thread_single(struct proc *p, int how);
1275void thread_single_end(struct proc *p, int how);
1276void thread_stash(struct thread *td);
1277void thread_stopped(struct proc *p);
1278void childproc_stopped(struct proc *child, int reason);
1279void childproc_continued(struct proc *child);
1280void childproc_exited(struct proc *child);
1281void thread_run_flash(struct thread *td);
1282int thread_suspend_check(int how);
1283bool thread_suspend_check_needed(void);
1284void thread_suspend_switch(struct thread *, struct proc *p);
1285void thread_suspend_one(struct thread *td);
1286void thread_unlink(struct thread *td);
1287void thread_unsuspend(struct proc *p);
1288void thread_wait(struct proc *p);
1289
1290bool stop_all_proc_block(void);
1291void stop_all_proc_unblock(void);
1292void stop_all_proc(void);
1293void resume_all_proc(void);
1294
1295static __inline int
1296curthread_pflags_set(int flags)
1297{
1298 struct thread *td;
1299 int save;
1300
1301 td = curthread;
1302 save = ~flags | (td->td_pflags & flags);
1303 td->td_pflags |= flags;
1304 return (save);
1305}
1306
1307static __inline void
1308curthread_pflags_restore(int save)
1309{
1310
1311 curthread->td_pflags &= save;
1312}
1313
1314static __inline int
1315curthread_pflags2_set(int flags)
1316{
1317 struct thread *td;
1318 int save;
1319
1320 td = curthread;
1321 save = ~flags | (td->td_pflags2 & flags);
1322 td->td_pflags2 |= flags;
1323 return (save);
1324}
1325
1326static __inline void
1327curthread_pflags2_restore(int save)
1328{
1329
1330 curthread->td_pflags2 &= save;
1331}
1332
1333static __inline __pure2 struct td_sched *
1334td_get_sched(struct thread *td)
1335{
1336
1337 return ((struct td_sched *)&td[1]);
1338}
1339
1340#define PROC_ID_PID 0
1341#define PROC_ID_GROUP 1
1342#define PROC_ID_SESSION 2
1343#define PROC_ID_REAP 3
1344
1345void proc_id_set(int type, pid_t id);
1346void proc_id_set_cond(int type, pid_t id);
1347void proc_id_clear(int type, pid_t id);
1348
1349EVENTHANDLER_LIST_DECLARE(process_ctor);
1350EVENTHANDLER_LIST_DECLARE(process_dtor);
1351EVENTHANDLER_LIST_DECLARE(process_init);
1352EVENTHANDLER_LIST_DECLARE(process_fini);
1353EVENTHANDLER_LIST_DECLARE(process_exit);
1354EVENTHANDLER_LIST_DECLARE(process_fork);
1355EVENTHANDLER_LIST_DECLARE(process_exec);
1356
1357EVENTHANDLER_LIST_DECLARE(thread_ctor);
1358EVENTHANDLER_LIST_DECLARE(thread_dtor);
1359EVENTHANDLER_LIST_DECLARE(thread_init);
1360
1361#endif /* _KERNEL */
1362
1363#endif /* !_SYS_PROC_H_ */