/*-
 * Copyright (c) 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)proc.h	8.15 (Berkeley) 5/19/95
 * $FreeBSD$
 */

#ifndef _SYS_PROC_H_
#define	_SYS_PROC_H_

#include <sys/callout.h>		/* For struct callout. */
#include <sys/event.h>			/* For struct klist. */
#include <sys/filedesc.h>
#include <sys/queue.h>
#include <sys/priority.h>
#include <sys/rtprio.h>			/* XXX */
#include <sys/runq.h>
#include <sys/signal.h>
#ifndef _KERNEL
#include <sys/time.h>			/* For structs itimerval, timeval. */
#else
#include <sys/pcpu.h>
#endif
#include <sys/ucred.h>
#include <machine/proc.h>		/* Machine-dependent proc substruct. */
#include <vm/uma.h>

/*
 * One structure allocated per session.
 *
 * List of locks
 * (m)		locked by s_mtx mtx
 * (e)		locked by proctree_lock sx
 * (c)		const until freeing
 */
struct session {
	int		s_count;	/* (m)		Ref cnt; pgrps in session. */
	struct	proc	*s_leader;	/* (m + e)	Session leader. */
	struct	vnode	*s_ttyvp;	/* (m)		Vnode of controlling terminal. */
	struct	tty	*s_ttyp;	/* (m)		Controlling terminal. */
	pid_t		s_sid;		/* (c)		Session ID. */
					/* (m)		Setlogin() name: */
	char		s_login[roundup(MAXLOGNAME, sizeof(long))];
	struct	mtx	s_mtx;		/* 		Mutex to protect members */
};

/*
 * One structure allocated per process group.
 *
 * List of locks
 * (m)		locked by pg_mtx mtx
 * (e)		locked by proctree_lock sx
 * (c)		const until freeing
 */
struct pgrp {
	LIST_ENTRY(pgrp) pg_hash;	/* (e)		Hash chain. */
	LIST_HEAD(, proc) pg_members;	/* (m + e)	Pointer to pgrp members. */
	struct session	*pg_session;	/* (c)		Pointer to session. */
	struct sigiolst	pg_sigiolst;	/* (m)		List of sigio sources. */
	pid_t		pg_id;		/* (c)		Pgrp id. */
	int		pg_jobc;	/* (m)		# procs qualifying pgrp for job control */
	struct	mtx	pg_mtx;		/* 		Mutex to protect members */
};

struct procsig {
	sigset_t ps_sigignore;	/* Signals being ignored. */
	sigset_t ps_sigcatch;	/* Signals being caught by user. */
	int	 ps_flag;
	struct	 sigacts *ps_sigacts;	/* Signal actions, state. */
	int	 ps_refcnt;
};

#define	PS_NOCLDWAIT	0x0001	/* No zombies if child dies */
#define	PS_NOCLDSTOP	0x0002	/* No SIGCHLD when children stop. */
#define	PS_CLDSIGIGN	0x0004	/* The SIGCHLD handler is SIG_IGN. */

/*
 * pargs, used to hold a copy of the command line, if it had a sane length.
 */
struct pargs {
	u_int	ar_ref;		/* Reference count. */
	u_int	ar_length;	/* Length. */
	u_char	ar_args[0];	/* Arguments. */
};
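
/*
 * Illustrative sketch (not part of the original header): ar_args[0] is a
 * variable-length trailer, so a pargs structure is allocated with the
 * argument bytes appended.  pargs_alloc() (declared below) is assumed to
 * do roughly the following:
 *
 *	struct pargs *pa;
 *
 *	pa = malloc(sizeof(struct pargs) + len, M_PARGS, M_WAITOK);
 *	pa->ar_ref = 1;
 *	pa->ar_length = len;
 */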

/*-
 * Description of a process.
 *
 * This structure contains the information needed to manage a thread of
 * control, known in UN*X as a process; it has references to substructures
 * containing descriptions of things that the process uses, but may share
 * with related processes.  The process structure and the substructures
 * are always addressable except for those marked "(CPU)" below,
 * which might be addressable only on a processor on which the process
 * is running.
 *
 * Below is a key of locks used to protect each member of struct proc.  The
 * lock is indicated by a reference to a specific character in parens in the
 * associated comment.
 *      * - not yet protected
 *      a - only touched by curproc or parent during fork/wait
 *      b - created at fork, never changes
 *      	(exception: aiods switch vmspaces, but they are also
 *      	marked 'P_SYSTEM' so hopefully it will be left alone)
 *      c - locked by proc mtx
 *      d - locked by allproc_lock lock
 *      e - locked by proctree_lock lock
 *      f - session mtx
 *      g - process group mtx
 *      h - callout_lock mtx
 *      i - by curproc or the master session mtx
 *      j - locked by sched_lock mtx
 *      k - only accessed by curthread
 *      l - the attaching proc or attaching proc parent
 *      m - Giant
 *      n - not locked, lazy
 *      o - ktrace lock
 *      p - select lock (sellock)
 *
 * If the locking key specifies two identifiers (for example, p_pptr) then
 * either lock is sufficient for read access, but both locks must be held
 * for write access.
 */
struct ithd;
struct nlminfo;
struct trapframe;

/*
 * Here we define the four structures used for process information.
 *
 * The first is the thread. It might be thought of as a "Kernel
 * Schedulable Entity Context".
 * This structure contains all the information as to where a thread of 
 * execution is now, or was when it was suspended, why it was suspended,
 * and anything else that will be needed to restart it when it is
 * rescheduled. Always associated with a KSE when running, but can be
 * reassigned to an equivalent KSE  when being restarted for
 * load balancing. Each of these is associated with a kernel stack
 * and a pcb.
 * 
 * It is important to remember that a particular thread structure only
 * exists for as long as the system call or kernel entrance (e.g. by pagefault)
 * which it is currently executing. It should therefore NEVER be referenced
 * by pointers in long lived structures that live longer than a single
 * request. If several threads complete their work at the same time,
 * they will all rewind their stacks to the user boundary, report their
 * completion state, and all but one will be freed. That last one will
 * be kept to provide a kernel stack and pcb for the NEXT syscall or kernel
 * entrance. (basically to save freeing and then re-allocating it) A process
 * might keep a cache of threads available to allow it to quickly
 * get one when it needs a new one. There would probably also be a system
 * cache of free threads.
 */
struct thread;

/* 
 * The second structure is the Kernel Schedulable Entity. (KSE)
 * As long as this is scheduled, it will continue to run any threads that
 * are assigned to it or the KSEGRP (see later) until either it runs out
 * of runnable threads or CPU.
 * It runs on one CPU and is assigned a quantum of time. When a thread is
 * blocked, the KSE continues to run and will search for another thread
 * in a runnable state amongst those it has. It may decide to return to user
 * mode with a new 'empty' thread if there are no runnable threads.
 * Threads are associated with a KSE for cache reasons, but a scheduled KSE with
 * no runnable thread will try to take a thread from a sibling KSE before
 * surrendering its quantum. In some schemes it gets its quantum from the KSEG
 * and contributes to draining that quantum, along with the other KSEs in
 * the group. (undecided)
 */
struct kse;

/*
 * The KSEGRP is allocated resources across a number of CPUs.
 * (including a number of CPU x QUANTA).  It parcels these quanta up among
 * its KSEs, each of which should be running on a different CPU.
 * Priority and total available scheduled quanta are properties of a KSEGRP.
 * Multiple KSEGRPs in a single process compete against each other
 * for total quanta in the same way that a forked child competes against
 * its parent process.
 */
struct ksegrp;

/*
 * A process is the owner of all system resources allocated to a task
 * except CPU quanta.
 * All KSEGs under one process see, and have the same access to, these
 * resources (e.g. files, memory, sockets, permissions, kqueues).
 * A process may compete for CPU cycles on the same basis as a
 * forked process cluster by spawning several KSEGRPs. 
 */
struct proc;

/***************
 * In pictures:
 With a single run queue used by all processors:

 RUNQ: --->KSE---KSE--...               SLEEPQ:[]---THREAD---THREAD---THREAD
	   |   /                               []---THREAD
	   KSEG---THREAD--THREAD--THREAD       []
	                                       []---THREAD---THREAD

  (processors run THREADs from the KSEG until they are exhausted or
  the KSEG exhausts its quantum) 

With PER-CPU run queues:
KSEs are placed on the separate run queues directly.
They would be given priorities calculated from the KSEG.

 *
 *****************/

/*
 * Kernel runnable context (thread).
 * This is what is put to sleep and reactivated.
 * The first KSE available in the correct group will run this thread.
 * If several are available, use the one on the same CPU as last time.
 */
struct thread {
	struct proc	*td_proc;	/* Associated process. */
	struct ksegrp	*td_ksegrp;	/* Associated KSEG. */
	struct kse	*td_last_kse;	/* Where it wants to be if possible. */
	struct kse	*td_kse;	/* Current KSE if running. */
	TAILQ_ENTRY(thread) td_plist;	/* All threads in this proc */
	TAILQ_ENTRY(thread) td_kglist;	/* All threads in this ksegrp */

	/* The two queues below should someday be merged */
	TAILQ_ENTRY(thread) td_slpq; 	/* (j) Sleep queue. XXXKSE */ 
	TAILQ_ENTRY(thread) td_blkq; 	/* (j) Mutex queue. XXXKSE */ 
	TAILQ_ENTRY(thread) td_runq; 	/* (j) Run queue(s). XXXKSE */ 

	TAILQ_HEAD(, selinfo) td_selq;	/* (p) List of selinfos. */

#define	td_startzero td_flags
	int		td_flags;	/* (j) TDF_* flags. */
	int		td_dupfd;	/* (k) Ret value from fdopen. XXX */
	void		*td_wchan;	/* (j) Sleep address. */
	const char	*td_wmesg;	/* (j) Reason for sleep. */
	u_char		td_lastcpu;	/* (j) Last cpu we were on. */
	u_char		td_inktr;	/* (k) Currently handling a KTR. */
	u_char		td_inktrace;	/* (k) Currently handling a KTRACE. */
	short		td_locks;	/* (k) DEBUG: lockmgr count of locks */
	struct mtx	*td_blocked;	/* (j) Mutex process is blocked on. */
	struct ithd	*td_ithd;	/* (b) For interrupt threads only. */
	const char	*td_mtxname;	/* (j) Name of mutex blocked on. */
	LIST_HEAD(, mtx) td_contested;	/* (j) Contested locks. */
	struct lock_list_entry *td_sleeplocks; /* (k) Held sleep locks. */
	int		td_intr_nesting_level; /* (k) Interrupt recursion. */
#define	td_endzero td_md

#define	td_startcopy td_endzero
	/* XXXKSE p_md is in the "on your own" section in old struct proc */
	struct mdthread td_md;		/* (k) Any machine-dependent fields. */
	register_t 	td_retval[2];	/* (k) Syscall aux returns. */
	u_char		td_base_pri;	/* (j) Thread base kernel priority. */
	u_char		td_priority;	/* (j) Thread active priority. */
#define	td_endcopy td_pcb

	struct ucred	*td_ucred;	/* (k) Reference to credentials. */
	struct pcb	*td_pcb;	/* (k) Kernel VA of pcb and kstack. */
	struct callout	td_slpcallout;	/* (h) Callout for sleep. */
	struct trapframe *td_frame;	/* (k) */
	struct vm_object *td_kstack_obj;/* (a) Kstack object. */
	vm_offset_t	td_kstack;	/* Kernel VA of kstack. */
	u_int		td_critnest;	/* (k) Critical section nest level. */
};

/*
 * The schedulable entity that can be given a context to run.
 * A process may have several of these. Probably one per processor
 * but possibly a few more. In this universe they are grouped
 * with a KSEG that contains the priority and niceness
 * for the group.
 */
struct kse {
	struct proc	*ke_proc;	/* Associated process. */
	struct ksegrp	*ke_ksegrp;	/* Associated KSEG. */
	struct thread	*ke_thread;	/* Associated thread, if running. */
	TAILQ_ENTRY(kse) ke_kglist;	/* Queue of all KSEs in ke_ksegrp. */
	TAILQ_ENTRY(kse) ke_kgrlist;	/* Queue of all KSEs in this state. */
	TAILQ_ENTRY(kse) ke_procq;	/* (j) Run queue. */
	TAILQ_HEAD(, thread) ke_runq;	/* (td_runq) RUNNABLE bound to KSE. */

#define	ke_startzero ke_flags
	int		ke_flags;	/* (j) KEF_* flags. */
	/*u_int		ke_estcpu; */	/* (j) Time averaged val of cpticks. */
	int		ke_cpticks;	/* (j) Ticks of cpu time. */
	fixpt_t		ke_pctcpu;	/* (j) %cpu during p_swtime. */
	u_int64_t	ke_uu;		/* (j) Previous user time in usec. */
	u_int64_t	ke_su;		/* (j) Previous system time in usec. */
	u_int64_t	ke_iu;		/* (j) Previous intr time in usec. */
	u_int64_t	ke_uticks;	/* (j) Statclock hits in user mode. */
	u_int64_t	ke_sticks;	/* (j) Statclock hits in system mode. */
	u_int64_t	ke_iticks;	/* (j) Statclock hits in intr. */
	u_char		ke_oncpu;	/* (j) Which cpu we are on. */
	u_int		ke_slptime;	/* (j) Time since last idle. */
	char		ke_rqindex;	/* (j) Run queue index. */
#define	ke_endzero ke_priority

#define	ke_startcopy ke_endzero
	u_char		ke_priority;	/* (j) Process priority. */
	u_char		ke_usrpri;	/* (j) User pri from cpu & nice. */
#define	ke_endcopy ke_end

	int		ke_end;		/* dummy entry */
};

/*
 * Kernel-scheduled entity group (KSEG).  The scheduler considers each KSEG to
 * be an indivisible unit from a time-sharing perspective, though each KSEG may
 * contain multiple KSEs.
 */
struct ksegrp {
	struct proc	*kg_proc;	/* Process that contains this KSEG. */
	TAILQ_ENTRY(ksegrp) kg_ksegrp;	/* Queue of KSEGs in kg_proc. */
	TAILQ_HEAD(, kse) kg_kseq;	/* (ke_kglist) All KSEs. */
	TAILQ_HEAD(, kse) kg_rq;	/* (ke_kgrlist) Runnable KSEs. */
	TAILQ_HEAD(, kse) kg_iq;	/* (ke_kgrlist) Idle KSEs. */
	TAILQ_HEAD(, thread) kg_threads;/* (td_kglist) All threads. */
	TAILQ_HEAD(, thread) kg_runq;	/* (td_runq) Unbound RUNNABLE threads */
	TAILQ_HEAD(, thread) kg_slpq;	/* (td_runq) NONRUNNABLE threads. */

#define	kg_startzero kg_estcpu
	u_int		kg_estcpu;	/* Sum of the same field in KSEs. */
 	u_int		kg_slptime;	/* (j) How long completely blocked. */
#define	kg_endzero kg_pri_class

#define	kg_startcopy 	kg_endzero
	u_char		kg_pri_class;	/* (j) Scheduling class. */
	u_char		kg_user_pri;	/* (j) User pri from estcpu and nice. */
	char		kg_nice;	/* (j?/k?) Process "nice" value. */
	struct rtprio	kg_rtprio;	/* (j) Realtime priority. */
#define	kg_endcopy kg_runnable

	int		kg_runnable;	/* Num runnable threads on queue. */
	int		kg_runq_kses;	/* Num KSEs on runq. */
	int		kg_kses;	/* Num KSEs in group. */
};

/*
 * The old-fashioned process. May have multiple threads, KSEGRPs
 * and KSEs. Starts off with a single embedded KSEGRP, KSE and THREAD.
 */
struct proc {
	LIST_ENTRY(proc) p_list;	/* (d) List of all processes. */
	TAILQ_HEAD(, ksegrp) p_ksegrps;	/* (kg_ksegrp) All KSEGs. */
	TAILQ_HEAD(, thread) p_threads;	/* (td_plist) Threads. (shortcut) */
	struct ucred	*p_ucred;	/* (c) Process owner's identity. */
	struct filedesc	*p_fd;		/* (b) Ptr to open files structure. */
					/* Accumulated stats for all KSEs? */
	struct pstats	*p_stats;	/* (b) Accounting/statistics (CPU). */
	struct plimit	*p_limit;	/* (m) Process limits. */
	struct vm_object *p_upages_obj; /* (a) Upages object. */
	struct procsig	*p_procsig;	/* (c) Signal actions, state (CPU). */
 
	struct ksegrp	p_ksegrp;
	struct kse	p_kse;
	struct thread	p_xxthread;

	/*
	 * The following don't make too much sense..
	 * See the td_ or ke_ versions of the same flags
	 */
	int		p_flag;		/* (c) P_* flags. */
	int		p_sflag;	/* (j) PS_* flags. */
	int		p_stat;		/* (j) S* process status. */

	pid_t		p_pid;		/* (b) Process identifier. */
	LIST_ENTRY(proc) p_hash;	/* (d) Hash chain. */
	LIST_ENTRY(proc) p_pglist;	/* (g + e) List of processes in pgrp. */
	struct proc	*p_pptr;	/* (c + e) Pointer to parent process. */
	LIST_ENTRY(proc) p_sibling;	/* (e) List of sibling processes. */
	LIST_HEAD(, proc) p_children;	/* (e) Pointer to list of children. */
	struct mtx	p_mtx;		/* (k) Lock for this struct. */

/* The following fields are all zeroed upon creation in fork. */
#define	p_startzero	p_oppid
	pid_t		p_oppid; 	/* (c + e) Save ppid in ptrace. XXX */
	struct vmspace	*p_vmspace;	/* (b) Address space. */
	u_int		p_swtime;	/* (j) Time swapped in or out. */
	struct itimerval p_realtimer;	/* (h?/k?) Alarm timer. */
	struct bintime	p_runtime;	/* (j) Real time. */
	int		p_traceflag;	/* (o) Kernel trace points. */
	struct vnode	*p_tracep;	/* (c + o) Trace to vnode. */
	sigset_t	p_siglist;	/* (c) Sigs arrived, not delivered. */
	struct vnode	*p_textvp;	/* (b) Vnode of executable. */
	char		p_lock;		/* (c) Proclock (prevent swap) count. */
	struct klist p_klist;		/* (c) Knotes attached to this proc. */
	struct sigiolst	p_sigiolst;	/* (c) List of sigio sources. */
	int		p_sigparent;	/* (c) Signal to parent on exit. */
	sigset_t	p_oldsigmask;	/* (c) Saved mask from pre sigpause. */
	int		p_sig;		/* (n) For core dump/debugger XXX. */
	u_long		p_code;		/* (n) For core dump/debugger XXX. */
	u_int		p_stops;	/* (c) Stop event bitmask. */
	u_int		p_stype;	/* (c) Stop event type. */
	char		p_step;		/* (c) Process is stopped. */
	u_char		p_pfsflags;	/* (c) Procfs flags. */
	struct nlminfo	*p_nlminfo;	/* (?) Only used by/for lockd. */
	void		*p_aioinfo;	/* (c) ASYNC I/O info. */
/* End area that is zeroed on creation. */
#define	p_startcopy	p_sigmask

/* The following fields are all copied upon creation in fork. */
#define	p_endzero	p_startcopy
	sigset_t	p_sigmask;	/* (c) Current signal mask. */
	stack_t		p_sigstk;	/* (c) Stack ptr and on-stack flag. */
	int		p_magic;	/* (b) Magic number. */
	char		p_comm[MAXCOMLEN + 1];	/* (b) Process name. */
	struct pgrp	*p_pgrp;	/* (c + e) Pointer to process group. */
	struct sysentvec *p_sysent;	/* (b) Syscall dispatch info. */
	struct pargs	*p_args;	/* (c) Process arguments. */
/* End area that is copied on creation. */
#define	p_endcopy	p_xstat

	u_short		p_xstat;	/* (c) Exit status; also stop sig. */
	struct mdproc	p_md;		/* (c) Any machine-dependent fields. */
	struct callout	p_itcallout;	/* (h) Interval timer callout. */
	struct user	*p_uarea;	/* (k) Kernel VA of u-area (CPU) */
	u_short		p_acflag;	/* (c) Accounting flags. */
	struct rusage	*p_ru;		/* (a) Exit information. XXX */
	struct proc	*p_peers;	/* (c) */
	struct proc	*p_leader;	/* (b) */
	void		*p_emuldata;	/* (c) Emulator state data. */
};

#define	p_rlimit	p_limit->pl_rlimit
#define	p_sigacts	p_procsig->ps_sigacts
#define	p_sigignore	p_procsig->ps_sigignore
#define	p_sigcatch	p_procsig->ps_sigcatch
#define	p_session	p_pgrp->pg_session
#define	p_pgid		p_pgrp->pg_id

#define	NOCPU	0xff		/* For p_oncpu when we aren't on a CPU. */

/* Status values (p_stat). */
#define	SIDL	1		/* Process being created by fork. */
#define	SRUN	2		/* Currently runnable. */
#define	SSLEEP	3		/* Sleeping on an address. */
#define	SSTOP	4		/* Process debugging or suspension. */
#define	SZOMB	5		/* Awaiting collection by parent. */
#define	SWAIT	6		/* Waiting for interrupt. */
#define	SMTX	7		/* Blocked on a mutex. */

/* These flags are kept in p_flag. */
#define	P_ADVLOCK	0x00001	/* Process may hold a POSIX advisory lock. */
#define	P_CONTROLT	0x00002	/* Has a controlling terminal. */
#define	P_KTHREAD	0x00004	/* Kernel thread. (*)*/
#define	P_NOLOAD	0x00008	/* Ignore during load avg calculations. */
#define	P_PPWAIT	0x00010	/* Parent is waiting for child to exec/exit. */
#define	P_SUGID		0x00100	/* Had set id privileges since last exec. */
#define	P_SYSTEM	0x00200	/* System proc: no sigs, stats or swapping. */
#define	P_TRACED	0x00800	/* Debugged process being traced. */
#define	P_WAITED	0x01000	/* Debugging process has waited for child. */
#define	P_WEXIT		0x02000	/* Working on exiting. */
#define	P_EXEC		0x04000	/* Process called exec. */
#define	P_KSES		0x08000	/* Process is using KSEs. */
#define	P_CONTINUED	0x10000	/* Proc has continued from a stopped state. */

/* Should be moved to machine-dependent areas. */
#define	P_UNUSED100000	0x100000
#define	P_COWINPROGRESS	0x400000 /* Snapshot copy-on-write in progress. */

#define	P_JAILED	0x1000000 /* Process is in jail. */
#define	P_OLDMASK	0x2000000 /* Need to restore mask after suspend. */
#define	P_ALTSTACK	0x4000000 /* Have alternate signal stack. */
#define	P_INEXEC	0x8000000 /* Process is in execve(). */

/* These flags are kept in p_sflag and are protected with sched_lock. */
#define	PS_INMEM	0x00001	/* Loaded into memory. */
#define	PS_PROFIL	0x00004	/* Has started profiling. */
#define	PS_ALRMPEND	0x00020	/* Pending SIGVTALRM needs to be posted. */
#define	PS_PROFPEND	0x00040	/* Pending SIGPROF needs to be posted. */
#define	PS_SWAPINREQ	0x00100	/* Swapin request due to wakeup. */
#define	PS_SWAPPING	0x00200	/* Process is being swapped. */
#define	PS_NEEDSIGCHK	0x02000	/* Process may need signal delivery. */

/* flags kept in td_flags */
#define	TDF_ONRUNQ	0x00001	/* This thread is on a run queue. */
#define	TDF_SINTR	0x00008	/* Sleep is interruptible. */
#define	TDF_TIMEOUT	0x00010	/* Timing out during sleep. */
#define	TDF_SELECT	0x00040	/* Selecting; wakeup/waiting danger. */
#define	TDF_CVWAITQ	0x00080	/* Thread is on a cv_waitq (not slpq). */
#define	TDF_TIMOFAIL	0x01000	/* Timeout from sleep after we were awake. */
#define	TDF_DEADLKTREAT	0x800000 /* Lock acquisition - deadlock treatment. */

/* flags kept in ke_flags */
#define	KEF_ONRUNQ	0x00001	/* This KE is on a run queue */
#define	KEF_OWEUPC	0x00002	/* Owe process an addupc() call at next ast. */
#define	KEF_ASTPENDING	0x00400	/* KSE has a pending ast. */
#define	KEF_NEEDRESCHED	0x00800	/* Process needs to yield. */


#define	P_MAGIC		0xbeefface

#ifdef _KERNEL

#ifdef MALLOC_DECLARE
MALLOC_DECLARE(M_PARGS);
MALLOC_DECLARE(M_PGRP);
MALLOC_DECLARE(M_SESSION);
MALLOC_DECLARE(M_SUBPROC);
MALLOC_DECLARE(M_ZOMBIE);
#endif

#define	FOREACH_PROC_IN_SYSTEM(p)					\
	LIST_FOREACH((p), &allproc, p_list)
#define	FOREACH_KSEGRP_IN_PROC(p, kg)					\
	TAILQ_FOREACH((kg), &(p)->p_ksegrps, kg_ksegrp)
#define	FOREACH_THREAD_IN_GROUP(kg, td)					\
	TAILQ_FOREACH((td), &(kg)->kg_threads, td_kglist)
#define	FOREACH_KSE_IN_GROUP(kg, ke)					\
	TAILQ_FOREACH((ke), &(kg)->kg_kseq, ke_kglist)
#define	FOREACH_THREAD_IN_PROC(p, td)					\
	TAILQ_FOREACH((td), &(p)->p_threads, td_plist)

/* XXXKSE the lines below should probably only be used in 1:1 code */
#define FIRST_THREAD_IN_PROC(p) TAILQ_FIRST(&p->p_threads)
#define FIRST_KSEGRP_IN_PROC(p) TAILQ_FIRST(&p->p_ksegrps)
#define FIRST_KSE_IN_KSEGRP(kg) TAILQ_FIRST(&kg->kg_kseq)
#define FIRST_KSE_IN_PROC(p) FIRST_KSE_IN_KSEGRP(FIRST_KSEGRP_IN_PROC(p))
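
/*
 * Illustrative sketch (not part of the original header): walking every
 * thread in a process with the iterators above.  The caller is assumed to
 * hold the lock protecting the p_threads list; the function name
 * count_threads is hypothetical and used only for illustration.
 *
 *	static int
 *	count_threads(struct proc *p)
 *	{
 *		struct thread *td;
 *		int n = 0;
 *
 *		FOREACH_THREAD_IN_PROC(p, td)
 *			n++;
 *		return (n);
 *	}
 */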

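/*
 * Determine whether the stack pointer "sp" is on the process's alternate
 * signal stack.  Returns 0 if no alternate stack is configured.
 */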
static __inline int
sigonstack(size_t sp)
{
	register struct thread *td = curthread;
	struct proc *p = td->td_proc;

	return ((p->p_flag & P_ALTSTACK) ?
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
	    ((p->p_sigstk.ss_size == 0) ? (p->p_sigstk.ss_flags & SS_ONSTACK) :
		((sp - (size_t)p->p_sigstk.ss_sp) < p->p_sigstk.ss_size))
#else
	    ((sp - (size_t)p->p_sigstk.ss_sp) < p->p_sigstk.ss_size)
#endif
	    : 0);
}

/* Handy macro to determine if p1 can mangle p2. */
#define	PRISON_CHECK(p1, p2) \
	((p1)->p_prison == NULL || (p1)->p_prison == (p2)->p_prison)

/*
 * We use process IDs <= PID_MAX; PID_MAX + 1 must also fit in a pid_t,
 * as it is used to represent "no process group".
 */
#define	PID_MAX		99999
#define	NO_PID		100000

#define	SESS_LEADER(p)	((p)->p_session->s_leader == (p))
#define	SESSHOLD(s)	((s)->s_count++)
#define	SESSRELE(s) {							\
	if (--(s)->s_count == 0)					\
		FREE(s, M_SESSION);					\
}
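
/*
 * Illustrative sketch (not part of the original header): a caller that
 * needs the session to remain valid across a blocking operation takes a
 * reference and drops it afterwards; locking of s_count (keyed (m) above)
 * is omitted here for brevity.
 *
 *	SESSHOLD(sess);
 *	...				use sess, possibly sleeping
 *	SESSRELE(sess);
 */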

#define	STOPEVENT(p, e, v) do {						\
	PROC_LOCK(p);							\
	_STOPEVENT((p), (e), (v));					\
	PROC_UNLOCK(p);							\
} while (0)
#define	_STOPEVENT(p, e, v) do {					\
	PROC_LOCK_ASSERT(p, MA_OWNED);					\
	if ((p)->p_stops & (e)) {					\
		stopevent((p), (e), (v));				\
	}								\
} while (0)
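
/*
 * Illustrative sketch (not part of the original header): code that may
 * have to stop for a procfs stop event uses STOPEVENT() when it does not
 * already hold the proc lock and _STOPEVENT() when it does, e.g.
 *
 *	STOPEVENT(p, S_SCE, narg);	stop event types are in sys/pioctl.h
 */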

/* Lock and unlock a process. */
#define	PROC_LOCK(p)	mtx_lock(&(p)->p_mtx)
#define	PROC_TRYLOCK(p)	mtx_trylock(&(p)->p_mtx)
#define	PROC_UNLOCK(p)	mtx_unlock(&(p)->p_mtx)
#define	PROC_LOCKED(p)	mtx_owned(&(p)->p_mtx)
#define	PROC_LOCK_ASSERT(p, type)	mtx_assert(&(p)->p_mtx, (type))
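
/*
 * Illustrative sketch (not part of the original header): fields keyed (c)
 * in struct proc above are accessed with the proc mutex held, e.g.
 *
 *	PROC_LOCK(p);
 *	p->p_flag |= P_CONTROLT;
 *	PROC_UNLOCK(p);
 */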

/* Lock and unlock a process group. */
#define PGRP_LOCK(pg)	mtx_lock(&(pg)->pg_mtx)
#define PGRP_UNLOCK(pg)	mtx_unlock(&(pg)->pg_mtx)
#define	PGRP_LOCKED(pg)	mtx_owned(&(pg)->pg_mtx)
#define	PGRP_LOCK_ASSERT(pg, type)	mtx_assert(&(pg)->pg_mtx, (type))

#define	PGRP_LOCK_PGSIGNAL(pg)						\
	do {								\
		if ((pg) != NULL)					\
			PGRP_LOCK(pg);					\
	} while (0)

#define	PGRP_UNLOCK_PGSIGNAL(pg)					\
	do {								\
		if ((pg) != NULL)					\
			PGRP_UNLOCK(pg);				\
	} while (0)

/* Lock and unlock a session. */
#define SESS_LOCK(s)	mtx_lock(&(s)->s_mtx)
#define SESS_UNLOCK(s)	mtx_unlock(&(s)->s_mtx)
#define	SESS_LOCKED(s)	mtx_owned(&(s)->s_mtx)
#define	SESS_LOCK_ASSERT(s, type)	mtx_assert(&(s)->s_mtx, (type))

/* Hold process U-area in memory, normally for ptrace/procfs work. */
#define	PHOLD(p) do {							\
	PROC_LOCK(p);							\
	_PHOLD(p);							\
	PROC_UNLOCK(p);							\
} while (0)
#define	_PHOLD(p) do {							\
	PROC_LOCK_ASSERT((p), MA_OWNED);				\
	if ((p)->p_lock++ == 0)						\
		faultin((p));						\
} while (0)

#define	PRELE(p) do {							\
	PROC_LOCK((p));							\
	_PRELE((p));							\
	PROC_UNLOCK((p));						\
} while (0)
#define	_PRELE(p) do {							\
	PROC_LOCK_ASSERT((p), MA_OWNED);				\
	(--(p)->p_lock);						\
} while (0)
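
/*
 * Illustrative sketch (not part of the original header): PHOLD()/PRELE()
 * bracket work that needs the process U-area resident, e.g. ptrace or
 * procfs access; the first hold faults the U-area back in via faultin():
 *
 *	PHOLD(p);
 *	...				access the U-area / pcb
 *	PRELE(p);
 */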

/* Lock and unlock process arguments. */
#define	PARGS_LOCK(p)		mtx_lock(&pargs_ref_lock)
#define	PARGS_UNLOCK(p)		mtx_unlock(&pargs_ref_lock)

#define	PIDHASH(pid)	(&pidhashtbl[(pid) & pidhash])
extern LIST_HEAD(pidhashhead, proc) *pidhashtbl;
extern u_long pidhash;

#define	PGRPHASH(pgid)	(&pgrphashtbl[(pgid) & pgrphash])
extern LIST_HEAD(pgrphashhead, pgrp) *pgrphashtbl;
extern u_long pgrphash;

extern struct sx allproc_lock;
extern struct sx proctree_lock;
extern struct mtx pargs_ref_lock;
extern struct proc proc0;		/* Process slot for swapper. */
extern struct thread thread0;		/* Primary thread in proc0 */
extern int hogticks;			/* Limit on kernel cpu hogs. */
extern int nprocs, maxproc;		/* Current and max number of procs. */
extern int maxprocperuid;		/* Max procs per uid. */
extern u_long ps_arg_cache_limit;
extern int ps_argsopen;
extern int ps_showallprocs;
extern int sched_quantum;		/* Scheduling quantum in ticks. */

LIST_HEAD(proclist, proc);
TAILQ_HEAD(procqueue, proc);
TAILQ_HEAD(threadqueue, thread);
extern struct proclist allproc;		/* List of all processes. */
extern struct proclist zombproc;	/* List of zombie processes. */
extern struct proc *initproc, *pageproc; /* Process slots for init, pager. */
extern struct proc *updateproc;		/* Process slot for syncer (sic). */

extern uma_zone_t proc_zone;

extern int lastpid;

/*
 * XXX macros for scheduler.  Shouldn't be here, but currently needed for
 * bounding the dubious p_estcpu inheritance in wait1().
 * INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in
 * the range 100-256 Hz (approximately).
 */
#define	ESTCPULIM(e) \
    min((e), INVERSE_ESTCPU_WEIGHT * (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - \
	     RQ_PPQ) + INVERSE_ESTCPU_WEIGHT - 1)
#define	INVERSE_ESTCPU_WEIGHT	8	/* 1 / (priorities per estcpu level). */
#define	NICE_WEIGHT	1		/* Priorities per nice level. */
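
/*
 * Worked example (not part of the original header): assuming
 * PRIO_MAX - PRIO_MIN == 40 and RQ_PPQ == 4, the bound above is
 * 8 * (1 * 40 - 4) + 8 - 1 == 295, so ESTCPULIM(e) clamps e to at most 295.
 */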

struct	proc *pfind(pid_t);	/* Find process by id. */
struct	pgrp *pgfind(pid_t);	/* Find process group by id. */
struct	proc *zpfind(pid_t);	/* Find zombie process by id. */

void	ast(struct trapframe *framep);
struct	thread *choosethread(void);
int	cr_cansignal(struct ucred *cred, struct proc *proc, int signum);
int	enterpgrp(struct proc *p, pid_t pgid, struct pgrp *pgrp, struct session *sess);
int	enterthispgrp(struct proc *p, struct pgrp *pgrp);
void	faultin(struct proc *p);
void	fixjobc(struct proc *p, struct pgrp *pgrp, int entering);
int	fork1(struct thread *, int, struct proc **);
void	fork_exit(void (*)(void *, struct trapframe *), void *,
	    struct trapframe *);
void	fork_return(struct thread *, struct trapframe *);
int	inferior(struct proc *p);
int	leavepgrp(struct proc *p);
void	mi_switch(void);
int	p_candebug(struct thread *td, struct proc *p);
int	p_cansee(struct thread *td, struct proc *p);
int	p_cansched(struct thread *td, struct proc *p);
int	p_cansignal(struct thread *td, struct proc *p, int signum);
struct	pargs *pargs_alloc(int len);
void	pargs_drop(struct pargs *pa);
void	pargs_free(struct pargs *pa);
void	pargs_hold(struct pargs *pa);
void	procinit(void);
void	proc_linkup(struct proc *p, struct ksegrp *kg,
	    struct kse *ke, struct thread *td);
void	proc_reparent(struct proc *child, struct proc *newparent);
int	procrunnable(void);
void	remrunqueue(struct thread *);
void	resetpriority(struct ksegrp *);
int	roundrobin_interval(void);
void	schedclock(struct thread *);
int	securelevel_ge(struct ucred *cr, int level);
int	securelevel_gt(struct ucred *cr, int level);
void	setrunnable(struct thread *);
void	setrunqueue(struct thread *);
void	setsugid(struct proc *p);
void	sleepinit(void);
void	stopevent(struct proc *, u_int, u_int);
void	cpu_idle(void);
void	cpu_switch(void);
void	cpu_throw(void) __dead2;
void	unsleep(struct thread *);
void	updatepri(struct thread *);
void	userret(struct thread *, struct trapframe *, u_int);
void	maybe_resched(struct thread *);

void	cpu_exit(struct thread *);
void	cpu_sched_exit(struct thread *);
void	exit1(struct thread *, int) __dead2;
void	cpu_fork(struct thread *, struct proc *, struct thread *, int);
void	cpu_set_fork_handler(struct thread *, void (*)(void *), void *);
void	cpu_wait(struct proc *);
int	cpu_coredump(struct thread *, struct vnode *, struct ucred *);
struct thread *thread_get(struct proc *);
#endif	/* _KERNEL */

#endif	/* !_SYS_PROC_H_ */