summary refs log tree commit diff stats
path: root/sys
diff options
context:
space:
mode:
authordas <das@FreeBSD.org>2004-11-20 23:00:59 +0000
committerdas <das@FreeBSD.org>2004-11-20 23:00:59 +0000
commit6175c08488edf2a144f21145ecb8c7ced37c3bbb (patch)
treeeaa83ea2ab9f7a13934ceeef53104c6151eae2c3 /sys
parentfeeee6f8b5e72ea3f0baa19c2a36457d49e50784 (diff)
downloadFreeBSD-src-6175c08488edf2a144f21145ecb8c7ced37c3bbb.zip
FreeBSD-src-6175c08488edf2a144f21145ecb8c7ced37c3bbb.tar.gz
Remove local definitions of RANGEOF() and use __rangeof() instead.
Also remove a few bogus casts.
Diffstat (limited to 'sys')
-rw-r--r--sys/kern/kern_fork.c15
-rw-r--r--sys/kern/kern_kse.c12
-rw-r--r--sys/kern/kern_proc.c7
-rw-r--r--sys/kern/kern_switch.c2
-rw-r--r--sys/kern/kern_thr.c14
-rw-r--r--sys/kern/kern_thread.c2
6 files changed, 17 insertions, 35 deletions
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index 2e9830a..fe3601e 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -467,22 +467,19 @@ again:
PROC_LOCK(p2);
PROC_LOCK(p1);
-#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
-
bzero(&p2->p_startzero,
- (unsigned) RANGEOF(struct proc, p_startzero, p_endzero));
+ __rangeof(struct proc, p_startzero, p_endzero));
bzero(&td2->td_startzero,
- (unsigned) RANGEOF(struct thread, td_startzero, td_endzero));
+ __rangeof(struct thread, td_startzero, td_endzero));
bzero(&kg2->kg_startzero,
- (unsigned) RANGEOF(struct ksegrp, kg_startzero, kg_endzero));
+ __rangeof(struct ksegrp, kg_startzero, kg_endzero));
bcopy(&p1->p_startcopy, &p2->p_startcopy,
- (unsigned) RANGEOF(struct proc, p_startcopy, p_endcopy));
+ __rangeof(struct proc, p_startcopy, p_endcopy));
bcopy(&td->td_startcopy, &td2->td_startcopy,
- (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy));
+ __rangeof(struct thread, td_startcopy, td_endcopy));
bcopy(&td->td_ksegrp->kg_startcopy, &kg2->kg_startcopy,
- (unsigned) RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
-#undef RANGEOF
+ __rangeof(struct ksegrp, kg_startcopy, kg_endcopy));
td2->td_sigstk = td->td_sigstk;
diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c
index 5a98f82..1e0e156 100644
--- a/sys/kern/kern_kse.c
+++ b/sys/kern/kern_kse.c
@@ -61,8 +61,6 @@ extern int max_threads_hits;
extern struct mtx kse_zombie_lock;
-#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
-
TAILQ_HEAD(, kse_upcall) zombie_upcalls =
TAILQ_HEAD_INITIALIZER(zombie_upcalls);
@@ -600,10 +598,10 @@ kse_create(struct thread *td, struct kse_create_args *uap)
if (uap->newgroup) {
newkg = ksegrp_alloc();
- bzero(&newkg->kg_startzero, RANGEOF(struct ksegrp,
- kg_startzero, kg_endzero));
+ bzero(&newkg->kg_startzero,
+ __rangeof(struct ksegrp, kg_startzero, kg_endzero));
bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
- RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
+ __rangeof(struct ksegrp, kg_startcopy, kg_endcopy));
sched_init_concurrency(newkg);
PROC_LOCK(p);
if (p->p_numksegrps >= max_groups_per_proc) {
@@ -1033,7 +1031,7 @@ thread_alloc_spare(struct thread *td)
spare = thread_alloc();
td->td_standin = spare;
bzero(&spare->td_startzero,
- (unsigned) RANGEOF(struct thread, td_startzero, td_endzero));
+ __rangeof(struct thread, td_startzero, td_endzero));
spare->td_proc = td->td_proc;
spare->td_ucred = crhold(td->td_ucred);
}
@@ -1068,7 +1066,7 @@ thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
* do the crhold here because we are in schedlock already.
*/
bcopy(&td->td_startcopy, &td2->td_startcopy,
- (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy));
+ __rangeof(struct thread, td_startcopy, td_endcopy));
thread_link(td2, ku->ku_ksegrp);
/* inherit parts of blocked thread's context as a good template */
cpu_set_upcall(td2, td);
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index 63393b3..75f19ad 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -827,13 +827,10 @@ void
pstats_fork(struct pstats *src, struct pstats *dst)
{
-#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
-
bzero(&dst->pstat_startzero,
- (unsigned)RANGEOF(struct pstats, pstat_startzero, pstat_endzero));
+ __rangeof(struct pstats, pstat_startzero, pstat_endzero));
bcopy(&src->pstat_startcopy, &dst->pstat_startcopy,
- (unsigned)RANGEOF(struct pstats, pstat_startcopy, pstat_endcopy));
-#undef RANGEOF
+ __rangeof(struct pstats, pstat_startcopy, pstat_endcopy));
}
void
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index 06a34d7..4f1c7ee 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -910,7 +910,6 @@ runq_remove(struct runq *rq, struct kse *ke)
/****** functions that are temporarily here ***********/
#include <vm/uma.h>
-#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
extern struct mtx kse_zombie_lock;
/*
@@ -929,7 +928,6 @@ sched_newproc(struct proc *p, struct ksegrp *kg, struct thread *td)
sched_init_concurrency(kg);
}
-#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
/*
* thread is being either created or recycled.
* Fix up the per-scheduler resources associated with it.
diff --git a/sys/kern/kern_thr.c b/sys/kern/kern_thr.c
index 402667a..baa9224 100644
--- a/sys/kern/kern_thr.c
+++ b/sys/kern/kern_thr.c
@@ -58,12 +58,6 @@ SYSCTL_INT(_kern_threads, OID_AUTO, thr_concurrency, CTLFLAG_RW,
&thr_concurrency, 0, "a concurrency value if not default");
/*
- * Back end support functions.
- */
-
-#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
-
-/*
* System call interface.
*/
int
@@ -109,15 +103,15 @@ thr_create(struct thread *td, struct thr_create_args *uap)
}
bzero(&newtd->td_startzero,
- (unsigned) RANGEOF(struct thread, td_startzero, td_endzero));
+ __rangeof(struct thread, td_startzero, td_endzero));
bcopy(&td->td_startcopy, &newtd->td_startcopy,
- (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy));
+ __rangeof(struct thread, td_startcopy, td_endcopy));
if (scope_sys) {
bzero(&newkg->kg_startzero,
- (unsigned)RANGEOF(struct ksegrp, kg_startzero, kg_endzero));
+ __rangeof(struct ksegrp, kg_startzero, kg_endzero));
bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
- (unsigned)RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
+ __rangeof(struct ksegrp, kg_startcopy, kg_endcopy));
}
newtd->td_proc = td->td_proc;
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index a3f34f6..e41d813 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -72,8 +72,6 @@ SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
int virtual_cpu;
-#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
-
TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
struct mtx kse_zombie_lock;
OpenPOWER on IntegriCloud