-rw-r--r--  sys/amd64/amd64/trap.c      |  4
-rw-r--r--  sys/arm/arm/trap.c          |  8
-rw-r--r--  sys/i386/i386/trap.c        |  4
-rw-r--r--  sys/ia64/ia64/trap.c        |  6
-rw-r--r--  sys/kern/kern_clock.c       |  2
-rw-r--r--  sys/kern/kern_kse.c         |  4
-rw-r--r--  sys/kern/kern_lock.c        |  2
-rw-r--r--  sys/kern/kern_mutex.c       |  2
-rw-r--r--  sys/kern/kern_rwlock.c      |  2
-rw-r--r--  sys/kern/kern_switch.c      |  6
-rw-r--r--  sys/kern/kern_sx.c          |  2
-rw-r--r--  sys/kern/kern_synch.c       | 10
-rw-r--r--  sys/kern/p1003_1b.c         |  2
-rw-r--r--  sys/kern/sched_4bsd.c       | 20
-rw-r--r--  sys/kern/sched_ule.c        | 18
-rw-r--r--  sys/kern/subr_pcpu.c        |  6
-rw-r--r--  sys/kern/subr_sleepqueue.c  | 10
-rw-r--r--  sys/kern/subr_turnstile.c   |  8
-rw-r--r--  sys/kern/sys_generic.c      |  2
-rw-r--r--  sys/powerpc/aim/trap.c      |  2
-rw-r--r--  sys/powerpc/powerpc/trap.c  |  2
-rw-r--r--  sys/sun4v/sun4v/trap.c      |  2
22 files changed, 62 insertions, 62 deletions
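
Every hunk that follows applies the same mechanical substitution: trace points (CTR*), ddb printfs, and console warnings that used to print td->td_proc->p_comm (the process-wide command name) now print td->td_name (the per-thread name), so threads of a multi-threaded process can be told apart in the output. A minimal userland sketch of the before/after pattern follows; the struct definitions are trimmed stand-ins for illustration only, not the real struct proc / struct thread layouts, and the sample names and IDs are made up.

    /*
     * Illustrative stand-ins only -- not the real FreeBSD structures.
     */
    #include <stdio.h>

    struct proc {
            int  p_pid;
            char p_comm[20];                /* process-wide command name */
    };

    struct thread {
            int          td_tid;
            struct proc *td_proc;
            char         td_name[20];       /* per-thread name */
    };

    int
    main(void)
    {
            /* Example values; every thread of the process shares p_comm. */
            struct proc p = { .p_pid = 100, .p_comm = "nfsd" };
            struct thread td = { .td_tid = 100041, .td_proc = &p,
                .td_name = "nfsd: service" };

            /* Old form: prints the same name for every thread. */
            printf("tid %d, pid %d, \"%s\"\n", td.td_tid,
                td.td_proc->p_pid, td.td_proc->p_comm);

            /* New form: prints the thread's own name. */
            printf("tid %d, pid %d, \"%s\"\n", td.td_tid,
                td.td_proc->p_pid, td.td_name);
            return (0);
    }
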
diff --git a/sys/amd64/amd64/trap.c b/sys/amd64/amd64/trap.c
index 2df156c..805ac93 100644
--- a/sys/amd64/amd64/trap.c
+++ b/sys/amd64/amd64/trap.c
@@ -806,7 +806,7 @@ syscall(struct trapframe *frame)
 #endif
         CTR4(KTR_SYSC, "syscall enter thread %p pid %d proc %s code %d", td,
-            td->td_proc->p_pid, td->td_proc->p_comm, code);
+            td->td_proc->p_pid, td->td_name, code);
         td->td_syscalls++;
@@ -888,7 +888,7 @@ syscall(struct trapframe *frame)
         userret(td, frame);
         CTR4(KTR_SYSC, "syscall exit thread %p pid %d proc %s code %d", td,
-            td->td_proc->p_pid, td->td_proc->p_comm, code);
+            td->td_proc->p_pid, td->td_name, code);
 #ifdef KTRACE
         if (KTRPOINT(td, KTR_SYSRET))
diff --git a/sys/arm/arm/trap.c b/sys/arm/arm/trap.c
index 6e6d43d..8424a17 100644
--- a/sys/arm/arm/trap.c
+++ b/sys/arm/arm/trap.c
@@ -455,7 +455,7 @@ data_abort_handler(trapframe_t *tf)
                 if (error == ENOMEM) {
                         printf("VM: pid %d (%s), uid %d killed: "
-                            "out of swap\n", td->td_proc->p_pid, td->td_proc->p_comm,
+                            "out of swap\n", td->td_proc->p_pid, td->td_name,
                             (td->td_proc->p_ucred) ?
                             td->td_proc->p_ucred->cr_uid : -1);
                         ksig.signb = SIGKILL;
@@ -805,7 +805,7 @@ prefetch_abort_handler(trapframe_t *tf)
                 if (error == ENOMEM) {
                         printf("VM: pid %d (%s), uid %d killed: "
-                            "out of swap\n", td->td_proc->p_pid, td->td_proc->p_comm,
+                            "out of swap\n", td->td_proc->p_pid, td->td_name,
                             (td->td_proc->p_ucred) ?
                             td->td_proc->p_ucred->cr_uid : -1);
                         ksig.signb = SIGKILL;
@@ -927,7 +927,7 @@ syscall(struct thread *td, trapframe_t *frame, u_int32_t insn)
 #endif
         CTR4(KTR_SYSC, "syscall enter thread %p pid %d proc %s code %d", td,
-            td->td_proc->p_pid, td->td_proc->p_comm, code);
+            td->td_proc->p_pid, td->td_name, code);
         if (error == 0) {
                 td->td_retval[0] = 0;
                 td->td_retval[1] = 0;
@@ -989,7 +989,7 @@ bad:
         userret(td, frame);
         CTR4(KTR_SYSC, "syscall exit thread %p pid %d proc %s code %d", td,
-            td->td_proc->p_pid, td->td_proc->p_comm, code);
+            td->td_proc->p_pid, td->td_name, code);
         STOPEVENT(p, S_SCX, code);
         PTRACESTOP_SC(p, td, S_PT_SCX);
diff --git a/sys/i386/i386/trap.c b/sys/i386/i386/trap.c
index ee8fe6a..14f01d3 100644
--- a/sys/i386/i386/trap.c
+++ b/sys/i386/i386/trap.c
@@ -992,7 +992,7 @@ syscall(struct trapframe *frame)
 #endif
         CTR4(KTR_SYSC, "syscall enter thread %p pid %d proc %s code %d", td,
-            td->td_proc->p_pid, td->td_proc->p_comm, code);
+            td->td_proc->p_pid, td->td_name, code);
         td->td_syscalls++;
@@ -1070,7 +1070,7 @@ syscall(struct trapframe *frame)
         userret(td, frame);
         CTR4(KTR_SYSC, "syscall exit thread %p pid %d proc %s code %d", td,
-            td->td_proc->p_pid, td->td_proc->p_comm, code);
+            td->td_proc->p_pid, td->td_name, code);
 #ifdef KTRACE
         if (KTRPOINT(td, KTR_SYSRET))
diff --git a/sys/ia64/ia64/trap.c b/sys/ia64/ia64/trap.c
index 373a46a..a970353 100644
--- a/sys/ia64/ia64/trap.c
+++ b/sys/ia64/ia64/trap.c
@@ -279,7 +279,7 @@ printtrap(int vector, struct trapframe *tf, int isfatal, int user)
         printf(" curthread = %p\n", curthread);
         if (curthread != NULL)
                 printf(" pid = %d, comm = %s\n",
-                    curthread->td_proc->p_pid, curthread->td_proc->p_comm);
+                    curthread->td_proc->p_pid, curthread->td_name);
         printf("\n");
 }
@@ -1022,7 +1022,7 @@ syscall(struct trapframe *tf)
                 ktrsyscall(code, callp->sy_narg, args);
 #endif
         CTR4(KTR_SYSC, "syscall enter thread %p pid %d proc %s code %d", td,
-            td->td_proc->p_pid, td->td_proc->p_comm, code);
+            td->td_proc->p_pid, td->td_name, code);
         td->td_retval[0] = 0;
         td->td_retval[1] = 0;
@@ -1077,7 +1077,7 @@ syscall(struct trapframe *tf)
         userret(td, tf);
         CTR4(KTR_SYSC, "syscall exit thread %p pid %d proc %s code %d", td,
-            td->td_proc->p_pid, td->td_proc->p_comm, code);
+            td->td_proc->p_pid, td->td_name, code);
 #ifdef KTRACE
         if (KTRPOINT(td, KTR_SYSRET))
                 ktrsysret(code, error, td->td_retval[0]);
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index 210f5d3..506b5a2 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -465,7 +465,7 @@ statclock(int usermode)
                 if (ru->ru_maxrss < rss)
                         ru->ru_maxrss = rss;
         CTR4(KTR_SCHED, "statclock: %p(%s) prio %d stathz %d",
-            td, td->td_proc->p_comm, td->td_priority, (stathz)?stathz:hz);
+            td, td->td_name, td->td_priority, (stathz)?stathz:hz);
         sched_clock(td);
         thread_unlock(td);
 }
diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c
index b044605..b80c007 100644
--- a/sys/kern/kern_kse.c
+++ b/sys/kern/kern_kse.c
@@ -1036,7 +1036,7 @@ thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
                 return (NULL);
         }
         CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
-            td2, td->td_proc->p_pid, td->td_proc->p_comm);
+            td2, td->td_proc->p_pid, td->td_name);
         /*
          * Bzero already done in thread_alloc_spare() because we can't
          * do the crhold here because we are in schedlock already.
@@ -1339,7 +1339,7 @@ thread_userret(struct thread *td, struct trapframe *frame)
                  * Do the last parts of the setup needed for the upcall.
                  */
                 CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
-                    td, td->td_proc->p_pid, td->td_proc->p_comm);
+                    td, td->td_proc->p_pid, td->td_name);
                 td->td_pflags &= ~TDP_UPCALLING;
                 if (ku->ku_flags & KUF_DOUPCALL) {
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 2203c21..7256630 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -708,7 +708,7 @@ db_show_lockmgr(struct lock_object *lock)
                 td = lkp->lk_lockholder;
                 db_printf("EXCL (count %d) %p ", lkp->lk_exclusivecount, td);
                 db_printf("(tid %d, pid %d, \"%s\")\n", td->td_tid,
-                    td->td_proc->p_pid, td->td_proc->p_comm);
+                    td->td_proc->p_pid, td->td_name);
         } else
                 db_printf("UNLOCKED\n");
         if (lkp->lk_waitcount > 0)
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index 58f2533..036a277 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -819,7 +819,7 @@ db_show_mtx(struct lock_object *lock)
         if (!mtx_unowned(m) && !mtx_destroyed(m)) {
                 td = mtx_owner(m);
                 db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
-                    td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
+                    td->td_tid, td->td_proc->p_pid, td->td_name);
                 if (mtx_recursed(m))
                         db_printf(" recursed: %d\n", m->mtx_recurse);
         }
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index 632d45f..eaae25d 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -967,7 +967,7 @@ db_show_rwlock(struct lock_object *lock)
         else {
                 td = rw_wowner(rw);
                 db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
-                    td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
+                    td->td_tid, td->td_proc->p_pid, td->td_name);
                 if (rw_recursed(rw))
                         db_printf(" recursed: %u\n", rw->rw_recurse);
         }
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index 994e604..cdc5b8a 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -163,7 +163,7 @@ critical_enter(void)
         td = curthread;
         td->td_critnest++;
         CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
-            (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
+            (long)td->td_proc->p_pid, td->td_name, td->td_critnest);
 }
 void
@@ -189,7 +189,7 @@ critical_exit(void)
         td->td_critnest--;
         CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
-            (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
+            (long)td->td_proc->p_pid, td->td_name, td->td_critnest);
 }
 /*
@@ -260,7 +260,7 @@ maybe_preempt(struct thread *td)
         MPASS(TD_ON_RUNQ(td));
         TD_SET_RUNNING(td);
         CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
-            td->td_proc->p_pid, td->td_proc->p_comm);
+            td->td_proc->p_pid, td->td_name);
         SCHED_STAT_INC(switch_preempt);
         mi_switch(SW_INVOL|SW_PREEMPT, td);
         /*
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index 23dd4b6..8e99ee3 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -986,7 +986,7 @@ db_show_sx(struct lock_object *lock)
         else {
                 td = sx_xholder(sx);
                 db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
-                    td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
+                    td->td_tid, td->td_proc->p_pid, td->td_name);
                 if (sx_recursed(sx))
                         db_printf(" recursed: %d\n", sx->sx_recurse);
         }
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 71d5c27..ad797c1 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -419,16 +419,16 @@ mi_switch(int flags, struct thread *newtd)
 #if (KTR_COMPILE & KTR_SCHED) != 0
         if (TD_IS_IDLETHREAD(td))
                 CTR3(KTR_SCHED, "mi_switch: %p(%s) prio %d idle",
-                    td, td->td_proc->p_comm, td->td_priority);
+                    td, td->td_name, td->td_priority);
         else if (newtd != NULL)
                 CTR5(KTR_SCHED, "mi_switch: %p(%s) prio %d preempted by %p(%s)",
-                    td, td->td_proc->p_comm, td->td_priority, newtd,
-                    newtd->td_proc->p_comm);
+                    td, td->td_name, td->td_priority, newtd,
+                    newtd->td_name);
         else
                 CTR6(KTR_SCHED, "mi_switch: %p(%s) prio %d inhibit %d wmesg %s lock %s",
-                    td, td->td_proc->p_comm, td->td_priority,
+                    td, td->td_name, td->td_priority,
                     td->td_inhibitors, td->td_wmesg, td->td_lockname);
 #endif
         /*
@@ -441,7 +441,7 @@ mi_switch(int flags, struct thread *newtd)
 #endif
         sched_switch(td, newtd, flags);
         CTR3(KTR_SCHED, "mi_switch: running %p(%s) prio %d",
-            td, td->td_proc->p_comm, td->td_priority);
+            td, td->td_name, td->td_priority);
         CTR4(KTR_PROC, "mi_switch: new thread %ld (kse %p, pid %ld, %s)",
             td->td_tid, td->td_sched, p->p_pid, p->p_comm);
diff --git a/sys/kern/p1003_1b.c b/sys/kern/p1003_1b.c
index e998385..5fae20d 100644
--- a/sys/kern/p1003_1b.c
+++ b/sys/kern/p1003_1b.c
@@ -63,7 +63,7 @@ int
 syscall_not_present(struct thread *td, const char *s, struct nosys_args *uap)
 {
         log(LOG_ERR, "cmd %s pid %d tried to use non-present %s\n",
-            td->td_proc->p_comm, td->td_proc->p_pid, s);
+            td->td_name, td->td_proc->p_pid, s);
         /* a " return nosys(p, uap); " here causes a core dump. */
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index a5e1449..8a42143 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -615,7 +615,7 @@ sched_exit(struct proc *p, struct thread *td)
 {
         CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d",
-            td, td->td_proc->p_comm, td->td_priority);
+            td, td->td_name, td->td_priority);
         PROC_SLOCK_ASSERT(p, MA_OWNED);
         sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
 }
@@ -625,7 +625,7 @@ sched_exit_thread(struct thread *td, struct thread *child)
 {
         CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
-            child, child->td_proc->p_comm, child->td_priority);
+            child, child->td_name, child->td_priority);
         thread_lock(td);
         td->td_estcpu = ESTCPULIM(td->td_estcpu + child->td_estcpu);
         thread_unlock(td);
@@ -679,8 +679,8 @@ static void
 sched_priority(struct thread *td, u_char prio)
 {
         CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
-            td, td->td_proc->p_comm, td->td_priority, prio, curthread,
-            curthread->td_proc->p_comm);
+            td, td->td_name, td->td_priority, prio, curthread,
+            curthread->td_name);
         THREAD_LOCK_ASSERT(td, MA_OWNED);
         if (td->td_priority == prio)
@@ -1071,8 +1071,8 @@ sched_add(struct thread *td, int flags)
         KASSERT(td->td_flags & TDF_INMEM,
             ("sched_add: thread swapped out"));
         CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
-            td, td->td_proc->p_comm, td->td_priority, curthread,
-            curthread->td_proc->p_comm);
+            td, td->td_name, td->td_priority, curthread,
+            curthread->td_name);
         /*
          * Now that the thread is moving to the run-queue, set the lock
          * to the scheduler's lock.
@@ -1140,8 +1140,8 @@ sched_add(struct thread *td, int flags)
         KASSERT(td->td_flags & TDF_INMEM,
             ("sched_add: thread swapped out"));
         CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
-            td, td->td_proc->p_comm, td->td_priority, curthread,
-            curthread->td_proc->p_comm);
+            td, td->td_name, td->td_priority, curthread,
+            curthread->td_name);
         /*
          * Now that the thread is moving to the run-queue, set the lock
          * to the scheduler's lock.
@@ -1188,8 +1188,8 @@ sched_rem(struct thread *td)
             ("sched_rem: thread not on run queue"));
         mtx_assert(&sched_lock, MA_OWNED);
         CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
-            td, td->td_proc->p_comm, td->td_priority, curthread,
-            curthread->td_proc->p_comm);
+            td, td->td_name, td->td_priority, curthread,
+            curthread->td_name);
         if ((td->td_proc->p_flag & P_NOLOAD) == 0)
                 sched_load_rem();
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 2b33461..b11f617 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -339,7 +339,7 @@ runq_print(struct runq *rq)
                         rqh = &rq->rq_queues[pri];
                         TAILQ_FOREACH(ts, rqh, ts_procq) {
                                 printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n",
-                                    ts->ts_thread, ts->ts_thread->td_proc->p_comm, ts->ts_thread->td_priority, ts->ts_rqindex, pri);
+                                    ts->ts_thread, ts->ts_thread->td_name, ts->ts_thread->td_priority, ts->ts_rqindex, pri);
                         }
                 }
         }
@@ -1600,8 +1600,8 @@ sched_thread_priority(struct thread *td, u_char prio)
         struct td_sched *ts;
         CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
-            td, td->td_proc->p_comm, td->td_priority, prio, curthread,
-            curthread->td_proc->p_comm);
+            td, td->td_name, td->td_priority, prio, curthread,
+            curthread->td_name);
         ts = td->td_sched;
         THREAD_LOCK_ASSERT(td, MA_OWNED);
         if (td->td_priority == prio)
@@ -2087,7 +2087,7 @@ sched_exit(struct proc *p, struct thread *child)
         struct thread *td;
         CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d",
-            child, child->td_proc->p_comm, child->td_priority);
+            child, child->td_name, child->td_priority);
         PROC_SLOCK_ASSERT(p, MA_OWNED);
         td = FIRST_THREAD_IN_PROC(p);
@@ -2105,7 +2105,7 @@ sched_exit_thread(struct thread *td, struct thread *child)
 {
         CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
-            child, child->td_proc->p_comm, child->td_priority);
+            child, child->td_name, child->td_priority);
 #ifdef KSE
         /*
@@ -2396,8 +2396,8 @@ sched_add(struct thread *td, int flags)
         int cpu;
 #endif
         CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
-            td, td->td_proc->p_comm, td->td_priority, curthread,
-            curthread->td_proc->p_comm);
+            td, td->td_name, td->td_priority, curthread,
+            curthread->td_name);
         THREAD_LOCK_ASSERT(td, MA_OWNED);
         ts = td->td_sched;
         /*
@@ -2450,8 +2450,8 @@ sched_rem(struct thread *td)
         struct td_sched *ts;
         CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
-            td, td->td_proc->p_comm, td->td_priority, curthread,
-            curthread->td_proc->p_comm);
+            td, td->td_name, td->td_priority, curthread,
+            curthread->td_name);
         ts = td->td_sched;
         tdq = TDQ_CPU(ts->ts_cpu);
         TDQ_LOCK_ASSERT(tdq, MA_OWNED);
diff --git a/sys/kern/subr_pcpu.c b/sys/kern/subr_pcpu.c
index 1109b31..d0fd061 100644
--- a/sys/kern/subr_pcpu.c
+++ b/sys/kern/subr_pcpu.c
@@ -112,7 +112,7 @@ show_pcpu(struct pcpu *pc)
         td = pc->pc_curthread;
         if (td != NULL)
                 db_printf("%p: pid %d \"%s\"\n", td, td->td_proc->p_pid,
-                    td->td_proc->p_comm);
+                    td->td_name);
         else
                 db_printf("none\n");
         db_printf("curpcb = %p\n", pc->pc_curpcb);
@@ -120,14 +120,14 @@ show_pcpu(struct pcpu *pc)
         td = pc->pc_fpcurthread;
         if (td != NULL)
                 db_printf("%p: pid %d \"%s\"\n", td, td->td_proc->p_pid,
-                    td->td_proc->p_comm);
+                    td->td_name);
         else
                 db_printf("none\n");
         db_printf("idlethread = ");
         td = pc->pc_idlethread;
         if (td != NULL)
                 db_printf("%p: pid %d \"%s\"\n", td, td->td_proc->p_pid,
-                    td->td_proc->p_comm);
+                    td->td_name);
         else
                 db_printf("none\n");
         db_show_mdpcpu(pc);
diff --git a/sys/kern/subr_sleepqueue.c b/sys/kern/subr_sleepqueue.c
index d0856f9..8642d14 100644
--- a/sys/kern/subr_sleepqueue.c
+++ b/sys/kern/subr_sleepqueue.c
@@ -459,7 +459,7 @@ sleepq_switch(void *wchan)
         mi_switch(SW_VOL, NULL);
         KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
         CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
-            (void *)td, (long)td->td_proc->p_pid, (void *)td->td_proc->p_comm);
+            (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
 }
 /*
@@ -650,7 +650,7 @@ sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
          * do it. However, we can't assert that it is set.
          */
         CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)",
-            (void *)td, (long)td->td_proc->p_pid, td->td_proc->p_comm);
+            (void *)td, (long)td->td_proc->p_pid, td->td_name);
         TD_CLR_SLEEPING(td);
         /* Adjust priority if requested. */
@@ -772,7 +772,7 @@ sleepq_timeout(void *arg)
         td = arg;
         CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)",
-            (void *)td, (long)td->td_proc->p_pid, (void *)td->td_proc->p_comm);
+            (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
         /*
          * First, see if the thread is asleep and get the wait channel if
@@ -880,7 +880,7 @@ sleepq_abort(struct thread *td, int intrval)
                 return;
         CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)",
-            (void *)td, (long)td->td_proc->p_pid, (void *)td->td_proc->p_comm);
+            (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
         td->td_intrval = intrval;
         td->td_flags |= TDF_SLEEPABORT;
         /*
@@ -957,7 +957,7 @@ found:
                 db_printf("\t%p (tid %d, pid %d, \"%s\")\n",
                     td, td->td_tid, td->td_proc->p_pid,
                     td->td_name[i] != '\0' ? td->td_name :
-                    td->td_proc->p_comm);
+                    td->td_name);
                 }
         }
 }
diff --git a/sys/kern/subr_turnstile.c b/sys/kern/subr_turnstile.c
index 245240b..269faca 100644
--- a/sys/kern/subr_turnstile.c
+++ b/sys/kern/subr_turnstile.c
@@ -259,7 +259,7 @@ propagate_priority(struct thread *td)
                  */
                 KASSERT(TD_ON_LOCK(td), (
                     "thread %d(%s):%d holds %s but isn't blocked on a lock\n",
-                    td->td_tid, td->td_proc->p_comm, td->td_state,
+                    td->td_tid, td->td_name, td->td_state,
                     ts->ts_lockobj->lo_name));
                 /*
@@ -1024,7 +1024,7 @@ print_thread(struct thread *td, const char *prefix)
         db_printf("%s%p (tid %d, pid %d, \"%s\")\n", prefix, td, td->td_tid,
             td->td_proc->p_pid, td->td_name[0] != '\0' ? td->td_name :
-            td->td_proc->p_comm);
+            td->td_name);
 }
 static void
@@ -1107,7 +1107,7 @@ print_lockchain(struct thread *td, const char *prefix)
         while (!db_pager_quit) {
                 db_printf("%sthread %d (pid %d, %s) ", prefix, td->td_tid,
                     td->td_proc->p_pid, td->td_name[0] != '\0' ? td->td_name :
-                    td->td_proc->p_comm);
+                    td->td_name);
                 switch (td->td_state) {
                 case TDS_INACTIVE:
                         db_printf("is inactive\n");
@@ -1190,7 +1190,7 @@ print_sleepchain(struct thread *td, const char *prefix)
         while (!db_pager_quit) {
                 db_printf("%sthread %d (pid %d, %s) ", prefix, td->td_tid,
                     td->td_proc->p_pid, td->td_name[0] != '\0' ? td->td_name :
-                    td->td_proc->p_comm);
+                    td->td_name);
                 switch (td->td_state) {
                 case TDS_INACTIVE:
                         db_printf("is inactive\n");
diff --git a/sys/kern/sys_generic.c b/sys/kern/sys_generic.c
index 16bd195..19e0b4d 100644
--- a/sys/kern/sys_generic.c
+++ b/sys/kern/sys_generic.c
@@ -521,7 +521,7 @@ ioctl(struct thread *td, struct ioctl_args *uap)
         if (uap->com > 0xffffffff) {
                 printf(
                     "WARNING pid %d (%s): ioctl sign-extension ioctl %lx\n",
-                    td->td_proc->p_pid, td->td_proc->p_comm, uap->com);
+                    td->td_proc->p_pid, td->td_name, uap->com);
                 uap->com &= 0xffffffff;
         }
         com = uap->com;
diff --git a/sys/powerpc/aim/trap.c b/sys/powerpc/aim/trap.c
index a5ab7eb..2418044d 100644
--- a/sys/powerpc/aim/trap.c
+++ b/sys/powerpc/aim/trap.c
@@ -303,7 +303,7 @@ printtrap(u_int vector, struct trapframe *frame, int isfatal, int user)
         printf(" curthread = %p\n", curthread);
         if (curthread != NULL)
                 printf(" pid = %d, comm = %s\n",
-                    curthread->td_proc->p_pid, curthread->td_proc->p_comm);
+                    curthread->td_proc->p_pid, curthread->td_name);
         printf("\n");
 }
diff --git a/sys/powerpc/powerpc/trap.c b/sys/powerpc/powerpc/trap.c
index a5ab7eb..2418044d 100644
--- a/sys/powerpc/powerpc/trap.c
+++ b/sys/powerpc/powerpc/trap.c
@@ -303,7 +303,7 @@ printtrap(u_int vector, struct trapframe *frame, int isfatal, int user)
         printf(" curthread = %p\n", curthread);
         if (curthread != NULL)
                 printf(" pid = %d, comm = %s\n",
-                    curthread->td_proc->p_pid, curthread->td_proc->p_comm);
+                    curthread->td_proc->p_pid, curthread->td_name);
         printf("\n");
 }
diff --git a/sys/sun4v/sun4v/trap.c b/sys/sun4v/sun4v/trap.c
index 7ccb5a8..75d5ce0 100644
--- a/sys/sun4v/sun4v/trap.c
+++ b/sys/sun4v/sun4v/trap.c
@@ -370,7 +370,7 @@ trap(struct trapframe *tf, int64_t type, uint64_t data)
 #ifdef VERBOSE
         if (sig == 4 || sig == 10 || sig == 11)
                 printf("trap: %ld:%s: 0x%lx at 0x%lx on cpu=%d sig=%d proc=%s\n",
-                    trapno, trap_msg[trapno], data, tf->tf_tpc, curcpu, sig, curthread->td_proc->p_comm);
+                    trapno, trap_msg[trapno], data, tf->tf_tpc, curcpu, sig, curthread->td_name);
 #endif
         /* XXX I've renumbered the traps to largely reflect what the hardware uses
          * so this will need to be re-visited