summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorjulian <julian@FreeBSD.org>2004-09-10 22:28:33 +0000
committerjulian <julian@FreeBSD.org>2004-09-10 22:28:33 +0000
commit7cae3c9d5b0db780f92af04b645f2716499bc051 (patch)
tree45ec30d8b6838f53409c69d14291f0f11b5f7b7c
parentb9945320c2a286dda4ad4c6529fec0f3f85f68a3 (diff)
downloadFreeBSD-src-7cae3c9d5b0db780f92af04b645f2716499bc051.zip
FreeBSD-src-7cae3c9d5b0db780f92af04b645f2716499bc051.tar.gz
Make up my mind if cpu pinning is stored in the thread structure or the
scheduler specific extension to it. Put it in the extension as the implementation details of how the pinning is done needn't be visible outside the scheduler. Submitted by: tegge (of course!) (with changes) MFC after: 3 days
-rw-r--r--sys/i386/i386/pmap.c4
-rw-r--r--sys/kern/sched_4bsd.c24
-rw-r--r--sys/kern/sched_ule.c23
-rw-r--r--sys/sys/proc.h1
-rw-r--r--sys/sys/sched.h21
5 files changed, 53 insertions, 20 deletions
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index d7073b6..df2e1ca 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -807,7 +807,7 @@ pmap_pte_quick(pmap_t pmap, vm_offset_t va)
if (pmap_is_current(pmap))
return (vtopte(va));
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
- KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
+ KASSERT(sched_ispinned(), ("curthread not pinned"));
newpf = *pde & PG_FRAME;
if ((*PMAP1 & PG_FRAME) != newpf) {
*PMAP1 = newpf | PG_RW | PG_V | PG_A | PG_M;
@@ -1622,7 +1622,7 @@ pmap_remove_page(pmap_t pmap, vm_offset_t va)
pt_entry_t *pte;
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
- KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
+ KASSERT(sched_ispinned(), ("curthread not pinned"));
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
if ((pte = pmap_pte_quick(pmap, va)) == NULL || *pte == 0)
return;
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 9a96344..6686339 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -87,7 +87,7 @@ struct kse {
} ke_state; /* (j) KSE status. */
int ke_cpticks; /* (j) Ticks of cpu time. */
struct runq *ke_runq; /* runq the kse is currently on */
- int ke_pinned; /* nested count of pinned to a cpu */
+ int ke_pinned; /* (k) nested count, pinned to a cpu */
};
#define ke_proc ke_thread->td_proc
@@ -764,7 +764,6 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
if ((p->p_flag & P_NOLOAD) == 0)
sched_tdcnt--;
-
/*
* We are volunteering to switch out so we get to nominate
* a successor for the rest of our quantum
@@ -1173,5 +1172,26 @@ sched_pctcpu(struct thread *td)
return (0);
}
+
+void
+sched_pin(void)
+{
+ curthread->td_sched->ke_pinned++;
+}
+
+void
+sched_unpin(void)
+{
+ curthread->td_sched->ke_pinned--;
+}
+
+#ifdef INVARIANTS
+int
+sched_ispinned(void)
+{
+ return (curthread->td_sched->ke_pinned);
+}
+#endif
+
#define KERN_SWITCH_INCLUDE 1
#include "kern/kern_switch.c"
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 03904ef..24ae5b2 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -112,7 +112,7 @@ struct kse {
KES_ONRUNQ
} ke_state; /* (j) thread sched specific status. */
int ke_slptime;
- int ke_pinned;
+ int ke_pinned; /* (k) nested count, pinned to a cpu */
int ke_slice;
struct runq *ke_runq;
u_char ke_cpu; /* CPU that we have affinity for. */
@@ -1902,5 +1902,26 @@ sched_sizeof_thread(void)
{
return (sizeof(struct thread) + sizeof(struct td_sched));
}
+
+void
+sched_pin(void)
+{
+ curthread->td_sched->ke_pinned++;
+}
+
+void
+sched_unpin(void)
+{
+ curthread->td_sched->ke_pinned--;
+}
+
+#ifdef INVARIANTS
+int
+sched_ispinned(void)
+{
+ return (curthread->td_sched->ke_pinned);
+}
+#endif
+
#define KERN_SWITCH_INCLUDE 1
#include "kern/kern_switch.c"
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index 57e180c..1398bb2 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -268,7 +268,6 @@ struct thread {
LIST_HEAD(, turnstile) td_contested; /* (q) Contested locks. */
struct lock_list_entry *td_sleeplocks; /* (k) Held sleep locks. */
int td_intr_nesting_level; /* (k) Interrupt recursion. */
- int td_pinned; /* (k) Temporary cpu pin count. */
struct kse_thr_mailbox *td_mailbox; /* (*) Userland mailbox address. */
struct ucred *td_ucred; /* (k) Reference to credentials. */
struct thread *td_standin; /* (*) Use this for an upcall. */
diff --git a/sys/sys/sched.h b/sys/sys/sched.h
index 6912390..260e589 100644
--- a/sys/sys/sched.h
+++ b/sys/sys/sched.h
@@ -82,11 +82,16 @@ void sched_rem(struct thread *td);
* hold a thread on a particular CPU.
*/
void sched_bind(struct thread *td, int cpu);
-static __inline void sched_pin(void);
void sched_unbind(struct thread *td);
-static __inline void sched_unpin(void);
+/* these only work for curthread */
+void sched_pin(void);
+void sched_unpin(void);
+#ifdef INVARIANTS
+int sched_ispinned(void);
+#endif
+
/*
* These procedures tell the process data structure allocation code how
* many bytes to actually allocate.
@@ -95,18 +100,6 @@ int sched_sizeof_ksegrp(void);
int sched_sizeof_proc(void);
int sched_sizeof_thread(void);
-static __inline void
-sched_pin(void)
-{
- curthread->td_pinned++;
-}
-
-static __inline void
-sched_unpin(void)
-{
- curthread->td_pinned--;
-}
-
/* temporarily here */
void schedinit(void);
void sched_destroyproc(struct proc *p);
OpenPOWER on IntegriCloud