author     peter <peter@FreeBSD.org>  2002-07-08 04:24:26 +0000
committer  peter <peter@FreeBSD.org>  2002-07-08 04:24:26 +0000
commit     62e40d1277fdd1a346ff9f3f25a2ea493d6361ae
tree       03dd4cf988623663014cdcc79fa1f7ab1fb086c1
parent     a01296978c4bac0e78281b2889451de806ff5009
Add a special page zero entry point intended to be called via the
single-threaded VM pagezero kthread outside of Giant. For some platforms
this is really easy, since they can just use the direct mapped region.
For others, IPI sending is involved or there are other issues, so grab
Giant when needed.

We still have preemption issues to deal with, but Alan Cox has an
interesting suggestion on how to minimize the problem on x86.

Use Luigi's hack for preserving the (lack of) priority.

Turn the idle zeroing back on, since it can now actually do something
useful outside of Giant in many cases.
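The shape of the change: the machine-independent pagezero kthread now drops Giant and calls a new machine-dependent hook, pmap_zero_page_idle(), which each platform implements as cheaply as it can. A minimal sketch of the direct-map case, with PHYS_TO_DIRECT standing in for whatever direct-map translation a platform provides (not the committed code):

/*
 * Sketch only: on a platform whose kernel direct-maps all of
 * physical memory, idle zeroing needs no locks, no temporary
 * PTE, and no TLB shootdown.
 */
void
pmap_zero_page_idle(vm_page_t m)
{
	bzero((void *)PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m)), PAGE_SIZE);
}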
Diffstat (limited to 'sys')
-rw-r--r--  sys/alpha/alpha/pmap.c         15
-rw-r--r--  sys/amd64/amd64/pmap.c         38
-rw-r--r--  sys/i386/i386/pmap.c           38
-rw-r--r--  sys/ia64/ia64/pmap.c           16
-rw-r--r--  sys/powerpc/aim/mmu_oea.c      11
-rw-r--r--  sys/powerpc/powerpc/mmu_oea.c  11
-rw-r--r--  sys/powerpc/powerpc/pmap.c     11
-rw-r--r--  sys/sparc64/sparc64/pmap.c     16
-rw-r--r--  sys/vm/pmap.h                   1
-rw-r--r--  sys/vm/vm_zeroidle.c           16
10 files changed, 162 insertions(+), 11 deletions(-)
diff --git a/sys/alpha/alpha/pmap.c b/sys/alpha/alpha/pmap.c
index dba9cd8..ae33c4b 100644
--- a/sys/alpha/alpha/pmap.c
+++ b/sys/alpha/alpha/pmap.c
@@ -2525,6 +2525,21 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
bzero((char *)(caddr_t)va + off, size);
}
+
+/*
+ * pmap_zero_page_idle zeros the specified hardware page by
+ * mapping it into virtual memory and using bzero to clear
+ * its contents. This is for the vm_pagezero idle process.
+ */
+
+void
+pmap_zero_page_idle(vm_page_t m)
+{
+ vm_offset_t va = ALPHA_PHYS_TO_K0SEG(VM_PAGE_TO_PHYS(m));
+ bzero((caddr_t) va, PAGE_SIZE);
+}
+
+
/*
* pmap_copy_page copies the specified (machine independent)
* page by mapping the page into virtual memory and using
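The alpha version above is the easy case the commit message mentions: K0SEG direct-maps physical memory, so the physical-to-virtual translation is pure arithmetic and nothing (no PTE, no TLB entry, no other CPU) needs to be synchronized. Schematically, with the base constant to be read as illustrative rather than authoritative:

/*
 * Direct-map translation, sketched: va = segment base + pa.
 * The value shown corresponds to alpha's K0SEG base.
 */
#define	DIRECT_BASE		0xfffffc0000000000UL
#define	PHYS_TO_DIRECT(pa)	((void *)(DIRECT_BASE + (pa)))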
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index a0c8ebe..f9829ae 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -175,9 +175,9 @@ static int pmap_pagedaemon_waken = 0;
* All those kernel PT submaps that BSD is so fond of
*/
pt_entry_t *CMAP1 = 0;
-static pt_entry_t *CMAP2, *ptmmap;
+static pt_entry_t *CMAP2, *CMAP3, *ptmmap;
caddr_t CADDR1 = 0, ptvmmap = 0;
-static caddr_t CADDR2;
+static caddr_t CADDR2, CADDR3;
static pt_entry_t *msgbufmap;
struct msgbuf *msgbufp = 0;
@@ -326,9 +326,11 @@ pmap_bootstrap(firstaddr, loadaddr)
/*
* CMAP1/CMAP2 are used for zeroing and copying pages.
+ * CMAP3 is used for the idle process page zeroing.
*/
SYSMAP(caddr_t, CMAP1, CADDR1, 1)
SYSMAP(caddr_t, CMAP2, CADDR2, 1)
+ SYSMAP(caddr_t, CMAP3, CADDR3, 1)
/*
* Crashdump maps.
@@ -2686,6 +2688,38 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
}
/*
+ * pmap_zero_page_idle zeros the specified hardware page by mapping
+ * the page into KVM and using bzero to clear its contents. This
+ * is intended to be called from the vm_pagezero process only and
+ * outside of Giant.
+ */
+void
+pmap_zero_page_idle(vm_page_t m)
+{
+ vm_offset_t phys = VM_PAGE_TO_PHYS(m);
+
+ if (*CMAP3)
+ panic("pmap_zero_page: CMAP3 busy");
+
+ *CMAP3 = PG_V | PG_RW | phys | PG_A | PG_M;
+#ifdef SMP
+ mtx_lock(&Giant); /* IPI sender not MPSAFE */
+#endif
+ invltlb_1pg((vm_offset_t)CADDR3);
+#ifdef SMP
+ mtx_unlock(&Giant);
+#endif
+
+#if defined(I686_CPU)
+ if (cpu_class == CPUCLASS_686)
+ i686_pagezero(CADDR3);
+ else
+#endif
+ bzero(CADDR3, PAGE_SIZE);
+ *CMAP3 = 0;
+}
+
+/*
* pmap_copy_page copies the specified (machine independent)
* page by mapping the page into virtual memory and using
* bcopy to copy the page, one machine dependent page at a
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index a0c8ebe..f9829ae 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -175,9 +175,9 @@ static int pmap_pagedaemon_waken = 0;
* All those kernel PT submaps that BSD is so fond of
*/
pt_entry_t *CMAP1 = 0;
-static pt_entry_t *CMAP2, *ptmmap;
+static pt_entry_t *CMAP2, *CMAP3, *ptmmap;
caddr_t CADDR1 = 0, ptvmmap = 0;
-static caddr_t CADDR2;
+static caddr_t CADDR2, CADDR3;
static pt_entry_t *msgbufmap;
struct msgbuf *msgbufp = 0;
@@ -326,9 +326,11 @@ pmap_bootstrap(firstaddr, loadaddr)
/*
* CMAP1/CMAP2 are used for zeroing and copying pages.
+ * CMAP3 is used for the idle process page zeroing.
*/
SYSMAP(caddr_t, CMAP1, CADDR1, 1)
SYSMAP(caddr_t, CMAP2, CADDR2, 1)
+ SYSMAP(caddr_t, CMAP3, CADDR3, 1)
/*
* Crashdump maps.
@@ -2686,6 +2688,38 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
}
/*
+ * pmap_zero_page_idle zeros the specified hardware page by mapping
+ * the page into KVM and using bzero to clear its contents. This
+ * is intended to be called from the vm_pagezero process only and
+ * outside of Giant.
+ */
+void
+pmap_zero_page_idle(vm_page_t m)
+{
+ vm_offset_t phys = VM_PAGE_TO_PHYS(m);
+
+ if (*CMAP3)
+ panic("pmap_zero_page: CMAP3 busy");
+
+ *CMAP3 = PG_V | PG_RW | phys | PG_A | PG_M;
+#ifdef SMP
+ mtx_lock(&Giant); /* IPI sender not MPSAFE */
+#endif
+ invltlb_1pg((vm_offset_t)CADDR3);
+#ifdef SMP
+ mtx_unlock(&Giant);
+#endif
+
+#if defined(I686_CPU)
+ if (cpu_class == CPUCLASS_686)
+ i686_pagezero(CADDR3);
+ else
+#endif
+ bzero(CADDR3, PAGE_SIZE);
+ *CMAP3 = 0;
+}
+
+/*
* pmap_copy_page copies the specified (machine independent)
* page by mapping the page into virtual memory and using
* bcopy to copy the page, one machine dependent page at a
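The i386/amd64 variant (the two hunks above are identical) has no direct map, so it installs the page in a dedicated boot-time PTE (CMAP3) rather than sharing CMAP1/CMAP2, which are used under different locking. After rewriting the PTE, the stale TLB entry for CADDR3 has to be flushed. On the local CPU that is a single instruction, roughly:

/*
 * Approximate shape of a local one-page TLB flush on i386; the
 * real invltlb_1pg() also handles pre-486 CPUs that lack invlpg.
 * invlpg only affects the executing CPU, which is why the SMP
 * case must IPI the other CPUs -- and that IPI sender is the
 * part still hiding behind Giant here.
 */
static __inline void
invlpg_local(vm_offset_t va)
{
	__asm __volatile("invlpg %0" : : "m" (*(char *)va) : "memory");
}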
diff --git a/sys/ia64/ia64/pmap.c b/sys/ia64/ia64/pmap.c
index 1ccac9c..7f70184 100644
--- a/sys/ia64/ia64/pmap.c
+++ b/sys/ia64/ia64/pmap.c
@@ -2035,6 +2035,22 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
bzero((char *)(caddr_t)va + off, size);
}
+
+/*
+ * pmap_zero_page_idle zeros the specified hardware page by
+ * mapping it into virtual memory and using bzero to clear
+ * its contents. This is for the vm_pagezero idle process.
+ */
+
+void
+pmap_zero_page_idle(vm_page_t m)
+{
+ vm_offset_t va = IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(m));
+ bzero((caddr_t) va, PAGE_SIZE);
+}
+
+
/*
* pmap_copy_page copies the specified (machine independent)
* page by mapping the page into virtual memory and using
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index 817f7f1..187e4b3 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -909,6 +909,17 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
TODO;
}
+void
+pmap_zero_page_idle(vm_page_t m)
+{
+
+ /* XXX this is called outside of Giant, is pmap_zero_page safe? */
+ /* XXX maybe have a dedicated mapping for this to avoid the problem? */
+ mtx_lock(&Giant);
+ pmap_zero_page(m);
+ mtx_unlock(&Giant);
+}
+
/*
* Map the given physical page at the specified virtual address in the
* target pmap with the protection requested. If specified the page
diff --git a/sys/powerpc/powerpc/mmu_oea.c b/sys/powerpc/powerpc/mmu_oea.c
index 817f7f1..187e4b3 100644
--- a/sys/powerpc/powerpc/mmu_oea.c
+++ b/sys/powerpc/powerpc/mmu_oea.c
@@ -909,6 +909,17 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
TODO;
}
+void
+pmap_zero_page_idle(vm_page_t m)
+{
+
+ /* XXX this is called outside of Giant, is pmap_zero_page safe? */
+ /* XXX maybe have a dedicated mapping for this to avoid the problem? */
+ mtx_lock(&Giant);
+ pmap_zero_page(m);
+ mtx_unlock(&Giant);
+}
+
/*
* Map the given physical page at the specified virtual address in the
* target pmap with the protection requested. If specified the page
diff --git a/sys/powerpc/powerpc/pmap.c b/sys/powerpc/powerpc/pmap.c
index 817f7f1..187e4b3 100644
--- a/sys/powerpc/powerpc/pmap.c
+++ b/sys/powerpc/powerpc/pmap.c
@@ -909,6 +909,17 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
TODO;
}
+void
+pmap_zero_page_idle(vm_page_t m)
+{
+
+ /* XXX this is called outside of Giant, is pmap_zero_page safe? */
+ /* XXX maybe have a dedicated mapping for this to avoid the problem? */
+ mtx_lock(&Giant);
+ pmap_zero_page(m);
+ mtx_unlock(&Giant);
+}
+
/*
* Map the given physical page at the specified virtual address in the
* target pmap with the protection requested. If specified the page
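All three powerpc copies punt: pmap_zero_page() was written assuming Giant, so the new entry point just wraps it. The XXX comments point at the likely fix, the same trick i386 uses: reserve a mapping at boot that only the pagezero thread ever touches. A hypothetical shape (zeroidle_va and the pmap_kenter_priv()/pmap_kremove_priv() helpers are invented names for illustration):

/* Hypothetical Giant-free variant using a boot-reserved VA. */
static vm_offset_t zeroidle_va;		/* set aside in pmap_bootstrap() */

void
pmap_zero_page_idle(vm_page_t m)
{
	pmap_kenter_priv(zeroidle_va, VM_PAGE_TO_PHYS(m));
	bzero((void *)zeroidle_va, PAGE_SIZE);
	pmap_kremove_priv(zeroidle_va);
}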
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index 635b4bd..e8414f5 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -1429,6 +1429,22 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
aszero(ASI_PHYS_USE_EC, pa + off, size);
}
+void
+pmap_zero_page_idle(vm_page_t m)
+{
+ vm_offset_t pa = VM_PAGE_TO_PHYS(m);
+
+ CTR1(KTR_PMAP, "pmap_zero_page_idle: pa=%#lx", pa);
+#ifdef SMP
+ mtx_lock(&Giant);
+#endif
+ dcache_inval_phys(pa, pa + PAGE_SIZE - 1);
+#ifdef SMP
+ mtx_unlock(&Giant);
+#endif
+ aszero(ASI_PHYS_USE_EC, pa, PAGE_SIZE);
+}
+
/*
* Copy a page of physical memory by temporarily mapping it into the tlb.
*/
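sparc64 sits in between: aszero() stores through a physical-address ASI, so no mapping is created at all, but stale D-cache lines for that physical page must be invalidated first, and dcache_inval_phys() reaches the other CPUs by IPI, hence the Giant bracket on SMP. The ASI store loop looks roughly like this sketch (treat the stxa() argument order as an assumption, not gospel):

/* Illustrative: zero a physical page 8 bytes at a time via ASI stores. */
for (off = 0; off < PAGE_SIZE; off += 8)
	stxa(pa + off, ASI_PHYS_USE_EC, 0);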
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index 79a73f6..23e3c43 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -126,6 +126,7 @@ void pmap_remove(pmap_t, vm_offset_t, vm_offset_t);
void pmap_remove_pages(pmap_t, vm_offset_t, vm_offset_t);
void pmap_zero_page(vm_page_t);
void pmap_zero_page_area(vm_page_t, int off, int size);
+void pmap_zero_page_idle(vm_page_t);
void pmap_prefault(pmap_t, vm_offset_t, vm_map_entry_t);
int pmap_mincore(pmap_t pmap, vm_offset_t addr);
void pmap_new_thread(struct thread *td);
diff --git a/sys/vm/vm_zeroidle.c b/sys/vm/vm_zeroidle.c
index 0552c32..52a055d 100644
--- a/sys/vm/vm_zeroidle.c
+++ b/sys/vm/vm_zeroidle.c
@@ -30,7 +30,7 @@ static int cnt_prezero;
SYSCTL_INT(_vm_stats_misc, OID_AUTO,
cnt_prezero, CTLFLAG_RD, &cnt_prezero, 0, "");
-static int idlezero_enable = 0;
+static int idlezero_enable = 1;
SYSCTL_INT(_vm, OID_AUTO, idlezero_enable, CTLFLAG_RW, &idlezero_enable, 0, "");
TUNABLE_INT("vm.idlezero_enable", &idlezero_enable);
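With the default flipped to 1, idle zeroing is active unless vm.idlezero_enable is overridden, either as a loader tunable or at runtime. A small userland program to inspect and enable it through the standard sysctlbyname(3) interface (assumes FreeBSD; setting the value requires root):

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int enabled, on = 1;
	size_t len = sizeof(enabled);

	/* Read the current value of vm.idlezero_enable. */
	if (sysctlbyname("vm.idlezero_enable", &enabled, &len, NULL, 0) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	printf("idle zeroing is %s\n", enabled ? "on" : "off");

	/* Switch it on if it was off. */
	if (!enabled &&
	    sysctlbyname("vm.idlezero_enable", NULL, NULL, &on, sizeof(on)) == -1)
		perror("sysctlbyname (set)");
	return (0);
}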
@@ -83,9 +83,9 @@ vm_page_zero_idle(void)
TAILQ_REMOVE(&vm_page_queues[m->queue].pl, m, pageq);
m->queue = PQ_NONE;
mtx_unlock(&vm_page_queue_free_mtx);
- /* maybe drop out of Giant here */
- pmap_zero_page(m);
- /* and return here */
+ mtx_unlock(&Giant);
+ pmap_zero_page_idle(m);
+ mtx_lock(&Giant);
mtx_lock(&vm_page_queue_free_mtx);
vm_page_flag_set(m, PG_ZERO);
m->queue = PQ_FREE + m->pc;
@@ -109,7 +109,7 @@ void
vm_page_zero_idle_wakeup(void)
{
- if (vm_page_zero_check())
+ if (idlezero_enable && vm_page_zero_check())
wakeup(&zero_state);
}
@@ -119,17 +119,19 @@ vm_pagezero(void)
struct thread *td = curthread;
struct rtprio rtp;
int pages = 0;
+ int pri;
rtp.prio = RTP_PRIO_MAX;
rtp.type = RTP_PRIO_IDLE;
mtx_lock_spin(&sched_lock);
rtp_to_pri(&rtp, td->td_ksegrp);
+ pri = td->td_priority;
mtx_unlock_spin(&sched_lock);
for (;;) {
if (vm_page_zero_check()) {
pages += vm_page_zero_idle();
- if (pages > idlezero_maxrun) {
+ if (pages > idlezero_maxrun || kserunnable()) {
mtx_lock_spin(&sched_lock);
td->td_proc->p_stats->p_ru.ru_nvcsw++;
mi_switch();
@@ -137,7 +139,7 @@ vm_pagezero(void)
pages = 0;
}
} else {
- tsleep(&zero_state, PPAUSE, "pgzero", hz * 300);
+ tsleep(&zero_state, pri, "pgzero", hz * 300);
pages = 0;
}
}
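"Luigi's hack" relies on tsleep()'s second argument being the priority the thread is given when it wakes: sleeping at PPAUSE would quietly promote the pagezero thread out of the idle class on every wakeup. Capturing td_priority once, right after rtp_to_pri() establishes the idle priority, and handing that value back to tsleep() keeps the thread where it belongs. In miniature:

/* Before: each wakeup boosted the thread to PPAUSE. */
tsleep(&zero_state, PPAUSE, "pgzero", hz * 300);
/* After: wake up at the idle priority recorded at startup. */
tsleep(&zero_state, pri, "pgzero", hz * 300);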