summaryrefslogtreecommitdiffstats
path: root/sys/mips/include/pmap.h
diff options
context:
space:
mode:
authoralc <alc@FreeBSD.org>2012-08-13 17:38:38 +0000
committeralc <alc@FreeBSD.org>2012-08-13 17:38:38 +0000
commit5cc57da5bdc9726742de94e3af6857522acf2d5b (patch)
tree37b978765c9357705d31ef9ad9687aa9eaa1925c /sys/mips/include/pmap.h
parent6c62ea1c5136b13bea147d023af84b777d4219b0 (diff)
downloadFreeBSD-src-5cc57da5bdc9726742de94e3af6857522acf2d5b.zip
FreeBSD-src-5cc57da5bdc9726742de94e3af6857522acf2d5b.tar.gz
Port the new PV entry allocator from amd64/i386. This allocator has two
advantages. First, PV entries are roughly half the size. Second, this allocator doesn't access the paging queues, and thus it will allow for the removal of the page queues lock from this pmap. Fix a rather serious bug in pmap_remove_write(). After removing write access from the specified page's first mapping, pmap_remove_write() then used the wrong "next" pointer. Consequently, the page's second, third, etc. mappings were not write protected. Tested by: jchandra
Diffstat (limited to 'sys/mips/include/pmap.h')
-rw-r--r--sys/mips/include/pmap.h26
1 file changed, 21 insertions, 5 deletions
diff --git a/sys/mips/include/pmap.h b/sys/mips/include/pmap.h
index e10212b..e75ccb7 100644
--- a/sys/mips/include/pmap.h
+++ b/sys/mips/include/pmap.h
@@ -66,9 +66,9 @@
* Pmap stuff
*/
struct pv_entry;
+struct pv_chunk;
struct md_page {
- int pv_list_count;
int pv_flags;
TAILQ_HEAD(, pv_entry) pv_list;
};
@@ -82,8 +82,7 @@ struct md_page {
struct pmap {
pd_entry_t *pm_segtab; /* KVA of segment table */
- TAILQ_HEAD(, pv_entry) pm_pvlist; /* list of mappings in
- * pmap */
+ TAILQ_HEAD(, pv_chunk) pm_pvchunk; /* list of mappings in pmap */
cpuset_t pm_active; /* active on cpus */
struct {
u_int32_t asid:ASID_BITS; /* TLB address space tag */
@@ -121,13 +120,30 @@ extern struct pmap kernel_pmap_store;
* mappings of that page. An entry is a pv_entry_t, the list is pv_table.
*/
typedef struct pv_entry {
- pmap_t pv_pmap; /* pmap where mapping lies */
vm_offset_t pv_va; /* virtual address for mapping */
TAILQ_ENTRY(pv_entry) pv_list;
- TAILQ_ENTRY(pv_entry) pv_plist;
} *pv_entry_t;
/*
+ * pv_entries are allocated in chunks per-process. This avoids the
+ * need to track per-pmap assignments.
+ */
+#ifdef __mips_n64
+#define _NPCM 3
+#define _NPCPV 168
+#else
+#define _NPCM 11
+#define _NPCPV 336
+#endif
+struct pv_chunk {
+ pmap_t pc_pmap;
+ TAILQ_ENTRY(pv_chunk) pc_list;
+ u_long pc_map[_NPCM]; /* bitmap; 1 = free */
+ TAILQ_ENTRY(pv_chunk) pc_lru;
+ struct pv_entry pc_pventry[_NPCPV];
+};
+
+/*
* physmem_desc[] is a superset of phys_avail[] and describes all the
* memory present in the system.
*
OpenPOWER on IntegriCloud