author     kmacy <kmacy@FreeBSD.org>    2010-04-30 00:46:43 +0000
committer  kmacy <kmacy@FreeBSD.org>    2010-04-30 00:46:43 +0000
commit     1dc1263413262d13f45f42d89c554d0ddc167ca2 (patch)
tree       250705d42bcb9b364f4fd2233c38faaadadf7ae0 /sys/i386/include/pmap.h
parent     a43425e8835983e7c93a15a2d6a42fdb292c1676 (diff)
On Alan's advice, rather than do a wholesale conversion on a single
architecture from page queue lock to a hashed array of page locks (based
on a patch by Jeff Roberson), I've implemented page lock support in the
MI code and have only moved vm_page's hold_count out from under page
queue mutex to page lock. This changes pmap_extract_and_hold on all
pmaps.

Supported by:   Bitgravity Inc.
Discussed with: alc, jeffr, and kib
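In concrete terms, the change to pmap_extract_and_hold() means a page's
hold_count is now taken under that page's own lock instead of the global
page queue mutex. The sketch below illustrates only that locking pattern
and is not the committed i386 routine: the real code walks the page
tables itself, honors the requested protection, and uses the new
generation count to retry when the pmap lock has to be dropped; the
helper name and the use of pmap_extract() here are purely illustrative.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>

/*
 * Sketch only: hold a mapped page with hold_count protected by the
 * per-page lock rather than the page queue mutex.
 */
static vm_page_t
hold_mapped_page_sketch(pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa;
	vm_page_t m;

	m = NULL;
	pa = pmap_extract(pmap, va);	/* returns 0 if va is unmapped */
	if (pa != 0) {
		m = PHYS_TO_VM_PAGE(pa);
		vm_page_lock(m);	/* before: vm_page_lock_queues() */
		vm_page_hold(m);	/* hold_count changes under the page lock */
		vm_page_unlock(m);	/* before: vm_page_unlock_queues() */
	}
	return (m);
}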
Diffstat (limited to 'sys/i386/include/pmap.h')
-rw-r--r--  sys/i386/include/pmap.h | 3 +++
1 file changed, 3 insertions(+), 0 deletions(-)
diff --git a/sys/i386/include/pmap.h b/sys/i386/include/pmap.h
index ae7d79d..f45a9df 100644
--- a/sys/i386/include/pmap.h
+++ b/sys/i386/include/pmap.h
@@ -420,11 +420,14 @@ struct pmap {
 	u_int			pm_active;	/* active on cpus */
 	struct pmap_statistics	pm_stats;	/* pmap statistics */
 	LIST_ENTRY(pmap)	pm_list;	/* List of all pmaps */
+	uint32_t		pm_gen_count;	/* generation count (pmap lock dropped) */
+	u_int			pm_retries;
 #ifdef PAE
 	pdpt_entry_t		*pm_pdpt;	/* KVA of page director pointer
 						   table */
 #endif
 	vm_page_t		pm_root;	/* spare page table pages */
+
 };
 typedef struct pmap *pmap_t;
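For context on the two new fields: the comment suggests pm_gen_count is
bumped whenever the pmap lock has to be dropped in the middle of an
operation, so a caller can detect that the pmap may have changed
underneath it and restart, while pm_retries counts how often such
restarts happen. The helper below is a hypothetical sketch of that
pattern under those assumptions; it is not code from this commit.

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <vm/vm.h>
#include <vm/pmap.h>

/*
 * Hypothetical sketch: acquire another mutex without sleeping while
 * holding the pmap lock, and report whether the caller must retry
 * because the pmap lock was dropped again by another thread.
 */
static int
pmap_relock_sketch(pmap_t pmap, struct mtx *other)
{
	uint32_t gen;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	if (mtx_trylock(other))
		return (0);		/* no need to drop the pmap lock */
	gen = ++pmap->pm_gen_count;	/* record that the lock is dropped */
	PMAP_UNLOCK(pmap);
	mtx_lock(other);
	PMAP_LOCK(pmap);
	if (pmap->pm_gen_count != gen) {
		/* The pmap lock was dropped again in between; state is stale. */
		pmap->pm_retries++;
		mtx_unlock(other);
		return (EAGAIN);	/* caller restarts its lookup */
	}
	return (0);
}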