summaryrefslogtreecommitdiffstats
path: root/sys/i386/include/pmap.h
diff options
context:
space:
mode:
authoralc <alc@FreeBSD.org>2008-03-27 04:34:17 +0000
committeralc <alc@FreeBSD.org>2008-03-27 04:34:17 +0000
commit2a244be0948ebf54f47b701a69bdc0acbc452299 (patch)
tree71fd377cd39a489531c67dbd62417bb6bf1cd6d3 /sys/i386/include/pmap.h
parent669aee5e4400017ab51274cecbb9a12d7913543b (diff)
downloadFreeBSD-src-2a244be0948ebf54f47b701a69bdc0acbc452299.zip
FreeBSD-src-2a244be0948ebf54f47b701a69bdc0acbc452299.tar.gz
MFamd64 with few changes:
1. Add support for automatic promotion of 4KB page mappings to 2MB page mappings. Automatic promotion can be enabled by setting the tunable "vm.pmap.pg_ps_enabled" to a non-zero value. By default, automatic promotion is disabled. Tested by: kris 2. To date, we have assumed that the TLB will only set the PG_M bit in a PTE if that PTE has the PG_RW bit set. However, this assumption does not hold on recent processors from Intel. For example, consider a PTE that has the PG_RW bit set but the PG_M bit clear. Suppose this PTE is cached in the TLB and later the PG_RW bit is cleared in the PTE, but the corresponding TLB entry is not (yet) invalidated. Historically, upon a write access using this (stale) TLB entry, the TLB would observe that the PG_RW bit had been cleared and initiate a page fault, aborting the setting of the PG_M bit in the PTE. Now, however, P4- and Core2-family processors will set the PG_M bit before observing that the PG_RW bit is clear and initiating a page fault. In other words, the write does not occur but the PG_M bit is still set. The real impact of this difference is not that great. Specifically, we should no longer assert that any PTE with the PG_M bit set must also have the PG_RW bit set, and we should ignore the state of the PG_M bit unless the PG_RW bit is set.
Diffstat (limited to 'sys/i386/include/pmap.h')
-rw-r--r--sys/i386/include/pmap.h16
1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/sys/i386/include/pmap.h b/sys/i386/include/pmap.h
index 3edec14..74c7312 100644
--- a/sys/i386/include/pmap.h
+++ b/sys/i386/include/pmap.h
@@ -82,6 +82,13 @@
#define PG_N (PG_NC_PWT|PG_NC_PCD) /* Non-cacheable */
/*
+ * Promotion to a 2 or 4MB (PDE) page mapping requires that the corresponding
+ * 4KB (PTE) page mappings have identical settings for the following fields:
+ */
+#define PG_PTE_PROMOTE (PG_MANAGED | PG_W | PG_G | PG_PTE_PAT | \
+ PG_M | PG_A | PG_NC_PCD | PG_NC_PWT | PG_U | PG_RW | PG_V)
+
+/*
* Page Protection Exception bits
*/
@@ -213,6 +220,9 @@ pmap_kextract(vm_offset_t va)
#ifdef PAE
+#define pde_cmpset(pdep, old, new) \
+ atomic_cmpset_64((pdep), (old), (new))
+
static __inline pt_entry_t
pte_load(pt_entry_t *ptep)
{
@@ -269,6 +279,9 @@ extern pt_entry_t pg_nx;
#else /* PAE */
+#define pde_cmpset(pdep, old, new) \
+ atomic_cmpset_int((pdep), (old), (new))
+
static __inline pt_entry_t
pte_load(pt_entry_t *ptep)
{
@@ -330,6 +343,7 @@ struct pmap {
pdpt_entry_t *pm_pdpt; /* KVA of page directory pointer
table */
#endif
+ vm_page_t pm_root; /* spare page table pages */
};
typedef struct pmap *pmap_t;
@@ -393,7 +407,6 @@ extern char *ptvmmap; /* poor name! */
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;
-#define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list))
#define pmap_unmapbios(va, sz) pmap_unmapdev((va), (sz))
void pmap_bootstrap(vm_paddr_t);
@@ -406,6 +419,7 @@ void pmap_kremove(vm_offset_t);
void *pmap_mapbios(vm_paddr_t, vm_size_t);
void *pmap_mapdev(vm_paddr_t, vm_size_t);
void *pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
+boolean_t pmap_page_is_mapped(vm_page_t m);
void pmap_unmapdev(vm_offset_t, vm_size_t);
pt_entry_t *pmap_pte(pmap_t, vm_offset_t) __pure2;
void pmap_set_pg(void);
OpenPOWER on IntegriCloud