path: root/sys/i386
author		jkim <jkim@FreeBSD.org>		2010-11-22 19:52:44 +0000
committer	jkim <jkim@FreeBSD.org>		2010-11-22 19:52:44 +0000
commit		31be61effab180d09c8e91e953af32e1e7aea5e3 (patch)
tree		93846e3185d960e68c051176e8750b1cd5aaab8a /sys/i386
parent		a709caaae77f5abfcef4916c6b23c8aff56adc5c (diff)
- Disable caches and flush caches/TLBs when we update PAT, as we do for MTRR.
  Flushing TLBs is required to ensure cache coherency according to the AMD64
  architecture manual.  Flushing caches is only required when changing from a
  cacheable memory type (WB, WP, or WT) to an uncacheable type (WC, UC, or
  UC-).  Since this function is only used once per processor during startup,
  there is no need to take any shortcuts.
- Leave PAT indices 0-3 at the default of WB, WT, UC-, and UC.  Program 5 as
  WP (from the default WT) and 6 as WC (from the default UC-).  Leave 4 and 7
  at the default of WB and UC.  This avoids transitions from a cacheable
  memory type to an uncacheable type, to minimize possible cache incoherency.
  Since we now flush caches and TLBs, this change may no longer be necessary,
  but we do not want to take any chances.
- Remove the Apple hardware specific quirks.  With the above changes, this
  hack no longer seems to be needed.
- Improve pmap_cache_bits() with an array that maps each PAT memory type to
  its index.  The array is initialized early, from pmap_init_pat(), so the
  function no longer needs to handle special cases.  It is now identical on
  both amd64 and i386.

Reviewed by:	jhb
Tested by:	RM (reuf_m at hotmail dot com)
		Ryszard Czekaj (rychoo at freeshell dot net)
		army.of.root (army dot of dot root at googlemail dot com)
MFC after:	3 days
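For reference, the 64-bit PAT MSR packs eight 8-bit memory-type fields, one per PAT entry.  Below is a minimal userland sketch of the composition performed in the patch, assuming PAT_VALUE()/PAT_MASK() simply shift a memory-type encoding into entry i's byte (the macro shapes and PAT_* encodings follow the standard x86 definitions; main() and the printed constant are illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Assumed macro shapes: entry i occupies bits 8*i .. 8*i+7 of the MSR. */
#define PAT_VALUE(i, m)         ((uint64_t)(m) << (8 * (i)))
#define PAT_MASK(i)             PAT_VALUE(i, 0xff)

/* x86 memory-type encodings. */
#define PAT_UNCACHEABLE         0x0     /* UC  */
#define PAT_WRITE_COMBINING     0x1     /* WC  */
#define PAT_WRITE_THROUGH       0x4     /* WT  */
#define PAT_WRITE_PROTECTED     0x5     /* WP  */
#define PAT_WRITE_BACK          0x6     /* WB  */
#define PAT_UNCACHED            0x7     /* UC- */

int
main(void)
{
        /* Power-on default: WB, WT, UC-, UC in entries 0-3 and again in 4-7. */
        uint64_t pat_msr = PAT_VALUE(0, PAT_WRITE_BACK) |
            PAT_VALUE(1, PAT_WRITE_THROUGH) |
            PAT_VALUE(2, PAT_UNCACHED) |
            PAT_VALUE(3, PAT_UNCACHEABLE) |
            PAT_VALUE(4, PAT_WRITE_BACK) |
            PAT_VALUE(5, PAT_WRITE_THROUGH) |
            PAT_VALUE(6, PAT_UNCACHED) |
            PAT_VALUE(7, PAT_UNCACHEABLE);

        /* The commit reprograms entry 5 (WT -> WP) and entry 6 (UC- -> WC),
         * leaving every cacheable default (entries 0, 1, and 4) untouched. */
        pat_msr &= ~(PAT_MASK(5) | PAT_MASK(6));
        pat_msr |= PAT_VALUE(5, PAT_WRITE_PROTECTED) |
            PAT_VALUE(6, PAT_WRITE_COMBINING);

        printf("PAT MSR: %#018jx\n", (uintmax_t)pat_msr);
        /* Prints: PAT MSR: 0x0001050600070406 */
        return (0);
}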
Diffstat (limited to 'sys/i386')

-rw-r--r--	sys/i386/i386/pmap.c	162

1 file changed, 70 insertions, 92 deletions
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 65a529e..b15e9b7 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -217,14 +217,20 @@ pt_entry_t pg_nx;
 static uma_zone_t pdptzone;
 #endif
 
-static int pat_works = 0;       /* Is page attribute table sane? */
-
 SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
 
+static int pat_works = 1;
+TUNABLE_INT("vm.pmap.pat_works", &pat_works);
+SYSCTL_INT(_vm_pmap, OID_AUTO, pat_works, CTLFLAG_RDTUN, &pat_works, 1,
+    "Is page attribute table fully functional?");
+
 static int pg_ps_enabled = 1;
 SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN, &pg_ps_enabled, 0,
     "Are large page mappings enabled?");
 
+#define PAT_INDEX_SIZE  8
+static int pat_index[PAT_INDEX_SIZE];   /* cache mode to PAT index conversion */
+
 /*
  * Data for the pv entry allocation mechanism
  */
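Because pat_works is now a CTLFLAG_RDTUN sysctl backed by TUNABLE_INT, it can be forced from the loader (e.g. setting vm.pmap.pat_works=0 in /boot/loader.conf) instead of being hard-wired per machine, which is what allows the Apple-specific quirk to be dropped below.  A minimal sketch of the same declaration pattern, with a hypothetical knob name:

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

SYSCTL_DECL(_vm_pmap);          /* parent node is declared in the pmap code */

/* Hypothetical knob: seeded from the loader environment at boot by
 * TUNABLE_INT(), then exported read-only through sysctl(8). */
static int example_knob = 1;
TUNABLE_INT("vm.pmap.example_knob", &example_knob);
SYSCTL_INT(_vm_pmap, OID_AUTO, example_knob, CTLFLAG_RDTUN, &example_knob, 0,
    "Hypothetical read-only boot-time tunable");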
@@ -490,13 +496,28 @@ pmap_bootstrap(vm_paddr_t firstaddr)
 void
 pmap_init_pat(void)
 {
+        int pat_table[PAT_INDEX_SIZE];
         uint64_t pat_msr;
-        char *sysenv;
-        static int pat_tested = 0;
+        u_long cr0, cr4;
+        int i;
+
+        /* Set default PAT index table. */
+        for (i = 0; i < PAT_INDEX_SIZE; i++)
+                pat_table[i] = -1;
+        pat_table[PAT_WRITE_BACK] = 0;
+        pat_table[PAT_WRITE_THROUGH] = 1;
+        pat_table[PAT_UNCACHEABLE] = 3;
+        pat_table[PAT_WRITE_COMBINING] = 3;
+        pat_table[PAT_WRITE_PROTECTED] = 3;
+        pat_table[PAT_UNCACHED] = 3;
 
         /* Bail if this CPU doesn't implement PAT. */
-        if (!(cpu_feature & CPUID_PAT))
+        if ((cpu_feature & CPUID_PAT) == 0) {
+                for (i = 0; i < PAT_INDEX_SIZE; i++)
+                        pat_index[i] = pat_table[i];
+                pat_works = 0;
                 return;
+        }
 
         /*
          * Due to some Intel errata, we can only safely use the lower 4
@@ -508,27 +529,10 @@ pmap_init_pat(void)
          *
          * Intel Pentium IV Processor Specification Update
          * Errata N46 (PAT Index MSB May Be Calculated Incorrectly)
-         *
-         * Some Apple Macs based on nVidia chipsets cannot enter ACPI mode
-         * via SMI# when we use upper 4 PAT entries for unknown reason.
          */
-        if (!pat_tested) {
-                if (cpu_vendor_id != CPU_VENDOR_INTEL ||
-                    (CPUID_TO_FAMILY(cpu_id) == 6 &&
-                    CPUID_TO_MODEL(cpu_id) >= 0xe)) {
-                        pat_works = 1;
-                        sysenv = getenv("smbios.system.product");
-                        if (sysenv != NULL) {
-                                if (strncmp(sysenv, "MacBook5,1", 10) == 0 ||
-                                    strncmp(sysenv, "MacBookPro5,5", 13) == 0 ||
-                                    strncmp(sysenv, "Macmini3,1", 10) == 0 ||
-                                    strncmp(sysenv, "iMac9,1", 7) == 0)
-                                        pat_works = 0;
-                                freeenv(sysenv);
-                        }
-                }
-                pat_tested = 1;
-        }
+        if (cpu_vendor_id == CPU_VENDOR_INTEL &&
+            !(CPUID_TO_FAMILY(cpu_id) == 6 && CPUID_TO_MODEL(cpu_id) >= 0xe))
+                pat_works = 0;
 
         /* Initialize default PAT entries. */
         pat_msr = PAT_VALUE(0, PAT_WRITE_BACK) |
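The check above trusts the upper four PAT entries on Intel only for family 6, model 0xe (Core) and later.  A standalone sketch of the family/model decoding that the CPUID_TO_FAMILY()/CPUID_TO_MODEL() macros perform on cpu_id (CPUID leaf 1, %eax), written here from the documented decoding rules rather than copied from the kernel headers:

#include <stdint.h>
#include <stdio.h>

static unsigned
cpuid_family(uint32_t id)
{
        unsigned family = (id >> 8) & 0xf;

        /* The extended family field only applies when the base family
         * is 0xf. */
        if (family == 0xf)
                family += (id >> 20) & 0xff;
        return (family);
}

static unsigned
cpuid_model(uint32_t id)
{
        unsigned family = (id >> 8) & 0xf;
        unsigned model = (id >> 4) & 0xf;

        /* The extended model field only applies to families 6 and 0xf. */
        if (family == 0x6 || family == 0xf)
                model |= ((id >> 16) & 0xf) << 4;
        return (model);
}

int
main(void)
{
        uint32_t id = 0x000006e8;       /* e.g. Core Duo: family 6, model 0xe */

        printf("family %#x, model %#x\n", cpuid_family(id), cpuid_model(id));
        return (0);
}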
@@ -543,20 +547,48 @@ pmap_init_pat(void)
         if (pat_works) {
                 /*
                  * Leave the indices 0-3 at the default of WB, WT, UC-, and UC.
-                 * Program 4 and 5 as WP and WC.
-                 * Leave 6 and 7 as UC- and UC.
+                 * Program 5 and 6 as WP and WC.
+                 * Leave 4 and 7 as WB and UC.
                  */
-                pat_msr &= ~(PAT_MASK(4) | PAT_MASK(5));
-                pat_msr |= PAT_VALUE(4, PAT_WRITE_PROTECTED) |
-                    PAT_VALUE(5, PAT_WRITE_COMBINING);
+                pat_msr &= ~(PAT_MASK(5) | PAT_MASK(6));
+                pat_msr |= PAT_VALUE(5, PAT_WRITE_PROTECTED) |
+                    PAT_VALUE(6, PAT_WRITE_COMBINING);
+                pat_table[PAT_UNCACHED] = 2;
+                pat_table[PAT_WRITE_PROTECTED] = 5;
+                pat_table[PAT_WRITE_COMBINING] = 6;
         } else {
                 /*
                  * Just replace PAT Index 2 with WC instead of UC-.
                  */
                 pat_msr &= ~PAT_MASK(2);
                 pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING);
+                pat_table[PAT_WRITE_COMBINING] = 2;
         }
+
+        /* Disable PGE. */
+        cr4 = rcr4();
+        load_cr4(cr4 & ~CR4_PGE);
+
+        /* Disable caches (CD = 1, NW = 0). */
+        cr0 = rcr0();
+        load_cr0((cr0 & ~CR0_NW) | CR0_CD);
+
+        /* Flushes caches and TLBs. */
+        wbinvd();
+        invltlb();
+
+        /* Update PAT and index table. */
         wrmsr(MSR_PAT, pat_msr);
+        for (i = 0; i < PAT_INDEX_SIZE; i++)
+                pat_index[i] = pat_table[i];
+
+        /* Flush caches and TLBs again. */
+        wbinvd();
+        invltlb();
+
+        /* Restore caches and PGE. */
+        load_cr0(cr0);
+        load_cr4(cr4);
 }
 
 /*
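The primitives used in the new sequence are thin wrappers around privileged instructions.  A simplified sketch in the style of sys/i386/include/cpufunc.h (the kernel versions may differ in detail).  Clearing CR4.PGE beforehand matters because reloading %cr3 leaves global TLB entries intact while PGE is set; with PGE off, the reload flushes them too:

static __inline void
wbinvd(void)
{
        /* Write back and invalidate all caches. */
        __asm __volatile("wbinvd");
}

static __inline unsigned int
rcr3(void)
{
        unsigned int data;

        __asm __volatile("movl %%cr3,%0" : "=r" (data));
        return (data);
}

static __inline void
load_cr3(unsigned int data)
{
        __asm __volatile("movl %0,%%cr3" : : "r" (data) : "memory");
}

static __inline void
invltlb(void)
{
        /* Reloading %cr3 invalidates the (non-global) TLB entries. */
        load_cr3(rcr3());
}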
@@ -792,78 +824,24 @@ SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, promotions, CTLFLAG_RD,
 int
 pmap_cache_bits(int mode, boolean_t is_pde)
 {
-        int pat_flag, pat_index, cache_bits;
+        int cache_bits, pat_flag, pat_idx;
+
+        if (mode < 0 || mode >= PAT_INDEX_SIZE || pat_index[mode] < 0)
+                panic("Unknown caching mode %d\n", mode);
 
         /* The PAT bit is different for PTE's and PDE's. */
         pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT;
 
-        /* If we don't support PAT, map extended modes to older ones. */
-        if (!(cpu_feature & CPUID_PAT)) {
-                switch (mode) {
-                case PAT_UNCACHEABLE:
-                case PAT_WRITE_THROUGH:
-                case PAT_WRITE_BACK:
-                        break;
-                case PAT_UNCACHED:
-                case PAT_WRITE_COMBINING:
-                case PAT_WRITE_PROTECTED:
-                        mode = PAT_UNCACHEABLE;
-                        break;
-                }
-        }
-
         /* Map the caching mode to a PAT index. */
-        if (pat_works) {
-                switch (mode) {
-                case PAT_UNCACHEABLE:
-                        pat_index = 3;
-                        break;
-                case PAT_WRITE_THROUGH:
-                        pat_index = 1;
-                        break;
-                case PAT_WRITE_BACK:
-                        pat_index = 0;
-                        break;
-                case PAT_UNCACHED:
-                        pat_index = 2;
-                        break;
-                case PAT_WRITE_COMBINING:
-                        pat_index = 5;
-                        break;
-                case PAT_WRITE_PROTECTED:
-                        pat_index = 4;
-                        break;
-                default:
-                        panic("Unknown caching mode %d\n", mode);
-                }
-        } else {
-                switch (mode) {
-                case PAT_UNCACHED:
-                case PAT_UNCACHEABLE:
-                case PAT_WRITE_PROTECTED:
-                        pat_index = 3;
-                        break;
-                case PAT_WRITE_THROUGH:
-                        pat_index = 1;
-                        break;
-                case PAT_WRITE_BACK:
-                        pat_index = 0;
-                        break;
-                case PAT_WRITE_COMBINING:
-                        pat_index = 2;
-                        break;
-                default:
-                        panic("Unknown caching mode %d\n", mode);
-                }
-        }
+        pat_idx = pat_index[mode];
 
         /* Map the 3-bit index value into the PAT, PCD, and PWT bits. */
         cache_bits = 0;
-        if (pat_index & 0x4)
+        if (pat_idx & 0x4)
                 cache_bits |= pat_flag;
-        if (pat_index & 0x2)
+        if (pat_idx & 0x2)
                 cache_bits |= PG_NC_PCD;
-        if (pat_index & 0x1)
+        if (pat_idx & 0x1)
                 cache_bits |= PG_NC_PWT;
         return (cache_bits);
 }
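As a usage illustration, pmap_cache_bits(PAT_WRITE_COMBINING, FALSE) now reduces to a bounds check plus a lookup of pat_index[PAT_WRITE_COMBINING] (entry 6 when pat_works is set, entry 2 otherwise), expanded into PTE bits.  A hypothetical caller that builds a write-combining mapping, e.g. for a framebuffer page (the wrapper function is invented; PG_V, PG_RW, and pte_store() are the usual i386 pmap idioms):

static void
example_map_wc_page(pt_entry_t *pte, vm_paddr_t pa)
{
        pt_entry_t newpte;

        /* For a PTE (is_pde == FALSE), the cache bits are some mix of
         * PG_PTE_PAT, PG_NC_PCD, and PG_NC_PWT selecting the WC entry. */
        newpte = pa | PG_RW | PG_V |
            pmap_cache_bits(PAT_WRITE_COMBINING, FALSE);
        pte_store(pte, newpte);
}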