author    | alc <alc@FreeBSD.org> | 2009-07-12 23:31:20 +0000
committer | alc <alc@FreeBSD.org> | 2009-07-12 23:31:20 +0000
commit    | ea60573817570658a0528f9e36f4b65d1dc10b05 (patch)
tree      | a3d5747cacdafc73df95feaef86c79ecdf8fc918 /sys/amd64
parent    | c86a9c376273cc3a3010c0b5ad122b391a1b2ecd (diff)
Add support to the virtual memory system for configuring machine-
dependent memory attributes:
Rename vm_cache_mode_t to vm_memattr_t. The new name reflects the
fact that there are machine-dependent memory attributes that have
nothing to do with controlling the cache's behavior.
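For illustration only, the machine-independent side of this rename reduces to a typedef in sys/vm/vm.h, which lies outside the amd64-limited diffstat below; the underlying type shown here is an assumption, not something established by this diff:

```c
/*
 * Hypothetical sketch of the MI typedef in sys/vm/vm.h.  Only the
 * rename from vm_cache_mode_t to vm_memattr_t is established by this
 * commit; the exact underlying type is an assumption.
 */
typedef char vm_memattr_t;      /* machine-dependent memory attribute code */
```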
Introduce vm_object_set_memattr() for setting the default memory
attributes that will be given to an object's pages.
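A minimal usage sketch follows; the exact prototype and locking rules of vm_object_set_memattr() are assumptions for illustration, since the machine-independent change is not part of the amd64 diff shown below:

```c
/*
 * Hypothetical caller: make write-combining the default memory
 * attribute for a device object's pages.  Assumes
 * vm_object_set_memattr() takes (vm_object_t, vm_memattr_t) and is
 * called with the object locked; any return value is ignored here.
 */
VM_OBJECT_LOCK(obj);
(void)vm_object_set_memattr(obj, VM_MEMATTR_WRITE_COMBINING);
VM_OBJECT_UNLOCK(obj);
```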
Introduce and use pmap_page_{get,set}_memattr() for getting and
setting a page's machine-dependent memory attributes. Add full
support for these functions on amd64 and i386 and stubs for them on
the other architectures. The function pmap_page_set_memattr() is also
responsible for any other machine-dependent aspects of changing a
page's memory attributes, such as flushing the cache or updating the
direct map. The uses include kmem_alloc_contig(), vm_page_alloc(),
and the device pager:
kmem_alloc_contig() can now be used to allocate kernel memory with
non-default memory attributes on amd64 and i386 (a hedged usage sketch
follows this list).
vm_page_alloc() and the device pager will set the memory attributes
for the real or fictitious page according to the object's default
memory attributes.
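As noted above, a sketch of such an allocation; the full parameter list of kmem_alloc_contig() is assumed from the contemporaneous sys/vm/vm_extern.h prototype and is not shown in the amd64 diff below:

```c
/*
 * Hypothetical example: a 64KB physically contiguous, uncacheable
 * kernel buffer.  The assumed parameter order is (map, size, malloc
 * flags, low, high, alignment, boundary, memory attribute); verify
 * against sys/vm/vm_extern.h before use.
 */
vm_offset_t va;

va = kmem_alloc_contig(kernel_map, 64 * 1024, M_NOWAIT | M_ZERO, 0,
    ~(vm_paddr_t)0, PAGE_SIZE, 0, VM_MEMATTR_UNCACHEABLE);
if (va == 0)
        printf("contiguous uncacheable allocation failed\n");
```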
Update the various pmap functions on amd64 and i386 that map pages to
incorporate each page's memory attributes in the mapping.
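The per-page interface these pmap changes consume is small. The following fragment is illustrative usage only, but the prototypes it relies on, pmap_page_set_memattr() and the pmap_page_get_memattr() macro, appear verbatim in the sys/amd64/include/pmap.h hunk below:

```c
/*
 * Tag a page write-combining before it is mapped.  On amd64,
 * pmap_page_set_memattr() records the attribute in the page's md_page
 * and updates the direct map; later mappings of the page fold the
 * attribute into the PTE via pmap_cache_bits().
 */
pmap_page_set_memattr(m, VM_MEMATTR_WRITE_COMBINING);
KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_WRITE_COMBINING,
    ("unexpected memory attribute for page %p", m));
```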
Notes: (1) Inherent to this design are safety features that prevent
the specification of inconsistent memory attributes by different
mappings on amd64 and i386. In addition, the device pager provides a
warning when a device driver creates a fictitious page with memory
attributes that are inconsistent with the real page that the
fictitious page is an alias for. (2) Storing the machine-dependent
memory attributes for amd64 and i386 as a dedicated "int" in "struct
md_page" represents a compromise between space efficiency and the ease
of MFCing these changes to RELENG_7.
In collaboration with: jhb
Approved by: re (kib)
Diffstat (limited to 'sys/amd64')
-rw-r--r-- | sys/amd64/amd64/pmap.c   | 45
-rw-r--r-- | sys/amd64/include/pmap.h |  3
-rw-r--r-- | sys/amd64/include/vm.h   | 16
3 files changed, 48 insertions, 16 deletions
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 5bf8b89..fce2818 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -614,6 +614,7 @@ pmap_page_init(vm_page_t m)
 {
 
     TAILQ_INIT(&m->md.pv_list);
+    m->md.pat_mode = PAT_WRITE_BACK;
 }
 
 /*
@@ -1120,7 +1121,8 @@ pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
     endpte = pte + count;
     while (pte < endpte) {
         oldpte |= *pte;
-        pte_store(pte, VM_PAGE_TO_PHYS(*ma) | PG_G | PG_RW | PG_V);
+        pte_store(pte, VM_PAGE_TO_PHYS(*ma) | PG_G |
+            pmap_cache_bits((*ma)->md.pat_mode, 0) | PG_RW | PG_V);
         pte++;
         ma++;
     }
@@ -3025,7 +3027,7 @@ validate:
     /*
      * Now validate mapping with desired protection/wiring.
      */
-    newpte = (pt_entry_t)(pa | PG_V);
+    newpte = (pt_entry_t)(pa | pmap_cache_bits(m->md.pat_mode, 0) | PG_V);
     if ((prot & VM_PROT_WRITE) != 0) {
         newpte |= PG_RW;
         vm_page_flag_set(m, PG_WRITEABLE);
@@ -3110,7 +3112,8 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
             " in pmap %p", va, pmap);
         return (FALSE);
     }
-    newpde = VM_PAGE_TO_PHYS(m) | PG_PS | PG_V;
+    newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 1) |
+        PG_PS | PG_V;
     if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
         newpde |= PG_MANAGED;
 
@@ -3292,7 +3295,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
      */
     pmap->pm_stats.resident_count++;
 
-    pa = VM_PAGE_TO_PHYS(m);
+    pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0);
     if ((prot & VM_PROT_EXECUTE) == 0)
         pa |= pg_nx;
@@ -3333,6 +3336,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
     pd_entry_t *pde;
     vm_paddr_t pa, ptepa;
     vm_page_t p, pdpg;
+    int pat_mode;
 
     VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
     KASSERT(object->type == OBJT_DEVICE,
@@ -3343,6 +3347,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
         p = vm_page_lookup(object, pindex);
         KASSERT(p->valid == VM_PAGE_BITS_ALL,
             ("pmap_object_init_pt: invalid page %p", p));
+        pat_mode = p->md.pat_mode;
 
         /*
          * Abort the mapping if the first page is not physically
@@ -3354,21 +3359,28 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
 
         /*
          * Skip the first page.  Abort the mapping if the rest of
-         * the pages are not physically contiguous.
+         * the pages are not physically contiguous or have differing
+         * memory attributes.
          */
         p = TAILQ_NEXT(p, listq);
         for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
             pa += PAGE_SIZE) {
             KASSERT(p->valid == VM_PAGE_BITS_ALL,
                 ("pmap_object_init_pt: invalid page %p", p));
-            if (pa != VM_PAGE_TO_PHYS(p))
+            if (pa != VM_PAGE_TO_PHYS(p) ||
+                pat_mode != p->md.pat_mode)
                 return;
             p = TAILQ_NEXT(p, listq);
         }
 
-        /* Map using 2MB pages. */
+        /*
+         * Map using 2MB pages.  Since "ptepa" is 2M aligned and
+         * "size" is a multiple of 2M, adding the PAT setting to "pa"
+         * will not affect the termination of this loop.
+         */
         PMAP_LOCK(pmap);
-        for (pa = ptepa; pa < ptepa + size; pa += NBPDR) {
+        for (pa = ptepa | pmap_cache_bits(pat_mode, 1); pa < ptepa +
+            size; pa += NBPDR) {
             pdpg = pmap_allocpde(pmap, addr, M_NOWAIT);
             if (pdpg == NULL) {
                 /*
@@ -4373,6 +4385,23 @@ pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe, vm_offset_t va)
 }
 
 /*
+ * Sets the memory attribute for the specified page.
+ */
+void
+pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
+{
+
+    m->md.pat_mode = ma;
+
+    /*
+     * Update the direct mapping and flush the cache.
+     */
+    if (pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE,
+        m->md.pat_mode))
+        panic("memory attribute change on the direct map failed");
+}
+
+/*
  * Changes the specified virtual address range's memory type to that given by
  * the parameter "mode".  The specified virtual address range must be
  * completely contained within either the direct map or the kernel map.  If
diff --git a/sys/amd64/include/pmap.h b/sys/amd64/include/pmap.h
index b428a52..11a5628 100644
--- a/sys/amd64/include/pmap.h
+++ b/sys/amd64/include/pmap.h
@@ -233,6 +233,7 @@ struct pv_chunk;
 
 struct md_page {
     TAILQ_HEAD(,pv_entry)   pv_list;
+    int                     pat_mode;
 };
 
 /*
@@ -299,6 +300,7 @@ extern vm_paddr_t dump_avail[];
 extern vm_offset_t virtual_avail;
 extern vm_offset_t virtual_end;
 
+#define pmap_page_get_memattr(m)    ((vm_memattr_t)(m)->md.pat_mode)
 #define pmap_unmapbios(va, sz)      pmap_unmapdev((va), (sz))
 
 void    pmap_bootstrap(vm_paddr_t *);
@@ -312,6 +314,7 @@ void    *pmap_mapbios(vm_paddr_t, vm_size_t);
 void    *pmap_mapdev(vm_paddr_t, vm_size_t);
 void    *pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
 boolean_t pmap_page_is_mapped(vm_page_t m);
+void    pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
 void    pmap_unmapdev(vm_offset_t, vm_size_t);
 void    pmap_invalidate_page(pmap_t, vm_offset_t);
 void    pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
diff --git a/sys/amd64/include/vm.h b/sys/amd64/include/vm.h
index 4796dbc..4065a36 100644
--- a/sys/amd64/include/vm.h
+++ b/sys/amd64/include/vm.h
@@ -32,14 +32,14 @@
 
 #include <machine/specialreg.h>
 
-/* Cache control options. */
-#define VM_CACHE_UNCACHEABLE        ((vm_cache_mode_t)PAT_UNCACHEABLE)
-#define VM_CACHE_WRITE_COMBINING    ((vm_cache_mode_t)PAT_WRITE_COMBINING)
-#define VM_CACHE_WRITE_THROUGH      ((vm_cache_mode_t)PAT_WRITE_THROUGH)
-#define VM_CACHE_WRITE_PROTECTED    ((vm_cache_mode_t)PAT_WRITE_PROTECTED)
-#define VM_CACHE_WRITE_BACK         ((vm_cache_mode_t)PAT_WRITE_BACK)
-#define VM_CACHE_UNCACHED           ((vm_cache_mode_t)PAT_UNCACHED)
+/* Memory attributes. */
+#define VM_MEMATTR_UNCACHEABLE      ((vm_memattr_t)PAT_UNCACHEABLE)
+#define VM_MEMATTR_WRITE_COMBINING  ((vm_memattr_t)PAT_WRITE_COMBINING)
+#define VM_MEMATTR_WRITE_THROUGH    ((vm_memattr_t)PAT_WRITE_THROUGH)
+#define VM_MEMATTR_WRITE_PROTECTED  ((vm_memattr_t)PAT_WRITE_PROTECTED)
+#define VM_MEMATTR_WRITE_BACK       ((vm_memattr_t)PAT_WRITE_BACK)
+#define VM_MEMATTR_UNCACHED         ((vm_memattr_t)PAT_UNCACHED)
 
-#define VM_CACHE_DEFAULT    VM_CACHE_WRITE_BACK
+#define VM_MEMATTR_DEFAULT  VM_MEMATTR_WRITE_BACK
 
 #endif /* !_MACHINE_VM_H_ */