path: root/sys/amd64
author    alc <alc@FreeBSD.org>  2010-07-10 18:22:44 +0000
committer alc <alc@FreeBSD.org>  2010-07-10 18:22:44 +0000
commit    0a0ebaf1773311070ebf962f08ce9d1361b5c785 (patch)
tree      2c2b932bf4e048a772810861f3123c0c1521d47a /sys/amd64
parent    7b6b47da77b80ab694353b456af86171db3a0b67 (diff)
Reduce the number of global TLB shootdowns generated by pmap_qenter().
Specifically, teach pmap_qenter() to recognize the case when it is being asked to replace a mapping with the very same mapping and not generate a shootdown.

Unfortunately, the buffer cache commonly passes an entire buffer to pmap_qenter() when only a subset of the mappings are changing. For the extension of buffers in allocbuf() this was resulting in unnecessary shootdowns. The addition of new pages to the end of the buffer need not and did not trigger a shootdown, but overwriting the initial mappings with the very same mappings was seen as a change that necessitated a shootdown. With this change, that is no longer so.

For a "buildworld" on amd64, this change eliminates 14-15% of the pmap_invalidate_range() shootdowns, and about 4% of the overall shootdowns.

MFC after: 3 weeks
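The heart of the change is a compare-before-store on each page-table entry: when the physical address and cache attributes already match what is mapped, the PTE is left untouched, so no TLB anywhere can hold a stale copy and no shootdown is owed for that entry. Below is a minimal userland sketch of the idea under simplifying assumptions (a hypothetical qenter_model() helper, frame bits only, ignoring the cache-attribute bits the real code also compares); it is a model, not the kernel code.

#include <stdint.h>
#include <stdio.h>

#define PG_V     0x1ULL                   /* valid bit, as in the x86 PTE */
#define PG_FRAME 0x000ffffffffff000ULL    /* physical-frame mask */

/*
 * Rewrite a run of modeled PTEs, skipping entries that already hold the
 * desired mapping.  Returns nonzero iff a previously valid entry was
 * actually overwritten, i.e., iff a TLB shootdown would be required.
 */
static int
qenter_model(uint64_t *pte, const uint64_t *new_pa, int count)
{
	uint64_t oldpte = 0;
	int i;

	for (i = 0; i < count; i++) {
		if ((pte[i] & PG_FRAME) != (new_pa[i] & PG_FRAME)) {
			oldpte |= pte[i];		/* accumulate what was clobbered */
			pte[i] = new_pa[i] | PG_V;	/* install the new mapping */
		}
	}
	return ((oldpte & PG_V) != 0);
}

int
main(void)
{
	/* allocbuf()-like case: two mappings unchanged, one page appended. */
	uint64_t ptes[3]   = { 0x1000 | PG_V, 0x2000 | PG_V, 0 };
	uint64_t new_pa[3] = { 0x1000, 0x2000, 0x3000 };

	printf("shootdown needed: %d\n", qenter_model(ptes, new_pa, 3));
	return (0);
}

Re-entering the same first two mappings and appending a third page prints "shootdown needed: 0": only the previously invalid third entry changed, which is exactly the buffer-extension case the commit message describes.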
Diffstat (limited to 'sys/amd64')
-rw-r--r--  sys/amd64/amd64/pmap.c | 15
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 3b03d08..74731ab 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -1331,19 +1331,22 @@ pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
 void
 pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
 {
-	pt_entry_t *endpte, oldpte, *pte;
+	pt_entry_t *endpte, oldpte, pa, *pte;
+	vm_page_t m;
 
 	oldpte = 0;
 	pte = vtopte(sva);
 	endpte = pte + count;
 	while (pte < endpte) {
-		oldpte |= *pte;
-		pte_store(pte, VM_PAGE_TO_PHYS(*ma) | PG_G |
-		    pmap_cache_bits((*ma)->md.pat_mode, 0) | PG_RW | PG_V);
+		m = *ma++;
+		pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0);
+		if ((*pte & (PG_FRAME | PG_PTE_CACHE)) != pa) {
+			oldpte |= *pte;
+			pte_store(pte, pa | PG_G | PG_RW | PG_V);
+		}
 		pte++;
-		ma++;
 	}
-	if ((oldpte & PG_V) != 0)
+	if (__predict_false((oldpte & PG_V) != 0))
 		pmap_invalidate_range(kernel_pmap, sva, sva + count *
 		    PAGE_SIZE);
 }
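The invalidation test is additionally wrapped in __predict_false(), telling the compiler that needing a shootdown is now the uncommon outcome so it can lay out the fast path without a taken branch. In FreeBSD this macro comes from sys/sys/cdefs.h and expands to the compiler's __builtin_expect() hint:

#define	__predict_true(exp)	__builtin_expect((exp), 1)
#define	__predict_false(exp)	__builtin_expect((exp), 0)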