author		alc <alc@FreeBSD.org>	2006-06-20 20:52:11 +0000
committer	alc <alc@FreeBSD.org>	2006-06-20 20:52:11 +0000
commit		13b4d6433549f362069fbe0a26fef8d45822616d (patch)
tree		dd95f5367ceaac132485bd3e4de0f2ad5baa01c0 /sys
parent		26a001f93c9b3d2eb5ceb24e3729f741def07ed7 (diff)
Change get_pv_entry() such that the call to vm_page_alloc() specifies
VM_ALLOC_NORMAL instead of VM_ALLOC_SYSTEM when try is TRUE. In other
words, when get_pv_entry() is permitted to fail, it no longer tries as
hard to allocate a page.
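
In outline, the change to get_pv_entry() selects the allocation class from
the caller's try argument (condensed from the amd64 hunk below; the i386
version additionally passes VM_ALLOC_WIRED):

	int bit, field, page_req;
	/* ... */
	/* No free items, allocate another chunk */
	page_req = try ? VM_ALLOC_NORMAL : VM_ALLOC_SYSTEM;
	m = vm_page_alloc(NULL, colour, page_req | VM_ALLOC_NOOBJ);
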
Change pmap_enter_quick_locked() to fail rather than wait if it is
unable to allocate a page table page. This prevents a race between
pmap_enter_object() and the page daemon. Specifically, an inactive
page that is a successor to the page that was given to
pmap_enter_quick_locked() might become a cache page while
pmap_enter_quick_locked() waits, and later pmap_enter_object() maps
the cache page, violating the invariant that cache pages are never
mapped. Similarly, change
pmap_enter_quick_locked() to call pmap_try_insert_pv_entry() rather
than pmap_insert_entry(). Generally speaking,
pmap_enter_quick_locked() is used to create speculative mappings. So,
it should not try hard to allocate memory if free memory is scarce.
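
Condensed from the hunks below, the two new failure paths in
pmap_enter_quick_locked() look like this (amd64 shown; the i386 variant of
pmap_unwire_pte_hold() takes no va argument):

	/* Allocate the page table page without sleeping; give up on failure. */
	mpte = _pmap_allocpte(pmap, ptepindex, M_NOWAIT);
	if (mpte == NULL)
		return (mpte);
	/* ... */
	/*
	 * Enter on the PV list if part of our managed memory.
	 */
	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0 &&
	    !pmap_try_insert_pv_entry(pmap, va, m)) {
		if (mpte != NULL) {
			pmap_unwire_pte_hold(pmap, va, mpte);
			mpte = NULL;
		}
		return (mpte);
	}
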
Add an assertion that the object containing m_start is locked in
pmap_enter_object(). Remove a similar assertion from
pmap_enter_quick_locked() because that function no longer accesses the
containing object.
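
The added check is the standard object lock assertion at the top of
pmap_enter_object(), identical on both architectures:

	VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
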
Remove a stale comment.
Reviewed by: ups@
Diffstat (limited to 'sys')
-rw-r--r--	sys/amd64/amd64/pmap.c	36
-rw-r--r--	sys/i386/i386/pmap.c	36
2 files changed, 30 insertions, 42 deletions
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 44ed928..c667f55 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -1664,7 +1664,7 @@ get_pv_entry(pmap_t pmap, int try)
 	static const struct timeval printinterval = { 60, 0 };
 	static struct timeval lastprint;
 	static vm_pindex_t colour;
-	int bit, field;
+	int bit, field, page_req;
 	pv_entry_t pv;
 	struct pv_chunk *pc;
 	vm_page_t m;
@@ -1697,7 +1697,8 @@ get_pv_entry(pmap_t pmap, int try)
 		}
 	}
 	/* No free items, allocate another chunk */
-	m = vm_page_alloc(NULL, colour, VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ);
+	page_req = try ? VM_ALLOC_NORMAL : VM_ALLOC_SYSTEM;
+	m = vm_page_alloc(NULL, colour, page_req | VM_ALLOC_NOOBJ);
 	if (m == NULL) {
 		if (try) {
 			pv_entry_count--;
@@ -2335,6 +2336,7 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
 	vm_page_t m, mpte;
 	vm_pindex_t diff, psize;
 
+	VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
 	psize = atop(end - start);
 	mpte = NULL;
 	m = m_start;
@@ -2376,7 +2378,6 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	    (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0,
 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 
 	/*
@@ -2394,7 +2395,6 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 		if (mpte && (mpte->pindex == ptepindex)) {
 			mpte->wire_count++;
 		} else {
-	retry:
 			/*
 			 * Get the page directory entry
 			 */
@@ -2412,18 +2412,8 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 			} else {
 				mpte = _pmap_allocpte(pmap, ptepindex,
 				    M_NOWAIT);
-				if (mpte == NULL) {
-					PMAP_UNLOCK(pmap);
-					vm_page_busy(m);
-					vm_page_unlock_queues();
-					VM_OBJECT_UNLOCK(m->object);
-					VM_WAIT;
-					VM_OBJECT_LOCK(m->object);
-					vm_page_lock_queues();
-					vm_page_wakeup(m);
-					PMAP_LOCK(pmap);
-					goto retry;
-				}
+				if (mpte == NULL)
+					return (mpte);
 			}
 		}
 	} else {
@@ -2446,12 +2436,16 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	}
 
 	/*
-	 * Enter on the PV list if part of our managed memory. Note that we
-	 * raise IPL while manipulating pv_table since pmap_enter can be
-	 * called at interrupt time.
+	 * Enter on the PV list if part of our managed memory.
 	 */
-	if ((m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0)
-		pmap_insert_entry(pmap, va, m);
+	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0 &&
+	    !pmap_try_insert_pv_entry(pmap, va, m)) {
+		if (mpte != NULL) {
+			pmap_unwire_pte_hold(pmap, va, mpte);
+			mpte = NULL;
+		}
+		return (mpte);
+	}
 
 	/*
 	 * Increment counters
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 63b5b7c..3fcd63c 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -1742,7 +1742,7 @@ get_pv_entry(pmap_t pmap, int try)
 	static const struct timeval printinterval = { 60, 0 };
 	static struct timeval lastprint;
 	static vm_pindex_t colour;
-	int bit, field;
+	int bit, field, page_req;
 	pv_entry_t pv;
 	struct pv_chunk *pc;
 	vm_page_t m;
@@ -1777,7 +1777,8 @@ get_pv_entry(pmap_t pmap, int try)
 		}
 	}
 	pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree);
-	m = vm_page_alloc(NULL, colour, VM_ALLOC_SYSTEM |
+	page_req = try ? VM_ALLOC_NORMAL : VM_ALLOC_SYSTEM;
+	m = vm_page_alloc(NULL, colour, page_req |
 	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
 	if (m == NULL || pc == NULL) {
 		if (try) {
@@ -2411,6 +2412,7 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
 	vm_page_t m, mpte;
 	vm_pindex_t diff, psize;
 
+	VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
 	psize = atop(end - start);
 	mpte = NULL;
 	m = m_start;
@@ -2452,7 +2454,6 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	    (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0,
 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 
 	/*
@@ -2470,7 +2471,6 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 		if (mpte && (mpte->pindex == ptepindex)) {
 			mpte->wire_count++;
 		} else {
-retry:
 			/*
 			 * Get the page directory entry
 			 */
@@ -2488,18 +2488,8 @@ retry:
 			} else {
 				mpte = _pmap_allocpte(pmap, ptepindex,
 				    M_NOWAIT);
-				if (mpte == NULL) {
-					PMAP_UNLOCK(pmap);
-					vm_page_busy(m);
-					vm_page_unlock_queues();
-					VM_OBJECT_UNLOCK(m->object);
-					VM_WAIT;
-					VM_OBJECT_LOCK(m->object);
-					vm_page_lock_queues();
-					vm_page_wakeup(m);
-					PMAP_LOCK(pmap);
-					goto retry;
-				}
+				if (mpte == NULL)
+					return (mpte);
 			}
 		}
 	} else {
@@ -2522,12 +2512,16 @@ retry:
 	}
 
 	/*
-	 * Enter on the PV list if part of our managed memory. Note that we
-	 * raise IPL while manipulating pv_table since pmap_enter can be
-	 * called at interrupt time.
+	 * Enter on the PV list if part of our managed memory.
 	 */
-	if ((m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0)
-		pmap_insert_entry(pmap, va, m);
+	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0 &&
+	    !pmap_try_insert_pv_entry(pmap, va, m)) {
+		if (mpte != NULL) {
+			pmap_unwire_pte_hold(pmap, mpte);
+			mpte = NULL;
+		}
+		return (mpte);
+	}
 
 	/*
 	 * Increment counters