author	attilio <attilio@FreeBSD.org>	2013-08-09 11:28:55 +0000
committer	attilio <attilio@FreeBSD.org>	2013-08-09 11:28:55 +0000
commit	e9f37cac7422f86c8a65b4c123705f5dccd43fa1 (patch)
tree	589f2433c8a0e985a4f0aeb058fbbf1b412b6f98 /sys/amd64
parent	3f74b0e634cf4f4b3796e44533e8318ef773c3e9 (diff)
On all architectures, avoid preallocating the physical memory for nodes
used in vm_radix. On architectures supporting direct mapping, also avoid
preallocating the KVA for such nodes.

To do so, allow the operations derived from vm_radix_insert() to fail,
and handle all the resulting failures in their callers.

In vm_radix, introduce a new function, vm_radix_replace(), which can
replace a leaf node already present in the trie with a new one, and take
into account that allocation during vm_radix_insert() may itself recurse
into operations on the radix trie: if operations in vm_radix_insert()
recursed, vm_radix_insert() starts over from scratch.

Sponsored by:	EMC / Isilon storage division
Reviewed by:	alc (older version)
Reviewed by:	jeff
Tested by:	pho, scottl
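The caller-side pattern this change imposes is visible in the pmap.c
hunks below: vm_radix_insert() can now fail instead of panicking when a
trie node cannot be allocated, so every operation derived from it must
check the return value and unwind. A minimal sketch of that pattern,
assuming only that vm_radix_insert() returns 0 on success and a non-zero
errno value on allocation failure; the example_* names are hypothetical
and not part of the commit:

/*
 * Sketch: an operation derived from vm_radix_insert() must now
 * detect failure and back out instead of proceeding.
 */
static int
example_map_page(struct vm_radix *rtree, vm_page_t m)
{

	if (vm_radix_insert(rtree, m) != 0) {
		/*
		 * A radix-trie node could not be allocated.  Undo any
		 * work done so far and let the caller retry or fall
		 * back to a smaller mapping.
		 */
		return (ENOMEM);
	}
	/* The page is now reachable through the trie. */
	return (0);
}

This is the same shape the pmap_promote_pde() hunk below takes: on
insertion failure it bumps the promotion-failure counter and returns
without promoting.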
Diffstat (limited to 'sys/amd64')
-rw-r--r--	sys/amd64/amd64/pmap.c	14
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 32dbe8a..ef267f5 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -283,7 +283,7 @@ static boolean_t pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m,
static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
-static void pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
+static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
static boolean_t pmap_is_modified_pvh(struct md_page *pvh);
static boolean_t pmap_is_referenced_pvh(struct md_page *pvh);
static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
@@ -1526,12 +1526,12 @@ pmap_add_delayed_free_list(vm_page_t m, vm_page_t *free, boolean_t set_PG_ZERO)
* for mapping a distinct range of virtual addresses. The pmap's collection is
* ordered by this virtual address range.
*/
-static __inline void
+static __inline int
pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte)
{
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
- vm_radix_insert(&pmap->pm_root, mpte);
+ return (vm_radix_insert(&pmap->pm_root, mpte));
}
/*
@@ -3439,7 +3439,13 @@ setpte:
("pmap_promote_pde: page table page is out of range"));
KASSERT(mpte->pindex == pmap_pde_pindex(va),
("pmap_promote_pde: page table page's pindex is wrong"));
- pmap_insert_pt_page(pmap, mpte);
+ if (pmap_insert_pt_page(pmap, mpte)) {
+ atomic_add_long(&pmap_pde_p_failures, 1);
+ CTR2(KTR_PMAP,
+ "pmap_promote_pde: failure for va %#lx in pmap %p", va,
+ pmap);
+ return;
+ }
/*
* Promote the pv entries.