author	kib <kib@FreeBSD.org>	2008-03-23 07:07:27 +0000
committer	kib <kib@FreeBSD.org>	2008-03-23 07:07:27 +0000
commit	53a15ee1ea25afc63240421180f961b5bd19d113 (patch)
tree	a2f3d1c90343e135c9606a706e1d9e0c160756bf /sys/amd64
parent	fcd39263e4bec5057472950165516a57e41471cd (diff)
Prevent the overflow in the calculation of the next page directory.
The overflow causes the wraparound with consequent corruption of the (almost) whole address space mapping.

As Alan noted, pmap_copy() does not require the wrap-around checks because it cannot be applied to the kernel's pmap. The checks there are included for consistency.

Reported and tested by:	kris (i386/pmap.c:pmap_remove() part)
Reviewed by:	alc
MFC after:	1 week
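For illustration only (not part of the commit), a minimal standalone sketch of the wraparound the added checks guard against, assuming the usual amd64 definitions where NBPML4 = 1UL << 39 is the span covered by one PML4 entry and PML4MASK = NBPML4 - 1:

/*
 * Minimal sketch (not from the commit) of the overflow being fixed:
 * near the top of the 64-bit address space, sva + NBPML4 wraps around,
 * the mask then yields a va_next far below sva, and the removal loop
 * would walk the wrong part of the address space.  The committed check
 * detects the wrap (va_next < sva) and clamps va_next to eva.
 * The NBPML4/PML4MASK values below are assumed to match amd64's pmap.h.
 */
#include <stdint.h>
#include <stdio.h>

#define	NBPML4		(1UL << 39)	/* span mapped by one PML4 entry */
#define	PML4MASK	(NBPML4 - 1)

int
main(void)
{
	uint64_t sva = 0xffffffffffc00000UL;	/* close to the top of the VA space */
	uint64_t eva = 0xffffffffffe00000UL;
	uint64_t va_next;

	/* Old computation: the addition overflows and wraps to a small value. */
	va_next = (sva + NBPML4) & ~PML4MASK;
	printf("unclamped va_next = %#jx\n", (uintmax_t)va_next);

	/* The fix: detect the wraparound and clamp to the end of the range. */
	if (va_next < sva)
		va_next = eva;
	printf("clamped va_next   = %#jx\n", (uintmax_t)va_next);
	return (0);
}

The same pattern applies at the NBPDP and NBPDR boundary calculations in the hunks below.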
Diffstat (limited to 'sys/amd64')
-rw-r--r--	sys/amd64/amd64/pmap.c	18
1 file changed, 18 insertions, 0 deletions
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index d2ada05..7746850 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -2444,12 +2444,16 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 		pml4e = pmap_pml4e(pmap, sva);
 		if ((*pml4e & PG_V) == 0) {
 			va_next = (sva + NBPML4) & ~PML4MASK;
+			if (va_next < sva)
+				va_next = eva;
 			continue;
 		}
 
 		pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
 		if ((*pdpe & PG_V) == 0) {
 			va_next = (sva + NBPDP) & ~PDPMASK;
+			if (va_next < sva)
+				va_next = eva;
 			continue;
 		}
 
@@ -2457,6 +2461,8 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 		 * Calculate index for next page table.
 		 */
 		va_next = (sva + NBPDR) & ~PDRMASK;
+		if (va_next < sva)
+			va_next = eva;
 
 		pde = pmap_pdpe_to_pde(pdpe, sva);
 		ptpaddr = *pde;
@@ -2672,16 +2678,22 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 		pml4e = pmap_pml4e(pmap, sva);
 		if ((*pml4e & PG_V) == 0) {
 			va_next = (sva + NBPML4) & ~PML4MASK;
+			if (va_next < sva)
+				va_next = eva;
 			continue;
 		}
 
 		pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
 		if ((*pdpe & PG_V) == 0) {
 			va_next = (sva + NBPDP) & ~PDPMASK;
+			if (va_next < sva)
+				va_next = eva;
 			continue;
 		}
 
 		va_next = (sva + NBPDR) & ~PDRMASK;
+		if (va_next < sva)
+			va_next = eva;
 
 		pde = pmap_pdpe_to_pde(pdpe, sva);
 		ptpaddr = *pde;
@@ -3485,16 +3497,22 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
 		pml4e = pmap_pml4e(src_pmap, addr);
 		if ((*pml4e & PG_V) == 0) {
 			va_next = (addr + NBPML4) & ~PML4MASK;
+			if (va_next < addr)
+				va_next = end_addr;
 			continue;
 		}
 
 		pdpe = pmap_pml4e_to_pdpe(pml4e, addr);
 		if ((*pdpe & PG_V) == 0) {
 			va_next = (addr + NBPDP) & ~PDPMASK;
+			if (va_next < addr)
+				va_next = end_addr;
 			continue;
 		}
 
 		va_next = (addr + NBPDR) & ~PDRMASK;
+		if (va_next < addr)
+			va_next = end_addr;
 
 		pde = pmap_pdpe_to_pde(pdpe, addr);
 		srcptepaddr = *pde;