author     andrew <andrew@FreeBSD.org>    2017-05-30 12:44:01 +0000
committer  andrew <andrew@FreeBSD.org>    2017-05-30 12:44:01 +0000
commit     edb010ea9cd5ce05e055474ade71fb8687a74eb6 (patch)
tree       15fdf8608cf475680c024e227c88c01ca3c49b3a /sys/arm64
parent     44c9bb43d0bd6f6d94443c9efa27cbaf86a38825 (diff)
MFC r316734, r316761: Support Execute-Never bit in the arm64 pmap.
r316734:
Start to use the User and Privileged execute-never bits in the arm64 pagetables. This sets both bits when entering an address we know shouldn't be executed. I expect we could mark all userspace pages as Privileged execute-never to ensure the kernel doesn't branch to one of these addresses. While here add the ARMv8.1 upper attributes.

r316761:
Set the arm64 Execute-never bits in more places. We need to set the Execute-never bits when mapping device memory as the hardware may perform speculative instruction fetches. Set the Privileged Execute-never bit on userspace memory to stop the kernel if it is tricked into executing it.
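Restated outside the diff, the attribute selection these changes introduce can be summarized as in the sketch below. This is illustrative only: the helper pmap_xn_attrs does not exist in the patch, while the ATTR_*, VM_PROT_*, DEVICE_MEMORY and VM_MAXUSER_ADDRESS symbols are the ones used in the hunks that follow.

/*
 * Illustrative sketch (not part of the patch): how the execute-never
 * upper attributes are chosen for a leaf page-table entry, following the
 * logic added to pmap_enter() and pmap_enter_quick_locked() below.
 * ATTR_XN (= ATTR_UXN | ATTR_PXN) forbids execution at both EL0 and EL1;
 * ATTR_PXN alone forbids only privileged (kernel) execution.
 */
static pt_entry_t
pmap_xn_attrs(vm_offset_t va, vm_prot_t prot, vm_memattr_t memattr)
{
	pt_entry_t attrs = 0;

	/* Never allow execution from non-executable or device mappings. */
	if ((prot & VM_PROT_EXECUTE) == 0 || memattr == DEVICE_MEMORY)
		attrs |= ATTR_XN;
	/* The kernel must never execute from a userspace mapping. */
	else if (va < VM_MAXUSER_ADDRESS)
		attrs |= ATTR_PXN;

	return (attrs);
}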
Diffstat (limited to 'sys/arm64')
-rw-r--r--  sys/arm64/arm64/pmap.c   | 49
-rw-r--r--  sys/arm64/include/pte.h  |  5
2 files changed, 41 insertions(+), 13 deletions(-)
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index 5e2e7fc..9dc3231 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -606,7 +606,7 @@ pmap_bootstrap_dmap(vm_offset_t kern_l1, vm_paddr_t min_pa, vm_paddr_t max_pa)
l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT);
pmap_load_store(&pagetable_dmap[l1_slot],
- (pa & ~L1_OFFSET) | ATTR_DEFAULT |
+ (pa & ~L1_OFFSET) | ATTR_DEFAULT | ATTR_XN |
ATTR_IDX(CACHED_MEMORY) | L1_BLOCK);
}
@@ -1127,7 +1127,7 @@ static void
pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode)
{
pd_entry_t *pde;
- pt_entry_t *pte;
+ pt_entry_t *pte, attr;
vm_offset_t va;
int lvl;
@@ -1138,6 +1138,10 @@ pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode)
KASSERT((size & PAGE_MASK) == 0,
("pmap_kenter: Mapping is not page-sized"));
+ attr = ATTR_DEFAULT | ATTR_IDX(mode) | L3_PAGE;
+ if (mode == DEVICE_MEMORY)
+ attr |= ATTR_XN;
+
va = sva;
while (size != 0) {
pde = pmap_pde(kernel_pmap, va, &lvl);
@@ -1146,8 +1150,7 @@ pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode)
KASSERT(lvl == 2, ("pmap_kenter: Invalid level %d", lvl));
pte = pmap_l2_to_l3(pde, va);
- pmap_load_store(pte, (pa & ~L3_OFFSET) | ATTR_DEFAULT |
- ATTR_IDX(mode) | L3_PAGE);
+ pmap_load_store(pte, (pa & ~L3_OFFSET) | attr);
PTE_SYNC(pte);
va += PAGE_SIZE;
@@ -1259,6 +1262,8 @@ pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
m = ma[i];
pa = VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT | ATTR_AP(ATTR_AP_RW) |
ATTR_IDX(m->md.pv_memattr) | L3_PAGE;
+ if (m->md.pv_memattr == DEVICE_MEMORY)
+ pa |= ATTR_XN;
pte = pmap_l2_to_l3(pde, va);
pmap_load_store(pte, pa);
PTE_SYNC(pte);
@@ -2428,14 +2433,16 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{
vm_offset_t va, va_next;
pd_entry_t *l0, *l1, *l2;
- pt_entry_t *l3p, l3;
+ pt_entry_t *l3p, l3, nbits;
- if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
+ KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
+ if (prot == VM_PROT_NONE) {
pmap_remove(pmap, sva, eva);
return;
}
- if ((prot & VM_PROT_WRITE) == VM_PROT_WRITE)
+ if ((prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) ==
+ (VM_PROT_WRITE | VM_PROT_EXECUTE))
return;
PMAP_LOCK(pmap);
@@ -2480,17 +2487,25 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
for (l3p = pmap_l2_to_l3(l2, sva); sva != va_next; l3p++,
sva += L3_SIZE) {
l3 = pmap_load(l3p);
- if (pmap_l3_valid(l3)) {
+ if (!pmap_l3_valid(l3))
+ continue;
+
+ nbits = 0;
+ if ((prot & VM_PROT_WRITE) == 0) {
if ((l3 & ATTR_SW_MANAGED) &&
pmap_page_dirty(l3)) {
vm_page_dirty(PHYS_TO_VM_PAGE(l3 &
~ATTR_MASK));
}
- pmap_set(l3p, ATTR_AP(ATTR_AP_RO));
- PTE_SYNC(l3p);
- /* XXX: Use pmap_invalidate_range */
- pmap_invalidate_page(pmap, va);
+ nbits |= ATTR_AP(ATTR_AP_RO);
}
+ if ((prot & VM_PROT_EXECUTE) == 0)
+ nbits |= ATTR_XN;
+
+ pmap_set(l3p, nbits);
+ PTE_SYNC(l3p);
+ /* XXX: Use pmap_invalidate_range */
+ pmap_invalidate_page(pmap, va);
}
}
PMAP_UNLOCK(pmap);
@@ -2709,10 +2724,12 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
L3_PAGE);
if ((prot & VM_PROT_WRITE) == 0)
new_l3 |= ATTR_AP(ATTR_AP_RO);
+ if ((prot & VM_PROT_EXECUTE) == 0 || m->md.pv_memattr == DEVICE_MEMORY)
+ new_l3 |= ATTR_XN;
if ((flags & PMAP_ENTER_WIRED) != 0)
new_l3 |= ATTR_SW_WIRED;
if ((va >> 63) == 0)
- new_l3 |= ATTR_AP(ATTR_AP_USER);
+ new_l3 |= ATTR_AP(ATTR_AP_USER) | ATTR_PXN;
CTR2(KTR_PMAP, "pmap_enter: %.16lx -> %.16lx", va, pa);
@@ -3115,6 +3132,10 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
pa = VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT | ATTR_IDX(m->md.pv_memattr) |
ATTR_AP(ATTR_AP_RO) | L3_PAGE;
+ if ((prot & VM_PROT_EXECUTE) == 0 || m->md.pv_memattr == DEVICE_MEMORY)
+ pa |= ATTR_XN;
+ else if (va < VM_MAXUSER_ADDRESS)
+ pa |= ATTR_PXN;
/*
* Now validate mapping with RO protection
@@ -4251,6 +4272,8 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
l3 = pmap_load(pte);
l3 &= ~ATTR_IDX_MASK;
l3 |= ATTR_IDX(mode);
+ if (mode == DEVICE_MEMORY)
+ l3 |= ATTR_XN;
pmap_update_entry(kernel_pmap, pte, l3, tmpva,
PAGE_SIZE);
diff --git a/sys/arm64/include/pte.h b/sys/arm64/include/pte.h
index 72d0e62..50cf3d7 100644
--- a/sys/arm64/include/pte.h
+++ b/sys/arm64/include/pte.h
@@ -46,6 +46,11 @@ typedef uint64_t pt_entry_t; /* page table entry */
/* Bits 58:55 are reserved for software */
#define ATTR_SW_MANAGED (1UL << 56)
#define ATTR_SW_WIRED (1UL << 55)
+#define ATTR_UXN (1UL << 54)
+#define ATTR_PXN (1UL << 53)
+#define ATTR_XN (ATTR_PXN | ATTR_UXN)
+#define ATTR_CONTIGUOUS (1UL << 52)
+#define ATTR_DBM (1UL << 51)
#define ATTR_nG (1 << 11)
#define ATTR_AF (1 << 10)
#define ATTR_SH(x) ((x) << 8)
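With the new upper-attribute definitions above, execute permission of a leaf descriptor can be tested by checking the UXN and PXN bits individually. The helpers below are illustrative only and are not part of the patch or the tree.

/*
 * Illustrative helpers (not in the patch): a mapping is executable by the
 * kernel only while ATTR_PXN is clear, and executable by userspace only
 * while ATTR_UXN is clear.
 */
static inline bool
pte_kern_executable(pt_entry_t pte)
{
	return ((pte & ATTR_PXN) == 0);
}

static inline bool
pte_user_executable(pt_entry_t pte)
{
	return ((pte & ATTR_UXN) == 0);
}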