Diffstat (limited to 'sys/sparc64')
-rw-r--r--  sys/sparc64/sparc64/pmap.c  26
1 file changed, 16 insertions, 10 deletions
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index 180e5fd..c11e741 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -149,8 +149,8 @@ static int pmap_protect_tte(struct pmap *pm1, struct pmap *pm2,
*
* The page queues and pmap must be locked.
*/
-static void pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m,
- vm_prot_t prot, boolean_t wired);
+static int pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m,
+ vm_prot_t prot, u_int flags, int8_t psind);
extern int tl1_dmmu_miss_direct_patch_tsb_phys_1[];
extern int tl1_dmmu_miss_direct_patch_tsb_phys_end_1[];
@@ -1459,16 +1459,18 @@ pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
* target pmap with the protection requested. If specified the page
* will be wired down.
*/
-void
-pmap_enter(pmap_t pm, vm_offset_t va, vm_prot_t access, vm_page_t m,
- vm_prot_t prot, boolean_t wired)
+int
+pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+ u_int flags, int8_t psind)
{
+ int rv;
rw_wlock(&tte_list_global_lock);
PMAP_LOCK(pm);
- pmap_enter_locked(pm, va, m, prot, wired);
+ rv = pmap_enter_locked(pm, va, m, prot, flags, psind);
rw_wunlock(&tte_list_global_lock);
PMAP_UNLOCK(pm);
+ return (rv);
}
/*
@@ -1478,14 +1480,15 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm_prot_t access, vm_page_t m,
*
* The page queues and pmap must be locked.
*/
-static void
+static int
pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
- boolean_t wired)
+ u_int flags, int8_t psind __unused)
{
struct tte *tp;
vm_paddr_t pa;
vm_page_t real;
u_long data;
+ boolean_t wired;
rw_assert(&tte_list_global_lock, RA_WLOCKED);
PMAP_LOCK_ASSERT(pm, MA_OWNED);
@@ -1493,6 +1496,7 @@ pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
VM_OBJECT_ASSERT_LOCKED(m->object);
PMAP_STATS_INC(pmap_nenter);
pa = VM_PAGE_TO_PHYS(m);
+ wired = (flags & PMAP_ENTER_WIRED) != 0;
/*
* If this is a fake page from the device_pager, but it covers actual
@@ -1606,6 +1610,8 @@ pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
tsb_tte_enter(pm, m, va, TS_8K, data);
}
+
+ return (KERN_SUCCESS);
}
/*
@@ -1635,7 +1641,7 @@ pmap_enter_object(pmap_t pm, vm_offset_t start, vm_offset_t end,
PMAP_LOCK(pm);
while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
pmap_enter_locked(pm, start + ptoa(diff), m, prot &
- (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
+ (VM_PROT_READ | VM_PROT_EXECUTE), 0, 0);
m = TAILQ_NEXT(m, listq);
}
rw_wunlock(&tte_list_global_lock);
@@ -1649,7 +1655,7 @@ pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot)
rw_wlock(&tte_list_global_lock);
PMAP_LOCK(pm);
pmap_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
- FALSE);
+ 0, 0);
rw_wunlock(&tte_list_global_lock);
PMAP_UNLOCK(pm);
}
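
The diff above converts sparc64's pmap_enter() from the old "boolean_t wired" form to the machine-independent "u_int flags, int8_t psind" form and makes it return a KERN_* status. As a rough illustration only (not code from this commit; the helper name map_one_page and the exact header list are assumptions), a caller now requests wiring through PMAP_ENTER_WIRED in flags instead of passing TRUE, and checks the return value:

#include <sys/param.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>

/* Hypothetical helper: enter one base-size mapping, optionally wired. */
static int
map_one_page(pmap_t pm, vm_offset_t va, vm_page_t m, boolean_t wire_it)
{
	u_int flags;
	int rv;

	/* The wired request now travels in the flags argument. */
	flags = wire_it ? PMAP_ENTER_WIRED : 0;

	/* psind 0: only 8K (base-page) mappings are created here. */
	rv = pmap_enter(pm, va, m, VM_PROT_READ | VM_PROT_WRITE, flags, 0);
	if (rv != KERN_SUCCESS)
		return (rv);
	return (KERN_SUCCESS);
}

Within this diff, pmap_enter_locked() marks psind as __unused and always returns KERN_SUCCESS, so the status exists mainly to satisfy the updated machine-independent pmap_enter() interface.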