diff options
author | jake <jake@FreeBSD.org> | 2002-02-26 06:57:30 +0000 |
---|---|---|
committer | jake <jake@FreeBSD.org> | 2002-02-26 06:57:30 +0000 |
commit | 8319be1fd26a5961e35d9dda634634be8abf9361 (patch) | |
tree | 3b1d29e5f3abc08cf297dbc645a6e5f163051500 /sys/sparc64 | |
parent | 352997488245c479160aad2ac8a25422b1d5e9ba (diff) | |
download | FreeBSD-src-8319be1fd26a5961e35d9dda634634be8abf9361.zip FreeBSD-src-8319be1fd26a5961e35d9dda634634be8abf9361.tar.gz |
Convert pmap.pm_context to an array of contexts indexed by cpuid. This
doesn't make sense for SMP right now, but it is a means to an end.
Diffstat (limited to 'sys/sparc64')
-rw-r--r-- | sys/sparc64/include/pmap.h  |  2
-rw-r--r-- | sys/sparc64/sparc64/pmap.c  | 50
-rw-r--r-- | sys/sparc64/sparc64/pv.c    |  5
-rw-r--r-- | sys/sparc64/sparc64/swtch.S | 32
-rw-r--r-- | sys/sparc64/sparc64/swtch.s | 32
-rw-r--r-- | sys/sparc64/sparc64/trap.c  |  2
-rw-r--r-- | sys/sparc64/sparc64/tsb.c   |  8
7 files changed, 67 insertions(+), 64 deletions(-)
diff --git a/sys/sparc64/include/pmap.h b/sys/sparc64/include/pmap.h index 2a73bc0..858d894 100644 --- a/sys/sparc64/include/pmap.h +++ b/sys/sparc64/include/pmap.h @@ -70,7 +70,7 @@ struct pmap { struct tte *pm_tsb; vm_object_t pm_tsb_obj; u_int pm_active; - u_int pm_context; + u_int pm_context[MAXCPU]; u_int pm_count; struct pmap_statistics pm_stats; }; diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c index b28b181..c4bec9e 100644 --- a/sys/sparc64/sparc64/pmap.c +++ b/sys/sparc64/sparc64/pmap.c @@ -412,7 +412,8 @@ pmap_bootstrap(vm_offset_t ekva) * Initialize the kernel pmap (which is statically allocated). */ pm = kernel_pmap; - pm->pm_context = TLB_CTX_KERNEL; + for (i = 0; i < MAXCPU; i++) + pm->pm_context[i] = TLB_CTX_KERNEL; pm->pm_active = ~0; pm->pm_count = 1; TAILQ_INIT(&pm->pm_pvlist); @@ -604,7 +605,8 @@ pmap_cache_enter(vm_page_t m, vm_offset_t va) if ((tp = tsb_tte_lookup(pv->pv_pmap, pv->pv_va)) != NULL) { atomic_clear_long(&tp->tte_data, TD_CV); tlb_page_demap(TLB_DTLB | TLB_ITLB, - pv->pv_pmap->pm_context, pv->pv_va); + pv->pv_pmap->pm_context[PCPU_GET(cpuid)], + pv->pv_va); } } pa = VM_PAGE_TO_PHYS(m); @@ -1057,7 +1059,7 @@ void pmap_pinit0(pmap_t pm) { - pm->pm_context = pmap_context_alloc(); + pm->pm_context[PCPU_GET(cpuid)] = pmap_context_alloc(); pm->pm_active = 0; pm->pm_count = 1; pm->pm_tsb = NULL; @@ -1108,7 +1110,7 @@ pmap_pinit(pmap_t pm) pmap_qenter((vm_offset_t)pm->pm_tsb, ma, TSB_PAGES); pm->pm_active = 0; - pm->pm_context = pmap_context_alloc(); + pm->pm_context[PCPU_GET(cpuid)] = pmap_context_alloc(); pm->pm_count = 1; TAILQ_INIT(&pm->pm_pvlist); bzero(&pm->pm_stats, sizeof(pm->pm_stats)); @@ -1131,8 +1133,8 @@ pmap_release(pmap_t pm) vm_object_t obj; vm_page_t m; - CTR2(KTR_PMAP, "pmap_release: ctx=%#x tsb=%p", pm->pm_context, - pm->pm_tsb); + CTR2(KTR_PMAP, "pmap_release: ctx=%#x tsb=%p", + pm->pm_context[PCPU_GET(cpuid)], pm->pm_tsb); obj = pm->pm_tsb_obj; KASSERT(obj->ref_count == 1, ("pmap_release: tsbobj 
ref count != 1")); KASSERT(TAILQ_EMPTY(&pm->pm_pvlist), @@ -1140,7 +1142,7 @@ pmap_release(pmap_t pm) KASSERT(pmap_resident_count(pm) == 0, ("pmap_release: resident pages %ld != 0", pmap_resident_count(pm))); - pmap_context_destroy(pm->pm_context); + pmap_context_destroy(pm->pm_context[PCPU_GET(cpuid)]); TAILQ_FOREACH(m, &obj->memq, listq) { if (vm_page_sleep_busy(m, FALSE, "pmaprl")) continue; @@ -1241,12 +1243,12 @@ pmap_remove(pmap_t pm, vm_offset_t start, vm_offset_t end) vm_offset_t va; CTR3(KTR_PMAP, "pmap_remove: ctx=%#lx start=%#lx end=%#lx", - pm->pm_context, start, end); + pm->pm_context[PCPU_GET(cpuid)], start, end); if (PMAP_REMOVE_DONE(pm)) return; if (end - start > PMAP_TSB_THRESH) { tsb_foreach(pm, NULL, start, end, pmap_remove_tte); - tlb_context_demap(pm->pm_context); + tlb_context_demap(pm->pm_context[PCPU_GET(cpuid)]); } else { for (va = start; va < end; va += PAGE_SIZE) { if ((tp = tsb_tte_lookup(pm, va)) != NULL) { @@ -1254,7 +1256,8 @@ pmap_remove(pmap_t pm, vm_offset_t start, vm_offset_t end) break; } } - tlb_range_demap(pm->pm_context, start, end - 1); + tlb_range_demap(pm->pm_context[PCPU_GET(cpuid)], + start, end - 1); } } @@ -1294,7 +1297,7 @@ pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) struct tte *tp; CTR4(KTR_PMAP, "pmap_protect: ctx=%#lx sva=%#lx eva=%#lx prot=%#lx", - pm->pm_context, sva, eva, prot); + pm->pm_context[PCPU_GET(cpuid)], sva, eva, prot); KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, ("pmap_protect: non current pmap")); @@ -1309,13 +1312,13 @@ pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) if (eva - sva > PMAP_TSB_THRESH) { tsb_foreach(pm, NULL, sva, eva, pmap_protect_tte); - tlb_context_demap(pm->pm_context); + tlb_context_demap(pm->pm_context[PCPU_GET(cpuid)]); } else { for (va = sva; va < eva; va += PAGE_SIZE) { if ((tp = tsb_tte_lookup(pm, va)) != NULL) pmap_protect_tte(pm, NULL, tp, va); } - tlb_range_demap(pm->pm_context, sva, eva - 1); + 
tlb_range_demap(pm->pm_context[PCPU_GET(cpuid)], sva, eva - 1); } } @@ -1337,7 +1340,7 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot, pa = VM_PAGE_TO_PHYS(m); CTR6(KTR_PMAP, "pmap_enter: ctx=%p m=%p va=%#lx pa=%#lx prot=%#x wired=%d", - pm->pm_context, m, va, pa, prot, wired); + pm->pm_context[PCPU_GET(cpuid)], m, va, pa, prot, wired); tte.tte_vpn = TV_VPN(va); tte.tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP; @@ -1385,7 +1388,8 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot, if (pmap_track_modified(pm, va)) vm_page_dirty(m); } - tlb_tte_demap(otte, pm->pm_context); + tlb_tte_demap(otte, + pm->pm_context[PCPU_GET(cpuid)]); } } else { CTR0(KTR_PMAP, "pmap_enter: replace"); @@ -1416,7 +1420,7 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot, if (pmap_cache_enter(m, va) != 0) tte.tte_data |= TD_CV; } - tlb_tte_demap(otte, pm->pm_context); + tlb_tte_demap(otte, pm->pm_context[PCPU_GET(cpuid)]); } } else { CTR0(KTR_PMAP, "pmap_enter: new"); @@ -1449,7 +1453,7 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot, if ((prot & VM_PROT_WRITE) != 0) tte.tte_data |= TD_W; } - if (pm->pm_context == TLB_CTX_KERNEL) + if (pm->pm_context[PCPU_GET(cpuid)] == TLB_CTX_KERNEL) tte.tte_data |= TD_P; if (prot & VM_PROT_WRITE) tte.tte_data |= TD_SW; @@ -1538,14 +1542,16 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, if (dst_addr != src_addr) return; if (len > PMAP_TSB_THRESH) { - tsb_foreach(src_pmap, dst_pmap, src_addr, src_addr + len, pmap_copy_tte); - tlb_context_demap(dst_pmap->pm_context); + tsb_foreach(src_pmap, dst_pmap, src_addr, src_addr + len, + pmap_copy_tte); + tlb_context_demap(dst_pmap->pm_context[PCPU_GET(cpuid)]); } else { for (va = src_addr; va < src_addr + len; va += PAGE_SIZE) { if ((tp = tsb_tte_lookup(src_pmap, va)) != NULL) pmap_copy_tte(src_pmap, dst_pmap, tp, va); } - tlb_range_demap(dst_pmap->pm_context, src_addr, src_addr + len - 1); + 
tlb_range_demap(dst_pmap->pm_context[PCPU_GET(cpuid)], + src_addr, src_addr + len - 1); } } @@ -1649,7 +1655,7 @@ pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva) vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE); pv_free(pv); } - tlb_context_demap(pm->pm_context); + tlb_context_demap(pm->pm_context[PCPU_GET(cpuid)]); } /* @@ -1733,7 +1739,7 @@ pmap_activate(struct thread *td) * not issue any loads while we have interrupts disable below. */ pm = &td->td_proc->p_vmspace->vm_pmap; - context = pm->pm_context; + context = pm->pm_context[PCPU_GET(cpuid)]; tsb = (vm_offset_t)pm->pm_tsb; KASSERT(context != 0, ("pmap_activate: activating nucleus context")); diff --git a/sys/sparc64/sparc64/pv.c b/sys/sparc64/sparc64/pv.c index 5dd92c9..e17699d03 100644 --- a/sys/sparc64/sparc64/pv.c +++ b/sys/sparc64/sparc64/pv.c @@ -157,7 +157,8 @@ pv_bit_clear(vm_page_t m, u_long bits) vm_page_dirty(m); } atomic_clear_long(&tp->tte_data, bits); - tlb_tte_demap(*tp, pv->pv_pmap->pm_context); + tlb_tte_demap(*tp, + pv->pv_pmap->pm_context[PCPU_GET(cpuid)]); } } } @@ -250,7 +251,7 @@ pv_remove_all(vm_page_t m) vm_page_dirty(m); } atomic_clear_long(&tp->tte_data, TD_V); - tlb_tte_demap(*tp, pv->pv_pmap->pm_context); + tlb_tte_demap(*tp, pv->pv_pmap->pm_context[PCPU_GET(cpuid)]); tp->tte_vpn = 0; tp->tte_data = 0; pv->pv_pmap->pm_stats.resident_count--; diff --git a/sys/sparc64/sparc64/swtch.S b/sys/sparc64/sparc64/swtch.S index 877d123..c2d6c0b 100644 --- a/sys/sparc64/sparc64/swtch.S +++ b/sys/sparc64/sparc64/swtch.S @@ -51,7 +51,7 @@ ENTRY(cpu_switch) call choosethread ldx [PCPU(CURTHREAD)], %l0 cmp %l0, %o0 - be,a,pn %xcc, 6f + be,a,pn %xcc, 4f nop ldx [%l0 + TD_PCB], %l1 @@ -128,15 +128,18 @@ ENTRY(cpu_switch) * If they're the same we are done. */ cmp %l2, %o2 - be,a,pn %xcc, 6f + be,a,pn %xcc, 4f nop /* * If the old process has nucleus context we can skip demapping the * tsb. 
*/ - lduw [%l2 + VM_PMAP + PM_CONTEXT], %l3 - brz,a,pn %l3, 3f + lduw [PCPU(CPUID)], %l3 + sllx %l3, 2, %l3 + add %l2, VM_PMAP + PM_CONTEXT, %l4 + lduw [%l3 + %l4], %l5 + brz,a,pn %l5, 3f nop /* @@ -160,23 +163,18 @@ ENTRY(cpu_switch) /* * If the new process has nucleus context we are done. */ -3: lduw [%o2 + VM_PMAP + PM_CONTEXT], %o3 - brz,a,pn %o3, 6f +3: lduw [PCPU(CPUID)], %o3 + sllx %o3, 2, %o3 + add %o2, VM_PMAP + PM_CONTEXT, %o4 + lduw [%o3 + %o4], %o5 + brz,a,pn %o5, 4f nop /* - * If the new process has had its context stolen, get one. - */ - cmp %o3, -1 - bne,a,pt %xcc, 4f - nop - PANIC("cpu_switch: steal context", %o4) - - /* * Install the new primary context. */ -4: mov AA_DMMU_PCXR, %o4 - stxa %o3, [%o4] ASI_DMMU + mov AA_DMMU_PCXR, %o4 + stxa %o5, [%o4] ASI_DMMU flush %o0 /* @@ -200,7 +198,7 @@ ENTRY(cpu_switch) /* * Done. Return and load the new process's window from the stack. */ -6: ret +4: ret restore END(cpu_switch) diff --git a/sys/sparc64/sparc64/swtch.s b/sys/sparc64/sparc64/swtch.s index 877d123..c2d6c0b 100644 --- a/sys/sparc64/sparc64/swtch.s +++ b/sys/sparc64/sparc64/swtch.s @@ -51,7 +51,7 @@ ENTRY(cpu_switch) call choosethread ldx [PCPU(CURTHREAD)], %l0 cmp %l0, %o0 - be,a,pn %xcc, 6f + be,a,pn %xcc, 4f nop ldx [%l0 + TD_PCB], %l1 @@ -128,15 +128,18 @@ ENTRY(cpu_switch) * If they're the same we are done. */ cmp %l2, %o2 - be,a,pn %xcc, 6f + be,a,pn %xcc, 4f nop /* * If the old process has nucleus context we can skip demapping the * tsb. */ - lduw [%l2 + VM_PMAP + PM_CONTEXT], %l3 - brz,a,pn %l3, 3f + lduw [PCPU(CPUID)], %l3 + sllx %l3, 2, %l3 + add %l2, VM_PMAP + PM_CONTEXT, %l4 + lduw [%l3 + %l4], %l5 + brz,a,pn %l5, 3f nop /* @@ -160,23 +163,18 @@ ENTRY(cpu_switch) /* * If the new process has nucleus context we are done. 
*/ -3: lduw [%o2 + VM_PMAP + PM_CONTEXT], %o3 - brz,a,pn %o3, 6f +3: lduw [PCPU(CPUID)], %o3 + sllx %o3, 2, %o3 + add %o2, VM_PMAP + PM_CONTEXT, %o4 + lduw [%o3 + %o4], %o5 + brz,a,pn %o5, 4f nop /* - * If the new process has had its context stolen, get one. - */ - cmp %o3, -1 - bne,a,pt %xcc, 4f - nop - PANIC("cpu_switch: steal context", %o4) - - /* * Install the new primary context. */ -4: mov AA_DMMU_PCXR, %o4 - stxa %o3, [%o4] ASI_DMMU + mov AA_DMMU_PCXR, %o4 + stxa %o5, [%o4] ASI_DMMU flush %o0 /* @@ -200,7 +198,7 @@ ENTRY(cpu_switch) /* * Done. Return and load the new process's window from the stack. */ -6: ret +4: ret restore END(cpu_switch) diff --git a/sys/sparc64/sparc64/trap.c b/sys/sparc64/sparc64/trap.c index 249dd29..13e74f8 100644 --- a/sys/sparc64/sparc64/trap.c +++ b/sys/sparc64/sparc64/trap.c @@ -419,7 +419,7 @@ trap_pfault(struct thread *td, struct trapframe *tf) va = TLB_TAR_VA(tf->tf_tar); CTR4(KTR_TRAP, "trap_pfault: td=%p pm_ctx=%#lx va=%#lx ctx=%#lx", - td, p->p_vmspace->vm_pmap.pm_context, va, ctx); + td, p->p_vmspace->vm_pmap.pm_context[PCPU_GET(cpuid)], va, ctx); if (type == T_DATA_PROTECTION) { prot = VM_PROT_WRITE; diff --git a/sys/sparc64/sparc64/tsb.c b/sys/sparc64/sparc64/tsb.c index 745861b..41d9840 100644 --- a/sys/sparc64/sparc64/tsb.c +++ b/sys/sparc64/sparc64/tsb.c @@ -118,7 +118,7 @@ tsb_tte_lookup(pmap_t pm, vm_offset_t va) va = trunc_page(va); bucket = tsb_vtobucket(pm, va); CTR3(KTR_TSB, "tsb_tte_lookup: ctx=%#lx va=%#lx bucket=%p", - pm->pm_context, va, bucket); + pm->pm_context[PCPU_GET(cpuid)], va, bucket); for (i = 0; i < TSB_BUCKET_SIZE; i++) { if (tte_match(bucket[i], va)) { tp = &bucket[i]; @@ -130,7 +130,7 @@ tsb_tte_lookup(pmap_t pm, vm_offset_t va) } } CTR2(KTR_TSB, "tsb_tte_lookup: miss ctx=%#lx va=%#lx", - pm->pm_context, va); + pm->pm_context[PCPU_GET(cpuid)], va); return (NULL); } @@ -155,7 +155,7 @@ tsb_tte_enter(pmap_t pm, vm_page_t m, vm_offset_t va, struct tte tte) TSB_STATS_INC(tsb_nenter_u); bucket = 
tsb_vtobucket(pm, va); CTR4(KTR_TSB, "tsb_tte_enter: ctx=%#lx va=%#lx data=%#lx bucket=%p", - pm->pm_context, va, tte.tte_data, bucket); + pm->pm_context[PCPU_GET(cpuid)], va, tte.tte_data, bucket); tp = NULL; rtp = NULL; @@ -189,7 +189,7 @@ tsb_tte_enter(pmap_t pm, vm_page_t m, vm_offset_t va, struct tte tte) pmap_cache_remove(om, ova); pv_remove(pm, om, ova); } - tlb_tte_demap(*tp, pm->pm_context); + tlb_tte_demap(*tp, pm->pm_context[PCPU_GET(cpuid)]); } *tp = tte; |