Diffstat (limited to 'sys/powerpc')
36 files changed, 586 insertions, 443 deletions
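The first hunks below (sys/powerpc/aim/clock.c and sys/powerpc/booke/clock.c) convert the decrementer eventtimer from a pair of struct bintime pointers to a single 32.32 fixed-point sbintime_t. A minimal sketch of the arithmetic those hunks rely on, with made-up helper names and not taken from the commit itself:

/*
 * Illustrative only: how a 32.32 fixed-point sbintime_t maps to
 * decrementer ticks, mirroring "(et_frequency * period) >> 32" in
 * decr_et_start() and the new et_min_period/et_max_period setup.
 * Assumes sbt stays within et_max_period so the 64-bit product
 * does not overflow.
 */
#include <stdint.h>

typedef int64_t sbintime_t;	/* seconds in the high 32 bits, fraction in the low 32 */

static uint32_t
sbt_to_ticks(uint64_t freq, sbintime_t sbt)	/* hypothetical helper */
{

	return ((uint32_t)((freq * (uint64_t)sbt) >> 32));
}

static sbintime_t
ticks_to_sbt(uint64_t freq, uint64_t ticks)	/* hypothetical helper */
{

	return ((sbintime_t)((ticks << 32) / freq));
}

With the default ticks_per_sec of 12500000 from aim/clock.c, a 1 ms period (sbt = (1 << 32) / 1000) works out to roughly 12500 decrementer ticks.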
diff --git a/sys/powerpc/aim/clock.c b/sys/powerpc/aim/clock.c index 0bf7cfa..ae98d26 100644 --- a/sys/powerpc/aim/clock.c +++ b/sys/powerpc/aim/clock.c @@ -85,7 +85,7 @@ static u_long ticks_per_sec = 12500000; static u_long *decr_counts[MAXCPU]; static int decr_et_start(struct eventtimer *et, - struct bintime *first, struct bintime *period); + sbintime_t first, sbintime_t period); static int decr_et_stop(struct eventtimer *et); static timecounter_get_t decr_get_timecount; @@ -195,12 +195,8 @@ decr_tc_init(void) ET_FLAGS_PERCPU; decr_et.et_quality = 1000; decr_et.et_frequency = ticks_per_sec; - decr_et.et_min_period.sec = 0; - decr_et.et_min_period.frac = - ((0x00000002LLU << 32) / ticks_per_sec) << 32; - decr_et.et_max_period.sec = 0x7fffffffLLU / ticks_per_sec; - decr_et.et_max_period.frac = - ((0x7fffffffLLU << 32) / ticks_per_sec) << 32; + decr_et.et_min_period = (0x00000002LLU << 32) / ticks_per_sec; + decr_et.et_max_period = (0x7fffffffLLU << 32) / ticks_per_sec; decr_et.et_start = decr_et_start; decr_et.et_stop = decr_et_stop; decr_et.et_priv = NULL; @@ -212,24 +208,20 @@ decr_tc_init(void) */ static int decr_et_start(struct eventtimer *et, - struct bintime *first, struct bintime *period) + sbintime_t first, sbintime_t period) { struct decr_state *s = DPCPU_PTR(decr_state); uint32_t fdiv; - if (period != NULL) { + if (period != 0) { s->mode = 1; - s->div = (decr_et.et_frequency * (period->frac >> 32)) >> 32; - if (period->sec != 0) - s->div += decr_et.et_frequency * period->sec; + s->div = (decr_et.et_frequency * period) >> 32; } else { s->mode = 2; - s->div = 0x7fffffff; + s->div = 0; } - if (first != NULL) { - fdiv = (decr_et.et_frequency * (first->frac >> 32)) >> 32; - if (first->sec != 0) - fdiv += decr_et.et_frequency * first->sec; + if (first != 0) { + fdiv = (decr_et.et_frequency * first) >> 32; } else fdiv = s->div; diff --git a/sys/powerpc/aim/machdep.c b/sys/powerpc/aim/machdep.c index 23cf745..5ae42bd 100644 --- a/sys/powerpc/aim/machdep.c +++ b/sys/powerpc/aim/machdep.c @@ -84,6 +84,7 @@ __FBSDID("$FreeBSD$"); #include <sys/mutex.h> #include <sys/ptrace.h> #include <sys/reboot.h> +#include <sys/rwlock.h> #include <sys/signalvar.h> #include <sys/syscallsubr.h> #include <sys/sysctl.h> diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c index b173760..ddd22ae 100644 --- a/sys/powerpc/aim/mmu_oea.c +++ b/sys/powerpc/aim/mmu_oea.c @@ -136,7 +136,6 @@ __FBSDID("$FreeBSD$"); #include <vm/vm_object.h> #include <vm/vm_extern.h> #include <vm/vm_pageout.h> -#include <vm/vm_pager.h> #include <vm/uma.h> #include <machine/cpu.h> @@ -276,6 +275,8 @@ void moea_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t); void moea_clear_modify(mmu_t, vm_page_t); void moea_clear_reference(mmu_t, vm_page_t); void moea_copy_page(mmu_t, vm_page_t, vm_page_t); +void moea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset, + vm_page_t *mb, vm_offset_t b_offset, int xfersize); void moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t); void moea_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t, vm_prot_t); @@ -321,6 +322,7 @@ static mmu_method_t moea_methods[] = { MMUMETHOD(mmu_clear_modify, moea_clear_modify), MMUMETHOD(mmu_clear_reference, moea_clear_reference), MMUMETHOD(mmu_copy_page, moea_copy_page), + MMUMETHOD(mmu_copy_pages, moea_copy_pages), MMUMETHOD(mmu_enter, moea_enter), MMUMETHOD(mmu_enter_object, moea_enter_object), MMUMETHOD(mmu_enter_quick, moea_enter_quick), @@ -1044,6 +1046,30 @@ moea_copy_page(mmu_t mmu, vm_page_t msrc, 
vm_page_t mdst) bcopy((void *)src, (void *)dst, PAGE_SIZE); } +void +moea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset, + vm_page_t *mb, vm_offset_t b_offset, int xfersize) +{ + void *a_cp, *b_cp; + vm_offset_t a_pg_offset, b_pg_offset; + int cnt; + + while (xfersize > 0) { + a_pg_offset = a_offset & PAGE_MASK; + cnt = min(xfersize, PAGE_SIZE - a_pg_offset); + a_cp = (char *)VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]) + + a_pg_offset; + b_pg_offset = b_offset & PAGE_MASK; + cnt = min(cnt, PAGE_SIZE - b_pg_offset); + b_cp = (char *)VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]) + + b_pg_offset; + bcopy(a_cp, b_cp, cnt); + a_offset += cnt; + b_offset += cnt; + xfersize -= cnt; + } +} + /* * Zero a page of physical memory by temporarily mapping it into the tlb. */ @@ -1122,9 +1148,8 @@ moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, if (pmap_bootstrapped) rw_assert(&pvh_global_lock, RA_WLOCKED); PMAP_LOCK_ASSERT(pmap, MA_OWNED); - KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 || - VM_OBJECT_LOCKED(m->object), - ("moea_enter_locked: page %p is not busy", m)); + if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0) + VM_OBJECT_ASSERT_WLOCKED(m->object); /* XXX change the pvo head for fake pages */ if ((m->oflags & VPO_UNMANAGED) != 0) { @@ -1293,7 +1318,7 @@ moea_is_modified(mmu_t mmu, vm_page_t m) * concurrently set while the object is locked. Thus, if PGA_WRITEABLE * is clear, no PTEs can have PTE_CHG set. */ - VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); + VM_OBJECT_ASSERT_WLOCKED(m->object); if ((m->oflags & VPO_BUSY) == 0 && (m->aflags & PGA_WRITEABLE) == 0) return (FALSE); @@ -1333,7 +1358,7 @@ moea_clear_modify(mmu_t mmu, vm_page_t m) KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("moea_clear_modify: page %p is not managed", m)); - VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); + VM_OBJECT_ASSERT_WLOCKED(m->object); KASSERT((m->oflags & VPO_BUSY) == 0, ("moea_clear_modify: page %p is busy", m)); @@ -1368,7 +1393,7 @@ moea_remove_write(mmu_t mmu, vm_page_t m) * another thread while the object is locked. Thus, if PGA_WRITEABLE * is clear, no page table entries need updating. 
*/ - VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); + VM_OBJECT_ASSERT_WLOCKED(m->object); if ((m->oflags & VPO_BUSY) == 0 && (m->aflags & PGA_WRITEABLE) == 0) return; diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c index 00dab9b..66194df 100644 --- a/sys/powerpc/aim/mmu_oea64.c +++ b/sys/powerpc/aim/mmu_oea64.c @@ -140,7 +140,6 @@ __FBSDID("$FreeBSD$"); #include <vm/vm_object.h> #include <vm/vm_extern.h> #include <vm/vm_pageout.h> -#include <vm/vm_pager.h> #include <vm/uma.h> #include <machine/_inttypes.h> @@ -291,6 +290,8 @@ void moea64_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t); void moea64_clear_modify(mmu_t, vm_page_t); void moea64_clear_reference(mmu_t, vm_page_t); void moea64_copy_page(mmu_t, vm_page_t, vm_page_t); +void moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset, + vm_page_t *mb, vm_offset_t b_offset, int xfersize); void moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t); void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t, vm_prot_t); @@ -335,6 +336,7 @@ static mmu_method_t moea64_methods[] = { MMUMETHOD(mmu_clear_modify, moea64_clear_modify), MMUMETHOD(mmu_clear_reference, moea64_clear_reference), MMUMETHOD(mmu_copy_page, moea64_copy_page), + MMUMETHOD(mmu_copy_pages, moea64_copy_pages), MMUMETHOD(mmu_enter, moea64_enter), MMUMETHOD(mmu_enter_object, moea64_enter_object), MMUMETHOD(mmu_enter_quick, moea64_enter_quick), @@ -646,6 +648,14 @@ moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart, moea64_kenter(mmup, pa, pa); } ENABLE_TRANS(msr); + + /* + * Allow user to override unmapped_buf_allowed for testing. + * XXXKIB Only direct map implementation was tested. + */ + if (!TUNABLE_INT_FETCH("vfs.unmapped_buf_allowed", + &unmapped_buf_allowed)) + unmapped_buf_allowed = hw_direct_map; } void @@ -1105,6 +1115,72 @@ moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst) } } +static inline void +moea64_copy_pages_dmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset, + vm_page_t *mb, vm_offset_t b_offset, int xfersize) +{ + void *a_cp, *b_cp; + vm_offset_t a_pg_offset, b_pg_offset; + int cnt; + + while (xfersize > 0) { + a_pg_offset = a_offset & PAGE_MASK; + cnt = min(xfersize, PAGE_SIZE - a_pg_offset); + a_cp = (char *)VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]) + + a_pg_offset; + b_pg_offset = b_offset & PAGE_MASK; + cnt = min(cnt, PAGE_SIZE - b_pg_offset); + b_cp = (char *)VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]) + + b_pg_offset; + bcopy(a_cp, b_cp, cnt); + a_offset += cnt; + b_offset += cnt; + xfersize -= cnt; + } +} + +static inline void +moea64_copy_pages_nodmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset, + vm_page_t *mb, vm_offset_t b_offset, int xfersize) +{ + void *a_cp, *b_cp; + vm_offset_t a_pg_offset, b_pg_offset; + int cnt; + + mtx_lock(&moea64_scratchpage_mtx); + while (xfersize > 0) { + a_pg_offset = a_offset & PAGE_MASK; + cnt = min(xfersize, PAGE_SIZE - a_pg_offset); + moea64_set_scratchpage_pa(mmu, 0, + VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT])); + a_cp = (char *)moea64_scratchpage_va[0] + a_pg_offset; + b_pg_offset = b_offset & PAGE_MASK; + cnt = min(cnt, PAGE_SIZE - b_pg_offset); + moea64_set_scratchpage_pa(mmu, 1, + VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT])); + b_cp = (char *)moea64_scratchpage_va[1] + b_pg_offset; + bcopy(a_cp, b_cp, cnt); + a_offset += cnt; + b_offset += cnt; + xfersize -= cnt; + } + mtx_unlock(&moea64_scratchpage_mtx); +} + +void +moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset, + vm_page_t *mb, vm_offset_t 
b_offset, int xfersize) +{ + + if (hw_direct_map) { + moea64_copy_pages_dmap(mmu, ma, a_offset, mb, b_offset, + xfersize); + } else { + moea64_copy_pages_nodmap(mmu, ma, a_offset, mb, b_offset, + xfersize); + } +} + void moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size) { @@ -1184,9 +1260,8 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, pvo_flags = PVO_MANAGED; } - KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 || - VM_OBJECT_LOCKED(m->object), - ("moea64_enter: page %p is not busy", m)); + if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0) + VM_OBJECT_ASSERT_WLOCKED(m->object); /* XXX change the pvo head for fake pages */ if ((m->oflags & VPO_UNMANAGED) != 0) { @@ -1449,7 +1524,7 @@ moea64_is_modified(mmu_t mmu, vm_page_t m) * concurrently set while the object is locked. Thus, if PGA_WRITEABLE * is clear, no PTEs can have LPTE_CHG set. */ - VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); + VM_OBJECT_ASSERT_WLOCKED(m->object); if ((m->oflags & VPO_BUSY) == 0 && (m->aflags & PGA_WRITEABLE) == 0) return (FALSE); @@ -1484,7 +1559,7 @@ moea64_clear_modify(mmu_t mmu, vm_page_t m) KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("moea64_clear_modify: page %p is not managed", m)); - VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); + VM_OBJECT_ASSERT_WLOCKED(m->object); KASSERT((m->oflags & VPO_BUSY) == 0, ("moea64_clear_modify: page %p is busy", m)); @@ -1517,7 +1592,7 @@ moea64_remove_write(mmu_t mmu, vm_page_t m) * another thread while the object is locked. Thus, if PGA_WRITEABLE * is clear, no page table entries need updating. */ - VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); + VM_OBJECT_ASSERT_WLOCKED(m->object); if ((m->oflags & VPO_BUSY) == 0 && (m->aflags & PGA_WRITEABLE) == 0) return; diff --git a/sys/powerpc/aim/moea64_native.c b/sys/powerpc/aim/moea64_native.c index 3da2f5a..de0d4ba 100644 --- a/sys/powerpc/aim/moea64_native.c +++ b/sys/powerpc/aim/moea64_native.c @@ -117,7 +117,6 @@ __FBSDID("$FreeBSD$"); #include <vm/vm_object.h> #include <vm/vm_extern.h> #include <vm/vm_pageout.h> -#include <vm/vm_pager.h> #include <machine/md_var.h> #include <machine/mmuvar.h> diff --git a/sys/powerpc/aim/trap.c b/sys/powerpc/aim/trap.c index d30aded..97f4ca1 100644 --- a/sys/powerpc/aim/trap.c +++ b/sys/powerpc/aim/trap.c @@ -130,6 +130,7 @@ systrace_probe_func_t systrace_probe_func; dtrace_fasttrap_probe_ptr_t dtrace_fasttrap_probe_ptr; dtrace_pid_probe_ptr_t dtrace_pid_probe_ptr; dtrace_return_probe_ptr_t dtrace_return_probe_ptr; +int (*dtrace_invop_jump_addr)(struct trapframe *); #endif static struct powerpc_exception powerpc_exceptions[] = { @@ -220,10 +221,8 @@ trap(struct trapframe *frame) /* * XXXDTRACE: add fasttrap and pid probes handlers here (if ever) */ - if (!user) { - if (dtrace_trap_func != NULL && (*dtrace_trap_func)(frame, type)) - return; - } + if (dtrace_trap_func != NULL && (*dtrace_trap_func)(frame, type)) + return; #endif if (user) { @@ -296,7 +295,7 @@ trap(struct trapframe *frame) /* Identify the trap reason */ if (frame->srr1 & EXC_PGM_TRAP) sig = SIGTRAP; - else if (ppc_instr_emulate(frame) == 0) + else if (ppc_instr_emulate(frame) == 0) frame->srr0 += 4; else sig = SIGILL; @@ -311,6 +310,16 @@ trap(struct trapframe *frame) KASSERT(cold || td->td_ucred != NULL, ("kernel trap doesn't have ucred")); switch (type) { +#ifdef KDTRACE_HOOKS + case EXC_PGM: + if (frame->srr1 & EXC_PGM_TRAP) { + if (*(uintptr_t *)frame->srr0 == 0x7c810808) { + if (dtrace_invop_jump_addr != NULL) { + dtrace_invop_jump_addr(frame); + } + } + } +#endif #ifdef __powerpc64__ 
case EXC_DSE: if ((frame->cpu.aim.dar & SEGMENT_MASK) == USER_ADDR) { @@ -376,6 +385,8 @@ printtrap(u_int vector, struct trapframe *frame, int isfatal, int user) case EXC_DSI: printf(" virtual address = 0x%" PRIxPTR "\n", frame->cpu.aim.dar); + printf(" dsisr = 0x%" PRIxPTR "\n", + frame->cpu.aim.dsisr); break; case EXC_ISE: case EXC_ISI: diff --git a/sys/powerpc/aim/trap_subr32.S b/sys/powerpc/aim/trap_subr32.S index a00cc3d..d137fd4 100644 --- a/sys/powerpc/aim/trap_subr32.S +++ b/sys/powerpc/aim/trap_subr32.S @@ -242,13 +242,6 @@ #ifdef KDTRACE_HOOKS .data - .globl dtrace_invop_jump_addr - .align 4 - .type dtrace_invop_jump_addr, @object - .size dtrace_invop_jump_addr, 4 -dtrace_invop_jump_addr: - .word 0 - .word 0 .globl dtrace_invop_calltrap_addr .align 4 .type dtrace_invop_calltrap_addr, @object diff --git a/sys/powerpc/aim/trap_subr64.S b/sys/powerpc/aim/trap_subr64.S index 0a12753..8550227 100644 --- a/sys/powerpc/aim/trap_subr64.S +++ b/sys/powerpc/aim/trap_subr64.S @@ -276,13 +276,6 @@ restore_kernsrs: #ifdef KDTRACE_HOOKS .data - .globl dtrace_invop_jump_addr - .align 8 - .type dtrace_invop_jump_addr, @object - .size dtrace_invop_jump_addr, 8 -dtrace_invop_jump_addr: - .word 0 - .word 0 .globl dtrace_invop_calltrap_addr .align 8 .type dtrace_invop_calltrap_addr, @object diff --git a/sys/powerpc/booke/clock.c b/sys/powerpc/booke/clock.c index 9958160..5827e73 100644 --- a/sys/powerpc/booke/clock.c +++ b/sys/powerpc/booke/clock.c @@ -88,7 +88,7 @@ static u_long *decr_counts[MAXCPU]; #define DIFF19041970 2082844800 static int decr_et_start(struct eventtimer *et, - struct bintime *first, struct bintime *period); + sbintime_t first, sbintime_t period); static int decr_et_stop(struct eventtimer *et); static timecounter_get_t decr_get_timecount; @@ -193,12 +193,8 @@ decr_tc_init(void) ET_FLAGS_PERCPU; decr_et.et_quality = 1000; decr_et.et_frequency = ticks_per_sec; - decr_et.et_min_period.sec = 0; - decr_et.et_min_period.frac = - ((0x00000002LLU << 32) / ticks_per_sec) << 32; - decr_et.et_max_period.sec = 0xfffffffeLLU / ticks_per_sec; - decr_et.et_max_period.frac = - ((0xfffffffeLLU << 32) / ticks_per_sec) << 32; + decr_et.et_min_period = (0x00000002LLU << 32) / ticks_per_sec; + decr_et.et_max_period = (0xfffffffeLLU << 32) / ticks_per_sec; decr_et.et_start = decr_et_start; decr_et.et_stop = decr_et_stop; decr_et.et_priv = NULL; @@ -209,26 +205,21 @@ decr_tc_init(void) * Event timer start method. 
*/ static int -decr_et_start(struct eventtimer *et, - struct bintime *first, struct bintime *period) +decr_et_start(struct eventtimer *et, sbintime_t first, sbintime_t period) { struct decr_state *s = DPCPU_PTR(decr_state); uint32_t fdiv, tcr; - if (period != NULL) { + if (period != 0) { s->mode = 1; - s->div = (decr_et.et_frequency * (period->frac >> 32)) >> 32; - if (period->sec != 0) - s->div += decr_et.et_frequency * period->sec; + s->div = (decr_et.et_frequency * period) >> 32; } else { s->mode = 2; - s->div = 0xffffffff; + s->div = 0; } - if (first != NULL) { - fdiv = (decr_et.et_frequency * (first->frac >> 32)) >> 32; - if (first->sec != 0) - fdiv += decr_et.et_frequency * first->sec; - } else + if (first != 0) + fdiv = (decr_et.et_frequency * first) >> 32; + else fdiv = s->div; tcr = mfspr(SPR_TCR); diff --git a/sys/powerpc/booke/machdep.c b/sys/powerpc/booke/machdep.c index 1522f9f..e9a2d52 100644 --- a/sys/powerpc/booke/machdep.c +++ b/sys/powerpc/booke/machdep.c @@ -101,6 +101,7 @@ __FBSDID("$FreeBSD$"); #include <sys/kernel.h> #include <sys/lock.h> #include <sys/mutex.h> +#include <sys/rwlock.h> #include <sys/sysctl.h> #include <sys/exec.h> #include <sys/ktr.h> diff --git a/sys/powerpc/booke/platform_bare.c b/sys/powerpc/booke/platform_bare.c index c5739f7..65fb554 100644 --- a/sys/powerpc/booke/platform_bare.c +++ b/sys/powerpc/booke/platform_bare.c @@ -89,7 +89,7 @@ static platform_method_t bare_methods[] = { PLATFORMMETHOD(platform_reset, booke_reset), - { 0, 0 } + PLATFORMMETHOD_END }; static platform_def_t bare_platform = { diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c index f6e5f9c..b9f4838 100644 --- a/sys/powerpc/booke/pmap.c +++ b/sys/powerpc/booke/pmap.c @@ -217,7 +217,6 @@ static struct rwlock_padalign pvh_global_lock; /* Data for the pv entry allocation mechanism */ static uma_zone_t pvzone; -static struct vm_object pvzone_obj; static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0; #define PV_ENTRY_ZONE_MIN 2048 /* min pv entries in uma zone */ @@ -275,6 +274,8 @@ static void mmu_booke_clear_reference(mmu_t, vm_page_t); static void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t); static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t); +static void mmu_booke_copy_pages(mmu_t, vm_page_t *, + vm_offset_t, vm_page_t *, vm_offset_t, int); static void mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t); static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, @@ -335,6 +336,7 @@ static mmu_method_t mmu_booke_methods[] = { MMUMETHOD(mmu_clear_reference, mmu_booke_clear_reference), MMUMETHOD(mmu_copy, mmu_booke_copy), MMUMETHOD(mmu_copy_page, mmu_booke_copy_page), + MMUMETHOD(mmu_copy_pages, mmu_booke_copy_pages), MMUMETHOD(mmu_enter, mmu_booke_enter), MMUMETHOD(mmu_enter_object, mmu_booke_enter_object), MMUMETHOD(mmu_enter_quick, mmu_booke_enter_quick), @@ -1343,7 +1345,7 @@ mmu_booke_init(mmu_t mmu) TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max); pv_entry_high_water = 9 * (pv_entry_max / 10); - uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max); + uma_zone_reserve_kva(pvzone, pv_entry_max); /* Pre-fill pvzone with initial number of pv entries. 
*/ uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN); @@ -1561,9 +1563,8 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, KASSERT((va <= VM_MAXUSER_ADDRESS), ("mmu_booke_enter_locked: user pmap, non user va")); } - KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 || - VM_OBJECT_LOCKED(m->object), - ("mmu_booke_enter_locked: page %p is not busy", m)); + if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0) + VM_OBJECT_ASSERT_WLOCKED(m->object); PMAP_LOCK_ASSERT(pmap, MA_OWNED); @@ -1960,7 +1961,7 @@ mmu_booke_remove_write(mmu_t mmu, vm_page_t m) * another thread while the object is locked. Thus, if PGA_WRITEABLE * is clear, no page table entries need updating. */ - VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); + VM_OBJECT_ASSERT_WLOCKED(m->object); if ((m->oflags & VPO_BUSY) == 0 && (m->aflags & PGA_WRITEABLE) == 0) return; @@ -2138,6 +2139,36 @@ mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm) mtx_unlock(©_page_mutex); } +static inline void +mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset, + vm_page_t *mb, vm_offset_t b_offset, int xfersize) +{ + void *a_cp, *b_cp; + vm_offset_t a_pg_offset, b_pg_offset; + int cnt; + + mtx_lock(©_page_mutex); + while (xfersize > 0) { + a_pg_offset = a_offset & PAGE_MASK; + cnt = min(xfersize, PAGE_SIZE - a_pg_offset); + mmu_booke_kenter(mmu, copy_page_src_va, + VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT])); + a_cp = (char *)copy_page_src_va + a_pg_offset; + b_pg_offset = b_offset & PAGE_MASK; + cnt = min(cnt, PAGE_SIZE - b_pg_offset); + mmu_booke_kenter(mmu, copy_page_dst_va, + VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT])); + b_cp = (char *)copy_page_dst_va + b_pg_offset; + bcopy(a_cp, b_cp, cnt); + mmu_booke_kremove(mmu, copy_page_dst_va); + mmu_booke_kremove(mmu, copy_page_src_va); + a_offset += cnt; + b_offset += cnt; + xfersize -= cnt; + } + mtx_unlock(©_page_mutex); +} + /* * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it * into virtual memory and using bzero to clear its contents. This is intended @@ -2175,7 +2206,7 @@ mmu_booke_is_modified(mmu_t mmu, vm_page_t m) * concurrently set while the object is locked. Thus, if PGA_WRITEABLE * is clear, no PTEs can be modified. 
*/ - VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); + VM_OBJECT_ASSERT_WLOCKED(m->object); if ((m->oflags & VPO_BUSY) == 0 && (m->aflags & PGA_WRITEABLE) == 0) return (rv); @@ -2247,7 +2278,7 @@ mmu_booke_clear_modify(mmu_t mmu, vm_page_t m) KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("mmu_booke_clear_modify: page %p is not managed", m)); - VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); + VM_OBJECT_ASSERT_WLOCKED(m->object); KASSERT((m->oflags & VPO_BUSY) == 0, ("mmu_booke_clear_modify: page %p is busy", m)); @@ -2662,7 +2693,7 @@ mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr, vm_object_t object, vm_pindex_t pindex, vm_size_t size) { - VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); + VM_OBJECT_ASSERT_WLOCKED(object); KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, ("mmu_booke_object_init_pt: non-device object")); } diff --git a/sys/powerpc/conf/GENERIC b/sys/powerpc/conf/GENERIC index f7c3511..d91e00d 100644 --- a/sys/powerpc/conf/GENERIC +++ b/sys/powerpc/conf/GENERIC @@ -100,7 +100,6 @@ device agp # ATA controllers device ahci # AHCI-compatible SATA controllers device ata # Legacy ATA/SATA controllers -options ATA_CAM # Handle legacy controllers with CAM device mvs # Marvell 88SX50XX/88SX60XX/88SX70XX/SoC SATA device siis # SiliconImage SiI3124/SiI3132/SiI3531 SATA diff --git a/sys/powerpc/conf/GENERIC64 b/sys/powerpc/conf/GENERIC64 index a2a283f..1cdf195 100644 --- a/sys/powerpc/conf/GENERIC64 +++ b/sys/powerpc/conf/GENERIC64 @@ -95,7 +95,6 @@ device agp # ATA controllers device ahci # AHCI-compatible SATA controllers device ata # Legacy ATA/SATA controllers -options ATA_CAM # Handle legacy controllers with CAM device mvs # Marvell 88SX50XX/88SX60XX/88SX70XX/SoC SATA device siis # SiliconImage SiI3124/SiI3132/SiI3531 SATA diff --git a/sys/powerpc/conf/MPC85XX b/sys/powerpc/conf/MPC85XX index 00b0dca..e222ddb 100644 --- a/sys/powerpc/conf/MPC85XX +++ b/sys/powerpc/conf/MPC85XX @@ -57,7 +57,6 @@ options WITNESS options WITNESS_SKIPSPIN device ata -options ATA_CAM device bpf device cfi device crypto diff --git a/sys/powerpc/conf/Makefile b/sys/powerpc/conf/Makefile index f665238..562bc46 100644 --- a/sys/powerpc/conf/Makefile +++ b/sys/powerpc/conf/Makefile @@ -1,8 +1,5 @@ # $FreeBSD$ TARGET=powerpc -.if ${MACHINE_ARCH} == powerpc || ${MACHINE_ARCH} == powerpc64 -TARGET_ARCH?=${MACHINE_ARCH} -.endif .include "${.CURDIR}/../../conf/makeLINT.mk" diff --git a/sys/powerpc/include/counter.h b/sys/powerpc/include/counter.h new file mode 100644 index 0000000..81b6bbc --- /dev/null +++ b/sys/powerpc/include/counter.h @@ -0,0 +1,84 @@ +/*- + * Copyright (c) 2012, 2013 Konstantin Belousov <kib@FreeBSD.org> + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef __MACHINE_COUNTER_H__ +#define __MACHINE_COUNTER_H__ + +#include <sys/pcpu.h> +#ifdef INVARIANTS +#include <sys/proc.h> +#endif + +#if defined(AIM) && defined(__powerpc64__) + +#define counter_enter() do {} while (0) +#define counter_exit() do {} while (0) + +#define counter_u64_add_protected(c, i) counter_u64_add(c, i) + +extern struct pcpu __pcpu[MAXCPU]; + +static inline void +counter_u64_add(counter_u64_t c, int64_t inc) +{ + uint64_t ccpu, old; + + __asm __volatile("\n" + "1:\n\t" + "mfsprg %0, 0\n\t" + "ldarx %1, %0, %2\n\t" + "add %1, %1, %3\n\t" + "stdcx. %1, %0, %2\n\t" + "bne- 1b" + : "=&b" (ccpu), "=&r" (old) + : "r" ((char *)c - (char *)&__pcpu[0]), "r" (inc) + : "cc", "memory"); +} + +#else /* !AIM || !64bit */ + +#define counter_enter() critical_enter() +#define counter_exit() critical_exit() + +#define counter_u64_add_protected(c, inc) do { \ + CRITICAL_ASSERT(curthread); \ + *(uint64_t *)zpcpu_get(c) += (inc); \ +} while (0) + +static inline void +counter_u64_add(counter_u64_t c, int64_t inc) +{ + + counter_enter(); + counter_u64_add_protected(c, inc); + counter_exit(); +} + +#endif /* AIM 64bit */ + +#endif /* ! __MACHINE_COUNTER_H__ */ diff --git a/sys/powerpc/include/pcpu.h b/sys/powerpc/include/pcpu.h index bd14661..3f6a234 100644 --- a/sys/powerpc/include/pcpu.h +++ b/sys/powerpc/include/pcpu.h @@ -51,13 +51,15 @@ struct pmap; register_t pc_disisave[CPUSAVE_LEN]; \ register_t pc_dbsave[CPUSAVE_LEN]; -#define PCPU_MD_AIM32_FIELDS +#define PCPU_MD_AIM32_FIELDS \ + /* char __pad[0] */ #define PCPU_MD_AIM64_FIELDS \ struct slb pc_slb[64]; \ struct slb **pc_userslb; \ register_t pc_slbsave[18]; \ - uint8_t pc_slbstack[1024]; + uint8_t pc_slbstack[1024]; \ + char __pad[1137] #ifdef __powerpc64__ #define PCPU_MD_AIM_FIELDS PCPU_MD_AIM64_FIELDS @@ -76,7 +78,8 @@ struct pmap; register_t pc_booke_tlbsave[BOOKE_TLBSAVE_LEN]; \ register_t pc_booke_tlb_level; \ uint32_t *pc_booke_tlb_lock; \ - int pc_tid_next; + int pc_tid_next; \ + char __pad[173] /* Definitions for register offsets within the exception tmp save areas */ #define CPUSAVE_R27 0 /* where r27 gets saved */ @@ -109,7 +112,6 @@ struct pmap; #define TLBSAVE_BOOKE_R30 14 #define TLBSAVE_BOOKE_R31 15 -#ifndef COMPILING_LINT #ifdef AIM #define PCPU_MD_FIELDS \ PCPU_MD_COMMON_FIELDS \ @@ -120,12 +122,7 @@ struct pmap; PCPU_MD_COMMON_FIELDS \ PCPU_MD_BOOKE_FIELDS #endif -#else -#define PCPU_MD_FIELDS \ - PCPU_MD_COMMON_FIELDS \ - PCPU_MD_AIM_FIELDS \ - PCPU_MD_BOOKE_FIELDS -#endif + /* * Catch-all for ports (e.g. 
lsof, used by gtop) */ diff --git a/sys/powerpc/include/platformvar.h b/sys/powerpc/include/platformvar.h index b7c011d..21a1d04 100644 --- a/sys/powerpc/include/platformvar.h +++ b/sys/powerpc/include/platformvar.h @@ -81,7 +81,8 @@ typedef struct platform_kobj *platform_t; typedef struct kobj_class platform_def_t; #define platform_method_t kobj_method_t -#define PLATFORMMETHOD KOBJMETHOD +#define PLATFORMMETHOD KOBJMETHOD +#define PLATFORMMETHOD_END KOBJMETHOD_END #define PLATFORM_DEF(name) DATA_SET(platform_set, name) diff --git a/sys/powerpc/include/vmparam.h b/sys/powerpc/include/vmparam.h index 8368a41..4b36d9d 100644 --- a/sys/powerpc/include/vmparam.h +++ b/sys/powerpc/include/vmparam.h @@ -121,11 +121,6 @@ #endif /* AIM/E500 */ -/* XXX max. amount of KVM to be used by buffers. */ -#ifndef VM_MAX_KERNEL_BUF -#define VM_MAX_KERNEL_BUF (SEGMENT_LENGTH * 7 / 10) -#endif - #if !defined(LOCORE) struct pmap_physseg { struct pv_entry *pvent; diff --git a/sys/powerpc/powermac/ata_dbdma.c b/sys/powerpc/powermac/ata_dbdma.c index 23434c8..0e3fbde 100644 --- a/sys/powerpc/powermac/ata_dbdma.c +++ b/sys/powerpc/powermac/ata_dbdma.c @@ -23,16 +23,16 @@ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. - * - * $FreeBSD$ */ +#include <sys/cdefs.h> +__FBSDID("* $FreeBSD$"); + /* * Common routines for the DMA engine on both the Apple Kauai and MacIO * ATA controllers. */ -#include "opt_ata.h" #include <sys/param.h> #include <sys/systm.h> #include <sys/kernel.h> diff --git a/sys/powerpc/powermac/ata_kauai.c b/sys/powerpc/powermac/ata_kauai.c index ef260d4..1d72f75 100644 --- a/sys/powerpc/powermac/ata_kauai.c +++ b/sys/powerpc/powermac/ata_kauai.c @@ -31,7 +31,6 @@ __FBSDID("$FreeBSD$"); /* * Mac 'Kauai' PCI ATA controller */ -#include "opt_ata.h" #include <sys/param.h> #include <sys/systm.h> #include <sys/kernel.h> @@ -99,7 +98,7 @@ static device_method_t ata_kauai_methods[] = { /* ATA interface */ DEVMETHOD(ata_setmode, ata_kauai_setmode), - { 0, 0 } + DEVMETHOD_END }; struct ata_kauai_softc { @@ -120,15 +119,15 @@ static driver_t ata_kauai_driver = { sizeof(struct ata_kauai_softc), }; -DRIVER_MODULE(ata, pci, ata_kauai_driver, ata_devclass, 0, 0); +DRIVER_MODULE(ata, pci, ata_kauai_driver, ata_devclass, NULL, NULL); MODULE_DEPEND(ata, ata, 1, 1, 1); /* * PCI ID search table */ -static struct kauai_pci_dev { - u_int32_t kpd_devid; - char *kpd_desc; +static const struct kauai_pci_dev { + u_int32_t kpd_devid; + const char *kpd_desc; } kauai_pci_devlist[] = { { 0x0033106b, "Uninorth2 Kauai ATA Controller" }, { 0x003b106b, "Intrepid Kauai ATA Controller" }, @@ -152,6 +151,7 @@ static const u_int pio_timing_kauai[] = { 0x05000249, /* PIO3 */ 0x04000148 /* PIO4 */ }; + static const u_int pio_timing_shasta[] = { 0x0a000c97, /* PIO0 */ 0x07000712, /* PIO1 */ @@ -165,6 +165,7 @@ static const u_int dma_timing_kauai[] = { 0x00209000, /* WDMA1 */ 0x00148000 /* WDMA2 */ }; + static const u_int dma_timing_shasta[] = { 0x00820800, /* WDMA0 */ 0x0028b000, /* WDMA1 */ @@ -179,6 +180,7 @@ static const u_int udma_timing_kauai[] = { 0x00002a31, /* UDMA4 */ 0x00002921 /* UDMA5 */ }; + static const u_int udma_timing_shasta[] = { 0x00035901, /* UDMA0 */ 0x000348b1, /* UDMA1 */ @@ -368,4 +370,3 @@ ata_kauai_begin_transaction(struct ata_request *request) return ata_begin_transaction(request); } - diff --git a/sys/powerpc/powermac/ata_macio.c b/sys/powerpc/powermac/ata_macio.c index 447009d..e15ab00 100644 --- 
a/sys/powerpc/powermac/ata_macio.c +++ b/sys/powerpc/powermac/ata_macio.c @@ -23,14 +23,14 @@ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. - * - * $FreeBSD$ */ +#include <sys/cdefs.h> +__FBSDID("$FreeBSD$"); + /* * Mac-io ATA controller */ -#include "opt_ata.h" #include <sys/param.h> #include <sys/systm.h> #include <sys/kernel.h> @@ -85,7 +85,7 @@ struct ide_timings { int active; /* minimum command active time [ns] */ }; -struct ide_timings pio_timings[5] = { +static const struct ide_timings pio_timings[5] = { { 600, 180 }, /* PIO 0 */ { 390, 150 }, /* PIO 1 */ { 240, 105 }, /* PIO 2 */ @@ -122,7 +122,7 @@ static device_method_t ata_macio_methods[] = { /* ATA interface */ DEVMETHOD(ata_setmode, ata_macio_setmode), - { 0, 0 } + DEVMETHOD_END }; struct ata_macio_softc { @@ -143,7 +143,7 @@ static driver_t ata_macio_driver = { sizeof(struct ata_macio_softc), }; -DRIVER_MODULE(ata, macio, ata_macio_driver, ata_devclass, 0, 0); +DRIVER_MODULE(ata, macio, ata_macio_driver, ata_devclass, NULL, NULL); MODULE_DEPEND(ata, ata, 1, 1, 1); static int @@ -332,4 +332,3 @@ ata_macio_begin_transaction(struct ata_request *request) return ata_begin_transaction(request); } - diff --git a/sys/powerpc/powermac/platform_powermac.c b/sys/powerpc/powermac/platform_powermac.c index 978b33a..52d5c4a 100644 --- a/sys/powerpc/powermac/platform_powermac.c +++ b/sys/powerpc/powermac/platform_powermac.c @@ -77,7 +77,7 @@ static platform_method_t powermac_methods[] = { PLATFORMMETHOD(platform_reset, powermac_reset), - { 0, 0 } + PLATFORMMETHOD_END }; static platform_def_t powermac_platform = { diff --git a/sys/powerpc/powerpc/busdma_machdep.c b/sys/powerpc/powerpc/busdma_machdep.c index 7953f91..7708b94 100644 --- a/sys/powerpc/powerpc/busdma_machdep.c +++ b/sys/powerpc/powerpc/busdma_machdep.c @@ -40,10 +40,10 @@ __FBSDID("$FreeBSD$"); #include <sys/ktr.h> #include <sys/lock.h> #include <sys/proc.h> +#include <sys/memdesc.h> #include <sys/mutex.h> -#include <sys/mbuf.h> -#include <sys/uio.h> #include <sys/sysctl.h> +#include <sys/uio.h> #include <vm/vm.h> #include <vm/vm_extern.h> @@ -87,6 +87,7 @@ struct bounce_page { vm_offset_t vaddr; /* kva of bounce buffer */ bus_addr_t busaddr; /* Physical address */ vm_offset_t datavaddr; /* kva of client data */ + bus_addr_t dataaddr; /* client physical address */ bus_size_t datacount; /* client data count */ STAILQ_ENTRY(bounce_page) links; }; @@ -125,8 +126,7 @@ struct bus_dmamap { int pagesneeded; int pagesreserved; bus_dma_tag_t dmat; - void *buf; /* unmapped buffer pointer */ - bus_size_t buflen; /* unmapped buffer length */ + struct memdesc mem; bus_dma_segment_t *segments; int nsegs; bus_dmamap_callback_t *callback; @@ -144,7 +144,8 @@ static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages); static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit); static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, - vm_offset_t vaddr, bus_size_t size); + vm_offset_t vaddr, bus_addr_t addr, + bus_size_t size); static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr); @@ -564,32 +565,45 @@ bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags); } -/* - * Utility function to load a linear buffer. 
lastaddrp holds state - * between invocations (for multiple-buffer loads). segp contains - * the starting segment on entrance, and the ending segment on exit. - * first indicates if this is the first invocation of this function. - */ -static __inline int -_bus_dmamap_load_buffer(bus_dma_tag_t dmat, - bus_dmamap_t map, - void *buf, bus_size_t buflen, - pmap_t pmap, - int flags, - bus_addr_t *lastaddrp, - bus_dma_segment_t *segs, - int *segp, - int first) +static void +_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, + bus_size_t buflen, int flags) { + bus_addr_t curaddr; bus_size_t sgsize; - bus_addr_t curaddr, lastaddr, baddr, bmask; - vm_offset_t vaddr; - bus_addr_t paddr; - int seg; - if (map->pagesneeded == 0 && ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0)) { - vm_offset_t vendaddr; + if (map->pagesneeded == 0) { + CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, " + "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem), + dmat->boundary, dmat->alignment); + CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map, map->pagesneeded); + /* + * Count the number of bounce pages + * needed in order to complete this transfer + */ + curaddr = buf; + while (buflen != 0) { + sgsize = MIN(buflen, dmat->maxsegsz); + if (run_filter(dmat, curaddr) != 0) { + sgsize = MIN(sgsize, PAGE_SIZE); + map->pagesneeded++; + } + curaddr += sgsize; + buflen -= sgsize; + } + CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); + } +} + +static void +_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap, + void *buf, bus_size_t buflen, int flags) +{ + vm_offset_t vaddr; + vm_offset_t vendaddr; + bus_addr_t paddr; + if (map->pagesneeded == 0) { CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, " "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem), dmat->boundary, dmat->alignment); @@ -605,10 +619,10 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_size_t sg_len; sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK); - if (pmap) - paddr = pmap_extract(pmap, vaddr); - else + if (pmap == kernel_pmap) paddr = pmap_kextract(vaddr); + else + paddr = pmap_extract(pmap, vaddr); if (run_filter(dmat, paddr) != 0) { sg_len = roundup2(sg_len, dmat->alignment); map->pagesneeded++; @@ -617,315 +631,230 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dmat, } CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); } +} + +static int +_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags) +{ /* Reserve Necessary Bounce Pages */ - if (map->pagesneeded != 0) { - mtx_lock(&bounce_lock); - if (flags & BUS_DMA_NOWAIT) { - if (reserve_bounce_pages(dmat, map, 0) != 0) { - mtx_unlock(&bounce_lock); - return (ENOMEM); - } - } else { - if (reserve_bounce_pages(dmat, map, 1) != 0) { - /* Queue us for resources */ - map->dmat = dmat; - map->buf = buf; - map->buflen = buflen; - STAILQ_INSERT_TAIL(&bounce_map_waitinglist, - map, links); - mtx_unlock(&bounce_lock); - return (EINPROGRESS); - } + mtx_lock(&bounce_lock); + if (flags & BUS_DMA_NOWAIT) { + if (reserve_bounce_pages(dmat, map, 0) != 0) { + mtx_unlock(&bounce_lock); + return (ENOMEM); + } + } else { + if (reserve_bounce_pages(dmat, map, 1) != 0) { + /* Queue us for resources */ + STAILQ_INSERT_TAIL(&bounce_map_waitinglist, + map, links); + mtx_unlock(&bounce_lock); + return (EINPROGRESS); } - mtx_unlock(&bounce_lock); } + mtx_unlock(&bounce_lock); - vaddr = (vm_offset_t)buf; - lastaddr = *lastaddrp; - bmask = ~(dmat->boundary - 1); - - for (seg = *segp; buflen > 0 ; ) { - bus_size_t max_sgsize; - - /* - * 
Get the physical address for this segment. - */ - if (pmap) - curaddr = pmap_extract(pmap, vaddr); - else - curaddr = pmap_kextract(vaddr); + return (0); +} - /* - * Compute the segment size, and adjust counts. - */ - max_sgsize = MIN(buflen, dmat->maxsegsz); - sgsize = PAGE_SIZE - ((vm_offset_t)curaddr & PAGE_MASK); - if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) { - sgsize = roundup2(sgsize, dmat->alignment); - sgsize = MIN(sgsize, max_sgsize); - curaddr = add_bounce_page(dmat, map, vaddr, sgsize); - } else { - sgsize = MIN(sgsize, max_sgsize); - } +/* + * Add a single contiguous physical range to the segment list. + */ +static int +_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr, + bus_size_t sgsize, bus_dma_segment_t *segs, int *segp) +{ + bus_addr_t baddr, bmask; + int seg; - /* - * Make sure we don't cross any boundaries. - */ - if (dmat->boundary > 0) { - baddr = (curaddr + dmat->boundary) & bmask; - if (sgsize > (baddr - curaddr)) - sgsize = (baddr - curaddr); - } + /* + * Make sure we don't cross any boundaries. + */ + bmask = ~(dmat->boundary - 1); + if (dmat->boundary > 0) { + baddr = (curaddr + dmat->boundary) & bmask; + if (sgsize > (baddr - curaddr)) + sgsize = (baddr - curaddr); + } - /* - * Insert chunk into a segment, coalescing with - * previous segment if possible. - */ - if (first) { + /* + * Insert chunk into a segment, coalescing with + * previous segment if possible. + */ + seg = *segp; + if (seg == -1) { + seg = 0; + segs[seg].ds_addr = curaddr; + segs[seg].ds_len = sgsize; + } else { + if (curaddr == segs[seg].ds_addr + segs[seg].ds_len && + (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && + (dmat->boundary == 0 || + (segs[seg].ds_addr & bmask) == (curaddr & bmask))) + segs[seg].ds_len += sgsize; + else { + if (++seg >= dmat->nsegments) + return (0); segs[seg].ds_addr = curaddr; segs[seg].ds_len = sgsize; - first = 0; - } else { - if (curaddr == lastaddr && - (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && - (dmat->boundary == 0 || - (segs[seg].ds_addr & bmask) == (curaddr & bmask))) - segs[seg].ds_len += sgsize; - else { - if (++seg >= dmat->nsegments) - break; - segs[seg].ds_addr = curaddr; - segs[seg].ds_len = sgsize; - } } - - lastaddr = curaddr + sgsize; - vaddr += sgsize; - buflen -= sgsize; } - *segp = seg; - *lastaddrp = lastaddr; - - /* - * Did we fit? - */ - return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ + return (sgsize); } /* - * Map the buffer buf into bus space using the dmamap map. + * Utility function to load a physical buffer. segp contains + * the starting segment on entrace, and the ending segment on exit. 
*/ int -bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, - bus_size_t buflen, bus_dmamap_callback_t *callback, - void *callback_arg, int flags) +_bus_dmamap_load_phys(bus_dma_tag_t dmat, + bus_dmamap_t map, + vm_paddr_t buf, bus_size_t buflen, + int flags, + bus_dma_segment_t *segs, + int *segp) { - bus_addr_t lastaddr = 0; - int error; - - if (dmat->flags & BUS_DMA_COULD_BOUNCE) { - flags |= BUS_DMA_WAITOK; - map->callback = callback; - map->callback_arg = callback_arg; - } - - map->nsegs = 0; - error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags, - &lastaddr, map->segments, &map->nsegs, 1); - map->nsegs++; + bus_addr_t curaddr; + bus_size_t sgsize; + int error; - CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", - __func__, dmat, dmat->flags, error, map->nsegs); + if (segs == NULL) + segs = map->segments; - if (error == EINPROGRESS) { - return (error); + if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) { + _bus_dmamap_count_phys(dmat, map, buf, buflen, flags); + if (map->pagesneeded != 0) { + error = _bus_dmamap_reserve_pages(dmat, map, flags); + if (error) + return (error); + } } - if (dmat->iommu != NULL) - IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs, dmat->lowaddr, - dmat->highaddr, dmat->alignment, dmat->boundary, - dmat->iommu_cookie); - - if (error) - (*callback)(callback_arg, map->segments, 0, error); - else - (*callback)(callback_arg, map->segments, map->nsegs, 0); + while (buflen > 0) { + curaddr = buf; + sgsize = MIN(buflen, dmat->maxsegsz); + if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) { + sgsize = MIN(sgsize, PAGE_SIZE); + curaddr = add_bounce_page(dmat, map, 0, curaddr, + sgsize); + } + sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, + segp); + if (sgsize == 0) + break; + buf += sgsize; + buflen -= sgsize; + } /* - * Return ENOMEM to the caller so that it can pass it up the stack. - * This error only happens when NOWAIT is set, so deferal is disabled. + * Did we fit? */ - if (error == ENOMEM) - return (error); - - return (0); + return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ } - /* - * Like _bus_dmamap_load(), but for mbufs. + * Utility function to load a linear buffer. segp contains + * the starting segment on entrance, and the ending segment on exit. 
*/ int -bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, - struct mbuf *m0, - bus_dmamap_callback2_t *callback, void *callback_arg, - int flags) +_bus_dmamap_load_buffer(bus_dma_tag_t dmat, + bus_dmamap_t map, + void *buf, bus_size_t buflen, + pmap_t pmap, + int flags, + bus_dma_segment_t *segs, + int *segp) { + bus_size_t sgsize; + bus_addr_t curaddr; + vm_offset_t vaddr; int error; - M_ASSERTPKTHDR(m0); + if (segs == NULL) + segs = map->segments; - flags |= BUS_DMA_NOWAIT; - map->nsegs = 0; - error = 0; - if (m0->m_pkthdr.len <= dmat->maxsize) { - int first = 1; - bus_addr_t lastaddr = 0; - struct mbuf *m; - - for (m = m0; m != NULL && error == 0; m = m->m_next) { - if (m->m_len > 0) { - error = _bus_dmamap_load_buffer(dmat, map, - m->m_data, m->m_len, - NULL, flags, &lastaddr, - map->segments, &map->nsegs, first); - first = 0; - } + if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) { + _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags); + if (map->pagesneeded != 0) { + error = _bus_dmamap_reserve_pages(dmat, map, flags); + if (error) + return (error); } - } else { - error = EINVAL; } - map->nsegs++; - if (dmat->iommu != NULL) - IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs, dmat->lowaddr, - dmat->highaddr, dmat->alignment, dmat->boundary, - dmat->iommu_cookie); - - if (error) { - /* force "no valid mappings" in callback */ - (*callback)(callback_arg, map->segments, 0, 0, error); - } else { - (*callback)(callback_arg, map->segments, - map->nsegs, m0->m_pkthdr.len, error); - } - CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", - __func__, dmat, dmat->flags, error, map->nsegs); - return (error); -} + vaddr = (vm_offset_t)buf; -int -bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, - struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, - int flags) -{ - int error; + while (buflen > 0) { + bus_size_t max_sgsize; - M_ASSERTPKTHDR(m0); + /* + * Get the physical address for this segment. + */ + if (pmap == kernel_pmap) + curaddr = pmap_kextract(vaddr); + else + curaddr = pmap_extract(pmap, vaddr); - flags |= BUS_DMA_NOWAIT; - *nsegs = 0; - error = 0; - if (m0->m_pkthdr.len <= dmat->maxsize) { - int first = 1; - bus_addr_t lastaddr = 0; - struct mbuf *m; - - for (m = m0; m != NULL && error == 0; m = m->m_next) { - if (m->m_len > 0) { - error = _bus_dmamap_load_buffer(dmat, map, - m->m_data, m->m_len, - NULL, flags, &lastaddr, - segs, nsegs, first); - first = 0; - } + /* + * Compute the segment size, and adjust counts. + */ + max_sgsize = MIN(buflen, dmat->maxsegsz); + sgsize = PAGE_SIZE - ((vm_offset_t)curaddr & PAGE_MASK); + if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) { + sgsize = roundup2(sgsize, dmat->alignment); + sgsize = MIN(sgsize, max_sgsize); + curaddr = add_bounce_page(dmat, map, vaddr, curaddr, + sgsize); + } else { + sgsize = MIN(sgsize, max_sgsize); } - } else { - error = EINVAL; - } - - /* XXX FIXME: Having to increment nsegs is really annoying */ - ++*nsegs; - if (dmat->iommu != NULL) - IOMMU_MAP(dmat->iommu, segs, nsegs, dmat->lowaddr, - dmat->highaddr, dmat->alignment, dmat->boundary, - dmat->iommu_cookie); - - map->nsegs = *nsegs; - memcpy(map->segments, segs, map->nsegs*sizeof(segs[0])); + sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, + segp); + if (sgsize == 0) + break; + vaddr += sgsize; + buflen -= sgsize; + } - CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", - __func__, dmat, dmat->flags, error, *nsegs); - return (error); + /* + * Did we fit? + */ + return (buflen != 0 ? 
EFBIG : 0); /* XXX better return value here? */ } -/* - * Like _bus_dmamap_load(), but for uios. - */ -int -bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, - struct uio *uio, - bus_dmamap_callback2_t *callback, void *callback_arg, - int flags) +void +__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map, + struct memdesc *mem, bus_dmamap_callback_t *callback, + void *callback_arg) { - bus_addr_t lastaddr = 0; - int error, first, i; - bus_size_t resid; - struct iovec *iov; - pmap_t pmap; - - flags |= BUS_DMA_NOWAIT; - resid = uio->uio_resid; - iov = uio->uio_iov; - - if (uio->uio_segflg == UIO_USERSPACE) { - KASSERT(uio->uio_td != NULL, - ("bus_dmamap_load_uio: USERSPACE but no proc")); - pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace); - } else - pmap = NULL; - - map->nsegs = 0; - error = 0; - first = 1; - for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { - /* - * Now at the first iovec to load. Load each iovec - * until we have exhausted the residual count. - */ - bus_size_t minlen = - resid < iov[i].iov_len ? resid : iov[i].iov_len; - caddr_t addr = (caddr_t) iov[i].iov_base; - if (minlen > 0) { - error = _bus_dmamap_load_buffer(dmat, map, - addr, minlen, pmap, flags, &lastaddr, - map->segments, &map->nsegs, first); - first = 0; - - resid -= minlen; - } + if (dmat->flags & BUS_DMA_COULD_BOUNCE) { + map->dmat = dmat; + map->mem = *mem; + map->callback = callback; + map->callback_arg = callback_arg; } +} - map->nsegs++; +bus_dma_segment_t * +_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map, + bus_dma_segment_t *segs, int nsegs, int error) +{ + + if (segs != NULL) + memcpy(map->segments, segs, map->nsegs*sizeof(segs[0])); + else + segs = map->segments; + map->nsegs = nsegs; if (dmat->iommu != NULL) - IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs, dmat->lowaddr, - dmat->highaddr, dmat->alignment, dmat->boundary, - dmat->iommu_cookie); + IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs, + dmat->lowaddr, dmat->highaddr, dmat->alignment, + dmat->boundary, dmat->iommu_cookie); - if (error) { - /* force "no valid mappings" in callback */ - (*callback)(callback_arg, map->segments, 0, 0, error); - } else { - (*callback)(callback_arg, map->segments, - map->nsegs, uio->uio_resid, error); - } - CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", - __func__, dmat, dmat->flags, error, map->nsegs); - return (error); + return (segs); } /* @@ -963,9 +892,14 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) if (op & BUS_DMASYNC_PREWRITE) { while (bpage != NULL) { - bcopy((void *)bpage->datavaddr, - (void *)bpage->vaddr, - bpage->datacount); + if (bpage->datavaddr != 0) + bcopy((void *)bpage->datavaddr, + (void *)bpage->vaddr, + bpage->datacount); + else + physcopyout(bpage->dataaddr, + (void *)bpage->vaddr, + bpage->datacount); bpage = STAILQ_NEXT(bpage, links); } dmat->bounce_zone->total_bounced++; @@ -973,9 +907,13 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) if (op & BUS_DMASYNC_POSTREAD) { while (bpage != NULL) { - bcopy((void *)bpage->vaddr, - (void *)bpage->datavaddr, - bpage->datacount); + if (bpage->datavaddr != 0) + bcopy((void *)bpage->vaddr, + (void *)bpage->datavaddr, + bpage->datacount); + else + physcopyin((void *)bpage->vaddr, + bpage->dataaddr, bpage->datacount); bpage = STAILQ_NEXT(bpage, links); } dmat->bounce_zone->total_bounced++; @@ -1142,7 +1080,7 @@ reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit) static bus_addr_t add_bounce_page(bus_dma_tag_t 
dmat, bus_dmamap_t map, vm_offset_t vaddr, - bus_size_t size) + bus_addr_t addr, bus_size_t size) { struct bounce_zone *bz; struct bounce_page *bpage; @@ -1174,6 +1112,7 @@ add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, bpage->busaddr |= vaddr & PAGE_MASK; } bpage->datavaddr = vaddr; + bpage->dataaddr = addr; bpage->datacount = size; STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); return (bpage->busaddr); @@ -1227,8 +1166,9 @@ busdma_swi(void) mtx_unlock(&bounce_lock); dmat = map->dmat; (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK); - bus_dmamap_load(map->dmat, map, map->buf, map->buflen, - map->callback, map->callback_arg, /*flags*/0); + bus_dmamap_load_mem(map->dmat, map, &map->mem, + map->callback, map->callback_arg, + BUS_DMA_WAITOK); (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK); mtx_lock(&bounce_lock); } diff --git a/sys/powerpc/powerpc/cpu.c b/sys/powerpc/powerpc/cpu.c index d177c8b..d67f359 100644 --- a/sys/powerpc/powerpc/cpu.c +++ b/sys/powerpc/powerpc/cpu.c @@ -79,9 +79,9 @@ static void cpu_970_setup(int cpuid, uint16_t vers); static void cpu_booke_setup(int cpuid, uint16_t vers); int powerpc_pow_enabled; -void (*cpu_idle_hook)(void) = NULL; -static void cpu_idle_60x(void); -static void cpu_idle_booke(void); +void (*cpu_idle_hook)(sbintime_t) = NULL; +static void cpu_idle_60x(sbintime_t); +static void cpu_idle_booke(sbintime_t); struct cputab { const char *name; @@ -516,6 +516,7 @@ cpu_feature_bit(SYSCTL_HANDLER_ARGS) void cpu_idle(int busy) { + sbintime_t sbt = -1; #ifdef INVARIANTS if ((mfmsr() & PSL_EE) != PSL_EE) { @@ -531,9 +532,9 @@ cpu_idle(int busy) if (cpu_idle_hook != NULL) { if (!busy) { critical_enter(); - cpu_idleclock(); + sbt = cpu_idleclock(); } - cpu_idle_hook(); + cpu_idle_hook(sbt); if (!busy) { cpu_activeclock(); critical_exit(); @@ -551,7 +552,7 @@ cpu_idle_wakeup(int cpu) } static void -cpu_idle_60x(void) +cpu_idle_60x(sbintime_t sbt) { register_t msr; uint16_t vers; @@ -586,7 +587,7 @@ cpu_idle_60x(void) } static void -cpu_idle_booke(void) +cpu_idle_booke(sbintime_t sbt) { register_t msr; diff --git a/sys/powerpc/powerpc/mmu_if.m b/sys/powerpc/powerpc/mmu_if.m index 8cd6e52..0382bd8 100644 --- a/sys/powerpc/powerpc/mmu_if.m +++ b/sys/powerpc/powerpc/mmu_if.m @@ -215,6 +215,14 @@ METHOD void copy_page { vm_page_t _dst; }; +METHOD void copy_pages { + mmu_t _mmu; + vm_page_t *_ma; + vm_offset_t _a_offset; + vm_page_t *_mb; + vm_offset_t _b_offset; + int _xfersize; +}; /** * @brief Create a mapping between a virtual/physical address pair in the diff --git a/sys/powerpc/powerpc/pmap_dispatch.c b/sys/powerpc/powerpc/pmap_dispatch.c index c919196..7fd98f4 100644 --- a/sys/powerpc/powerpc/pmap_dispatch.c +++ b/sys/powerpc/powerpc/pmap_dispatch.c @@ -133,6 +133,16 @@ pmap_copy_page(vm_page_t src, vm_page_t dst) } void +pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[], + vm_offset_t b_offset, int xfersize) +{ + + CTR6(KTR_PMAP, "%s(%p, %#x, %p, %#x, %#x)", __func__, ma, + a_offset, mb, b_offset, xfersize); + MMU_COPY_PAGES(mmu_obj, ma, a_offset, mb, b_offset, xfersize); +} + +void pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t p, vm_prot_t prot, boolean_t wired) { @@ -564,3 +574,5 @@ pmap_mmu_install(char *name, int prio) return (FALSE); } + +int unmapped_buf_allowed; diff --git a/sys/powerpc/ps3/mmu_ps3.c b/sys/powerpc/ps3/mmu_ps3.c index 5a7fe93..dfca9a7 100644 --- a/sys/powerpc/ps3/mmu_ps3.c +++ b/sys/powerpc/ps3/mmu_ps3.c @@ -45,7 +45,6 @@ __FBSDID("$FreeBSD$"); #include 
<vm/vm_object.h> #include <vm/vm_extern.h> #include <vm/vm_pageout.h> -#include <vm/vm_pager.h> #include <vm/uma.h> #include <powerpc/aim/mmu_oea64.h> diff --git a/sys/powerpc/ps3/platform_ps3.c b/sys/powerpc/ps3/platform_ps3.c index f06c2cf..207382d 100644 --- a/sys/powerpc/ps3/platform_ps3.c +++ b/sys/powerpc/ps3/platform_ps3.c @@ -70,7 +70,7 @@ static int ps3_smp_start_cpu(platform_t, struct pcpu *cpu); static struct cpu_group *ps3_smp_topo(platform_t); #endif static void ps3_reset(platform_t); -static void ps3_cpu_idle(void); +static void ps3_cpu_idle(sbintime_t); static platform_method_t ps3_methods[] = { PLATFORMMETHOD(platform_probe, ps3_probe), @@ -89,7 +89,7 @@ static platform_method_t ps3_methods[] = { PLATFORMMETHOD(platform_reset, ps3_reset), - { 0, 0 } + PLATFORMMETHOD_END }; static platform_def_t ps3_platform = { @@ -245,7 +245,7 @@ ps3_real_maxaddr(platform_t plat) } static void -ps3_cpu_idle(void) +ps3_cpu_idle(sbintime_t sbt) { lv1_pause(0); } diff --git a/sys/powerpc/ps3/ps3cdrom.c b/sys/powerpc/ps3/ps3cdrom.c index ba3c3ad..a654b4f 100644 --- a/sys/powerpc/ps3/ps3cdrom.c +++ b/sys/powerpc/ps3/ps3cdrom.c @@ -369,9 +369,8 @@ ps3cdrom_action(struct cam_sim *sim, union ccb *ccb) TAILQ_REMOVE(&sc->sc_free_xferq, xp, x_queue); - err = bus_dmamap_load(sc->sc_dmatag, xp->x_dmamap, - ccb->csio.data_ptr, ccb->csio.dxfer_len, ps3cdrom_transfer, - xp, 0); + err = bus_dmamap_load_ccb(sc->sc_dmatag, xp->x_dmamap, + ccb, ps3cdrom_transfer, xp, 0); if (err && err != EINPROGRESS) { device_printf(dev, "Could not load DMA map (%d)\n", err); diff --git a/sys/powerpc/psim/ata_iobus.c b/sys/powerpc/psim/ata_iobus.c index 60f6c50..69c2036 100644 --- a/sys/powerpc/psim/ata_iobus.c +++ b/sys/powerpc/psim/ata_iobus.c @@ -23,14 +23,14 @@ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
- * - * $FreeBSD$ */ +#include <sys/cdefs.h> +__FBSDID("$FreeBSD$"); + /* * PSIM local bus ATA controller */ -#include "opt_ata.h" #include <sys/param.h> #include <sys/systm.h> #include <sys/kernel.h> @@ -80,7 +80,7 @@ static device_method_t ata_iobus_methods[] = { DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), - { 0, 0 } + DEVMETHOD_END }; static driver_t ata_iobus_driver = { @@ -91,7 +91,8 @@ static driver_t ata_iobus_driver = { static devclass_t ata_iobus_devclass; -DRIVER_MODULE(ataiobus, iobus, ata_iobus_driver, ata_iobus_devclass, 0, 0); +DRIVER_MODULE(ataiobus, iobus, ata_iobus_driver, ata_iobus_devclass, NULL, + NULL); MODULE_DEPEND(ata, ata, 1, 1, 1); static int @@ -221,7 +222,7 @@ static device_method_t ata_iobus_sub_methods[] = { /* ATA interface */ DEVMETHOD(ata_setmode, ata_iobus_sub_setmode), - { 0, 0 } + DEVMETHOD_END }; static driver_t ata_iobus_sub_driver = { @@ -230,7 +231,7 @@ static driver_t ata_iobus_sub_driver = { sizeof(struct ata_channel), }; -DRIVER_MODULE(ata, ataiobus, ata_iobus_sub_driver, ata_devclass, 0, 0); +DRIVER_MODULE(ata, ataiobus, ata_iobus_sub_driver, ata_devclass, NULL, NULL); static int ata_iobus_sub_probe(device_t dev) diff --git a/sys/powerpc/wii/platform_wii.c b/sys/powerpc/wii/platform_wii.c index dcf1fc0..bc35105 100644 --- a/sys/powerpc/wii/platform_wii.c +++ b/sys/powerpc/wii/platform_wii.c @@ -60,7 +60,7 @@ static void wii_mem_regions(platform_t, struct mem_region **, int *, struct mem_region **, int *); static unsigned long wii_timebase_freq(platform_t, struct cpuref *cpuref); static void wii_reset(platform_t); -static void wii_cpu_idle(void); +static void wii_cpu_idle(sbintime_t sbt); static platform_method_t wii_methods[] = { PLATFORMMETHOD(platform_probe, wii_probe), @@ -69,7 +69,7 @@ static platform_method_t wii_methods[] = { PLATFORMMETHOD(platform_timebase_freq, wii_timebase_freq), PLATFORMMETHOD(platform_reset, wii_reset), - { 0, 0 } + PLATFORMMETHOD_END }; static platform_def_t wii_platform = { @@ -155,6 +155,6 @@ wii_reset(platform_t plat) } static void -wii_cpu_idle(void) +wii_cpu_idle(sbintime_t sbt) { } diff --git a/sys/powerpc/wii/wii_gpio.c b/sys/powerpc/wii/wii_gpio.c index fbc3118..40af773 100644 --- a/sys/powerpc/wii/wii_gpio.c +++ b/sys/powerpc/wii/wii_gpio.c @@ -86,7 +86,7 @@ static device_method_t wiigpio_methods[] = { DEVMETHOD(gpio_pin_set, wiigpio_pin_set), DEVMETHOD(gpio_pin_toggle, wiigpio_pin_toggle), - { 0, 0 }, + DEVMETHOD_END }; static driver_t wiigpio_driver = { diff --git a/sys/powerpc/wii/wii_ipc.c b/sys/powerpc/wii/wii_ipc.c index 0db4aad..7f49d31 100644 --- a/sys/powerpc/wii/wii_ipc.c +++ b/sys/powerpc/wii/wii_ipc.c @@ -57,7 +57,7 @@ static device_method_t wiiipc_methods[] = { DEVMETHOD(device_probe, wiiipc_probe), DEVMETHOD(device_attach, wiiipc_attach), - { 0, 0 }, + DEVMETHOD_END }; static driver_t wiiipc_driver = { diff --git a/sys/powerpc/wii/wii_pic.c b/sys/powerpc/wii/wii_pic.c index 4088677..e6c6ae4 100644 --- a/sys/powerpc/wii/wii_pic.c +++ b/sys/powerpc/wii/wii_pic.c @@ -73,7 +73,7 @@ static device_method_t wiipic_methods[] = { DEVMETHOD(pic_mask, wiipic_mask), DEVMETHOD(pic_unmask, wiipic_unmask), - { 0, 0 }, + DEVMETHOD_END }; static driver_t wiipic_driver = { |