Diffstat (limited to 'arch/powerpc')
-rw-r--r-- arch/powerpc/Kconfig | 4
-rw-r--r-- arch/powerpc/Kconfig.debug | 2
-rw-r--r-- arch/powerpc/boot/dts/mpc832x_mds.dts | 1
-rw-r--r-- arch/powerpc/boot/dts/mpc832x_rdb.dts | 1
-rw-r--r-- arch/powerpc/boot/dts/mpc8349emitx.dts | 1
-rw-r--r-- arch/powerpc/boot/dts/mpc8349emitxgp.dts | 1
-rw-r--r-- arch/powerpc/boot/dts/mpc834x_mds.dts | 1
-rw-r--r-- arch/powerpc/boot/dts/mpc836x_mds.dts | 1
-rw-r--r-- arch/powerpc/boot/dts/mpc836x_rdk.dts | 16
-rw-r--r-- arch/powerpc/boot/dts/mpc8377_mds.dts | 1
-rw-r--r-- arch/powerpc/boot/dts/mpc8378_mds.dts | 1
-rw-r--r-- arch/powerpc/boot/dts/mpc8379_mds.dts | 1
-rw-r--r-- arch/powerpc/boot/dts/mpc8536ds.dts | 1
-rw-r--r-- arch/powerpc/boot/dts/mpc8540ads.dts | 1
-rw-r--r-- arch/powerpc/boot/dts/mpc8541cds.dts | 1
-rw-r--r-- arch/powerpc/boot/dts/mpc8544ds.dts | 1
-rw-r--r-- arch/powerpc/boot/dts/mpc8548cds.dts | 1
-rw-r--r-- arch/powerpc/boot/dts/mpc8555cds.dts | 1
-rw-r--r-- arch/powerpc/boot/dts/mpc8560ads.dts | 1
-rw-r--r-- arch/powerpc/boot/dts/mpc8568mds.dts | 1
-rw-r--r-- arch/powerpc/boot/dts/mpc8572ds.dts | 1
-rw-r--r-- arch/powerpc/boot/dts/mpc8610_hpcd.dts | 5
-rw-r--r-- arch/powerpc/boot/io.h | 2
-rw-r--r-- arch/powerpc/include/asm/8253pit.h | 10
-rw-r--r-- arch/powerpc/include/asm/8xx_immap.h | 564
-rw-r--r-- arch/powerpc/include/asm/Kbuild | 37
-rw-r--r-- arch/powerpc/include/asm/a.out.h | 20
-rw-r--r-- arch/powerpc/include/asm/abs_addr.h | 75
-rw-r--r-- arch/powerpc/include/asm/agp.h | 22
-rw-r--r-- arch/powerpc/include/asm/asm-compat.h | 69
-rw-r--r-- arch/powerpc/include/asm/atomic.h | 479
-rw-r--r-- arch/powerpc/include/asm/auxvec.h | 19
-rw-r--r-- arch/powerpc/include/asm/backlight.h | 41
-rw-r--r-- arch/powerpc/include/asm/bitops.h | 410
-rw-r--r-- arch/powerpc/include/asm/bootx.h | 171
-rw-r--r-- arch/powerpc/include/asm/btext.h | 28
-rw-r--r-- arch/powerpc/include/asm/bug.h | 121
-rw-r--r-- arch/powerpc/include/asm/bugs.h | 18
-rw-r--r-- arch/powerpc/include/asm/byteorder.h | 89
-rw-r--r-- arch/powerpc/include/asm/cache.h | 45
-rw-r--r-- arch/powerpc/include/asm/cacheflush.h | 75
-rw-r--r-- arch/powerpc/include/asm/cell-pmu.h | 105
-rw-r--r-- arch/powerpc/include/asm/cell-regs.h | 315
-rw-r--r-- arch/powerpc/include/asm/checksum.h | 117
-rw-r--r-- arch/powerpc/include/asm/clk_interface.h | 20
-rw-r--r-- arch/powerpc/include/asm/code-patching.h | 54
-rw-r--r-- arch/powerpc/include/asm/compat.h | 214
-rw-r--r-- arch/powerpc/include/asm/cpm.h | 106
-rw-r--r-- arch/powerpc/include/asm/cpm1.h | 652
-rw-r--r-- arch/powerpc/include/asm/cpm2.h | 1195
-rw-r--r-- arch/powerpc/include/asm/cputable.h | 514
-rw-r--r-- arch/powerpc/include/asm/cputhreads.h | 71
-rw-r--r-- arch/powerpc/include/asm/cputime.h | 235
-rw-r--r-- arch/powerpc/include/asm/current.h | 40
-rw-r--r-- arch/powerpc/include/asm/dbdma.h | 108
-rw-r--r-- arch/powerpc/include/asm/dcr-generic.h | 49
-rw-r--r-- arch/powerpc/include/asm/dcr-mmio.h | 61
-rw-r--r-- arch/powerpc/include/asm/dcr-native.h | 116
-rw-r--r-- arch/powerpc/include/asm/dcr-regs.h | 149
-rw-r--r-- arch/powerpc/include/asm/dcr.h | 78
-rw-r--r-- arch/powerpc/include/asm/delay.h | 34
-rw-r--r-- arch/powerpc/include/asm/device.h | 24
-rw-r--r-- arch/powerpc/include/asm/div64.h | 1
-rw-r--r-- arch/powerpc/include/asm/dma-mapping.h | 474
-rw-r--r-- arch/powerpc/include/asm/dma.h | 360
-rw-r--r-- arch/powerpc/include/asm/edac.h | 40
-rw-r--r-- arch/powerpc/include/asm/eeh.h | 211
-rw-r--r-- arch/powerpc/include/asm/eeh_event.h | 53
-rw-r--r-- arch/powerpc/include/asm/elf.h | 424
-rw-r--r-- arch/powerpc/include/asm/emergency-restart.h | 1
-rw-r--r-- arch/powerpc/include/asm/errno.h | 11
-rw-r--r-- arch/powerpc/include/asm/exception.h | 311
-rw-r--r-- arch/powerpc/include/asm/fb.h | 21
-rw-r--r-- arch/powerpc/include/asm/fcntl.h | 11
-rw-r--r-- arch/powerpc/include/asm/feature-fixups.h | 126
-rw-r--r-- arch/powerpc/include/asm/firmware.h | 132
-rw-r--r-- arch/powerpc/include/asm/fixmap.h | 106
-rw-r--r-- arch/powerpc/include/asm/floppy.h | 213
-rw-r--r-- arch/powerpc/include/asm/fs_pd.h | 50
-rw-r--r-- arch/powerpc/include/asm/fsl_gtm.h | 47
-rw-r--r-- arch/powerpc/include/asm/fsl_lbc.h | 311
-rw-r--r-- arch/powerpc/include/asm/ftrace.h | 14
-rw-r--r-- arch/powerpc/include/asm/futex.h | 117
-rw-r--r-- arch/powerpc/include/asm/gpio.h | 56
-rw-r--r-- arch/powerpc/include/asm/grackle.h | 12
-rw-r--r-- arch/powerpc/include/asm/hardirq.h | 29
-rw-r--r-- arch/powerpc/include/asm/heathrow.h | 67
-rw-r--r-- arch/powerpc/include/asm/highmem.h | 138
-rw-r--r-- arch/powerpc/include/asm/hugetlb.h | 75
-rw-r--r-- arch/powerpc/include/asm/hvcall.h | 296
-rw-r--r-- arch/powerpc/include/asm/hvconsole.h | 41
-rw-r--r-- arch/powerpc/include/asm/hvcserver.h | 59
-rw-r--r-- arch/powerpc/include/asm/hw_irq.h | 135
-rw-r--r-- arch/powerpc/include/asm/hydra.h | 102
-rw-r--r-- arch/powerpc/include/asm/i8259.h | 12
-rw-r--r-- arch/powerpc/include/asm/ibmebus.h | 60
-rw-r--r-- arch/powerpc/include/asm/ide.h | 58
-rw-r--r-- arch/powerpc/include/asm/immap_86xx.h | 156
-rw-r--r-- arch/powerpc/include/asm/immap_cpm2.h | 650
-rw-r--r-- arch/powerpc/include/asm/immap_qe.h | 483
-rw-r--r-- arch/powerpc/include/asm/io-defs.h | 60
-rw-r--r-- arch/powerpc/include/asm/io.h | 787
-rw-r--r-- arch/powerpc/include/asm/ioctl.h | 13
-rw-r--r-- arch/powerpc/include/asm/ioctls.h | 110
-rw-r--r-- arch/powerpc/include/asm/iommu.h | 131
-rw-r--r-- arch/powerpc/include/asm/ipcbuf.h | 34
-rw-r--r-- arch/powerpc/include/asm/ipic.h | 84
-rw-r--r-- arch/powerpc/include/asm/irq.h | 366
-rw-r--r-- arch/powerpc/include/asm/irq_regs.h | 2
-rw-r--r-- arch/powerpc/include/asm/irqflags.h | 42
-rw-r--r-- arch/powerpc/include/asm/iseries/alpaca.h | 31
-rw-r--r-- arch/powerpc/include/asm/iseries/hv_call.h | 111
-rw-r--r-- arch/powerpc/include/asm/iseries/hv_call_event.h | 201
-rw-r--r-- arch/powerpc/include/asm/iseries/hv_call_sc.h | 50
-rw-r--r-- arch/powerpc/include/asm/iseries/hv_call_xm.h | 61
-rw-r--r-- arch/powerpc/include/asm/iseries/hv_lp_config.h | 128
-rw-r--r-- arch/powerpc/include/asm/iseries/hv_lp_event.h | 162
-rw-r--r-- arch/powerpc/include/asm/iseries/hv_types.h | 112
-rw-r--r-- arch/powerpc/include/asm/iseries/iommu.h | 41
-rw-r--r-- arch/powerpc/include/asm/iseries/it_lp_queue.h | 78
-rw-r--r-- arch/powerpc/include/asm/iseries/lpar_map.h | 85
-rw-r--r-- arch/powerpc/include/asm/iseries/mf.h | 51
-rw-r--r-- arch/powerpc/include/asm/iseries/vio.h | 265
-rw-r--r-- arch/powerpc/include/asm/kdebug.h | 15
-rw-r--r-- arch/powerpc/include/asm/kdump.h | 35
-rw-r--r-- arch/powerpc/include/asm/kexec.h | 160
-rw-r--r-- arch/powerpc/include/asm/keylargo.h | 261
-rw-r--r-- arch/powerpc/include/asm/kgdb.h | 63
-rw-r--r-- arch/powerpc/include/asm/kmap_types.h | 33
-rw-r--r-- arch/powerpc/include/asm/kprobes.h | 118
-rw-r--r-- arch/powerpc/include/asm/kvm.h | 55
-rw-r--r-- arch/powerpc/include/asm/kvm_asm.h | 55
-rw-r--r-- arch/powerpc/include/asm/kvm_host.h | 155
-rw-r--r-- arch/powerpc/include/asm/kvm_para.h | 37
-rw-r--r-- arch/powerpc/include/asm/kvm_ppc.h | 95
-rw-r--r-- arch/powerpc/include/asm/libata-portmap.h | 12
-rw-r--r-- arch/powerpc/include/asm/linkage.h | 6
-rw-r--r-- arch/powerpc/include/asm/lmb.h | 15
-rw-r--r-- arch/powerpc/include/asm/local.h | 200
-rw-r--r-- arch/powerpc/include/asm/lppaca.h | 159
-rw-r--r-- arch/powerpc/include/asm/lv1call.h | 348
-rw-r--r-- arch/powerpc/include/asm/machdep.h | 365
-rw-r--r-- arch/powerpc/include/asm/macio.h | 142
-rw-r--r-- arch/powerpc/include/asm/mc146818rtc.h | 36
-rw-r--r-- arch/powerpc/include/asm/mediabay.h | 43
-rw-r--r-- arch/powerpc/include/asm/mman.h | 63
-rw-r--r-- arch/powerpc/include/asm/mmu-40x.h | 63
-rw-r--r-- arch/powerpc/include/asm/mmu-44x.h | 76
-rw-r--r-- arch/powerpc/include/asm/mmu-8xx.h | 145
-rw-r--r-- arch/powerpc/include/asm/mmu-fsl-booke.h | 82
-rw-r--r-- arch/powerpc/include/asm/mmu-hash32.h | 83
-rw-r--r-- arch/powerpc/include/asm/mmu-hash64.h | 478
-rw-r--r-- arch/powerpc/include/asm/mmu.h | 26
-rw-r--r-- arch/powerpc/include/asm/mmu_context.h | 280
-rw-r--r-- arch/powerpc/include/asm/mmzone.h | 47
-rw-r--r-- arch/powerpc/include/asm/module.h | 77
-rw-r--r-- arch/powerpc/include/asm/mpc512x.h | 22
-rw-r--r-- arch/powerpc/include/asm/mpc52xx.h | 295
-rw-r--r-- arch/powerpc/include/asm/mpc52xx_psc.h | 276
-rw-r--r-- arch/powerpc/include/asm/mpc6xx.h | 6
-rw-r--r-- arch/powerpc/include/asm/mpc8260.h | 25
-rw-r--r-- arch/powerpc/include/asm/mpc86xx.h | 33
-rw-r--r-- arch/powerpc/include/asm/mpc8xx.h | 12
-rw-r--r-- arch/powerpc/include/asm/mpic.h | 481
-rw-r--r-- arch/powerpc/include/asm/msgbuf.h | 33
-rw-r--r-- arch/powerpc/include/asm/mutex.h | 9
-rw-r--r-- arch/powerpc/include/asm/nvram.h | 139
-rw-r--r-- arch/powerpc/include/asm/of_device.h | 31
-rw-r--r-- arch/powerpc/include/asm/of_platform.h | 42
-rw-r--r-- arch/powerpc/include/asm/ohare.h | 54
-rw-r--r-- arch/powerpc/include/asm/oprofile_impl.h | 134
-rw-r--r-- arch/powerpc/include/asm/pSeries_reconfig.h | 29
-rw-r--r-- arch/powerpc/include/asm/paca.h | 112
-rw-r--r-- arch/powerpc/include/asm/page.h | 225
-rw-r--r-- arch/powerpc/include/asm/page_32.h | 38
-rw-r--r-- arch/powerpc/include/asm/page_64.h | 185
-rw-r--r-- arch/powerpc/include/asm/param.h | 22
-rw-r--r-- arch/powerpc/include/asm/parport.h | 39
-rw-r--r-- arch/powerpc/include/asm/pasemi_dma.h | 538
-rw-r--r-- arch/powerpc/include/asm/pci-bridge.h | 302
-rw-r--r-- arch/powerpc/include/asm/pci.h | 228
-rw-r--r-- arch/powerpc/include/asm/percpu.h | 24
-rw-r--r-- arch/powerpc/include/asm/pgalloc-32.h | 43
-rw-r--r-- arch/powerpc/include/asm/pgalloc-64.h | 166
-rw-r--r-- arch/powerpc/include/asm/pgalloc.h | 12
-rw-r--r-- arch/powerpc/include/asm/pgtable-4k.h | 117
-rw-r--r-- arch/powerpc/include/asm/pgtable-64k.h | 155
-rw-r--r-- arch/powerpc/include/asm/pgtable-ppc32.h | 802
-rw-r--r-- arch/powerpc/include/asm/pgtable-ppc64.h | 468
-rw-r--r-- arch/powerpc/include/asm/pgtable.h | 57
-rw-r--r-- arch/powerpc/include/asm/phyp_dump.h | 47
-rw-r--r-- arch/powerpc/include/asm/pmac_feature.h | 405
-rw-r--r-- arch/powerpc/include/asm/pmac_low_i2c.h | 107
-rw-r--r-- arch/powerpc/include/asm/pmac_pfunc.h | 252
-rw-r--r-- arch/powerpc/include/asm/pmc.h | 37
-rw-r--r-- arch/powerpc/include/asm/pmi.h | 66
-rw-r--r-- arch/powerpc/include/asm/poll.h | 1
-rw-r--r-- arch/powerpc/include/asm/posix_types.h | 128
-rw-r--r-- arch/powerpc/include/asm/ppc-pci.h | 149
-rw-r--r-- arch/powerpc/include/asm/ppc4xx.h | 18
-rw-r--r-- arch/powerpc/include/asm/ppc_asm.h | 689
-rw-r--r-- arch/powerpc/include/asm/processor.h | 314
-rw-r--r-- arch/powerpc/include/asm/prom.h | 356
-rw-r--r-- arch/powerpc/include/asm/ps3.h | 519
-rw-r--r-- arch/powerpc/include/asm/ps3av.h | 744
-rw-r--r-- arch/powerpc/include/asm/ps3fb.h | 44
-rw-r--r-- arch/powerpc/include/asm/ps3stor.h | 71
-rw-r--r-- arch/powerpc/include/asm/ptrace.h | 293
-rw-r--r-- arch/powerpc/include/asm/qe.h | 642
-rw-r--r-- arch/powerpc/include/asm/qe_ic.h | 128
-rw-r--r-- arch/powerpc/include/asm/reg.h | 788
-rw-r--r-- arch/powerpc/include/asm/reg_8xx.h | 42
-rw-r--r-- arch/powerpc/include/asm/reg_booke.h | 501
-rw-r--r-- arch/powerpc/include/asm/reg_fsl_emb.h | 72
-rw-r--r-- arch/powerpc/include/asm/resource.h | 1
-rw-r--r-- arch/powerpc/include/asm/rheap.h | 89
-rw-r--r-- arch/powerpc/include/asm/rio.h | 18
-rw-r--r-- arch/powerpc/include/asm/rtas.h | 247
-rw-r--r-- arch/powerpc/include/asm/rtc.h | 78
-rw-r--r-- arch/powerpc/include/asm/rwsem.h | 173
-rw-r--r-- arch/powerpc/include/asm/scatterlist.h | 50
-rw-r--r-- arch/powerpc/include/asm/seccomp.h | 20
-rw-r--r-- arch/powerpc/include/asm/sections.h | 22
-rw-r--r-- arch/powerpc/include/asm/sembuf.h | 36
-rw-r--r-- arch/powerpc/include/asm/serial.h | 24
-rw-r--r-- arch/powerpc/include/asm/setjmp.h | 18
-rw-r--r-- arch/powerpc/include/asm/setup.h | 6
-rw-r--r-- arch/powerpc/include/asm/shmbuf.h | 59
-rw-r--r-- arch/powerpc/include/asm/shmparam.h | 6
-rw-r--r-- arch/powerpc/include/asm/sigcontext.h | 87
-rw-r--r-- arch/powerpc/include/asm/siginfo.h | 26
-rw-r--r-- arch/powerpc/include/asm/signal.h | 150
-rw-r--r-- arch/powerpc/include/asm/smp.h | 127
-rw-r--r-- arch/powerpc/include/asm/smu.h | 700
-rw-r--r-- arch/powerpc/include/asm/socket.h | 64
-rw-r--r-- arch/powerpc/include/asm/sockios.h | 20
-rw-r--r-- arch/powerpc/include/asm/sparsemem.h | 32
-rw-r--r-- arch/powerpc/include/asm/spinlock.h | 295
-rw-r--r-- arch/powerpc/include/asm/spinlock_types.h | 20
-rw-r--r-- arch/powerpc/include/asm/spu.h | 732
-rw-r--r-- arch/powerpc/include/asm/spu_csa.h | 266
-rw-r--r-- arch/powerpc/include/asm/spu_info.h | 54
-rw-r--r-- arch/powerpc/include/asm/spu_priv1.h | 236
-rw-r--r-- arch/powerpc/include/asm/sstep.h | 27
-rw-r--r-- arch/powerpc/include/asm/stat.h | 81
-rw-r--r-- arch/powerpc/include/asm/statfs.h | 60
-rw-r--r-- arch/powerpc/include/asm/string.h | 32
-rw-r--r-- arch/powerpc/include/asm/suspend.h | 9
-rw-r--r-- arch/powerpc/include/asm/synch.h | 44
-rw-r--r-- arch/powerpc/include/asm/syscall.h | 84
-rw-r--r-- arch/powerpc/include/asm/syscalls.h | 52
-rw-r--r-- arch/powerpc/include/asm/systbl.h | 324
-rw-r--r-- arch/powerpc/include/asm/system.h | 548
-rw-r--r-- arch/powerpc/include/asm/tce.h | 50
-rw-r--r-- arch/powerpc/include/asm/termbits.h | 209
-rw-r--r-- arch/powerpc/include/asm/termios.h | 85
-rw-r--r-- arch/powerpc/include/asm/thread_info.h | 161
-rw-r--r-- arch/powerpc/include/asm/time.h | 255
-rw-r--r-- arch/powerpc/include/asm/timex.h | 50
-rw-r--r-- arch/powerpc/include/asm/tlb.h | 81
-rw-r--r-- arch/powerpc/include/asm/tlbflush.h | 166
-rw-r--r-- arch/powerpc/include/asm/topology.h | 117
-rw-r--r-- arch/powerpc/include/asm/tsi108.h | 121
-rw-r--r-- arch/powerpc/include/asm/tsi108_irq.h | 124
-rw-r--r-- arch/powerpc/include/asm/tsi108_pci.h | 45
-rw-r--r-- arch/powerpc/include/asm/types.h | 75
-rw-r--r-- arch/powerpc/include/asm/uaccess.h | 496
-rw-r--r-- arch/powerpc/include/asm/ucc.h | 64
-rw-r--r-- arch/powerpc/include/asm/ucc_fast.h | 244
-rw-r--r-- arch/powerpc/include/asm/ucc_slow.h | 290
-rw-r--r-- arch/powerpc/include/asm/ucontext.h | 40
-rw-r--r-- arch/powerpc/include/asm/udbg.h | 55
-rw-r--r-- arch/powerpc/include/asm/uic.h | 21
-rw-r--r-- arch/powerpc/include/asm/unaligned.h | 16
-rw-r--r-- arch/powerpc/include/asm/uninorth.h | 229
-rw-r--r-- arch/powerpc/include/asm/unistd.h | 398
-rw-r--r-- arch/powerpc/include/asm/user.h | 51
-rw-r--r-- arch/powerpc/include/asm/vdso.h | 78
-rw-r--r-- arch/powerpc/include/asm/vdso_datapage.h | 121
-rw-r--r-- arch/powerpc/include/asm/vga.h | 53
-rw-r--r-- arch/powerpc/include/asm/vio.h | 118
-rw-r--r-- arch/powerpc/include/asm/xilinx_intc.h | 20
-rw-r--r-- arch/powerpc/include/asm/xmon.h | 33
-rw-r--r-- arch/powerpc/include/asm/xor.h | 1
-rw-r--r-- arch/powerpc/kernel/Makefile | 14
-rw-r--r-- arch/powerpc/kernel/cpu_setup_44x.S | 6
-rw-r--r-- arch/powerpc/kernel/entry_32.S | 17
-rw-r--r-- arch/powerpc/kernel/entry_64.S | 10
-rw-r--r-- arch/powerpc/kernel/irq.c | 25
-rw-r--r-- arch/powerpc/kernel/legacy_serial.c | 44
-rw-r--r-- arch/powerpc/kernel/lparcfg.c | 4
-rw-r--r-- arch/powerpc/kernel/process.c | 10
-rw-r--r-- arch/powerpc/kernel/prom_init.c | 39
-rw-r--r-- arch/powerpc/kernel/ptrace.c | 64
-rw-r--r-- arch/powerpc/kernel/ptrace32.c | 2
-rw-r--r-- arch/powerpc/kernel/setup-common.c | 24
-rw-r--r-- arch/powerpc/kernel/setup_64.c | 3
-rw-r--r-- arch/powerpc/kernel/signal.c | 23
-rw-r--r-- arch/powerpc/kernel/smp.c | 119
-rw-r--r-- arch/powerpc/kernel/stacktrace.c | 1
-rw-r--r-- arch/powerpc/kernel/sysfs.c | 311
-rw-r--r-- arch/powerpc/kernel/vdso.c | 2
-rw-r--r-- arch/powerpc/kernel/vio.c | 6
-rw-r--r-- arch/powerpc/lib/Makefile | 2
-rw-r--r-- arch/powerpc/mm/Makefile | 3
-rw-r--r-- arch/powerpc/mm/gup.c | 280
-rw-r--r-- arch/powerpc/mm/hugetlbpage.c | 9
-rw-r--r-- arch/powerpc/mm/mem.c | 2
-rw-r--r-- arch/powerpc/mm/ppc_mmu_32.c | 4
-rw-r--r-- arch/powerpc/mm/tlb_64.c | 2
-rw-r--r-- arch/powerpc/platforms/52xx/Makefile | 4
-rw-r--r-- arch/powerpc/platforms/83xx/mpc832x_mds.c | 1
-rw-r--r-- arch/powerpc/platforms/83xx/mpc832x_rdb.c | 1
-rw-r--r-- arch/powerpc/platforms/83xx/mpc834x_itx.c | 1
-rw-r--r-- arch/powerpc/platforms/83xx/mpc834x_mds.c | 1
-rw-r--r-- arch/powerpc/platforms/83xx/mpc836x_mds.c | 1
-rw-r--r-- arch/powerpc/platforms/83xx/sbc834x.c | 1
-rw-r--r-- arch/powerpc/platforms/85xx/ksi8560.c | 1
-rw-r--r-- arch/powerpc/platforms/85xx/mpc8536_ds.c | 1
-rw-r--r-- arch/powerpc/platforms/85xx/mpc85xx_ads.c | 1
-rw-r--r-- arch/powerpc/platforms/85xx/mpc85xx_ds.c | 1
-rw-r--r-- arch/powerpc/platforms/85xx/mpc85xx_mds.c | 1
-rw-r--r-- arch/powerpc/platforms/85xx/sbc8560.c | 1
-rw-r--r-- arch/powerpc/platforms/86xx/mpc86xx_smp.c | 2
-rw-r--r-- arch/powerpc/platforms/8xx/Kconfig | 10
-rw-r--r-- arch/powerpc/platforms/Kconfig | 3
-rw-r--r-- arch/powerpc/platforms/Makefile | 6
-rw-r--r-- arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c | 6
-rw-r--r-- arch/powerpc/platforms/powermac/Makefile | 3
-rw-r--r-- arch/powerpc/platforms/powermac/setup.c | 72
-rw-r--r-- arch/powerpc/platforms/powermac/udbg_scc.c | 12
-rw-r--r-- arch/powerpc/platforms/pseries/cmm.c | 8
-rw-r--r-- arch/powerpc/sysdev/Makefile | 2
-rw-r--r-- arch/powerpc/sysdev/cpm1.c | 267
-rw-r--r-- arch/powerpc/sysdev/cpm2.c | 45
-rw-r--r-- arch/powerpc/sysdev/cpm_common.c | 123
-rw-r--r-- arch/powerpc/sysdev/rtc_cmos_setup.c | 23
337 files changed, 40591 insertions, 277 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index fe88418..63c9caf 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -42,6 +42,9 @@ config GENERIC_HARDIRQS
bool
default y
+config HAVE_GET_USER_PAGES_FAST
+ def_bool PPC64
+
config HAVE_SETUP_PER_CPU_AREA
def_bool PPC64
@@ -117,6 +120,7 @@ config PPC
select HAVE_KPROBES
select HAVE_ARCH_KGDB
select HAVE_KRETPROBES
+ select HAVE_ARCH_TRACEHOOK
select HAVE_LMB
select HAVE_DMA_ATTRS if PPC64
select USE_GENERIC_SMP_HELPERS if SMP
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 8c8aadb..4ebc52a 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -97,7 +97,7 @@ config IRQSTACKS
config VIRQ_DEBUG
bool "Expose hardware/virtual IRQ mapping via debugfs"
- depends on DEBUG_FS && PPC_MERGE
+ depends on DEBUG_FS
help
This option will show the mapping relationship between hardware irq
numbers and virtual irq numbers. The mapping is exposed via debugfs
diff --git a/arch/powerpc/boot/dts/mpc832x_mds.dts b/arch/powerpc/boot/dts/mpc832x_mds.dts
index 7345743..fbc9304 100644
--- a/arch/powerpc/boot/dts/mpc832x_mds.dts
+++ b/arch/powerpc/boot/dts/mpc832x_mds.dts
@@ -68,6 +68,7 @@
#address-cells = <1>;
#size-cells = <1>;
device_type = "soc";
+ compatible = "simple-bus";
ranges = <0x0 0xe0000000 0x00100000>;
reg = <0xe0000000 0x00000200>;
bus-frequency = <132000000>;
diff --git a/arch/powerpc/boot/dts/mpc832x_rdb.dts b/arch/powerpc/boot/dts/mpc832x_rdb.dts
index e74c045..b157d18 100644
--- a/arch/powerpc/boot/dts/mpc832x_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc832x_rdb.dts
@@ -51,6 +51,7 @@
#address-cells = <1>;
#size-cells = <1>;
device_type = "soc";
+ compatible = "simple-bus";
ranges = <0x0 0xe0000000 0x00100000>;
reg = <0xe0000000 0x00000200>;
bus-frequency = <0>;
diff --git a/arch/powerpc/boot/dts/mpc8349emitx.dts b/arch/powerpc/boot/dts/mpc8349emitx.dts
index 8dfab56..700e076 100644
--- a/arch/powerpc/boot/dts/mpc8349emitx.dts
+++ b/arch/powerpc/boot/dts/mpc8349emitx.dts
@@ -52,6 +52,7 @@
#address-cells = <1>;
#size-cells = <1>;
device_type = "soc";
+ compatible = "simple-bus";
ranges = <0x0 0xe0000000 0x00100000>;
reg = <0xe0000000 0x00000200>;
bus-frequency = <0>; // from bootloader
diff --git a/arch/powerpc/boot/dts/mpc8349emitxgp.dts b/arch/powerpc/boot/dts/mpc8349emitxgp.dts
index 49ca349..cdd3063 100644
--- a/arch/powerpc/boot/dts/mpc8349emitxgp.dts
+++ b/arch/powerpc/boot/dts/mpc8349emitxgp.dts
@@ -50,6 +50,7 @@
#address-cells = <1>;
#size-cells = <1>;
device_type = "soc";
+ compatible = "simple-bus";
ranges = <0x0 0xe0000000 0x00100000>;
reg = <0xe0000000 0x00000200>;
bus-frequency = <0>; // from bootloader
diff --git a/arch/powerpc/boot/dts/mpc834x_mds.dts b/arch/powerpc/boot/dts/mpc834x_mds.dts
index ba586cb..783241c 100644
--- a/arch/powerpc/boot/dts/mpc834x_mds.dts
+++ b/arch/powerpc/boot/dts/mpc834x_mds.dts
@@ -57,6 +57,7 @@
#address-cells = <1>;
#size-cells = <1>;
device_type = "soc";
+ compatible = "simple-bus";
ranges = <0x0 0xe0000000 0x00100000>;
reg = <0xe0000000 0x00000200>;
bus-frequency = <0>;
diff --git a/arch/powerpc/boot/dts/mpc836x_mds.dts b/arch/powerpc/boot/dts/mpc836x_mds.dts
index 3701dae..a3b76a7 100644
--- a/arch/powerpc/boot/dts/mpc836x_mds.dts
+++ b/arch/powerpc/boot/dts/mpc836x_mds.dts
@@ -61,6 +61,7 @@
#address-cells = <1>;
#size-cells = <1>;
device_type = "soc";
+ compatible = "simple-bus";
ranges = <0x0 0xe0000000 0x00100000>;
reg = <0xe0000000 0x00000200>;
bus-frequency = <264000000>;
diff --git a/arch/powerpc/boot/dts/mpc836x_rdk.dts b/arch/powerpc/boot/dts/mpc836x_rdk.dts
index 8acd1d6..89c9202 100644
--- a/arch/powerpc/boot/dts/mpc836x_rdk.dts
+++ b/arch/powerpc/boot/dts/mpc836x_rdk.dts
@@ -149,18 +149,14 @@
};
crypto@30000 {
- compatible = "fsl,sec2-crypto";
+ compatible = "fsl,sec2.0";
reg = <0x30000 0x10000>;
- interrupts = <11 8>;
+ interrupts = <11 0x8>;
interrupt-parent = <&ipic>;
- num-channels = <4>;
- channel-fifo-len = <24>;
- exec-units-mask = <0x7e>;
- /*
- * desc mask is for rev1.x, we need runtime fixup
- * for >=2.x
- */
- descriptor-types-mask = <0x1010ebf>;
+ fsl,num-channels = <4>;
+ fsl,channel-fifo-len = <24>;
+ fsl,exec-units-mask = <0x7e>;
+ fsl,descriptor-types-mask = <0x01010ebf>;
};
ipic: interrupt-controller@700 {
diff --git a/arch/powerpc/boot/dts/mpc8377_mds.dts b/arch/powerpc/boot/dts/mpc8377_mds.dts
index 0a700cb..432782b 100644
--- a/arch/powerpc/boot/dts/mpc8377_mds.dts
+++ b/arch/powerpc/boot/dts/mpc8377_mds.dts
@@ -117,6 +117,7 @@
#address-cells = <1>;
#size-cells = <1>;
device_type = "soc";
+ compatible = "simple-bus";
ranges = <0x0 0xe0000000 0x00100000>;
reg = <0xe0000000 0x00000200>;
bus-frequency = <0>;
diff --git a/arch/powerpc/boot/dts/mpc8378_mds.dts b/arch/powerpc/boot/dts/mpc8378_mds.dts
index 29c8c76..ed32c8d 100644
--- a/arch/powerpc/boot/dts/mpc8378_mds.dts
+++ b/arch/powerpc/boot/dts/mpc8378_mds.dts
@@ -117,6 +117,7 @@
#address-cells = <1>;
#size-cells = <1>;
device_type = "soc";
+ compatible = "simple-bus";
ranges = <0x0 0xe0000000 0x00100000>;
reg = <0xe0000000 0x00000200>;
bus-frequency = <0>;
diff --git a/arch/powerpc/boot/dts/mpc8379_mds.dts b/arch/powerpc/boot/dts/mpc8379_mds.dts
index d641a89..f4db9ed 100644
--- a/arch/powerpc/boot/dts/mpc8379_mds.dts
+++ b/arch/powerpc/boot/dts/mpc8379_mds.dts
@@ -117,6 +117,7 @@
#address-cells = <1>;
#size-cells = <1>;
device_type = "soc";
+ compatible = "simple-bus";
ranges = <0x0 0xe0000000 0x00100000>;
reg = <0xe0000000 0x00000200>;
bus-frequency = <0>;
diff --git a/arch/powerpc/boot/dts/mpc8536ds.dts b/arch/powerpc/boot/dts/mpc8536ds.dts
index 02cfa24..1505d68 100644
--- a/arch/powerpc/boot/dts/mpc8536ds.dts
+++ b/arch/powerpc/boot/dts/mpc8536ds.dts
@@ -49,6 +49,7 @@
#address-cells = <1>;
#size-cells = <1>;
device_type = "soc";
+ compatible = "simple-bus";
ranges = <0x0 0xffe00000 0x100000>;
reg = <0xffe00000 0x1000>;
bus-frequency = <0>; // Filled out by uboot.
diff --git a/arch/powerpc/boot/dts/mpc8540ads.dts b/arch/powerpc/boot/dts/mpc8540ads.dts
index f2273a8..9568bfa 100644
--- a/arch/powerpc/boot/dts/mpc8540ads.dts
+++ b/arch/powerpc/boot/dts/mpc8540ads.dts
@@ -53,6 +53,7 @@
#address-cells = <1>;
#size-cells = <1>;
device_type = "soc";
+ compatible = "simple-bus";
ranges = <0x0 0xe0000000 0x100000>;
reg = <0xe0000000 0x100000>; // CCSRBAR 1M
bus-frequency = <0>;
diff --git a/arch/powerpc/boot/dts/mpc8541cds.dts b/arch/powerpc/boot/dts/mpc8541cds.dts
index c4469f1..6480f4f 100644
--- a/arch/powerpc/boot/dts/mpc8541cds.dts
+++ b/arch/powerpc/boot/dts/mpc8541cds.dts
@@ -53,6 +53,7 @@
#address-cells = <1>;
#size-cells = <1>;
device_type = "soc";
+ compatible = "simple-bus";
ranges = <0x0 0xe0000000 0x100000>;
reg = <0xe0000000 0x1000>; // CCSRBAR 1M
bus-frequency = <0>;
diff --git a/arch/powerpc/boot/dts/mpc8544ds.dts b/arch/powerpc/boot/dts/mpc8544ds.dts
index 7d3829d..f1fb207 100644
--- a/arch/powerpc/boot/dts/mpc8544ds.dts
+++ b/arch/powerpc/boot/dts/mpc8544ds.dts
@@ -54,6 +54,7 @@
#address-cells = <1>;
#size-cells = <1>;
device_type = "soc";
+ compatible = "simple-bus";
ranges = <0x0 0xe0000000 0x100000>;
reg = <0xe0000000 0x1000>; // CCSRBAR 1M
diff --git a/arch/powerpc/boot/dts/mpc8548cds.dts b/arch/powerpc/boot/dts/mpc8548cds.dts
index d84466b..431b496 100644
--- a/arch/powerpc/boot/dts/mpc8548cds.dts
+++ b/arch/powerpc/boot/dts/mpc8548cds.dts
@@ -58,6 +58,7 @@
#address-cells = <1>;
#size-cells = <1>;
device_type = "soc";
+ compatible = "simple-bus";
ranges = <0x0 0xe0000000 0x100000>;
reg = <0xe0000000 0x1000>; // CCSRBAR
bus-frequency = <0>;
diff --git a/arch/powerpc/boot/dts/mpc8555cds.dts b/arch/powerpc/boot/dts/mpc8555cds.dts
index e03a780..d833a5c 100644
--- a/arch/powerpc/boot/dts/mpc8555cds.dts
+++ b/arch/powerpc/boot/dts/mpc8555cds.dts
@@ -53,6 +53,7 @@
#address-cells = <1>;
#size-cells = <1>;
device_type = "soc";
+ compatible = "simple-bus";
ranges = <0x0 0xe0000000 0x100000>;
reg = <0xe0000000 0x1000>; // CCSRBAR 1M
bus-frequency = <0>;
diff --git a/arch/powerpc/boot/dts/mpc8560ads.dts b/arch/powerpc/boot/dts/mpc8560ads.dts
index ba8159d..4d1f2f2 100644
--- a/arch/powerpc/boot/dts/mpc8560ads.dts
+++ b/arch/powerpc/boot/dts/mpc8560ads.dts
@@ -53,6 +53,7 @@
#address-cells = <1>;
#size-cells = <1>;
device_type = "soc";
+ compatible = "simple-bus";
ranges = <0x0 0xe0000000 0x100000>;
reg = <0xe0000000 0x200>;
bus-frequency = <330000000>;
diff --git a/arch/powerpc/boot/dts/mpc8568mds.dts b/arch/powerpc/boot/dts/mpc8568mds.dts
index 9c30a34..a15f103 100644
--- a/arch/powerpc/boot/dts/mpc8568mds.dts
+++ b/arch/powerpc/boot/dts/mpc8568mds.dts
@@ -60,6 +60,7 @@
#address-cells = <1>;
#size-cells = <1>;
device_type = "soc";
+ compatible = "simple-bus";
ranges = <0x0 0xe0000000 0x100000>;
reg = <0xe0000000 0x1000>;
bus-frequency = <0>;
diff --git a/arch/powerpc/boot/dts/mpc8572ds.dts b/arch/powerpc/boot/dts/mpc8572ds.dts
index 08c61e3..e124dd1 100644
--- a/arch/powerpc/boot/dts/mpc8572ds.dts
+++ b/arch/powerpc/boot/dts/mpc8572ds.dts
@@ -68,6 +68,7 @@
#address-cells = <1>;
#size-cells = <1>;
device_type = "soc";
+ compatible = "simple-bus";
ranges = <0x0 0xffe00000 0x100000>;
reg = <0xffe00000 0x1000>; // CCSRBAR & soc regs, remove once parse code for immrbase fixed
bus-frequency = <0>; // Filled out by uboot.
diff --git a/arch/powerpc/boot/dts/mpc8610_hpcd.dts b/arch/powerpc/boot/dts/mpc8610_hpcd.dts
index 666185f..3b3a106 100644
--- a/arch/powerpc/boot/dts/mpc8610_hpcd.dts
+++ b/arch/powerpc/boot/dts/mpc8610_hpcd.dts
@@ -202,6 +202,11 @@
fsl,has-rstcr;
};
+ wdt@e4000 {
+ compatible = "fsl,mpc8610-wdt";
+ reg = <0xe4000 0x100>;
+ };
+
i2s@16000 {
compatible = "fsl,mpc8610-ssi";
cell-index = <0>;
diff --git a/arch/powerpc/boot/io.h b/arch/powerpc/boot/io.h
index ccaedae..7c09f48 100644
--- a/arch/powerpc/boot/io.h
+++ b/arch/powerpc/boot/io.h
@@ -6,7 +6,7 @@
/*
* Low-level I/O routines.
*
- * Copied from <file:include/asm-powerpc/io.h> (which has no copyright)
+ * Copied from <file:arch/powerpc/include/asm/io.h> (which has no copyright)
*/
static inline int in_8(const volatile unsigned char *addr)
{
diff --git a/arch/powerpc/include/asm/8253pit.h b/arch/powerpc/include/asm/8253pit.h
new file mode 100644
index 0000000..b70d6e5
--- /dev/null
+++ b/arch/powerpc/include/asm/8253pit.h
@@ -0,0 +1,10 @@
+#ifndef _ASM_POWERPC_8253PIT_H
+#define _ASM_POWERPC_8253PIT_H
+
+/*
+ * 8253/8254 Programmable Interval Timer
+ */
+
+#define PIT_TICK_RATE 1193182UL
+
+#endif /* _ASM_POWERPC_8253PIT_H */
diff --git a/arch/powerpc/include/asm/8xx_immap.h b/arch/powerpc/include/asm/8xx_immap.h
new file mode 100644
index 0000000..4b0e152
--- /dev/null
+++ b/arch/powerpc/include/asm/8xx_immap.h
@@ -0,0 +1,564 @@
+/*
+ * MPC8xx Internal Memory Map
+ * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
+ *
+ * The I/O on the MPC860 is comprised of blocks of special registers
+ * and the dual port ram for the Communication Processor Module.
+ * Within this space are functional units such as the SIU, memory
+ * controller, system timers, and other control functions. It is
+ * a combination that I found difficult to separate into logical
+ * functional files.....but anyone else is welcome to try. -- Dan
+ */
+#ifdef __KERNEL__
+#ifndef __IMMAP_8XX__
+#define __IMMAP_8XX__
+
+/* System configuration registers.
+*/
+typedef struct sys_conf {
+ uint sc_siumcr;
+ uint sc_sypcr;
+ uint sc_swt;
+ char res1[2];
+ ushort sc_swsr;
+ uint sc_sipend;
+ uint sc_simask;
+ uint sc_siel;
+ uint sc_sivec;
+ uint sc_tesr;
+ char res2[0xc];
+ uint sc_sdcr;
+ char res3[0x4c];
+} sysconf8xx_t;
+
+/* PCMCIA configuration registers.
+*/
+typedef struct pcmcia_conf {
+ uint pcmc_pbr0;
+ uint pcmc_por0;
+ uint pcmc_pbr1;
+ uint pcmc_por1;
+ uint pcmc_pbr2;
+ uint pcmc_por2;
+ uint pcmc_pbr3;
+ uint pcmc_por3;
+ uint pcmc_pbr4;
+ uint pcmc_por4;
+ uint pcmc_pbr5;
+ uint pcmc_por5;
+ uint pcmc_pbr6;
+ uint pcmc_por6;
+ uint pcmc_pbr7;
+ uint pcmc_por7;
+ char res1[0x20];
+ uint pcmc_pgcra;
+ uint pcmc_pgcrb;
+ uint pcmc_pscr;
+ char res2[4];
+ uint pcmc_pipr;
+ char res3[4];
+ uint pcmc_per;
+ char res4[4];
+} pcmconf8xx_t;
+
+/* Memory controller registers.
+*/
+typedef struct mem_ctlr {
+ uint memc_br0;
+ uint memc_or0;
+ uint memc_br1;
+ uint memc_or1;
+ uint memc_br2;
+ uint memc_or2;
+ uint memc_br3;
+ uint memc_or3;
+ uint memc_br4;
+ uint memc_or4;
+ uint memc_br5;
+ uint memc_or5;
+ uint memc_br6;
+ uint memc_or6;
+ uint memc_br7;
+ uint memc_or7;
+ char res1[0x24];
+ uint memc_mar;
+ uint memc_mcr;
+ char res2[4];
+ uint memc_mamr;
+ uint memc_mbmr;
+ ushort memc_mstat;
+ ushort memc_mptpr;
+ uint memc_mdr;
+ char res3[0x80];
+} memctl8xx_t;
+
+/*-----------------------------------------------------------------------
+ * BR - Memory Controler: Base Register 16-9
+ */
+#define BR_BA_MSK 0xffff8000 /* Base Address Mask */
+#define BR_AT_MSK 0x00007000 /* Address Type Mask */
+#define BR_PS_MSK 0x00000c00 /* Port Size Mask */
+#define BR_PS_32 0x00000000 /* 32 bit port size */
+#define BR_PS_16 0x00000800 /* 16 bit port size */
+#define BR_PS_8 0x00000400 /* 8 bit port size */
+#define BR_PARE 0x00000200 /* Parity Enable */
+#define BR_WP 0x00000100 /* Write Protect */
+#define BR_MS_MSK 0x000000c0 /* Machine Select Mask */
+#define BR_MS_GPCM 0x00000000 /* G.P.C.M. Machine Select */
+#define BR_MS_UPMA 0x00000080 /* U.P.M.A Machine Select */
+#define BR_MS_UPMB 0x000000c0 /* U.P.M.B Machine Select */
+#define BR_V 0x00000001 /* Bank Valid */
+
+/*-----------------------------------------------------------------------
+ * OR - Memory Controler: Option Register 16-11
+ */
+#define OR_AM_MSK 0xffff8000 /* Address Mask Mask */
+#define OR_ATM_MSK 0x00007000 /* Address Type Mask Mask */
+#define OR_CSNT_SAM 0x00000800 /* Chip Select Negation Time/ Start */
+ /* Address Multiplex */
+#define OR_ACS_MSK 0x00000600 /* Address to Chip Select Setup mask */
+#define OR_ACS_DIV1 0x00000000 /* CS is output at the same time */
+#define OR_ACS_DIV4 0x00000400 /* CS is output 1/4 a clock later */
+#define OR_ACS_DIV2 0x00000600 /* CS is output 1/2 a clock later */
+#define OR_G5LA 0x00000400 /* Output #GPL5 on #GPL_A5 */
+#define OR_G5LS 0x00000200 /* Drive #GPL high on falling edge of...*/
+#define OR_BI 0x00000100 /* Burst inhibit */
+#define OR_SCY_MSK 0x000000f0 /* Cycle Length in Clocks */
+#define OR_SCY_0_CLK 0x00000000 /* 0 clock cycles wait states */
+#define OR_SCY_1_CLK 0x00000010 /* 1 clock cycles wait states */
+#define OR_SCY_2_CLK 0x00000020 /* 2 clock cycles wait states */
+#define OR_SCY_3_CLK 0x00000030 /* 3 clock cycles wait states */
+#define OR_SCY_4_CLK 0x00000040 /* 4 clock cycles wait states */
+#define OR_SCY_5_CLK 0x00000050 /* 5 clock cycles wait states */
+#define OR_SCY_6_CLK 0x00000060 /* 6 clock cycles wait states */
+#define OR_SCY_7_CLK 0x00000070 /* 7 clock cycles wait states */
+#define OR_SCY_8_CLK 0x00000080 /* 8 clock cycles wait states */
+#define OR_SCY_9_CLK 0x00000090 /* 9 clock cycles wait states */
+#define OR_SCY_10_CLK 0x000000a0 /* 10 clock cycles wait states */
+#define OR_SCY_11_CLK 0x000000b0 /* 11 clock cycles wait states */
+#define OR_SCY_12_CLK 0x000000c0 /* 12 clock cycles wait states */
+#define OR_SCY_13_CLK 0x000000d0 /* 13 clock cycles wait states */
+#define OR_SCY_14_CLK 0x000000e0 /* 14 clock cycles wait states */
+#define OR_SCY_15_CLK 0x000000f0 /* 15 clock cycles wait states */
+#define OR_SETA 0x00000008 /* External Transfer Acknowledge */
+#define OR_TRLX 0x00000004 /* Timing Relaxed */
+#define OR_EHTR 0x00000002 /* Extended Hold Time on Read */
+
+/* System Integration Timers.
+*/
+typedef struct sys_int_timers {
+ ushort sit_tbscr;
+ char res0[0x02];
+ uint sit_tbreff0;
+ uint sit_tbreff1;
+ char res1[0x14];
+ ushort sit_rtcsc;
+ char res2[0x02];
+ uint sit_rtc;
+ uint sit_rtsec;
+ uint sit_rtcal;
+ char res3[0x10];
+ ushort sit_piscr;
+ char res4[2];
+ uint sit_pitc;
+ uint sit_pitr;
+ char res5[0x34];
+} sit8xx_t;
+
+#define TBSCR_TBIRQ_MASK ((ushort)0xff00)
+#define TBSCR_REFA ((ushort)0x0080)
+#define TBSCR_REFB ((ushort)0x0040)
+#define TBSCR_REFAE ((ushort)0x0008)
+#define TBSCR_REFBE ((ushort)0x0004)
+#define TBSCR_TBF ((ushort)0x0002)
+#define TBSCR_TBE ((ushort)0x0001)
+
+#define RTCSC_RTCIRQ_MASK ((ushort)0xff00)
+#define RTCSC_SEC ((ushort)0x0080)
+#define RTCSC_ALR ((ushort)0x0040)
+#define RTCSC_38K ((ushort)0x0010)
+#define RTCSC_SIE ((ushort)0x0008)
+#define RTCSC_ALE ((ushort)0x0004)
+#define RTCSC_RTF ((ushort)0x0002)
+#define RTCSC_RTE ((ushort)0x0001)
+
+#define PISCR_PIRQ_MASK ((ushort)0xff00)
+#define PISCR_PS ((ushort)0x0080)
+#define PISCR_PIE ((ushort)0x0004)
+#define PISCR_PTF ((ushort)0x0002)
+#define PISCR_PTE ((ushort)0x0001)
+
+/* Clocks and Reset.
+*/
+typedef struct clk_and_reset {
+ uint car_sccr;
+ uint car_plprcr;
+ uint car_rsr;
+ char res[0x74]; /* Reserved area */
+} car8xx_t;
+
+/* System Integration Timers keys.
+*/
+typedef struct sitk {
+ uint sitk_tbscrk;
+ uint sitk_tbreff0k;
+ uint sitk_tbreff1k;
+ uint sitk_tbk;
+ char res1[0x10];
+ uint sitk_rtcsck;
+ uint sitk_rtck;
+ uint sitk_rtseck;
+ uint sitk_rtcalk;
+ char res2[0x10];
+ uint sitk_piscrk;
+ uint sitk_pitck;
+ char res3[0x38];
+} sitk8xx_t;
+
+/* Clocks and reset keys.
+*/
+typedef struct cark {
+ uint cark_sccrk;
+ uint cark_plprcrk;
+ uint cark_rsrk;
+ char res[0x474];
+} cark8xx_t;
+
+/* The key to unlock registers maintained by keep-alive power.
+*/
+#define KAPWR_KEY ((unsigned int)0x55ccaa33)
+
+/* Video interface. MPC823 Only.
+*/
+typedef struct vid823 {
+ ushort vid_vccr;
+ ushort res1;
+ u_char vid_vsr;
+ u_char res2;
+ u_char vid_vcmr;
+ u_char res3;
+ uint vid_vbcb;
+ uint res4;
+ uint vid_vfcr0;
+ uint vid_vfaa0;
+ uint vid_vfba0;
+ uint vid_vfcr1;
+ uint vid_vfaa1;
+ uint vid_vfba1;
+ u_char res5[0x18];
+} vid823_t;
+
+/* LCD interface. 823 Only.
+*/
+typedef struct lcd {
+ uint lcd_lccr;
+ uint lcd_lchcr;
+ uint lcd_lcvcr;
+ char res1[4];
+ uint lcd_lcfaa;
+ uint lcd_lcfba;
+ char lcd_lcsr;
+ char res2[0x7];
+} lcd823_t;
+
+/* I2C
+*/
+typedef struct i2c {
+ u_char i2c_i2mod;
+ char res1[3];
+ u_char i2c_i2add;
+ char res2[3];
+ u_char i2c_i2brg;
+ char res3[3];
+ u_char i2c_i2com;
+ char res4[3];
+ u_char i2c_i2cer;
+ char res5[3];
+ u_char i2c_i2cmr;
+ char res6[0x8b];
+} i2c8xx_t;
+
+/* DMA control/status registers.
+*/
+typedef struct sdma_csr {
+ char res1[4];
+ uint sdma_sdar;
+ u_char sdma_sdsr;
+ char res3[3];
+ u_char sdma_sdmr;
+ char res4[3];
+ u_char sdma_idsr1;
+ char res5[3];
+ u_char sdma_idmr1;
+ char res6[3];
+ u_char sdma_idsr2;
+ char res7[3];
+ u_char sdma_idmr2;
+ char res8[0x13];
+} sdma8xx_t;
+
+/* Communication Processor Module Interrupt Controller.
+*/
+typedef struct cpm_ic {
+ ushort cpic_civr;
+ char res[0xe];
+ uint cpic_cicr;
+ uint cpic_cipr;
+ uint cpic_cimr;
+ uint cpic_cisr;
+} cpic8xx_t;
+
+/* Input/Output Port control/status registers.
+*/
+typedef struct io_port {
+ ushort iop_padir;
+ ushort iop_papar;
+ ushort iop_paodr;
+ ushort iop_padat;
+ char res1[8];
+ ushort iop_pcdir;
+ ushort iop_pcpar;
+ ushort iop_pcso;
+ ushort iop_pcdat;
+ ushort iop_pcint;
+ char res2[6];
+ ushort iop_pddir;
+ ushort iop_pdpar;
+ char res3[2];
+ ushort iop_pddat;
+ uint utmode;
+ char res4[4];
+} iop8xx_t;
+
+/* Communication Processor Module Timers
+*/
+typedef struct cpm_timers {
+ ushort cpmt_tgcr;
+ char res1[0xe];
+ ushort cpmt_tmr1;
+ ushort cpmt_tmr2;
+ ushort cpmt_trr1;
+ ushort cpmt_trr2;
+ ushort cpmt_tcr1;
+ ushort cpmt_tcr2;
+ ushort cpmt_tcn1;
+ ushort cpmt_tcn2;
+ ushort cpmt_tmr3;
+ ushort cpmt_tmr4;
+ ushort cpmt_trr3;
+ ushort cpmt_trr4;
+ ushort cpmt_tcr3;
+ ushort cpmt_tcr4;
+ ushort cpmt_tcn3;
+ ushort cpmt_tcn4;
+ ushort cpmt_ter1;
+ ushort cpmt_ter2;
+ ushort cpmt_ter3;
+ ushort cpmt_ter4;
+ char res2[8];
+} cpmtimer8xx_t;
+
+/* Finally, the Communication Processor stuff.....
+*/
+typedef struct scc { /* Serial communication channels */
+ uint scc_gsmrl;
+ uint scc_gsmrh;
+ ushort scc_psmr;
+ char res1[2];
+ ushort scc_todr;
+ ushort scc_dsr;
+ ushort scc_scce;
+ char res2[2];
+ ushort scc_sccm;
+ char res3;
+ u_char scc_sccs;
+ char res4[8];
+} scc_t;
+
+typedef struct smc { /* Serial management channels */
+ char res1[2];
+ ushort smc_smcmr;
+ char res2[2];
+ u_char smc_smce;
+ char res3[3];
+ u_char smc_smcm;
+ char res4[5];
+} smc_t;
+
+/* MPC860T Fast Ethernet Controller. It isn't part of the CPM, but
+ * it fits within the address space.
+ */
+
+typedef struct fec {
+ uint fec_addr_low; /* lower 32 bits of station address */
+ ushort fec_addr_high; /* upper 16 bits of station address */
+ ushort res1; /* reserved */
+ uint fec_hash_table_high; /* upper 32-bits of hash table */
+ uint fec_hash_table_low; /* lower 32-bits of hash table */
+ uint fec_r_des_start; /* beginning of Rx descriptor ring */
+ uint fec_x_des_start; /* beginning of Tx descriptor ring */
+ uint fec_r_buff_size; /* Rx buffer size */
+ uint res2[9]; /* reserved */
+ uint fec_ecntrl; /* ethernet control register */
+ uint fec_ievent; /* interrupt event register */
+ uint fec_imask; /* interrupt mask register */
+ uint fec_ivec; /* interrupt level and vector status */
+ uint fec_r_des_active; /* Rx ring updated flag */
+ uint fec_x_des_active; /* Tx ring updated flag */
+ uint res3[10]; /* reserved */
+ uint fec_mii_data; /* MII data register */
+ uint fec_mii_speed; /* MII speed control register */
+ uint res4[17]; /* reserved */
+ uint fec_r_bound; /* end of RAM (read-only) */
+ uint fec_r_fstart; /* Rx FIFO start address */
+ uint res5[6]; /* reserved */
+ uint fec_x_fstart; /* Tx FIFO start address */
+ uint res6[17]; /* reserved */
+ uint fec_fun_code; /* fec SDMA function code */
+ uint res7[3]; /* reserved */
+ uint fec_r_cntrl; /* Rx control register */
+ uint fec_r_hash; /* Rx hash register */
+ uint res8[14]; /* reserved */
+ uint fec_x_cntrl; /* Tx control register */
+ uint res9[0x1e]; /* reserved */
+} fec_t;
+
+/* The FEC and LCD color map share the same address space....
+ * I guess we will never see an 823T :-).
+ */
+union fec_lcd {
+ fec_t fl_un_fec;
+ u_char fl_un_cmap[0x200];
+};
+
+typedef struct comm_proc {
+ /* General control and status registers.
+ */
+ ushort cp_cpcr;
+ u_char res1[2];
+ ushort cp_rccr;
+ u_char res2;
+ u_char cp_rmds;
+ u_char res3[4];
+ ushort cp_cpmcr1;
+ ushort cp_cpmcr2;
+ ushort cp_cpmcr3;
+ ushort cp_cpmcr4;
+ u_char res4[2];
+ ushort cp_rter;
+ u_char res5[2];
+ ushort cp_rtmr;
+ u_char res6[0x14];
+
+ /* Baud rate generators.
+ */
+ uint cp_brgc1;
+ uint cp_brgc2;
+ uint cp_brgc3;
+ uint cp_brgc4;
+
+ /* Serial Communication Channels.
+ */
+ scc_t cp_scc[4];
+
+ /* Serial Management Channels.
+ */
+ smc_t cp_smc[2];
+
+ /* Serial Peripheral Interface.
+ */
+ ushort cp_spmode;
+ u_char res7[4];
+ u_char cp_spie;
+ u_char res8[3];
+ u_char cp_spim;
+ u_char res9[2];
+ u_char cp_spcom;
+ u_char res10[2];
+
+ /* Parallel Interface Port.
+ */
+ u_char res11[2];
+ ushort cp_pipc;
+ u_char res12[2];
+ ushort cp_ptpr;
+ uint cp_pbdir;
+ uint cp_pbpar;
+ u_char res13[2];
+ ushort cp_pbodr;
+ uint cp_pbdat;
+
+ /* Port E - MPC87x/88x only.
+ */
+ uint cp_pedir;
+ uint cp_pepar;
+ uint cp_peso;
+ uint cp_peodr;
+ uint cp_pedat;
+
+ /* Communications Processor Timing Register -
+ Contains RMII Timing for the FECs on MPC87x/88x only.
+ */
+ uint cp_cptr;
+
+ /* Serial Interface and Time Slot Assignment.
+ */
+ uint cp_simode;
+ u_char cp_sigmr;
+ u_char res15;
+ u_char cp_sistr;
+ u_char cp_sicmr;
+ u_char res16[4];
+ uint cp_sicr;
+ uint cp_sirp;
+ u_char res17[0xc];
+
+ /* 256 bytes of MPC823 video controller RAM array.
+ */
+ u_char cp_vcram[0x100];
+ u_char cp_siram[0x200];
+
+ /* The fast ethernet controller is not really part of the CPM,
+ * but it resides in the address space.
+ * The LCD color map is also here.
+ */
+ union fec_lcd fl_un;
+#define cp_fec fl_un.fl_un_fec
+#define lcd_cmap fl_un.fl_un_cmap
+ char res18[0xE00];
+
+ /* The DUET family has a second FEC here */
+ fec_t cp_fec2;
+#define cp_fec1 cp_fec /* consistency macro */
+
+ /* Dual Ported RAM follows.
+ * There are many different formats for this memory area
+ * depending upon the devices used and options chosen.
+ * Some processors don't have all of it populated.
+ */
+ u_char cp_dpmem[0x1C00]; /* BD / Data / ucode */
+ u_char cp_dparam[0x400]; /* Parameter RAM */
+} cpm8xx_t;
+
+/* Internal memory map.
+*/
+typedef struct immap {
+ sysconf8xx_t im_siu_conf; /* SIU Configuration */
+ pcmconf8xx_t im_pcmcia; /* PCMCIA Configuration */
+ memctl8xx_t im_memctl; /* Memory Controller */
+ sit8xx_t im_sit; /* System integration timers */
+ car8xx_t im_clkrst; /* Clocks and reset */
+ sitk8xx_t im_sitk; /* Sys int timer keys */
+ cark8xx_t im_clkrstk; /* Clocks and reset keys */
+ vid823_t im_vid; /* Video (823 only) */
+ lcd823_t im_lcd; /* LCD (823 only) */
+ i2c8xx_t im_i2c; /* I2C control/status */
+ sdma8xx_t im_sdma; /* SDMA control/status */
+ cpic8xx_t im_cpic; /* CPM Interrupt Controller */
+ iop8xx_t im_ioport; /* IO Port control/status */
+ cpmtimer8xx_t im_cpmtimer; /* CPM timers */
+ cpm8xx_t im_cpm; /* Communication processor */
+} immap_t;
+
+#endif /* __IMMAP_8XX__ */
+#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
new file mode 100644
index 0000000..5ab7d7f
--- /dev/null
+++ b/arch/powerpc/include/asm/Kbuild
@@ -0,0 +1,37 @@
+include include/asm-generic/Kbuild.asm
+
+header-y += auxvec.h
+header-y += ioctls.h
+header-y += sembuf.h
+header-y += siginfo.h
+header-y += stat.h
+header-y += errno.h
+header-y += ipcbuf.h
+header-y += msgbuf.h
+header-y += shmbuf.h
+header-y += socket.h
+header-y += termbits.h
+header-y += fcntl.h
+header-y += poll.h
+header-y += sockios.h
+header-y += ucontext.h
+header-y += ioctl.h
+header-y += linkage.h
+header-y += resource.h
+header-y += sigcontext.h
+header-y += statfs.h
+header-y += ps3fb.h
+
+unifdef-y += bootx.h
+unifdef-y += byteorder.h
+unifdef-y += cputable.h
+unifdef-y += elf.h
+unifdef-y += nvram.h
+unifdef-y += param.h
+unifdef-y += posix_types.h
+unifdef-y += seccomp.h
+unifdef-y += signal.h
+unifdef-y += spu_info.h
+unifdef-y += termios.h
+unifdef-y += types.h
+unifdef-y += unistd.h
diff --git a/arch/powerpc/include/asm/a.out.h b/arch/powerpc/include/asm/a.out.h
new file mode 100644
index 0000000..89cead6
--- /dev/null
+++ b/arch/powerpc/include/asm/a.out.h
@@ -0,0 +1,20 @@
+#ifndef _ASM_POWERPC_A_OUT_H
+#define _ASM_POWERPC_A_OUT_H
+
+struct exec
+{
+ unsigned long a_info; /* Use macros N_MAGIC, etc for access */
+ unsigned a_text; /* length of text, in bytes */
+ unsigned a_data; /* length of data, in bytes */
+ unsigned a_bss; /* length of uninitialized data area for file, in bytes */
+ unsigned a_syms; /* length of symbol table data in file, in bytes */
+ unsigned a_entry; /* start address */
+ unsigned a_trsize; /* length of relocation info for text, in bytes */
+ unsigned a_drsize; /* length of relocation info for data, in bytes */
+};
+
+#define N_TRSIZE(a) ((a).a_trsize)
+#define N_DRSIZE(a) ((a).a_drsize)
+#define N_SYMSIZE(a) ((a).a_syms)
+
+#endif /* _ASM_POWERPC_A_OUT_H */
diff --git a/arch/powerpc/include/asm/abs_addr.h b/arch/powerpc/include/asm/abs_addr.h
new file mode 100644
index 0000000..98324c5
--- /dev/null
+++ b/arch/powerpc/include/asm/abs_addr.h
@@ -0,0 +1,75 @@
+#ifndef _ASM_POWERPC_ABS_ADDR_H
+#define _ASM_POWERPC_ABS_ADDR_H
+#ifdef __KERNEL__
+
+
+/*
+ * c 2001 PPC 64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/lmb.h>
+
+#include <asm/types.h>
+#include <asm/page.h>
+#include <asm/prom.h>
+#include <asm/firmware.h>
+
+struct mschunks_map {
+ unsigned long num_chunks;
+ unsigned long chunk_size;
+ unsigned long chunk_shift;
+ unsigned long chunk_mask;
+ u32 *mapping;
+};
+
+extern struct mschunks_map mschunks_map;
+
+/* Chunks are 256 KB */
+#define MSCHUNKS_CHUNK_SHIFT (18)
+#define MSCHUNKS_CHUNK_SIZE (1UL << MSCHUNKS_CHUNK_SHIFT)
+#define MSCHUNKS_OFFSET_MASK (MSCHUNKS_CHUNK_SIZE - 1)
+
+static inline unsigned long chunk_to_addr(unsigned long chunk)
+{
+ return chunk << MSCHUNKS_CHUNK_SHIFT;
+}
+
+static inline unsigned long addr_to_chunk(unsigned long addr)
+{
+ return addr >> MSCHUNKS_CHUNK_SHIFT;
+}
+
+static inline unsigned long phys_to_abs(unsigned long pa)
+{
+ unsigned long chunk;
+
+ /* This is a no-op on non-iSeries */
+ if (!firmware_has_feature(FW_FEATURE_ISERIES))
+ return pa;
+
+ chunk = addr_to_chunk(pa);
+
+ if (chunk < mschunks_map.num_chunks)
+ chunk = mschunks_map.mapping[chunk];
+
+ return chunk_to_addr(chunk) + (pa & MSCHUNKS_OFFSET_MASK);
+}
+
+/* Convenience macros */
+#define virt_to_abs(va) phys_to_abs(__pa(va))
+#define abs_to_virt(aa) __va(aa)
+
+/*
+ * Converts Virtual Address to Real Address for
+ * Legacy iSeries Hypervisor calls
+ */
+#define iseries_hv_addr(virtaddr) \
+ (0x8000000000000000 | virt_to_abs(virtaddr))
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_ABS_ADDR_H */
diff --git a/arch/powerpc/include/asm/agp.h b/arch/powerpc/include/asm/agp.h
new file mode 100644
index 0000000..86455c4
--- /dev/null
+++ b/arch/powerpc/include/asm/agp.h
@@ -0,0 +1,22 @@
+#ifndef _ASM_POWERPC_AGP_H
+#define _ASM_POWERPC_AGP_H
+#ifdef __KERNEL__
+
+#include <asm/io.h>
+
+#define map_page_into_agp(page)
+#define unmap_page_from_agp(page)
+#define flush_agp_cache() mb()
+
+/* Convert a physical address to an address suitable for the GART. */
+#define phys_to_gart(x) (x)
+#define gart_to_phys(x) (x)
+
+/* GATT allocation. Returns/accepts GATT kernel virtual address. */
+#define alloc_gatt_pages(order) \
+ ((char *)__get_free_pages(GFP_KERNEL, (order)))
+#define free_gatt_pages(table, order) \
+ free_pages((unsigned long)(table), (order))
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_AGP_H */
diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h
new file mode 100644
index 0000000..8f0fe79
--- /dev/null
+++ b/arch/powerpc/include/asm/asm-compat.h
@@ -0,0 +1,69 @@
+#ifndef _ASM_POWERPC_ASM_COMPAT_H
+#define _ASM_POWERPC_ASM_COMPAT_H
+
+#include <asm/types.h>
+
+#ifdef __ASSEMBLY__
+# define stringify_in_c(...) __VA_ARGS__
+# define ASM_CONST(x) x
+#else
+/* This version of stringify will deal with commas... */
+# define __stringify_in_c(...) #__VA_ARGS__
+# define stringify_in_c(...) __stringify_in_c(__VA_ARGS__) " "
+# define __ASM_CONST(x) x##UL
+# define ASM_CONST(x) __ASM_CONST(x)
+#endif
+
+
+#ifdef __powerpc64__
+
+/* operations for longs and pointers */
+#define PPC_LL stringify_in_c(ld)
+#define PPC_STL stringify_in_c(std)
+#define PPC_LCMPI stringify_in_c(cmpdi)
+#define PPC_LONG stringify_in_c(.llong)
+#define PPC_LONG_ALIGN stringify_in_c(.balign 8)
+#define PPC_TLNEI stringify_in_c(tdnei)
+#define PPC_LLARX stringify_in_c(ldarx)
+#define PPC_STLCX stringify_in_c(stdcx.)
+#define PPC_CNTLZL stringify_in_c(cntlzd)
+
+/* Move to CR, single-entry optimized version. Only available
+ * on POWER4 and later.
+ */
+#ifdef CONFIG_POWER4_ONLY
+#define PPC_MTOCRF stringify_in_c(mtocrf)
+#else
+#define PPC_MTOCRF stringify_in_c(mtcrf)
+#endif
+
+#else /* 32-bit */
+
+/* operations for longs and pointers */
+#define PPC_LL stringify_in_c(lwz)
+#define PPC_STL stringify_in_c(stw)
+#define PPC_LCMPI stringify_in_c(cmpwi)
+#define PPC_LONG stringify_in_c(.long)
+#define PPC_LONG_ALIGN stringify_in_c(.balign 4)
+#define PPC_TLNEI stringify_in_c(twnei)
+#define PPC_LLARX stringify_in_c(lwarx)
+#define PPC_STLCX stringify_in_c(stwcx.)
+#define PPC_CNTLZL stringify_in_c(cntlzw)
+#define PPC_MTOCRF stringify_in_c(mtcrf)
+
+#endif
+
+#ifdef __KERNEL__
+#ifdef CONFIG_IBM405_ERR77
+/* Erratum #77 on the 405 means we need a sync or dcbt before every
+ * stwcx. The old ATOMIC_SYNC_FIX covered some but not all of this.
+ */
+#define PPC405_ERR77(ra,rb) stringify_in_c(dcbt ra, rb;)
+#define PPC405_ERR77_SYNC stringify_in_c(sync;)
+#else
+#define PPC405_ERR77(ra,rb)
+#define PPC405_ERR77_SYNC
+#endif
+#endif
+
+#endif /* _ASM_POWERPC_ASM_COMPAT_H */
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
new file mode 100644
index 0000000..f3fc733
--- /dev/null
+++ b/arch/powerpc/include/asm/atomic.h
@@ -0,0 +1,479 @@
+#ifndef _ASM_POWERPC_ATOMIC_H_
+#define _ASM_POWERPC_ATOMIC_H_
+
+/*
+ * PowerPC atomic operations
+ */
+
+typedef struct { int counter; } atomic_t;
+
+#ifdef __KERNEL__
+#include <linux/compiler.h>
+#include <asm/synch.h>
+#include <asm/asm-compat.h>
+#include <asm/system.h>
+
+#define ATOMIC_INIT(i) { (i) }
+
+static __inline__ int atomic_read(const atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
+
+ return t;
+}
+
+static __inline__ void atomic_set(atomic_t *v, int i)
+{
+ __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
+}
+
+static __inline__ void atomic_add(int a, atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%3 # atomic_add\n\
+ add %0,%2,%0\n"
+ PPC405_ERR77(0,%3)
+" stwcx. %0,0,%3 \n\
+ bne- 1b"
+ : "=&r" (t), "+m" (v->counter)
+ : "r" (a), "r" (&v->counter)
+ : "cc");
+}
+
+static __inline__ int atomic_add_return(int a, atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+ LWSYNC_ON_SMP
+"1: lwarx %0,0,%2 # atomic_add_return\n\
+ add %0,%1,%0\n"
+ PPC405_ERR77(0,%2)
+" stwcx. %0,0,%2 \n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (t)
+ : "r" (a), "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
+
+static __inline__ void atomic_sub(int a, atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%3 # atomic_sub\n\
+ subf %0,%2,%0\n"
+ PPC405_ERR77(0,%3)
+" stwcx. %0,0,%3 \n\
+ bne- 1b"
+ : "=&r" (t), "+m" (v->counter)
+ : "r" (a), "r" (&v->counter)
+ : "cc");
+}
+
+static __inline__ int atomic_sub_return(int a, atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+ LWSYNC_ON_SMP
+"1: lwarx %0,0,%2 # atomic_sub_return\n\
+ subf %0,%1,%0\n"
+ PPC405_ERR77(0,%2)
+" stwcx. %0,0,%2 \n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (t)
+ : "r" (a), "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+static __inline__ void atomic_inc(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%2 # atomic_inc\n\
+ addic %0,%0,1\n"
+ PPC405_ERR77(0,%2)
+" stwcx. %0,0,%2 \n\
+ bne- 1b"
+ : "=&r" (t), "+m" (v->counter)
+ : "r" (&v->counter)
+ : "cc");
+}
+
+static __inline__ int atomic_inc_return(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+ LWSYNC_ON_SMP
+"1: lwarx %0,0,%1 # atomic_inc_return\n\
+ addic %0,%0,1\n"
+ PPC405_ERR77(0,%1)
+" stwcx. %0,0,%1 \n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (t)
+ : "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+/*
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+
+static __inline__ void atomic_dec(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%2 # atomic_dec\n\
+ addic %0,%0,-1\n"
+ PPC405_ERR77(0,%2)\
+" stwcx. %0,0,%2\n\
+ bne- 1b"
+ : "=&r" (t), "+m" (v->counter)
+ : "r" (&v->counter)
+ : "cc");
+}
+
+static __inline__ int atomic_dec_return(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+ LWSYNC_ON_SMP
+"1: lwarx %0,0,%1 # atomic_dec_return\n\
+ addic %0,%0,-1\n"
+ PPC405_ERR77(0,%1)
+" stwcx. %0,0,%1\n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (t)
+ : "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+/**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int t;
+
+ __asm__ __volatile__ (
+ LWSYNC_ON_SMP
+"1: lwarx %0,0,%1 # atomic_add_unless\n\
+ cmpw 0,%0,%3 \n\
+ beq- 2f \n\
+ add %0,%2,%0 \n"
+ PPC405_ERR77(0,%2)
+" stwcx. %0,0,%1 \n\
+ bne- 1b \n"
+ ISYNC_ON_SMP
+" subf %0,%2,%0 \n\
+2:"
+ : "=&r" (t)
+ : "r" (&v->counter), "r" (a), "r" (u)
+ : "cc", "memory");
+
+ return t != u;
+}
+
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
+#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0)
+#define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0)
+
+/*
+ * Atomically test *v and decrement if it is greater than 0.
+ * The function returns the old value of *v minus 1, even if
+ * the atomic variable, v, was not decremented.
+ */
+static __inline__ int atomic_dec_if_positive(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+ LWSYNC_ON_SMP
+"1: lwarx %0,0,%1 # atomic_dec_if_positive\n\
+ cmpwi %0,1\n\
+ addi %0,%0,-1\n\
+ blt- 2f\n"
+ PPC405_ERR77(0,%1)
+" stwcx. %0,0,%1\n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ "\n\
+2:" : "=&b" (t)
+ : "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+#define smp_mb__before_atomic_dec() smp_mb()
+#define smp_mb__after_atomic_dec() smp_mb()
+#define smp_mb__before_atomic_inc() smp_mb()
+#define smp_mb__after_atomic_inc() smp_mb()
+
+#ifdef __powerpc64__
+
+typedef struct { long counter; } atomic64_t;
+
+#define ATOMIC64_INIT(i) { (i) }
+
+static __inline__ long atomic64_read(const atomic64_t *v)
+{
+ long t;
+
+ __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
+
+ return t;
+}
+
+static __inline__ void atomic64_set(atomic64_t *v, long i)
+{
+ __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
+}
+
+static __inline__ void atomic64_add(long a, atomic64_t *v)
+{
+ long t;
+
+ __asm__ __volatile__(
+"1: ldarx %0,0,%3 # atomic64_add\n\
+ add %0,%2,%0\n\
+ stdcx. %0,0,%3 \n\
+ bne- 1b"
+ : "=&r" (t), "+m" (v->counter)
+ : "r" (a), "r" (&v->counter)
+ : "cc");
+}
+
+static __inline__ long atomic64_add_return(long a, atomic64_t *v)
+{
+ long t;
+
+ __asm__ __volatile__(
+ LWSYNC_ON_SMP
+"1: ldarx %0,0,%2 # atomic64_add_return\n\
+ add %0,%1,%0\n\
+ stdcx. %0,0,%2 \n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (t)
+ : "r" (a), "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
+
+static __inline__ void atomic64_sub(long a, atomic64_t *v)
+{
+ long t;
+
+ __asm__ __volatile__(
+"1: ldarx %0,0,%3 # atomic64_sub\n\
+ subf %0,%2,%0\n\
+ stdcx. %0,0,%3 \n\
+ bne- 1b"
+ : "=&r" (t), "+m" (v->counter)
+ : "r" (a), "r" (&v->counter)
+ : "cc");
+}
+
+static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
+{
+ long t;
+
+ __asm__ __volatile__(
+ LWSYNC_ON_SMP
+"1: ldarx %0,0,%2 # atomic64_sub_return\n\
+ subf %0,%1,%0\n\
+ stdcx. %0,0,%2 \n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (t)
+ : "r" (a), "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+static __inline__ void atomic64_inc(atomic64_t *v)
+{
+ long t;
+
+ __asm__ __volatile__(
+"1: ldarx %0,0,%2 # atomic64_inc\n\
+ addic %0,%0,1\n\
+ stdcx. %0,0,%2 \n\
+ bne- 1b"
+ : "=&r" (t), "+m" (v->counter)
+ : "r" (&v->counter)
+ : "cc");
+}
+
+static __inline__ long atomic64_inc_return(atomic64_t *v)
+{
+ long t;
+
+ __asm__ __volatile__(
+ LWSYNC_ON_SMP
+"1: ldarx %0,0,%1 # atomic64_inc_return\n\
+ addic %0,%0,1\n\
+ stdcx. %0,0,%1 \n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (t)
+ : "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+/*
+ * atomic64_inc_and_test - increment and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+
+static __inline__ void atomic64_dec(atomic64_t *v)
+{
+ long t;
+
+ __asm__ __volatile__(
+"1: ldarx %0,0,%2 # atomic64_dec\n\
+ addic %0,%0,-1\n\
+ stdcx. %0,0,%2\n\
+ bne- 1b"
+ : "=&r" (t), "+m" (v->counter)
+ : "r" (&v->counter)
+ : "cc");
+}
+
+static __inline__ long atomic64_dec_return(atomic64_t *v)
+{
+ long t;
+
+ __asm__ __volatile__(
+ LWSYNC_ON_SMP
+"1: ldarx %0,0,%1 # atomic64_dec_return\n\
+ addic %0,%0,-1\n\
+ stdcx. %0,0,%1\n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (t)
+ : "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
+#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
+
+/*
+ * Atomically test *v and decrement if it is greater than 0.
+ * The function returns the old value of *v minus 1.
+ */
+static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
+{
+ long t;
+
+ __asm__ __volatile__(
+ LWSYNC_ON_SMP
+"1: ldarx %0,0,%1 # atomic64_dec_if_positive\n\
+ addic. %0,%0,-1\n\
+ blt- 2f\n\
+ stdcx. %0,0,%1\n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ "\n\
+2:" : "=&r" (t)
+ : "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+
+/**
+ * atomic64_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+ long t;
+
+ __asm__ __volatile__ (
+ LWSYNC_ON_SMP
+"1: ldarx %0,0,%1 # atomic_add_unless\n\
+ cmpd 0,%0,%3 \n\
+ beq- 2f \n\
+ add %0,%2,%0 \n"
+" stdcx. %0,0,%1 \n\
+ bne- 1b \n"
+ ISYNC_ON_SMP
+" subf %0,%2,%0 \n\
+2:"
+ : "=&r" (t)
+ : "r" (&v->counter), "r" (a), "r" (u)
+ : "cc", "memory");
+
+ return t != u;
+}
+
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
+#endif /* __powerpc64__ */
+
+#include <asm-generic/atomic.h>
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_ATOMIC_H_ */
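
As a quick illustration of how these primitives compose (a sketch outside the patch itself; the example_* names are hypothetical and the value-returning forms above provide the barrier semantics), a simple 64-bit reference count might be built like this:

static atomic64_t example_refcount = ATOMIC64_INIT(1);

/* Take a reference; fails (returns 0) once the count has already hit zero. */
static int example_get(void)
{
	return atomic64_inc_not_zero(&example_refcount);
}

/* Drop a reference; atomic64_dec_and_test() is true only for the final put. */
static void example_put(void (*release)(void))
{
	if (atomic64_dec_and_test(&example_refcount))
		release();
}
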
diff --git a/arch/powerpc/include/asm/auxvec.h b/arch/powerpc/include/asm/auxvec.h
new file mode 100644
index 0000000..19a099b
--- /dev/null
+++ b/arch/powerpc/include/asm/auxvec.h
@@ -0,0 +1,19 @@
+#ifndef _ASM_POWERPC_AUXVEC_H
+#define _ASM_POWERPC_AUXVEC_H
+
+/*
+ * We need to put in some extra aux table entries to tell glibc what
+ * the cache block size is, so it can use the dcbz instruction safely.
+ */
+#define AT_DCACHEBSIZE 19
+#define AT_ICACHEBSIZE 20
+#define AT_UCACHEBSIZE 21
+/* A special ignored type value for PPC, for glibc compatibility. */
+#define AT_IGNOREPPC 22
+
+/* The vDSO location. We have to use the same value as x86 for glibc's
+ * sake :-)
+ */
+#define AT_SYSINFO_EHDR 33
+
+#endif
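
For context, a userspace sketch (not part of this header; it assumes glibc's getauxval() from <sys/auxv.h> and repeats the constant values locally): the kernel publishes these entries in the ELF auxiliary vector, so a program can read the cache block sizes directly.

#include <stdio.h>
#include <sys/auxv.h>

#define EXAMPLE_AT_DCACHEBSIZE	19	/* same value as AT_DCACHEBSIZE above */
#define EXAMPLE_AT_ICACHEBSIZE	20	/* same value as AT_ICACHEBSIZE above */

int main(void)
{
	printf("dcache block size: %lu bytes\n",
	       getauxval(EXAMPLE_AT_DCACHEBSIZE));
	printf("icache block size: %lu bytes\n",
	       getauxval(EXAMPLE_AT_ICACHEBSIZE));
	return 0;
}
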
diff --git a/arch/powerpc/include/asm/backlight.h b/arch/powerpc/include/asm/backlight.h
new file mode 100644
index 0000000..8cf5c37
--- /dev/null
+++ b/arch/powerpc/include/asm/backlight.h
@@ -0,0 +1,41 @@
+/*
+ * Routines for handling backlight control on PowerBooks
+ *
+ * For now, implementation resides in
+ * arch/powerpc/platforms/powermac/backlight.c
+ *
+ */
+#ifndef __ASM_POWERPC_BACKLIGHT_H
+#define __ASM_POWERPC_BACKLIGHT_H
+#ifdef __KERNEL__
+
+#include <linux/fb.h>
+#include <linux/mutex.h>
+
+/* For locking instructions, see the implementation file */
+extern struct backlight_device *pmac_backlight;
+extern struct mutex pmac_backlight_mutex;
+
+extern int pmac_backlight_curve_lookup(struct fb_info *info, int value);
+
+extern int pmac_has_backlight_type(const char *type);
+
+extern void pmac_backlight_key(int direction);
+static inline void pmac_backlight_key_up(void)
+{
+ pmac_backlight_key(0);
+}
+static inline void pmac_backlight_key_down(void)
+{
+ pmac_backlight_key(1);
+}
+
+extern void pmac_backlight_set_legacy_brightness_pmu(int brightness);
+extern int pmac_backlight_set_legacy_brightness(int brightness);
+extern int pmac_backlight_get_legacy_brightness(void);
+
+extern void pmac_backlight_enable(void);
+extern void pmac_backlight_disable(void);
+
+#endif /* __KERNEL__ */
+#endif
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
new file mode 100644
index 0000000..897eade
--- /dev/null
+++ b/arch/powerpc/include/asm/bitops.h
@@ -0,0 +1,410 @@
+/*
+ * PowerPC atomic bit operations.
+ *
+ * Merged version by David Gibson <david@gibson.dropbear.id.au>.
+ * Based on ppc64 versions by: Dave Engebretsen, Todd Inglett, Don
+ * Reed, Pat McCarthy, Peter Bergner, Anton Blanchard. They
+ * originally took it from the ppc32 code.
+ *
+ * Within a word, bits are numbered LSB first.  Lots of places make
+ * this assumption by directly testing bits with (val & (1<<nr)).
+ * This can cause confusion for large (> 1 word) bitmaps on a
+ * big-endian system because, unlike little endian, the number of each
+ * bit depends on the word size.
+ *
+ * The bitop functions are defined to work on unsigned longs, so for a
+ * ppc64 system the bits end up numbered:
+ * |63..............0|127............64|191...........128|255...........192|
+ * and on ppc32:
+ * |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224|
+ *
+ * There are a few little-endian macros used mostly for filesystem
+ * bitmaps; these work on similar bit array layouts, but are
+ * byte-oriented:
+ * |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56|
+ *
+ * The main difference is that bits 3-5 (64b) or 3-4 (32b) in the bit
+ * number field need to be reversed compared to the big-endian bit
+ * fields. This can be achieved by XOR with 0x38 (64b) or 0x18 (32b).
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_POWERPC_BITOPS_H
+#define _ASM_POWERPC_BITOPS_H
+
+#ifdef __KERNEL__
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <linux/compiler.h>
+#include <asm/asm-compat.h>
+#include <asm/synch.h>
+
+/*
+ * clear_bit doesn't imply a memory barrier
+ */
+#define smp_mb__before_clear_bit() smp_mb()
+#define smp_mb__after_clear_bit() smp_mb()
+
+#define BITOP_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
+#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
+#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
+
+static __inline__ void set_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long old;
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+ __asm__ __volatile__(
+"1:" PPC_LLARX "%0,0,%3 # set_bit\n"
+ "or %0,%0,%2\n"
+ PPC405_ERR77(0,%3)
+ PPC_STLCX "%0,0,%3\n"
+ "bne- 1b"
+ : "=&r" (old), "+m" (*p)
+ : "r" (mask), "r" (p)
+ : "cc" );
+}
+
+static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long old;
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+ __asm__ __volatile__(
+"1:" PPC_LLARX "%0,0,%3 # clear_bit\n"
+ "andc %0,%0,%2\n"
+ PPC405_ERR77(0,%3)
+ PPC_STLCX "%0,0,%3\n"
+ "bne- 1b"
+ : "=&r" (old), "+m" (*p)
+ : "r" (mask), "r" (p)
+ : "cc" );
+}
+
+static __inline__ void clear_bit_unlock(int nr, volatile unsigned long *addr)
+{
+ unsigned long old;
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+ __asm__ __volatile__(
+ LWSYNC_ON_SMP
+"1:" PPC_LLARX "%0,0,%3 # clear_bit_unlock\n"
+ "andc %0,%0,%2\n"
+ PPC405_ERR77(0,%3)
+ PPC_STLCX "%0,0,%3\n"
+ "bne- 1b"
+ : "=&r" (old), "+m" (*p)
+ : "r" (mask), "r" (p)
+ : "cc", "memory");
+}
+
+static __inline__ void change_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long old;
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+ __asm__ __volatile__(
+"1:" PPC_LLARX "%0,0,%3 # change_bit\n"
+ "xor %0,%0,%2\n"
+ PPC405_ERR77(0,%3)
+ PPC_STLCX "%0,0,%3\n"
+ "bne- 1b"
+ : "=&r" (old), "+m" (*p)
+ : "r" (mask), "r" (p)
+ : "cc" );
+}
+
+static __inline__ int test_and_set_bit(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ unsigned long old, t;
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+ __asm__ __volatile__(
+ LWSYNC_ON_SMP
+"1:" PPC_LLARX "%0,0,%3 # test_and_set_bit\n"
+ "or %1,%0,%2 \n"
+ PPC405_ERR77(0,%3)
+ PPC_STLCX "%1,0,%3 \n"
+ "bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (old), "=&r" (t)
+ : "r" (mask), "r" (p)
+ : "cc", "memory");
+
+ return (old & mask) != 0;
+}
+
+static __inline__ int test_and_set_bit_lock(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ unsigned long old, t;
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+ __asm__ __volatile__(
+"1:" PPC_LLARX "%0,0,%3 # test_and_set_bit_lock\n"
+ "or %1,%0,%2 \n"
+ PPC405_ERR77(0,%3)
+ PPC_STLCX "%1,0,%3 \n"
+ "bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (old), "=&r" (t)
+ : "r" (mask), "r" (p)
+ : "cc", "memory");
+
+ return (old & mask) != 0;
+}
+
+static __inline__ int test_and_clear_bit(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ unsigned long old, t;
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+ __asm__ __volatile__(
+ LWSYNC_ON_SMP
+"1:" PPC_LLARX "%0,0,%3 # test_and_clear_bit\n"
+ "andc %1,%0,%2 \n"
+ PPC405_ERR77(0,%3)
+ PPC_STLCX "%1,0,%3 \n"
+ "bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (old), "=&r" (t)
+ : "r" (mask), "r" (p)
+ : "cc", "memory");
+
+ return (old & mask) != 0;
+}
+
+static __inline__ int test_and_change_bit(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ unsigned long old, t;
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+ __asm__ __volatile__(
+ LWSYNC_ON_SMP
+"1:" PPC_LLARX "%0,0,%3 # test_and_change_bit\n"
+ "xor %1,%0,%2 \n"
+ PPC405_ERR77(0,%3)
+ PPC_STLCX "%1,0,%3 \n"
+ "bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (old), "=&r" (t)
+ : "r" (mask), "r" (p)
+ : "cc", "memory");
+
+ return (old & mask) != 0;
+}
+
+static __inline__ void set_bits(unsigned long mask, unsigned long *addr)
+{
+ unsigned long old;
+
+ __asm__ __volatile__(
+"1:" PPC_LLARX "%0,0,%3 # set_bits\n"
+ "or %0,%0,%2\n"
+ PPC_STLCX "%0,0,%3\n"
+ "bne- 1b"
+ : "=&r" (old), "+m" (*addr)
+ : "r" (mask), "r" (addr)
+ : "cc");
+}
+
+#include <asm-generic/bitops/non-atomic.h>
+
+static __inline__ void __clear_bit_unlock(int nr, volatile unsigned long *addr)
+{
+ __asm__ __volatile__(LWSYNC_ON_SMP "" ::: "memory");
+ __clear_bit(nr, addr);
+}
+
+/*
+ * Return the zero-based bit position (LE, not IBM bit numbering) of
+ * the most significant 1-bit in a double word.
+ */
+static __inline__ __attribute__((const))
+int __ilog2(unsigned long x)
+{
+ int lz;
+
+ asm (PPC_CNTLZL "%0,%1" : "=r" (lz) : "r" (x));
+ return BITS_PER_LONG - 1 - lz;
+}
+
+static inline __attribute__((const))
+int __ilog2_u32(u32 n)
+{
+ int bit;
+ asm ("cntlzw %0,%1" : "=r" (bit) : "r" (n));
+ return 31 - bit;
+}
+
+#ifdef __powerpc64__
+static inline __attribute__((const))
+int __ilog2_u64(u64 n)
+{
+ int bit;
+ asm ("cntlzd %0,%1" : "=r" (bit) : "r" (n));
+ return 63 - bit;
+}
+#endif
+
+/*
+ * Determines the bit position of the least significant 0 bit in the
+ * specified double word. The returned bit position will be
+ * zero-based, starting from the right side (63/31 - 0).
+ */
+static __inline__ unsigned long ffz(unsigned long x)
+{
+ /* no zero exists anywhere in the 8 byte area. */
+ if ((x = ~x) == 0)
+ return BITS_PER_LONG;
+
+ /*
+	 * Calculate the bit position of the least significant '1' bit in x
+	 * (since x has been changed this will actually be the least significant
+	 * '0' bit in the original x).  Note: (x & -x) gives us a mask that
+	 * is the least significant (RIGHT-most) 1-bit of the value in x.
+ */
+ return __ilog2(x & -x);
+}
+
+static __inline__ int __ffs(unsigned long x)
+{
+ return __ilog2(x & -x);
+}
+
+/*
+ * ffs: find first bit set. This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ */
+static __inline__ int ffs(int x)
+{
+ unsigned long i = (unsigned long)x;
+ return __ilog2(i & -i) + 1;
+}
+
+/*
+ * fls: find last (most-significant) bit set.
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
+ */
+static __inline__ int fls(unsigned int x)
+{
+ int lz;
+
+ asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
+ return 32 - lz;
+}
+
+static __inline__ unsigned long __fls(unsigned long x)
+{
+ return __ilog2(x);
+}
+
+/*
+ * 64-bit can do this using one cntlzd (count leading zeroes doubleword)
+ * instruction; for 32-bit we use the generic version, which does two
+ * 32-bit fls calls.
+ */
+#ifdef __powerpc64__
+static __inline__ int fls64(__u64 x)
+{
+ int lz;
+
+ asm ("cntlzd %0,%1" : "=r" (lz) : "r" (x));
+ return 64 - lz;
+}
+#else
+#include <asm-generic/bitops/fls64.h>
+#endif /* __powerpc64__ */
+
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/find.h>
+
+/* Little-endian versions */
+
+static __inline__ int test_le_bit(unsigned long nr,
+ __const__ unsigned long *addr)
+{
+ __const__ unsigned char *tmp = (__const__ unsigned char *) addr;
+ return (tmp[nr >> 3] >> (nr & 7)) & 1;
+}
+
+#define __set_le_bit(nr, addr) \
+ __set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+#define __clear_le_bit(nr, addr) \
+ __clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+
+#define test_and_set_le_bit(nr, addr) \
+ test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+#define test_and_clear_le_bit(nr, addr) \
+ test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+
+#define __test_and_set_le_bit(nr, addr) \
+ __test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+#define __test_and_clear_le_bit(nr, addr) \
+ __test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+
+#define find_first_zero_le_bit(addr, size) generic_find_next_zero_le_bit((addr), (size), 0)
+unsigned long generic_find_next_zero_le_bit(const unsigned long *addr,
+ unsigned long size, unsigned long offset);
+
+unsigned long generic_find_next_le_bit(const unsigned long *addr,
+ unsigned long size, unsigned long offset);
+/* Bitmap functions for the ext2 filesystem */
+
+#define ext2_set_bit(nr,addr) \
+ __test_and_set_le_bit((nr), (unsigned long*)addr)
+#define ext2_clear_bit(nr, addr) \
+ __test_and_clear_le_bit((nr), (unsigned long*)addr)
+
+#define ext2_set_bit_atomic(lock, nr, addr) \
+ test_and_set_le_bit((nr), (unsigned long*)addr)
+#define ext2_clear_bit_atomic(lock, nr, addr) \
+ test_and_clear_le_bit((nr), (unsigned long*)addr)
+
+#define ext2_test_bit(nr, addr) test_le_bit((nr),(unsigned long*)addr)
+
+#define ext2_find_first_zero_bit(addr, size) \
+ find_first_zero_le_bit((unsigned long*)addr, size)
+#define ext2_find_next_zero_bit(addr, size, off) \
+ generic_find_next_zero_le_bit((unsigned long*)addr, size, off)
+
+#define ext2_find_next_bit(addr, size, off) \
+ generic_find_next_le_bit((unsigned long *)addr, size, off)
+/* Bitmap functions for the minix filesystem. */
+
+#define minix_test_and_set_bit(nr,addr) \
+ __test_and_set_le_bit(nr, (unsigned long *)addr)
+#define minix_set_bit(nr,addr) \
+ __set_le_bit(nr, (unsigned long *)addr)
+#define minix_test_and_clear_bit(nr,addr) \
+ __test_and_clear_le_bit(nr, (unsigned long *)addr)
+#define minix_test_bit(nr,addr) \
+ test_le_bit(nr, (unsigned long *)addr)
+
+#define minix_find_first_zero_bit(addr,size) \
+ find_first_zero_le_bit((unsigned long *)addr, size)
+
+#include <asm-generic/bitops/sched.h>
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_POWERPC_BITOPS_H */
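
To make the bit-numbering comment at the top of this header concrete, here is a standalone sketch (assuming 64-bit longs; the EX_* macros mirror the BITOP_* ones above) of how a bit number maps to a word index, an in-word mask, and its little-endian twin via the 0x38 swizzle:

#include <stdio.h>

#define EX_BITS_PER_LONG	64
#define EX_BITOP_MASK(nr)	(1UL << ((nr) % EX_BITS_PER_LONG))
#define EX_BITOP_WORD(nr)	((nr) / EX_BITS_PER_LONG)
#define EX_BITOP_LE_SWIZZLE	((EX_BITS_PER_LONG - 1) & ~0x7)	/* 0x38 */

int main(void)
{
	unsigned long nr = 67;	/* bit 3 of the second 64-bit word */

	printf("bit %lu -> word %lu, mask 0x%lx\n",
	       nr, EX_BITOP_WORD(nr), EX_BITOP_MASK(nr));
	/*
	 * In the byte-oriented (little-endian) view, bit 3 of a word sits in
	 * byte 0, which a big-endian load places in bits 56..63 of the long;
	 * XOR with 0x38 performs exactly that remapping (3 ^ 0x38 = 59).
	 */
	printf("LE bit 3 maps to BE bit %d\n", 3 ^ EX_BITOP_LE_SWIZZLE);
	return 0;
}
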
diff --git a/arch/powerpc/include/asm/bootx.h b/arch/powerpc/include/asm/bootx.h
new file mode 100644
index 0000000..57b82e3
--- /dev/null
+++ b/arch/powerpc/include/asm/bootx.h
@@ -0,0 +1,171 @@
+/*
+ * This file describes the structure passed from the BootX application
+ * (for MacOS) when it is used to boot Linux.
+ *
+ * Written by Benjamin Herrenschmidt.
+ */
+
+
+#ifndef __ASM_BOOTX_H__
+#define __ASM_BOOTX_H__
+
+#include <asm/types.h>
+
+#ifdef macintosh
+#include <Types.h>
+#include "linux_type_defs.h"
+#endif
+
+#ifdef macintosh
+/* All this requires PowerPC alignment */
+#pragma options align=power
+#endif
+
+/* On kernel entry:
+ *
+ * r3 = 0x426f6f58 ('BooX')
+ * r4 = pointer to boot_infos
+ * r5 = NULL
+ *
+ * Data and instruction translation disabled, interrupts
+ * disabled, kernel loaded at physical 0x00000000 on PCI
+ * machines (will be different on NuBus).
+ */
+
+#define BOOT_INFO_VERSION 5
+#define BOOT_INFO_COMPATIBLE_VERSION 1
+
+/* Bit in the architecture flag mask. More to be defined in
+ future versions. Note that either BOOT_ARCH_PCI or
+ BOOT_ARCH_NUBUS is set. The other BOOT_ARCH_NUBUS_xxx are
+ set additionally when BOOT_ARCH_NUBUS is set.
+ */
+#define BOOT_ARCH_PCI 0x00000001UL
+#define BOOT_ARCH_NUBUS 0x00000002UL
+#define BOOT_ARCH_NUBUS_PDM 0x00000010UL
+#define BOOT_ARCH_NUBUS_PERFORMA 0x00000020UL
+#define BOOT_ARCH_NUBUS_POWERBOOK 0x00000040UL
+
+/* Maximum number of ranges in phys memory map */
+#define MAX_MEM_MAP_SIZE 26
+
+/* This is the format of an element in the physical memory map. Note that
+ the map is optional and current BootX will only build it for pre-PCI
+ machines */
+typedef struct boot_info_map_entry
+{
+ __u32 physAddr; /* Physical starting address */
+ __u32 size; /* Size in bytes */
+} boot_info_map_entry_t;
+
+
+/* Here is the boot information that is passed to the bootstrap.
+ * Note that the kernel arguments and the device tree are appended
+ * at the end of this structure. */
+typedef struct boot_infos
+{
+ /* Version of this structure */
+ __u32 version;
+ /* backward compatible down to version: */
+ __u32 compatible_version;
+
+ /* NEW (vers. 2) this holds the current _logical_ base addr of
+ the frame buffer (for use by early boot message) */
+ __u8* logicalDisplayBase;
+
+ /* NEW (vers. 4) Apple's machine identification */
+ __u32 machineID;
+
+ /* NEW (vers. 4) Detected hw architecture */
+ __u32 architecture;
+
+ /* The device tree (internal addresses relative to the beginning of the tree,
+ * device tree offset relative to the beginning of this structure).
+ * On pre-PCI macintosh (BOOT_ARCH_PCI bit set to 0 in architecture), this
+ * field is 0.
+ */
+ __u32 deviceTreeOffset; /* Device tree offset */
+ __u32 deviceTreeSize; /* Size of the device tree */
+
+ /* Some infos about the current MacOS display */
+ __u32 dispDeviceRect[4]; /* left,top,right,bottom */
+ __u32 dispDeviceDepth; /* (8, 16 or 32) */
+ __u8* dispDeviceBase; /* base address (physical) */
+ __u32 dispDeviceRowBytes; /* rowbytes (in bytes) */
+ __u32 dispDeviceColorsOffset; /* Colormap (8 bits only) or 0 (*) */
+ /* Optional offset in the registry to the current
+ * MacOS display. (Can be 0 when not detected) */
+ __u32 dispDeviceRegEntryOffset;
+
+ /* Optional pointer to boot ramdisk (offset from this structure) */
+ __u32 ramDisk;
+ __u32 ramDiskSize; /* size of ramdisk image */
+
+ /* Kernel command line arguments (offset from this structure) */
+ __u32 kernelParamsOffset;
+
+ /* ALL BELOW NEW (vers. 4) */
+
+ /* This defines the physical memory. Valid with BOOT_ARCH_NUBUS flag
+	   (non-PCI) only. On PCI, memory is contiguous and its size is in the
+ device-tree. */
+ boot_info_map_entry_t
+ physMemoryMap[MAX_MEM_MAP_SIZE]; /* Where the phys memory is */
+ __u32 physMemoryMapSize; /* How many entries in map */
+
+
+ /* The framebuffer size (optional, currently 0) */
+ __u32 frameBufferSize; /* Represents a max size, can be 0. */
+
+ /* NEW (vers. 5) */
+
+ /* Total params size (args + colormap + device tree + ramdisk) */
+ __u32 totalParamsSize;
+
+} boot_infos_t;
+
+#ifdef __KERNEL__
+/* (*) The format of the colormap is 256 * 3 * 2 bytes. Each color index
+ * is represented by 3 short words, each containing a 16-bit (unsigned) color
+ * component. Later versions may contain the gamma table for direct-color
+ * devices here.
+ */
+#define BOOTX_COLORTABLE_SIZE (256UL*3UL*2UL)
+
+/* BootX passes the device-tree using a format that comes from earlier
+ * ppc32 kernels. This used to match what is in prom.h, but not anymore
+ * so we now define it here
+ */
+struct bootx_dt_prop {
+ u32 name;
+ int length;
+ u32 value;
+ u32 next;
+};
+
+struct bootx_dt_node {
+ u32 unused0;
+ u32 unused1;
+ u32 phandle; /* not really available */
+ u32 unused2;
+ u32 unused3;
+ u32 unused4;
+ u32 unused5;
+ u32 full_name;
+ u32 properties;
+ u32 parent;
+ u32 child;
+ u32 sibling;
+ u32 next;
+ u32 allnext;
+};
+
+extern void bootx_init(unsigned long r4, unsigned long phys);
+
+#endif /* __KERNEL__ */
+
+#ifdef macintosh
+#pragma options align=reset
+#endif
+
+#endif
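
Since every variable-length blob in boot_infos_t is described by an offset from the start of the structure itself, consumers recover plain pointers with simple arithmetic. A sketch (the example_* helpers are invented for illustration):

/* Kernel command line, appended after the structure. */
static char *example_bootx_args(boot_infos_t *bi)
{
	return (char *)bi + bi->kernelParamsOffset;
}

/* Device tree; the offset is 0 on pre-PCI (NuBus) machines. */
static void *example_bootx_device_tree(boot_infos_t *bi)
{
	if (!bi->deviceTreeOffset)
		return NULL;
	return (char *)bi + bi->deviceTreeOffset;
}
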
diff --git a/arch/powerpc/include/asm/btext.h b/arch/powerpc/include/asm/btext.h
new file mode 100644
index 0000000..906f46e
--- /dev/null
+++ b/arch/powerpc/include/asm/btext.h
@@ -0,0 +1,28 @@
+/*
+ * Definitions for using the procedures in btext.c.
+ *
+ * Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ */
+#ifndef __PPC_BTEXT_H
+#define __PPC_BTEXT_H
+#ifdef __KERNEL__
+
+extern int btext_find_display(int allow_nonstdout);
+extern void btext_update_display(unsigned long phys, int width, int height,
+ int depth, int pitch);
+extern void btext_setup_display(int width, int height, int depth, int pitch,
+ unsigned long address);
+extern void btext_prepare_BAT(void);
+extern void btext_unmap(void);
+
+extern void btext_drawchar(char c);
+extern void btext_drawstring(const char *str);
+extern void btext_drawhex(unsigned long v);
+extern void btext_drawtext(const char *c, unsigned int len);
+
+extern void btext_clearscreen(void);
+extern void btext_flushscreen(void);
+extern void btext_flushline(void);
+
+#endif /* __KERNEL__ */
+#endif /* __PPC_BTEXT_H */
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
new file mode 100644
index 0000000..e55d1f6
--- /dev/null
+++ b/arch/powerpc/include/asm/bug.h
@@ -0,0 +1,121 @@
+#ifndef _ASM_POWERPC_BUG_H
+#define _ASM_POWERPC_BUG_H
+#ifdef __KERNEL__
+
+#include <asm/asm-compat.h>
+/*
+ * Define an illegal instr to trap on the bug.
+ * We don't use 0 because that marks the end of a function
+ * in the ELF ABI. That's "Boo Boo" in case you wonder...
+ */
+#define BUG_OPCODE .long 0x00b00b00 /* For asm */
+#define BUG_ILLEGAL_INSTR "0x00b00b00" /* For BUG macro */
+
+#ifdef CONFIG_BUG
+
+#ifdef __ASSEMBLY__
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+.macro EMIT_BUG_ENTRY addr,file,line,flags
+ .section __bug_table,"a"
+5001: PPC_LONG \addr, 5002f
+ .short \line, \flags
+ .org 5001b+BUG_ENTRY_SIZE
+ .previous
+ .section .rodata,"a"
+5002: .asciz "\file"
+ .previous
+.endm
+#else
+ .macro EMIT_BUG_ENTRY addr,file,line,flags
+ .section __bug_table,"a"
+5001: PPC_LONG \addr
+ .short \flags
+ .org 5001b+BUG_ENTRY_SIZE
+ .previous
+.endm
+#endif /* verbose */
+
+#else /* !__ASSEMBLY__ */
+/* _EMIT_BUG_ENTRY expects args %0,%1,%2,%3 to be FILE, LINE, flags and
+ sizeof(struct bug_entry), respectively */
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+#define _EMIT_BUG_ENTRY \
+ ".section __bug_table,\"a\"\n" \
+ "2:\t" PPC_LONG "1b, %0\n" \
+ "\t.short %1, %2\n" \
+ ".org 2b+%3\n" \
+ ".previous\n"
+#else
+#define _EMIT_BUG_ENTRY \
+ ".section __bug_table,\"a\"\n" \
+ "2:\t" PPC_LONG "1b\n" \
+ "\t.short %2\n" \
+ ".org 2b+%3\n" \
+ ".previous\n"
+#endif
+
+/*
+ * BUG_ON() and WARN_ON() do their best to cooperate with compile-time
+ * optimisations. However depending on the complexity of the condition
+ * some compiler versions may not produce optimal results.
+ */
+
+#define BUG() do { \
+ __asm__ __volatile__( \
+ "1: twi 31,0,0\n" \
+ _EMIT_BUG_ENTRY \
+ : : "i" (__FILE__), "i" (__LINE__), \
+ "i" (0), "i" (sizeof(struct bug_entry))); \
+ for(;;) ; \
+} while (0)
+
+#define BUG_ON(x) do { \
+ if (__builtin_constant_p(x)) { \
+ if (x) \
+ BUG(); \
+ } else { \
+ __asm__ __volatile__( \
+ "1: "PPC_TLNEI" %4,0\n" \
+ _EMIT_BUG_ENTRY \
+ : : "i" (__FILE__), "i" (__LINE__), "i" (0), \
+ "i" (sizeof(struct bug_entry)), \
+ "r" ((__force long)(x))); \
+ } \
+} while (0)
+
+#define __WARN() do { \
+ __asm__ __volatile__( \
+ "1: twi 31,0,0\n" \
+ _EMIT_BUG_ENTRY \
+ : : "i" (__FILE__), "i" (__LINE__), \
+ "i" (BUGFLAG_WARNING), \
+ "i" (sizeof(struct bug_entry))); \
+} while (0)
+
+#define WARN_ON(x) ({ \
+ int __ret_warn_on = !!(x); \
+ if (__builtin_constant_p(__ret_warn_on)) { \
+ if (__ret_warn_on) \
+ __WARN(); \
+ } else { \
+ __asm__ __volatile__( \
+ "1: "PPC_TLNEI" %4,0\n" \
+ _EMIT_BUG_ENTRY \
+ : : "i" (__FILE__), "i" (__LINE__), \
+ "i" (BUGFLAG_WARNING), \
+ "i" (sizeof(struct bug_entry)), \
+ "r" (__ret_warn_on)); \
+ } \
+ unlikely(__ret_warn_on); \
+})
+
+#define HAVE_ARCH_BUG
+#define HAVE_ARCH_BUG_ON
+#define HAVE_ARCH_WARN_ON
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_BUG */
+
+#include <asm-generic/bug.h>
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_BUG_H */
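
A usage sketch (hypothetical driver code; struct example_dev is invented, and the usual errno definitions are assumed): BUG_ON() traps hard on a condition that must never hold, while WARN_ON() evaluates to its condition, so it can gate an ordinary error return.

struct example_dev {
	int irq;
};

static int example_start(struct example_dev *dev)
{
	BUG_ON(dev == NULL);		/* programming error: stop dead */

	if (WARN_ON(dev->irq < 0))	/* unexpected but recoverable */
		return -EINVAL;

	return 0;
}
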
diff --git a/arch/powerpc/include/asm/bugs.h b/arch/powerpc/include/asm/bugs.h
new file mode 100644
index 0000000..42fdb73
--- /dev/null
+++ b/arch/powerpc/include/asm/bugs.h
@@ -0,0 +1,18 @@
+#ifndef _ASM_POWERPC_BUGS_H
+#define _ASM_POWERPC_BUGS_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/*
+ * This file is included by 'init/main.c' to check for
+ * architecture-dependent bugs.
+ */
+
+static inline void check_bugs(void) { }
+
+#endif /* _ASM_POWERPC_BUGS_H */
diff --git a/arch/powerpc/include/asm/byteorder.h b/arch/powerpc/include/asm/byteorder.h
new file mode 100644
index 0000000..b377522
--- /dev/null
+++ b/arch/powerpc/include/asm/byteorder.h
@@ -0,0 +1,89 @@
+#ifndef _ASM_POWERPC_BYTEORDER_H
+#define _ASM_POWERPC_BYTEORDER_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/types.h>
+#include <linux/compiler.h>
+
+#ifdef __GNUC__
+#ifdef __KERNEL__
+
+static __inline__ __u16 ld_le16(const volatile __u16 *addr)
+{
+ __u16 val;
+
+ __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
+ return val;
+}
+
+static __inline__ void st_le16(volatile __u16 *addr, const __u16 val)
+{
+ __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
+}
+
+static __inline__ __u32 ld_le32(const volatile __u32 *addr)
+{
+ __u32 val;
+
+ __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
+ return val;
+}
+
+static __inline__ void st_le32(volatile __u32 *addr, const __u32 val)
+{
+ __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
+}
+
+static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 value)
+{
+ __u16 result;
+
+ __asm__("rlwimi %0,%1,8,16,23"
+ : "=r" (result)
+ : "r" (value), "0" (value >> 8));
+ return result;
+}
+
+static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 value)
+{
+ __u32 result;
+
+ __asm__("rlwimi %0,%1,24,16,23\n\t"
+ "rlwimi %0,%1,8,8,15\n\t"
+ "rlwimi %0,%1,24,0,7"
+ : "=r" (result)
+ : "r" (value), "0" (value >> 24));
+ return result;
+}
+
+#define __arch__swab16(x) ___arch__swab16(x)
+#define __arch__swab32(x) ___arch__swab32(x)
+
+/* The same, but returns converted value from the location pointer by addr. */
+#define __arch__swab16p(addr) ld_le16(addr)
+#define __arch__swab32p(addr) ld_le32(addr)
+
+/* The same, but do the conversion in situ, ie. put the value back to addr. */
+#define __arch__swab16s(addr) st_le16(addr,*addr)
+#define __arch__swab32s(addr) st_le32(addr,*addr)
+
+#endif /* __KERNEL__ */
+
+#ifndef __STRICT_ANSI__
+#define __BYTEORDER_HAS_U64__
+#ifndef __powerpc64__
+#define __SWAB_64_THRU_32__
+#endif /* __powerpc64__ */
+#endif /* __STRICT_ANSI__ */
+
+#endif /* __GNUC__ */
+
+#include <linux/byteorder/big_endian.h>
+
+#endif /* _ASM_POWERPC_BYTEORDER_H */
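
The three rlwimi instructions in ___arch__swab32() implement an ordinary 32-bit byte swap; a portable reference version (a sketch with no inline asm, using standard C only) makes the intent easy to check:

#include <assert.h>
#include <stdint.h>

static uint32_t example_swab32(uint32_t x)
{
	/* move each byte to its mirrored position */
	return ((x & 0x000000ffu) << 24) |
	       ((x & 0x0000ff00u) <<  8) |
	       ((x & 0x00ff0000u) >>  8) |
	       ((x & 0xff000000u) >> 24);
}

int main(void)
{
	assert(example_swab32(0x12345678u) == 0x78563412u);
	return 0;
}
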
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
new file mode 100644
index 0000000..81de6eb
--- /dev/null
+++ b/arch/powerpc/include/asm/cache.h
@@ -0,0 +1,45 @@
+#ifndef _ASM_POWERPC_CACHE_H
+#define _ASM_POWERPC_CACHE_H
+
+#ifdef __KERNEL__
+
+
+/* bytes per L1 cache line */
+#if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
+#define L1_CACHE_SHIFT 4
+#define MAX_COPY_PREFETCH 1
+#elif defined(CONFIG_PPC_E500MC)
+#define L1_CACHE_SHIFT 6
+#define MAX_COPY_PREFETCH 4
+#elif defined(CONFIG_PPC32)
+#define L1_CACHE_SHIFT 5
+#define MAX_COPY_PREFETCH 4
+#else /* CONFIG_PPC64 */
+#define L1_CACHE_SHIFT 7
+#endif
+
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+#define SMP_CACHE_BYTES L1_CACHE_BYTES
+
+#if defined(__powerpc64__) && !defined(__ASSEMBLY__)
+struct ppc64_caches {
+ u32 dsize; /* L1 d-cache size */
+ u32 dline_size; /* L1 d-cache line size */
+ u32 log_dline_size;
+ u32 dlines_per_page;
+ u32 isize; /* L1 i-cache size */
+ u32 iline_size; /* L1 i-cache line size */
+ u32 log_iline_size;
+ u32 ilines_per_page;
+};
+
+extern struct ppc64_caches ppc64_caches;
+#endif /* __powerpc64__ && ! __ASSEMBLY__ */
+
+#if !defined(__ASSEMBLY__)
+#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_CACHE_H */
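
For reference, the shift-to-bytes arithmetic above works out as follows: the 64-bit default L1_CACHE_SHIFT of 7 gives 1 << 7 = 128-byte lines, the e500mc value of 6 gives 64 bytes, the classic 32-bit value of 5 gives 32 bytes, and the 8xx/403GCX value of 4 gives 16 bytes.
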
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
new file mode 100644
index 0000000..ba667a383
--- /dev/null
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -0,0 +1,75 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_POWERPC_CACHEFLUSH_H
+#define _ASM_POWERPC_CACHEFLUSH_H
+
+#ifdef __KERNEL__
+
+#include <linux/mm.h>
+#include <asm/cputable.h>
+
+/*
+ * No cache flushing is required when address mappings are changed,
+ * because the caches on PowerPCs are physically addressed.
+ */
+#define flush_cache_all() do { } while (0)
+#define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
+#define flush_cache_range(vma, start, end) do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
+#define flush_icache_page(vma, page) do { } while (0)
+#define flush_cache_vmap(start, end) do { } while (0)
+#define flush_cache_vunmap(start, end) do { } while (0)
+
+extern void flush_dcache_page(struct page *page);
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+
+extern void __flush_icache_range(unsigned long, unsigned long);
+static inline void flush_icache_range(unsigned long start, unsigned long stop)
+{
+ if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
+ __flush_icache_range(start, stop);
+}
+
+extern void flush_icache_user_range(struct vm_area_struct *vma,
+ struct page *page, unsigned long addr,
+ int len);
+extern void __flush_dcache_icache(void *page_va);
+extern void flush_dcache_icache_page(struct page *page);
+#if defined(CONFIG_PPC32) && !defined(CONFIG_BOOKE)
+extern void __flush_dcache_icache_phys(unsigned long physaddr);
+#endif /* CONFIG_PPC32 && !CONFIG_BOOKE */
+
+extern void flush_dcache_range(unsigned long start, unsigned long stop);
+#ifdef CONFIG_PPC32
+extern void clean_dcache_range(unsigned long start, unsigned long stop);
+extern void invalidate_dcache_range(unsigned long start, unsigned long stop);
+#endif /* CONFIG_PPC32 */
+#ifdef CONFIG_PPC64
+extern void flush_inval_dcache_range(unsigned long start, unsigned long stop);
+extern void flush_dcache_phys_range(unsigned long start, unsigned long stop);
+#endif
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+ do { \
+ memcpy(dst, src, len); \
+ flush_icache_user_range(vma, page, vaddr, len); \
+ } while (0)
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy(dst, src, len)
+
+
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+/* internal debugging function */
+void kernel_map_pages(struct page *page, int numpages, int enable);
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_POWERPC_CACHEFLUSH_H */
diff --git a/arch/powerpc/include/asm/cell-pmu.h b/arch/powerpc/include/asm/cell-pmu.h
new file mode 100644
index 0000000..8066eed
--- /dev/null
+++ b/arch/powerpc/include/asm/cell-pmu.h
@@ -0,0 +1,105 @@
+/*
+ * Cell Broadband Engine Performance Monitor
+ *
+ * (C) Copyright IBM Corporation 2006
+ *
+ * Author:
+ * David Erb (djerb@us.ibm.com)
+ * Kevin Corry (kevcorry@us.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __ASM_CELL_PMU_H__
+#define __ASM_CELL_PMU_H__
+
+/* The Cell PMU has four hardware performance counters, which can be
+ * configured as four 32-bit counters or eight 16-bit counters.
+ */
+#define NR_PHYS_CTRS 4
+#define NR_CTRS (NR_PHYS_CTRS * 2)
+
+/* Macros for the pm_control register. */
+#define CBE_PM_16BIT_CTR(ctr) (1 << (24 - ((ctr) & (NR_PHYS_CTRS - 1))))
+#define CBE_PM_ENABLE_PERF_MON 0x80000000
+#define CBE_PM_STOP_AT_MAX 0x40000000
+#define CBE_PM_TRACE_MODE_GET(pm_control) (((pm_control) >> 28) & 0x3)
+#define CBE_PM_TRACE_MODE_SET(mode) (((mode) & 0x3) << 28)
+#define CBE_PM_COUNT_MODE_SET(count) (((count) & 0x3) << 18)
+#define CBE_PM_FREEZE_ALL_CTRS 0x00100000
+#define CBE_PM_ENABLE_EXT_TRACE 0x00008000
+
+/* Macros for the trace_address register. */
+#define CBE_PM_TRACE_BUF_FULL 0x00000800
+#define CBE_PM_TRACE_BUF_EMPTY 0x00000400
+#define CBE_PM_TRACE_BUF_DATA_COUNT(ta) ((ta) & 0x3ff)
+#define CBE_PM_TRACE_BUF_MAX_COUNT 0x400
+
+/* Macros for the pm07_control registers. */
+#define CBE_PM_CTR_INPUT_MUX(pm07_control) (((pm07_control) >> 26) & 0x3f)
+#define CBE_PM_CTR_INPUT_CONTROL 0x02000000
+#define CBE_PM_CTR_POLARITY 0x01000000
+#define CBE_PM_CTR_COUNT_CYCLES 0x00800000
+#define CBE_PM_CTR_ENABLE 0x00400000
+#define PM07_CTR_INPUT_MUX(x) (((x) & 0x3F) << 26)
+#define PM07_CTR_INPUT_CONTROL(x) (((x) & 1) << 25)
+#define PM07_CTR_POLARITY(x) (((x) & 1) << 24)
+#define PM07_CTR_COUNT_CYCLES(x) (((x) & 1) << 23)
+#define PM07_CTR_ENABLE(x) (((x) & 1) << 22)
+
+/* Macros for the pm_status register. */
+#define CBE_PM_CTR_OVERFLOW_INTR(ctr) (1 << (31 - ((ctr) & 7)))
+
+enum pm_reg_name {
+ group_control,
+ debug_bus_control,
+ trace_address,
+ ext_tr_timer,
+ pm_status,
+ pm_control,
+ pm_interval,
+ pm_start_stop,
+};
+
+/* Routines for reading/writing the PMU registers. */
+extern u32 cbe_read_phys_ctr(u32 cpu, u32 phys_ctr);
+extern void cbe_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val);
+extern u32 cbe_read_ctr(u32 cpu, u32 ctr);
+extern void cbe_write_ctr(u32 cpu, u32 ctr, u32 val);
+
+extern u32 cbe_read_pm07_control(u32 cpu, u32 ctr);
+extern void cbe_write_pm07_control(u32 cpu, u32 ctr, u32 val);
+extern u32 cbe_read_pm(u32 cpu, enum pm_reg_name reg);
+extern void cbe_write_pm(u32 cpu, enum pm_reg_name reg, u32 val);
+
+extern u32 cbe_get_ctr_size(u32 cpu, u32 phys_ctr);
+extern void cbe_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size);
+
+extern void cbe_enable_pm(u32 cpu);
+extern void cbe_disable_pm(u32 cpu);
+
+extern void cbe_read_trace_buffer(u32 cpu, u64 *buf);
+
+extern void cbe_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask);
+extern void cbe_disable_pm_interrupts(u32 cpu);
+extern u32 cbe_get_and_clear_pm_interrupts(u32 cpu);
+extern void cbe_sync_irq(int node);
+
+#define CBE_COUNT_SUPERVISOR_MODE 0
+#define CBE_COUNT_HYPERVISOR_MODE 1
+#define CBE_COUNT_PROBLEM_MODE 2
+#define CBE_COUNT_ALL_MODES 3
+
+#endif /* __ASM_CELL_PMU_H__ */
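
A composition sketch (a hypothetical configuration, not taken from the patch): the pm_control macros above are plain bit-field builders, so a typical setup ORs them together and hands the result to cbe_write_pm() with the pm_control register selector.

static void example_setup_pmu(u32 cpu)
{
	u32 val = CBE_PM_ENABLE_PERF_MON |
		  CBE_PM_STOP_AT_MAX |		/* stop when a counter saturates */
		  CBE_PM_COUNT_MODE_SET(CBE_COUNT_HYPERVISOR_MODE);

	cbe_write_pm(cpu, pm_control, val);	/* pm_control from enum pm_reg_name */
}
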
diff --git a/arch/powerpc/include/asm/cell-regs.h b/arch/powerpc/include/asm/cell-regs.h
new file mode 100644
index 0000000..fd6fd00
--- /dev/null
+++ b/arch/powerpc/include/asm/cell-regs.h
@@ -0,0 +1,315 @@
+/*
+ * cbe_regs.h
+ *
+ * This file is intended to hold the various register definitions for CBE
+ * on-chip system devices (memory controller, IO controller, etc...)
+ *
+ * (C) Copyright IBM Corporation 2001,2006
+ *
+ * Authors: Maximino Aguilar (maguilar@us.ibm.com)
+ * David J. Erb (djerb@us.ibm.com)
+ *
+ * (c) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
+ */
+
+#ifndef CBE_REGS_H
+#define CBE_REGS_H
+
+#include <asm/cell-pmu.h>
+
+/*
+ *
+ * Some HID register definitions
+ *
+ */
+
+/* CBE specific HID0 bits */
+#define HID0_CBE_THERM_WAKEUP 0x0000020000000000ul
+#define HID0_CBE_SYSERR_WAKEUP 0x0000008000000000ul
+#define HID0_CBE_THERM_INT_EN 0x0000000400000000ul
+#define HID0_CBE_SYSERR_INT_EN 0x0000000200000000ul
+
+#define MAX_CBE 2
+
+/*
+ *
+ * Pervasive unit register definitions
+ *
+ */
+
+union spe_reg {
+ u64 val;
+ u8 spe[8];
+};
+
+union ppe_spe_reg {
+ u64 val;
+ struct {
+ u32 ppe;
+ u32 spe;
+ };
+};
+
+
+struct cbe_pmd_regs {
+ /* Debug Bus Control */
+ u64 pad_0x0000; /* 0x0000 */
+
+ u64 group_control; /* 0x0008 */
+
+ u8 pad_0x0010_0x00a8 [0x00a8 - 0x0010]; /* 0x0010 */
+
+ u64 debug_bus_control; /* 0x00a8 */
+
+ u8 pad_0x00b0_0x0100 [0x0100 - 0x00b0]; /* 0x00b0 */
+
+ u64 trace_aux_data; /* 0x0100 */
+ u64 trace_buffer_0_63; /* 0x0108 */
+ u64 trace_buffer_64_127; /* 0x0110 */
+ u64 trace_address; /* 0x0118 */
+ u64 ext_tr_timer; /* 0x0120 */
+
+ u8 pad_0x0128_0x0400 [0x0400 - 0x0128]; /* 0x0128 */
+
+ /* Performance Monitor */
+ u64 pm_status; /* 0x0400 */
+ u64 pm_control; /* 0x0408 */
+ u64 pm_interval; /* 0x0410 */
+ u64 pm_ctr[4]; /* 0x0418 */
+ u64 pm_start_stop; /* 0x0438 */
+ u64 pm07_control[8]; /* 0x0440 */
+
+ u8 pad_0x0480_0x0800 [0x0800 - 0x0480]; /* 0x0480 */
+
+ /* Thermal Sensor Registers */
+ union spe_reg ts_ctsr1; /* 0x0800 */
+ u64 ts_ctsr2; /* 0x0808 */
+ union spe_reg ts_mtsr1; /* 0x0810 */
+ u64 ts_mtsr2; /* 0x0818 */
+ union spe_reg ts_itr1; /* 0x0820 */
+ u64 ts_itr2; /* 0x0828 */
+ u64 ts_gitr; /* 0x0830 */
+ u64 ts_isr; /* 0x0838 */
+ u64 ts_imr; /* 0x0840 */
+ union spe_reg tm_cr1; /* 0x0848 */
+ u64 tm_cr2; /* 0x0850 */
+ u64 tm_simr; /* 0x0858 */
+ union ppe_spe_reg tm_tpr; /* 0x0860 */
+ union spe_reg tm_str1; /* 0x0868 */
+ u64 tm_str2; /* 0x0870 */
+ union ppe_spe_reg tm_tsr; /* 0x0878 */
+
+ /* Power Management */
+ u64 pmcr; /* 0x0880 */
+#define CBE_PMD_PAUSE_ZERO_CONTROL 0x10000
+ u64 pmsr; /* 0x0888 */
+
+ /* Time Base Register */
+ u64 tbr; /* 0x0890 */
+
+ u8 pad_0x0898_0x0c00 [0x0c00 - 0x0898]; /* 0x0898 */
+
+ /* Fault Isolation Registers */
+ u64 checkstop_fir; /* 0x0c00 */
+ u64 recoverable_fir; /* 0x0c08 */
+ u64 spec_att_mchk_fir; /* 0x0c10 */
+ u32 fir_mode_reg; /* 0x0c18 */
+ u8 pad_0x0c1c_0x0c20 [4]; /* 0x0c1c */
+#define CBE_PMD_FIR_MODE_M8 0x00800
+ u64 fir_enable_mask; /* 0x0c20 */
+
+ u8 pad_0x0c28_0x0ca8 [0x0ca8 - 0x0c28]; /* 0x0c28 */
+ u64 ras_esc_0; /* 0x0ca8 */
+ u8 pad_0x0cb0_0x1000 [0x1000 - 0x0cb0]; /* 0x0cb0 */
+};
+
+extern struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np);
+extern struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu);
+
+/*
+ * PMU shadow registers
+ *
+ * Many of the registers in the performance monitoring unit are write-only,
+ * so we need to save a copy of what we write to those registers.
+ *
+ * The actual data counters are read/write. However, writing to the counters
+ * only takes effect if the PMU is enabled. Otherwise the value is stored in
+ * a hardware latch until the next time the PMU is enabled. So we save a copy
+ * of the counter values if we need to read them back while the PMU is
+ * disabled. The counter_value_in_latch field is a bitmap indicating which
+ * counters currently have a value waiting to be written.
+ */
+
+struct cbe_pmd_shadow_regs {
+ u32 group_control;
+ u32 debug_bus_control;
+ u32 trace_address;
+ u32 ext_tr_timer;
+ u32 pm_status;
+ u32 pm_control;
+ u32 pm_interval;
+ u32 pm_start_stop;
+ u32 pm07_control[NR_CTRS];
+
+ u32 pm_ctr[NR_PHYS_CTRS];
+ u32 counter_value_in_latch;
+};
+
+extern struct cbe_pmd_shadow_regs *cbe_get_pmd_shadow_regs(struct device_node *np);
+extern struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu);
+
+/*
+ *
+ * IIC unit register definitions
+ *
+ */
+
+struct cbe_iic_pending_bits {
+ u32 data;
+ u8 flags;
+ u8 class;
+ u8 source;
+ u8 prio;
+};
+
+#define CBE_IIC_IRQ_VALID 0x80
+#define CBE_IIC_IRQ_IPI 0x40
+
+struct cbe_iic_thread_regs {
+ struct cbe_iic_pending_bits pending;
+ struct cbe_iic_pending_bits pending_destr;
+ u64 generate;
+ u64 prio;
+};
+
+struct cbe_iic_regs {
+ u8 pad_0x0000_0x0400[0x0400 - 0x0000]; /* 0x0000 */
+
+ /* IIC interrupt registers */
+ struct cbe_iic_thread_regs thread[2]; /* 0x0400 */
+
+ u64 iic_ir; /* 0x0440 */
+#define CBE_IIC_IR_PRIO(x) (((x) & 0xf) << 12)
+#define CBE_IIC_IR_DEST_NODE(x) (((x) & 0xf) << 4)
+#define CBE_IIC_IR_DEST_UNIT(x) ((x) & 0xf)
+#define CBE_IIC_IR_IOC_0 0x0
+#define CBE_IIC_IR_IOC_1S 0xb
+#define CBE_IIC_IR_PT_0 0xe
+#define CBE_IIC_IR_PT_1 0xf
+
+ u64 iic_is; /* 0x0448 */
+#define CBE_IIC_IS_PMI 0x2
+
+ u8 pad_0x0450_0x0500[0x0500 - 0x0450]; /* 0x0450 */
+
+ /* IOC FIR */
+ u64 ioc_fir_reset; /* 0x0500 */
+ u64 ioc_fir_set; /* 0x0508 */
+ u64 ioc_checkstop_enable; /* 0x0510 */
+ u64 ioc_fir_error_mask; /* 0x0518 */
+ u64 ioc_syserr_enable; /* 0x0520 */
+ u64 ioc_fir; /* 0x0528 */
+
+ u8 pad_0x0530_0x1000[0x1000 - 0x0530]; /* 0x0530 */
+};
+
+extern struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np);
+extern struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu);
+
+
+struct cbe_mic_tm_regs {
+ u8 pad_0x0000_0x0040[0x0040 - 0x0000]; /* 0x0000 */
+
+ u64 mic_ctl_cnfg2; /* 0x0040 */
+#define CBE_MIC_ENABLE_AUX_TRC 0x8000000000000000LL
+#define CBE_MIC_DISABLE_PWR_SAV_2 0x0200000000000000LL
+#define CBE_MIC_DISABLE_AUX_TRC_WRAP 0x0100000000000000LL
+#define CBE_MIC_ENABLE_AUX_TRC_INT 0x0080000000000000LL
+
+ u64 pad_0x0048; /* 0x0048 */
+
+ u64 mic_aux_trc_base; /* 0x0050 */
+ u64 mic_aux_trc_max_addr; /* 0x0058 */
+ u64 mic_aux_trc_cur_addr; /* 0x0060 */
+ u64 mic_aux_trc_grf_addr; /* 0x0068 */
+ u64 mic_aux_trc_grf_data; /* 0x0070 */
+
+ u64 pad_0x0078; /* 0x0078 */
+
+ u64 mic_ctl_cnfg_0; /* 0x0080 */
+#define CBE_MIC_DISABLE_PWR_SAV_0 0x8000000000000000LL
+
+ u64 pad_0x0088; /* 0x0088 */
+
+ u64 slow_fast_timer_0; /* 0x0090 */
+ u64 slow_next_timer_0; /* 0x0098 */
+
+ u8 pad_0x00a0_0x00f8[0x00f8 - 0x00a0]; /* 0x00a0 */
+ u64 mic_df_ecc_address_0; /* 0x00f8 */
+
+ u8 pad_0x0100_0x01b8[0x01b8 - 0x0100]; /* 0x0100 */
+ u64 mic_df_ecc_address_1; /* 0x01b8 */
+
+ u64 mic_ctl_cnfg_1; /* 0x01c0 */
+#define CBE_MIC_DISABLE_PWR_SAV_1 0x8000000000000000LL
+
+ u64 pad_0x01c8; /* 0x01c8 */
+
+ u64 slow_fast_timer_1; /* 0x01d0 */
+ u64 slow_next_timer_1; /* 0x01d8 */
+
+ u8 pad_0x01e0_0x0208[0x0208 - 0x01e0]; /* 0x01e0 */
+ u64 mic_exc; /* 0x0208 */
+#define CBE_MIC_EXC_BLOCK_SCRUB 0x0800000000000000ULL
+#define CBE_MIC_EXC_FAST_SCRUB 0x0100000000000000ULL
+
+ u64 mic_mnt_cfg; /* 0x0210 */
+#define CBE_MIC_MNT_CFG_CHAN_0_POP 0x0002000000000000ULL
+#define CBE_MIC_MNT_CFG_CHAN_1_POP 0x0004000000000000ULL
+
+ u64 mic_df_config; /* 0x0218 */
+#define CBE_MIC_ECC_DISABLE_0 0x4000000000000000ULL
+#define CBE_MIC_ECC_REP_SINGLE_0 0x2000000000000000ULL
+#define CBE_MIC_ECC_DISABLE_1 0x0080000000000000ULL
+#define CBE_MIC_ECC_REP_SINGLE_1 0x0040000000000000ULL
+
+ u8 pad_0x0220_0x0230[0x0230 - 0x0220]; /* 0x0220 */
+ u64 mic_fir; /* 0x0230 */
+#define CBE_MIC_FIR_ECC_SINGLE_0_ERR 0x0200000000000000ULL
+#define CBE_MIC_FIR_ECC_MULTI_0_ERR 0x0100000000000000ULL
+#define CBE_MIC_FIR_ECC_SINGLE_1_ERR 0x0080000000000000ULL
+#define CBE_MIC_FIR_ECC_MULTI_1_ERR 0x0040000000000000ULL
+#define CBE_MIC_FIR_ECC_ERR_MASK 0xffff000000000000ULL
+#define CBE_MIC_FIR_ECC_SINGLE_0_CTE 0x0000020000000000ULL
+#define CBE_MIC_FIR_ECC_MULTI_0_CTE 0x0000010000000000ULL
+#define CBE_MIC_FIR_ECC_SINGLE_1_CTE 0x0000008000000000ULL
+#define CBE_MIC_FIR_ECC_MULTI_1_CTE 0x0000004000000000ULL
+#define CBE_MIC_FIR_ECC_CTE_MASK 0x0000ffff00000000ULL
+#define CBE_MIC_FIR_ECC_SINGLE_0_RESET 0x0000000002000000ULL
+#define CBE_MIC_FIR_ECC_MULTI_0_RESET 0x0000000001000000ULL
+#define CBE_MIC_FIR_ECC_SINGLE_1_RESET 0x0000000000800000ULL
+#define CBE_MIC_FIR_ECC_MULTI_1_RESET 0x0000000000400000ULL
+#define CBE_MIC_FIR_ECC_RESET_MASK 0x00000000ffff0000ULL
+#define CBE_MIC_FIR_ECC_SINGLE_0_SET 0x0000000000000200ULL
+#define CBE_MIC_FIR_ECC_MULTI_0_SET 0x0000000000000100ULL
+#define CBE_MIC_FIR_ECC_SINGLE_1_SET 0x0000000000000080ULL
+#define CBE_MIC_FIR_ECC_MULTI_1_SET 0x0000000000000040ULL
+#define CBE_MIC_FIR_ECC_SET_MASK 0x000000000000ffffULL
+ u64 mic_fir_debug; /* 0x0238 */
+
+ u8 pad_0x0240_0x1000[0x1000 - 0x0240]; /* 0x0240 */
+};
+
+extern struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np);
+extern struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu);
+
+/* some utility functions to deal with SMT */
+extern u32 cbe_get_hw_thread_id(int cpu);
+extern u32 cbe_cpu_to_node(int cpu);
+extern u32 cbe_node_to_cpu(int node);
+
+/* Init this module early */
+extern void cbe_regs_init(void);
+
+
+#endif /* CBE_REGS_H */
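
To illustrate the shadow-register scheme described above, a hypothetical helper (the real accessors live in the Cell platform code; out_be64() is assumed to be the usual powerpc MMIO store): every write lands in the shadow copy as well as the hardware register, so the last written value can be read back later even though the register itself is write-only.

static void example_write_pm_interval(struct cbe_pmd_shadow_regs *shadow,
				      struct cbe_pmd_regs __iomem *regs,
				      u32 val)
{
	shadow->pm_interval = val;		/* software copy for later reads */
	out_be64(&regs->pm_interval, val);	/* write-only hardware register */
}
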
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
new file mode 100644
index 0000000..7cdf358
--- /dev/null
+++ b/arch/powerpc/include/asm/checksum.h
@@ -0,0 +1,117 @@
+#ifndef _ASM_POWERPC_CHECKSUM_H
+#define _ASM_POWERPC_CHECKSUM_H
+#ifdef __KERNEL__
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries. ihl is the number
+ * of 32-bit words and is always >= 5.
+ */
+extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+extern __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+ unsigned short len,
+ unsigned short proto,
+ __wsum sum);
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+extern __wsum csum_partial(const void *buff, int len, __wsum sum);
+
+/*
+ * Computes the checksum of a memory block at src, length len,
+ * and adds in "sum" (32-bit), while copying the block to dst.
+ * If an access exception occurs on src or dst, it stores -EFAULT
+ * to *src_err or *dst_err respectively (if that pointer is not
+ * NULL), and, for an error on src, zeroes the rest of dst.
+ *
+ * Like csum_partial, this must be called with even lengths,
+ * except for the last fragment.
+ */
+extern __wsum csum_partial_copy_generic(const void *src, void *dst,
+ int len, __wsum sum,
+ int *src_err, int *dst_err);
+/*
+ * the same as csum_partial, but copies from src to dst while it
+ * checksums.
+ */
+#define csum_partial_copy_from_user(src, dst, len, sum, errp) \
+ csum_partial_copy_generic((__force const void *)(src), (dst), (len), (sum), (errp), NULL)
+
+#define csum_partial_copy_nocheck(src, dst, len, sum) \
+ csum_partial_copy_generic((src), (dst), (len), (sum), NULL, NULL)
+
+
+/*
+ * turns a 32-bit partial checksum (e.g. from csum_partial) into a
+ * 1's complement 16-bit checksum.
+ */
+static inline __sum16 csum_fold(__wsum sum)
+{
+ unsigned int tmp;
+
+ /* swap the two 16-bit halves of sum */
+ __asm__("rlwinm %0,%1,16,0,31" : "=r" (tmp) : "r" (sum));
+ /* if there is a carry from adding the two 16-bit halves,
+ it will carry from the lower half into the upper half,
+ giving us the correct sum in the upper half. */
+ return (__force __sum16)(~((__force u32)sum + tmp) >> 16);
+}
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+static inline __sum16 ip_compute_csum(const void *buff, int len)
+{
+ return csum_fold(csum_partial(buff, len, 0));
+}
+
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+ unsigned short len,
+ unsigned short proto,
+ __wsum sum)
+{
+#ifdef __powerpc64__
+ unsigned long s = (__force u32)sum;
+
+ s += (__force u32)saddr;
+ s += (__force u32)daddr;
+ s += proto + len;
+ s += (s >> 32);
+ return (__force __wsum) s;
+#else
+ __asm__("\n\
+ addc %0,%0,%1 \n\
+ adde %0,%0,%2 \n\
+ adde %0,%0,%3 \n\
+ addze %0,%0 \n\
+ "
+ : "=r" (sum)
+ : "r" (daddr), "r"(saddr), "r"(proto + len), "0"(sum));
+ return sum;
+#endif
+}
+#endif /* __KERNEL__ */
+#endif
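
csum_fold() above leans on a rotate so the two 16-bit halves line up for a single add; a portable sketch of the same fold (plain C, equivalent result) may be easier to read:

#include <stdint.h>

static uint16_t example_csum_fold(uint32_t sum)
{
	sum = (sum >> 16) + (sum & 0xffff);	/* fold the high half into the low */
	sum += sum >> 16;			/* add back any carry */
	return (uint16_t)~sum;			/* one's complement of the result */
}
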
diff --git a/arch/powerpc/include/asm/clk_interface.h b/arch/powerpc/include/asm/clk_interface.h
new file mode 100644
index 0000000..ab1882c
--- /dev/null
+++ b/arch/powerpc/include/asm/clk_interface.h
@@ -0,0 +1,20 @@
+#ifndef __ASM_POWERPC_CLK_INTERFACE_H
+#define __ASM_POWERPC_CLK_INTERFACE_H
+
+#include <linux/clk.h>
+
+struct clk_interface {
+ struct clk* (*clk_get) (struct device *dev, const char *id);
+ int (*clk_enable) (struct clk *clk);
+ void (*clk_disable) (struct clk *clk);
+ unsigned long (*clk_get_rate) (struct clk *clk);
+ void (*clk_put) (struct clk *clk);
+ long (*clk_round_rate) (struct clk *clk, unsigned long rate);
+ int (*clk_set_rate) (struct clk *clk, unsigned long rate);
+ int (*clk_set_parent) (struct clk *clk, struct clk *parent);
+ struct clk* (*clk_get_parent) (struct clk *clk);
+};
+
+extern struct clk_interface clk_functions;
+
+#endif /* __ASM_POWERPC_CLK_INTERFACE_H */
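
A wiring sketch (hypothetical platform code; the example_* callbacks are placeholders): a platform supplies its own implementations by filling in clk_functions, and operations it does not implement can simply stay NULL.

static struct clk *example_clk_get(struct device *dev, const char *id)
{
	/* look up the clock named `id` for this device */
	return NULL;
}

static int example_clk_enable(struct clk *clk)
{
	/* ungate the clock in the platform's clock controller */
	return 0;
}

static void example_clk_init(void)
{
	clk_functions.clk_get = example_clk_get;
	clk_functions.clk_enable = example_clk_enable;
}
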
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
new file mode 100644
index 0000000..107d9b9
--- /dev/null
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -0,0 +1,54 @@
+#ifndef _ASM_POWERPC_CODE_PATCHING_H
+#define _ASM_POWERPC_CODE_PATCHING_H
+
+/*
+ * Copyright 2008, Michael Ellerman, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/types.h>
+
+#define PPC_NOP_INSTR 0x60000000
+#define PPC_LWSYNC_INSTR 0x7c2004ac
+
+/* Flags for create_branch:
+ * "b" == create_branch(addr, target, 0);
+ * "ba" == create_branch(addr, target, BRANCH_ABSOLUTE);
+ * "bl" == create_branch(addr, target, BRANCH_SET_LINK);
+ * "bla" == create_branch(addr, target, BRANCH_ABSOLUTE | BRANCH_SET_LINK);
+ */
+#define BRANCH_SET_LINK 0x1
+#define BRANCH_ABSOLUTE 0x2
+
+unsigned int create_branch(const unsigned int *addr,
+ unsigned long target, int flags);
+unsigned int create_cond_branch(const unsigned int *addr,
+ unsigned long target, int flags);
+void patch_branch(unsigned int *addr, unsigned long target, int flags);
+void patch_instruction(unsigned int *addr, unsigned int instr);
+
+int instr_is_relative_branch(unsigned int instr);
+int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr);
+unsigned long branch_target(const unsigned int *instr);
+unsigned int translate_branch(const unsigned int *dest,
+ const unsigned int *src);
+
+static inline unsigned long ppc_function_entry(void *func)
+{
+#ifdef CONFIG_PPC64
+ /*
+ * On PPC64 the function pointer actually points to the function's
+ * descriptor. The first entry in the descriptor is the address
+ * of the function text.
+ */
+ return ((func_descr_t *)func)->entry;
+#else
+ return (unsigned long)func;
+#endif
+}
+
+#endif /* _ASM_POWERPC_CODE_PATCHING_H */
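
A call-site sketch (the names site and handler are hypothetical): rewriting the instruction at `site` into a "bl" to `handler`, using the flag spellings documented in the comment above.

static void example_install_hook(unsigned int *site, void *handler)
{
	/* BRANCH_SET_LINK alone gives a relative branch-and-link ("bl") */
	patch_branch(site, ppc_function_entry(handler), BRANCH_SET_LINK);
}
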
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
new file mode 100644
index 0000000..d811a8c
--- /dev/null
+++ b/arch/powerpc/include/asm/compat.h
@@ -0,0 +1,214 @@
+#ifndef _ASM_POWERPC_COMPAT_H
+#define _ASM_POWERPC_COMPAT_H
+#ifdef __KERNEL__
+/*
+ * Architecture specific compatibility types
+ */
+#include <linux/types.h>
+#include <linux/sched.h>
+
+#define COMPAT_USER_HZ 100
+
+typedef u32 compat_size_t;
+typedef s32 compat_ssize_t;
+typedef s32 compat_time_t;
+typedef s32 compat_clock_t;
+typedef s32 compat_pid_t;
+typedef u32 __compat_uid_t;
+typedef u32 __compat_gid_t;
+typedef u32 __compat_uid32_t;
+typedef u32 __compat_gid32_t;
+typedef u32 compat_mode_t;
+typedef u32 compat_ino_t;
+typedef u32 compat_dev_t;
+typedef s32 compat_off_t;
+typedef s64 compat_loff_t;
+typedef s16 compat_nlink_t;
+typedef u16 compat_ipc_pid_t;
+typedef s32 compat_daddr_t;
+typedef u32 compat_caddr_t;
+typedef __kernel_fsid_t compat_fsid_t;
+typedef s32 compat_key_t;
+typedef s32 compat_timer_t;
+
+typedef s32 compat_int_t;
+typedef s32 compat_long_t;
+typedef s64 compat_s64;
+typedef u32 compat_uint_t;
+typedef u32 compat_ulong_t;
+typedef u64 compat_u64;
+
+struct compat_timespec {
+ compat_time_t tv_sec;
+ s32 tv_nsec;
+};
+
+struct compat_timeval {
+ compat_time_t tv_sec;
+ s32 tv_usec;
+};
+
+struct compat_stat {
+ compat_dev_t st_dev;
+ compat_ino_t st_ino;
+ compat_mode_t st_mode;
+ compat_nlink_t st_nlink;
+ __compat_uid32_t st_uid;
+ __compat_gid32_t st_gid;
+ compat_dev_t st_rdev;
+ compat_off_t st_size;
+ compat_off_t st_blksize;
+ compat_off_t st_blocks;
+ compat_time_t st_atime;
+ u32 st_atime_nsec;
+ compat_time_t st_mtime;
+ u32 st_mtime_nsec;
+ compat_time_t st_ctime;
+ u32 st_ctime_nsec;
+ u32 __unused4[2];
+};
+
+struct compat_flock {
+ short l_type;
+ short l_whence;
+ compat_off_t l_start;
+ compat_off_t l_len;
+ compat_pid_t l_pid;
+};
+
+#define F_GETLK64 12 /* using 'struct flock64' */
+#define F_SETLK64 13
+#define F_SETLKW64 14
+
+struct compat_flock64 {
+ short l_type;
+ short l_whence;
+ compat_loff_t l_start;
+ compat_loff_t l_len;
+ compat_pid_t l_pid;
+};
+
+struct compat_statfs {
+ int f_type;
+ int f_bsize;
+ int f_blocks;
+ int f_bfree;
+ int f_bavail;
+ int f_files;
+ int f_ffree;
+ compat_fsid_t f_fsid;
+ int f_namelen; /* SunOS ignores this field. */
+ int f_frsize;
+ int f_spare[5];
+};
+
+#define COMPAT_RLIM_OLD_INFINITY 0x7fffffff
+#define COMPAT_RLIM_INFINITY 0xffffffff
+
+typedef u32 compat_old_sigset_t;
+
+#define _COMPAT_NSIG 64
+#define _COMPAT_NSIG_BPW 32
+
+typedef u32 compat_sigset_word;
+
+#define COMPAT_OFF_T_MAX 0x7fffffff
+#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
+
+/*
+ * A pointer passed in from user mode. This should not
+ * be used for syscall parameters, just declare them
+ * as pointers because the syscall entry code will have
+ * appropriately converted them already.
+ */
+typedef u32 compat_uptr_t;
+
+static inline void __user *compat_ptr(compat_uptr_t uptr)
+{
+ return (void __user *)(unsigned long)uptr;
+}
+
+static inline compat_uptr_t ptr_to_compat(void __user *uptr)
+{
+ return (u32)(unsigned long)uptr;
+}
+
+static inline void __user *compat_alloc_user_space(long len)
+{
+ struct pt_regs *regs = current->thread.regs;
+ unsigned long usp = regs->gpr[1];
+
+ /*
+	 * We can't access below the stack pointer in the 32-bit ABI but
+	 * can access 288 bytes below it in the 64-bit ABI
+ */
+ if (!(test_thread_flag(TIF_32BIT)))
+ usp -= 288;
+
+ return (void __user *) (usp - len);
+}
+
+/*
+ * ipc64_perm is actually 32/64bit clean but since the compat layer refers to
+ * it we may as well define it.
+ */
+struct compat_ipc64_perm {
+ compat_key_t key;
+ __compat_uid_t uid;
+ __compat_gid_t gid;
+ __compat_uid_t cuid;
+ __compat_gid_t cgid;
+ compat_mode_t mode;
+ unsigned int seq;
+ unsigned int __pad2;
+ unsigned long __unused1; /* yes they really are 64bit pads */
+ unsigned long __unused2;
+};
+
+struct compat_semid64_ds {
+ struct compat_ipc64_perm sem_perm;
+ unsigned int __unused1;
+ compat_time_t sem_otime;
+ unsigned int __unused2;
+ compat_time_t sem_ctime;
+ compat_ulong_t sem_nsems;
+ compat_ulong_t __unused3;
+ compat_ulong_t __unused4;
+};
+
+struct compat_msqid64_ds {
+ struct compat_ipc64_perm msg_perm;
+ unsigned int __unused1;
+ compat_time_t msg_stime;
+ unsigned int __unused2;
+ compat_time_t msg_rtime;
+ unsigned int __unused3;
+ compat_time_t msg_ctime;
+ compat_ulong_t msg_cbytes;
+ compat_ulong_t msg_qnum;
+ compat_ulong_t msg_qbytes;
+ compat_pid_t msg_lspid;
+ compat_pid_t msg_lrpid;
+ compat_ulong_t __unused4;
+ compat_ulong_t __unused5;
+};
+
+struct compat_shmid64_ds {
+ struct compat_ipc64_perm shm_perm;
+ unsigned int __unused1;
+ compat_time_t shm_atime;
+ unsigned int __unused2;
+ compat_time_t shm_dtime;
+ unsigned int __unused3;
+ compat_time_t shm_ctime;
+ unsigned int __unused4;
+ compat_size_t shm_segsz;
+ compat_pid_t shm_cpid;
+ compat_pid_t shm_lpid;
+ compat_ulong_t shm_nattch;
+ compat_ulong_t __unused5;
+ compat_ulong_t __unused6;
+};
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_COMPAT_H */
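
A conversion sketch (a hypothetical compat handler; struct example_args is invented, and copy_from_user()/EFAULT are assumed from the usual kernel headers): a 32-bit user pointer arrives as a bare compat_uptr_t and is widened with compat_ptr() before the normal uaccess copy.

struct example_args {
	compat_int_t flags;
	compat_uptr_t buf;	/* nested 32-bit user pointer */
};

static int example_fetch_args(compat_uptr_t uarg, struct example_args *args)
{
	void __user *p = compat_ptr(uarg);

	if (copy_from_user(args, p, sizeof(*args)))
		return -EFAULT;
	return 0;
}
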
diff --git a/arch/powerpc/include/asm/cpm.h b/arch/powerpc/include/asm/cpm.h
new file mode 100644
index 0000000..24d79e3
--- /dev/null
+++ b/arch/powerpc/include/asm/cpm.h
@@ -0,0 +1,106 @@
+#ifndef __CPM_H
+#define __CPM_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/of.h>
+
+/* Opcodes common to CPM1 and CPM2
+*/
+#define CPM_CR_INIT_TRX ((ushort)0x0000)
+#define CPM_CR_INIT_RX ((ushort)0x0001)
+#define CPM_CR_INIT_TX ((ushort)0x0002)
+#define CPM_CR_HUNT_MODE ((ushort)0x0003)
+#define CPM_CR_STOP_TX ((ushort)0x0004)
+#define CPM_CR_GRA_STOP_TX ((ushort)0x0005)
+#define CPM_CR_RESTART_TX ((ushort)0x0006)
+#define CPM_CR_CLOSE_RX_BD ((ushort)0x0007)
+#define CPM_CR_SET_GADDR ((ushort)0x0008)
+#define CPM_CR_SET_TIMER ((ushort)0x0008)
+#define CPM_CR_STOP_IDMA ((ushort)0x000b)
+
+/* Buffer descriptors used by many of the CPM protocols. */
+typedef struct cpm_buf_desc {
+ ushort cbd_sc; /* Status and Control */
+ ushort cbd_datlen; /* Data length in buffer */
+ uint cbd_bufaddr; /* Buffer address in host memory */
+} cbd_t;
+
+/* Buffer descriptor control/status used by serial
+ */
+
+#define BD_SC_EMPTY (0x8000) /* Receive is empty */
+#define BD_SC_READY (0x8000) /* Transmit is ready */
+#define BD_SC_WRAP (0x2000) /* Last buffer descriptor */
+#define BD_SC_INTRPT (0x1000) /* Interrupt on change */
+#define BD_SC_LAST (0x0800) /* Last buffer in frame */
+#define BD_SC_TC (0x0400) /* Transmit CRC */
+#define BD_SC_CM (0x0200) /* Continuous mode */
+#define BD_SC_ID (0x0100) /* Rec'd too many idles */
+#define BD_SC_P (0x0100) /* xmt preamble */
+#define BD_SC_BR (0x0020) /* Break received */
+#define BD_SC_FR (0x0010) /* Framing error */
+#define BD_SC_PR (0x0008) /* Parity error */
+#define BD_SC_NAK (0x0004) /* NAK - did not respond */
+#define BD_SC_OV (0x0002) /* Overrun */
+#define BD_SC_UN (0x0002) /* Underrun */
+#define BD_SC_CD (0x0001) /* */
+#define BD_SC_CL (0x0001) /* Collision */
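
A minimal sketch of how the cbd_t layout and the BD_SC_* bits above are typically combined when priming a transmit descriptor; the big-endian accessors out_be16()/out_be32() are assumed to come from asm/io.h, and the buffer's DMA address is assumed to be supplied by the caller:

	/* Hand one buffer to the CPM for transmission; set WRAP on the ring's last BD. */
	static void example_prime_tx_bd(cbd_t __iomem *bd, dma_addr_t buf, int len, int last)
	{
		out_be16(&bd->cbd_datlen, len);
		out_be32(&bd->cbd_bufaddr, buf);
		out_be16(&bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT |
				      (last ? BD_SC_WRAP : 0));
	}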
+
+/* Buffer descriptor control/status used by Ethernet receive.
+ * Common to SCC and FCC.
+ */
+#define BD_ENET_RX_EMPTY (0x8000)
+#define BD_ENET_RX_WRAP (0x2000)
+#define BD_ENET_RX_INTR (0x1000)
+#define BD_ENET_RX_LAST (0x0800)
+#define BD_ENET_RX_FIRST (0x0400)
+#define BD_ENET_RX_MISS (0x0100)
+#define BD_ENET_RX_BC (0x0080) /* FCC Only */
+#define BD_ENET_RX_MC (0x0040) /* FCC Only */
+#define BD_ENET_RX_LG (0x0020)
+#define BD_ENET_RX_NO (0x0010)
+#define BD_ENET_RX_SH (0x0008)
+#define BD_ENET_RX_CR (0x0004)
+#define BD_ENET_RX_OV (0x0002)
+#define BD_ENET_RX_CL (0x0001)
+#define BD_ENET_RX_STATS (0x01ff) /* All status bits */
+
+/* Buffer descriptor control/status used by Ethernet transmit.
+ * Common to SCC and FCC.
+ */
+#define BD_ENET_TX_READY (0x8000)
+#define BD_ENET_TX_PAD (0x4000)
+#define BD_ENET_TX_WRAP (0x2000)
+#define BD_ENET_TX_INTR (0x1000)
+#define BD_ENET_TX_LAST (0x0800)
+#define BD_ENET_TX_TC (0x0400)
+#define BD_ENET_TX_DEF (0x0200)
+#define BD_ENET_TX_HB (0x0100)
+#define BD_ENET_TX_LC (0x0080)
+#define BD_ENET_TX_RL (0x0040)
+#define BD_ENET_TX_RCMASK (0x003c)
+#define BD_ENET_TX_UN (0x0002)
+#define BD_ENET_TX_CSL (0x0001)
+#define BD_ENET_TX_STATS (0x03ff) /* All status bits */
+
+/* Buffer descriptor control/status used by Transparent mode SCC.
+ */
+#define BD_SCC_TX_LAST (0x0800)
+
+/* Buffer descriptor control/status used by I2C.
+ */
+#define BD_I2C_START (0x0400)
+
+int cpm_muram_init(void);
+unsigned long cpm_muram_alloc(unsigned long size, unsigned long align);
+int cpm_muram_free(unsigned long offset);
+unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size);
+void __iomem *cpm_muram_addr(unsigned long offset);
+unsigned long cpm_muram_offset(void __iomem *addr);
+dma_addr_t cpm_muram_dma(void __iomem *addr);
+int cpm_command(u32 command, u8 opcode);
+
+int cpm2_gpiochip_add32(struct device_node *np);
+
+#endif
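
The cpm_muram_* declarations above form the allocation API for dual-port RAM. A minimal usage sketch (assumptions: failure is reported via IS_ERR_VALUE() on the returned offset, and the 8-entry ring size is chosen arbitrarily):

	/* Carve an 8-entry buffer-descriptor ring out of MURAM and map it. */
	static cbd_t __iomem *example_alloc_bd_ring(unsigned long *offset)
	{
		unsigned long off = cpm_muram_alloc(8 * sizeof(cbd_t), 8);

		if (IS_ERR_VALUE(off))		/* assumed failure convention */
			return NULL;
		*offset = off;			/* needed later for cpm_muram_free() */
		return cpm_muram_addr(off);	/* CPU-visible __iomem pointer */
	}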
diff --git a/arch/powerpc/include/asm/cpm1.h b/arch/powerpc/include/asm/cpm1.h
new file mode 100644
index 0000000..2ff79874
--- /dev/null
+++ b/arch/powerpc/include/asm/cpm1.h
@@ -0,0 +1,652 @@
+/*
+ * MPC8xx Communication Processor Module.
+ * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
+ *
+ * This file contains structures and information for the communication
+ * processor channels. Some CPM control and status is available
+ * through the MPC8xx internal memory map. See immap.h for details.
+ * This file only contains what I need for the moment, not the total
+ * CPM capabilities. I (or someone else) will add definitions as they
+ * are needed. -- Dan
+ *
+ * On the MBX board, EPPC-Bug loads CPM microcode into the first 512
+ * bytes of the DP RAM and relocates the I2C parameter area to the
+ * IDMA1 space. The remaining DP RAM is available for buffer descriptors
+ * or other use.
+ */
+#ifndef __CPM1__
+#define __CPM1__
+
+#include <asm/8xx_immap.h>
+#include <asm/ptrace.h>
+#include <asm/cpm.h>
+
+/* CPM Command register.
+*/
+#define CPM_CR_RST ((ushort)0x8000)
+#define CPM_CR_OPCODE ((ushort)0x0f00)
+#define CPM_CR_CHAN ((ushort)0x00f0)
+#define CPM_CR_FLG ((ushort)0x0001)
+
+/* Channel numbers.
+*/
+#define CPM_CR_CH_SCC1 ((ushort)0x0000)
+#define CPM_CR_CH_I2C ((ushort)0x0001) /* I2C and IDMA1 */
+#define CPM_CR_CH_SCC2 ((ushort)0x0004)
+#define CPM_CR_CH_SPI ((ushort)0x0005) /* SPI / IDMA2 / Timers */
+#define CPM_CR_CH_TIMER CPM_CR_CH_SPI
+#define CPM_CR_CH_SCC3 ((ushort)0x0008)
+#define CPM_CR_CH_SMC1 ((ushort)0x0009) /* SMC1 / DSP1 */
+#define CPM_CR_CH_SCC4 ((ushort)0x000c)
+#define CPM_CR_CH_SMC2 ((ushort)0x000d) /* SMC2 / DSP2 */
+
+#define mk_cr_cmd(CH, CMD) ((CMD << 8) | (CH << 4))
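
Purely as an illustration, a CPM1 command word combines an opcode from cpm.h with one of the channel codes above; software also sets CPM_CR_FLG, which the CPM clears once it has accepted the command:

	/* Example value only: (re)initialize SMC1 receive and transmit parameters. */
	ushort cmd = mk_cr_cmd(CPM_CR_CH_SMC1, CPM_CR_INIT_TRX) | CPM_CR_FLG;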
+
+/* Export the base address of the communication processor registers
+ * and dual port ram.
+ */
+extern cpm8xx_t __iomem *cpmp; /* Pointer to comm processor */
+
+#define cpm_dpalloc cpm_muram_alloc
+#define cpm_dpfree cpm_muram_free
+#define cpm_dpram_addr cpm_muram_addr
+#define cpm_dpram_phys cpm_muram_dma
+
+extern void cpm_setbrg(uint brg, uint rate);
+
+extern void cpm_load_patch(cpm8xx_t *cp);
+
+extern void cpm_reset(void);
+
+/* Parameter RAM offsets.
+*/
+#define PROFF_SCC1 ((uint)0x0000)
+#define PROFF_IIC ((uint)0x0080)
+#define PROFF_SCC2 ((uint)0x0100)
+#define PROFF_SPI ((uint)0x0180)
+#define PROFF_SCC3 ((uint)0x0200)
+#define PROFF_SMC1 ((uint)0x0280)
+#define PROFF_SCC4 ((uint)0x0300)
+#define PROFF_SMC2 ((uint)0x0380)
+
+/* Define enough so I can at least use the serial port as a UART.
+ * The MBX uses SMC1 as the host serial port.
+ */
+typedef struct smc_uart {
+ ushort smc_rbase; /* Rx Buffer descriptor base address */
+ ushort smc_tbase; /* Tx Buffer descriptor base address */
+ u_char smc_rfcr; /* Rx function code */
+ u_char smc_tfcr; /* Tx function code */
+ ushort smc_mrblr; /* Max receive buffer length */
+ uint smc_rstate; /* Internal */
+ uint smc_idp; /* Internal */
+ ushort smc_rbptr; /* Internal */
+ ushort smc_ibc; /* Internal */
+ uint smc_rxtmp; /* Internal */
+ uint smc_tstate; /* Internal */
+ uint smc_tdp; /* Internal */
+ ushort smc_tbptr; /* Internal */
+ ushort smc_tbc; /* Internal */
+ uint smc_txtmp; /* Internal */
+ ushort smc_maxidl; /* Maximum idle characters */
+ ushort smc_tmpidl; /* Temporary idle counter */
+ ushort smc_brklen; /* Last received break length */
+ ushort smc_brkec; /* rcv'd break condition counter */
+ ushort smc_brkcr; /* xmt break count register */
+ ushort smc_rmask; /* Temporary bit mask */
+ char res1[8]; /* Reserved */
+ ushort smc_rpbase; /* Relocation pointer */
+} smc_uart_t;
+
+/* Function code bits.
+*/
+#define SMC_EB ((u_char)0x10) /* Set big endian byte order */
+
+/* SMC uart mode register.
+*/
+#define SMCMR_REN ((ushort)0x0001)
+#define SMCMR_TEN ((ushort)0x0002)
+#define SMCMR_DM ((ushort)0x000c)
+#define SMCMR_SM_GCI ((ushort)0x0000)
+#define SMCMR_SM_UART ((ushort)0x0020)
+#define SMCMR_SM_TRANS ((ushort)0x0030)
+#define SMCMR_SM_MASK ((ushort)0x0030)
+#define SMCMR_PM_EVEN ((ushort)0x0100) /* Even parity, else odd */
+#define SMCMR_REVD SMCMR_PM_EVEN
+#define SMCMR_PEN ((ushort)0x0200) /* Parity enable */
+#define SMCMR_BS SMCMR_PEN
+#define SMCMR_SL ((ushort)0x0400) /* Two stops, else one */
+#define SMCR_CLEN_MASK ((ushort)0x7800) /* Character length */
+#define smcr_mk_clen(C) (((C) << 11) & SMCR_CLEN_MASK)
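
As a hedged sketch, a typical SMC UART mode word enables the transmitter and receiver in UART mode; the character-length field set via smcr_mk_clen() derives from the total frame length (start, data, parity, and stop bits; consult the MPC8xx manual for the exact encoding), so it is left out here:

	/* Illustrative only: SMC in UART mode with both directions active. */
	ushort smcmr = SMCMR_SM_UART | SMCMR_TEN | SMCMR_REN;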
+
+/* SMC2 as Centronics parallel printer. It is half duplex, in that
+ * it can only receive or transmit. The parameter ram values for
+ * each direction are either unique or properly overlap, so we can
+ * include them in one structure.
+ */
+typedef struct smc_centronics {
+ ushort scent_rbase;
+ ushort scent_tbase;
+ u_char scent_cfcr;
+ u_char scent_smask;
+ ushort scent_mrblr;
+ uint scent_rstate;
+ uint scent_r_ptr;
+ ushort scent_rbptr;
+ ushort scent_r_cnt;
+ uint scent_rtemp;
+ uint scent_tstate;
+ uint scent_t_ptr;
+ ushort scent_tbptr;
+ ushort scent_t_cnt;
+ uint scent_ttemp;
+ ushort scent_max_sl;
+ ushort scent_sl_cnt;
+ ushort scent_character1;
+ ushort scent_character2;
+ ushort scent_character3;
+ ushort scent_character4;
+ ushort scent_character5;
+ ushort scent_character6;
+ ushort scent_character7;
+ ushort scent_character8;
+ ushort scent_rccm;
+ ushort scent_rccr;
+} smc_cent_t;
+
+/* Centronics Status Mask Register.
+*/
+#define SMC_CENT_F ((u_char)0x08)
+#define SMC_CENT_PE ((u_char)0x04)
+#define SMC_CENT_S ((u_char)0x02)
+
+/* SMC Event and Mask register.
+*/
+#define SMCM_BRKE ((unsigned char)0x40) /* When in UART Mode */
+#define SMCM_BRK ((unsigned char)0x10) /* When in UART Mode */
+#define SMCM_TXE ((unsigned char)0x10) /* When in Transparent Mode */
+#define SMCM_BSY ((unsigned char)0x04)
+#define SMCM_TX ((unsigned char)0x02)
+#define SMCM_RX ((unsigned char)0x01)
+
+/* Baud rate generators.
+*/
+#define CPM_BRG_RST ((uint)0x00020000)
+#define CPM_BRG_EN ((uint)0x00010000)
+#define CPM_BRG_EXTC_INT ((uint)0x00000000)
+#define CPM_BRG_EXTC_CLK2 ((uint)0x00004000)
+#define CPM_BRG_EXTC_CLK6 ((uint)0x00008000)
+#define CPM_BRG_ATB ((uint)0x00002000)
+#define CPM_BRG_CD_MASK ((uint)0x00001ffe)
+#define CPM_BRG_DIV16 ((uint)0x00000001)
+
+/* SI Clock Route Register
+*/
+#define SICR_RCLK_SCC1_BRG1 ((uint)0x00000000)
+#define SICR_TCLK_SCC1_BRG1 ((uint)0x00000000)
+#define SICR_RCLK_SCC2_BRG2 ((uint)0x00000800)
+#define SICR_TCLK_SCC2_BRG2 ((uint)0x00000100)
+#define SICR_RCLK_SCC3_BRG3 ((uint)0x00100000)
+#define SICR_TCLK_SCC3_BRG3 ((uint)0x00020000)
+#define SICR_RCLK_SCC4_BRG4 ((uint)0x18000000)
+#define SICR_TCLK_SCC4_BRG4 ((uint)0x03000000)
+
+/* SCCs.
+*/
+#define SCC_GSMRH_IRP ((uint)0x00040000)
+#define SCC_GSMRH_GDE ((uint)0x00010000)
+#define SCC_GSMRH_TCRC_CCITT ((uint)0x00008000)
+#define SCC_GSMRH_TCRC_BISYNC ((uint)0x00004000)
+#define SCC_GSMRH_TCRC_HDLC ((uint)0x00000000)
+#define SCC_GSMRH_REVD ((uint)0x00002000)
+#define SCC_GSMRH_TRX ((uint)0x00001000)
+#define SCC_GSMRH_TTX ((uint)0x00000800)
+#define SCC_GSMRH_CDP ((uint)0x00000400)
+#define SCC_GSMRH_CTSP ((uint)0x00000200)
+#define SCC_GSMRH_CDS ((uint)0x00000100)
+#define SCC_GSMRH_CTSS ((uint)0x00000080)
+#define SCC_GSMRH_TFL ((uint)0x00000040)
+#define SCC_GSMRH_RFW ((uint)0x00000020)
+#define SCC_GSMRH_TXSY ((uint)0x00000010)
+#define SCC_GSMRH_SYNL16 ((uint)0x0000000c)
+#define SCC_GSMRH_SYNL8 ((uint)0x00000008)
+#define SCC_GSMRH_SYNL4 ((uint)0x00000004)
+#define SCC_GSMRH_RTSM ((uint)0x00000002)
+#define SCC_GSMRH_RSYN ((uint)0x00000001)
+
+#define SCC_GSMRL_SIR ((uint)0x80000000) /* SCC2 only */
+#define SCC_GSMRL_EDGE_NONE ((uint)0x60000000)
+#define SCC_GSMRL_EDGE_NEG ((uint)0x40000000)
+#define SCC_GSMRL_EDGE_POS ((uint)0x20000000)
+#define SCC_GSMRL_EDGE_BOTH ((uint)0x00000000)
+#define SCC_GSMRL_TCI ((uint)0x10000000)
+#define SCC_GSMRL_TSNC_3 ((uint)0x0c000000)
+#define SCC_GSMRL_TSNC_4 ((uint)0x08000000)
+#define SCC_GSMRL_TSNC_14 ((uint)0x04000000)
+#define SCC_GSMRL_TSNC_INF ((uint)0x00000000)
+#define SCC_GSMRL_RINV ((uint)0x02000000)
+#define SCC_GSMRL_TINV ((uint)0x01000000)
+#define SCC_GSMRL_TPL_128 ((uint)0x00c00000)
+#define SCC_GSMRL_TPL_64 ((uint)0x00a00000)
+#define SCC_GSMRL_TPL_48 ((uint)0x00800000)
+#define SCC_GSMRL_TPL_32 ((uint)0x00600000)
+#define SCC_GSMRL_TPL_16 ((uint)0x00400000)
+#define SCC_GSMRL_TPL_8 ((uint)0x00200000)
+#define SCC_GSMRL_TPL_NONE ((uint)0x00000000)
+#define SCC_GSMRL_TPP_ALL1 ((uint)0x00180000)
+#define SCC_GSMRL_TPP_01 ((uint)0x00100000)
+#define SCC_GSMRL_TPP_10 ((uint)0x00080000)
+#define SCC_GSMRL_TPP_ZEROS ((uint)0x00000000)
+#define SCC_GSMRL_TEND ((uint)0x00040000)
+#define SCC_GSMRL_TDCR_32 ((uint)0x00030000)
+#define SCC_GSMRL_TDCR_16 ((uint)0x00020000)
+#define SCC_GSMRL_TDCR_8 ((uint)0x00010000)
+#define SCC_GSMRL_TDCR_1 ((uint)0x00000000)
+#define SCC_GSMRL_RDCR_32 ((uint)0x0000c000)
+#define SCC_GSMRL_RDCR_16 ((uint)0x00008000)
+#define SCC_GSMRL_RDCR_8 ((uint)0x00004000)
+#define SCC_GSMRL_RDCR_1 ((uint)0x00000000)
+#define SCC_GSMRL_RENC_DFMAN ((uint)0x00003000)
+#define SCC_GSMRL_RENC_MANCH ((uint)0x00002000)
+#define SCC_GSMRL_RENC_FM0 ((uint)0x00001000)
+#define SCC_GSMRL_RENC_NRZI ((uint)0x00000800)
+#define SCC_GSMRL_RENC_NRZ ((uint)0x00000000)
+#define SCC_GSMRL_TENC_DFMAN ((uint)0x00000600)
+#define SCC_GSMRL_TENC_MANCH ((uint)0x00000400)
+#define SCC_GSMRL_TENC_FM0 ((uint)0x00000200)
+#define SCC_GSMRL_TENC_NRZI ((uint)0x00000100)
+#define SCC_GSMRL_TENC_NRZ ((uint)0x00000000)
+#define SCC_GSMRL_DIAG_LE ((uint)0x000000c0) /* Loop and echo */
+#define SCC_GSMRL_DIAG_ECHO ((uint)0x00000080)
+#define SCC_GSMRL_DIAG_LOOP ((uint)0x00000040)
+#define SCC_GSMRL_DIAG_NORM ((uint)0x00000000)
+#define SCC_GSMRL_ENR ((uint)0x00000020)
+#define SCC_GSMRL_ENT ((uint)0x00000010)
+#define SCC_GSMRL_MODE_ENET ((uint)0x0000000c)
+#define SCC_GSMRL_MODE_QMC ((uint)0x0000000a)
+#define SCC_GSMRL_MODE_DDCMP ((uint)0x00000009)
+#define SCC_GSMRL_MODE_BISYNC ((uint)0x00000008)
+#define SCC_GSMRL_MODE_V14 ((uint)0x00000007)
+#define SCC_GSMRL_MODE_AHDLC ((uint)0x00000006)
+#define SCC_GSMRL_MODE_PROFIBUS ((uint)0x00000005)
+#define SCC_GSMRL_MODE_UART ((uint)0x00000004)
+#define SCC_GSMRL_MODE_SS7 ((uint)0x00000003)
+#define SCC_GSMRL_MODE_ATALK ((uint)0x00000002)
+#define SCC_GSMRL_MODE_HDLC ((uint)0x00000000)
+
+#define SCC_TODR_TOD ((ushort)0x8000)
+
+/* SCC Event and Mask register.
+*/
+#define SCCM_TXE ((unsigned char)0x10)
+#define SCCM_BSY ((unsigned char)0x04)
+#define SCCM_TX ((unsigned char)0x02)
+#define SCCM_RX ((unsigned char)0x01)
+
+typedef struct scc_param {
+ ushort scc_rbase; /* Rx Buffer descriptor base address */
+ ushort scc_tbase; /* Tx Buffer descriptor base address */
+ u_char scc_rfcr; /* Rx function code */
+ u_char scc_tfcr; /* Tx function code */
+ ushort scc_mrblr; /* Max receive buffer length */
+ uint scc_rstate; /* Internal */
+ uint scc_idp; /* Internal */
+ ushort scc_rbptr; /* Internal */
+ ushort scc_ibc; /* Internal */
+ uint scc_rxtmp; /* Internal */
+ uint scc_tstate; /* Internal */
+ uint scc_tdp; /* Internal */
+ ushort scc_tbptr; /* Internal */
+ ushort scc_tbc; /* Internal */
+ uint scc_txtmp; /* Internal */
+ uint scc_rcrc; /* Internal */
+ uint scc_tcrc; /* Internal */
+} sccp_t;
+
+/* Function code bits.
+*/
+#define SCC_EB ((u_char)0x10) /* Set big endian byte order */
+
+/* CPM Ethernet through SCCx.
+ */
+typedef struct scc_enet {
+ sccp_t sen_genscc;
+ uint sen_cpres; /* Preset CRC */
+ uint sen_cmask; /* Constant mask for CRC */
+ uint sen_crcec; /* CRC Error counter */
+ uint sen_alec; /* alignment error counter */
+ uint sen_disfc; /* discard frame counter */
+ ushort sen_pads; /* Tx short frame pad character */
+ ushort sen_retlim; /* Retry limit threshold */
+ ushort sen_retcnt; /* Retry limit counter */
+ ushort sen_maxflr; /* maximum frame length register */
+ ushort sen_minflr; /* minimum frame length register */
+ ushort sen_maxd1; /* maximum DMA1 length */
+ ushort sen_maxd2; /* maximum DMA2 length */
+ ushort sen_maxd; /* Rx max DMA */
+ ushort sen_dmacnt; /* Rx DMA counter */
+ ushort sen_maxb; /* Max BD byte count */
+ ushort sen_gaddr1; /* Group address filter */
+ ushort sen_gaddr2;
+ ushort sen_gaddr3;
+ ushort sen_gaddr4;
+ uint sen_tbuf0data0; /* Save area 0 - current frame */
+ uint sen_tbuf0data1; /* Save area 1 - current frame */
+ uint sen_tbuf0rba; /* Internal */
+ uint sen_tbuf0crc; /* Internal */
+ ushort sen_tbuf0bcnt; /* Internal */
+ ushort sen_paddrh; /* physical address (MSB) */
+ ushort sen_paddrm;
+ ushort sen_paddrl; /* physical address (LSB) */
+ ushort sen_pper; /* persistence */
+ ushort sen_rfbdptr; /* Rx first BD pointer */
+ ushort sen_tfbdptr; /* Tx first BD pointer */
+ ushort sen_tlbdptr; /* Tx last BD pointer */
+ uint sen_tbuf1data0; /* Save area 0 - current frame */
+ uint sen_tbuf1data1; /* Save area 1 - current frame */
+ uint sen_tbuf1rba; /* Internal */
+ uint sen_tbuf1crc; /* Internal */
+ ushort sen_tbuf1bcnt; /* Internal */
+ ushort sen_txlen; /* Tx Frame length counter */
+ ushort sen_iaddr1; /* Individual address filter */
+ ushort sen_iaddr2;
+ ushort sen_iaddr3;
+ ushort sen_iaddr4;
+ ushort sen_boffcnt; /* Backoff counter */
+
+ /* NOTE: Some versions of the manual have the following items
+ * incorrectly documented. Below is the proper order.
+ */
+ ushort sen_taddrh; /* temp address (MSB) */
+ ushort sen_taddrm;
+ ushort sen_taddrl; /* temp address (LSB) */
+} scc_enet_t;
+
+/* SCC Event register as used by Ethernet.
+*/
+#define SCCE_ENET_GRA ((ushort)0x0080) /* Graceful stop complete */
+#define SCCE_ENET_TXE ((ushort)0x0010) /* Transmit Error */
+#define SCCE_ENET_RXF ((ushort)0x0008) /* Full frame received */
+#define SCCE_ENET_BSY ((ushort)0x0004) /* All incoming buffers full */
+#define SCCE_ENET_TXB ((ushort)0x0002) /* A buffer was transmitted */
+#define SCCE_ENET_RXB ((ushort)0x0001) /* A buffer was received */
+
+/* SCC Mode Register (PSMR) as used by Ethernet.
+*/
+#define SCC_PSMR_HBC ((ushort)0x8000) /* Enable heartbeat */
+#define SCC_PSMR_FC ((ushort)0x4000) /* Force collision */
+#define SCC_PSMR_RSH ((ushort)0x2000) /* Receive short frames */
+#define SCC_PSMR_IAM ((ushort)0x1000) /* Check individual hash */
+#define SCC_PSMR_ENCRC ((ushort)0x0800) /* Ethernet CRC mode */
+#define SCC_PSMR_PRO ((ushort)0x0200) /* Promiscuous mode */
+#define SCC_PSMR_BRO ((ushort)0x0100) /* Catch broadcast pkts */
+#define SCC_PSMR_SBT ((ushort)0x0080) /* Special backoff timer */
+#define SCC_PSMR_LPB ((ushort)0x0040) /* Set Loopback mode */
+#define SCC_PSMR_SIP ((ushort)0x0020) /* Sample Input Pins */
+#define SCC_PSMR_LCW ((ushort)0x0010) /* Late collision window */
+#define SCC_PSMR_NIB22 ((ushort)0x000a) /* Start frame search */
+#define SCC_PSMR_FDE ((ushort)0x0001) /* Full duplex enable */
+
+/* SCC as UART
+*/
+typedef struct scc_uart {
+ sccp_t scc_genscc;
+ char res1[8]; /* Reserved */
+ ushort scc_maxidl; /* Maximum idle chars */
+ ushort scc_idlc; /* temp idle counter */
+ ushort scc_brkcr; /* Break count register */
+ ushort scc_parec; /* receive parity error counter */
+ ushort scc_frmec; /* receive framing error counter */
+ ushort scc_nosec; /* receive noise counter */
+ ushort scc_brkec; /* receive break condition counter */
+ ushort scc_brkln; /* last received break length */
+ ushort scc_uaddr1; /* UART address character 1 */
+ ushort scc_uaddr2; /* UART address character 2 */
+ ushort scc_rtemp; /* Temp storage */
+ ushort scc_toseq; /* Transmit out of sequence char */
+ ushort scc_char1; /* control character 1 */
+ ushort scc_char2; /* control character 2 */
+ ushort scc_char3; /* control character 3 */
+ ushort scc_char4; /* control character 4 */
+ ushort scc_char5; /* control character 5 */
+ ushort scc_char6; /* control character 6 */
+ ushort scc_char7; /* control character 7 */
+ ushort scc_char8; /* control character 8 */
+ ushort scc_rccm; /* receive control character mask */
+ ushort scc_rccr; /* receive control character register */
+ ushort scc_rlbc; /* receive last break character */
+} scc_uart_t;
+
+/* SCC Event and Mask registers when it is used as a UART.
+*/
+#define UART_SCCM_GLR ((ushort)0x1000)
+#define UART_SCCM_GLT ((ushort)0x0800)
+#define UART_SCCM_AB ((ushort)0x0200)
+#define UART_SCCM_IDL ((ushort)0x0100)
+#define UART_SCCM_GRA ((ushort)0x0080)
+#define UART_SCCM_BRKE ((ushort)0x0040)
+#define UART_SCCM_BRKS ((ushort)0x0020)
+#define UART_SCCM_CCR ((ushort)0x0008)
+#define UART_SCCM_BSY ((ushort)0x0004)
+#define UART_SCCM_TX ((ushort)0x0002)
+#define UART_SCCM_RX ((ushort)0x0001)
+
+/* The SCC PMSR when used as a UART.
+*/
+#define SCU_PSMR_FLC ((ushort)0x8000)
+#define SCU_PSMR_SL ((ushort)0x4000)
+#define SCU_PSMR_CL ((ushort)0x3000)
+#define SCU_PSMR_UM ((ushort)0x0c00)
+#define SCU_PSMR_FRZ ((ushort)0x0200)
+#define SCU_PSMR_RZS ((ushort)0x0100)
+#define SCU_PSMR_SYN ((ushort)0x0080)
+#define SCU_PSMR_DRT ((ushort)0x0040)
+#define SCU_PSMR_PEN ((ushort)0x0010)
+#define SCU_PSMR_RPM ((ushort)0x000c)
+#define SCU_PSMR_REVP ((ushort)0x0008)
+#define SCU_PSMR_TPM ((ushort)0x0003)
+#define SCU_PSMR_TEVP ((ushort)0x0002)
+
+/* CPM Transparent mode SCC.
+ */
+typedef struct scc_trans {
+ sccp_t st_genscc;
+ uint st_cpres; /* Preset CRC */
+ uint st_cmask; /* Constant mask for CRC */
+} scc_trans_t;
+
+/* IIC parameter RAM.
+*/
+typedef struct iic {
+ ushort iic_rbase; /* Rx Buffer descriptor base address */
+ ushort iic_tbase; /* Tx Buffer descriptor base address */
+ u_char iic_rfcr; /* Rx function code */
+ u_char iic_tfcr; /* Tx function code */
+ ushort iic_mrblr; /* Max receive buffer length */
+ uint iic_rstate; /* Internal */
+ uint iic_rdp; /* Internal */
+ ushort iic_rbptr; /* Internal */
+ ushort iic_rbc; /* Internal */
+ uint iic_rxtmp; /* Internal */
+ uint iic_tstate; /* Internal */
+ uint iic_tdp; /* Internal */
+ ushort iic_tbptr; /* Internal */
+ ushort iic_tbc; /* Internal */
+ uint iic_txtmp; /* Internal */
+ char res1[4]; /* Reserved */
+ ushort iic_rpbase; /* Relocation pointer */
+ char res2[2]; /* Reserved */
+} iic_t;
+
+/* SPI parameter RAM.
+*/
+typedef struct spi {
+ ushort spi_rbase; /* Rx Buffer descriptor base address */
+ ushort spi_tbase; /* Tx Buffer descriptor base address */
+ u_char spi_rfcr; /* Rx function code */
+ u_char spi_tfcr; /* Tx function code */
+ ushort spi_mrblr; /* Max receive buffer length */
+ uint spi_rstate; /* Internal */
+ uint spi_rdp; /* Internal */
+ ushort spi_rbptr; /* Internal */
+ ushort spi_rbc; /* Internal */
+ uint spi_rxtmp; /* Internal */
+ uint spi_tstate; /* Internal */
+ uint spi_tdp; /* Internal */
+ ushort spi_tbptr; /* Internal */
+ ushort spi_tbc; /* Internal */
+ uint spi_txtmp; /* Internal */
+ uint spi_res;
+ ushort spi_rpbase; /* Relocation pointer */
+ ushort spi_res2;
+} spi_t;
+
+/* SPI Mode register.
+*/
+#define SPMODE_LOOP ((ushort)0x4000) /* Loopback */
+#define SPMODE_CI ((ushort)0x2000) /* Clock Invert */
+#define SPMODE_CP ((ushort)0x1000) /* Clock Phase */
+#define SPMODE_DIV16 ((ushort)0x0800) /* BRG/16 mode */
+#define SPMODE_REV ((ushort)0x0400) /* Reversed Data */
+#define SPMODE_MSTR ((ushort)0x0200) /* SPI Master */
+#define SPMODE_EN ((ushort)0x0100) /* Enable */
+#define SPMODE_LENMSK ((ushort)0x00f0) /* character length */
+#define SPMODE_LEN4 ((ushort)0x0030) /* 4 bits per char */
+#define SPMODE_LEN8 ((ushort)0x0070) /* 8 bits per char */
+#define SPMODE_LEN16 ((ushort)0x00f0) /* 16 bits per char */
+#define SPMODE_PMMSK ((ushort)0x000f) /* prescale modulus */
+
+/* SPIE fields */
+#define SPIE_MME 0x20
+#define SPIE_TXE 0x10
+#define SPIE_BSY 0x04
+#define SPIE_TXB 0x02
+#define SPIE_RXB 0x01
+
+/*
+ * RISC Controller Configuration Register definitions
+ */
+#define RCCR_TIME 0x8000 /* RISC Timer Enable */
+#define RCCR_TIMEP(t) (((t) & 0x3F)<<8) /* RISC Timer Period */
+#define RCCR_TIME_MASK 0x00FF /* not RISC Timer related bits */
+
+/* RISC Timer Parameter RAM offset */
+#define PROFF_RTMR ((uint)0x01B0)
+
+typedef struct risc_timer_pram {
+ unsigned short tm_base; /* RISC Timer Table Base Address */
+ unsigned short tm_ptr; /* RISC Timer Table Pointer (internal) */
+ unsigned short r_tmr; /* RISC Timer Mode Register */
+ unsigned short r_tmv; /* RISC Timer Valid Register */
+ unsigned long tm_cmd; /* RISC Timer Command Register */
+ unsigned long tm_cnt; /* RISC Timer Internal Count */
+} rt_pram_t;
+
+/* Bits in RISC Timer Command Register */
+#define TM_CMD_VALID 0x80000000 /* Valid - Enables the timer */
+#define TM_CMD_RESTART 0x40000000 /* Restart - for automatic restart */
+#define TM_CMD_PWM 0x20000000 /* Run in Pulse Width Modulation Mode */
+#define TM_CMD_NUM(n) (((n)&0xF)<<16) /* Timer Number */
+#define TM_CMD_PERIOD(p) ((p)&0xFFFF) /* Timer Period */
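
To show how the command helpers above compose, an illustrative value (timer number and period chosen arbitrarily):

	/* Example value only: run RISC timer 3 continuously with a 1000-tick period. */
	unsigned long tm_cmd = TM_CMD_VALID | TM_CMD_RESTART |
			       TM_CMD_NUM(3) | TM_CMD_PERIOD(1000);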
+
+/* CPM interrupts. There are nearly 32 interrupts generated by CPM
+ * channels or devices. All of these are presented to the PPC core
+ * as a single interrupt. The CPM interrupt handler dispatches its
+ * own handlers, in a similar fashion to the PPC core handler. We
+ * use the table as defined in the manuals (i.e. no special high
+ * priority and SCC1 == SCCa, etc...).
+ */
+#define CPMVEC_NR 32
+#define CPMVEC_PIO_PC15 ((ushort)0x1f)
+#define CPMVEC_SCC1 ((ushort)0x1e)
+#define CPMVEC_SCC2 ((ushort)0x1d)
+#define CPMVEC_SCC3 ((ushort)0x1c)
+#define CPMVEC_SCC4 ((ushort)0x1b)
+#define CPMVEC_PIO_PC14 ((ushort)0x1a)
+#define CPMVEC_TIMER1 ((ushort)0x19)
+#define CPMVEC_PIO_PC13 ((ushort)0x18)
+#define CPMVEC_PIO_PC12 ((ushort)0x17)
+#define CPMVEC_SDMA_CB_ERR ((ushort)0x16)
+#define CPMVEC_IDMA1 ((ushort)0x15)
+#define CPMVEC_IDMA2 ((ushort)0x14)
+#define CPMVEC_TIMER2 ((ushort)0x12)
+#define CPMVEC_RISCTIMER ((ushort)0x11)
+#define CPMVEC_I2C ((ushort)0x10)
+#define CPMVEC_PIO_PC11 ((ushort)0x0f)
+#define CPMVEC_PIO_PC10 ((ushort)0x0e)
+#define CPMVEC_TIMER3 ((ushort)0x0c)
+#define CPMVEC_PIO_PC9 ((ushort)0x0b)
+#define CPMVEC_PIO_PC8 ((ushort)0x0a)
+#define CPMVEC_PIO_PC7 ((ushort)0x09)
+#define CPMVEC_TIMER4 ((ushort)0x07)
+#define CPMVEC_PIO_PC6 ((ushort)0x06)
+#define CPMVEC_SPI ((ushort)0x05)
+#define CPMVEC_SMC1 ((ushort)0x04)
+#define CPMVEC_SMC2 ((ushort)0x03)
+#define CPMVEC_PIO_PC5 ((ushort)0x02)
+#define CPMVEC_PIO_PC4 ((ushort)0x01)
+#define CPMVEC_ERROR ((ushort)0x00)
+
+/* CPM interrupt configuration vector.
+*/
+#define CICR_SCD_SCC4 ((uint)0x00c00000) /* SCC4 @ SCCd */
+#define CICR_SCC_SCC3 ((uint)0x00200000) /* SCC3 @ SCCc */
+#define CICR_SCB_SCC2 ((uint)0x00040000) /* SCC2 @ SCCb */
+#define CICR_SCA_SCC1 ((uint)0x00000000) /* SCC1 @ SCCa */
+#define CICR_IRL_MASK ((uint)0x0000e000) /* Core interrupt */
+#define CICR_HP_MASK ((uint)0x00001f00) /* Hi-pri int. */
+#define CICR_IEN ((uint)0x00000080) /* Int. enable */
+#define CICR_SPS ((uint)0x00000001) /* SCC Spread */
+
+#define IMAP_ADDR (get_immrbase())
+
+#define CPM_PIN_INPUT 0
+#define CPM_PIN_OUTPUT 1
+#define CPM_PIN_PRIMARY 0
+#define CPM_PIN_SECONDARY 2
+#define CPM_PIN_GPIO 4
+#define CPM_PIN_OPENDRAIN 8
+
+enum cpm_port {
+ CPM_PORTA,
+ CPM_PORTB,
+ CPM_PORTC,
+ CPM_PORTD,
+ CPM_PORTE,
+};
+
+void cpm1_set_pin(enum cpm_port port, int pin, int flags);
+
+enum cpm_clk_dir {
+ CPM_CLK_RX,
+ CPM_CLK_TX,
+ CPM_CLK_RTX
+};
+
+enum cpm_clk_target {
+ CPM_CLK_SCC1,
+ CPM_CLK_SCC2,
+ CPM_CLK_SCC3,
+ CPM_CLK_SCC4,
+ CPM_CLK_SMC1,
+ CPM_CLK_SMC2,
+};
+
+enum cpm_clk {
+ CPM_BRG1, /* Baud Rate Generator 1 */
+ CPM_BRG2, /* Baud Rate Generator 2 */
+ CPM_BRG3, /* Baud Rate Generator 3 */
+ CPM_BRG4, /* Baud Rate Generator 4 */
+ CPM_CLK1, /* Clock 1 */
+ CPM_CLK2, /* Clock 2 */
+ CPM_CLK3, /* Clock 3 */
+ CPM_CLK4, /* Clock 4 */
+ CPM_CLK5, /* Clock 5 */
+ CPM_CLK6, /* Clock 6 */
+ CPM_CLK7, /* Clock 7 */
+ CPM_CLK8, /* Clock 8 */
+};
+
+int cpm1_clk_setup(enum cpm_clk_target target, int clock, int mode);
+
+#endif /* __CPM1__ */
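
Tying the pin, clock, and baud-rate helpers together, a board-setup sketch; the port-B pin numbers are board-specific assumptions, and BRG indexing is assumed to be zero-based:

	/* Hypothetical console wiring: BRG1 clocks SMC1 in both directions,
	 * and two port-B pins carry the SMC signals (pin numbers are illustrative). */
	static void example_setup_smc1_console(void)
	{
		cpm1_clk_setup(CPM_CLK_SMC1, CPM_BRG1, CPM_CLK_RTX);
		cpm1_set_pin(CPM_PORTB, 24, CPM_PIN_INPUT);	/* SMRXD1 (assumed) */
		cpm1_set_pin(CPM_PORTB, 25, CPM_PIN_INPUT);	/* SMTXD1 (assumed) */
		cpm_setbrg(0, 115200);	/* BRG indexing assumed zero-based */
	}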
diff --git a/arch/powerpc/include/asm/cpm2.h b/arch/powerpc/include/asm/cpm2.h
new file mode 100644
index 0000000..2a6fa01
--- /dev/null
+++ b/arch/powerpc/include/asm/cpm2.h
@@ -0,0 +1,1195 @@
+/*
+ * Communication Processor Module v2.
+ *
+ * This file contains structures and information for the communication
+ * processor channels found in the dual port RAM or parameter RAM.
+ * All CPM control and status is available through the CPM2 internal
+ * memory map. See immap_cpm2.h for details.
+ */
+#ifdef __KERNEL__
+#ifndef __CPM2__
+#define __CPM2__
+
+#include <asm/immap_cpm2.h>
+#include <asm/cpm.h>
+#include <sysdev/fsl_soc.h>
+
+#ifdef CONFIG_PPC_85xx
+#define CPM_MAP_ADDR (get_immrbase() + 0x80000)
+#endif
+
+/* CPM Command register.
+*/
+#define CPM_CR_RST ((uint)0x80000000)
+#define CPM_CR_PAGE ((uint)0x7c000000)
+#define CPM_CR_SBLOCK ((uint)0x03e00000)
+#define CPM_CR_FLG ((uint)0x00010000)
+#define CPM_CR_MCN ((uint)0x00003fc0)
+#define CPM_CR_OPCODE ((uint)0x0000000f)
+
+/* Device sub-block and page codes.
+*/
+#define CPM_CR_SCC1_SBLOCK (0x04)
+#define CPM_CR_SCC2_SBLOCK (0x05)
+#define CPM_CR_SCC3_SBLOCK (0x06)
+#define CPM_CR_SCC4_SBLOCK (0x07)
+#define CPM_CR_SMC1_SBLOCK (0x08)
+#define CPM_CR_SMC2_SBLOCK (0x09)
+#define CPM_CR_SPI_SBLOCK (0x0a)
+#define CPM_CR_I2C_SBLOCK (0x0b)
+#define CPM_CR_TIMER_SBLOCK (0x0f)
+#define CPM_CR_RAND_SBLOCK (0x0e)
+#define CPM_CR_FCC1_SBLOCK (0x10)
+#define CPM_CR_FCC2_SBLOCK (0x11)
+#define CPM_CR_FCC3_SBLOCK (0x12)
+#define CPM_CR_IDMA1_SBLOCK (0x14)
+#define CPM_CR_IDMA2_SBLOCK (0x15)
+#define CPM_CR_IDMA3_SBLOCK (0x16)
+#define CPM_CR_IDMA4_SBLOCK (0x17)
+#define CPM_CR_MCC1_SBLOCK (0x1c)
+
+#define CPM_CR_FCC_SBLOCK(x) (x + 0x10)
+
+#define CPM_CR_SCC1_PAGE (0x00)
+#define CPM_CR_SCC2_PAGE (0x01)
+#define CPM_CR_SCC3_PAGE (0x02)
+#define CPM_CR_SCC4_PAGE (0x03)
+#define CPM_CR_SMC1_PAGE (0x07)
+#define CPM_CR_SMC2_PAGE (0x08)
+#define CPM_CR_SPI_PAGE (0x09)
+#define CPM_CR_I2C_PAGE (0x0a)
+#define CPM_CR_TIMER_PAGE (0x0a)
+#define CPM_CR_RAND_PAGE (0x0a)
+#define CPM_CR_FCC1_PAGE (0x04)
+#define CPM_CR_FCC2_PAGE (0x05)
+#define CPM_CR_FCC3_PAGE (0x06)
+#define CPM_CR_IDMA1_PAGE (0x07)
+#define CPM_CR_IDMA2_PAGE (0x08)
+#define CPM_CR_IDMA3_PAGE (0x09)
+#define CPM_CR_IDMA4_PAGE (0x0a)
+#define CPM_CR_MCC1_PAGE (0x07)
+#define CPM_CR_MCC2_PAGE (0x08)
+
+#define CPM_CR_FCC_PAGE(x) (x + 0x04)
+
+/* CPM2-specific opcodes (see cpm.h for common opcodes)
+*/
+#define CPM_CR_START_IDMA ((ushort)0x0009)
+
+#define mk_cr_cmd(PG, SBC, MCN, OP) \
+ ((PG << 26) | (SBC << 21) | (MCN << 6) | OP)
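
Illustration only: a CPM2 command word composes a page code, a sub-block code, an MCN field (left at zero here), and an opcode; software sets CPM_CR_FLG and the CPM clears it on completion:

	/* Example value only: gracefully stop the FCC2 transmitter. */
	uint cmd = mk_cr_cmd(CPM_CR_FCC2_PAGE, CPM_CR_FCC2_SBLOCK, 0,
			     CPM_CR_GRA_STOP_TX) | CPM_CR_FLG;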
+
+/* The number of pages of host memory we allocate for CPM. This is
+ * done early in kernel initialization to get physically contiguous
+ * pages.
+ */
+#define NUM_CPM_HOST_PAGES 2
+
+/* Export the base address of the communication processor registers
+ * and dual port ram.
+ */
+extern cpm_cpm2_t __iomem *cpmp; /* Pointer to comm processor */
+
+#define cpm_dpalloc cpm_muram_alloc
+#define cpm_dpfree cpm_muram_free
+#define cpm_dpram_addr cpm_muram_addr
+
+extern void cpm2_reset(void);
+
+/* Baud rate generators.
+*/
+#define CPM_BRG_RST ((uint)0x00020000)
+#define CPM_BRG_EN ((uint)0x00010000)
+#define CPM_BRG_EXTC_INT ((uint)0x00000000)
+#define CPM_BRG_EXTC_CLK3_9 ((uint)0x00004000)
+#define CPM_BRG_EXTC_CLK5_15 ((uint)0x00008000)
+#define CPM_BRG_ATB ((uint)0x00002000)
+#define CPM_BRG_CD_MASK ((uint)0x00001ffe)
+#define CPM_BRG_DIV16 ((uint)0x00000001)
+
+#define CPM2_BRG_INT_CLK (get_brgfreq())
+#define CPM2_BRG_UART_CLK (CPM2_BRG_INT_CLK/16)
+
+extern void __cpm2_setbrg(uint brg, uint rate, uint clk, int div16, int src);
+
+/* This function is used by UARTs, or anything else that uses a 16x
+ * oversampled clock.
+ */
+static inline void cpm_setbrg(uint brg, uint rate)
+{
+ __cpm2_setbrg(brg, rate, CPM2_BRG_UART_CLK, 0, CPM_BRG_EXTC_INT);
+}
+
+/* This function is used to set high speed synchronous baud rate
+ * clocks.
+ */
+static inline void cpm2_fastbrg(uint brg, uint rate, int div16)
+{
+ __cpm2_setbrg(brg, rate, CPM2_BRG_INT_CLK, div16, CPM_BRG_EXTC_INT);
+}
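
For instance (an illustrative sketch; BRG indices assumed zero-based, rates chosen arbitrarily), a UART and a fast synchronous port might be clocked as follows:

	cpm_setbrg(0, 115200);		/* BRG1: 115200-baud UART, 16x clock */
	cpm2_fastbrg(1, 8000000, 0);	/* BRG2: 8 MHz synchronous clock, no /16 */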
+
+/* Function code bits, usually generic to devices.
+*/
+#define CPMFCR_GBL ((u_char)0x20) /* Set memory snooping */
+#define CPMFCR_EB ((u_char)0x10) /* Set big endian byte order */
+#define CPMFCR_TC2 ((u_char)0x04) /* Transfer code 2 value */
+#define CPMFCR_DTB ((u_char)0x02) /* Use local bus for data when set */
+#define CPMFCR_BDB ((u_char)0x01) /* Use local bus for BD when set */
+
+/* Parameter RAM offsets from the base.
+*/
+#define PROFF_SCC1 ((uint)0x8000)
+#define PROFF_SCC2 ((uint)0x8100)
+#define PROFF_SCC3 ((uint)0x8200)
+#define PROFF_SCC4 ((uint)0x8300)
+#define PROFF_FCC1 ((uint)0x8400)
+#define PROFF_FCC2 ((uint)0x8500)
+#define PROFF_FCC3 ((uint)0x8600)
+#define PROFF_MCC1 ((uint)0x8700)
+#define PROFF_SMC1_BASE ((uint)0x87fc)
+#define PROFF_IDMA1_BASE ((uint)0x87fe)
+#define PROFF_MCC2 ((uint)0x8800)
+#define PROFF_SMC2_BASE ((uint)0x88fc)
+#define PROFF_IDMA2_BASE ((uint)0x88fe)
+#define PROFF_SPI_BASE ((uint)0x89fc)
+#define PROFF_IDMA3_BASE ((uint)0x89fe)
+#define PROFF_TIMERS ((uint)0x8ae0)
+#define PROFF_REVNUM ((uint)0x8af0)
+#define PROFF_RAND ((uint)0x8af8)
+#define PROFF_I2C_BASE ((uint)0x8afc)
+#define PROFF_IDMA4_BASE ((uint)0x8afe)
+
+#define PROFF_SCC_SIZE ((uint)0x100)
+#define PROFF_FCC_SIZE ((uint)0x100)
+#define PROFF_SMC_SIZE ((uint)64)
+
+/* The SMCs are relocated to any of the first eight DPRAM pages.
+ * We will fix these at the first locations of DPRAM, until we
+ * get some microcode patches :-).
+ * The parameter ram space for the SMCs is fifty-some bytes, and
+ * they are required to start on a 64 byte boundary.
+ */
+#define PROFF_SMC1 (0)
+#define PROFF_SMC2 (64)
+
+
+/* Define enough so I can at least use the serial port as a UART.
+ */
+typedef struct smc_uart {
+ ushort smc_rbase; /* Rx Buffer descriptor base address */
+ ushort smc_tbase; /* Tx Buffer descriptor base address */
+ u_char smc_rfcr; /* Rx function code */
+ u_char smc_tfcr; /* Tx function code */
+ ushort smc_mrblr; /* Max receive buffer length */
+ uint smc_rstate; /* Internal */
+ uint smc_idp; /* Internal */
+ ushort smc_rbptr; /* Internal */
+ ushort smc_ibc; /* Internal */
+ uint smc_rxtmp; /* Internal */
+ uint smc_tstate; /* Internal */
+ uint smc_tdp; /* Internal */
+ ushort smc_tbptr; /* Internal */
+ ushort smc_tbc; /* Internal */
+ uint smc_txtmp; /* Internal */
+ ushort smc_maxidl; /* Maximum idle characters */
+ ushort smc_tmpidl; /* Temporary idle counter */
+ ushort smc_brklen; /* Last received break length */
+ ushort smc_brkec; /* rcv'd break condition counter */
+ ushort smc_brkcr; /* xmt break count register */
+ ushort smc_rmask; /* Temporary bit mask */
+ uint smc_stmp; /* SDMA Temp */
+} smc_uart_t;
+
+/* SMC uart mode register (Internal memory map).
+*/
+#define SMCMR_REN ((ushort)0x0001)
+#define SMCMR_TEN ((ushort)0x0002)
+#define SMCMR_DM ((ushort)0x000c)
+#define SMCMR_SM_GCI ((ushort)0x0000)
+#define SMCMR_SM_UART ((ushort)0x0020)
+#define SMCMR_SM_TRANS ((ushort)0x0030)
+#define SMCMR_SM_MASK ((ushort)0x0030)
+#define SMCMR_PM_EVEN ((ushort)0x0100) /* Even parity, else odd */
+#define SMCMR_REVD SMCMR_PM_EVEN
+#define SMCMR_PEN ((ushort)0x0200) /* Parity enable */
+#define SMCMR_BS SMCMR_PEN
+#define SMCMR_SL ((ushort)0x0400) /* Two stops, else one */
+#define SMCR_CLEN_MASK ((ushort)0x7800) /* Character length */
+#define smcr_mk_clen(C) (((C) << 11) & SMCR_CLEN_MASK)
+
+/* SMC Event and Mask register.
+*/
+#define SMCM_BRKE ((unsigned char)0x40) /* When in UART Mode */
+#define SMCM_BRK ((unsigned char)0x10) /* When in UART Mode */
+#define SMCM_TXE ((unsigned char)0x10)
+#define SMCM_BSY ((unsigned char)0x04)
+#define SMCM_TX ((unsigned char)0x02)
+#define SMCM_RX ((unsigned char)0x01)
+
+/* SCCs.
+*/
+#define SCC_GSMRH_IRP ((uint)0x00040000)
+#define SCC_GSMRH_GDE ((uint)0x00010000)
+#define SCC_GSMRH_TCRC_CCITT ((uint)0x00008000)
+#define SCC_GSMRH_TCRC_BISYNC ((uint)0x00004000)
+#define SCC_GSMRH_TCRC_HDLC ((uint)0x00000000)
+#define SCC_GSMRH_REVD ((uint)0x00002000)
+#define SCC_GSMRH_TRX ((uint)0x00001000)
+#define SCC_GSMRH_TTX ((uint)0x00000800)
+#define SCC_GSMRH_CDP ((uint)0x00000400)
+#define SCC_GSMRH_CTSP ((uint)0x00000200)
+#define SCC_GSMRH_CDS ((uint)0x00000100)
+#define SCC_GSMRH_CTSS ((uint)0x00000080)
+#define SCC_GSMRH_TFL ((uint)0x00000040)
+#define SCC_GSMRH_RFW ((uint)0x00000020)
+#define SCC_GSMRH_TXSY ((uint)0x00000010)
+#define SCC_GSMRH_SYNL16 ((uint)0x0000000c)
+#define SCC_GSMRH_SYNL8 ((uint)0x00000008)
+#define SCC_GSMRH_SYNL4 ((uint)0x00000004)
+#define SCC_GSMRH_RTSM ((uint)0x00000002)
+#define SCC_GSMRH_RSYN ((uint)0x00000001)
+
+#define SCC_GSMRL_SIR ((uint)0x80000000) /* SCC2 only */
+#define SCC_GSMRL_EDGE_NONE ((uint)0x60000000)
+#define SCC_GSMRL_EDGE_NEG ((uint)0x40000000)
+#define SCC_GSMRL_EDGE_POS ((uint)0x20000000)
+#define SCC_GSMRL_EDGE_BOTH ((uint)0x00000000)
+#define SCC_GSMRL_TCI ((uint)0x10000000)
+#define SCC_GSMRL_TSNC_3 ((uint)0x0c000000)
+#define SCC_GSMRL_TSNC_4 ((uint)0x08000000)
+#define SCC_GSMRL_TSNC_14 ((uint)0x04000000)
+#define SCC_GSMRL_TSNC_INF ((uint)0x00000000)
+#define SCC_GSMRL_RINV ((uint)0x02000000)
+#define SCC_GSMRL_TINV ((uint)0x01000000)
+#define SCC_GSMRL_TPL_128 ((uint)0x00c00000)
+#define SCC_GSMRL_TPL_64 ((uint)0x00a00000)
+#define SCC_GSMRL_TPL_48 ((uint)0x00800000)
+#define SCC_GSMRL_TPL_32 ((uint)0x00600000)
+#define SCC_GSMRL_TPL_16 ((uint)0x00400000)
+#define SCC_GSMRL_TPL_8 ((uint)0x00200000)
+#define SCC_GSMRL_TPL_NONE ((uint)0x00000000)
+#define SCC_GSMRL_TPP_ALL1 ((uint)0x00180000)
+#define SCC_GSMRL_TPP_01 ((uint)0x00100000)
+#define SCC_GSMRL_TPP_10 ((uint)0x00080000)
+#define SCC_GSMRL_TPP_ZEROS ((uint)0x00000000)
+#define SCC_GSMRL_TEND ((uint)0x00040000)
+#define SCC_GSMRL_TDCR_32 ((uint)0x00030000)
+#define SCC_GSMRL_TDCR_16 ((uint)0x00020000)
+#define SCC_GSMRL_TDCR_8 ((uint)0x00010000)
+#define SCC_GSMRL_TDCR_1 ((uint)0x00000000)
+#define SCC_GSMRL_RDCR_32 ((uint)0x0000c000)
+#define SCC_GSMRL_RDCR_16 ((uint)0x00008000)
+#define SCC_GSMRL_RDCR_8 ((uint)0x00004000)
+#define SCC_GSMRL_RDCR_1 ((uint)0x00000000)
+#define SCC_GSMRL_RENC_DFMAN ((uint)0x00003000)
+#define SCC_GSMRL_RENC_MANCH ((uint)0x00002000)
+#define SCC_GSMRL_RENC_FM0 ((uint)0x00001000)
+#define SCC_GSMRL_RENC_NRZI ((uint)0x00000800)
+#define SCC_GSMRL_RENC_NRZ ((uint)0x00000000)
+#define SCC_GSMRL_TENC_DFMAN ((uint)0x00000600)
+#define SCC_GSMRL_TENC_MANCH ((uint)0x00000400)
+#define SCC_GSMRL_TENC_FM0 ((uint)0x00000200)
+#define SCC_GSMRL_TENC_NRZI ((uint)0x00000100)
+#define SCC_GSMRL_TENC_NRZ ((uint)0x00000000)
+#define SCC_GSMRL_DIAG_LE ((uint)0x000000c0) /* Loop and echo */
+#define SCC_GSMRL_DIAG_ECHO ((uint)0x00000080)
+#define SCC_GSMRL_DIAG_LOOP ((uint)0x00000040)
+#define SCC_GSMRL_DIAG_NORM ((uint)0x00000000)
+#define SCC_GSMRL_ENR ((uint)0x00000020)
+#define SCC_GSMRL_ENT ((uint)0x00000010)
+#define SCC_GSMRL_MODE_ENET ((uint)0x0000000c)
+#define SCC_GSMRL_MODE_DDCMP ((uint)0x00000009)
+#define SCC_GSMRL_MODE_BISYNC ((uint)0x00000008)
+#define SCC_GSMRL_MODE_V14 ((uint)0x00000007)
+#define SCC_GSMRL_MODE_AHDLC ((uint)0x00000006)
+#define SCC_GSMRL_MODE_PROFIBUS ((uint)0x00000005)
+#define SCC_GSMRL_MODE_UART ((uint)0x00000004)
+#define SCC_GSMRL_MODE_SS7 ((uint)0x00000003)
+#define SCC_GSMRL_MODE_ATALK ((uint)0x00000002)
+#define SCC_GSMRL_MODE_HDLC ((uint)0x00000000)
+
+#define SCC_TODR_TOD ((ushort)0x8000)
+
+/* SCC Event and Mask register.
+*/
+#define SCCM_TXE ((unsigned char)0x10)
+#define SCCM_BSY ((unsigned char)0x04)
+#define SCCM_TX ((unsigned char)0x02)
+#define SCCM_RX ((unsigned char)0x01)
+
+typedef struct scc_param {
+ ushort scc_rbase; /* Rx Buffer descriptor base address */
+ ushort scc_tbase; /* Tx Buffer descriptor base address */
+ u_char scc_rfcr; /* Rx function code */
+ u_char scc_tfcr; /* Tx function code */
+ ushort scc_mrblr; /* Max receive buffer length */
+ uint scc_rstate; /* Internal */
+ uint scc_idp; /* Internal */
+ ushort scc_rbptr; /* Internal */
+ ushort scc_ibc; /* Internal */
+ uint scc_rxtmp; /* Internal */
+ uint scc_tstate; /* Internal */
+ uint scc_tdp; /* Internal */
+ ushort scc_tbptr; /* Internal */
+ ushort scc_tbc; /* Internal */
+ uint scc_txtmp; /* Internal */
+ uint scc_rcrc; /* Internal */
+ uint scc_tcrc; /* Internal */
+} sccp_t;
+
+/* CPM Ethernet through SCC1.
+ */
+typedef struct scc_enet {
+ sccp_t sen_genscc;
+ uint sen_cpres; /* Preset CRC */
+ uint sen_cmask; /* Constant mask for CRC */
+ uint sen_crcec; /* CRC Error counter */
+ uint sen_alec; /* alignment error counter */
+ uint sen_disfc; /* discard frame counter */
+ ushort sen_pads; /* Tx short frame pad character */
+ ushort sen_retlim; /* Retry limit threshold */
+ ushort sen_retcnt; /* Retry limit counter */
+ ushort sen_maxflr; /* maximum frame length register */
+ ushort sen_minflr; /* minimum frame length register */
+ ushort sen_maxd1; /* maximum DMA1 length */
+ ushort sen_maxd2; /* maximum DMA2 length */
+ ushort sen_maxd; /* Rx max DMA */
+ ushort sen_dmacnt; /* Rx DMA counter */
+ ushort sen_maxb; /* Max BD byte count */
+ ushort sen_gaddr1; /* Group address filter */
+ ushort sen_gaddr2;
+ ushort sen_gaddr3;
+ ushort sen_gaddr4;
+ uint sen_tbuf0data0; /* Save area 0 - current frame */
+ uint sen_tbuf0data1; /* Save area 1 - current frame */
+ uint sen_tbuf0rba; /* Internal */
+ uint sen_tbuf0crc; /* Internal */
+ ushort sen_tbuf0bcnt; /* Internal */
+ ushort sen_paddrh; /* physical address (MSB) */
+ ushort sen_paddrm;
+ ushort sen_paddrl; /* physical address (LSB) */
+ ushort sen_pper; /* persistence */
+ ushort sen_rfbdptr; /* Rx first BD pointer */
+ ushort sen_tfbdptr; /* Tx first BD pointer */
+ ushort sen_tlbdptr; /* Tx last BD pointer */
+ uint sen_tbuf1data0; /* Save area 0 - current frame */
+ uint sen_tbuf1data1; /* Save area 1 - current frame */
+ uint sen_tbuf1rba; /* Internal */
+ uint sen_tbuf1crc; /* Internal */
+ ushort sen_tbuf1bcnt; /* Internal */
+ ushort sen_txlen; /* Tx Frame length counter */
+ ushort sen_iaddr1; /* Individual address filter */
+ ushort sen_iaddr2;
+ ushort sen_iaddr3;
+ ushort sen_iaddr4;
+ ushort sen_boffcnt; /* Backoff counter */
+
+ /* NOTE: Some versions of the manual have the following items
+ * incorrectly documented. Below is the proper order.
+ */
+ ushort sen_taddrh; /* temp address (MSB) */
+ ushort sen_taddrm;
+ ushort sen_taddrl; /* temp address (LSB) */
+} scc_enet_t;
+
+
+/* SCC Event register as used by Ethernet.
+*/
+#define SCCE_ENET_GRA ((ushort)0x0080) /* Graceful stop complete */
+#define SCCE_ENET_TXE ((ushort)0x0010) /* Transmit Error */
+#define SCCE_ENET_RXF ((ushort)0x0008) /* Full frame received */
+#define SCCE_ENET_BSY ((ushort)0x0004) /* All incoming buffers full */
+#define SCCE_ENET_TXB ((ushort)0x0002) /* A buffer was transmitted */
+#define SCCE_ENET_RXB ((ushort)0x0001) /* A buffer was received */
+
+/* SCC Mode Register (PSMR) as used by Ethernet.
+*/
+#define SCC_PSMR_HBC ((ushort)0x8000) /* Enable heartbeat */
+#define SCC_PSMR_FC ((ushort)0x4000) /* Force collision */
+#define SCC_PSMR_RSH ((ushort)0x2000) /* Receive short frames */
+#define SCC_PSMR_IAM ((ushort)0x1000) /* Check individual hash */
+#define SCC_PSMR_ENCRC ((ushort)0x0800) /* Ethernet CRC mode */
+#define SCC_PSMR_PRO ((ushort)0x0200) /* Promiscuous mode */
+#define SCC_PSMR_BRO ((ushort)0x0100) /* Catch broadcast pkts */
+#define SCC_PSMR_SBT ((ushort)0x0080) /* Special backoff timer */
+#define SCC_PSMR_LPB ((ushort)0x0040) /* Set Loopback mode */
+#define SCC_PSMR_SIP ((ushort)0x0020) /* Sample Input Pins */
+#define SCC_PSMR_LCW ((ushort)0x0010) /* Late collision window */
+#define SCC_PSMR_NIB22 ((ushort)0x000a) /* Start frame search */
+#define SCC_PSMR_FDE ((ushort)0x0001) /* Full duplex enable */
+
+/* SCC as UART
+*/
+typedef struct scc_uart {
+ sccp_t scc_genscc;
+ uint scc_res1; /* Reserved */
+ uint scc_res2; /* Reserved */
+ ushort scc_maxidl; /* Maximum idle chars */
+ ushort scc_idlc; /* temp idle counter */
+ ushort scc_brkcr; /* Break count register */
+ ushort scc_parec; /* receive parity error counter */
+ ushort scc_frmec; /* receive framing error counter */
+ ushort scc_nosec; /* receive noise counter */
+ ushort scc_brkec; /* receive break condition counter */
+ ushort scc_brkln; /* last received break length */
+ ushort scc_uaddr1; /* UART address character 1 */
+ ushort scc_uaddr2; /* UART address character 2 */
+ ushort scc_rtemp; /* Temp storage */
+ ushort scc_toseq; /* Transmit out of sequence char */
+ ushort scc_char1; /* control character 1 */
+ ushort scc_char2; /* control character 2 */
+ ushort scc_char3; /* control character 3 */
+ ushort scc_char4; /* control character 4 */
+ ushort scc_char5; /* control character 5 */
+ ushort scc_char6; /* control character 6 */
+ ushort scc_char7; /* control character 7 */
+ ushort scc_char8; /* control character 8 */
+ ushort scc_rccm; /* receive control character mask */
+ ushort scc_rccr; /* receive control character register */
+ ushort scc_rlbc; /* receive last break character */
+} scc_uart_t;
+
+/* SCC Event and Mask registers when it is used as a UART.
+*/
+#define UART_SCCM_GLR ((ushort)0x1000)
+#define UART_SCCM_GLT ((ushort)0x0800)
+#define UART_SCCM_AB ((ushort)0x0200)
+#define UART_SCCM_IDL ((ushort)0x0100)
+#define UART_SCCM_GRA ((ushort)0x0080)
+#define UART_SCCM_BRKE ((ushort)0x0040)
+#define UART_SCCM_BRKS ((ushort)0x0020)
+#define UART_SCCM_CCR ((ushort)0x0008)
+#define UART_SCCM_BSY ((ushort)0x0004)
+#define UART_SCCM_TX ((ushort)0x0002)
+#define UART_SCCM_RX ((ushort)0x0001)
+
+/* The SCC PSMR when used as a UART.
+*/
+#define SCU_PSMR_FLC ((ushort)0x8000)
+#define SCU_PSMR_SL ((ushort)0x4000)
+#define SCU_PSMR_CL ((ushort)0x3000)
+#define SCU_PSMR_UM ((ushort)0x0c00)
+#define SCU_PSMR_FRZ ((ushort)0x0200)
+#define SCU_PSMR_RZS ((ushort)0x0100)
+#define SCU_PSMR_SYN ((ushort)0x0080)
+#define SCU_PSMR_DRT ((ushort)0x0040)
+#define SCU_PSMR_PEN ((ushort)0x0010)
+#define SCU_PSMR_RPM ((ushort)0x000c)
+#define SCU_PSMR_REVP ((ushort)0x0008)
+#define SCU_PSMR_TPM ((ushort)0x0003)
+#define SCU_PSMR_TEVP ((ushort)0x0002)
+
+/* CPM Transparent mode SCC.
+ */
+typedef struct scc_trans {
+ sccp_t st_genscc;
+ uint st_cpres; /* Preset CRC */
+ uint st_cmask; /* Constant mask for CRC */
+} scc_trans_t;
+
+/* How about some FCCs.....
+*/
+#define FCC_GFMR_DIAG_NORM ((uint)0x00000000)
+#define FCC_GFMR_DIAG_LE ((uint)0x40000000)
+#define FCC_GFMR_DIAG_AE ((uint)0x80000000)
+#define FCC_GFMR_DIAG_ALE ((uint)0xc0000000)
+#define FCC_GFMR_TCI ((uint)0x20000000)
+#define FCC_GFMR_TRX ((uint)0x10000000)
+#define FCC_GFMR_TTX ((uint)0x08000000)
+#define FCC_GFMR_CDP ((uint)0x04000000)
+#define FCC_GFMR_CTSP ((uint)0x02000000)
+#define FCC_GFMR_CDS ((uint)0x01000000)
+#define FCC_GFMR_CTSS ((uint)0x00800000)
+#define FCC_GFMR_SYNL_NONE ((uint)0x00000000)
+#define FCC_GFMR_SYNL_AUTO ((uint)0x00004000)
+#define FCC_GFMR_SYNL_8 ((uint)0x00008000)
+#define FCC_GFMR_SYNL_16 ((uint)0x0000c000)
+#define FCC_GFMR_RTSM ((uint)0x00002000)
+#define FCC_GFMR_RENC_NRZ ((uint)0x00000000)
+#define FCC_GFMR_RENC_NRZI ((uint)0x00000800)
+#define FCC_GFMR_REVD ((uint)0x00000400)
+#define FCC_GFMR_TENC_NRZ ((uint)0x00000000)
+#define FCC_GFMR_TENC_NRZI ((uint)0x00000100)
+#define FCC_GFMR_TCRC_16 ((uint)0x00000000)
+#define FCC_GFMR_TCRC_32 ((uint)0x00000080)
+#define FCC_GFMR_ENR ((uint)0x00000020)
+#define FCC_GFMR_ENT ((uint)0x00000010)
+#define FCC_GFMR_MODE_ENET ((uint)0x0000000c)
+#define FCC_GFMR_MODE_ATM ((uint)0x0000000a)
+#define FCC_GFMR_MODE_HDLC ((uint)0x00000000)
+
+/* Generic FCC parameter ram.
+*/
+typedef struct fcc_param {
+ ushort fcc_riptr; /* Rx Internal temp pointer */
+ ushort fcc_tiptr; /* Tx Internal temp pointer */
+ ushort fcc_res1;
+ ushort fcc_mrblr; /* Max receive buffer length, mod 32 bytes */
+ uint fcc_rstate; /* Upper byte is Func code, must be set */
+ uint fcc_rbase; /* Receive BD base */
+ ushort fcc_rbdstat; /* RxBD status */
+ ushort fcc_rbdlen; /* RxBD down counter */
+ uint fcc_rdptr; /* RxBD internal data pointer */
+ uint fcc_tstate; /* Upper byte is Func code, must be set */
+ uint fcc_tbase; /* Transmit BD base */
+ ushort fcc_tbdstat; /* TxBD status */
+ ushort fcc_tbdlen; /* TxBD down counter */
+ uint fcc_tdptr; /* TxBD internal data pointer */
+ uint fcc_rbptr; /* Rx BD Internal buf pointer */
+ uint fcc_tbptr; /* Tx BD Internal buf pointer */
+ uint fcc_rcrc; /* Rx temp CRC */
+ uint fcc_res2;
+ uint fcc_tcrc; /* Tx temp CRC */
+} fccp_t;
+
+
+/* Ethernet controller through FCC.
+*/
+typedef struct fcc_enet {
+ fccp_t fen_genfcc;
+ uint fen_statbuf; /* Internal status buffer */
+ uint fen_camptr; /* CAM address */
+ uint fen_cmask; /* Constant mask for CRC */
+ uint fen_cpres; /* Preset CRC */
+ uint fen_crcec; /* CRC Error counter */
+ uint fen_alec; /* alignment error counter */
+ uint fen_disfc; /* discard frame counter */
+ ushort fen_retlim; /* Retry limit */
+ ushort fen_retcnt; /* Retry counter */
+ ushort fen_pper; /* Persistence */
+ ushort fen_boffcnt; /* backoff counter */
+ uint fen_gaddrh; /* Group address filter, high 32-bits */
+ uint fen_gaddrl; /* Group address filter, low 32-bits */
+ ushort fen_tfcstat; /* out of sequence TxBD */
+ ushort fen_tfclen;
+ uint fen_tfcptr;
+ ushort fen_mflr; /* Maximum frame length (1518) */
+ ushort fen_paddrh; /* MAC address */
+ ushort fen_paddrm;
+ ushort fen_paddrl;
+ ushort fen_ibdcount; /* Internal BD counter */
+ ushort fen_ibdstart; /* Internal BD start pointer */
+ ushort fen_ibdend; /* Internal BD end pointer */
+ ushort fen_txlen; /* Internal Tx frame length counter */
+ uint fen_ibdbase[8]; /* Internal use */
+ uint fen_iaddrh; /* Individual address filter */
+ uint fen_iaddrl;
+ ushort fen_minflr; /* Minimum frame length (64) */
+ ushort fen_taddrh; /* Filter transfer MAC address */
+ ushort fen_taddrm;
+ ushort fen_taddrl;
+ ushort fen_padptr; /* Pointer to pad byte buffer */
+ ushort fen_cftype; /* control frame type */
+ ushort fen_cfrange; /* control frame range */
+ ushort fen_maxb; /* maximum BD count */
+ ushort fen_maxd1; /* Max DMA1 length (1520) */
+ ushort fen_maxd2; /* Max DMA2 length (1520) */
+ ushort fen_maxd; /* internal max DMA count */
+ ushort fen_dmacnt; /* internal DMA counter */
+ uint fen_octc; /* Total octet counter */
+ uint fen_colc; /* Total collision counter */
+ uint fen_broc; /* Total broadcast packet counter */
+ uint fen_mulc; /* Total multicast packet count */
+ uint fen_uspc; /* Total packets < 64 bytes */
+ uint fen_frgc; /* Total packets < 64 bytes with errors */
+ uint fen_ospc; /* Total packets > 1518 */
+ uint fen_jbrc; /* Total packets > 1518 with errors */
+ uint fen_p64c; /* Total packets == 64 bytes */
+ uint fen_p65c; /* Total packets 64 < bytes <= 127 */
+ uint fen_p128c; /* Total packets 127 < bytes <= 255 */
+ uint fen_p256c; /* Total packets 256 < bytes <= 511 */
+ uint fen_p512c; /* Total packets 512 < bytes <= 1023 */
+ uint fen_p1024c; /* Total packets 1024 < bytes <= 1518 */
+ uint fen_cambuf; /* Internal CAM buffer pointer */
+ ushort fen_rfthr; /* Received frames threshold */
+ ushort fen_rfcnt; /* Received frames count */
+} fcc_enet_t;
+
+/* FCC Event/Mask register as used by Ethernet.
+*/
+#define FCC_ENET_GRA ((ushort)0x0080) /* Graceful stop complete */
+#define FCC_ENET_RXC ((ushort)0x0040) /* Control Frame Received */
+#define FCC_ENET_TXC ((ushort)0x0020) /* Out of seq. Tx sent */
+#define FCC_ENET_TXE ((ushort)0x0010) /* Transmit Error */
+#define FCC_ENET_RXF ((ushort)0x0008) /* Full frame received */
+#define FCC_ENET_BSY ((ushort)0x0004) /* Busy. Rx Frame dropped */
+#define FCC_ENET_TXB ((ushort)0x0002) /* A buffer was transmitted */
+#define FCC_ENET_RXB ((ushort)0x0001) /* A buffer was received */
+
+/* FCC Mode Register (FPSMR) as used by Ethernet.
+*/
+#define FCC_PSMR_HBC ((uint)0x80000000) /* Enable heartbeat */
+#define FCC_PSMR_FC ((uint)0x40000000) /* Force Collision */
+#define FCC_PSMR_SBT ((uint)0x20000000) /* Stop backoff timer */
+#define FCC_PSMR_LPB ((uint)0x10000000) /* Local protect. 1 = FDX */
+#define FCC_PSMR_LCW ((uint)0x08000000) /* Late collision select */
+#define FCC_PSMR_FDE ((uint)0x04000000) /* Full Duplex Enable */
+#define FCC_PSMR_MON ((uint)0x02000000) /* RMON Enable */
+#define FCC_PSMR_PRO ((uint)0x00400000) /* Promiscuous Enable */
+#define FCC_PSMR_FCE ((uint)0x00200000) /* Flow Control Enable */
+#define FCC_PSMR_RSH ((uint)0x00100000) /* Receive Short Frames */
+#define FCC_PSMR_CAM ((uint)0x00000400) /* CAM enable */
+#define FCC_PSMR_BRO ((uint)0x00000200) /* Broadcast pkt discard */
+#define FCC_PSMR_ENCRC ((uint)0x00000080) /* Use 32-bit CRC */
+
+/* IIC parameter RAM.
+*/
+typedef struct iic {
+ ushort iic_rbase; /* Rx Buffer descriptor base address */
+ ushort iic_tbase; /* Tx Buffer descriptor base address */
+ u_char iic_rfcr; /* Rx function code */
+ u_char iic_tfcr; /* Tx function code */
+ ushort iic_mrblr; /* Max receive buffer length */
+ uint iic_rstate; /* Internal */
+ uint iic_rdp; /* Internal */
+ ushort iic_rbptr; /* Internal */
+ ushort iic_rbc; /* Internal */
+ uint iic_rxtmp; /* Internal */
+ uint iic_tstate; /* Internal */
+ uint iic_tdp; /* Internal */
+ ushort iic_tbptr; /* Internal */
+ ushort iic_tbc; /* Internal */
+ uint iic_txtmp; /* Internal */
+} iic_t;
+
+/* SPI parameter RAM.
+*/
+typedef struct spi {
+ ushort spi_rbase; /* Rx Buffer descriptor base address */
+ ushort spi_tbase; /* Tx Buffer descriptor base address */
+ u_char spi_rfcr; /* Rx function code */
+ u_char spi_tfcr; /* Tx function code */
+ ushort spi_mrblr; /* Max receive buffer length */
+ uint spi_rstate; /* Internal */
+ uint spi_rdp; /* Internal */
+ ushort spi_rbptr; /* Internal */
+ ushort spi_rbc; /* Internal */
+ uint spi_rxtmp; /* Internal */
+ uint spi_tstate; /* Internal */
+ uint spi_tdp; /* Internal */
+ ushort spi_tbptr; /* Internal */
+ ushort spi_tbc; /* Internal */
+ uint spi_txtmp; /* Internal */
+ uint spi_res; /* Tx temp. */
+ uint spi_res1[4]; /* SDMA temp. */
+} spi_t;
+
+/* SPI Mode register.
+*/
+#define SPMODE_LOOP ((ushort)0x4000) /* Loopback */
+#define SPMODE_CI ((ushort)0x2000) /* Clock Invert */
+#define SPMODE_CP ((ushort)0x1000) /* Clock Phase */
+#define SPMODE_DIV16 ((ushort)0x0800) /* BRG/16 mode */
+#define SPMODE_REV ((ushort)0x0400) /* Reversed Data */
+#define SPMODE_MSTR ((ushort)0x0200) /* SPI Master */
+#define SPMODE_EN ((ushort)0x0100) /* Enable */
+#define SPMODE_LENMSK ((ushort)0x00f0) /* character length */
+#define SPMODE_PMMSK ((ushort)0x000f) /* prescale modulus */
+
+#define SPMODE_LEN(x) ((((x)-1)&0xF)<<4)
+#define SPMODE_PM(x) ((x) &0xF)
+
+#define SPI_EB ((u_char)0x10) /* big endian byte order */
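
A small illustration of the helper macros above (values chosen arbitrarily):

	/* Example value only: SPI master, 8-bit characters, enabled, prescale modulus 4. */
	ushort spmode = SPMODE_EN | SPMODE_MSTR | SPMODE_LEN(8) | SPMODE_PM(4);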
+
+/* IDMA parameter RAM
+*/
+typedef struct idma {
+ ushort ibase; /* IDMA buffer descriptor table base address */
+ ushort dcm; /* DMA channel mode */
+ ushort ibdptr; /* IDMA current buffer descriptor pointer */
+ ushort dpr_buf; /* IDMA transfer buffer base address */
+ ushort buf_inv; /* internal buffer inventory */
+ ushort ss_max; /* steady-state maximum transfer size */
+ ushort dpr_in_ptr; /* write pointer inside the internal buffer */
+ ushort sts; /* source transfer size */
+ ushort dpr_out_ptr; /* read pointer inside the internal buffer */
+ ushort seob; /* source end of burst */
+ ushort deob; /* destination end of burst */
+ ushort dts; /* destination transfer size */
+ ushort ret_add; /* return address when working in ERM=1 mode */
+ ushort res0; /* reserved */
+ uint bd_cnt; /* internal byte count */
+ uint s_ptr; /* source internal data pointer */
+ uint d_ptr; /* destination internal data pointer */
+ uint istate; /* internal state */
+ u_char res1[20]; /* pad to 64-byte length */
+} idma_t;
+
+/* DMA channel mode bit fields
+*/
+#define IDMA_DCM_FB ((ushort)0x8000) /* fly-by mode */
+#define IDMA_DCM_LP ((ushort)0x4000) /* low priority */
+#define IDMA_DCM_TC2 ((ushort)0x0400) /* value driven on TC[2] */
+#define IDMA_DCM_DMA_WRAP_MASK ((ushort)0x01c0) /* mask for DMA wrap */
+#define IDMA_DCM_DMA_WRAP_64 ((ushort)0x0000) /* 64-byte DMA xfer buffer */
+#define IDMA_DCM_DMA_WRAP_128 ((ushort)0x0040) /* 128-byte DMA xfer buffer */
+#define IDMA_DCM_DMA_WRAP_256 ((ushort)0x0080) /* 256-byte DMA xfer buffer */
+#define IDMA_DCM_DMA_WRAP_512 ((ushort)0x00c0) /* 512-byte DMA xfer buffer */
+#define IDMA_DCM_DMA_WRAP_1024 ((ushort)0x0100) /* 1024-byte DMA xfer buffer */
+#define IDMA_DCM_DMA_WRAP_2048 ((ushort)0x0140) /* 2048-byte DMA xfer buffer */
+#define IDMA_DCM_SINC ((ushort)0x0020) /* source inc addr */
+#define IDMA_DCM_DINC ((ushort)0x0010) /* destination inc addr */
+#define IDMA_DCM_ERM ((ushort)0x0008) /* external request mode */
+#define IDMA_DCM_DT ((ushort)0x0004) /* DONE treatment */
+#define IDMA_DCM_SD_MASK ((ushort)0x0003) /* mask for SD bit field */
+#define IDMA_DCM_SD_MEM2MEM ((ushort)0x0000) /* memory-to-memory xfer */
+#define IDMA_DCM_SD_PER2MEM ((ushort)0x0002) /* peripheral-to-memory xfer */
+#define IDMA_DCM_SD_MEM2PER ((ushort)0x0001) /* memory-to-peripheral xfer */
+
+/* IDMA Buffer Descriptors
+*/
+typedef struct idma_bd {
+ uint flags;
+ uint len; /* data length */
+ uint src; /* source data buffer pointer */
+ uint dst; /* destination data buffer pointer */
+} idma_bd_t;
+
+/* IDMA buffer descriptor flag bit fields
+*/
+#define IDMA_BD_V ((uint)0x80000000) /* valid */
+#define IDMA_BD_W ((uint)0x20000000) /* wrap */
+#define IDMA_BD_I ((uint)0x10000000) /* interrupt */
+#define IDMA_BD_L ((uint)0x08000000) /* last */
+#define IDMA_BD_CM ((uint)0x02000000) /* continuous mode */
+#define IDMA_BD_SDN ((uint)0x00400000) /* source done */
+#define IDMA_BD_DDN ((uint)0x00200000) /* destination done */
+#define IDMA_BD_DGBL ((uint)0x00100000) /* destination global */
+#define IDMA_BD_DBO_LE ((uint)0x00040000) /* little-end dest byte order */
+#define IDMA_BD_DBO_BE ((uint)0x00080000) /* big-end dest byte order */
+#define IDMA_BD_DDTB ((uint)0x00010000) /* destination data bus */
+#define IDMA_BD_SGBL ((uint)0x00002000) /* source global */
+#define IDMA_BD_SBO_LE ((uint)0x00000800) /* little-end src byte order */
+#define IDMA_BD_SBO_BE ((uint)0x00001000) /* big-end src byte order */
+#define IDMA_BD_SDTB ((uint)0x00000200) /* source data bus */
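+
+/* Example (illustrative only): the final descriptor of a ring that should
+ * raise an interrupt when it completes would carry
+ *
+ *	IDMA_BD_V | IDMA_BD_I | IDMA_BD_L | IDMA_BD_W
+ *
+ * i.e. valid, interrupt on completion, last, and wrap back to the start of
+ * the descriptor table.
+ */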
+
+/* per-channel IDMA registers
+*/
+typedef struct im_idma {
+ u_char idsr; /* IDMAn event status register */
+ u_char res0[3];
+ u_char idmr; /* IDMAn event mask register */
+ u_char res1[3];
+} im_idma_t;
+
+/* IDMA event register bit fields
+*/
+#define IDMA_EVENT_SC ((unsigned char)0x08) /* stop completed */
+#define IDMA_EVENT_OB ((unsigned char)0x04) /* out of buffers */
+#define IDMA_EVENT_EDN ((unsigned char)0x02) /* external DONE asserted */
+#define IDMA_EVENT_BC ((unsigned char)0x01) /* buffer descriptor complete */
+
+/* RISC Controller Configuration Register (RCCR) bit fields
+*/
+#define RCCR_TIME ((uint)0x80000000) /* timer enable */
+#define RCCR_TIMEP_MASK ((uint)0x3f000000) /* mask for timer period bit field */
+#define RCCR_DR0M ((uint)0x00800000) /* IDMA0 request mode */
+#define RCCR_DR1M ((uint)0x00400000) /* IDMA1 request mode */
+#define RCCR_DR2M ((uint)0x00000080) /* IDMA2 request mode */
+#define RCCR_DR3M ((uint)0x00000040) /* IDMA3 request mode */
+#define RCCR_DR0QP_MASK ((uint)0x00300000) /* mask for IDMA0 req priority */
+#define RCCR_DR0QP_HIGH ((uint)0x00000000) /* IDMA0 has high req priority */
+#define RCCR_DR0QP_MED ((uint)0x00100000) /* IDMA0 has medium req priority */
+#define RCCR_DR0QP_LOW ((uint)0x00200000) /* IDMA0 has low req priority */
+#define RCCR_DR1QP_MASK ((uint)0x00030000) /* mask for IDMA1 req priority */
+#define RCCR_DR1QP_HIGH ((uint)0x00000000) /* IDMA1 has high req priority */
+#define RCCR_DR1QP_MED ((uint)0x00010000) /* IDMA1 has medium req priority */
+#define RCCR_DR1QP_LOW ((uint)0x00020000) /* IDMA1 has low req priority */
+#define RCCR_DR2QP_MASK ((uint)0x00000030) /* mask for IDMA2 req priority */
+#define RCCR_DR2QP_HIGH ((uint)0x00000000) /* IDMA2 has high req priority */
+#define RCCR_DR2QP_MED ((uint)0x00000010) /* IDMA2 has medium req priority */
+#define RCCR_DR2QP_LOW ((uint)0x00000020) /* IDMA2 has low req priority */
+#define RCCR_DR3QP_MASK ((uint)0x00000003) /* mask for IDMA3 req priority */
+#define RCCR_DR3QP_HIGH ((uint)0x00000000) /* IDMA3 has high req priority */
+#define RCCR_DR3QP_MED ((uint)0x00000001) /* IDMA3 has medium req priority */
+#define RCCR_DR3QP_LOW ((uint)0x00000002) /* IDMA3 has low req priority */
+#define RCCR_EIE ((uint)0x00080000) /* external interrupt enable */
+#define RCCR_SCD ((uint)0x00040000) /* scheduler configuration */
+#define RCCR_ERAM_MASK ((uint)0x0000e000) /* mask for enable RAM microcode */
+#define RCCR_ERAM_0KB ((uint)0x00000000) /* use 0KB of dpram for microcode */
+#define RCCR_ERAM_2KB ((uint)0x00002000) /* use 2KB of dpram for microcode */
+#define RCCR_ERAM_4KB ((uint)0x00004000) /* use 4KB of dpram for microcode */
+#define RCCR_ERAM_6KB ((uint)0x00006000) /* use 6KB of dpram for microcode */
+#define RCCR_ERAM_8KB ((uint)0x00008000) /* use 8KB of dpram for microcode */
+#define RCCR_ERAM_10KB ((uint)0x0000a000) /* use 10KB of dpram for microcode */
+#define RCCR_ERAM_12KB ((uint)0x0000c000) /* use 12KB of dpram for microcode */
+#define RCCR_EDM0 ((uint)0x00000800) /* DREQ0 edge detect mode */
+#define RCCR_EDM1 ((uint)0x00000400) /* DREQ1 edge detect mode */
+#define RCCR_EDM2 ((uint)0x00000200) /* DREQ2 edge detect mode */
+#define RCCR_EDM3 ((uint)0x00000100) /* DREQ3 edge detect mode */
+#define RCCR_DEM01 ((uint)0x00000008) /* DONE0/DONE1 edge detect mode */
+#define RCCR_DEM23 ((uint)0x00000004) /* DONE2/DONE3 edge detect mode */
+
+/*-----------------------------------------------------------------------
+ * CMXFCR - CMX FCC Clock Route Register
+ */
+#define CMXFCR_FC1 0x40000000 /* FCC1 connection */
+#define CMXFCR_RF1CS_MSK 0x38000000 /* Receive FCC1 Clock Source Mask */
+#define CMXFCR_TF1CS_MSK 0x07000000 /* Transmit FCC1 Clock Source Mask */
+#define CMXFCR_FC2 0x00400000 /* FCC2 connection */
+#define CMXFCR_RF2CS_MSK 0x00380000 /* Receive FCC2 Clock Source Mask */
+#define CMXFCR_TF2CS_MSK 0x00070000 /* Transmit FCC2 Clock Source Mask */
+#define CMXFCR_FC3 0x00004000 /* FCC3 connection */
+#define CMXFCR_RF3CS_MSK 0x00003800 /* Receive FCC3 Clock Source Mask */
+#define CMXFCR_TF3CS_MSK 0x00000700 /* Transmit FCC3 Clock Source Mask */
+
+#define CMXFCR_RF1CS_BRG5 0x00000000 /* Receive FCC1 Clock Source is BRG5 */
+#define CMXFCR_RF1CS_BRG6 0x08000000 /* Receive FCC1 Clock Source is BRG6 */
+#define CMXFCR_RF1CS_BRG7 0x10000000 /* Receive FCC1 Clock Source is BRG7 */
+#define CMXFCR_RF1CS_BRG8 0x18000000 /* Receive FCC1 Clock Source is BRG8 */
+#define CMXFCR_RF1CS_CLK9 0x20000000 /* Receive FCC1 Clock Source is CLK9 */
+#define CMXFCR_RF1CS_CLK10 0x28000000 /* Receive FCC1 Clock Source is CLK10 */
+#define CMXFCR_RF1CS_CLK11 0x30000000 /* Receive FCC1 Clock Source is CLK11 */
+#define CMXFCR_RF1CS_CLK12 0x38000000 /* Receive FCC1 Clock Source is CLK12 */
+
+#define CMXFCR_TF1CS_BRG5 0x00000000 /* Transmit FCC1 Clock Source is BRG5 */
+#define CMXFCR_TF1CS_BRG6 0x01000000 /* Transmit FCC1 Clock Source is BRG6 */
+#define CMXFCR_TF1CS_BRG7 0x02000000 /* Transmit FCC1 Clock Source is BRG7 */
+#define CMXFCR_TF1CS_BRG8 0x03000000 /* Transmit FCC1 Clock Source is BRG8 */
+#define CMXFCR_TF1CS_CLK9 0x04000000 /* Transmit FCC1 Clock Source is CLK9 */
+#define CMXFCR_TF1CS_CLK10 0x05000000 /* Transmit FCC1 Clock Source is CLK10 */
+#define CMXFCR_TF1CS_CLK11 0x06000000 /* Transmit FCC1 Clock Source is CLK11 */
+#define CMXFCR_TF1CS_CLK12 0x07000000 /* Transmit FCC1 Clock Source is CLK12 */
+
+#define CMXFCR_RF2CS_BRG5 0x00000000 /* Receive FCC2 Clock Source is BRG5 */
+#define CMXFCR_RF2CS_BRG6 0x00080000 /* Receive FCC2 Clock Source is BRG6 */
+#define CMXFCR_RF2CS_BRG7 0x00100000 /* Receive FCC2 Clock Source is BRG7 */
+#define CMXFCR_RF2CS_BRG8 0x00180000 /* Receive FCC2 Clock Source is BRG8 */
+#define CMXFCR_RF2CS_CLK13 0x00200000 /* Receive FCC2 Clock Source is CLK13 */
+#define CMXFCR_RF2CS_CLK14 0x00280000 /* Receive FCC2 Clock Source is CLK14 */
+#define CMXFCR_RF2CS_CLK15 0x00300000 /* Receive FCC2 Clock Source is CLK15 */
+#define CMXFCR_RF2CS_CLK16 0x00380000 /* Receive FCC2 Clock Source is CLK16 */
+
+#define CMXFCR_TF2CS_BRG5 0x00000000 /* Transmit FCC2 Clock Source is BRG5 */
+#define CMXFCR_TF2CS_BRG6 0x00010000 /* Transmit FCC2 Clock Source is BRG6 */
+#define CMXFCR_TF2CS_BRG7 0x00020000 /* Transmit FCC2 Clock Source is BRG7 */
+#define CMXFCR_TF2CS_BRG8 0x00030000 /* Transmit FCC2 Clock Source is BRG8 */
+#define CMXFCR_TF2CS_CLK13 0x00040000 /* Transmit FCC2 Clock Source is CLK13 */
+#define CMXFCR_TF2CS_CLK14 0x00050000 /* Transmit FCC2 Clock Source is CLK14 */
+#define CMXFCR_TF2CS_CLK15 0x00060000 /* Transmit FCC2 Clock Source is CLK15 */
+#define CMXFCR_TF2CS_CLK16 0x00070000 /* Transmit FCC2 Clock Source is CLK16 */
+
+#define CMXFCR_RF3CS_BRG5 0x00000000 /* Receive FCC3 Clock Source is BRG5 */
+#define CMXFCR_RF3CS_BRG6 0x00000800 /* Receive FCC3 Clock Source is BRG6 */
+#define CMXFCR_RF3CS_BRG7 0x00001000 /* Receive FCC3 Clock Source is BRG7 */
+#define CMXFCR_RF3CS_BRG8 0x00001800 /* Receive FCC3 Clock Source is BRG8 */
+#define CMXFCR_RF3CS_CLK13 0x00002000 /* Receive FCC3 Clock Source is CLK13 */
+#define CMXFCR_RF3CS_CLK14 0x00002800 /* Receive FCC3 Clock Source is CLK14 */
+#define CMXFCR_RF3CS_CLK15 0x00003000 /* Receive FCC3 Clock Source is CLK15 */
+#define CMXFCR_RF3CS_CLK16 0x00003800 /* Receive FCC3 Clock Source is CLK16 */
+
+#define CMXFCR_TF3CS_BRG5 0x00000000 /* Transmit FCC3 Clock Source is BRG5 */
+#define CMXFCR_TF3CS_BRG6 0x00000100 /* Transmit FCC3 Clock Source is BRG6 */
+#define CMXFCR_TF3CS_BRG7 0x00000200 /* Transmit FCC3 Clock Source is BRG7 */
+#define CMXFCR_TF3CS_BRG8 0x00000300 /* Transmit FCC3 Clock Source is BRG8 */
+#define CMXFCR_TF3CS_CLK13 0x00000400 /* Transmit FCC3 Clock Source is CLK13 */
+#define CMXFCR_TF3CS_CLK14 0x00000500 /* Transmit FCC3 Clock Source is CLK14 */
+#define CMXFCR_TF3CS_CLK15 0x00000600 /* Transmit FCC3 Clock Source is CLK15 */
+#define CMXFCR_TF3CS_CLK16 0x00000700 /* Transmit FCC3 Clock Source is CLK16 */
+
+/*-----------------------------------------------------------------------
+ * CMXSCR - CMX SCC Clock Route Register
+ */
+#define CMXSCR_GR1 0x80000000 /* Grant Support of SCC1 */
+#define CMXSCR_SC1 0x40000000 /* SCC1 connection */
+#define CMXSCR_RS1CS_MSK 0x38000000 /* Receive SCC1 Clock Source Mask */
+#define CMXSCR_TS1CS_MSK 0x07000000 /* Transmit SCC1 Clock Source Mask */
+#define CMXSCR_GR2 0x00800000 /* Grant Support of SCC2 */
+#define CMXSCR_SC2 0x00400000 /* SCC2 connection */
+#define CMXSCR_RS2CS_MSK 0x00380000 /* Receive SCC2 Clock Source Mask */
+#define CMXSCR_TS2CS_MSK 0x00070000 /* Transmit SCC2 Clock Source Mask */
+#define CMXSCR_GR3 0x00008000 /* Grant Support of SCC3 */
+#define CMXSCR_SC3 0x00004000 /* SCC3 connection */
+#define CMXSCR_RS3CS_MSK 0x00003800 /* Receive SCC3 Clock Source Mask */
+#define CMXSCR_TS3CS_MSK 0x00000700 /* Transmit SCC3 Clock Source Mask */
+#define CMXSCR_GR4 0x00000080 /* Grant Support of SCC4 */
+#define CMXSCR_SC4 0x00000040 /* SCC4 connection */
+#define CMXSCR_RS4CS_MSK 0x00000038 /* Receive SCC4 Clock Source Mask */
+#define CMXSCR_TS4CS_MSK 0x00000007 /* Transmit SCC4 Clock Source Mask */
+
+#define CMXSCR_RS1CS_BRG1 0x00000000 /* SCC1 Rx Clock Source is BRG1 */
+#define CMXSCR_RS1CS_BRG2 0x08000000 /* SCC1 Rx Clock Source is BRG2 */
+#define CMXSCR_RS1CS_BRG3 0x10000000 /* SCC1 Rx Clock Source is BRG3 */
+#define CMXSCR_RS1CS_BRG4 0x18000000 /* SCC1 Rx Clock Source is BRG4 */
+#define CMXSCR_RS1CS_CLK11 0x20000000 /* SCC1 Rx Clock Source is CLK11 */
+#define CMXSCR_RS1CS_CLK12 0x28000000 /* SCC1 Rx Clock Source is CLK12 */
+#define CMXSCR_RS1CS_CLK3 0x30000000 /* SCC1 Rx Clock Source is CLK3 */
+#define CMXSCR_RS1CS_CLK4 0x38000000 /* SCC1 Rx Clock Source is CLK4 */
+
+#define CMXSCR_TS1CS_BRG1 0x00000000 /* SCC1 Tx Clock Source is BRG1 */
+#define CMXSCR_TS1CS_BRG2 0x01000000 /* SCC1 Tx Clock Source is BRG2 */
+#define CMXSCR_TS1CS_BRG3 0x02000000 /* SCC1 Tx Clock Source is BRG3 */
+#define CMXSCR_TS1CS_BRG4 0x03000000 /* SCC1 Tx Clock Source is BRG4 */
+#define CMXSCR_TS1CS_CLK11 0x04000000 /* SCC1 Tx Clock Source is CLK11 */
+#define CMXSCR_TS1CS_CLK12 0x05000000 /* SCC1 Tx Clock Source is CLK12 */
+#define CMXSCR_TS1CS_CLK3 0x06000000 /* SCC1 Tx Clock Source is CLK3 */
+#define CMXSCR_TS1CS_CLK4 0x07000000 /* SCC1 Tx Clock Source is CLK4 */
+
+#define CMXSCR_RS2CS_BRG1 0x00000000 /* SCC2 Rx Clock Source is BRG1 */
+#define CMXSCR_RS2CS_BRG2 0x00080000 /* SCC2 Rx Clock Source is BRG2 */
+#define CMXSCR_RS2CS_BRG3 0x00100000 /* SCC2 Rx Clock Source is BRG3 */
+#define CMXSCR_RS2CS_BRG4 0x00180000 /* SCC2 Rx Clock Source is BRG4 */
+#define CMXSCR_RS2CS_CLK11 0x00200000 /* SCC2 Rx Clock Source is CLK11 */
+#define CMXSCR_RS2CS_CLK12 0x00280000 /* SCC2 Rx Clock Source is CLK12 */
+#define CMXSCR_RS2CS_CLK3 0x00300000 /* SCC2 Rx Clock Source is CLK3 */
+#define CMXSCR_RS2CS_CLK4 0x00380000 /* SCC2 Rx Clock Source is CLK4 */
+
+#define CMXSCR_TS2CS_BRG1 0x00000000 /* SCC2 Tx Clock Source is BRG1 */
+#define CMXSCR_TS2CS_BRG2 0x00010000 /* SCC2 Tx Clock Source is BRG2 */
+#define CMXSCR_TS2CS_BRG3 0x00020000 /* SCC2 Tx Clock Source is BRG3 */
+#define CMXSCR_TS2CS_BRG4 0x00030000 /* SCC2 Tx Clock Source is BRG4 */
+#define CMXSCR_TS2CS_CLK11 0x00040000 /* SCC2 Tx Clock Source is CLK11 */
+#define CMXSCR_TS2CS_CLK12 0x00050000 /* SCC2 Tx Clock Source is CLK12 */
+#define CMXSCR_TS2CS_CLK3 0x00060000 /* SCC2 Tx Clock Source is CLK3 */
+#define CMXSCR_TS2CS_CLK4 0x00070000 /* SCC2 Tx Clock Source is CLK4 */
+
+#define CMXSCR_RS3CS_BRG1 0x00000000 /* SCC3 Rx Clock Source is BRG1 */
+#define CMXSCR_RS3CS_BRG2 0x00000800 /* SCC3 Rx Clock Source is BRG2 */
+#define CMXSCR_RS3CS_BRG3 0x00001000 /* SCC3 Rx Clock Source is BRG3 */
+#define CMXSCR_RS3CS_BRG4 0x00001800 /* SCC3 Rx Clock Source is BRG4 */
+#define CMXSCR_RS3CS_CLK5 0x00002000 /* SCC3 Rx Clock Source is CLK5 */
+#define CMXSCR_RS3CS_CLK6 0x00002800 /* SCC3 Rx Clock Source is CLK6 */
+#define CMXSCR_RS3CS_CLK7 0x00003000 /* SCC3 Rx Clock Source is CLK7 */
+#define CMXSCR_RS3CS_CLK8 0x00003800 /* SCC3 Rx Clock Source is CLK8 */
+
+#define CMXSCR_TS3CS_BRG1 0x00000000 /* SCC3 Tx Clock Source is BRG1 */
+#define CMXSCR_TS3CS_BRG2 0x00000100 /* SCC3 Tx Clock Source is BRG2 */
+#define CMXSCR_TS3CS_BRG3 0x00000200 /* SCC3 Tx Clock Source is BRG3 */
+#define CMXSCR_TS3CS_BRG4 0x00000300 /* SCC3 Tx Clock Source is BRG4 */
+#define CMXSCR_TS3CS_CLK5 0x00000400 /* SCC3 Tx Clock Source is CLK5 */
+#define CMXSCR_TS3CS_CLK6 0x00000500 /* SCC3 Tx Clock Source is CLK6 */
+#define CMXSCR_TS3CS_CLK7 0x00000600 /* SCC3 Tx Clock Source is CLK7 */
+#define CMXSCR_TS3CS_CLK8 0x00000700 /* SCC3 Tx Clock Source is CLK8 */
+
+#define CMXSCR_RS4CS_BRG1 0x00000000 /* SCC4 Rx Clock Source is BRG1 */
+#define CMXSCR_RS4CS_BRG2 0x00000008 /* SCC4 Rx Clock Source is BRG2 */
+#define CMXSCR_RS4CS_BRG3 0x00000010 /* SCC4 Rx Clock Source is BRG3 */
+#define CMXSCR_RS4CS_BRG4 0x00000018 /* SCC4 Rx Clock Source is BRG4 */
+#define CMXSCR_RS4CS_CLK5 0x00000020 /* SCC4 Rx Clock Source is CLK5 */
+#define CMXSCR_RS4CS_CLK6 0x00000028 /* SCC4 Rx Clock Source is CLK6 */
+#define CMXSCR_RS4CS_CLK7 0x00000030 /* SCC4 Rx Clock Source is CLK7 */
+#define CMXSCR_RS4CS_CLK8 0x00000038 /* SCC4 Rx Clock Source is CLK8 */
+
+#define CMXSCR_TS4CS_BRG1 0x00000000 /* SCC4 Tx Clock Source is BRG1 */
+#define CMXSCR_TS4CS_BRG2 0x00000001 /* SCC4 Tx Clock Source is BRG2 */
+#define CMXSCR_TS4CS_BRG3 0x00000002 /* SCC4 Tx Clock Source is BRG3 */
+#define CMXSCR_TS4CS_BRG4 0x00000003 /* SCC4 Tx Clock Source is BRG4 */
+#define CMXSCR_TS4CS_CLK5 0x00000004 /* SCC4 Tx Clock Source is CLK5 */
+#define CMXSCR_TS4CS_CLK6 0x00000005 /* SCC4 Tx Clock Source is CLK6 */
+#define CMXSCR_TS4CS_CLK7 0x00000006 /* SCC4 Tx Clock Source is CLK7 */
+#define CMXSCR_TS4CS_CLK8 0x00000007 /* SCC4 Tx Clock Source is CLK8 */
+
+/*-----------------------------------------------------------------------
+ * SIUMCR - SIU Module Configuration Register 4-31
+ */
+#define SIUMCR_BBD 0x80000000 /* Bus Busy Disable */
+#define SIUMCR_ESE 0x40000000 /* External Snoop Enable */
+#define SIUMCR_PBSE 0x20000000 /* Parity Byte Select Enable */
+#define SIUMCR_CDIS 0x10000000 /* Core Disable */
+#define SIUMCR_DPPC00 0x00000000 /* Data Parity Pins Configuration*/
+#define SIUMCR_DPPC01 0x04000000 /* - " - */
+#define SIUMCR_DPPC10 0x08000000 /* - " - */
+#define SIUMCR_DPPC11 0x0c000000 /* - " - */
+#define SIUMCR_L2CPC00 0x00000000 /* L2 Cache Pins Configuration */
+#define SIUMCR_L2CPC01 0x01000000 /* - " - */
+#define SIUMCR_L2CPC10 0x02000000 /* - " - */
+#define SIUMCR_L2CPC11 0x03000000 /* - " - */
+#define SIUMCR_LBPC00 0x00000000 /* Local Bus Pins Configuration */
+#define SIUMCR_LBPC01 0x00400000 /* - " - */
+#define SIUMCR_LBPC10 0x00800000 /* - " - */
+#define SIUMCR_LBPC11 0x00c00000 /* - " - */
+#define SIUMCR_APPC00 0x00000000 /* Address Parity Pins Configuration*/
+#define SIUMCR_APPC01 0x00100000 /* - " - */
+#define SIUMCR_APPC10 0x00200000 /* - " - */
+#define SIUMCR_APPC11 0x00300000 /* - " - */
+#define SIUMCR_CS10PC00 0x00000000 /* CS10 Pin Configuration */
+#define SIUMCR_CS10PC01 0x00040000 /* - " - */
+#define SIUMCR_CS10PC10 0x00080000 /* - " - */
+#define SIUMCR_CS10PC11 0x000c0000 /* - " - */
+#define SIUMCR_BCTLC00 0x00000000 /* Buffer Control Configuration */
+#define SIUMCR_BCTLC01 0x00010000 /* - " - */
+#define SIUMCR_BCTLC10 0x00020000 /* - " - */
+#define SIUMCR_BCTLC11 0x00030000 /* - " - */
+#define SIUMCR_MMR00 0x00000000 /* Mask Masters Requests */
+#define SIUMCR_MMR01 0x00004000 /* - " - */
+#define SIUMCR_MMR10 0x00008000 /* - " - */
+#define SIUMCR_MMR11 0x0000c000 /* - " - */
+#define SIUMCR_LPBSE 0x00002000 /* LocalBus Parity Byte Select Enable*/
+
+/*-----------------------------------------------------------------------
+ * SCCR - System Clock Control Register 9-8
+ */
+#define SCCR_PCI_MODE 0x00000100 /* PCI Mode */
+#define SCCR_PCI_MODCK 0x00000080 /* Value of PCI_MODCK pin */
+#define SCCR_PCIDF_MSK 0x00000078 /* PCI division factor */
+#define SCCR_PCIDF_SHIFT 3
+
+#ifndef CPM_IMMR_OFFSET
+#define CPM_IMMR_OFFSET 0x101a8
+#endif
+
+#define FCC_PSMR_RMII ((uint)0x00020000) /* Use RMII interface */
+
+/* FCC iop & clock configuration. BSP code is responsible for defining
+ * Fx_RXCLK & Fx_TXCLK in order to use the clock-computing macros below
+ * for FCC x.
+ */
+
+/* Automatically generates register configurations */
+#define PC_CLK(x) ((uint)(1<<(x-1))) /* FCC CLK I/O ports */
+
+#define CMXFCR_RF1CS(x) ((uint)((x-5)<<27)) /* FCC1 Receive Clock Source */
+#define CMXFCR_TF1CS(x) ((uint)((x-5)<<24)) /* FCC1 Transmit Clock Source */
+#define CMXFCR_RF2CS(x) ((uint)((x-9)<<19)) /* FCC2 Receive Clock Source */
+#define CMXFCR_TF2CS(x) ((uint)((x-9)<<16)) /* FCC2 Transmit Clock Source */
+#define CMXFCR_RF3CS(x) ((uint)((x-9)<<11)) /* FCC3 Receive Clock Source */
+#define CMXFCR_TF3CS(x) ((uint)((x-9)<<8)) /* FCC3 Transmit Clock Source */
+
+#define PC_F1RXCLK PC_CLK(F1_RXCLK)
+#define PC_F1TXCLK PC_CLK(F1_TXCLK)
+#define CMX1_CLK_ROUTE (CMXFCR_RF1CS(F1_RXCLK) | CMXFCR_TF1CS(F1_TXCLK))
+#define CMX1_CLK_MASK ((uint)0xff000000)
+
+#define PC_F2RXCLK PC_CLK(F2_RXCLK)
+#define PC_F2TXCLK PC_CLK(F2_TXCLK)
+#define CMX2_CLK_ROUTE (CMXFCR_RF2CS(F2_RXCLK) | CMXFCR_TF2CS(F2_TXCLK))
+#define CMX2_CLK_MASK ((uint)0x00ff0000)
+
+#define PC_F3RXCLK PC_CLK(F3_RXCLK)
+#define PC_F3TXCLK PC_CLK(F3_TXCLK)
+#define CMX3_CLK_ROUTE (CMXFCR_RF3CS(F3_RXCLK) | CMXFCR_TF3CS(F3_TXCLK))
+#define CMX3_CLK_MASK ((uint)0x0000ff00)
+
+#define CPMUX_CLK_MASK (CMX3_CLK_MASK | CMX2_CLK_MASK)
+#define CPMUX_CLK_ROUTE (CMX3_CLK_ROUTE | CMX2_CLK_ROUTE)
+
+#define CLK_TRX (PC_F3TXCLK | PC_F3RXCLK | PC_F2TXCLK | PC_F2RXCLK)
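+
+/* Worked example (the clock numbers are placeholders; real values are board
+ * specific): a BSP that defines
+ *
+ *	#define F1_RXCLK	11
+ *	#define F1_TXCLK	10
+ *
+ * gets PC_F1RXCLK == PC_CLK(11) == 0x00000400, and CMX1_CLK_ROUTE expands to
+ * CMXFCR_RF1CS_CLK11 | CMXFCR_TF1CS_CLK10 (0x35000000), which board/driver
+ * code would typically write to CMXFCR under CMX1_CLK_MASK.
+ */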
+
+/* I/O Pin assignment for FCC1. I don't yet know the best way to do this,
+ * but there is little variation among the choices.
+ */
+#define PA1_COL 0x00000001U
+#define PA1_CRS 0x00000002U
+#define PA1_TXER 0x00000004U
+#define PA1_TXEN 0x00000008U
+#define PA1_RXDV 0x00000010U
+#define PA1_RXER 0x00000020U
+#define PA1_TXDAT 0x00003c00U
+#define PA1_RXDAT 0x0003c000U
+#define PA1_PSORA0 (PA1_RXDAT | PA1_TXDAT)
+#define PA1_PSORA1 (PA1_COL | PA1_CRS | PA1_TXER | PA1_TXEN | \
+ PA1_RXDV | PA1_RXER)
+#define PA1_DIRA0 (PA1_RXDAT | PA1_CRS | PA1_COL | PA1_RXER | PA1_RXDV)
+#define PA1_DIRA1 (PA1_TXDAT | PA1_TXEN | PA1_TXER)
+
+
+/* I/O Pin assignment for FCC2. I don't yet know the best way to do this,
+ * but there is little variation among the choices.
+ */
+#define PB2_TXER 0x00000001U
+#define PB2_RXDV 0x00000002U
+#define PB2_TXEN 0x00000004U
+#define PB2_RXER 0x00000008U
+#define PB2_COL 0x00000010U
+#define PB2_CRS 0x00000020U
+#define PB2_TXDAT 0x000003c0U
+#define PB2_RXDAT 0x00003c00U
+#define PB2_PSORB0 (PB2_RXDAT | PB2_TXDAT | PB2_CRS | PB2_COL | \
+ PB2_RXER | PB2_RXDV | PB2_TXER)
+#define PB2_PSORB1 (PB2_TXEN)
+#define PB2_DIRB0 (PB2_RXDAT | PB2_CRS | PB2_COL | PB2_RXER | PB2_RXDV)
+#define PB2_DIRB1 (PB2_TXDAT | PB2_TXEN | PB2_TXER)
+
+
+/* I/O Pin assignment for FCC3. I don't yet know the best way to do this,
+ * but there is little variation among the choices.
+ */
+#define PB3_RXDV 0x00004000U
+#define PB3_RXER 0x00008000U
+#define PB3_TXER 0x00010000U
+#define PB3_TXEN 0x00020000U
+#define PB3_COL 0x00040000U
+#define PB3_CRS 0x00080000U
+#define PB3_TXDAT 0x0f000000U
+#define PC3_TXDAT 0x00000010U
+#define PB3_RXDAT 0x00f00000U
+#define PB3_PSORB0 (PB3_RXDAT | PB3_TXDAT | PB3_CRS | PB3_COL | \
+ PB3_RXER | PB3_RXDV | PB3_TXER | PB3_TXEN)
+#define PB3_PSORB1 0
+#define PB3_DIRB0 (PB3_RXDAT | PB3_CRS | PB3_COL | PB3_RXER | PB3_RXDV)
+#define PB3_DIRB1 (PB3_TXDAT | PB3_TXEN | PB3_TXER)
+#define PC3_DIRC1 (PC3_TXDAT)
+
+/* Handy macro to specify mem for FCCs */
+#define FCC_MEM_OFFSET(x) (CPM_FCC_SPECIAL_BASE + (x*128))
+#define FCC1_MEM_OFFSET FCC_MEM_OFFSET(0)
+#define FCC2_MEM_OFFSET FCC_MEM_OFFSET(1)
+#define FCC3_MEM_OFFSET FCC_MEM_OFFSET(2)
+
+/* Clocks and BRGs */
+
+enum cpm_clk_dir {
+ CPM_CLK_RX,
+ CPM_CLK_TX,
+ CPM_CLK_RTX
+};
+
+enum cpm_clk_target {
+ CPM_CLK_SCC1,
+ CPM_CLK_SCC2,
+ CPM_CLK_SCC3,
+ CPM_CLK_SCC4,
+ CPM_CLK_FCC1,
+ CPM_CLK_FCC2,
+ CPM_CLK_FCC3,
+ CPM_CLK_SMC1,
+ CPM_CLK_SMC2,
+};
+
+enum cpm_clk {
+ CPM_CLK_NONE = 0,
+ CPM_BRG1, /* Baud Rate Generator 1 */
+ CPM_BRG2, /* Baud Rate Generator 2 */
+ CPM_BRG3, /* Baud Rate Generator 3 */
+ CPM_BRG4, /* Baud Rate Generator 4 */
+ CPM_BRG5, /* Baud Rate Generator 5 */
+ CPM_BRG6, /* Baud Rate Generator 6 */
+ CPM_BRG7, /* Baud Rate Generator 7 */
+ CPM_BRG8, /* Baud Rate Generator 8 */
+ CPM_CLK1, /* Clock 1 */
+ CPM_CLK2, /* Clock 2 */
+ CPM_CLK3, /* Clock 3 */
+ CPM_CLK4, /* Clock 4 */
+ CPM_CLK5, /* Clock 5 */
+ CPM_CLK6, /* Clock 6 */
+ CPM_CLK7, /* Clock 7 */
+ CPM_CLK8, /* Clock 8 */
+ CPM_CLK9, /* Clock 9 */
+ CPM_CLK10, /* Clock 10 */
+ CPM_CLK11, /* Clock 11 */
+ CPM_CLK12, /* Clock 12 */
+ CPM_CLK13, /* Clock 13 */
+ CPM_CLK14, /* Clock 14 */
+ CPM_CLK15, /* Clock 15 */
+ CPM_CLK16, /* Clock 16 */
+ CPM_CLK17, /* Clock 17 */
+ CPM_CLK18, /* Clock 18 */
+ CPM_CLK19, /* Clock 19 */
+ CPM_CLK20, /* Clock 20 */
+ CPM_CLK_DUMMY
+};
+
+extern int cpm2_clk_setup(enum cpm_clk_target target, int clock, int mode);
+extern int cpm2_smc_clk_setup(enum cpm_clk_target target, int clock);
+
+#define CPM_PIN_INPUT 0
+#define CPM_PIN_OUTPUT 1
+#define CPM_PIN_PRIMARY 0
+#define CPM_PIN_SECONDARY 2
+#define CPM_PIN_GPIO 4
+#define CPM_PIN_OPENDRAIN 8
+
+void cpm2_set_pin(int port, int pin, int flags);
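+
+/* Usage sketch (the clock, port and pin numbers below are arbitrary
+ * placeholders; the real values come from the board design):
+ *
+ *	cpm2_clk_setup(CPM_CLK_FCC1, CPM_CLK11, CPM_CLK_RX);
+ *	cpm2_clk_setup(CPM_CLK_FCC1, CPM_CLK10, CPM_CLK_TX);
+ *	cpm2_set_pin(0, 14, CPM_PIN_INPUT | CPM_PIN_PRIMARY);
+ */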
+
+#endif /* __CPM2__ */
+#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
new file mode 100644
index 0000000..ef8a248
--- /dev/null
+++ b/arch/powerpc/include/asm/cputable.h
@@ -0,0 +1,514 @@
+#ifndef __ASM_POWERPC_CPUTABLE_H
+#define __ASM_POWERPC_CPUTABLE_H
+
+#define PPC_FEATURE_32 0x80000000
+#define PPC_FEATURE_64 0x40000000
+#define PPC_FEATURE_601_INSTR 0x20000000
+#define PPC_FEATURE_HAS_ALTIVEC 0x10000000
+#define PPC_FEATURE_HAS_FPU 0x08000000
+#define PPC_FEATURE_HAS_MMU 0x04000000
+#define PPC_FEATURE_HAS_4xxMAC 0x02000000
+#define PPC_FEATURE_UNIFIED_CACHE 0x01000000
+#define PPC_FEATURE_HAS_SPE 0x00800000
+#define PPC_FEATURE_HAS_EFP_SINGLE 0x00400000
+#define PPC_FEATURE_HAS_EFP_DOUBLE 0x00200000
+#define PPC_FEATURE_NO_TB 0x00100000
+#define PPC_FEATURE_POWER4 0x00080000
+#define PPC_FEATURE_POWER5 0x00040000
+#define PPC_FEATURE_POWER5_PLUS 0x00020000
+#define PPC_FEATURE_CELL 0x00010000
+#define PPC_FEATURE_BOOKE 0x00008000
+#define PPC_FEATURE_SMT 0x00004000
+#define PPC_FEATURE_ICACHE_SNOOP 0x00002000
+#define PPC_FEATURE_ARCH_2_05 0x00001000
+#define PPC_FEATURE_PA6T 0x00000800
+#define PPC_FEATURE_HAS_DFP 0x00000400
+#define PPC_FEATURE_POWER6_EXT 0x00000200
+#define PPC_FEATURE_ARCH_2_06 0x00000100
+#define PPC_FEATURE_HAS_VSX 0x00000080
+
+#define PPC_FEATURE_PSERIES_PERFMON_COMPAT \
+ 0x00000040
+
+#define PPC_FEATURE_TRUE_LE 0x00000002
+#define PPC_FEATURE_PPC_LE 0x00000001
+
+#ifdef __KERNEL__
+
+#include <asm/asm-compat.h>
+#include <asm/feature-fixups.h>
+
+#ifndef __ASSEMBLY__
+
+/* This structure can grow, its real size is used by head.S code
+ * via the mkdefs mechanism.
+ */
+struct cpu_spec;
+
+typedef void (*cpu_setup_t)(unsigned long offset, struct cpu_spec* spec);
+typedef void (*cpu_restore_t)(void);
+
+enum powerpc_oprofile_type {
+ PPC_OPROFILE_INVALID = 0,
+ PPC_OPROFILE_RS64 = 1,
+ PPC_OPROFILE_POWER4 = 2,
+ PPC_OPROFILE_G4 = 3,
+ PPC_OPROFILE_FSL_EMB = 4,
+ PPC_OPROFILE_CELL = 5,
+ PPC_OPROFILE_PA6T = 6,
+};
+
+enum powerpc_pmc_type {
+ PPC_PMC_DEFAULT = 0,
+ PPC_PMC_IBM = 1,
+ PPC_PMC_PA6T = 2,
+};
+
+struct pt_regs;
+
+extern int machine_check_generic(struct pt_regs *regs);
+extern int machine_check_4xx(struct pt_regs *regs);
+extern int machine_check_440A(struct pt_regs *regs);
+extern int machine_check_e500(struct pt_regs *regs);
+extern int machine_check_e200(struct pt_regs *regs);
+
+/* NOTE WELL: Update identify_cpu() if fields are added or removed! */
+struct cpu_spec {
+ /* CPU is matched via (PVR & pvr_mask) == pvr_value */
+ unsigned int pvr_mask;
+ unsigned int pvr_value;
+
+ char *cpu_name;
+ unsigned long cpu_features; /* Kernel features */
+ unsigned int cpu_user_features; /* Userland features */
+
+ /* cache line sizes */
+ unsigned int icache_bsize;
+ unsigned int dcache_bsize;
+
+ /* number of performance monitor counters */
+ unsigned int num_pmcs;
+ enum powerpc_pmc_type pmc_type;
+
+ /* this is called to initialize various CPU bits like L1 cache,
+ * BHT, SPD, etc... from head.S before branching to identify_machine
+ */
+ cpu_setup_t cpu_setup;
+ /* Used to restore cpu setup on secondary processors and at resume */
+ cpu_restore_t cpu_restore;
+
+ /* Used by oprofile userspace to select the right counters */
+ char *oprofile_cpu_type;
+
+ /* Processor specific oprofile operations */
+ enum powerpc_oprofile_type oprofile_type;
+
+ /* Bit locations inside the mmcra change */
+ unsigned long oprofile_mmcra_sihv;
+ unsigned long oprofile_mmcra_sipr;
+
+ /* Bits to clear during an oprofile exception */
+ unsigned long oprofile_mmcra_clear;
+
+ /* Name of processor class, for the ELF AT_PLATFORM entry */
+ char *platform;
+
+ /* Processor specific machine check handling. Return negative
+ * if the error is fatal, 1 if it was fully recovered and 0 to
+ * pass up (not CPU originated) */
+ int (*machine_check)(struct pt_regs *regs);
+};
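+
+/* Matching sketch (the values are illustrative): an entry with
+ *
+ *	.pvr_mask  = 0xffff0000,
+ *	.pvr_value = 0x00080000,
+ *
+ * claims every processor whose PVR upper halfword is 0x0008, since the
+ * table walk in identify_cpu() tests (PVR & pvr_mask) == pvr_value.
+ */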
+
+extern struct cpu_spec *cur_cpu_spec;
+
+extern unsigned int __start___ftr_fixup, __stop___ftr_fixup;
+
+extern struct cpu_spec *identify_cpu(unsigned long offset, unsigned int pvr);
+extern void do_feature_fixups(unsigned long value, void *fixup_start,
+ void *fixup_end);
+
+extern const char *powerpc_base_platform;
+
+#endif /* __ASSEMBLY__ */
+
+/* CPU kernel features */
+
+/* Retain the 32-bit definitions; they all use the bottom half of the word */
+#define CPU_FTR_COHERENT_ICACHE ASM_CONST(0x0000000000000001)
+#define CPU_FTR_L2CR ASM_CONST(0x0000000000000002)
+#define CPU_FTR_SPEC7450 ASM_CONST(0x0000000000000004)
+#define CPU_FTR_ALTIVEC ASM_CONST(0x0000000000000008)
+#define CPU_FTR_TAU ASM_CONST(0x0000000000000010)
+#define CPU_FTR_CAN_DOZE ASM_CONST(0x0000000000000020)
+#define CPU_FTR_USE_TB ASM_CONST(0x0000000000000040)
+#define CPU_FTR_L2CSR ASM_CONST(0x0000000000000080)
+#define CPU_FTR_601 ASM_CONST(0x0000000000000100)
+#define CPU_FTR_HPTE_TABLE ASM_CONST(0x0000000000000200)
+#define CPU_FTR_CAN_NAP ASM_CONST(0x0000000000000400)
+#define CPU_FTR_L3CR ASM_CONST(0x0000000000000800)
+#define CPU_FTR_L3_DISABLE_NAP ASM_CONST(0x0000000000001000)
+#define CPU_FTR_NAP_DISABLE_L2_PR ASM_CONST(0x0000000000002000)
+#define CPU_FTR_DUAL_PLL_750FX ASM_CONST(0x0000000000004000)
+#define CPU_FTR_NO_DPM ASM_CONST(0x0000000000008000)
+#define CPU_FTR_HAS_HIGH_BATS ASM_CONST(0x0000000000010000)
+#define CPU_FTR_NEED_COHERENT ASM_CONST(0x0000000000020000)
+#define CPU_FTR_NO_BTIC ASM_CONST(0x0000000000040000)
+#define CPU_FTR_BIG_PHYS ASM_CONST(0x0000000000080000)
+#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000000000100000)
+#define CPU_FTR_PPC_LE ASM_CONST(0x0000000000200000)
+#define CPU_FTR_REAL_LE ASM_CONST(0x0000000000400000)
+#define CPU_FTR_FPU_UNAVAILABLE ASM_CONST(0x0000000000800000)
+#define CPU_FTR_UNIFIED_ID_CACHE ASM_CONST(0x0000000001000000)
+#define CPU_FTR_SPE ASM_CONST(0x0000000002000000)
+#define CPU_FTR_NEED_PAIRED_STWCX ASM_CONST(0x0000000004000000)
+#define CPU_FTR_LWSYNC ASM_CONST(0x0000000008000000)
+
+/*
+ * Add the 64-bit processor unique features in the top half of the word;
+ * on 32-bit, make the names available but defined to be 0.
+ */
+#ifdef __powerpc64__
+#define LONG_ASM_CONST(x) ASM_CONST(x)
+#else
+#define LONG_ASM_CONST(x) 0
+#endif
+
+#define CPU_FTR_SLB LONG_ASM_CONST(0x0000000100000000)
+#define CPU_FTR_16M_PAGE LONG_ASM_CONST(0x0000000200000000)
+#define CPU_FTR_TLBIEL LONG_ASM_CONST(0x0000000400000000)
+#define CPU_FTR_NOEXECUTE LONG_ASM_CONST(0x0000000800000000)
+#define CPU_FTR_IABR LONG_ASM_CONST(0x0000002000000000)
+#define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000004000000000)
+#define CPU_FTR_CTRL LONG_ASM_CONST(0x0000008000000000)
+#define CPU_FTR_SMT LONG_ASM_CONST(0x0000010000000000)
+#define CPU_FTR_LOCKLESS_TLBIE LONG_ASM_CONST(0x0000040000000000)
+#define CPU_FTR_CI_LARGE_PAGE LONG_ASM_CONST(0x0000100000000000)
+#define CPU_FTR_PAUSE_ZERO LONG_ASM_CONST(0x0000200000000000)
+#define CPU_FTR_PURR LONG_ASM_CONST(0x0000400000000000)
+#define CPU_FTR_CELL_TB_BUG LONG_ASM_CONST(0x0000800000000000)
+#define CPU_FTR_SPURR LONG_ASM_CONST(0x0001000000000000)
+#define CPU_FTR_DSCR LONG_ASM_CONST(0x0002000000000000)
+#define CPU_FTR_1T_SEGMENT LONG_ASM_CONST(0x0004000000000000)
+#define CPU_FTR_NO_SLBIE_B LONG_ASM_CONST(0x0008000000000000)
+#define CPU_FTR_VSX LONG_ASM_CONST(0x0010000000000000)
+#define CPU_FTR_SAO LONG_ASM_CONST(0x0020000000000000)
+
+#ifndef __ASSEMBLY__
+
+#define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_SLB | \
+ CPU_FTR_TLBIEL | CPU_FTR_NOEXECUTE | \
+ CPU_FTR_NODSISRALIGN | CPU_FTR_16M_PAGE)
+
+/* We only set the altivec features if the kernel was compiled with altivec
+ * support
+ */
+#ifdef CONFIG_ALTIVEC
+#define CPU_FTR_ALTIVEC_COMP CPU_FTR_ALTIVEC
+#define PPC_FEATURE_HAS_ALTIVEC_COMP PPC_FEATURE_HAS_ALTIVEC
+#else
+#define CPU_FTR_ALTIVEC_COMP 0
+#define PPC_FEATURE_HAS_ALTIVEC_COMP 0
+#endif
+
+/* We only set the VSX features if the kernel was compiled with VSX
+ * support
+ */
+#ifdef CONFIG_VSX
+#define CPU_FTR_VSX_COMP CPU_FTR_VSX
+#define PPC_FEATURE_HAS_VSX_COMP PPC_FEATURE_HAS_VSX
+#else
+#define CPU_FTR_VSX_COMP 0
+#define PPC_FEATURE_HAS_VSX_COMP 0
+#endif
+
+/* We only set the spe features if the kernel was compiled with spe
+ * support
+ */
+#ifdef CONFIG_SPE
+#define CPU_FTR_SPE_COMP CPU_FTR_SPE
+#define PPC_FEATURE_HAS_SPE_COMP PPC_FEATURE_HAS_SPE
+#define PPC_FEATURE_HAS_EFP_SINGLE_COMP PPC_FEATURE_HAS_EFP_SINGLE
+#define PPC_FEATURE_HAS_EFP_DOUBLE_COMP PPC_FEATURE_HAS_EFP_DOUBLE
+#else
+#define CPU_FTR_SPE_COMP 0
+#define PPC_FEATURE_HAS_SPE_COMP 0
+#define PPC_FEATURE_HAS_EFP_SINGLE_COMP 0
+#define PPC_FEATURE_HAS_EFP_DOUBLE_COMP 0
+#endif
+
+/* We need to mark all pages as being coherent if we're SMP or we have a
+ * 74[45]x and an MPC107 host bridge. Also 83xx and PowerQUICC II
+ * require it for PCI "streaming/prefetch" to work properly.
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_MPC10X_BRIDGE) \
+ || defined(CONFIG_PPC_83xx) || defined(CONFIG_8260)
+#define CPU_FTR_COMMON CPU_FTR_NEED_COHERENT
+#else
+#define CPU_FTR_COMMON 0
+#endif
+
+/* The powersave features NAP & DOZE seem to confuse the BDI when
+   debugging. So if a BDI is used, disable these.
+ */
+#ifndef CONFIG_BDI_SWITCH
+#define CPU_FTR_MAYBE_CAN_DOZE CPU_FTR_CAN_DOZE
+#define CPU_FTR_MAYBE_CAN_NAP CPU_FTR_CAN_NAP
+#else
+#define CPU_FTR_MAYBE_CAN_DOZE 0
+#define CPU_FTR_MAYBE_CAN_NAP 0
+#endif
+
+#define CLASSIC_PPC (!defined(CONFIG_8xx) && !defined(CONFIG_4xx) && \
+ !defined(CONFIG_POWER3) && !defined(CONFIG_POWER4) && \
+ !defined(CONFIG_BOOKE))
+
+#define CPU_FTRS_PPC601 (CPU_FTR_COMMON | CPU_FTR_601 | CPU_FTR_HPTE_TABLE | \
+ CPU_FTR_COHERENT_ICACHE | CPU_FTR_UNIFIED_ID_CACHE)
+#define CPU_FTRS_603 (CPU_FTR_COMMON | \
+ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
+#define CPU_FTRS_604 (CPU_FTR_COMMON | \
+ CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_PPC_LE)
+#define CPU_FTRS_740_NOTAU (CPU_FTR_COMMON | \
+ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
+ CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
+#define CPU_FTRS_740 (CPU_FTR_COMMON | \
+ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
+ CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \
+ CPU_FTR_PPC_LE)
+#define CPU_FTRS_750 (CPU_FTR_COMMON | \
+ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
+ CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \
+ CPU_FTR_PPC_LE)
+#define CPU_FTRS_750CL (CPU_FTRS_750 | CPU_FTR_HAS_HIGH_BATS)
+#define CPU_FTRS_750FX1 (CPU_FTRS_750 | CPU_FTR_DUAL_PLL_750FX | CPU_FTR_NO_DPM)
+#define CPU_FTRS_750FX2 (CPU_FTRS_750 | CPU_FTR_NO_DPM)
+#define CPU_FTRS_750FX (CPU_FTRS_750 | CPU_FTR_DUAL_PLL_750FX | \
+ CPU_FTR_HAS_HIGH_BATS)
+#define CPU_FTRS_750GX (CPU_FTRS_750FX)
+#define CPU_FTRS_7400_NOTAU (CPU_FTR_COMMON | \
+ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
+ CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE | \
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
+#define CPU_FTRS_7400 (CPU_FTR_COMMON | \
+ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
+ CPU_FTR_TAU | CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE | \
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
+#define CPU_FTRS_7450_20 (CPU_FTR_COMMON | \
+ CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
+ CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
+ CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
+#define CPU_FTRS_7450_21 (CPU_FTR_COMMON | \
+ CPU_FTR_USE_TB | \
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
+ CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
+ CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | \
+ CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
+#define CPU_FTRS_7450_23 (CPU_FTR_COMMON | \
+ CPU_FTR_USE_TB | CPU_FTR_NEED_PAIRED_STWCX | \
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
+ CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
+ CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
+#define CPU_FTRS_7455_1 (CPU_FTR_COMMON | \
+ CPU_FTR_USE_TB | CPU_FTR_NEED_PAIRED_STWCX | \
+ CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR | \
+ CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_HAS_HIGH_BATS | \
+ CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
+#define CPU_FTRS_7455_20 (CPU_FTR_COMMON | \
+ CPU_FTR_USE_TB | CPU_FTR_NEED_PAIRED_STWCX | \
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
+ CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
+ CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | \
+ CPU_FTR_NEED_COHERENT | CPU_FTR_HAS_HIGH_BATS | CPU_FTR_PPC_LE)
+#define CPU_FTRS_7455 (CPU_FTR_COMMON | \
+ CPU_FTR_USE_TB | \
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
+ CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
+ CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
+ CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
+#define CPU_FTRS_7447_10 (CPU_FTR_COMMON | \
+ CPU_FTR_USE_TB | \
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
+ CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
+ CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
+ CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC | CPU_FTR_PPC_LE | \
+ CPU_FTR_NEED_PAIRED_STWCX)
+#define CPU_FTRS_7447 (CPU_FTR_COMMON | \
+ CPU_FTR_USE_TB | \
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
+ CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
+ CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
+ CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
+#define CPU_FTRS_7447A (CPU_FTR_COMMON | \
+ CPU_FTR_USE_TB | \
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
+ CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
+ CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
+ CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
+#define CPU_FTRS_7448 (CPU_FTR_COMMON | \
+ CPU_FTR_USE_TB | \
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
+ CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
+ CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
+ CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
+#define CPU_FTRS_82XX (CPU_FTR_COMMON | \
+ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB)
+#define CPU_FTRS_G2_LE (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | \
+ CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS)
+#define CPU_FTRS_E300 (CPU_FTR_MAYBE_CAN_DOZE | \
+ CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS | \
+ CPU_FTR_COMMON)
+#define CPU_FTRS_E300C2 (CPU_FTR_MAYBE_CAN_DOZE | \
+ CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS | \
+ CPU_FTR_COMMON | CPU_FTR_FPU_UNAVAILABLE)
+#define CPU_FTRS_CLASSIC32 (CPU_FTR_COMMON | \
+ CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE)
+#define CPU_FTRS_8XX (CPU_FTR_USE_TB)
+#define CPU_FTRS_40X (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN)
+#define CPU_FTRS_44X (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN)
+#define CPU_FTRS_E200 (CPU_FTR_USE_TB | CPU_FTR_SPE_COMP | \
+ CPU_FTR_NODSISRALIGN | CPU_FTR_COHERENT_ICACHE | \
+ CPU_FTR_UNIFIED_ID_CACHE)
+#define CPU_FTRS_E500 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
+ CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN)
+#define CPU_FTRS_E500_2 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
+ CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_BIG_PHYS | \
+ CPU_FTR_NODSISRALIGN)
+#define CPU_FTRS_E500MC (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_BIG_PHYS | CPU_FTR_NODSISRALIGN | \
+ CPU_FTR_L2CSR | CPU_FTR_LWSYNC)
+#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
+
+/* 64-bit CPUs */
+#define CPU_FTRS_POWER3 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
+ CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | CPU_FTR_PPC_LE)
+#define CPU_FTRS_RS64 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
+ CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | \
+ CPU_FTR_MMCRA | CPU_FTR_CTRL)
+#define CPU_FTRS_POWER4 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
+ CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+ CPU_FTR_MMCRA)
+#define CPU_FTRS_PPC970 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
+ CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+ CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA)
+#define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
+ CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+ CPU_FTR_MMCRA | CPU_FTR_SMT | \
+ CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
+ CPU_FTR_PURR)
+#define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
+ CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+ CPU_FTR_MMCRA | CPU_FTR_SMT | \
+ CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
+ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
+ CPU_FTR_DSCR)
+#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
+ CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+ CPU_FTR_MMCRA | CPU_FTR_SMT | \
+ CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
+ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
+ CPU_FTR_DSCR | CPU_FTR_SAO)
+#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
+ CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+ CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
+ CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE | CPU_FTR_CELL_TB_BUG)
+#define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
+ CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \
+ CPU_FTR_ALTIVEC_COMP | CPU_FTR_CI_LARGE_PAGE | \
+ CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_NO_SLBIE_B)
+#define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | \
+ CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2)
+
+#ifdef __powerpc64__
+#define CPU_FTRS_POSSIBLE \
+ (CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 | \
+ CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_POWER6 | \
+ CPU_FTRS_POWER7 | CPU_FTRS_CELL | CPU_FTRS_PA6T | \
+ CPU_FTR_1T_SEGMENT | CPU_FTR_VSX)
+#else
+enum {
+ CPU_FTRS_POSSIBLE =
+#if CLASSIC_PPC
+ CPU_FTRS_PPC601 | CPU_FTRS_603 | CPU_FTRS_604 | CPU_FTRS_740_NOTAU |
+ CPU_FTRS_740 | CPU_FTRS_750 | CPU_FTRS_750FX1 |
+ CPU_FTRS_750FX2 | CPU_FTRS_750FX | CPU_FTRS_750GX |
+ CPU_FTRS_7400_NOTAU | CPU_FTRS_7400 | CPU_FTRS_7450_20 |
+ CPU_FTRS_7450_21 | CPU_FTRS_7450_23 | CPU_FTRS_7455_1 |
+ CPU_FTRS_7455_20 | CPU_FTRS_7455 | CPU_FTRS_7447_10 |
+ CPU_FTRS_7447 | CPU_FTRS_7447A | CPU_FTRS_82XX |
+ CPU_FTRS_G2_LE | CPU_FTRS_E300 | CPU_FTRS_E300C2 |
+ CPU_FTRS_CLASSIC32 |
+#else
+ CPU_FTRS_GENERIC_32 |
+#endif
+#ifdef CONFIG_8xx
+ CPU_FTRS_8XX |
+#endif
+#ifdef CONFIG_40x
+ CPU_FTRS_40X |
+#endif
+#ifdef CONFIG_44x
+ CPU_FTRS_44X |
+#endif
+#ifdef CONFIG_E200
+ CPU_FTRS_E200 |
+#endif
+#ifdef CONFIG_E500
+ CPU_FTRS_E500 | CPU_FTRS_E500_2 | CPU_FTRS_E500MC |
+#endif
+ 0,
+};
+#endif /* __powerpc64__ */
+
+#ifdef __powerpc64__
+#define CPU_FTRS_ALWAYS \
+ (CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 & \
+ CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & CPU_FTRS_POWER6 & \
+ CPU_FTRS_POWER7 & CPU_FTRS_CELL & CPU_FTRS_PA6T & CPU_FTRS_POSSIBLE)
+#else
+enum {
+ CPU_FTRS_ALWAYS =
+#if CLASSIC_PPC
+ CPU_FTRS_PPC601 & CPU_FTRS_603 & CPU_FTRS_604 & CPU_FTRS_740_NOTAU &
+ CPU_FTRS_740 & CPU_FTRS_750 & CPU_FTRS_750FX1 &
+ CPU_FTRS_750FX2 & CPU_FTRS_750FX & CPU_FTRS_750GX &
+ CPU_FTRS_7400_NOTAU & CPU_FTRS_7400 & CPU_FTRS_7450_20 &
+ CPU_FTRS_7450_21 & CPU_FTRS_7450_23 & CPU_FTRS_7455_1 &
+ CPU_FTRS_7455_20 & CPU_FTRS_7455 & CPU_FTRS_7447_10 &
+ CPU_FTRS_7447 & CPU_FTRS_7447A & CPU_FTRS_82XX &
+ CPU_FTRS_G2_LE & CPU_FTRS_E300 & CPU_FTRS_E300C2 &
+ CPU_FTRS_CLASSIC32 &
+#else
+ CPU_FTRS_GENERIC_32 &
+#endif
+#ifdef CONFIG_8xx
+ CPU_FTRS_8XX &
+#endif
+#ifdef CONFIG_40x
+ CPU_FTRS_40X &
+#endif
+#ifdef CONFIG_44x
+ CPU_FTRS_44X &
+#endif
+#ifdef CONFIG_E200
+ CPU_FTRS_E200 &
+#endif
+#ifdef CONFIG_E500
+ CPU_FTRS_E500 & CPU_FTRS_E500_2 & CPU_FTRS_E500MC &
+#endif
+ CPU_FTRS_POSSIBLE,
+};
+#endif /* __powerpc64__ */
+
+static inline int cpu_has_feature(unsigned long feature)
+{
+ return (CPU_FTRS_ALWAYS & feature) ||
+ (CPU_FTRS_POSSIBLE
+ & cur_cpu_spec->cpu_features
+ & feature);
+}
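+
+/* Typical use (sketch): callers simply test a single feature bit, e.g.
+ *
+ *	if (cpu_has_feature(CPU_FTR_ALTIVEC))
+ *		...
+ *
+ * Bits in CPU_FTRS_ALWAYS fold to a compile-time 1 and bits outside
+ * CPU_FTRS_POSSIBLE fold to a compile-time 0, so only the remaining
+ * features need a run-time lookup in cur_cpu_spec->cpu_features.
+ */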
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_POWERPC_CPUTABLE_H */
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
new file mode 100644
index 0000000..fb11b0c
--- /dev/null
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -0,0 +1,71 @@
+#ifndef _ASM_POWERPC_CPUTHREADS_H
+#define _ASM_POWERPC_CPUTHREADS_H
+
+#include <linux/cpumask.h>
+
+/*
+ * Mapping of threads to cores
+ */
+
+#ifdef CONFIG_SMP
+extern int threads_per_core;
+extern int threads_shift;
+extern cpumask_t threads_core_mask;
+#else
+#define threads_per_core 1
+#define threads_shift 0
+#define threads_core_mask (CPU_MASK_CPU0)
+#endif
+
+/* cpu_thread_mask_to_cores - Return a cpumask with one bit set per core
+ * hit by the argument
+ *
+ * @threads: a cpumask of threads
+ *
+ * This function returns a cpumask which will have one "cpu" (or thread)
+ * bit set for each core that has at least one thread set in the argument.
+ *
+ * This can typically be used for things like IPI for tlb invalidations
+ * since those need to be done only once per core/TLB
+ */
+static inline cpumask_t cpu_thread_mask_to_cores(cpumask_t threads)
+{
+ cpumask_t tmp, res;
+ int i;
+
+ res = CPU_MASK_NONE;
+ for (i = 0; i < NR_CPUS; i += threads_per_core) {
+ cpus_shift_left(tmp, threads_core_mask, i);
+ if (cpus_intersects(threads, tmp))
+ cpu_set(i, res);
+ }
+ return res;
+}
+
+static inline int cpu_nr_cores(void)
+{
+ return NR_CPUS >> threads_shift;
+}
+
+static inline cpumask_t cpu_online_cores_map(void)
+{
+ return cpu_thread_mask_to_cores(cpu_online_map);
+}
+
+static inline int cpu_thread_to_core(int cpu)
+{
+ return cpu >> threads_shift;
+}
+
+static inline int cpu_thread_in_core(int cpu)
+{
+ return cpu & (threads_per_core - 1);
+}
+
+static inline int cpu_first_thread_in_core(int cpu)
+{
+ return cpu & ~(threads_per_core - 1);
+}
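+
+/* Worked example (assuming threads_per_core == 2, threads_shift == 1):
+ * logical cpu 5 is thread 1 of core 2, so
+ *
+ *	cpu_thread_to_core(5)       == 2
+ *	cpu_thread_in_core(5)       == 1
+ *	cpu_first_thread_in_core(5) == 4
+ */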
+
+#endif /* _ASM_POWERPC_CPUTHREADS_H */
+
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
new file mode 100644
index 0000000..f42e623
--- /dev/null
+++ b/arch/powerpc/include/asm/cputime.h
@@ -0,0 +1,235 @@
+/*
+ * Definitions for measuring cputime on powerpc machines.
+ *
+ * Copyright (C) 2006 Paul Mackerras, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * If we have CONFIG_VIRT_CPU_ACCOUNTING, we measure cpu time in
+ * the same units as the timebase. Otherwise we measure cpu time
+ * in jiffies using the generic definitions.
+ */
+
+#ifndef __POWERPC_CPUTIME_H
+#define __POWERPC_CPUTIME_H
+
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+#include <asm-generic/cputime.h>
+#else
+
+#include <linux/types.h>
+#include <linux/time.h>
+#include <asm/div64.h>
+#include <asm/time.h>
+#include <asm/param.h>
+
+typedef u64 cputime_t;
+typedef u64 cputime64_t;
+
+#define cputime_zero ((cputime_t)0)
+#define cputime_max ((~((cputime_t)0) >> 1) - 1)
+#define cputime_add(__a, __b) ((__a) + (__b))
+#define cputime_sub(__a, __b) ((__a) - (__b))
+#define cputime_div(__a, __n) ((__a) / (__n))
+#define cputime_halve(__a) ((__a) >> 1)
+#define cputime_eq(__a, __b) ((__a) == (__b))
+#define cputime_gt(__a, __b) ((__a) > (__b))
+#define cputime_ge(__a, __b) ((__a) >= (__b))
+#define cputime_lt(__a, __b) ((__a) < (__b))
+#define cputime_le(__a, __b) ((__a) <= (__b))
+
+#define cputime64_zero ((cputime64_t)0)
+#define cputime64_add(__a, __b) ((__a) + (__b))
+#define cputime64_sub(__a, __b) ((__a) - (__b))
+#define cputime_to_cputime64(__ct) (__ct)
+
+#ifdef __KERNEL__
+
+/*
+ * Convert cputime <-> jiffies
+ */
+extern u64 __cputime_jiffies_factor;
+DECLARE_PER_CPU(unsigned long, cputime_last_delta);
+DECLARE_PER_CPU(unsigned long, cputime_scaled_last_delta);
+
+static inline unsigned long cputime_to_jiffies(const cputime_t ct)
+{
+ return mulhdu(ct, __cputime_jiffies_factor);
+}
+
+/* Estimate the scaled cputime by scaling the real cputime based on
+ * the last scaled-to-real ratio */
+static inline cputime_t cputime_to_scaled(const cputime_t ct)
+{
+ if (cpu_has_feature(CPU_FTR_SPURR) &&
+ per_cpu(cputime_last_delta, smp_processor_id()))
+ return ct *
+ per_cpu(cputime_scaled_last_delta, smp_processor_id())/
+ per_cpu(cputime_last_delta, smp_processor_id());
+ return ct;
+}
+
+static inline cputime_t jiffies_to_cputime(const unsigned long jif)
+{
+ cputime_t ct;
+ unsigned long sec;
+
+ /* have to be a little careful about overflow */
+ ct = jif % HZ;
+ sec = jif / HZ;
+ if (ct) {
+ ct *= tb_ticks_per_sec;
+ do_div(ct, HZ);
+ }
+ if (sec)
+ ct += (cputime_t) sec * tb_ticks_per_sec;
+ return ct;
+}
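+
+/* For example (the HZ and timebase values are illustrative): with HZ == 250
+ * and tb_ticks_per_sec == 512000000, jiffies_to_cputime(1001) splits the
+ * argument into 4 whole seconds plus 1 leftover jiffy and returns
+ * 4 * 512000000 + 512000000 / 250 == 2050048000 timebase ticks; doing the
+ * whole multiplication first is what the "careful about overflow" split
+ * above avoids for very large jiffy counts.
+ */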
+
+static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
+{
+ cputime_t ct;
+ u64 sec;
+
+ /* have to be a little careful about overflow */
+ ct = jif % HZ;
+ sec = jif / HZ;
+ if (ct) {
+ ct *= tb_ticks_per_sec;
+ do_div(ct, HZ);
+ }
+ if (sec)
+ ct += (cputime_t) sec * tb_ticks_per_sec;
+ return ct;
+}
+
+static inline u64 cputime64_to_jiffies64(const cputime_t ct)
+{
+ return mulhdu(ct, __cputime_jiffies_factor);
+}
+
+/*
+ * Convert cputime <-> milliseconds
+ */
+extern u64 __cputime_msec_factor;
+
+static inline unsigned long cputime_to_msecs(const cputime_t ct)
+{
+ return mulhdu(ct, __cputime_msec_factor);
+}
+
+static inline cputime_t msecs_to_cputime(const unsigned long ms)
+{
+ cputime_t ct;
+ unsigned long sec;
+
+ /* have to be a little careful about overflow */
+ ct = ms % 1000;
+ sec = ms / 1000;
+ if (ct) {
+ ct *= tb_ticks_per_sec;
+ do_div(ct, 1000);
+ }
+ if (sec)
+ ct += (cputime_t) sec * tb_ticks_per_sec;
+ return ct;
+}
+
+/*
+ * Convert cputime <-> seconds
+ */
+extern u64 __cputime_sec_factor;
+
+static inline unsigned long cputime_to_secs(const cputime_t ct)
+{
+ return mulhdu(ct, __cputime_sec_factor);
+}
+
+static inline cputime_t secs_to_cputime(const unsigned long sec)
+{
+ return (cputime_t) sec * tb_ticks_per_sec;
+}
+
+/*
+ * Convert cputime <-> timespec
+ */
+static inline void cputime_to_timespec(const cputime_t ct, struct timespec *p)
+{
+ u64 x = ct;
+ unsigned int frac;
+
+ frac = do_div(x, tb_ticks_per_sec);
+ p->tv_sec = x;
+ x = (u64) frac * 1000000000;
+ do_div(x, tb_ticks_per_sec);
+ p->tv_nsec = x;
+}
+
+static inline cputime_t timespec_to_cputime(const struct timespec *p)
+{
+ cputime_t ct;
+
+ ct = (u64) p->tv_nsec * tb_ticks_per_sec;
+ do_div(ct, 1000000000);
+ return ct + (u64) p->tv_sec * tb_ticks_per_sec;
+}
+
+/*
+ * Convert cputime <-> timeval
+ */
+static inline void cputime_to_timeval(const cputime_t ct, struct timeval *p)
+{
+ u64 x = ct;
+ unsigned int frac;
+
+ frac = do_div(x, tb_ticks_per_sec);
+ p->tv_sec = x;
+ x = (u64) frac * 1000000;
+ do_div(x, tb_ticks_per_sec);
+ p->tv_usec = x;
+}
+
+static inline cputime_t timeval_to_cputime(const struct timeval *p)
+{
+ cputime_t ct;
+
+ ct = (u64) p->tv_usec * tb_ticks_per_sec;
+ do_div(ct, 1000000);
+ return ct + (u64) p->tv_sec * tb_ticks_per_sec;
+}
+
+/*
+ * Convert cputime <-> clock_t (units of 1/USER_HZ seconds)
+ */
+extern u64 __cputime_clockt_factor;
+
+static inline unsigned long cputime_to_clock_t(const cputime_t ct)
+{
+ return mulhdu(ct, __cputime_clockt_factor);
+}
+
+static inline cputime_t clock_t_to_cputime(const unsigned long clk)
+{
+ cputime_t ct;
+ unsigned long sec;
+
+ /* have to be a little careful about overflow */
+ ct = clk % USER_HZ;
+ sec = clk / USER_HZ;
+ if (ct) {
+ ct *= tb_ticks_per_sec;
+ do_div(ct, USER_HZ);
+ }
+ if (sec)
+ ct += (cputime_t) sec * tb_ticks_per_sec;
+ return ct;
+}
+
+#define cputime64_to_clock_t(ct) cputime_to_clock_t((cputime_t)(ct))
+
+#endif /* __KERNEL__ */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+#endif /* __POWERPC_CPUTIME_H */
diff --git a/arch/powerpc/include/asm/current.h b/arch/powerpc/include/asm/current.h
new file mode 100644
index 0000000..e2c7f06
--- /dev/null
+++ b/arch/powerpc/include/asm/current.h
@@ -0,0 +1,40 @@
+#ifndef _ASM_POWERPC_CURRENT_H
+#define _ASM_POWERPC_CURRENT_H
+#ifdef __KERNEL__
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+struct task_struct;
+
+#ifdef __powerpc64__
+#include <linux/stddef.h>
+#include <asm/paca.h>
+
+static inline struct task_struct *get_current(void)
+{
+ struct task_struct *task;
+
+ __asm__ __volatile__("ld %0,%1(13)"
+ : "=r" (task)
+ : "i" (offsetof(struct paca_struct, __current)));
+
+ return task;
+}
+#define current get_current()
+
+#else
+
+/*
+ * We keep `current' in r2 for speed.
+ */
+register struct task_struct *current asm ("r2");
+
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_CURRENT_H */
diff --git a/arch/powerpc/include/asm/dbdma.h b/arch/powerpc/include/asm/dbdma.h
new file mode 100644
index 0000000..e23f07e
--- /dev/null
+++ b/arch/powerpc/include/asm/dbdma.h
@@ -0,0 +1,108 @@
+/*
+ * Definitions for using the Apple Descriptor-Based DMA controller
+ * in Power Macintosh computers.
+ *
+ * Copyright (C) 1996 Paul Mackerras.
+ */
+
+#ifdef __KERNEL__
+#ifndef _ASM_DBDMA_H_
+#define _ASM_DBDMA_H_
+/*
+ * DBDMA control/status registers. All little-endian.
+ */
+struct dbdma_regs {
+ unsigned int control; /* lets you change bits in status */
+ unsigned int status; /* DMA and device status bits (see below) */
+ unsigned int cmdptr_hi; /* upper 32 bits of command address */
+ unsigned int cmdptr; /* (lower 32 bits of) command address (phys) */
+ unsigned int intr_sel; /* select interrupt condition bit */
+ unsigned int br_sel; /* select branch condition bit */
+ unsigned int wait_sel; /* select wait condition bit */
+ unsigned int xfer_mode;
+ unsigned int data2ptr_hi;
+ unsigned int data2ptr;
+ unsigned int res1;
+ unsigned int address_hi;
+ unsigned int br_addr_hi;
+ unsigned int res2[3];
+};
+
+/* Bits in control and status registers */
+#define RUN 0x8000
+#define PAUSE 0x4000
+#define FLUSH 0x2000
+#define WAKE 0x1000
+#define DEAD 0x0800
+#define ACTIVE 0x0400
+#define BT 0x0100
+#define DEVSTAT 0x00ff
+
+/*
+ * DBDMA command structure. These fields are all little-endian!
+ */
+struct dbdma_cmd {
+ unsigned short req_count; /* requested byte transfer count */
+ unsigned short command; /* command word (has bit-fields) */
+ unsigned int phy_addr; /* physical data address */
+ unsigned int cmd_dep; /* command-dependent field */
+ unsigned short res_count; /* residual count after completion */
+ unsigned short xfer_status; /* transfer status */
+};
+
+/* DBDMA command values in command field */
+#define OUTPUT_MORE 0 /* transfer memory data to stream */
+#define OUTPUT_LAST 0x1000 /* ditto followed by end marker */
+#define INPUT_MORE 0x2000 /* transfer stream data to memory */
+#define INPUT_LAST 0x3000 /* ditto, expect end marker */
+#define STORE_WORD 0x4000 /* write word (4 bytes) to device reg */
+#define LOAD_WORD 0x5000 /* read word (4 bytes) from device reg */
+#define DBDMA_NOP 0x6000 /* do nothing */
+#define DBDMA_STOP 0x7000 /* suspend processing */
+
+/* Key values in command field */
+#define KEY_STREAM0 0 /* usual data stream */
+#define KEY_STREAM1 0x100 /* control/status stream */
+#define KEY_STREAM2 0x200 /* device-dependent stream */
+#define KEY_STREAM3 0x300 /* device-dependent stream */
+#define KEY_REGS 0x500 /* device register space */
+#define KEY_SYSTEM 0x600 /* system memory-mapped space */
+#define KEY_DEVICE 0x700 /* device memory-mapped space */
+
+/* Interrupt control values in command field */
+#define INTR_NEVER 0 /* don't interrupt */
+#define INTR_IFSET 0x10 /* intr if condition bit is 1 */
+#define INTR_IFCLR 0x20 /* intr if condition bit is 0 */
+#define INTR_ALWAYS 0x30 /* always interrupt */
+
+/* Branch control values in command field */
+#define BR_NEVER 0 /* don't branch */
+#define BR_IFSET 0x4 /* branch if condition bit is 1 */
+#define BR_IFCLR 0x8 /* branch if condition bit is 0 */
+#define BR_ALWAYS 0xc /* always branch */
+
+/* Wait control values in command field */
+#define WAIT_NEVER 0 /* don't wait */
+#define WAIT_IFSET 1 /* wait if condition bit is 1 */
+#define WAIT_IFCLR 2 /* wait if condition bit is 0 */
+#define WAIT_ALWAYS 3 /* always wait */
+
+/* Align an address for a DBDMA command structure */
+#define DBDMA_ALIGN(x) (((unsigned long)(x) + sizeof(struct dbdma_cmd) - 1) \
+ & -sizeof(struct dbdma_cmd))
+
+/* Useful macros */
+#define DBDMA_DO_STOP(regs) do { \
+ out_le32(&((regs)->control), (RUN|FLUSH)<<16); \
+ while(in_le32(&((regs)->status)) & (ACTIVE|FLUSH)) \
+ ; \
+} while(0)
+
+#define DBDMA_DO_RESET(regs) do { \
+ out_le32(&((regs)->control), (ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN)<<16);\
+ while(in_le32(&((regs)->status)) & (RUN)) \
+ ; \
+} while(0)
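+
+/* Command-building sketch (assumes the st_le16()/st_le32() little-endian
+ * store helpers from asm/byteorder.h; "cmd" points into a DBDMA_ALIGNed
+ * command table and "len"/"buf_phys" are placeholders for the data buffer):
+ *
+ *	st_le16(&cmd->req_count, len);
+ *	st_le16(&cmd->command, INPUT_LAST | INTR_ALWAYS);
+ *	st_le32(&cmd->phy_addr, buf_phys);
+ *	st_le32(&cmd->cmd_dep, 0);
+ *	st_le16(&cmd->xfer_status, 0);
+ *
+ * with the program typically terminated by a following DBDMA_STOP command.
+ */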
+
+#endif /* _ASM_DBDMA_H_ */
+#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/dcr-generic.h b/arch/powerpc/include/asm/dcr-generic.h
new file mode 100644
index 0000000..35b7159
--- /dev/null
+++ b/arch/powerpc/include/asm/dcr-generic.h
@@ -0,0 +1,49 @@
+/*
+ * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_POWERPC_DCR_GENERIC_H
+#define _ASM_POWERPC_DCR_GENERIC_H
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+enum host_type_t {DCR_HOST_MMIO, DCR_HOST_NATIVE, DCR_HOST_INVALID};
+
+typedef struct {
+ enum host_type_t type;
+ union {
+ dcr_host_mmio_t mmio;
+ dcr_host_native_t native;
+ } host;
+} dcr_host_t;
+
+extern bool dcr_map_ok_generic(dcr_host_t host);
+
+extern dcr_host_t dcr_map_generic(struct device_node *dev, unsigned int dcr_n,
+ unsigned int dcr_c);
+extern void dcr_unmap_generic(dcr_host_t host, unsigned int dcr_c);
+
+extern u32 dcr_read_generic(dcr_host_t host, unsigned int dcr_n);
+
+extern void dcr_write_generic(dcr_host_t host, unsigned int dcr_n, u32 value);
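+
+/* Drivers normally stay backend-agnostic and go through the wrappers in
+ * asm/dcr.h, roughly (sketch; the node and range names are placeholders):
+ *
+ *	dcr_host_t host = dcr_map(np, dcr_base, dcr_len);
+ *	if (!dcr_map_ok(host))
+ *		return -ENODEV;
+ *	val = dcr_read(host, 0);
+ *
+ * When both DCR backends are configured, those wrappers resolve to the
+ * *_generic variants above, which dispatch on host.type at run time.
+ */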
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_DCR_GENERIC_H */
+
+
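This header is only pulled in when a kernel is configured with both the native and the MMIO DCR back ends, so dcr_host_t carries a type tag and the accessors are out of line. As an illustration of the dispatch they have to perform (a sketch only; the real implementations live in arch/powerpc/sysdev/dcr.c):

/*
 * Illustrative sketch of the run-time dispatch between the native and
 * MMIO back ends performed by the generic accessors declared above.
 */
u32 dcr_read_generic(dcr_host_t host, unsigned int dcr_n)
{
	if (host.type == DCR_HOST_NATIVE)
		return dcr_read_native(host.host.native, dcr_n);
	else
		return dcr_read_mmio(host.host.mmio, dcr_n);
}

void dcr_write_generic(dcr_host_t host, unsigned int dcr_n, u32 value)
{
	if (host.type == DCR_HOST_NATIVE)
		dcr_write_native(host.host.native, dcr_n, value);
	else
		dcr_write_mmio(host.host.mmio, dcr_n, value);
}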
diff --git a/arch/powerpc/include/asm/dcr-mmio.h b/arch/powerpc/include/asm/dcr-mmio.h
new file mode 100644
index 0000000..acd491d
--- /dev/null
+++ b/arch/powerpc/include/asm/dcr-mmio.h
@@ -0,0 +1,61 @@
+/*
+ * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_POWERPC_DCR_MMIO_H
+#define _ASM_POWERPC_DCR_MMIO_H
+#ifdef __KERNEL__
+
+#include <asm/io.h>
+
+typedef struct {
+ void __iomem *token;
+ unsigned int stride;
+ unsigned int base;
+} dcr_host_mmio_t;
+
+static inline bool dcr_map_ok_mmio(dcr_host_mmio_t host)
+{
+ return host.token != NULL;
+}
+
+extern dcr_host_mmio_t dcr_map_mmio(struct device_node *dev,
+ unsigned int dcr_n,
+ unsigned int dcr_c);
+extern void dcr_unmap_mmio(dcr_host_mmio_t host, unsigned int dcr_c);
+
+static inline u32 dcr_read_mmio(dcr_host_mmio_t host, unsigned int dcr_n)
+{
+ return in_be32(host.token + ((host.base + dcr_n) * host.stride));
+}
+
+static inline void dcr_write_mmio(dcr_host_mmio_t host,
+ unsigned int dcr_n,
+ u32 value)
+{
+ out_be32(host.token + ((host.base + dcr_n) * host.stride), value);
+}
+
+extern u64 of_translate_dcr_address(struct device_node *dev,
+ unsigned int dcr_n,
+ unsigned int *stride);
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_DCR_MMIO_H */
+
+
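With the MMIO back end, a DCR access becomes a big-endian load or store at token + (base + dcr_n) * stride, where the token and stride come from translating the device-tree description of the DCR controller. A hedged usage sketch; the device node, the DCR numbers and the register indices are placeholders, and error handling is abbreviated:

/*
 * Sketch: map a two-register DCR range through the MMIO window and
 * access it through the helpers declared above.
 */
static int example_dcr_mmio(struct device_node *np)
{
	dcr_host_mmio_t host = dcr_map_mmio(np, 0x80 /* base DCR */, 2);

	if (!dcr_map_ok_mmio(host))
		return -ENODEV;

	dcr_write_mmio(host, 0, 0x1);	/* first register of the range */
	pr_debug("reg 1 = 0x%x\n", dcr_read_mmio(host, 1));

	dcr_unmap_mmio(host, 2);
	return 0;
}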
diff --git a/arch/powerpc/include/asm/dcr-native.h b/arch/powerpc/include/asm/dcr-native.h
new file mode 100644
index 0000000..72d2b72
--- /dev/null
+++ b/arch/powerpc/include/asm/dcr-native.h
@@ -0,0 +1,116 @@
+/*
+ * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_POWERPC_DCR_NATIVE_H
+#define _ASM_POWERPC_DCR_NATIVE_H
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+#include <linux/spinlock.h>
+
+typedef struct {
+ unsigned int base;
+} dcr_host_native_t;
+
+static inline bool dcr_map_ok_native(dcr_host_native_t host)
+{
+ return 1;
+}
+
+#define dcr_map_native(dev, dcr_n, dcr_c) \
+ ((dcr_host_native_t){ .base = (dcr_n) })
+#define dcr_unmap_native(host, dcr_c) do {} while (0)
+#define dcr_read_native(host, dcr_n) mfdcr(dcr_n + host.base)
+#define dcr_write_native(host, dcr_n, value) mtdcr(dcr_n + host.base, value)
+
+/* Device Control Registers */
+void __mtdcr(int reg, unsigned int val);
+unsigned int __mfdcr(int reg);
+#define mfdcr(rn) \
+ ({unsigned int rval; \
+ if (__builtin_constant_p(rn)) \
+ asm volatile("mfdcr %0," __stringify(rn) \
+ : "=r" (rval)); \
+ else \
+ rval = __mfdcr(rn); \
+ rval;})
+
+#define mtdcr(rn, v) \
+do { \
+ if (__builtin_constant_p(rn)) \
+ asm volatile("mtdcr " __stringify(rn) ",%0" \
+ : : "r" (v)); \
+ else \
+ __mtdcr(rn, v); \
+} while (0)
+
+/* R/W of indirect DCRs makes use of the standard naming conventions for DCRs */

+extern spinlock_t dcr_ind_lock;
+
+static inline unsigned __mfdcri(int base_addr, int base_data, int reg)
+{
+ unsigned long flags;
+ unsigned int val;
+
+ spin_lock_irqsave(&dcr_ind_lock, flags);
+ __mtdcr(base_addr, reg);
+ val = __mfdcr(base_data);
+ spin_unlock_irqrestore(&dcr_ind_lock, flags);
+ return val;
+}
+
+static inline void __mtdcri(int base_addr, int base_data, int reg,
+ unsigned val)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dcr_ind_lock, flags);
+ __mtdcr(base_addr, reg);
+ __mtdcr(base_data, val);
+ spin_unlock_irqrestore(&dcr_ind_lock, flags);
+}
+
+static inline void __dcri_clrset(int base_addr, int base_data, int reg,
+ unsigned clr, unsigned set)
+{
+ unsigned long flags;
+ unsigned int val;
+
+ spin_lock_irqsave(&dcr_ind_lock, flags);
+ __mtdcr(base_addr, reg);
+ val = (__mfdcr(base_data) & ~clr) | set;
+ __mtdcr(base_data, val);
+ spin_unlock_irqrestore(&dcr_ind_lock, flags);
+}
+
+#define mfdcri(base, reg) __mfdcri(DCRN_ ## base ## _CONFIG_ADDR, \
+ DCRN_ ## base ## _CONFIG_DATA, \
+ reg)
+
+#define mtdcri(base, reg, data) __mtdcri(DCRN_ ## base ## _CONFIG_ADDR, \
+ DCRN_ ## base ## _CONFIG_DATA, \
+ reg, data)
+
+#define dcri_clrset(base, reg, clr, set) __dcri_clrset(DCRN_ ## base ## _CONFIG_ADDR, \
+ DCRN_ ## base ## _CONFIG_DATA, \
+ reg, clr, set)
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_DCR_NATIVE_H */
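Because the mfdcr/mtdcr instructions encode the DCR number in the instruction itself, the macros above emit the instruction directly when the register number is a compile-time constant and fall back to the out-of-line __mfdcr()/__mtdcr() helpers otherwise. A small sketch (DCRN_EXAMPLE and the function are made up for illustration):

/*
 * Sketch: a constant DCR number compiles straight to an mfdcr/mtdcr
 * instruction, a variable one goes through __mfdcr()/__mtdcr().
 */
#define DCRN_EXAMPLE	0x0c

static unsigned int example_read_dcrs(int runtime_dcrn)
{
	unsigned int a = mfdcr(DCRN_EXAMPLE);	/* inline mfdcr, number in the opcode */
	unsigned int b = mfdcr(runtime_dcrn);	/* out-of-line __mfdcr() */

	mtdcr(DCRN_EXAMPLE, a | 1);		/* inline mtdcr */
	return a ^ b;
}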
diff --git a/arch/powerpc/include/asm/dcr-regs.h b/arch/powerpc/include/asm/dcr-regs.h
new file mode 100644
index 0000000..29b0ece
--- /dev/null
+++ b/arch/powerpc/include/asm/dcr-regs.h
@@ -0,0 +1,149 @@
+/*
+ * Common DCR / SDR / CPR register definitions used on various IBM/AMCC
+ * 4xx processors
+ *
+ * Copyright 2007 Benjamin Herrenschmidt, IBM Corp
+ * <benh@kernel.crashing.org>
+ *
+ * Mostly lifted from asm-ppc/ibm4xx.h by
+ *
+ * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
+ *
+ */
+
+#ifndef __DCR_REGS_H__
+#define __DCR_REGS_H__
+
+/*
+ * Most DCRs used for controlling devices such as the MAL, DMA engine,
+ * etc. are obtained from the device tree.
+ *
+ * The definitions in this file are fixed DCRs and indirect DCRs that
+ * are commonly used outside of specific drivers, or that refer to core
+ * common registers that may occasionally have to be tweaked outside
+ * of the driver's main register set.
+ */
+
+/* CPRs (440GX and 440SP/440SPe) */
+#define DCRN_CPR0_CONFIG_ADDR 0xc
+#define DCRN_CPR0_CONFIG_DATA 0xd
+
+/* SDRs (440GX and 440SP/440SPe) */
+#define DCRN_SDR0_CONFIG_ADDR 0xe
+#define DCRN_SDR0_CONFIG_DATA 0xf
+
+#define SDR0_PFC0 0x4100
+#define SDR0_PFC1 0x4101
+#define SDR0_PFC1_EPS 0x1c00000
+#define SDR0_PFC1_EPS_SHIFT 22
+#define SDR0_PFC1_RMII 0x02000000
+#define SDR0_MFR 0x4300
+#define SDR0_MFR_TAH0 0x80000000 /* TAHOE0 Enable */
+#define SDR0_MFR_TAH1 0x40000000 /* TAHOE1 Enable */
+#define SDR0_MFR_PCM 0x10000000 /* PPC440GP irq compat mode */
+#define SDR0_MFR_ECS 0x08000000 /* EMAC int clk */
+#define SDR0_MFR_T0TXFL 0x00080000
+#define SDR0_MFR_T0TXFH 0x00040000
+#define SDR0_MFR_T1TXFL 0x00020000
+#define SDR0_MFR_T1TXFH 0x00010000
+#define SDR0_MFR_E0TXFL 0x00008000
+#define SDR0_MFR_E0TXFH 0x00004000
+#define SDR0_MFR_E0RXFL 0x00002000
+#define SDR0_MFR_E0RXFH 0x00001000
+#define SDR0_MFR_E1TXFL 0x00000800
+#define SDR0_MFR_E1TXFH 0x00000400
+#define SDR0_MFR_E1RXFL 0x00000200
+#define SDR0_MFR_E1RXFH 0x00000100
+#define SDR0_MFR_E2TXFL 0x00000080
+#define SDR0_MFR_E2TXFH 0x00000040
+#define SDR0_MFR_E2RXFL 0x00000020
+#define SDR0_MFR_E2RXFH 0x00000010
+#define SDR0_MFR_E3TXFL 0x00000008
+#define SDR0_MFR_E3TXFH 0x00000004
+#define SDR0_MFR_E3RXFL 0x00000002
+#define SDR0_MFR_E3RXFH 0x00000001
+#define SDR0_UART0 0x0120
+#define SDR0_UART1 0x0121
+#define SDR0_UART2 0x0122
+#define SDR0_UART3 0x0123
+#define SDR0_CUST0 0x4000
+
+/*
+ * All those DCR register addresses are offsets from the base address
+ * for the SRAM0 controller (e.g. 0x20 on 440GX). The base address is
+ * excluded here and configured in the device tree.
+ */
+#define DCRN_SRAM0_SB0CR 0x00
+#define DCRN_SRAM0_SB1CR 0x01
+#define DCRN_SRAM0_SB2CR 0x02
+#define DCRN_SRAM0_SB3CR 0x03
+#define SRAM_SBCR_BU_MASK 0x00000180
+#define SRAM_SBCR_BS_64KB 0x00000800
+#define SRAM_SBCR_BU_RO 0x00000080
+#define SRAM_SBCR_BU_RW 0x00000180
+#define DCRN_SRAM0_BEAR 0x04
+#define DCRN_SRAM0_BESR0 0x05
+#define DCRN_SRAM0_BESR1 0x06
+#define DCRN_SRAM0_PMEG 0x07
+#define DCRN_SRAM0_CID 0x08
+#define DCRN_SRAM0_REVID 0x09
+#define DCRN_SRAM0_DPC 0x0a
+#define SRAM_DPC_ENABLE 0x80000000
+
+/*
+ * All those DCR register addresses are offsets from the base address
+ * for the L2C0 controller (e.g. 0x30 on 440GX). The base address is
+ * excluded here and configured in the device tree.
+ */
+#define DCRN_L2C0_CFG 0x00
+#define L2C_CFG_L2M 0x80000000
+#define L2C_CFG_ICU 0x40000000
+#define L2C_CFG_DCU 0x20000000
+#define L2C_CFG_DCW_MASK 0x1e000000
+#define L2C_CFG_TPC 0x01000000
+#define L2C_CFG_CPC 0x00800000
+#define L2C_CFG_FRAN 0x00200000
+#define L2C_CFG_SS_MASK 0x00180000
+#define L2C_CFG_SS_256 0x00000000
+#define L2C_CFG_CPIM 0x00040000
+#define L2C_CFG_TPIM 0x00020000
+#define L2C_CFG_LIM 0x00010000
+#define L2C_CFG_PMUX_MASK 0x00007000
+#define L2C_CFG_PMUX_SNP 0x00000000
+#define L2C_CFG_PMUX_IF 0x00001000
+#define L2C_CFG_PMUX_DF 0x00002000
+#define L2C_CFG_PMUX_DS 0x00003000
+#define L2C_CFG_PMIM 0x00000800
+#define L2C_CFG_TPEI 0x00000400
+#define L2C_CFG_CPEI 0x00000200
+#define L2C_CFG_NAM 0x00000100
+#define L2C_CFG_SMCM 0x00000080
+#define L2C_CFG_NBRM 0x00000040
+#define L2C_CFG_RDBW 0x00000008 /* only 460EX/GT */
+#define DCRN_L2C0_CMD 0x01
+#define L2C_CMD_CLR 0x80000000
+#define L2C_CMD_DIAG 0x40000000
+#define L2C_CMD_INV 0x20000000
+#define L2C_CMD_CCP 0x10000000
+#define L2C_CMD_CTE 0x08000000
+#define L2C_CMD_STRC 0x04000000
+#define L2C_CMD_STPC 0x02000000
+#define L2C_CMD_RPMC 0x01000000
+#define L2C_CMD_HCC 0x00800000
+#define DCRN_L2C0_ADDR 0x02
+#define DCRN_L2C0_DATA 0x03
+#define DCRN_L2C0_SR 0x04
+#define L2C_SR_CC 0x80000000
+#define L2C_SR_CPE 0x40000000
+#define L2C_SR_TPE 0x20000000
+#define L2C_SR_LRU 0x10000000
+#define L2C_SR_PCS 0x08000000
+#define DCRN_L2C0_REVID 0x05
+#define DCRN_L2C0_SNP0 0x06
+#define DCRN_L2C0_SNP1 0x07
+#define L2C_SNP_BA_MASK 0xffff0000
+#define L2C_SNP_SSR_MASK 0x0000f000
+#define L2C_SNP_SSR_32G 0x0000f000
+#define L2C_SNP_ESR 0x00000800
+
+#endif /* __DCR_REGS_H__ */
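The SDR0 (and CPR0) registers above are indirect: software writes the register number to the _CONFIG_ADDR DCR and then moves data through _CONFIG_DATA. Because the pair follows the DCRN_<base>_CONFIG_ADDR/_CONFIG_DATA naming convention, the mfdcri()/mtdcri()/dcri_clrset() helpers from dcr-native.h apply directly. A sketch only, for 4xx parts with native DCR support; whether this particular setting is right for any given board is beside the point:

/*
 * Sketch: locked read-modify-write of an indirect SDR0 register using
 * the dcr-native.h helpers and the definitions above.
 */
static void example_sdr0_pfc1_rmii(void)
{
	/* clear the EPS field and set the RMII bit in SDR0_PFC1 */
	dcri_clrset(SDR0, SDR0_PFC1, SDR0_PFC1_EPS, SDR0_PFC1_RMII);

	pr_debug("SDR0_PFC1 = 0x%08x\n", mfdcri(SDR0, SDR0_PFC1));
}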
diff --git a/arch/powerpc/include/asm/dcr.h b/arch/powerpc/include/asm/dcr.h
new file mode 100644
index 0000000..d13fb68
--- /dev/null
+++ b/arch/powerpc/include/asm/dcr.h
@@ -0,0 +1,78 @@
+/*
+ * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_POWERPC_DCR_H
+#define _ASM_POWERPC_DCR_H
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_PPC_DCR
+
+#ifdef CONFIG_PPC_DCR_NATIVE
+#include <asm/dcr-native.h>
+#endif
+
+#ifdef CONFIG_PPC_DCR_MMIO
+#include <asm/dcr-mmio.h>
+#endif
+
+
+/* Indirection layer for providing both NATIVE and MMIO support. */
+
+#if defined(CONFIG_PPC_DCR_NATIVE) && defined(CONFIG_PPC_DCR_MMIO)
+
+#include <asm/dcr-generic.h>
+
+#define DCR_MAP_OK(host) dcr_map_ok_generic(host)
+#define dcr_map(dev, dcr_n, dcr_c) dcr_map_generic(dev, dcr_n, dcr_c)
+#define dcr_unmap(host, dcr_c) dcr_unmap_generic(host, dcr_c)
+#define dcr_read(host, dcr_n) dcr_read_generic(host, dcr_n)
+#define dcr_write(host, dcr_n, value) dcr_write_generic(host, dcr_n, value)
+
+#else
+
+#ifdef CONFIG_PPC_DCR_NATIVE
+typedef dcr_host_native_t dcr_host_t;
+#define DCR_MAP_OK(host) dcr_map_ok_native(host)
+#define dcr_map(dev, dcr_n, dcr_c) dcr_map_native(dev, dcr_n, dcr_c)
+#define dcr_unmap(host, dcr_c) dcr_unmap_native(host, dcr_c)
+#define dcr_read(host, dcr_n) dcr_read_native(host, dcr_n)
+#define dcr_write(host, dcr_n, value) dcr_write_native(host, dcr_n, value)
+#else
+typedef dcr_host_mmio_t dcr_host_t;
+#define DCR_MAP_OK(host) dcr_map_ok_mmio(host)
+#define dcr_map(dev, dcr_n, dcr_c) dcr_map_mmio(dev, dcr_n, dcr_c)
+#define dcr_unmap(host, dcr_c) dcr_unmap_mmio(host, dcr_c)
+#define dcr_read(host, dcr_n) dcr_read_mmio(host, dcr_n)
+#define dcr_write(host, dcr_n, value) dcr_write_mmio(host, dcr_n, value)
+#endif
+
+#endif /* defined(CONFIG_PPC_DCR_NATIVE) && defined(CONFIG_PPC_DCR_MMIO) */
+
+/*
+ * Additional helpers to read the DCR base and length from the device tree.
+ */
+struct device_node;
+extern unsigned int dcr_resource_start(struct device_node *np,
+ unsigned int index);
+extern unsigned int dcr_resource_len(struct device_node *np,
+ unsigned int index);
+#endif /* CONFIG_PPC_DCR */
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_DCR_H */
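A driver normally never includes the native or MMIO headers directly: it reads the DCR range from the device tree with dcr_resource_start()/dcr_resource_len() and then uses the dcr_map()/dcr_read()/dcr_write() wrappers, which resolve to whichever back end(s) the kernel was configured with. A hedged sketch of that pattern (the resource index and register offset are placeholders):

/*
 * Sketch of the usual driver pattern: find the DCR range, map it,
 * use the back-end-independent accessors, unmap.
 */
static int example_probe_dcr(struct device_node *np)
{
	unsigned int base = dcr_resource_start(np, 0);
	unsigned int len = dcr_resource_len(np, 0);
	dcr_host_t host;

	host = dcr_map(np, base, len);
	if (!DCR_MAP_OK(host))
		return -ENODEV;

	/* set bit 0 of the first register in the range */
	dcr_write(host, 0, dcr_read(host, 0) | 0x1);

	dcr_unmap(host, len);
	return 0;
}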
diff --git a/arch/powerpc/include/asm/delay.h b/arch/powerpc/include/asm/delay.h
new file mode 100644
index 0000000..f9200a6
--- /dev/null
+++ b/arch/powerpc/include/asm/delay.h
@@ -0,0 +1,34 @@
+#ifndef _ASM_POWERPC_DELAY_H
+#define _ASM_POWERPC_DELAY_H
+#ifdef __KERNEL__
+
+/*
+ * Copyright 1996, Paul Mackerras.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * PPC64 Support added by Dave Engebretsen, Todd Inglett, Mike Corrigan,
+ * Anton Blanchard.
+ */
+
+extern void __delay(unsigned long loops);
+extern void udelay(unsigned long usecs);
+
+/*
+ * On shared processor machines the generic implementation of mdelay can
+ * result in large errors. While each iteration of the loop inside mdelay
+ * is supposed to take 1ms, the hypervisor could sleep our partition for
+ * longer (eg 10ms). With the right timing these errors can add up.
+ *
+ * Since there is no 32bit overflow issue on 64bit kernels, just call
+ * udelay directly.
+ */
+#ifdef CONFIG_PPC64
+#define mdelay(n) udelay((n) * 1000)
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_DELAY_H */
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
new file mode 100644
index 0000000..228ab2a
--- /dev/null
+++ b/arch/powerpc/include/asm/device.h
@@ -0,0 +1,24 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#ifndef _ASM_POWERPC_DEVICE_H
+#define _ASM_POWERPC_DEVICE_H
+
+struct dma_mapping_ops;
+struct device_node;
+
+struct dev_archdata {
+ /* Optional pointer to an OF device node */
+ struct device_node *of_node;
+
+ /* DMA operations on that device */
+ struct dma_mapping_ops *dma_ops;
+ void *dma_data;
+
+ /* NUMA node if applicable */
+ int numa_node;
+};
+
+#endif /* _ASM_POWERPC_DEVICE_H */
diff --git a/arch/powerpc/include/asm/div64.h b/arch/powerpc/include/asm/div64.h
new file mode 100644
index 0000000..6cd978c
--- /dev/null
+++ b/arch/powerpc/include/asm/div64.h
@@ -0,0 +1 @@
+#include <asm-generic/div64.h>
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
new file mode 100644
index 0000000..c7ca45f
--- /dev/null
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -0,0 +1,474 @@
+/*
+ * Copyright (C) 2004 IBM
+ *
+ * Implements the generic device DMA API for powerpc
+ * (the PCI and VIO busses).
+ */
+#ifndef _ASM_DMA_MAPPING_H
+#define _ASM_DMA_MAPPING_H
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <linux/cache.h>
+/* need struct page definitions */
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-attrs.h>
+#include <asm/io.h>
+
+#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+/*
+ * DMA-consistent mapping functions for PowerPCs that don't support
+ * cache snooping. These allocate/free a region of uncached mapped
+ * memory space for use with DMA devices. Alternatively, you could
+ * allocate the space "normally" and use the cache management functions
+ * to ensure it is consistent.
+ */
+extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
+extern void __dma_free_coherent(size_t size, void *vaddr);
+extern void __dma_sync(void *vaddr, size_t size, int direction);
+extern void __dma_sync_page(struct page *page, unsigned long offset,
+ size_t size, int direction);
+
+#else /* ! CONFIG_NOT_COHERENT_CACHE */
+/*
+ * Cache coherent cores.
+ */
+
+#define __dma_alloc_coherent(gfp, size, handle) NULL
+#define __dma_free_coherent(size, addr) ((void)0)
+#define __dma_sync(addr, size, rw) ((void)0)
+#define __dma_sync_page(pg, off, sz, rw) ((void)0)
+
+#endif /* ! CONFIG_NOT_COHERENT_CACHE */
+
+#ifdef CONFIG_PPC64
+
+static inline unsigned long device_to_mask(struct device *dev)
+{
+ if (dev->dma_mask && *dev->dma_mask)
+ return *dev->dma_mask;
+ /* Assume devices without mask can take 32 bit addresses */
+ return 0xfffffffful;
+}
+
+/*
+ * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
+ */
+struct dma_mapping_ops {
+ void * (*alloc_coherent)(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag);
+ void (*free_coherent)(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
+ dma_addr_t (*map_single)(struct device *dev, void *ptr,
+ size_t size, enum dma_data_direction direction,
+ struct dma_attrs *attrs);
+ void (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction direction,
+ struct dma_attrs *attrs);
+ int (*map_sg)(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction,
+ struct dma_attrs *attrs);
+ void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction,
+ struct dma_attrs *attrs);
+ int (*dma_supported)(struct device *dev, u64 mask);
+ int (*set_dma_mask)(struct device *dev, u64 dma_mask);
+};
+
+static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
+{
+ /* We don't handle the NULL dev case for ISA for now. We could
+ * do it via an out of line call but it is not needed for now. The
+ * only ISA DMA device we support is the floppy and we have a hack
+ * in the floppy driver directly to get a device for us.
+ */
+ if (unlikely(dev == NULL || dev->archdata.dma_ops == NULL))
+ return NULL;
+ return dev->archdata.dma_ops;
+}
+
+static inline void set_dma_ops(struct device *dev, struct dma_mapping_ops *ops)
+{
+ dev->archdata.dma_ops = ops;
+}
+
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+ struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+ if (unlikely(dma_ops == NULL))
+ return 0;
+ if (dma_ops->dma_supported == NULL)
+ return 1;
+ return dma_ops->dma_supported(dev, mask);
+}
+
+/* We have our own implementation of pci_set_dma_mask() */
+#define HAVE_ARCH_PCI_SET_DMA_MASK
+
+static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+ struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+ if (unlikely(dma_ops == NULL))
+ return -EIO;
+ if (dma_ops->set_dma_mask != NULL)
+ return dma_ops->set_dma_mask(dev, dma_mask);
+ if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+ return -EIO;
+ *dev->dma_mask = dma_mask;
+ return 0;
+}
+
+static inline dma_addr_t dma_map_single_attrs(struct device *dev,
+ void *cpu_addr,
+ size_t size,
+ enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+ BUG_ON(!dma_ops);
+ return dma_ops->map_single(dev, cpu_addr, size, direction, attrs);
+}
+
+static inline void dma_unmap_single_attrs(struct device *dev,
+ dma_addr_t dma_addr,
+ size_t size,
+ enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+ BUG_ON(!dma_ops);
+ dma_ops->unmap_single(dev, dma_addr, size, direction, attrs);
+}
+
+static inline dma_addr_t dma_map_page_attrs(struct device *dev,
+ struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+ BUG_ON(!dma_ops);
+ return dma_ops->map_single(dev, page_address(page) + offset, size,
+ direction, attrs);
+}
+
+static inline void dma_unmap_page_attrs(struct device *dev,
+ dma_addr_t dma_address,
+ size_t size,
+ enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+ BUG_ON(!dma_ops);
+ dma_ops->unmap_single(dev, dma_address, size, direction, attrs);
+}
+
+static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+ BUG_ON(!dma_ops);
+ return dma_ops->map_sg(dev, sg, nents, direction, attrs);
+}
+
+static inline void dma_unmap_sg_attrs(struct device *dev,
+ struct scatterlist *sg,
+ int nhwentries,
+ enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+ BUG_ON(!dma_ops);
+ dma_ops->unmap_sg(dev, sg, nhwentries, direction, attrs);
+}
+
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+ BUG_ON(!dma_ops);
+ return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+ BUG_ON(!dma_ops);
+ dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
+}
+
+static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
+ size_t size,
+ enum dma_data_direction direction)
+{
+ return dma_map_single_attrs(dev, cpu_addr, size, direction, NULL);
+}
+
+static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
+ size_t size,
+ enum dma_data_direction direction)
+{
+ dma_unmap_single_attrs(dev, dma_addr, size, direction, NULL);
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ return dma_map_page_attrs(dev, page, offset, size, direction, NULL);
+}
+
+static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+ size_t size,
+ enum dma_data_direction direction)
+{
+ dma_unmap_page_attrs(dev, dma_address, size, direction, NULL);
+}
+
+static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction)
+{
+ return dma_map_sg_attrs(dev, sg, nents, direction, NULL);
+}
+
+static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nhwentries,
+ enum dma_data_direction direction)
+{
+ dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
+}
+
+/*
+ * Available generic sets of operations
+ */
+extern struct dma_mapping_ops dma_iommu_ops;
+extern struct dma_mapping_ops dma_direct_ops;
+
+#else /* CONFIG_PPC64 */
+
+#define dma_supported(dev, mask) (1)
+
+static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+ return -EIO;
+
+ *dev->dma_mask = dma_mask;
+
+ return 0;
+}
+
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t * dma_handle,
+ gfp_t gfp)
+{
+#ifdef CONFIG_NOT_COHERENT_CACHE
+ return __dma_alloc_coherent(size, dma_handle, gfp);
+#else
+ void *ret;
+ /* ignore region specifiers */
+ gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
+
+ if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
+ gfp |= GFP_DMA;
+
+ ret = (void *)__get_free_pages(gfp, get_order(size));
+
+ if (ret != NULL) {
+ memset(ret, 0, size);
+ *dma_handle = virt_to_bus(ret);
+ }
+
+ return ret;
+#endif
+}
+
+static inline void
+dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle)
+{
+#ifdef CONFIG_NOT_COHERENT_CACHE
+ __dma_free_coherent(size, vaddr);
+#else
+ free_pages((unsigned long)vaddr, get_order(size));
+#endif
+}
+
+static inline dma_addr_t
+dma_map_single(struct device *dev, void *ptr, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+
+ __dma_sync(ptr, size, direction);
+
+ return virt_to_bus(ptr);
+}
+
+static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
+ size_t size,
+ enum dma_data_direction direction)
+{
+ /* We do nothing. */
+}
+
+static inline dma_addr_t
+dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+
+ __dma_sync_page(page, offset, size, direction);
+
+ return page_to_bus(page) + offset;
+}
+
+static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+ size_t size,
+ enum dma_data_direction direction)
+{
+ /* We do nothing. */
+}
+
+static inline int
+dma_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
+ enum dma_data_direction direction)
+{
+ struct scatterlist *sg;
+ int i;
+
+ BUG_ON(direction == DMA_NONE);
+
+ for_each_sg(sgl, sg, nents, i) {
+ BUG_ON(!sg_page(sg));
+ __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
+ sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
+ }
+
+ return nents;
+}
+
+static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nhwentries,
+ enum dma_data_direction direction)
+{
+ /* We don't do anything here. */
+}
+
+#endif /* CONFIG_PPC64 */
+
+static inline void dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+ __dma_sync(bus_to_virt(dma_handle), size, direction);
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+ __dma_sync(bus_to_virt(dma_handle), size, direction);
+}
+
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sgl, int nents,
+ enum dma_data_direction direction)
+{
+ struct scatterlist *sg;
+ int i;
+
+ BUG_ON(direction == DMA_NONE);
+
+ for_each_sg(sgl, sg, nents, i)
+ __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
+}
+
+static inline void dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sgl, int nents,
+ enum dma_data_direction direction)
+{
+ struct scatterlist *sg;
+ int i;
+
+ BUG_ON(direction == DMA_NONE);
+
+ for_each_sg(sgl, sg, nents, i)
+ __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
+}
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+#ifdef CONFIG_PPC64
+ return (dma_addr == DMA_ERROR_CODE);
+#else
+ return 0;
+#endif
+}
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+#ifdef CONFIG_NOT_COHERENT_CACHE
+#define dma_is_consistent(d, h) (0)
+#else
+#define dma_is_consistent(d, h) (1)
+#endif
+
+static inline int dma_get_cache_alignment(void)
+{
+#ifdef CONFIG_PPC64
+ /* no easy way to get cache size on all processors, so return
+ * the maximum possible, to be safe */
+ return (1 << INTERNODE_CACHE_SHIFT);
+#else
+ /*
+ * Each processor family will define its own L1_CACHE_SHIFT,
+ * L1_CACHE_BYTES wraps to this, so this is always safe.
+ */
+ return L1_CACHE_BYTES;
+#endif
+}
+
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ /* just sync everything for now */
+ dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
+}
+
+static inline void dma_sync_single_range_for_device(struct device *dev,
+ dma_addr_t dma_handle, unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ /* just sync everything for now */
+ dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
+}
+
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+ __dma_sync(vaddr, size, (int)direction);
+}
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_DMA_MAPPING_H */
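From a driver's point of view the same calls work on both the 32-bit direct implementation and the 64-bit ops-based one: set a mask, allocate a coherent region for long-lived descriptors, and use streaming mappings (with matching unmaps) for per-I/O buffers. A minimal sketch against the API declared above; the device pointer, sizes and DMA_32BIT_MASK/GFP_KERNEL come from the usual kernel headers and are placeholders here:

/*
 * Sketch: typical coherent + streaming DMA usage. Error paths are
 * abbreviated and the hardware hand-off is elided.
 */
static int example_dma(struct device *dev, void *pkt, size_t pkt_len)
{
	dma_addr_t desc_bus, pkt_bus;
	void *desc;

	if (dma_set_mask(dev, DMA_32BIT_MASK))
		return -EIO;

	/* long-lived, cache-coherent descriptor memory */
	desc = dma_alloc_coherent(dev, PAGE_SIZE, &desc_bus, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	/* streaming mapping for one outgoing buffer */
	pkt_bus = dma_map_single(dev, pkt, pkt_len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pkt_bus)) {
		dma_free_coherent(dev, PAGE_SIZE, desc, desc_bus);
		return -EIO;
	}

	/* ... hand desc_bus/pkt_bus to the hardware, wait for completion ... */

	dma_unmap_single(dev, pkt_bus, pkt_len, DMA_TO_DEVICE);
	dma_free_coherent(dev, PAGE_SIZE, desc, desc_bus);
	return 0;
}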
diff --git a/arch/powerpc/include/asm/dma.h b/arch/powerpc/include/asm/dma.h
new file mode 100644
index 0000000..a7e06e2
--- /dev/null
+++ b/arch/powerpc/include/asm/dma.h
@@ -0,0 +1,360 @@
+#ifndef _ASM_POWERPC_DMA_H
+#define _ASM_POWERPC_DMA_H
+#ifdef __KERNEL__
+
+/*
+ * Defines for using and allocating dma channels.
+ * Written by Hennus Bergman, 1992.
+ * High DMA channel support & info by Hannu Savolainen
+ * and John Boyd, Nov. 1992.
+ * Changes for ppc sound by Christoph Nadig
+ */
+
+/*
+ * Note: Adapted for PowerPC by Gary Thomas
+ * Modified by Cort Dougan <cort@cs.nmt.edu>
+ *
+ * None of this really applies for Power Macintoshes. There is
+ * basically just enough here to get kernel/dma.c to compile.
+ *
+ * There may be some comments or restrictions made here which are
+ * not valid for the PReP platform. Take what you read
+ * with a grain of salt.
+ */
+
+#include <asm/io.h>
+#include <linux/spinlock.h>
+#include <asm/system.h>
+
+#ifndef MAX_DMA_CHANNELS
+#define MAX_DMA_CHANNELS 8
+#endif
+
+/* The maximum address that we can perform a DMA transfer to on this platform */
+/* Doesn't really apply... */
+#define MAX_DMA_ADDRESS (~0UL)
+
+#if !defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI)
+
+#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
+#define dma_outb outb_p
+#else
+#define dma_outb outb
+#endif
+
+#define dma_inb inb
+
+/*
+ * NOTES about DMA transfers:
+ *
+ * controller 1: channels 0-3, byte operations, ports 00-1F
+ * controller 2: channels 4-7, word operations, ports C0-DF
+ *
+ * - ALL registers are 8 bits only, regardless of transfer size
+ * - channel 4 is not used - cascades 1 into 2.
+ * - channels 0-3 are byte - addresses/counts are for physical bytes
+ * - channels 5-7 are word - addresses/counts are for physical words
+ * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
+ * - transfer count loaded to registers is 1 less than actual count
+ * - controller 2 offsets are all even (2x offsets for controller 1)
+ * - page registers for 5-7 don't use data bit 0, represent 128K pages
+ * - page registers for 0-3 use bit 0, represent 64K pages
+ *
+ * On PReP, DMA transfers are limited to the lower 16MB of _physical_ memory.
+ * On CHRP, the W83C553F (and VLSI Tollgate?) support full 32 bit addressing.
+ * Note that addresses loaded into registers must be _physical_ addresses,
+ * not logical addresses (which may differ if paging is active).
+ *
+ * Address mapping for channels 0-3:
+ *
+ * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses)
+ * | ... | | ... | | ... |
+ * | ... | | ... | | ... |
+ * | ... | | ... | | ... |
+ * P7 ... P0 A7 ... A0 A7 ... A0
+ * | Page | Addr MSB | Addr LSB | (DMA registers)
+ *
+ * Address mapping for channels 5-7:
+ *
+ * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses)
+ * | ... | \ \ ... \ \ \ ... \ \
+ * | ... | \ \ ... \ \ \ ... \ (not used)
+ * | ... | \ \ ... \ \ \ ... \
+ * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0
+ * | Page | Addr MSB | Addr LSB | (DMA registers)
+ *
+ * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
+ * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
+ * the hardware level, so odd-byte transfers aren't possible).
+ *
+ * Transfer count (_not # bytes_) is limited to 64K, represented as actual
+ * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more,
+ * and up to 128K bytes may be transferred on channels 5-7 in one operation.
+ *
+ */
+
+/* 8237 DMA controllers */
+#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
+#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */
+
+/* DMA controller registers */
+#define DMA1_CMD_REG 0x08 /* command register (w) */
+#define DMA1_STAT_REG 0x08 /* status register (r) */
+#define DMA1_REQ_REG 0x09 /* request register (w) */
+#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */
+#define DMA1_MODE_REG 0x0B /* mode register (w) */
+#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */
+#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */
+#define DMA1_RESET_REG 0x0D /* Master Clear (w) */
+#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */
+#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */
+
+#define DMA2_CMD_REG 0xD0 /* command register (w) */
+#define DMA2_STAT_REG 0xD0 /* status register (r) */
+#define DMA2_REQ_REG 0xD2 /* request register (w) */
+#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */
+#define DMA2_MODE_REG 0xD6 /* mode register (w) */
+#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */
+#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */
+#define DMA2_RESET_REG 0xDA /* Master Clear (w) */
+#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */
+#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */
+
+#define DMA_ADDR_0 0x00 /* DMA address registers */
+#define DMA_ADDR_1 0x02
+#define DMA_ADDR_2 0x04
+#define DMA_ADDR_3 0x06
+#define DMA_ADDR_4 0xC0
+#define DMA_ADDR_5 0xC4
+#define DMA_ADDR_6 0xC8
+#define DMA_ADDR_7 0xCC
+
+#define DMA_CNT_0 0x01 /* DMA count registers */
+#define DMA_CNT_1 0x03
+#define DMA_CNT_2 0x05
+#define DMA_CNT_3 0x07
+#define DMA_CNT_4 0xC2
+#define DMA_CNT_5 0xC6
+#define DMA_CNT_6 0xCA
+#define DMA_CNT_7 0xCE
+
+#define DMA_LO_PAGE_0 0x87 /* DMA page registers */
+#define DMA_LO_PAGE_1 0x83
+#define DMA_LO_PAGE_2 0x81
+#define DMA_LO_PAGE_3 0x82
+#define DMA_LO_PAGE_5 0x8B
+#define DMA_LO_PAGE_6 0x89
+#define DMA_LO_PAGE_7 0x8A
+
+#define DMA_HI_PAGE_0 0x487 /* DMA page registers */
+#define DMA_HI_PAGE_1 0x483
+#define DMA_HI_PAGE_2 0x481
+#define DMA_HI_PAGE_3 0x482
+#define DMA_HI_PAGE_5 0x48B
+#define DMA_HI_PAGE_6 0x489
+#define DMA_HI_PAGE_7 0x48A
+
+#define DMA1_EXT_REG 0x40B
+#define DMA2_EXT_REG 0x4D6
+
+#ifndef __powerpc64__
+ /* in arch/ppc/kernel/setup.c -- Cort */
+ extern unsigned int DMA_MODE_WRITE;
+ extern unsigned int DMA_MODE_READ;
+ extern unsigned long ISA_DMA_THRESHOLD;
+#else
+ #define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */
+ #define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
+#endif
+
+#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
+
+#define DMA_AUTOINIT 0x10
+
+extern spinlock_t dma_spin_lock;
+
+static __inline__ unsigned long claim_dma_lock(void)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&dma_spin_lock, flags);
+ return flags;
+}
+
+static __inline__ void release_dma_lock(unsigned long flags)
+{
+ spin_unlock_irqrestore(&dma_spin_lock, flags);
+}
+
+/* enable/disable a specific DMA channel */
+static __inline__ void enable_dma(unsigned int dmanr)
+{
+ unsigned char ucDmaCmd = 0x00;
+
+ if (dmanr != 4) {
+ dma_outb(0, DMA2_MASK_REG); /* This may not be enabled */
+ dma_outb(ucDmaCmd, DMA2_CMD_REG); /* Enable group */
+ }
+ if (dmanr <= 3) {
+ dma_outb(dmanr, DMA1_MASK_REG);
+ dma_outb(ucDmaCmd, DMA1_CMD_REG); /* Enable group */
+ } else {
+ dma_outb(dmanr & 3, DMA2_MASK_REG);
+ }
+}
+
+static __inline__ void disable_dma(unsigned int dmanr)
+{
+ if (dmanr <= 3)
+ dma_outb(dmanr | 4, DMA1_MASK_REG);
+ else
+ dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
+}
+
+/* Clear the 'DMA Pointer Flip Flop'.
+ * Write 0 for LSB/MSB, 1 for MSB/LSB access.
+ * Use this once to initialize the FF to a known state.
+ * After that, keep track of it. :-)
+ * --- In order to do that, the DMA routines below should ---
+ * --- only be used while interrupts are disabled! ---
+ */
+static __inline__ void clear_dma_ff(unsigned int dmanr)
+{
+ if (dmanr <= 3)
+ dma_outb(0, DMA1_CLEAR_FF_REG);
+ else
+ dma_outb(0, DMA2_CLEAR_FF_REG);
+}
+
+/* set mode (above) for a specific DMA channel */
+static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
+{
+ if (dmanr <= 3)
+ dma_outb(mode | dmanr, DMA1_MODE_REG);
+ else
+ dma_outb(mode | (dmanr & 3), DMA2_MODE_REG);
+}
+
+/* Set only the page register bits of the transfer address.
+ * This is used for successive transfers when we know the contents of
+ * the lower 16 bits of the DMA current address register, but a 64k boundary
+ * may have been crossed.
+ */
+static __inline__ void set_dma_page(unsigned int dmanr, int pagenr)
+{
+ switch (dmanr) {
+ case 0:
+ dma_outb(pagenr, DMA_LO_PAGE_0);
+ dma_outb(pagenr >> 8, DMA_HI_PAGE_0);
+ break;
+ case 1:
+ dma_outb(pagenr, DMA_LO_PAGE_1);
+ dma_outb(pagenr >> 8, DMA_HI_PAGE_1);
+ break;
+ case 2:
+ dma_outb(pagenr, DMA_LO_PAGE_2);
+ dma_outb(pagenr >> 8, DMA_HI_PAGE_2);
+ break;
+ case 3:
+ dma_outb(pagenr, DMA_LO_PAGE_3);
+ dma_outb(pagenr >> 8, DMA_HI_PAGE_3);
+ break;
+ case 5:
+ dma_outb(pagenr & 0xfe, DMA_LO_PAGE_5);
+ dma_outb(pagenr >> 8, DMA_HI_PAGE_5);
+ break;
+ case 6:
+ dma_outb(pagenr & 0xfe, DMA_LO_PAGE_6);
+ dma_outb(pagenr >> 8, DMA_HI_PAGE_6);
+ break;
+ case 7:
+ dma_outb(pagenr & 0xfe, DMA_LO_PAGE_7);
+ dma_outb(pagenr >> 8, DMA_HI_PAGE_7);
+ break;
+ }
+}
+
+/* Set transfer address & page bits for specific DMA channel.
+ * Assumes dma flipflop is clear.
+ */
+static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int phys)
+{
+ if (dmanr <= 3) {
+ dma_outb(phys & 0xff,
+ ((dmanr & 3) << 1) + IO_DMA1_BASE);
+ dma_outb((phys >> 8) & 0xff,
+ ((dmanr & 3) << 1) + IO_DMA1_BASE);
+ } else {
+ dma_outb((phys >> 1) & 0xff,
+ ((dmanr & 3) << 2) + IO_DMA2_BASE);
+ dma_outb((phys >> 9) & 0xff,
+ ((dmanr & 3) << 2) + IO_DMA2_BASE);
+ }
+ set_dma_page(dmanr, phys >> 16);
+}
+
+
+/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
+ * a specific DMA channel.
+ * You must ensure the parameters are valid.
+ * NOTE: from a manual: "the number of transfers is one more
+ * than the initial word count"! This is taken into account.
+ * Assumes dma flip-flop is clear.
+ * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
+ */
+static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
+{
+ count--;
+ if (dmanr <= 3) {
+ dma_outb(count & 0xff,
+ ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
+ dma_outb((count >> 8) & 0xff,
+ ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
+ } else {
+ dma_outb((count >> 1) & 0xff,
+ ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
+ dma_outb((count >> 9) & 0xff,
+ ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
+ }
+}
+
+
+/* Get DMA residue count. After a DMA transfer, this
+ * should return zero. Reading this while a DMA transfer is
+ * still in progress will return unpredictable results.
+ * If called before the channel has been used, it may return 1.
+ * Otherwise, it returns the number of _bytes_ left to transfer.
+ *
+ * Assumes DMA flip-flop is clear.
+ */
+static __inline__ int get_dma_residue(unsigned int dmanr)
+{
+ unsigned int io_port = (dmanr <= 3)
+ ? ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE
+ : ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE;
+
+ /* using short to get 16-bit wrap around */
+ unsigned short count;
+
+ count = 1 + dma_inb(io_port);
+ count += dma_inb(io_port) << 8;
+
+ return (dmanr <= 3) ? count : (count << 1);
+}
+
+/* These are in kernel/dma.c: */
+
+/* reserve a DMA channel */
+extern int request_dma(unsigned int dmanr, const char *device_id);
+/* release it again */
+extern void free_dma(unsigned int dmanr);
+
+#ifdef CONFIG_PCI
+extern int isa_dma_bridge_buggy;
+#else
+#define isa_dma_bridge_buggy (0)
+#endif
+
+#endif /* !defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI) */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_DMA_H */
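Programming an 8237 channel follows the sequence the comments above describe: take the spinlock, mask the channel, clear the address/count flip-flop, load mode, address and count, then unmask. A sketch of that sequence; the channel must already have been claimed with request_dma(), the direction is illustrative, and the buffer has to be a physical address that respects the boundary and addressing limits noted above:

/*
 * Sketch: classic ISA-style DMA setup using the helpers above.
 */
static void example_start_isa_dma(unsigned int chan,
				  unsigned int phys, unsigned int len)
{
	unsigned long flags = claim_dma_lock();

	disable_dma(chan);			/* mask the channel */
	clear_dma_ff(chan);			/* reset the flip-flop */
	set_dma_mode(chan, DMA_MODE_READ);	/* I/O to memory */
	set_dma_addr(chan, phys);
	set_dma_count(chan, len);		/* bytes; even for channels 5-7 */
	enable_dma(chan);			/* unmask and go */

	release_dma_lock(flags);
}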
diff --git a/arch/powerpc/include/asm/edac.h b/arch/powerpc/include/asm/edac.h
new file mode 100644
index 0000000..6ead88b
--- /dev/null
+++ b/arch/powerpc/include/asm/edac.h
@@ -0,0 +1,40 @@
+/*
+ * PPC EDAC common defs
+ *
+ * Author: Dave Jiang <djiang@mvista.com>
+ *
+ * 2007 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#ifndef ASM_EDAC_H
+#define ASM_EDAC_H
+/*
+ * ECC atomic, DMA, SMP and interrupt safe scrub function.
+ * Implements the per-arch atomic_scrub() that EDAC uses for software
+ * ECC scrubbing. It reads memory and then writes back the original
+ * value, allowing the hardware to detect and correct memory errors.
+ */
+static __inline__ void atomic_scrub(void *va, u32 size)
+{
+ unsigned int *virt_addr = va;
+ unsigned int temp;
+ unsigned int i;
+
+ for (i = 0; i < size / sizeof(*virt_addr); i++, virt_addr++) {
+ /* Very carefully read and write to memory atomically
+ * so we are interrupt, DMA and SMP safe.
+ */
+ __asm__ __volatile__ ("\n\
+ 1: lwarx %0,0,%1\n\
+ stwcx. %0,0,%1\n\
+ bne- 1b\n\
+ isync"
+ : "=&r"(temp)
+ : "r"(virt_addr)
+ : "cr0", "memory");
+ }
+}
+
+#endif
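The lwarx/stwcx. pair rewrites each word with its own value: the store only succeeds if nothing else touched the word since the load, and the write-back gives the memory controller a chance to correct a single-bit error in place. The EDAC core typically calls this on a kernel mapping of the page that reported a correctable error; a hedged sketch, assuming the page is in lowmem and the usual mm headers are available:

/*
 * Sketch: scrub one lowmem page word by word.
 */
static void example_scrub_page(struct page *pg)
{
	atomic_scrub(page_address(pg), PAGE_SIZE);
}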
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
new file mode 100644
index 0000000..b886bec
--- /dev/null
+++ b/arch/powerpc/include/asm/eeh.h
@@ -0,0 +1,211 @@
+/*
+ * eeh.h
+ * Copyright (C) 2001 Dave Engebretsen & Todd Inglett IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _PPC64_EEH_H
+#define _PPC64_EEH_H
+#ifdef __KERNEL__
+
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/string.h>
+
+struct pci_dev;
+struct pci_bus;
+struct device_node;
+
+#ifdef CONFIG_EEH
+
+extern int eeh_subsystem_enabled;
+
+/* Values for eeh_mode bits in device_node */
+#define EEH_MODE_SUPPORTED (1<<0)
+#define EEH_MODE_NOCHECK (1<<1)
+#define EEH_MODE_ISOLATED (1<<2)
+#define EEH_MODE_RECOVERING (1<<3)
+#define EEH_MODE_IRQ_DISABLED (1<<4)
+
+/* Max number of EEH freezes allowed before we consider the device
+ * to be permanently disabled. */
+#define EEH_MAX_ALLOWED_FREEZES 5
+
+void __init eeh_init(void);
+unsigned long eeh_check_failure(const volatile void __iomem *token,
+ unsigned long val);
+int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev);
+void __init pci_addr_cache_build(void);
+
+/**
+ * eeh_add_device_early
+ * eeh_add_device_late
+ *
+ * Perform eeh initialization for devices added after boot.
+ * Call eeh_add_device_early before doing any i/o to the
+ * device (including config space i/o). Call eeh_add_device_late
+ * to finish the eeh setup for this device.
+ */
+void eeh_add_device_tree_early(struct device_node *);
+void eeh_add_device_tree_late(struct pci_bus *);
+
+/**
+ * eeh_remove_bus_device - undo EEH for device & children.
+ * @dev: pci device to be removed
+ *
+ * As above, this removes the device; it also removes child
+ * pci devices as well.
+ */
+void eeh_remove_bus_device(struct pci_dev *);
+
+/**
+ * EEH_POSSIBLE_ERROR() -- test for possible MMIO failure.
+ *
+ * If this macro yields TRUE, the caller falls back to eeh_check_failure(),
+ * which does further tests out of line.
+ */
+#define EEH_POSSIBLE_ERROR(val, type) ((val) == (type)~0 && eeh_subsystem_enabled)
+
+/*
+ * Reads from a device which has been isolated by EEH will return
+ * all 1s. This macro gives an all-1s value of the given size (in
+ * bytes: 1, 2, or 4) for comparing with the result of a read.
+ */
+#define EEH_IO_ERROR_VALUE(size) (~0U >> ((4 - (size)) * 8))
+
+#else /* !CONFIG_EEH */
+static inline void eeh_init(void) { }
+
+static inline unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val)
+{
+ return val;
+}
+
+static inline int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
+{
+ return 0;
+}
+
+static inline void pci_addr_cache_build(void) { }
+
+static inline void eeh_add_device_tree_early(struct device_node *dn) { }
+
+static inline void eeh_add_device_tree_late(struct pci_bus *bus) { }
+
+static inline void eeh_remove_bus_device(struct pci_dev *dev) { }
+#define EEH_POSSIBLE_ERROR(val, type) (0)
+#define EEH_IO_ERROR_VALUE(size) (-1UL)
+#endif /* CONFIG_EEH */
+
+/*
+ * MMIO read/write operations with EEH support.
+ */
+static inline u8 eeh_readb(const volatile void __iomem *addr)
+{
+ u8 val = in_8(addr);
+ if (EEH_POSSIBLE_ERROR(val, u8))
+ return eeh_check_failure(addr, val);
+ return val;
+}
+
+static inline u16 eeh_readw(const volatile void __iomem *addr)
+{
+ u16 val = in_le16(addr);
+ if (EEH_POSSIBLE_ERROR(val, u16))
+ return eeh_check_failure(addr, val);
+ return val;
+}
+
+static inline u32 eeh_readl(const volatile void __iomem *addr)
+{
+ u32 val = in_le32(addr);
+ if (EEH_POSSIBLE_ERROR(val, u32))
+ return eeh_check_failure(addr, val);
+ return val;
+}
+
+static inline u64 eeh_readq(const volatile void __iomem *addr)
+{
+ u64 val = in_le64(addr);
+ if (EEH_POSSIBLE_ERROR(val, u64))
+ return eeh_check_failure(addr, val);
+ return val;
+}
+
+static inline u16 eeh_readw_be(const volatile void __iomem *addr)
+{
+ u16 val = in_be16(addr);
+ if (EEH_POSSIBLE_ERROR(val, u16))
+ return eeh_check_failure(addr, val);
+ return val;
+}
+
+static inline u32 eeh_readl_be(const volatile void __iomem *addr)
+{
+ u32 val = in_be32(addr);
+ if (EEH_POSSIBLE_ERROR(val, u32))
+ return eeh_check_failure(addr, val);
+ return val;
+}
+
+static inline u64 eeh_readq_be(const volatile void __iomem *addr)
+{
+ u64 val = in_be64(addr);
+ if (EEH_POSSIBLE_ERROR(val, u64))
+ return eeh_check_failure(addr, val);
+ return val;
+}
+
+static inline void eeh_memcpy_fromio(void *dest, const
+ volatile void __iomem *src,
+ unsigned long n)
+{
+ _memcpy_fromio(dest, src, n);
+
+ /* Look for ffff's here at dest[n]. Assume that at least 4 bytes
+ * were copied. Check all four bytes.
+ */
+ if (n >= 4 && EEH_POSSIBLE_ERROR(*((u32 *)(dest + n - 4)), u32))
+ eeh_check_failure(src, *((u32 *)(dest + n - 4)));
+}
+
+/* in-string eeh macros */
+static inline void eeh_readsb(const volatile void __iomem *addr, void * buf,
+ int ns)
+{
+ _insb(addr, buf, ns);
+ if (EEH_POSSIBLE_ERROR((*(((u8*)buf)+ns-1)), u8))
+ eeh_check_failure(addr, *(u8*)buf);
+}
+
+static inline void eeh_readsw(const volatile void __iomem *addr, void * buf,
+ int ns)
+{
+ _insw(addr, buf, ns);
+ if (EEH_POSSIBLE_ERROR((*(((u16*)buf)+ns-1)), u16))
+ eeh_check_failure(addr, *(u16*)buf);
+}
+
+static inline void eeh_readsl(const volatile void __iomem *addr, void * buf,
+ int nl)
+{
+ _insl(addr, buf, nl);
+ if (EEH_POSSIBLE_ERROR((*(((u32*)buf)+nl-1)), u32))
+ eeh_check_failure(addr, *(u32*)buf);
+}
+
+#endif /* __KERNEL__ */
+#endif /* _PPC64_EEH_H */
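The eeh_read*() helpers implement a cheap-check/expensive-check split: the MMIO read happens normally, and only when the value is the all-ones pattern for its width (what an isolated slot returns) is eeh_check_failure() called to inspect the slot out of line. A driver-side sketch using EEH_IO_ERROR_VALUE(); the register offset is made up, and on platforms with CONFIG_EEH the ordinary accessors are normally already routed through these helpers:

/*
 * Sketch: read a (made-up) status register and treat an all-ones
 * result as a probable EEH isolation.
 */
#define EXAMPLE_STATUS_REG	0x10

static int example_check_status(const volatile void __iomem *base)
{
	u32 status = eeh_readl(base + EXAMPLE_STATUS_REG);

	if (status == EEH_IO_ERROR_VALUE(4))
		return -EIO;		/* slot is probably isolated */

	return status & 0x1;
}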
diff --git a/arch/powerpc/include/asm/eeh_event.h b/arch/powerpc/include/asm/eeh_event.h
new file mode 100644
index 0000000..cc3cb04
--- /dev/null
+++ b/arch/powerpc/include/asm/eeh_event.h
@@ -0,0 +1,53 @@
+/*
+ * eeh_event.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Copyright (c) 2005 Linas Vepstas <linas@linas.org>
+ */
+
+#ifndef ASM_POWERPC_EEH_EVENT_H
+#define ASM_POWERPC_EEH_EVENT_H
+#ifdef __KERNEL__
+
+/** EEH event -- structure holding pci controller data that describes
+ * a change in the isolation status of a PCI slot. A pointer
+ * to this struct is passed as the data pointer in a notify callback.
+ */
+struct eeh_event {
+ struct list_head list;
+ struct device_node *dn; /* struct device node */
+ struct pci_dev *dev; /* affected device */
+};
+
+/**
+ * eeh_send_failure_event - generate a PCI error event
+ * @dn: device node for the affected device
+ * @dev: pci device
+ *
+ * This routine builds a PCI error event which will be delivered
+ * to all listeners on the eeh_notifier_chain.
+ *
+ * This routine can be called within an interrupt context;
+ * the actual event will be delivered in a normal context
+ * (from a workqueue).
+ */
+int eeh_send_failure_event (struct device_node *dn,
+ struct pci_dev *dev);
+
+/* Main recovery function */
+struct pci_dn * handle_eeh_events (struct eeh_event *);
+
+#endif /* __KERNEL__ */
+#endif /* ASM_POWERPC_EEH_EVENT_H */
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
new file mode 100644
index 0000000..80d1f39
--- /dev/null
+++ b/arch/powerpc/include/asm/elf.h
@@ -0,0 +1,424 @@
+#ifndef _ASM_POWERPC_ELF_H
+#define _ASM_POWERPC_ELF_H
+
+#ifdef __KERNEL__
+#include <linux/sched.h> /* for task_struct */
+#include <asm/page.h>
+#include <asm/string.h>
+#endif
+
+#include <asm/types.h>
+#include <asm/ptrace.h>
+#include <asm/cputable.h>
+#include <asm/auxvec.h>
+
+/* PowerPC relocations defined by the ABIs */
+#define R_PPC_NONE 0
+#define R_PPC_ADDR32 1 /* 32bit absolute address */
+#define R_PPC_ADDR24 2 /* 26bit address, 2 bits ignored. */
+#define R_PPC_ADDR16 3 /* 16bit absolute address */
+#define R_PPC_ADDR16_LO 4 /* lower 16bit of absolute address */
+#define R_PPC_ADDR16_HI 5 /* high 16bit of absolute address */
+#define R_PPC_ADDR16_HA 6 /* adjusted high 16bit */
+#define R_PPC_ADDR14 7 /* 16bit address, 2 bits ignored */
+#define R_PPC_ADDR14_BRTAKEN 8
+#define R_PPC_ADDR14_BRNTAKEN 9
+#define R_PPC_REL24 10 /* PC relative 26 bit */
+#define R_PPC_REL14 11 /* PC relative 16 bit */
+#define R_PPC_REL14_BRTAKEN 12
+#define R_PPC_REL14_BRNTAKEN 13
+#define R_PPC_GOT16 14
+#define R_PPC_GOT16_LO 15
+#define R_PPC_GOT16_HI 16
+#define R_PPC_GOT16_HA 17
+#define R_PPC_PLTREL24 18
+#define R_PPC_COPY 19
+#define R_PPC_GLOB_DAT 20
+#define R_PPC_JMP_SLOT 21
+#define R_PPC_RELATIVE 22
+#define R_PPC_LOCAL24PC 23
+#define R_PPC_UADDR32 24
+#define R_PPC_UADDR16 25
+#define R_PPC_REL32 26
+#define R_PPC_PLT32 27
+#define R_PPC_PLTREL32 28
+#define R_PPC_PLT16_LO 29
+#define R_PPC_PLT16_HI 30
+#define R_PPC_PLT16_HA 31
+#define R_PPC_SDAREL16 32
+#define R_PPC_SECTOFF 33
+#define R_PPC_SECTOFF_LO 34
+#define R_PPC_SECTOFF_HI 35
+#define R_PPC_SECTOFF_HA 36
+
+/* PowerPC relocations defined for the TLS access ABI. */
+#define R_PPC_TLS 67 /* none (sym+add)@tls */
+#define R_PPC_DTPMOD32 68 /* word32 (sym+add)@dtpmod */
+#define R_PPC_TPREL16 69 /* half16* (sym+add)@tprel */
+#define R_PPC_TPREL16_LO 70 /* half16 (sym+add)@tprel@l */
+#define R_PPC_TPREL16_HI 71 /* half16 (sym+add)@tprel@h */
+#define R_PPC_TPREL16_HA 72 /* half16 (sym+add)@tprel@ha */
+#define R_PPC_TPREL32 73 /* word32 (sym+add)@tprel */
+#define R_PPC_DTPREL16 74 /* half16* (sym+add)@dtprel */
+#define R_PPC_DTPREL16_LO 75 /* half16 (sym+add)@dtprel@l */
+#define R_PPC_DTPREL16_HI 76 /* half16 (sym+add)@dtprel@h */
+#define R_PPC_DTPREL16_HA 77 /* half16 (sym+add)@dtprel@ha */
+#define R_PPC_DTPREL32 78 /* word32 (sym+add)@dtprel */
+#define R_PPC_GOT_TLSGD16 79 /* half16* (sym+add)@got@tlsgd */
+#define R_PPC_GOT_TLSGD16_LO 80 /* half16 (sym+add)@got@tlsgd@l */
+#define R_PPC_GOT_TLSGD16_HI 81 /* half16 (sym+add)@got@tlsgd@h */
+#define R_PPC_GOT_TLSGD16_HA 82 /* half16 (sym+add)@got@tlsgd@ha */
+#define R_PPC_GOT_TLSLD16 83 /* half16* (sym+add)@got@tlsld */
+#define R_PPC_GOT_TLSLD16_LO 84 /* half16 (sym+add)@got@tlsld@l */
+#define R_PPC_GOT_TLSLD16_HI 85 /* half16 (sym+add)@got@tlsld@h */
+#define R_PPC_GOT_TLSLD16_HA 86 /* half16 (sym+add)@got@tlsld@ha */
+#define R_PPC_GOT_TPREL16 87 /* half16* (sym+add)@got@tprel */
+#define R_PPC_GOT_TPREL16_LO 88 /* half16 (sym+add)@got@tprel@l */
+#define R_PPC_GOT_TPREL16_HI 89 /* half16 (sym+add)@got@tprel@h */
+#define R_PPC_GOT_TPREL16_HA 90 /* half16 (sym+add)@got@tprel@ha */
+#define R_PPC_GOT_DTPREL16 91 /* half16* (sym+add)@got@dtprel */
+#define R_PPC_GOT_DTPREL16_LO 92 /* half16* (sym+add)@got@dtprel@l */
+#define R_PPC_GOT_DTPREL16_HI 93 /* half16* (sym+add)@got@dtprel@h */
+#define R_PPC_GOT_DTPREL16_HA 94 /* half16* (sym+add)@got@dtprel@ha */
+
+/* keep this the last entry. */
+#define R_PPC_NUM 95
+
+/*
+ * ELF register definitions..
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define ELF_NGREG 48 /* includes nip, msr, lr, etc. */
+#define ELF_NFPREG 33 /* includes fpscr */
+
+typedef unsigned long elf_greg_t64;
+typedef elf_greg_t64 elf_gregset_t64[ELF_NGREG];
+
+typedef unsigned int elf_greg_t32;
+typedef elf_greg_t32 elf_gregset_t32[ELF_NGREG];
+typedef elf_gregset_t32 compat_elf_gregset_t;
+
+/*
+ * ELF_ARCH, CLASS, and DATA are used to set parameters in the core dumps.
+ */
+#ifdef __powerpc64__
+# define ELF_NVRREG32 33 /* includes vscr & vrsave stuffed together */
+# define ELF_NVRREG 34 /* includes vscr & vrsave in split vectors */
+# define ELF_NVSRHALFREG 32 /* Half the vsx registers */
+# define ELF_GREG_TYPE elf_greg_t64
+#else
+# define ELF_NEVRREG 34 /* includes acc (as 2) */
+# define ELF_NVRREG 33 /* includes vscr */
+# define ELF_GREG_TYPE elf_greg_t32
+# define ELF_ARCH EM_PPC
+# define ELF_CLASS ELFCLASS32
+# define ELF_DATA ELFDATA2MSB
+#endif /* __powerpc64__ */
+
+#ifndef ELF_ARCH
+# define ELF_ARCH EM_PPC64
+# define ELF_CLASS ELFCLASS64
+# define ELF_DATA ELFDATA2MSB
+ typedef elf_greg_t64 elf_greg_t;
+ typedef elf_gregset_t64 elf_gregset_t;
+#else
+ /* Assumption: ELF_ARCH == EM_PPC and ELF_CLASS == ELFCLASS32 */
+ typedef elf_greg_t32 elf_greg_t;
+ typedef elf_gregset_t32 elf_gregset_t;
+#endif /* ELF_ARCH */
+
+/* Floating point registers */
+typedef double elf_fpreg_t;
+typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+
+/* Altivec registers */
+/*
+ * The entries with indexes 0-31 contain the corresponding vector registers.
+ * The entry with index 32 contains the vscr as the last word (offset 12)
+ * within the quadword. This allows the vscr to be stored as either a
+ * quadword (since it must be copied via a vector register to/from storage)
+ * or as a word.
+ *
+ * 64-bit kernel notes: The entry at index 33 contains the vrsave as the first
+ * word (offset 0) within the quadword.
+ *
+ * This definition of the VMX state is compatible with the current PPC32
+ * ptrace interface. This allows signal handling and ptrace to use the same
+ * structures. This also simplifies the implementation of a bi-arch
+ * (combined 32-bit and 64-bit) gdb.
+ *
+ * Note that it's _not_ compatible with the 32-bit ucontext, which packs
+ * the vrsave in with the vscr and so only uses 33 vectors for the
+ * register set.
+ */
+typedef __vector128 elf_vrreg_t;
+typedef elf_vrreg_t elf_vrregset_t[ELF_NVRREG];
+#ifdef __powerpc64__
+typedef elf_vrreg_t elf_vrregset_t32[ELF_NVRREG32];
+typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
+#endif
+
+#ifdef __KERNEL__
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) ((x)->e_machine == ELF_ARCH)
+#define compat_elf_check_arch(x) ((x)->e_machine == EM_PPC)
+
+#define USE_ELF_CORE_DUMP
+#define CORE_DUMP_USE_REGSET
+#define ELF_EXEC_PAGESIZE PAGE_SIZE
+
+/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ use of this is to invoke "./ld.so someprog" to test out a new version of
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+#define ELF_ET_DYN_BASE (0x20000000)
+
+/*
+ * Our registers are always unsigned longs, whether we're a 32 bit
+ * process or 64 bit, on either a 64 bit or 32 bit kernel.
+ *
+ * This macro relies on elf_regs[i] having the right type to truncate to,
+ * either u32 or u64. It defines the body of the elf_core_copy_regs
+ * function, either the native one with elf_gregset_t elf_regs or
+ * the 32-bit one with elf_gregset_t32 elf_regs.
+ */
+#define PPC_ELF_CORE_COPY_REGS(elf_regs, regs) \
+ int i, nregs = min(sizeof(*regs) / sizeof(unsigned long), \
+ (size_t)ELF_NGREG); \
+ for (i = 0; i < nregs; i++) \
+ elf_regs[i] = ((unsigned long *) regs)[i]; \
+ memset(&elf_regs[i], 0, (ELF_NGREG - i) * sizeof(elf_regs[0]))
+
+/* Common routine for both 32-bit and 64-bit native processes */
+static inline void ppc_elf_core_copy_regs(elf_gregset_t elf_regs,
+ struct pt_regs *regs)
+{
+ PPC_ELF_CORE_COPY_REGS(elf_regs, regs);
+}
+#define ELF_CORE_COPY_REGS(gregs, regs) ppc_elf_core_copy_regs(gregs, regs);
+
+typedef elf_vrregset_t elf_fpxregset_t;
+
+/* ELF_HWCAP yields a mask that user programs can use to figure out what
+ instruction set this cpu supports. This could be done in userspace,
+ but it's not easy, and we've already done it here. */
+# define ELF_HWCAP (cur_cpu_spec->cpu_user_features)
+
+/* This yields a string that ld.so will use to load implementation
+ specific libraries for optimization. This is more specific in
+ intent than poking at uname or /proc/cpuinfo. */
+
+#define ELF_PLATFORM (cur_cpu_spec->platform)
+
+/* While ELF_PLATFORM indicates the ISA supported by the platform, it
+ * may not accurately reflect the underlying behavior of the hardware
+ * (as in the case of running in Power5+ compatibility mode on a
+ * Power6 machine). ELF_BASE_PLATFORM allows ld.so to load libraries
+ * that are tuned for the real hardware.
+ */
+#define ELF_BASE_PLATFORM (powerpc_base_platform)
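
For reference, a small user-space sketch (hypothetical; getauxval() is a later
glibc addition assumed available) that reads the two strings the kernel
publishes through AT_PLATFORM and AT_BASE_PLATFORM:

#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	const char *plat = (const char *)getauxval(AT_PLATFORM);
	const char *base = (const char *)getauxval(AT_BASE_PLATFORM);

	printf("AT_PLATFORM      = %s\n", plat ? plat : "(none)");
	printf("AT_BASE_PLATFORM = %s\n", base ? base : "(none)");
	return 0;
}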
+
+#ifdef __powerpc64__
+# define ELF_PLAT_INIT(_r, load_addr) do { \
+ _r->gpr[2] = load_addr; \
+} while (0)
+#endif /* __powerpc64__ */
+
+#ifdef __powerpc64__
+# define SET_PERSONALITY(ex, ibcs2) \
+do { \
+ unsigned long new_flags = 0; \
+ if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
+ new_flags = _TIF_32BIT; \
+ if ((current_thread_info()->flags & _TIF_32BIT) \
+ != new_flags) \
+ set_thread_flag(TIF_ABI_PENDING); \
+ else \
+ clear_thread_flag(TIF_ABI_PENDING); \
+ if (personality(current->personality) != PER_LINUX32) \
+ set_personality(PER_LINUX | \
+ (current->personality & (~PER_MASK))); \
+} while (0)
+/*
+ * An executable for which elf_read_implies_exec() returns TRUE will
+ * have the READ_IMPLIES_EXEC personality flag set automatically. This
+ * is only required to work around bugs in old 32bit toolchains. Since
+ * the 64bit ABI has never had these issues, don't enable the workaround
+ * even if we have an executable stack.
+ */
+# define elf_read_implies_exec(ex, exec_stk) (test_thread_flag(TIF_32BIT) ? \
+ (exec_stk != EXSTACK_DISABLE_X) : 0)
+#else
+# define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX)
+#endif /* __powerpc64__ */
+
+extern int dcache_bsize;
+extern int icache_bsize;
+extern int ucache_bsize;
+
+/* vDSO has arch_setup_additional_pages */
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
+struct linux_binprm;
+extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+ int executable_stack);
+#define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b);
+
+#endif /* __KERNEL__ */
+
+/*
+ * The requirements here are:
+ * - keep the final alignment of sp (sp & 0xf)
+ * - make sure the 32-bit value at the first 16 byte aligned position of
+ * AUXV is greater than 16 for glibc compatibility.
+ * AT_IGNOREPPC is used for that.
+ * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
+ * even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
+ * Update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes.
+ */
+#define ARCH_DLINFO \
+do { \
+ /* Handle glibc compatibility. */ \
+ NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
+ NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
+ /* Cache size items */ \
+ NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize); \
+ NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize); \
+ NEW_AUX_ENT(AT_UCACHEBSIZE, ucache_bsize); \
+ VDSO_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso_base) \
+} while (0)
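
A minimal user-space sketch (illustrative only, assuming a 64-bit process and
the common three-argument form of main()) that walks past the environment to
the aux vector, where the duplicated AT_IGNOREPPC entries and the cache block
size entries added above can be observed:

#include <elf.h>
#include <stdio.h>

int main(int argc, char *argv[], char *envp[])
{
	char **p = envp;
	Elf64_auxv_t *aux;

	while (*p)			/* skip the environment strings */
		p++;
	p++;				/* and the terminating NULL */

	for (aux = (Elf64_auxv_t *)p; aux->a_type != AT_NULL; aux++)
		printf("a_type %2lu  a_val %#lx\n",
		       (unsigned long)aux->a_type,
		       (unsigned long)aux->a_un.a_val);
	return 0;
}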
+
+/* PowerPC64 relocations defined by the ABIs */
+#define R_PPC64_NONE R_PPC_NONE
+#define R_PPC64_ADDR32 R_PPC_ADDR32 /* 32bit absolute address. */
+#define R_PPC64_ADDR24 R_PPC_ADDR24 /* 26bit address, word aligned. */
+#define R_PPC64_ADDR16 R_PPC_ADDR16 /* 16bit absolute address. */
+#define R_PPC64_ADDR16_LO R_PPC_ADDR16_LO /* lower 16bits of abs. address. */
+#define R_PPC64_ADDR16_HI R_PPC_ADDR16_HI /* high 16bits of abs. address. */
+#define R_PPC64_ADDR16_HA R_PPC_ADDR16_HA /* adjusted high 16bits. */
+#define R_PPC64_ADDR14 R_PPC_ADDR14 /* 16bit address, word aligned. */
+#define R_PPC64_ADDR14_BRTAKEN R_PPC_ADDR14_BRTAKEN
+#define R_PPC64_ADDR14_BRNTAKEN R_PPC_ADDR14_BRNTAKEN
+#define R_PPC64_REL24 R_PPC_REL24 /* PC relative 26 bit, word aligned. */
+#define R_PPC64_REL14 R_PPC_REL14 /* PC relative 16 bit. */
+#define R_PPC64_REL14_BRTAKEN R_PPC_REL14_BRTAKEN
+#define R_PPC64_REL14_BRNTAKEN R_PPC_REL14_BRNTAKEN
+#define R_PPC64_GOT16 R_PPC_GOT16
+#define R_PPC64_GOT16_LO R_PPC_GOT16_LO
+#define R_PPC64_GOT16_HI R_PPC_GOT16_HI
+#define R_PPC64_GOT16_HA R_PPC_GOT16_HA
+
+#define R_PPC64_COPY R_PPC_COPY
+#define R_PPC64_GLOB_DAT R_PPC_GLOB_DAT
+#define R_PPC64_JMP_SLOT R_PPC_JMP_SLOT
+#define R_PPC64_RELATIVE R_PPC_RELATIVE
+
+#define R_PPC64_UADDR32 R_PPC_UADDR32
+#define R_PPC64_UADDR16 R_PPC_UADDR16
+#define R_PPC64_REL32 R_PPC_REL32
+#define R_PPC64_PLT32 R_PPC_PLT32
+#define R_PPC64_PLTREL32 R_PPC_PLTREL32
+#define R_PPC64_PLT16_LO R_PPC_PLT16_LO
+#define R_PPC64_PLT16_HI R_PPC_PLT16_HI
+#define R_PPC64_PLT16_HA R_PPC_PLT16_HA
+
+#define R_PPC64_SECTOFF R_PPC_SECTOFF
+#define R_PPC64_SECTOFF_LO R_PPC_SECTOFF_LO
+#define R_PPC64_SECTOFF_HI R_PPC_SECTOFF_HI
+#define R_PPC64_SECTOFF_HA R_PPC_SECTOFF_HA
+#define R_PPC64_ADDR30 37 /* word30 (S + A - P) >> 2. */
+#define R_PPC64_ADDR64 38 /* doubleword64 S + A. */
+#define R_PPC64_ADDR16_HIGHER 39 /* half16 #higher(S + A). */
+#define R_PPC64_ADDR16_HIGHERA 40 /* half16 #highera(S + A). */
+#define R_PPC64_ADDR16_HIGHEST 41 /* half16 #highest(S + A). */
+#define R_PPC64_ADDR16_HIGHESTA 42 /* half16 #highesta(S + A). */
+#define R_PPC64_UADDR64 43 /* doubleword64 S + A. */
+#define R_PPC64_REL64 44 /* doubleword64 S + A - P. */
+#define R_PPC64_PLT64 45 /* doubleword64 L + A. */
+#define R_PPC64_PLTREL64 46 /* doubleword64 L + A - P. */
+#define R_PPC64_TOC16 47 /* half16* S + A - .TOC. */
+#define R_PPC64_TOC16_LO 48 /* half16 #lo(S + A - .TOC.). */
+#define R_PPC64_TOC16_HI 49 /* half16 #hi(S + A - .TOC.). */
+#define R_PPC64_TOC16_HA 50 /* half16 #ha(S + A - .TOC.). */
+#define R_PPC64_TOC 51 /* doubleword64 .TOC. */
+#define R_PPC64_PLTGOT16 52 /* half16* M + A. */
+#define R_PPC64_PLTGOT16_LO 53 /* half16 #lo(M + A). */
+#define R_PPC64_PLTGOT16_HI 54 /* half16 #hi(M + A). */
+#define R_PPC64_PLTGOT16_HA 55 /* half16 #ha(M + A). */
+
+#define R_PPC64_ADDR16_DS 56 /* half16ds* (S + A) >> 2. */
+#define R_PPC64_ADDR16_LO_DS 57 /* half16ds #lo(S + A) >> 2. */
+#define R_PPC64_GOT16_DS 58 /* half16ds* (G + A) >> 2. */
+#define R_PPC64_GOT16_LO_DS 59 /* half16ds #lo(G + A) >> 2. */
+#define R_PPC64_PLT16_LO_DS 60 /* half16ds #lo(L + A) >> 2. */
+#define R_PPC64_SECTOFF_DS 61 /* half16ds* (R + A) >> 2. */
+#define R_PPC64_SECTOFF_LO_DS 62 /* half16ds #lo(R + A) >> 2. */
+#define R_PPC64_TOC16_DS 63 /* half16ds* (S + A - .TOC.) >> 2. */
+#define R_PPC64_TOC16_LO_DS 64 /* half16ds #lo(S + A - .TOC.) >> 2. */
+#define R_PPC64_PLTGOT16_DS 65 /* half16ds* (M + A) >> 2. */
+#define R_PPC64_PLTGOT16_LO_DS 66 /* half16ds #lo(M + A) >> 2. */
+
+/* PowerPC64 relocations defined for the TLS access ABI. */
+#define R_PPC64_TLS 67 /* none (sym+add)@tls */
+#define R_PPC64_DTPMOD64 68 /* doubleword64 (sym+add)@dtpmod */
+#define R_PPC64_TPREL16 69 /* half16* (sym+add)@tprel */
+#define R_PPC64_TPREL16_LO 70 /* half16 (sym+add)@tprel@l */
+#define R_PPC64_TPREL16_HI 71 /* half16 (sym+add)@tprel@h */
+#define R_PPC64_TPREL16_HA 72 /* half16 (sym+add)@tprel@ha */
+#define R_PPC64_TPREL64 73 /* doubleword64 (sym+add)@tprel */
+#define R_PPC64_DTPREL16 74 /* half16* (sym+add)@dtprel */
+#define R_PPC64_DTPREL16_LO 75 /* half16 (sym+add)@dtprel@l */
+#define R_PPC64_DTPREL16_HI 76 /* half16 (sym+add)@dtprel@h */
+#define R_PPC64_DTPREL16_HA 77 /* half16 (sym+add)@dtprel@ha */
+#define R_PPC64_DTPREL64 78 /* doubleword64 (sym+add)@dtprel */
+#define R_PPC64_GOT_TLSGD16 79 /* half16* (sym+add)@got@tlsgd */
+#define R_PPC64_GOT_TLSGD16_LO 80 /* half16 (sym+add)@got@tlsgd@l */
+#define R_PPC64_GOT_TLSGD16_HI 81 /* half16 (sym+add)@got@tlsgd@h */
+#define R_PPC64_GOT_TLSGD16_HA 82 /* half16 (sym+add)@got@tlsgd@ha */
+#define R_PPC64_GOT_TLSLD16 83 /* half16* (sym+add)@got@tlsld */
+#define R_PPC64_GOT_TLSLD16_LO 84 /* half16 (sym+add)@got@tlsld@l */
+#define R_PPC64_GOT_TLSLD16_HI 85 /* half16 (sym+add)@got@tlsld@h */
+#define R_PPC64_GOT_TLSLD16_HA 86 /* half16 (sym+add)@got@tlsld@ha */
+#define R_PPC64_GOT_TPREL16_DS 87 /* half16ds* (sym+add)@got@tprel */
+#define R_PPC64_GOT_TPREL16_LO_DS 88 /* half16ds (sym+add)@got@tprel@l */
+#define R_PPC64_GOT_TPREL16_HI 89 /* half16 (sym+add)@got@tprel@h */
+#define R_PPC64_GOT_TPREL16_HA 90 /* half16 (sym+add)@got@tprel@ha */
+#define R_PPC64_GOT_DTPREL16_DS 91 /* half16ds* (sym+add)@got@dtprel */
+#define R_PPC64_GOT_DTPREL16_LO_DS 92 /* half16ds (sym+add)@got@dtprel@l */
+#define R_PPC64_GOT_DTPREL16_HI 93 /* half16 (sym+add)@got@dtprel@h */
+#define R_PPC64_GOT_DTPREL16_HA 94 /* half16 (sym+add)@got@dtprel@ha */
+#define R_PPC64_TPREL16_DS 95 /* half16ds* (sym+add)@tprel */
+#define R_PPC64_TPREL16_LO_DS 96 /* half16ds (sym+add)@tprel@l */
+#define R_PPC64_TPREL16_HIGHER 97 /* half16 (sym+add)@tprel@higher */
+#define R_PPC64_TPREL16_HIGHERA 98 /* half16 (sym+add)@tprel@highera */
+#define R_PPC64_TPREL16_HIGHEST 99 /* half16 (sym+add)@tprel@highest */
+#define R_PPC64_TPREL16_HIGHESTA 100 /* half16 (sym+add)@tprel@highesta */
+#define R_PPC64_DTPREL16_DS 101 /* half16ds* (sym+add)@dtprel */
+#define R_PPC64_DTPREL16_LO_DS 102 /* half16ds (sym+add)@dtprel@l */
+#define R_PPC64_DTPREL16_HIGHER 103 /* half16 (sym+add)@dtprel@higher */
+#define R_PPC64_DTPREL16_HIGHERA 104 /* half16 (sym+add)@dtprel@highera */
+#define R_PPC64_DTPREL16_HIGHEST 105 /* half16 (sym+add)@dtprel@highest */
+#define R_PPC64_DTPREL16_HIGHESTA 106 /* half16 (sym+add)@dtprel@highesta */
+
+/* Keep this the last entry. */
+#define R_PPC64_NUM 107
+
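A quick stand-alone sketch (not part of the header) of how the @l, @h and @ha
halves referenced by many of the relocations above are conventionally derived,
and why @ha can differ from @h by one:

#include <stdint.h>
#include <stdio.h>

static uint16_t lo(uint32_t v) { return v & 0xffff; }
static uint16_t hi(uint32_t v) { return (v >> 16) & 0xffff; }
/* @ha: high half adjusted so that (ha << 16) + (int16_t)lo(v) == v */
static uint16_t ha(uint32_t v) { return ((v + 0x8000) >> 16) & 0xffff; }

int main(void)
{
	uint32_t addr = 0x10008765;

	/* prints lo=0x8765 hi=0x1000 ha=0x1001: the low half is negative as a
	 * signed 16-bit value, so @ha carries one into the high half. */
	printf("lo=%#x hi=%#x ha=%#x\n", lo(addr), hi(addr), ha(addr));
	return 0;
}
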
+#ifdef __KERNEL__
+
+#ifdef CONFIG_SPU_BASE
+/* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
+#define NT_SPU 1
+
+#define ARCH_HAVE_EXTRA_ELF_NOTES
+
+#endif /* CONFIG_SPU_BASE */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_POWERPC_ELF_H */
diff --git a/arch/powerpc/include/asm/emergency-restart.h b/arch/powerpc/include/asm/emergency-restart.h
new file mode 100644
index 0000000..3711bd9
--- /dev/null
+++ b/arch/powerpc/include/asm/emergency-restart.h
@@ -0,0 +1 @@
+#include <asm-generic/emergency-restart.h>
diff --git a/arch/powerpc/include/asm/errno.h b/arch/powerpc/include/asm/errno.h
new file mode 100644
index 0000000..8c145fd
--- /dev/null
+++ b/arch/powerpc/include/asm/errno.h
@@ -0,0 +1,11 @@
+#ifndef _ASM_POWERPC_ERRNO_H
+#define _ASM_POWERPC_ERRNO_H
+
+#include <asm-generic/errno.h>
+
+#undef EDEADLOCK
+#define EDEADLOCK 58 /* File locking deadlock error */
+
+#define _LAST_ERRNO 516
+
+#endif /* _ASM_POWERPC_ERRNO_H */
diff --git a/arch/powerpc/include/asm/exception.h b/arch/powerpc/include/asm/exception.h
new file mode 100644
index 0000000..329148b
--- /dev/null
+++ b/arch/powerpc/include/asm/exception.h
@@ -0,0 +1,311 @@
+#ifndef _ASM_POWERPC_EXCEPTION_H
+#define _ASM_POWERPC_EXCEPTION_H
+/*
+ * Extracted from head_64.S
+ *
+ * PowerPC version
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
+ * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
+ * Adapted for Power Macintosh by Paul Mackerras.
+ * Low-level exception handlers and MMU support
+ * rewritten by Paul Mackerras.
+ * Copyright (C) 1996 Paul Mackerras.
+ *
+ * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
+ * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
+ *
+ * This file contains the low-level support and setup for the
+ * PowerPC-64 platform, including trap and interrupt dispatch.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+/*
+ * The following macros define the code that appears as
+ * the prologue to each of the exception handlers. They
+ * are split into two parts to allow a single kernel binary
+ * to be used for pSeries and iSeries.
+ *
+ * We make as much of the exception code common between native
+ * exception handlers (including pSeries LPAR) and iSeries LPAR
+ * implementations as possible.
+ */
+
+#define EX_R9 0
+#define EX_R10 8
+#define EX_R11 16
+#define EX_R12 24
+#define EX_R13 32
+#define EX_SRR0 40
+#define EX_DAR 48
+#define EX_DSISR 56
+#define EX_CCR 60
+#define EX_R3 64
+#define EX_LR 72
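
Purely as an illustration (the actual save area is raw bytes in the paca, not
a C struct), the EX_* offsets above correspond to a layout like this
hypothetical structure:

struct ex_save_area {			/* hypothetical name */
	u64 r9, r10, r11, r12, r13;	/* EX_R9 .. EX_R13 (0, 8, 16, 24, 32) */
	u64 srr0;			/* EX_SRR0  (40) */
	u64 dar;			/* EX_DAR   (48) */
	u32 dsisr;			/* EX_DSISR (56) */
	u32 ccr;			/* EX_CCR   (60) */
	u64 r3;				/* EX_R3    (64) */
	u64 lr;				/* EX_LR    (72) */
};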
+
+/*
+ * We're short on space and time in the exception prolog, so we can't
+ * use the normal SET_REG_IMMEDIATE macro. Normally we just need the
+ * low halfword of the address, but for Kdump we need the whole low
+ * word.
+ */
+#ifdef CONFIG_CRASH_DUMP
+#define LOAD_HANDLER(reg, label) \
+ oris reg,reg,(label)@h; /* virt addr of handler ... */ \
+ ori reg,reg,(label)@l; /* .. and the rest */
+#else
+#define LOAD_HANDLER(reg, label) \
+ ori reg,reg,(label)@l; /* virt addr of handler ... */
+#endif
+
+#define EXCEPTION_PROLOG_1(area) \
+ mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
+ std r9,area+EX_R9(r13); /* save r9 - r12 */ \
+ std r10,area+EX_R10(r13); \
+ std r11,area+EX_R11(r13); \
+ std r12,area+EX_R12(r13); \
+ mfspr r9,SPRN_SPRG1; \
+ std r9,area+EX_R13(r13); \
+ mfcr r9
+
+/*
+ * Equal to EXCEPTION_PROLOG_PSERIES, except that it forces 64bit mode.
+ * The firmware calls the registered system_reset_fwnmi and
+ * machine_check_fwnmi handlers in 32bit mode if the cpu happens to run
+ * a 32bit application at the time of the event.
+ * This firmware bug is present on POWER4 and JS20.
+ */
+#define EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(area, label) \
+ EXCEPTION_PROLOG_1(area); \
+ clrrdi r12,r13,32; /* get high part of &label */ \
+ mfmsr r10; \
+ /* force 64bit mode */ \
+ li r11,5; /* MSR_SF_LG|MSR_ISF_LG */ \
+ rldimi r10,r11,61,0; /* insert into top 3 bits */ \
+ /* done 64bit mode */ \
+ mfspr r11,SPRN_SRR0; /* save SRR0 */ \
+ LOAD_HANDLER(r12,label) \
+ ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \
+ mtspr SPRN_SRR0,r12; \
+ mfspr r12,SPRN_SRR1; /* and SRR1 */ \
+ mtspr SPRN_SRR1,r10; \
+ rfid; \
+ b . /* prevent speculative execution */
+
+#define EXCEPTION_PROLOG_PSERIES(area, label) \
+ EXCEPTION_PROLOG_1(area); \
+ clrrdi r12,r13,32; /* get high part of &label */ \
+ mfmsr r10; \
+ mfspr r11,SPRN_SRR0; /* save SRR0 */ \
+ LOAD_HANDLER(r12,label) \
+ ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \
+ mtspr SPRN_SRR0,r12; \
+ mfspr r12,SPRN_SRR1; /* and SRR1 */ \
+ mtspr SPRN_SRR1,r10; \
+ rfid; \
+ b . /* prevent speculative execution */
+
+/*
+ * The common exception prolog is used for all except a few exceptions
+ * such as a segment miss on a kernel address. We have to be prepared
+ * to take another exception from the point where we first touch the
+ * kernel stack onwards.
+ *
+ * On entry r13 points to the paca, r9-r13 are saved in the paca,
+ * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
+ * SRR1, and relocation is on.
+ */
+#define EXCEPTION_PROLOG_COMMON(n, area) \
+ andi. r10,r12,MSR_PR; /* See if coming from user */ \
+ mr r10,r1; /* Save r1 */ \
+ subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \
+ beq- 1f; \
+ ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
+1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \
+ bge- cr1,2f; /* abort if it is */ \
+ b 3f; \
+2: li r1,(n); /* will be reloaded later */ \
+ sth r1,PACA_TRAP_SAVE(r13); \
+ b bad_stack; \
+3: std r9,_CCR(r1); /* save CR in stackframe */ \
+ std r11,_NIP(r1); /* save SRR0 in stackframe */ \
+ std r12,_MSR(r1); /* save SRR1 in stackframe */ \
+ std r10,0(r1); /* make stack chain pointer */ \
+ std r0,GPR0(r1); /* save r0 in stackframe */ \
+ std r10,GPR1(r1); /* save r1 in stackframe */ \
+ ACCOUNT_CPU_USER_ENTRY(r9, r10); \
+ std r2,GPR2(r1); /* save r2 in stackframe */ \
+ SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
+ SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \
+ ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \
+ ld r10,area+EX_R10(r13); \
+ std r9,GPR9(r1); \
+ std r10,GPR10(r1); \
+ ld r9,area+EX_R11(r13); /* move r11 - r13 to stackframe */ \
+ ld r10,area+EX_R12(r13); \
+ ld r11,area+EX_R13(r13); \
+ std r9,GPR11(r1); \
+ std r10,GPR12(r1); \
+ std r11,GPR13(r1); \
+ ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \
+ mflr r9; /* save LR in stackframe */ \
+ std r9,_LINK(r1); \
+ mfctr r10; /* save CTR in stackframe */ \
+ std r10,_CTR(r1); \
+ lbz r10,PACASOFTIRQEN(r13); \
+ mfspr r11,SPRN_XER; /* save XER in stackframe */ \
+ std r10,SOFTE(r1); \
+ std r11,_XER(r1); \
+ li r9,(n)+1; \
+ std r9,_TRAP(r1); /* set trap number */ \
+ li r10,0; \
+ ld r11,exception_marker@toc(r2); \
+ std r10,RESULT(r1); /* clear regs->result */ \
+ std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */
+
+/*
+ * Exception vectors.
+ */
+#define STD_EXCEPTION_PSERIES(n, label) \
+ . = n; \
+ .globl label##_pSeries; \
+label##_pSeries: \
+ HMT_MEDIUM; \
+ mtspr SPRN_SPRG1,r13; /* save r13 */ \
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
+
+#define HSTD_EXCEPTION_PSERIES(n, label) \
+ . = n; \
+ .globl label##_pSeries; \
+label##_pSeries: \
+ HMT_MEDIUM; \
+ mtspr SPRN_SPRG1,r20; /* save r20 */ \
+ mfspr r20,SPRN_HSRR0; /* copy HSRR0 to SRR0 */ \
+ mtspr SPRN_SRR0,r20; \
+	mfspr	r20,SPRN_HSRR1;	/* copy HSRR1 to SRR1 */		\
+ mtspr SPRN_SRR1,r20; \
+ mfspr r20,SPRN_SPRG1; /* restore r20 */ \
+ mtspr SPRN_SPRG1,r13; /* save r13 */ \
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
+
+
+#define MASKABLE_EXCEPTION_PSERIES(n, label) \
+ . = n; \
+ .globl label##_pSeries; \
+label##_pSeries: \
+ HMT_MEDIUM; \
+ mtspr SPRN_SPRG1,r13; /* save r13 */ \
+ mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
+ std r9,PACA_EXGEN+EX_R9(r13); /* save r9, r10 */ \
+ std r10,PACA_EXGEN+EX_R10(r13); \
+ lbz r10,PACASOFTIRQEN(r13); \
+ mfcr r9; \
+ cmpwi r10,0; \
+ beq masked_interrupt; \
+ mfspr r10,SPRN_SPRG1; \
+ std r10,PACA_EXGEN+EX_R13(r13); \
+ std r11,PACA_EXGEN+EX_R11(r13); \
+ std r12,PACA_EXGEN+EX_R12(r13); \
+ clrrdi r12,r13,32; /* get high part of &label */ \
+ mfmsr r10; \
+ mfspr r11,SPRN_SRR0; /* save SRR0 */ \
+ LOAD_HANDLER(r12,label##_common) \
+ ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \
+ mtspr SPRN_SRR0,r12; \
+ mfspr r12,SPRN_SRR1; /* and SRR1 */ \
+ mtspr SPRN_SRR1,r10; \
+ rfid; \
+ b . /* prevent speculative execution */
+
+#ifdef CONFIG_PPC_ISERIES
+#define DISABLE_INTS \
+ li r11,0; \
+ stb r11,PACASOFTIRQEN(r13); \
+BEGIN_FW_FTR_SECTION; \
+ stb r11,PACAHARDIRQEN(r13); \
+END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES); \
+ TRACE_DISABLE_INTS; \
+BEGIN_FW_FTR_SECTION; \
+ mfmsr r10; \
+ ori r10,r10,MSR_EE; \
+ mtmsrd r10,1; \
+END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
+#else
+#define DISABLE_INTS \
+ li r11,0; \
+ stb r11,PACASOFTIRQEN(r13); \
+ stb r11,PACAHARDIRQEN(r13); \
+ TRACE_DISABLE_INTS
+#endif /* CONFIG_PPC_ISERIES */
+
+#define ENABLE_INTS \
+ ld r12,_MSR(r1); \
+ mfmsr r11; \
+ rlwimi r11,r12,0,MSR_EE; \
+ mtmsrd r11,1
+
+#define STD_EXCEPTION_COMMON(trap, label, hdlr) \
+ .align 7; \
+ .globl label##_common; \
+label##_common: \
+ EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
+ DISABLE_INTS; \
+ bl .save_nvgprs; \
+ addi r3,r1,STACK_FRAME_OVERHEAD; \
+ bl hdlr; \
+ b .ret_from_except
+
+/*
+ * Like STD_EXCEPTION_COMMON, but for exceptions that can occur
+ * in the idle task and therefore need the special idle handling.
+ */
+#define STD_EXCEPTION_COMMON_IDLE(trap, label, hdlr) \
+ .align 7; \
+ .globl label##_common; \
+label##_common: \
+ EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
+ FINISH_NAP; \
+ DISABLE_INTS; \
+ bl .save_nvgprs; \
+ addi r3,r1,STACK_FRAME_OVERHEAD; \
+ bl hdlr; \
+ b .ret_from_except
+
+#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr) \
+ .align 7; \
+ .globl label##_common; \
+label##_common: \
+ EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
+ FINISH_NAP; \
+ DISABLE_INTS; \
+BEGIN_FTR_SECTION \
+ bl .ppc64_runlatch_on; \
+END_FTR_SECTION_IFSET(CPU_FTR_CTRL) \
+ addi r3,r1,STACK_FRAME_OVERHEAD; \
+ bl hdlr; \
+ b .ret_from_except_lite
+
+/*
+ * When the idle code in power4_idle puts the CPU into NAP mode,
+ * it has to do so in a loop, and relies on the external interrupt
+ * and decrementer interrupt entry code to get it out of the loop.
+ * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags
+ * to signal that it is in the loop and needs help to get out.
+ */
+#ifdef CONFIG_PPC_970_NAP
+#define FINISH_NAP \
+BEGIN_FTR_SECTION \
+ clrrdi r11,r1,THREAD_SHIFT; \
+ ld r9,TI_LOCAL_FLAGS(r11); \
+ andi. r10,r9,_TLF_NAPPING; \
+ bnel power4_fixup_nap; \
+END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
+#else
+#define FINISH_NAP
+#endif
+
+#endif /* _ASM_POWERPC_EXCEPTION_H */
diff --git a/arch/powerpc/include/asm/fb.h b/arch/powerpc/include/asm/fb.h
new file mode 100644
index 0000000..411af8d
--- /dev/null
+++ b/arch/powerpc/include/asm/fb.h
@@ -0,0 +1,21 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+ unsigned long off)
+{
+ vma->vm_page_prot = phys_mem_access_prot(file, off >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+ return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/arch/powerpc/include/asm/fcntl.h b/arch/powerpc/include/asm/fcntl.h
new file mode 100644
index 0000000..ce5c451
--- /dev/null
+++ b/arch/powerpc/include/asm/fcntl.h
@@ -0,0 +1,11 @@
+#ifndef _ASM_FCNTL_H
+#define _ASM_FCNTL_H
+
+#define O_DIRECTORY 040000 /* must be a directory */
+#define O_NOFOLLOW 0100000 /* don't follow links */
+#define O_LARGEFILE 0200000
+#define O_DIRECT 0400000 /* direct disk access hint */
+
+#include <asm-generic/fcntl.h>
+
+#endif /* _ASM_FCNTL_H */
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
new file mode 100644
index 0000000..a102996
--- /dev/null
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -0,0 +1,126 @@
+#ifndef __ASM_POWERPC_FEATURE_FIXUPS_H
+#define __ASM_POWERPC_FEATURE_FIXUPS_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifdef __ASSEMBLY__
+
+/*
+ * Feature section common macros
+ *
+ * Note that the entries now contain offsets between the table entry
+ * and the code rather than absolute code pointers in order to be
+ * usable with the vdso shared library. There is also an assumption
+ * that values will be negative, that is, the fixup table has to be
+ * located after the code it fixes up.
+ */
+#if defined(CONFIG_PPC64) && !defined(__powerpc64__)
+/* 64 bits kernel, 32 bits code (ie. vdso32) */
+#define FTR_ENTRY_LONG .llong
+#define FTR_ENTRY_OFFSET .long 0xffffffff; .long
+#else
+/* 64 bit kernel 64 bit code, or 32 bit kernel 32 bit code */
+#define FTR_ENTRY_LONG PPC_LONG
+#define FTR_ENTRY_OFFSET PPC_LONG
+#endif
+
+#define START_FTR_SECTION(label) label##1:
+
+#define FTR_SECTION_ELSE_NESTED(label) \
+label##2: \
+ .pushsection __ftr_alt_##label,"a"; \
+ .align 2; \
+label##3:
+
+#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect) \
+label##4: \
+ .popsection; \
+ .pushsection sect,"a"; \
+ .align 3; \
+label##5: \
+ FTR_ENTRY_LONG msk; \
+ FTR_ENTRY_LONG val; \
+ FTR_ENTRY_OFFSET label##1b-label##5b; \
+ FTR_ENTRY_OFFSET label##2b-label##5b; \
+ FTR_ENTRY_OFFSET label##3b-label##5b; \
+ FTR_ENTRY_OFFSET label##4b-label##5b; \
+ .popsection;
+
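For the 64-bit kernel / 64-bit code case, where FTR_ENTRY_LONG and
FTR_ENTRY_OFFSET are both eight bytes, each table entry emitted by
MAKE_FTR_SECTION_ENTRY can be pictured as the following hypothetical C struct
(the four offsets are negative, relative to the entry itself):

struct ftr_fixup_entry {		/* hypothetical; names illustrative */
	unsigned long	mask;		/* feature bits to test */
	unsigned long	value;		/* required value of the masked bits */
	long		start_off;	/* label##1b - label##5b: default code start */
	long		end_off;	/* label##2b - label##5b: default code end */
	long		alt_start_off;	/* label##3b - label##5b: alternative start */
	long		alt_end_off;	/* label##4b - label##5b: alternative end */
};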
+
+/* CPU feature dependent sections */
+#define BEGIN_FTR_SECTION_NESTED(label) START_FTR_SECTION(label)
+#define BEGIN_FTR_SECTION START_FTR_SECTION(97)
+
+#define END_FTR_SECTION_NESTED(msk, val, label) \
+ FTR_SECTION_ELSE_NESTED(label) \
+ MAKE_FTR_SECTION_ENTRY(msk, val, label, __ftr_fixup)
+
+#define END_FTR_SECTION(msk, val) \
+ END_FTR_SECTION_NESTED(msk, val, 97)
+
+#define END_FTR_SECTION_IFSET(msk) END_FTR_SECTION((msk), (msk))
+#define END_FTR_SECTION_IFCLR(msk) END_FTR_SECTION((msk), 0)
+
+/* CPU feature sections with alternatives, use BEGIN_FTR_SECTION to start */
+#define FTR_SECTION_ELSE FTR_SECTION_ELSE_NESTED(97)
+#define ALT_FTR_SECTION_END_NESTED(msk, val, label) \
+ MAKE_FTR_SECTION_ENTRY(msk, val, label, __ftr_fixup)
+#define ALT_FTR_SECTION_END_NESTED_IFSET(msk, label) \
+ ALT_FTR_SECTION_END_NESTED(msk, msk, label)
+#define ALT_FTR_SECTION_END_NESTED_IFCLR(msk, label) \
+ ALT_FTR_SECTION_END_NESTED(msk, 0, label)
+#define ALT_FTR_SECTION_END(msk, val) \
+ ALT_FTR_SECTION_END_NESTED(msk, val, 97)
+#define ALT_FTR_SECTION_END_IFSET(msk) \
+ ALT_FTR_SECTION_END_NESTED_IFSET(msk, 97)
+#define ALT_FTR_SECTION_END_IFCLR(msk) \
+ ALT_FTR_SECTION_END_NESTED_IFCLR(msk, 97)
+
+/* Firmware feature dependent sections */
+#define BEGIN_FW_FTR_SECTION_NESTED(label) START_FTR_SECTION(label)
+#define BEGIN_FW_FTR_SECTION START_FTR_SECTION(97)
+
+#define END_FW_FTR_SECTION_NESTED(msk, val, label) \
+ FTR_SECTION_ELSE_NESTED(label) \
+ MAKE_FTR_SECTION_ENTRY(msk, val, label, __fw_ftr_fixup)
+
+#define END_FW_FTR_SECTION(msk, val) \
+ END_FW_FTR_SECTION_NESTED(msk, val, 97)
+
+#define END_FW_FTR_SECTION_IFSET(msk) END_FW_FTR_SECTION((msk), (msk))
+#define END_FW_FTR_SECTION_IFCLR(msk) END_FW_FTR_SECTION((msk), 0)
+
+/* Firmware feature sections with alternatives */
+#define FW_FTR_SECTION_ELSE_NESTED(label) FTR_SECTION_ELSE_NESTED(label)
+#define FW_FTR_SECTION_ELSE FTR_SECTION_ELSE_NESTED(97)
+#define ALT_FW_FTR_SECTION_END_NESTED(msk, val, label) \
+ MAKE_FTR_SECTION_ENTRY(msk, val, label, __fw_ftr_fixup)
+#define ALT_FW_FTR_SECTION_END_NESTED_IFSET(msk, label) \
+ ALT_FW_FTR_SECTION_END_NESTED(msk, msk, label)
+#define ALT_FW_FTR_SECTION_END_NESTED_IFCLR(msk, label) \
+ ALT_FW_FTR_SECTION_END_NESTED(msk, 0, label)
+#define ALT_FW_FTR_SECTION_END(msk, val) \
+ ALT_FW_FTR_SECTION_END_NESTED(msk, val, 97)
+#define ALT_FW_FTR_SECTION_END_IFSET(msk) \
+ ALT_FW_FTR_SECTION_END_NESTED_IFSET(msk, 97)
+#define ALT_FW_FTR_SECTION_END_IFCLR(msk) \
+ ALT_FW_FTR_SECTION_END_NESTED_IFCLR(msk, 97)
+
+#endif /* __ASSEMBLY__ */
+
+/* LWSYNC feature sections */
+#define START_LWSYNC_SECTION(label) label##1:
+#define MAKE_LWSYNC_SECTION_ENTRY(label, sect) \
+label##2: \
+ .pushsection sect,"a"; \
+ .align 2; \
+label##3: \
+ .long label##1b-label##3b; \
+ .popsection;
+
+#endif /* __ASM_POWERPC_FEATURE_FIXUPS_H */
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
new file mode 100644
index 0000000..3a17982
--- /dev/null
+++ b/arch/powerpc/include/asm/firmware.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
+ *
+ * Modifications for ppc64:
+ * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef __ASM_POWERPC_FIRMWARE_H
+#define __ASM_POWERPC_FIRMWARE_H
+
+#ifdef __KERNEL__
+
+#include <asm/asm-compat.h>
+#include <asm/feature-fixups.h>
+
+/* firmware feature bitmask values */
+#define FIRMWARE_MAX_FEATURES 63
+
+#define FW_FEATURE_PFT ASM_CONST(0x0000000000000001)
+#define FW_FEATURE_TCE ASM_CONST(0x0000000000000002)
+#define FW_FEATURE_SPRG0 ASM_CONST(0x0000000000000004)
+#define FW_FEATURE_DABR ASM_CONST(0x0000000000000008)
+#define FW_FEATURE_COPY ASM_CONST(0x0000000000000010)
+#define FW_FEATURE_ASR ASM_CONST(0x0000000000000020)
+#define FW_FEATURE_DEBUG ASM_CONST(0x0000000000000040)
+#define FW_FEATURE_TERM ASM_CONST(0x0000000000000080)
+#define FW_FEATURE_PERF ASM_CONST(0x0000000000000100)
+#define FW_FEATURE_DUMP ASM_CONST(0x0000000000000200)
+#define FW_FEATURE_INTERRUPT ASM_CONST(0x0000000000000400)
+#define FW_FEATURE_MIGRATE ASM_CONST(0x0000000000000800)
+#define FW_FEATURE_PERFMON ASM_CONST(0x0000000000001000)
+#define FW_FEATURE_CRQ ASM_CONST(0x0000000000002000)
+#define FW_FEATURE_VIO ASM_CONST(0x0000000000004000)
+#define FW_FEATURE_RDMA ASM_CONST(0x0000000000008000)
+#define FW_FEATURE_LLAN ASM_CONST(0x0000000000010000)
+#define FW_FEATURE_BULK ASM_CONST(0x0000000000020000)
+#define FW_FEATURE_XDABR ASM_CONST(0x0000000000040000)
+#define FW_FEATURE_MULTITCE ASM_CONST(0x0000000000080000)
+#define FW_FEATURE_SPLPAR ASM_CONST(0x0000000000100000)
+#define FW_FEATURE_ISERIES ASM_CONST(0x0000000000200000)
+#define FW_FEATURE_LPAR ASM_CONST(0x0000000000400000)
+#define FW_FEATURE_PS3_LV1 ASM_CONST(0x0000000000800000)
+#define FW_FEATURE_BEAT ASM_CONST(0x0000000001000000)
+#define FW_FEATURE_BULK_REMOVE ASM_CONST(0x0000000002000000)
+#define FW_FEATURE_CMO ASM_CONST(0x0000000004000000)
+
+#ifndef __ASSEMBLY__
+
+enum {
+#ifdef CONFIG_PPC64
+ FW_FEATURE_PSERIES_POSSIBLE = FW_FEATURE_PFT | FW_FEATURE_TCE |
+ FW_FEATURE_SPRG0 | FW_FEATURE_DABR | FW_FEATURE_COPY |
+ FW_FEATURE_ASR | FW_FEATURE_DEBUG | FW_FEATURE_TERM |
+ FW_FEATURE_PERF | FW_FEATURE_DUMP | FW_FEATURE_INTERRUPT |
+ FW_FEATURE_MIGRATE | FW_FEATURE_PERFMON | FW_FEATURE_CRQ |
+ FW_FEATURE_VIO | FW_FEATURE_RDMA | FW_FEATURE_LLAN |
+ FW_FEATURE_BULK | FW_FEATURE_XDABR | FW_FEATURE_MULTITCE |
+ FW_FEATURE_SPLPAR | FW_FEATURE_LPAR | FW_FEATURE_CMO,
+ FW_FEATURE_PSERIES_ALWAYS = 0,
+ FW_FEATURE_ISERIES_POSSIBLE = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
+ FW_FEATURE_ISERIES_ALWAYS = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
+ FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
+ FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
+ FW_FEATURE_CELLEB_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_BEAT,
+ FW_FEATURE_CELLEB_ALWAYS = 0,
+ FW_FEATURE_NATIVE_POSSIBLE = 0,
+ FW_FEATURE_NATIVE_ALWAYS = 0,
+ FW_FEATURE_POSSIBLE =
+#ifdef CONFIG_PPC_PSERIES
+ FW_FEATURE_PSERIES_POSSIBLE |
+#endif
+#ifdef CONFIG_PPC_ISERIES
+ FW_FEATURE_ISERIES_POSSIBLE |
+#endif
+#ifdef CONFIG_PPC_PS3
+ FW_FEATURE_PS3_POSSIBLE |
+#endif
+#ifdef CONFIG_PPC_CELLEB
+ FW_FEATURE_CELLEB_POSSIBLE |
+#endif
+#ifdef CONFIG_PPC_NATIVE
+ FW_FEATURE_NATIVE_ALWAYS |
+#endif
+ 0,
+ FW_FEATURE_ALWAYS =
+#ifdef CONFIG_PPC_PSERIES
+ FW_FEATURE_PSERIES_ALWAYS &
+#endif
+#ifdef CONFIG_PPC_ISERIES
+ FW_FEATURE_ISERIES_ALWAYS &
+#endif
+#ifdef CONFIG_PPC_PS3
+ FW_FEATURE_PS3_ALWAYS &
+#endif
+#ifdef CONFIG_PPC_CELLEB
+ FW_FEATURE_CELLEB_ALWAYS &
+#endif
+#ifdef CONFIG_PPC_NATIVE
+ FW_FEATURE_NATIVE_ALWAYS &
+#endif
+ FW_FEATURE_POSSIBLE,
+
+#else /* CONFIG_PPC64 */
+ FW_FEATURE_POSSIBLE = 0,
+ FW_FEATURE_ALWAYS = 0,
+#endif
+};
+
+/* This is used to identify firmware features which are available
+ * to the kernel.
+ */
+extern unsigned long powerpc_firmware_features;
+
+#define firmware_has_feature(feature) \
+ ((FW_FEATURE_ALWAYS & (feature)) || \
+ (FW_FEATURE_POSSIBLE & powerpc_firmware_features & (feature)))
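
A hypothetical call site, just to show the intended usage of the test
(printk() assumed available from <linux/kernel.h>):

static void example_probe_firmware(void)
{
	/* true on shared-processor LPAR firmware, false e.g. on bare metal */
	if (firmware_has_feature(FW_FEATURE_SPLPAR))
		printk(KERN_INFO "running under a shared-processor LPAR\n");
}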
+
+extern void system_reset_fwnmi(void);
+extern void machine_check_fwnmi(void);
+
+/* This is true if we are using the firmware NMI handler (typically LPAR) */
+extern int fwnmi_active;
+
+extern unsigned int __start___fw_ftr_fixup, __stop___fw_ftr_fixup;
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* __ASM_POWERPC_FIRMWARE_H */
diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
new file mode 100644
index 0000000..8428b38
--- /dev/null
+++ b/arch/powerpc/include/asm/fixmap.h
@@ -0,0 +1,106 @@
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ *
+ * Copyright 2008 Freescale Semiconductor Inc.
+ * Port to powerpc added by Kumar Gala
+ */
+
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
+extern unsigned long FIXADDR_TOP;
+
+#ifndef __ASSEMBLY__
+#include <linux/kernel.h>
+#include <asm/page.h>
+#ifdef CONFIG_HIGHMEM
+#include <linux/threads.h>
+#include <asm/kmap_types.h>
+#endif
+
+/*
+ * Here we define all the compile-time 'special' virtual
+ * addresses. The point is to have a constant address at
+ * compile time, but to set the physical address only
+ * in the boot process. We allocate these special addresses
+ * from the end of virtual memory (0xfffff000) backwards.
+ * Also this lets us do fail-safe vmalloc(): we
+ * can guarantee that these special addresses and
+ * vmalloc()-ed addresses never overlap.
+ *
+ * These 'compile-time allocated' memory buffers are
+ * fixed-size 4k pages (or larger if used with an increment
+ * higher than 1). Use fixmap_set(idx,phys) to associate
+ * physical memory with fixmap indices.
+ *
+ * TLB entries of such buffers will not be flushed across
+ * task switches.
+ */
+enum fixed_addresses {
+ FIX_HOLE,
+#ifdef CONFIG_HIGHMEM
+ FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
+ FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
+#endif
+ /* FIX_PCIE_MCFG, */
+ __end_of_fixed_addresses
+};
+
+extern void __set_fixmap (enum fixed_addresses idx,
+ phys_addr_t phys, pgprot_t flags);
+
+#define set_fixmap(idx, phys) \
+ __set_fixmap(idx, phys, PAGE_KERNEL)
+/*
+ * Some hardware wants to get fixmapped without caching.
+ */
+#define set_fixmap_nocache(idx, phys) \
+ __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
+
+#define clear_fixmap(idx) \
+ __set_fixmap(idx, 0, __pgprot(0))
+
+#define __FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE)
+
+#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
+#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
+
+extern void __this_fixmap_does_not_exist(void);
+
+/*
+ * 'index to address' translation. If anyone tries to use the idx
+ * directly without translation, we catch the bug with a NULL-dereference
+ * kernel oops. Illegal ranges of incoming indices are caught too.
+ */
+static __always_inline unsigned long fix_to_virt(const unsigned int idx)
+{
+ /*
+ * this branch gets completely eliminated after inlining,
+ * except when someone tries to use fixaddr indices in an
+ * illegal way (such as mixing up address types or using
+ * out-of-range indices).
+ *
+ * If it doesn't get removed, the linker will complain
+ * loudly with a reasonably clear error message.
+ */
+ if (idx >= __end_of_fixed_addresses)
+ __this_fixmap_does_not_exist();
+
+ return __fix_to_virt(idx);
+}
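
A worked example, assuming FIXADDR_TOP is 0xfffff000 and 4k pages
(PAGE_SHIFT == 12), with FIX_KMAP_BEGIN == 1 when CONFIG_HIGHMEM is set:

	fix_to_virt(FIX_HOLE)       = 0xfffff000 - (0 << 12) = 0xfffff000
	fix_to_virt(FIX_KMAP_BEGIN) = 0xfffff000 - (1 << 12) = 0xffffe000
	virt_to_fix(0xffffe123)     = (0xfffff000 - 0xffffe000) >> 12 = FIX_KMAP_BEGIN

The exact numbers depend on the platform's run-time FIXADDR_TOP.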
+
+static inline unsigned long virt_to_fix(const unsigned long vaddr)
+{
+ BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
+ return __virt_to_fix(vaddr);
+}
+
+#endif /* !__ASSEMBLY__ */
+#endif
diff --git a/arch/powerpc/include/asm/floppy.h b/arch/powerpc/include/asm/floppy.h
new file mode 100644
index 0000000..24bd34c
--- /dev/null
+++ b/arch/powerpc/include/asm/floppy.h
@@ -0,0 +1,213 @@
+/*
+ * Architecture specific parts of the Floppy driver
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995
+ */
+#ifndef __ASM_POWERPC_FLOPPY_H
+#define __ASM_POWERPC_FLOPPY_H
+#ifdef __KERNEL__
+
+#include <asm/machdep.h>
+
+#define fd_inb(port) inb_p(port)
+#define fd_outb(value,port) outb_p(value,port)
+
+#define fd_enable_dma() enable_dma(FLOPPY_DMA)
+#define fd_disable_dma() fd_ops->_disable_dma(FLOPPY_DMA)
+#define fd_free_dma() fd_ops->_free_dma(FLOPPY_DMA)
+#define fd_clear_dma_ff() clear_dma_ff(FLOPPY_DMA)
+#define fd_set_dma_mode(mode) set_dma_mode(FLOPPY_DMA, mode)
+#define fd_set_dma_count(count) set_dma_count(FLOPPY_DMA, count)
+#define fd_get_dma_residue() fd_ops->_get_dma_residue(FLOPPY_DMA)
+#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
+#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
+#define fd_cacheflush(addr,size) /* nothing */
+#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL);
+
+#include <linux/pci.h>
+#include <asm/ppc-pci.h> /* for isa_bridge_pcidev */
+
+#define fd_dma_setup(addr,size,mode,io) fd_ops->_dma_setup(addr,size,mode,io)
+
+static int fd_request_dma(void);
+
+struct fd_dma_ops {
+ void (*_disable_dma)(unsigned int dmanr);
+ void (*_free_dma)(unsigned int dmanr);
+ int (*_get_dma_residue)(unsigned int dummy);
+ int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
+};
+
+static int virtual_dma_count;
+static int virtual_dma_residue;
+static char *virtual_dma_addr;
+static int virtual_dma_mode;
+static int doing_vdma;
+static struct fd_dma_ops *fd_ops;
+
+static irqreturn_t floppy_hardint(int irq, void *dev_id)
+{
+ unsigned char st;
+ int lcount;
+ char *lptr;
+
+ if (!doing_vdma)
+ return floppy_interrupt(irq, dev_id);
+
+
+ st = 1;
+ for (lcount=virtual_dma_count, lptr=virtual_dma_addr;
+ lcount; lcount--, lptr++) {
+		st = inb(virtual_dma_port + 4) & 0xa0;
+ if (st != 0xa0)
+ break;
+ if (virtual_dma_mode)
+ outb_p(*lptr, virtual_dma_port+5);
+ else
+ *lptr = inb_p(virtual_dma_port+5);
+ }
+ virtual_dma_count = lcount;
+ virtual_dma_addr = lptr;
+ st = inb(virtual_dma_port+4);
+
+ if (st == 0x20)
+ return IRQ_HANDLED;
+ if (!(st & 0x20)) {
+ virtual_dma_residue += virtual_dma_count;
+ virtual_dma_count=0;
+ doing_vdma = 0;
+ floppy_interrupt(irq, dev_id);
+ return IRQ_HANDLED;
+ }
+ return IRQ_HANDLED;
+}
+
+static void vdma_disable_dma(unsigned int dummy)
+{
+ doing_vdma = 0;
+ virtual_dma_residue += virtual_dma_count;
+ virtual_dma_count=0;
+}
+
+static void vdma_nop(unsigned int dummy)
+{
+}
+
+
+static int vdma_get_dma_residue(unsigned int dummy)
+{
+ return virtual_dma_count + virtual_dma_residue;
+}
+
+
+static int fd_request_irq(void)
+{
+ if (can_use_virtual_dma)
+ return request_irq(FLOPPY_IRQ, floppy_hardint,
+ IRQF_DISABLED, "floppy", NULL);
+ else
+ return request_irq(FLOPPY_IRQ, floppy_interrupt,
+ IRQF_DISABLED, "floppy", NULL);
+}
+
+static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
+{
+ doing_vdma = 1;
+ virtual_dma_port = io;
+ virtual_dma_mode = (mode == DMA_MODE_WRITE);
+ virtual_dma_addr = addr;
+ virtual_dma_count = size;
+ virtual_dma_residue = 0;
+ return 0;
+}
+
+static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
+{
+ static unsigned long prev_size;
+ static dma_addr_t bus_addr = 0;
+ static char *prev_addr;
+ static int prev_dir;
+ int dir;
+
+ doing_vdma = 0;
+ dir = (mode == DMA_MODE_READ) ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE;
+
+ if (bus_addr
+ && (addr != prev_addr || size != prev_size || dir != prev_dir)) {
+ /* different from last time -- unmap prev */
+ pci_unmap_single(isa_bridge_pcidev, bus_addr, prev_size, prev_dir);
+ bus_addr = 0;
+ }
+
+ if (!bus_addr) /* need to map it */
+ bus_addr = pci_map_single(isa_bridge_pcidev, addr, size, dir);
+
+ /* remember this one as prev */
+ prev_addr = addr;
+ prev_size = size;
+ prev_dir = dir;
+
+ fd_clear_dma_ff();
+ fd_cacheflush(addr, size);
+ fd_set_dma_mode(mode);
+ set_dma_addr(FLOPPY_DMA, bus_addr);
+ fd_set_dma_count(size);
+ virtual_dma_port = io;
+ fd_enable_dma();
+
+ return 0;
+}
+
+static struct fd_dma_ops real_dma_ops =
+{
+ ._disable_dma = disable_dma,
+ ._free_dma = free_dma,
+ ._get_dma_residue = get_dma_residue,
+ ._dma_setup = hard_dma_setup
+};
+
+static struct fd_dma_ops virt_dma_ops =
+{
+ ._disable_dma = vdma_disable_dma,
+ ._free_dma = vdma_nop,
+ ._get_dma_residue = vdma_get_dma_residue,
+ ._dma_setup = vdma_dma_setup
+};
+
+static int fd_request_dma(void)
+{
+ if (can_use_virtual_dma & 1) {
+ fd_ops = &virt_dma_ops;
+ return 0;
+ }
+ else {
+ fd_ops = &real_dma_ops;
+ return request_dma(FLOPPY_DMA, "floppy");
+ }
+}
+
+static int FDC1 = 0x3f0;
+static int FDC2 = -1;
+
+/*
+ * Again, the CMOS information is not available.
+ */
+#define FLOPPY0_TYPE 6
+#define FLOPPY1_TYPE 0
+
+#define N_FDC 2 /* Don't change this! */
+#define N_DRIVE 8
+
+/*
+ * The PowerPC has no problems with floppy DMA crossing 64k borders.
+ */
+#define CROSS_64KB(a,s) (0)
+
+#define EXTRA_FLOPPY_PARAMS
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_POWERPC_FLOPPY_H */
diff --git a/arch/powerpc/include/asm/fs_pd.h b/arch/powerpc/include/asm/fs_pd.h
new file mode 100644
index 0000000..9361cd5
--- /dev/null
+++ b/arch/powerpc/include/asm/fs_pd.h
@@ -0,0 +1,50 @@
+/*
+ * Platform information definitions.
+ *
+ * 2006 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef FS_PD_H
+#define FS_PD_H
+#include <sysdev/fsl_soc.h>
+#include <asm/time.h>
+
+#ifdef CONFIG_CPM2
+#include <asm/cpm2.h>
+
+#if defined(CONFIG_8260)
+#include <asm/mpc8260.h>
+#endif
+
+#define cpm2_map(member) (&cpm2_immr->member)
+#define cpm2_map_size(member, size) (&cpm2_immr->member)
+#define cpm2_unmap(addr) do {} while(0)
+#endif
+
+#ifdef CONFIG_8xx
+#include <asm/8xx_immap.h>
+#include <asm/mpc8xx.h>
+
+extern immap_t __iomem *mpc8xx_immr;
+
+#define immr_map(member) (&mpc8xx_immr->member)
+#define immr_map_size(member, size) (&mpc8xx_immr->member)
+#define immr_unmap(addr) do {} while (0)
+#endif
+
+static inline int uart_baudrate(void)
+{
+ return get_baudrate();
+}
+
+static inline int uart_clock(void)
+{
+ return ppc_proc_freq;
+}
+
+#endif
diff --git a/arch/powerpc/include/asm/fsl_gtm.h b/arch/powerpc/include/asm/fsl_gtm.h
new file mode 100644
index 0000000..8e8c9b5
--- /dev/null
+++ b/arch/powerpc/include/asm/fsl_gtm.h
@@ -0,0 +1,47 @@
+/*
+ * Freescale General-purpose Timers Module
+ *
+ * Copyright (c) Freescale Semiconductor, Inc. 2006.
+ * Shlomi Gridish <gridish@freescale.com>
+ * Jerry Huang <Chang-Ming.Huang@freescale.com>
+ * Copyright (c) MontaVista Software, Inc. 2008.
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __ASM_FSL_GTM_H
+#define __ASM_FSL_GTM_H
+
+#include <linux/types.h>
+
+struct gtm;
+
+struct gtm_timer {
+ unsigned int irq;
+
+ struct gtm *gtm;
+ bool requested;
+ u8 __iomem *gtcfr;
+ __be16 __iomem *gtmdr;
+ __be16 __iomem *gtpsr;
+ __be16 __iomem *gtcnr;
+ __be16 __iomem *gtrfr;
+ __be16 __iomem *gtevr;
+};
+
+extern struct gtm_timer *gtm_get_timer16(void);
+extern struct gtm_timer *gtm_get_specific_timer16(struct gtm *gtm,
+ unsigned int timer);
+extern void gtm_put_timer16(struct gtm_timer *tmr);
+extern int gtm_set_timer16(struct gtm_timer *tmr, unsigned long usec,
+ bool reload);
+extern int gtm_set_exact_timer16(struct gtm_timer *tmr, u16 usec,
+ bool reload);
+extern void gtm_stop_timer16(struct gtm_timer *tmr);
+extern void gtm_ack_timer16(struct gtm_timer *tmr, u16 events);
+
+#endif /* __ASM_FSL_GTM_H */
diff --git a/arch/powerpc/include/asm/fsl_lbc.h b/arch/powerpc/include/asm/fsl_lbc.h
new file mode 100644
index 0000000..303f548
--- /dev/null
+++ b/arch/powerpc/include/asm/fsl_lbc.h
@@ -0,0 +1,311 @@
+/* Freescale Local Bus Controller
+ *
+ * Copyright (c) 2006-2007 Freescale Semiconductor
+ *
+ * Authors: Nick Spence <nick.spence@freescale.com>,
+ * Scott Wood <scottwood@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ASM_FSL_LBC_H
+#define __ASM_FSL_LBC_H
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <asm/io.h>
+
+struct fsl_lbc_bank {
+ __be32 br; /**< Base Register */
+#define BR_BA 0xFFFF8000
+#define BR_BA_SHIFT 15
+#define BR_PS 0x00001800
+#define BR_PS_SHIFT 11
+#define BR_PS_8 0x00000800 /* Port Size 8 bit */
+#define BR_PS_16 0x00001000 /* Port Size 16 bit */
+#define BR_PS_32 0x00001800 /* Port Size 32 bit */
+#define BR_DECC 0x00000600
+#define BR_DECC_SHIFT 9
+#define BR_DECC_OFF 0x00000000 /* HW ECC checking and generation off */
+#define BR_DECC_CHK 0x00000200 /* HW ECC checking on, generation off */
+#define BR_DECC_CHK_GEN 0x00000400 /* HW ECC checking and generation on */
+#define BR_WP 0x00000100
+#define BR_WP_SHIFT 8
+#define BR_MSEL 0x000000E0
+#define BR_MSEL_SHIFT 5
+#define BR_MS_GPCM 0x00000000 /* GPCM */
+#define BR_MS_FCM 0x00000020 /* FCM */
+#define BR_MS_SDRAM 0x00000060 /* SDRAM */
+#define BR_MS_UPMA 0x00000080 /* UPMA */
+#define BR_MS_UPMB 0x000000A0 /* UPMB */
+#define BR_MS_UPMC 0x000000C0 /* UPMC */
+#define BR_V 0x00000001
+#define BR_V_SHIFT 0
+#define BR_RES ~(BR_BA|BR_PS|BR_DECC|BR_WP|BR_MSEL|BR_V)
+
+	__be32 or;              /**< Options Register */
+#define OR0 0x5004
+#define OR1 0x500C
+#define OR2 0x5014
+#define OR3 0x501C
+#define OR4 0x5024
+#define OR5 0x502C
+#define OR6 0x5034
+#define OR7 0x503C
+
+#define OR_FCM_AM 0xFFFF8000
+#define OR_FCM_AM_SHIFT 15
+#define OR_FCM_BCTLD 0x00001000
+#define OR_FCM_BCTLD_SHIFT 12
+#define OR_FCM_PGS 0x00000400
+#define OR_FCM_PGS_SHIFT 10
+#define OR_FCM_CSCT 0x00000200
+#define OR_FCM_CSCT_SHIFT 9
+#define OR_FCM_CST 0x00000100
+#define OR_FCM_CST_SHIFT 8
+#define OR_FCM_CHT 0x00000080
+#define OR_FCM_CHT_SHIFT 7
+#define OR_FCM_SCY 0x00000070
+#define OR_FCM_SCY_SHIFT 4
+#define OR_FCM_SCY_1 0x00000010
+#define OR_FCM_SCY_2 0x00000020
+#define OR_FCM_SCY_3 0x00000030
+#define OR_FCM_SCY_4 0x00000040
+#define OR_FCM_SCY_5 0x00000050
+#define OR_FCM_SCY_6 0x00000060
+#define OR_FCM_SCY_7 0x00000070
+#define OR_FCM_RST 0x00000008
+#define OR_FCM_RST_SHIFT 3
+#define OR_FCM_TRLX 0x00000004
+#define OR_FCM_TRLX_SHIFT 2
+#define OR_FCM_EHTR 0x00000002
+#define OR_FCM_EHTR_SHIFT 1
+};
+
+struct fsl_lbc_regs {
+ struct fsl_lbc_bank bank[8];
+ u8 res0[0x28];
+ __be32 mar; /**< UPM Address Register */
+ u8 res1[0x4];
+ __be32 mamr; /**< UPMA Mode Register */
+#define MxMR_OP_NO (0 << 28) /**< normal operation */
+#define MxMR_OP_WA (1 << 28) /**< write array */
+#define MxMR_OP_RA (2 << 28) /**< read array */
+#define MxMR_OP_RP (3 << 28) /**< run pattern */
+#define MxMR_MAD 0x3f /**< machine address */
+ __be32 mbmr; /**< UPMB Mode Register */
+ __be32 mcmr; /**< UPMC Mode Register */
+ u8 res2[0x8];
+ __be32 mrtpr; /**< Memory Refresh Timer Prescaler Register */
+ __be32 mdr; /**< UPM Data Register */
+ u8 res3[0x4];
+ __be32 lsor; /**< Special Operation Initiation Register */
+ __be32 lsdmr; /**< SDRAM Mode Register */
+ u8 res4[0x8];
+ __be32 lurt; /**< UPM Refresh Timer */
+ __be32 lsrt; /**< SDRAM Refresh Timer */
+ u8 res5[0x8];
+ __be32 ltesr; /**< Transfer Error Status Register */
+#define LTESR_BM 0x80000000
+#define LTESR_FCT 0x40000000
+#define LTESR_PAR 0x20000000
+#define LTESR_WP 0x04000000
+#define LTESR_ATMW 0x00800000
+#define LTESR_ATMR 0x00400000
+#define LTESR_CS 0x00080000
+#define LTESR_CC 0x00000001
+#define LTESR_NAND_MASK (LTESR_FCT | LTESR_PAR | LTESR_CC)
+ __be32 ltedr; /**< Transfer Error Disable Register */
+ __be32 lteir; /**< Transfer Error Interrupt Register */
+ __be32 lteatr; /**< Transfer Error Attributes Register */
+ __be32 ltear; /**< Transfer Error Address Register */
+ u8 res6[0xC];
+ __be32 lbcr; /**< Configuration Register */
+#define LBCR_LDIS 0x80000000
+#define LBCR_LDIS_SHIFT 31
+#define LBCR_BCTLC 0x00C00000
+#define LBCR_BCTLC_SHIFT 22
+#define LBCR_AHD 0x00200000
+#define LBCR_LPBSE 0x00020000
+#define LBCR_LPBSE_SHIFT 17
+#define LBCR_EPAR 0x00010000
+#define LBCR_EPAR_SHIFT 16
+#define LBCR_BMT 0x0000FF00
+#define LBCR_BMT_SHIFT 8
+#define LBCR_INIT 0x00040000
+ __be32 lcrr; /**< Clock Ratio Register */
+#define LCRR_DBYP 0x80000000
+#define LCRR_DBYP_SHIFT 31
+#define LCRR_BUFCMDC 0x30000000
+#define LCRR_BUFCMDC_SHIFT 28
+#define LCRR_ECL 0x03000000
+#define LCRR_ECL_SHIFT 24
+#define LCRR_EADC 0x00030000
+#define LCRR_EADC_SHIFT 16
+#define LCRR_CLKDIV 0x0000000F
+#define LCRR_CLKDIV_SHIFT 0
+ u8 res7[0x8];
+ __be32 fmr; /**< Flash Mode Register */
+#define FMR_CWTO 0x0000F000
+#define FMR_CWTO_SHIFT 12
+#define FMR_BOOT 0x00000800
+#define FMR_ECCM 0x00000100
+#define FMR_AL 0x00000030
+#define FMR_AL_SHIFT 4
+#define FMR_OP 0x00000003
+#define FMR_OP_SHIFT 0
+ __be32 fir; /**< Flash Instruction Register */
+#define FIR_OP0 0xF0000000
+#define FIR_OP0_SHIFT 28
+#define FIR_OP1 0x0F000000
+#define FIR_OP1_SHIFT 24
+#define FIR_OP2 0x00F00000
+#define FIR_OP2_SHIFT 20
+#define FIR_OP3 0x000F0000
+#define FIR_OP3_SHIFT 16
+#define FIR_OP4 0x0000F000
+#define FIR_OP4_SHIFT 12
+#define FIR_OP5 0x00000F00
+#define FIR_OP5_SHIFT 8
+#define FIR_OP6 0x000000F0
+#define FIR_OP6_SHIFT 4
+#define FIR_OP7 0x0000000F
+#define FIR_OP7_SHIFT 0
+#define FIR_OP_NOP 0x0 /* No operation and end of sequence */
+#define FIR_OP_CA 0x1 /* Issue current column address */
+#define FIR_OP_PA 0x2 /* Issue current block+page address */
+#define FIR_OP_UA 0x3 /* Issue user defined address */
+#define FIR_OP_CM0 0x4 /* Issue command from FCR[CMD0] */
+#define FIR_OP_CM1 0x5 /* Issue command from FCR[CMD1] */
+#define FIR_OP_CM2 0x6 /* Issue command from FCR[CMD2] */
+#define FIR_OP_CM3 0x7 /* Issue command from FCR[CMD3] */
+#define FIR_OP_WB 0x8 /* Write FBCR bytes from FCM buffer */
+#define FIR_OP_WS 0x9 /* Write 1 or 2 bytes from MDR[AS] */
+#define FIR_OP_RB 0xA /* Read FBCR bytes to FCM buffer */
+#define FIR_OP_RS 0xB /* Read 1 or 2 bytes to MDR[AS] */
+#define FIR_OP_CW0 0xC /* Wait then issue FCR[CMD0] */
+#define FIR_OP_CW1 0xD /* Wait then issue FCR[CMD1] */
+#define FIR_OP_RBW 0xE /* Wait then read FBCR bytes */
+#define FIR_OP_RSW	0xF	/* Wait then read 1 or 2 bytes */
+ __be32 fcr; /**< Flash Command Register */
+#define FCR_CMD0 0xFF000000
+#define FCR_CMD0_SHIFT 24
+#define FCR_CMD1 0x00FF0000
+#define FCR_CMD1_SHIFT 16
+#define FCR_CMD2 0x0000FF00
+#define FCR_CMD2_SHIFT 8
+#define FCR_CMD3 0x000000FF
+#define FCR_CMD3_SHIFT 0
+ __be32 fbar; /**< Flash Block Address Register */
+#define FBAR_BLK 0x00FFFFFF
+ __be32 fpar; /**< Flash Page Address Register */
+#define FPAR_SP_PI 0x00007C00
+#define FPAR_SP_PI_SHIFT 10
+#define FPAR_SP_MS 0x00000200
+#define FPAR_SP_CI 0x000001FF
+#define FPAR_SP_CI_SHIFT 0
+#define FPAR_LP_PI 0x0003F000
+#define FPAR_LP_PI_SHIFT 12
+#define FPAR_LP_MS 0x00000800
+#define FPAR_LP_CI 0x000007FF
+#define FPAR_LP_CI_SHIFT 0
+ __be32 fbcr; /**< Flash Byte Count Register */
+#define FBCR_BC 0x00000FFF
+ u8 res11[0x8];
+ u8 res8[0xF00];
+};
+
+extern struct fsl_lbc_regs __iomem *fsl_lbc_regs;
+extern spinlock_t fsl_lbc_lock;
+
+/*
+ * FSL UPM routines
+ */
+struct fsl_upm {
+ __be32 __iomem *mxmr;
+ int width;
+};
+
+extern int fsl_lbc_find(phys_addr_t addr_base);
+extern int fsl_upm_find(phys_addr_t addr_base, struct fsl_upm *upm);
+
+/**
+ * fsl_upm_start_pattern - start UPM patterns execution
+ * @upm: pointer to the fsl_upm structure obtained via fsl_upm_find
+ * @pat_offset: UPM pattern offset for the command to be executed
+ *
+ * This routine programs the UPM so that the next memory access that hits a
+ * UPM-controlled bank will trigger pattern execution, starting at pat_offset.
+ */
+static inline void fsl_upm_start_pattern(struct fsl_upm *upm, u8 pat_offset)
+{
+ clrsetbits_be32(upm->mxmr, MxMR_MAD, MxMR_OP_RP | pat_offset);
+}
+
+/**
+ * fsl_upm_end_pattern - end UPM patterns execution
+ * @upm: pointer to the fsl_upm structure obtained via fsl_upm_find
+ *
+ * This routine reverts UPM to normal operation mode.
+ */
+static inline void fsl_upm_end_pattern(struct fsl_upm *upm)
+{
+ clrbits32(upm->mxmr, MxMR_OP_RP);
+
+ while (in_be32(upm->mxmr) & MxMR_OP_RP)
+ cpu_relax();
+}
+
+/**
+ * fsl_upm_run_pattern - actually run an UPM pattern
+ * @upm: pointer to the fsl_upm structure obtained via fsl_upm_find
+ * @io_base: remapped pointer to where memory access should happen
+ * @mar: MAR register content during pattern execution
+ *
+ * This function triggers a dummy write to the memory specified by io_base,
+ * so that the UPM pattern is actually executed. Note that mar usage depends
+ * on the pre-programmed AMX bits in the UPM RAM.
+ */
+static inline int fsl_upm_run_pattern(struct fsl_upm *upm,
+ void __iomem *io_base, u32 mar)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fsl_lbc_lock, flags);
+
+ out_be32(&fsl_lbc_regs->mar, mar << (32 - upm->width));
+
+ switch (upm->width) {
+ case 8:
+ out_8(io_base, 0x0);
+ break;
+ case 16:
+ out_be16(io_base, 0x0);
+ break;
+ case 32:
+ out_be32(io_base, 0x0);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ spin_unlock_irqrestore(&fsl_lbc_lock, flags);
+
+ return ret;
+}
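
The three UPM helpers are meant to be used together; a hypothetical
driver-side sequence (name illustrative) issuing one pre-programmed pattern
would look like:

static int example_upm_command(struct fsl_upm *upm, void __iomem *io_base,
			       u8 pat_offset, u32 mar)
{
	int ret;

	fsl_upm_start_pattern(upm, pat_offset);		/* arm the RAM pattern */
	ret = fsl_upm_run_pattern(upm, io_base, mar);	/* dummy access runs it */
	fsl_upm_end_pattern(upm);			/* back to normal mode */
	return ret;
}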
+
+#endif /* __ASM_FSL_LBC_H */
diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h
new file mode 100644
index 0000000..de92132
--- /dev/null
+++ b/arch/powerpc/include/asm/ftrace.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_POWERPC_FTRACE
+#define _ASM_POWERPC_FTRACE
+
+#ifdef CONFIG_FTRACE
+#define MCOUNT_ADDR ((long)(_mcount))
+#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
+
+#ifndef __ASSEMBLY__
+extern void _mcount(void);
+#endif
+
+#endif
+
+#endif /* _ASM_POWERPC_FTRACE */
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
new file mode 100644
index 0000000..6d406c5
--- /dev/null
+++ b/arch/powerpc/include/asm/futex.h
@@ -0,0 +1,117 @@
+#ifndef _ASM_POWERPC_FUTEX_H
+#define _ASM_POWERPC_FUTEX_H
+
+#ifdef __KERNEL__
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <asm/errno.h>
+#include <asm/synch.h>
+#include <asm/asm-compat.h>
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+ __asm__ __volatile ( \
+ LWSYNC_ON_SMP \
+"1: lwarx %0,0,%2\n" \
+ insn \
+ PPC405_ERR77(0, %2) \
+"2: stwcx. %1,0,%2\n" \
+ "bne- 1b\n" \
+ "li %1,0\n" \
+"3: .section .fixup,\"ax\"\n" \
+"4: li %1,%3\n" \
+ "b 3b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ ".align 3\n" \
+ PPC_LONG "1b,4b,2b,4b\n" \
+ ".previous" \
+ : "=&r" (oldval), "=&r" (ret) \
+ : "b" (uaddr), "i" (-EFAULT), "1" (oparg) \
+ : "cr0", "memory")
+
+static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+{
+ int op = (encoded_op >> 28) & 7;
+ int cmp = (encoded_op >> 24) & 15;
+ int oparg = (encoded_op << 8) >> 20;
+ int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, ret;
+ if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+ oparg = 1 << oparg;
+
+ if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+ pagefault_disable();
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op("", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_atomic_op("add %1,%0,%1\n", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op("or %1,%0,%1\n", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op("andc %1,%0,%1\n", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op("xor %1,%0,%1\n", ret, oldval, uaddr, oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ pagefault_enable();
+
+ if (!ret) {
+ switch (cmp) {
+ case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+ case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+ case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+ case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+ case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+ case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+ default: ret = -ENOSYS;
+ }
+ }
+ return ret;
+}
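For reference, a sketch of how an encoded_op word decoded above is laid out; the packing simply mirrors the shifts used in the decode (the generic futex code builds the same layout), and the concrete values are illustrative.

	/* Illustration: "add 1 to *uaddr, then report whether the old value
	 * compared equal to 0" -- matching the field extraction above. */
	int encoded_op = (FUTEX_OP_ADD    << 28)	/* op     in bits 31..28 */
		       | (FUTEX_OP_CMP_EQ << 24)	/* cmp    in bits 27..24 */
		       | ((1 & 0xfff)     << 12)	/* oparg  in bits 23..12 */
		       |  (0 & 0xfff);			/* cmparg in bits 11..0  */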
+
+static inline int
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+{
+ int prev;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+ __asm__ __volatile__ (
+ LWSYNC_ON_SMP
+"1: lwarx %0,0,%2 # futex_atomic_cmpxchg_inatomic\n\
+ cmpw 0,%0,%3\n\
+ bne- 3f\n"
+ PPC405_ERR77(0,%2)
+"2: stwcx. %4,0,%2\n\
+ bne- 1b\n"
+ ISYNC_ON_SMP
+"3: .section .fixup,\"ax\"\n\
+4: li %0,%5\n\
+ b 3b\n\
+ .previous\n\
+ .section __ex_table,\"a\"\n\
+ .align 3\n\
+ " PPC_LONG "1b,4b,2b,4b\n\
+ .previous" \
+ : "=&r" (prev), "+m" (*uaddr)
+ : "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT)
+ : "cc", "memory");
+
+ return prev;
+}
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_FUTEX_H */
diff --git a/arch/powerpc/include/asm/gpio.h b/arch/powerpc/include/asm/gpio.h
new file mode 100644
index 0000000..ea04632
--- /dev/null
+++ b/arch/powerpc/include/asm/gpio.h
@@ -0,0 +1,56 @@
+/*
+ * Generic GPIO API implementation for PowerPC.
+ *
+ * Copyright (c) 2007-2008 MontaVista Software, Inc.
+ *
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_POWERPC_GPIO_H
+#define __ASM_POWERPC_GPIO_H
+
+#include <linux/errno.h>
+#include <asm-generic/gpio.h>
+
+#ifdef CONFIG_GPIOLIB
+
+/*
+ * We don't (yet) implement inlined/rapid versions for on-chip gpios.
+ * Just call gpiolib.
+ */
+static inline int gpio_get_value(unsigned int gpio)
+{
+ return __gpio_get_value(gpio);
+}
+
+static inline void gpio_set_value(unsigned int gpio, int value)
+{
+ __gpio_set_value(gpio, value);
+}
+
+static inline int gpio_cansleep(unsigned int gpio)
+{
+ return __gpio_cansleep(gpio);
+}
+
+/*
+ * Not implemented, yet.
+ */
+static inline int gpio_to_irq(unsigned int gpio)
+{
+ return -ENOSYS;
+}
+
+static inline int irq_to_gpio(unsigned int irq)
+{
+ return -EINVAL;
+}
+
+#endif /* CONFIG_GPIOLIB */
+
+#endif /* __ASM_POWERPC_GPIO_H */
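A minimal consumer sketch for the wrappers above; gpio_request(), gpio_direction_output() and gpio_free() come from the gpiolib core pulled in through <asm-generic/gpio.h>, and the GPIO number is a made-up board-specific value.

/* Illustrative only: drive a hypothetical LED line through gpiolib. */
#include <linux/gpio.h>

#define EXAMPLE_LED_GPIO	42	/* board specific, illustrative */

static int example_led_on(void)
{
	int ret;

	ret = gpio_request(EXAMPLE_LED_GPIO, "example-led");
	if (ret)
		return ret;

	ret = gpio_direction_output(EXAMPLE_LED_GPIO, 0);
	if (ret) {
		gpio_free(EXAMPLE_LED_GPIO);
		return ret;
	}

	gpio_set_value(EXAMPLE_LED_GPIO, 1);	/* ends up in __gpio_set_value() */
	return 0;
}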
diff --git a/arch/powerpc/include/asm/grackle.h b/arch/powerpc/include/asm/grackle.h
new file mode 100644
index 0000000..bd7812a
--- /dev/null
+++ b/arch/powerpc/include/asm/grackle.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_POWERPC_GRACKLE_H
+#define _ASM_POWERPC_GRACKLE_H
+#ifdef __KERNEL__
+/*
+ * Functions for setting up and using a MPC106 northbridge
+ */
+
+#include <asm/pci-bridge.h>
+
+extern void setup_grackle(struct pci_controller *hose);
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_GRACKLE_H */
diff --git a/arch/powerpc/include/asm/hardirq.h b/arch/powerpc/include/asm/hardirq.h
new file mode 100644
index 0000000..288e14d
--- /dev/null
+++ b/arch/powerpc/include/asm/hardirq.h
@@ -0,0 +1,29 @@
+#ifndef _ASM_POWERPC_HARDIRQ_H
+#define _ASM_POWERPC_HARDIRQ_H
+#ifdef __KERNEL__
+
+#include <asm/irq.h>
+#include <asm/bug.h>
+
+/* The __last_jiffy_stamp field is needed to ensure that no decrementer
+ * interrupt is lost on SMP machines. Since on most CPUs it is in the same
+ * cache line as local_irq_count, it is cheap to access and is also used on UP
+ * for uniformity.
+ */
+typedef struct {
+ unsigned int __softirq_pending; /* set_bit is used on this */
+ unsigned int __last_jiffy_stamp;
+} ____cacheline_aligned irq_cpustat_t;
+
+#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
+
+#define last_jiffy_stamp(cpu) __IRQ_STAT((cpu), __last_jiffy_stamp)
+
+static inline void ack_bad_irq(int irq)
+{
+ printk(KERN_CRIT "illegal vector %d received!\n", irq);
+ BUG();
+}
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_HARDIRQ_H */
diff --git a/arch/powerpc/include/asm/heathrow.h b/arch/powerpc/include/asm/heathrow.h
new file mode 100644
index 0000000..93f5495
--- /dev/null
+++ b/arch/powerpc/include/asm/heathrow.h
@@ -0,0 +1,67 @@
+#ifndef _ASM_POWERPC_HEATHROW_H
+#define _ASM_POWERPC_HEATHROW_H
+#ifdef __KERNEL__
+/*
+ * heathrow.h: definitions for using the "Heathrow" I/O controller chip.
+ *
+ * Grabbed from Open Firmware definitions on a PowerBook G3 Series
+ *
+ * Copyright (C) 1997 Paul Mackerras.
+ */
+
+/* Front light color on Yikes/B&W G3. 32 bits */
+#define HEATHROW_FRONT_LIGHT 0x32 /* (set to 0 or 0xffffffff) */
+
+/* Brightness/contrast (gossamer iMac ?). 8 bits */
+#define HEATHROW_BRIGHTNESS_CNTL 0x32
+#define HEATHROW_CONTRAST_CNTL 0x33
+
+/* offset from ohare base for feature control register */
+#define HEATHROW_MBCR 0x34 /* Media bay control */
+#define HEATHROW_FCR 0x38 /* Feature control */
+#define HEATHROW_AUX_CNTL_REG 0x3c /* Aux control */
+
+/*
+ * Bits in feature control register.
+ * Bits postfixed with a _N are in inverse logic
+ */
+#define HRW_SCC_TRANS_EN_N 0x00000001 /* Also controls modem power */
+#define HRW_BAY_POWER_N 0x00000002
+#define HRW_BAY_PCI_ENABLE 0x00000004
+#define HRW_BAY_IDE_ENABLE 0x00000008
+#define HRW_BAY_FLOPPY_ENABLE 0x00000010
+#define HRW_IDE0_ENABLE 0x00000020
+#define HRW_IDE0_RESET_N 0x00000040
+#define HRW_BAY_DEV_MASK 0x0000001c
+#define HRW_BAY_RESET_N 0x00000080
+#define HRW_IOBUS_ENABLE 0x00000100 /* Internal IDE ? */
+#define HRW_SCC_ENABLE 0x00000200
+#define HRW_MESH_ENABLE 0x00000400
+#define HRW_SWIM_ENABLE 0x00000800
+#define HRW_SOUND_POWER_N 0x00001000
+#define HRW_SOUND_CLK_ENABLE 0x00002000
+#define HRW_SCCA_IO 0x00004000
+#define HRW_SCCB_IO 0x00008000
+#define HRW_PORT_OR_DESK_VIA_N 0x00010000 /* This one is 0 on PowerBook */
+#define HRW_PWM_MON_ID_N 0x00020000 /* ??? (0) */
+#define HRW_HOOK_MB_CNT_N 0x00040000 /* ??? (0) */
+#define HRW_SWIM_CLONE_FLOPPY 0x00080000 /* ??? (0) */
+#define HRW_AUD_RUN22 0x00100000 /* ??? (1) */
+#define HRW_SCSI_LINK_MODE 0x00200000 /* Read ??? (1) */
+#define HRW_ARB_BYPASS 0x00400000 /* Disable internal PCI arbiter */
+#define HRW_IDE1_RESET_N 0x00800000 /* Media bay */
+#define HRW_SLOW_SCC_PCLK 0x01000000 /* ??? (0) */
+#define HRW_RESET_SCC 0x02000000
+#define HRW_MFDC_CELL_ENABLE 0x04000000 /* ??? (0) */
+#define HRW_USE_MFDC 0x08000000 /* ??? (0) */
+#define HRW_BMAC_IO_ENABLE 0x60000000 /* two bits, not documented in OF */
+#define HRW_BMAC_RESET 0x80000000 /* not documented in OF */
+
+/* We OR those features at boot on desktop G3s */
+#define HRW_DEFAULTS (HRW_SCCA_IO | HRW_SCCB_IO | HRW_SCC_ENABLE)
+
+/* Looks like Heathrow has some sort of GPIOs as well... */
+#define HRW_GPIO_MODEM_RESET 0x6d
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_HEATHROW_H */
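To illustrate how the FCR bit definitions above are meant to be combined (the accessor choice and the already-mapped FCR pointer are assumptions here, not part of this header), a desktop G3 style setup could OR HRW_DEFAULTS into the feature control register:

/* Illustrative only: 'fcr' is a hypothetical ioremap()ed pointer to HEATHROW_FCR. */
#include <asm/io.h>
#include <asm/heathrow.h>

static void example_enable_scc(u32 __iomem *fcr)
{
	out_le32(fcr, in_le32(fcr) | HRW_DEFAULTS);	/* SCCA/SCCB I/O + SCC cell */
}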
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
new file mode 100644
index 0000000..5d99b648
--- /dev/null
+++ b/arch/powerpc/include/asm/highmem.h
@@ -0,0 +1,138 @@
+/*
+ * highmem.h: virtual kernel memory mappings for high memory
+ *
+ * PowerPC version, stolen from the i386 version.
+ *
+ * Used in CONFIG_HIGHMEM systems for memory pages which
+ * are not addressable by direct kernel virtual addresses.
+ *
+ * Copyright (C) 1999 Gerhard Wichert, Siemens AG
+ * Gerhard.Wichert@pdb.siemens.de
+ *
+ *
+ * Redesigned the x86 32-bit VM architecture to deal with
+ * up to 16 Terabytes of physical memory. With current x86 CPUs
+ * we now support up to 64 Gigabytes of physical RAM.
+ *
+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
+ */
+
+#ifndef _ASM_HIGHMEM_H
+#define _ASM_HIGHMEM_H
+
+#ifdef __KERNEL__
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <asm/kmap_types.h>
+#include <asm/tlbflush.h>
+#include <asm/page.h>
+#include <asm/fixmap.h>
+
+extern pte_t *kmap_pte;
+extern pgprot_t kmap_prot;
+extern pte_t *pkmap_page_table;
+
+/*
+ * Right now we initialize only a single pte table. It can be extended
+ * easily; subsequent pte tables have to be allocated in one physical
+ * chunk of RAM.
+ */
+#define LAST_PKMAP (1 << PTE_SHIFT)
+#define LAST_PKMAP_MASK (LAST_PKMAP-1)
+#define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK)
+#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
+#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+
+extern void *kmap_high(struct page *page);
+extern void kunmap_high(struct page *page);
+
+static inline void *kmap(struct page *page)
+{
+ might_sleep();
+ if (!PageHighMem(page))
+ return page_address(page);
+ return kmap_high(page);
+}
+
+static inline void kunmap(struct page *page)
+{
+ BUG_ON(in_interrupt());
+ if (!PageHighMem(page))
+ return;
+ kunmap_high(page);
+}
+
+/*
+ * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
+ * gives a more generic (and caching) interface. But kmap_atomic can
+ * be used in IRQ contexts, so in some (very limited) cases we need
+ * it.
+ */
+static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+{
+ unsigned int idx;
+ unsigned long vaddr;
+
+ /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+ pagefault_disable();
+ if (!PageHighMem(page))
+ return page_address(page);
+
+ idx = type + KM_TYPE_NR*smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+ BUG_ON(!pte_none(*(kmap_pte-idx)));
+#endif
+ set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
+ flush_tlb_page(NULL, vaddr);
+
+ return (void*) vaddr;
+}
+
+static inline void *kmap_atomic(struct page *page, enum km_type type)
+{
+ return kmap_atomic_prot(page, type, kmap_prot);
+}
+
+static inline void kunmap_atomic(void *kvaddr, enum km_type type)
+{
+#ifdef CONFIG_DEBUG_HIGHMEM
+ unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+ enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+
+ if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
+ pagefault_enable();
+ return;
+ }
+
+ BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+
+ /*
+ * force other mappings to Oops if they try to access
+ * this pte without first remapping it
+ */
+ pte_clear(&init_mm, vaddr, kmap_pte-idx);
+ flush_tlb_page(NULL, vaddr);
+#endif
+ pagefault_enable();
+}
+
+static inline struct page *kmap_atomic_to_page(void *ptr)
+{
+ unsigned long idx, vaddr = (unsigned long) ptr;
+ pte_t *pte;
+
+ if (vaddr < FIXADDR_START)
+ return virt_to_page(ptr);
+
+ idx = virt_to_fix(vaddr);
+ pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
+ return pte_page(*pte);
+}
+
+#define flush_cache_kmaps() flush_cache_all()
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_HIGHMEM_H */
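A short sketch of the atomic mapping interface described above; KM_USER0 is one of the slots from <asm/kmap_types.h>, and the helper itself is illustrative.

/* Illustrative only: zero the first 'len' bytes of a (possibly highmem) page. */
#include <linux/highmem.h>
#include <linux/string.h>

static void example_zero_page_start(struct page *page, size_t len)
{
	void *vaddr = kmap_atomic(page, KM_USER0);	/* usable even in IRQ context */

	memset(vaddr, 0, len);
	kunmap_atomic(vaddr, KM_USER0);
}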
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
new file mode 100644
index 0000000..26f0d0a
--- /dev/null
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -0,0 +1,75 @@
+#ifndef _ASM_POWERPC_HUGETLB_H
+#define _ASM_POWERPC_HUGETLB_H
+
+#include <asm/page.h>
+
+
+int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
+ unsigned long len);
+
+void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
+ unsigned long end, unsigned long floor,
+ unsigned long ceiling);
+
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte);
+
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep);
+
+/*
+ * If the arch doesn't supply something else, assume that hugepage
+ * size aligned regions are ok without further preparation.
+ */
+static inline int prepare_hugepage_range(struct file *file,
+ unsigned long addr, unsigned long len)
+{
+ struct hstate *h = hstate_file(file);
+ if (len & ~huge_page_mask(h))
+ return -EINVAL;
+ if (addr & ~huge_page_mask(h))
+ return -EINVAL;
+ return 0;
+}
+
+static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
+{
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+ return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+ return pte_wrprotect(pte);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty)
+{
+ return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+ return *ptep;
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+ return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
+#endif /* _ASM_POWERPC_HUGETLB_H */
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
new file mode 100644
index 0000000..fbe2932
--- /dev/null
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -0,0 +1,296 @@
+#ifndef _ASM_POWERPC_HVCALL_H
+#define _ASM_POWERPC_HVCALL_H
+#ifdef __KERNEL__
+
+#define HVSC .long 0x44000022
+
+#define H_SUCCESS 0
+#define H_BUSY 1 /* Hardware busy -- retry later */
+#define H_CLOSED 2 /* Resource closed */
+#define H_NOT_AVAILABLE 3
+#define H_CONSTRAINED 4 /* Resource request constrained to max allowed */
+#define H_PARTIAL 5
+#define H_IN_PROGRESS 14 /* Kind of like busy */
+#define H_PAGE_REGISTERED 15
+#define H_PARTIAL_STORE 16
+#define H_PENDING 17 /* returned from H_POLL_PENDING */
+#define H_CONTINUE 18 /* Returned from H_Join on success */
+#define H_LONG_BUSY_START_RANGE 9900 /* Start of long busy range */
+#define H_LONG_BUSY_ORDER_1_MSEC 9900 /* Long busy, hint that 1msec \
+ is a good time to retry */
+#define H_LONG_BUSY_ORDER_10_MSEC 9901 /* Long busy, hint that 10msec \
+ is a good time to retry */
+#define H_LONG_BUSY_ORDER_100_MSEC 9902 /* Long busy, hint that 100msec \
+ is a good time to retry */
+#define H_LONG_BUSY_ORDER_1_SEC 9903 /* Long busy, hint that 1sec \
+ is a good time to retry */
+#define H_LONG_BUSY_ORDER_10_SEC 9904 /* Long busy, hint that 10sec \
+ is a good time to retry */
+#define H_LONG_BUSY_ORDER_100_SEC 9905 /* Long busy, hint that 100sec \
+ is a good time to retry */
+#define H_LONG_BUSY_END_RANGE 9905 /* End of long busy range */
+#define H_HARDWARE -1 /* Hardware error */
+#define H_FUNCTION -2 /* Function not supported */
+#define H_PRIVILEGE -3 /* Caller not privileged */
+#define H_PARAMETER -4 /* Parameter invalid, out-of-range or conflicting */
+#define H_BAD_MODE -5 /* Illegal msr value */
+#define H_PTEG_FULL -6 /* PTEG is full */
+#define H_NOT_FOUND -7 /* PTE was not found */
+#define H_RESERVED_DABR -8 /* DABR address is reserved by the hypervisor on this processor */
+#define H_NO_MEM -9
+#define H_AUTHORITY -10
+#define H_PERMISSION -11
+#define H_DROPPED -12
+#define H_SOURCE_PARM -13
+#define H_DEST_PARM -14
+#define H_REMOTE_PARM -15
+#define H_RESOURCE -16
+#define H_ADAPTER_PARM -17
+#define H_RH_PARM -18
+#define H_RCQ_PARM -19
+#define H_SCQ_PARM -20
+#define H_EQ_PARM -21
+#define H_RT_PARM -22
+#define H_ST_PARM -23
+#define H_SIGT_PARM -24
+#define H_TOKEN_PARM -25
+#define H_MLENGTH_PARM -27
+#define H_MEM_PARM -28
+#define H_MEM_ACCESS_PARM -29
+#define H_ATTR_PARM -30
+#define H_PORT_PARM -31
+#define H_MCG_PARM -32
+#define H_VL_PARM -33
+#define H_TSIZE_PARM -34
+#define H_TRACE_PARM -35
+
+#define H_MASK_PARM -37
+#define H_MCG_FULL -38
+#define H_ALIAS_EXIST -39
+#define H_P_COUNTER -40
+#define H_TABLE_FULL -41
+#define H_ALT_TABLE -42
+#define H_MR_CONDITION -43
+#define H_NOT_ENOUGH_RESOURCES -44
+#define H_R_STATE -45
+#define H_RESCINDEND -46
+
+
+/* Long Busy is a condition that can be returned by the firmware
+ * when a call cannot be completed now, but the identical call
+ * should be retried later. This prevents calls from blocking in the
+ * firmware for long periods of time. Annoyingly the firmware can return
+ * a range of return codes, hinting at how long we should wait before
+ * retrying. If you don't care for the hint, the macro below is a good
+ * way to check for the long_busy return codes.
+ */
+#define H_IS_LONG_BUSY(x) ((x >= H_LONG_BUSY_START_RANGE) \
+ && (x <= H_LONG_BUSY_END_RANGE))
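A hedged sketch of the retry convention just described, using plpar_hcall_norets() declared later in this header; the fixed 1ms sleep ignores the length hint and is illustrative only.

/* Illustrative only: retry an hcall while firmware reports busy/long-busy. */
#include <linux/delay.h>

static long example_hcall_retry(unsigned long opcode, unsigned long arg)
{
	long rc;

	do {
		rc = plpar_hcall_norets(opcode, arg);
		if (H_IS_LONG_BUSY(rc))
			msleep(1);		/* crude: a real caller decodes the hint */
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	return rc;
}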
+
+/* Flags */
+#define H_LARGE_PAGE (1UL<<(63-16))
+#define H_EXACT (1UL<<(63-24)) /* Use exact PTE or return H_PTEG_FULL */
+#define H_R_XLATE (1UL<<(63-25)) /* include a valid logical page num in the pte if the valid bit is set */
+#define H_READ_4 (1UL<<(63-26)) /* Return 4 PTEs */
+#define H_PAGE_STATE_CHANGE (1UL<<(63-28))
+#define H_PAGE_UNUSED ((1UL<<(63-29)) | (1UL<<(63-30)))
+#define H_PAGE_SET_UNUSED (H_PAGE_STATE_CHANGE | H_PAGE_UNUSED)
+#define H_PAGE_SET_LOANED (H_PAGE_SET_UNUSED | (1UL<<(63-31)))
+#define H_PAGE_SET_ACTIVE H_PAGE_STATE_CHANGE
+#define H_AVPN (1UL<<(63-32)) /* An avpn is provided as a sanity test */
+#define H_ANDCOND (1UL<<(63-33))
+#define H_ICACHE_INVALIDATE (1UL<<(63-40)) /* icbi, etc. (ignored for IO pages) */
+#define H_ICACHE_SYNCHRONIZE (1UL<<(63-41)) /* dcbst, icbi, etc (ignored for IO pages) */
+#define H_ZERO_PAGE (1UL<<(63-48)) /* zero the page before mapping (ignored for IO pages) */
+#define H_COPY_PAGE (1UL<<(63-49))
+#define H_N (1UL<<(63-61))
+#define H_PP1 (1UL<<(63-62))
+#define H_PP2 (1UL<<(63-63))
+
+/* VASI States */
+#define H_VASI_INVALID 0
+#define H_VASI_ENABLED 1
+#define H_VASI_ABORTED 2
+#define H_VASI_SUSPENDING 3
+#define H_VASI_SUSPENDED 4
+#define H_VASI_RESUMED 5
+#define H_VASI_COMPLETED 6
+
+/* DABRX flags */
+#define H_DABRX_HYPERVISOR (1UL<<(63-61))
+#define H_DABRX_KERNEL (1UL<<(63-62))
+#define H_DABRX_USER (1UL<<(63-63))
+
+/* Each control block has to be on a 4K boundary */
+#define H_CB_ALIGNMENT 4096
+
+/* pSeries hypervisor opcodes */
+#define H_REMOVE 0x04
+#define H_ENTER 0x08
+#define H_READ 0x0c
+#define H_CLEAR_MOD 0x10
+#define H_CLEAR_REF 0x14
+#define H_PROTECT 0x18
+#define H_GET_TCE 0x1c
+#define H_PUT_TCE 0x20
+#define H_SET_SPRG0 0x24
+#define H_SET_DABR 0x28
+#define H_PAGE_INIT 0x2c
+#define H_SET_ASR 0x30
+#define H_ASR_ON 0x34
+#define H_ASR_OFF 0x38
+#define H_LOGICAL_CI_LOAD 0x3c
+#define H_LOGICAL_CI_STORE 0x40
+#define H_LOGICAL_CACHE_LOAD 0x44
+#define H_LOGICAL_CACHE_STORE 0x48
+#define H_LOGICAL_ICBI 0x4c
+#define H_LOGICAL_DCBF 0x50
+#define H_GET_TERM_CHAR 0x54
+#define H_PUT_TERM_CHAR 0x58
+#define H_REAL_TO_LOGICAL 0x5c
+#define H_HYPERVISOR_DATA 0x60
+#define H_EOI 0x64
+#define H_CPPR 0x68
+#define H_IPI 0x6c
+#define H_IPOLL 0x70
+#define H_XIRR 0x74
+#define H_PERFMON 0x7c
+#define H_MIGRATE_DMA 0x78
+#define H_REGISTER_VPA 0xDC
+#define H_CEDE 0xE0
+#define H_CONFER 0xE4
+#define H_PROD 0xE8
+#define H_GET_PPP 0xEC
+#define H_SET_PPP 0xF0
+#define H_PURR 0xF4
+#define H_PIC 0xF8
+#define H_REG_CRQ 0xFC
+#define H_FREE_CRQ 0x100
+#define H_VIO_SIGNAL 0x104
+#define H_SEND_CRQ 0x108
+#define H_COPY_RDMA 0x110
+#define H_REGISTER_LOGICAL_LAN 0x114
+#define H_FREE_LOGICAL_LAN 0x118
+#define H_ADD_LOGICAL_LAN_BUFFER 0x11C
+#define H_SEND_LOGICAL_LAN 0x120
+#define H_BULK_REMOVE 0x124
+#define H_MULTICAST_CTRL 0x130
+#define H_SET_XDABR 0x134
+#define H_STUFF_TCE 0x138
+#define H_PUT_TCE_INDIRECT 0x13C
+#define H_CHANGE_LOGICAL_LAN_MAC 0x14C
+#define H_VTERM_PARTNER_INFO 0x150
+#define H_REGISTER_VTERM 0x154
+#define H_FREE_VTERM 0x158
+#define H_RESET_EVENTS 0x15C
+#define H_ALLOC_RESOURCE 0x160
+#define H_FREE_RESOURCE 0x164
+#define H_MODIFY_QP 0x168
+#define H_QUERY_QP 0x16C
+#define H_REREGISTER_PMR 0x170
+#define H_REGISTER_SMR 0x174
+#define H_QUERY_MR 0x178
+#define H_QUERY_MW 0x17C
+#define H_QUERY_HCA 0x180
+#define H_QUERY_PORT 0x184
+#define H_MODIFY_PORT 0x188
+#define H_DEFINE_AQP1 0x18C
+#define H_GET_TRACE_BUFFER 0x190
+#define H_DEFINE_AQP0 0x194
+#define H_RESIZE_MR 0x198
+#define H_ATTACH_MCQP 0x19C
+#define H_DETACH_MCQP 0x1A0
+#define H_CREATE_RPT 0x1A4
+#define H_REMOVE_RPT 0x1A8
+#define H_REGISTER_RPAGES 0x1AC
+#define H_DISABLE_AND_GETC 0x1B0
+#define H_ERROR_DATA 0x1B4
+#define H_GET_HCA_INFO 0x1B8
+#define H_GET_PERF_COUNT 0x1BC
+#define H_MANAGE_TRACE 0x1C0
+#define H_FREE_LOGICAL_LAN_BUFFER 0x1D4
+#define H_QUERY_INT_STATE 0x1E4
+#define H_POLL_PENDING 0x1D8
+#define H_ILLAN_ATTRIBUTES 0x244
+#define H_JOIN 0x298
+#define H_VASI_STATE 0x2A4
+#define H_ENABLE_CRQ 0x2B0
+#define H_SET_MPP 0x2D0
+#define H_GET_MPP 0x2D4
+#define MAX_HCALL_OPCODE H_GET_MPP
+
+#ifndef __ASSEMBLY__
+
+/**
+ * plpar_hcall_norets: - Make a pseries hypervisor call with no return arguments
+ * @opcode: The hypervisor call to make.
+ *
+ * This call supports up to 7 arguments and only returns the status of
+ * the hcall. Use this version where possible; it's slightly faster than
+ * the other plpar_hcalls.
+ */
+long plpar_hcall_norets(unsigned long opcode, ...);
+
+/**
+ * plpar_hcall: - Make a pseries hypervisor call
+ * @opcode: The hypervisor call to make.
+ * @retbuf: Buffer to store up to 4 return arguments in.
+ *
+ * This call supports up to 6 arguments and 4 return arguments. Use
+ * PLPAR_HCALL_BUFSIZE to size the return argument buffer.
+ *
+ * Used for all but the craziest of phyp interfaces (see plpar_hcall9)
+ */
+#define PLPAR_HCALL_BUFSIZE 4
+long plpar_hcall(unsigned long opcode, unsigned long *retbuf, ...);
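A minimal caller sketch showing how PLPAR_HCALL_BUFSIZE sizes the return buffer; the opcode and argument are placeholders.

/* Illustrative only: issue an hcall and pick up its first return word. */
static long example_plpar_hcall(unsigned long opcode, unsigned long arg,
				unsigned long *out)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(opcode, retbuf, arg);
	if (rc == H_SUCCESS)
		*out = retbuf[0];

	return rc;
}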
+
+/**
+ * plpar_hcall_raw: - Make a hypervisor call without calculating hcall stats
+ * @opcode: The hypervisor call to make.
+ * @retbuf: Buffer to store up to 4 return arguments in.
+ *
+ * This call supports up to 6 arguments and 4 return arguments. Use
+ * PLPAR_HCALL_BUFSIZE to size the return argument buffer.
+ *
+ * Used when the phyp interface needs to be called in real mode. Similar to
+ * plpar_hcall, but plpar_hcall_raw works in real mode and does not
+ * calculate hypervisor call statistics.
+ */
+long plpar_hcall_raw(unsigned long opcode, unsigned long *retbuf, ...);
+
+/**
+ * plpar_hcall9: - Make a pseries hypervisor call with up to 9 return arguments
+ * @opcode: The hypervisor call to make.
+ * @retbuf: Buffer to store up to 9 return arguments in.
+ *
+ * This call supports up to 9 arguments and 9 return arguments. Use
+ * PLPAR_HCALL9_BUFSIZE to size the return argument buffer.
+ */
+#define PLPAR_HCALL9_BUFSIZE 9
+long plpar_hcall9(unsigned long opcode, unsigned long *retbuf, ...);
+
+/* For hcall instrumentation. One structure per-hcall, per-CPU */
+struct hcall_stats {
+ unsigned long num_calls; /* number of calls (on this CPU) */
+ unsigned long tb_total; /* total wall time (mftb) of calls. */
+ unsigned long purr_total; /* total cpu time (PURR) of calls. */
+};
+#define HCALL_STAT_ARRAY_SIZE ((MAX_HCALL_OPCODE >> 2) + 1)
+
+struct hvcall_mpp_data {
+ unsigned long entitled_mem;
+ unsigned long mapped_mem;
+ unsigned short group_num;
+ unsigned short pool_num;
+ unsigned char mem_weight;
+ unsigned char unallocated_mem_weight;
+ unsigned long unallocated_entitlement; /* value in bytes */
+ unsigned long pool_size;
+ signed long loan_request;
+ unsigned long backing_mem;
+};
+
+int h_get_mpp(struct hvcall_mpp_data *);
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_HVCALL_H */
diff --git a/arch/powerpc/include/asm/hvconsole.h b/arch/powerpc/include/asm/hvconsole.h
new file mode 100644
index 0000000..35ea69e
--- /dev/null
+++ b/arch/powerpc/include/asm/hvconsole.h
@@ -0,0 +1,41 @@
+/*
+ * hvconsole.h
+ * Copyright (C) 2004 Ryan S Arnold, IBM Corporation
+ *
+ * LPAR console support.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _PPC64_HVCONSOLE_H
+#define _PPC64_HVCONSOLE_H
+#ifdef __KERNEL__
+
+/*
+ * PSeries firmware will only send/recv up to 16 bytes of character data per
+ * hcall.
+ */
+#define MAX_VIO_PUT_CHARS 16
+#define SIZE_VIO_GET_CHARS 16
+
+/*
+ * The VIO firmware always attempts to fetch SIZE_VIO_GET_CHARS chars. The
+ * 'count' parm is included to conform to the put_chars() function pointer
+ * template.
+ */
+extern int hvc_get_chars(uint32_t vtermno, char *buf, int count);
+extern int hvc_put_chars(uint32_t vtermno, const char *buf, int count);
+
+#endif /* __KERNEL__ */
+#endif /* _PPC64_HVCONSOLE_H */
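A sketch of honouring the 16-byte per-hcall limit noted above when pushing a larger buffer through hvc_put_chars(); the chunking helper is hypothetical.

/* Illustrative only: write 'count' bytes in MAX_VIO_PUT_CHARS-sized chunks. */
#include <linux/kernel.h>

static int example_put_buffer(uint32_t vtermno, const char *buf, int count)
{
	int written = 0;

	while (written < count) {
		int n = min(count - written, MAX_VIO_PUT_CHARS);
		int rc = hvc_put_chars(vtermno, buf + written, n);

		if (rc <= 0)
			return written ? written : rc;
		written += rc;
	}

	return written;
}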
diff --git a/arch/powerpc/include/asm/hvcserver.h b/arch/powerpc/include/asm/hvcserver.h
new file mode 100644
index 0000000..67d7da3
--- /dev/null
+++ b/arch/powerpc/include/asm/hvcserver.h
@@ -0,0 +1,59 @@
+/*
+ * hvcserver.h
+ * Copyright (C) 2004 Ryan S Arnold, IBM Corporation
+ *
+ * PPC64 virtual I/O console server support.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _PPC64_HVCSERVER_H
+#define _PPC64_HVCSERVER_H
+#ifdef __KERNEL__
+
+#include <linux/list.h>
+
+/* Converged Location Code length */
+#define HVCS_CLC_LENGTH 79
+
+/**
+ * hvcs_partner_info - an element in a list of partner info
+ * @node: list_head denoting this partner_info struct's position in the list of
+ * partner info.
+ * @unit_address: The partner unit address of this entry.
+ * @partition_ID: The partner partition ID of this entry.
+ * @location_code: The converged location code of this entry + 1 char for the
+ * null-term.
+ *
+ * This structure outlines the format in which partner info is presented to a
+ * caller of the hvcs partner info fetching functions. These entries are
+ * strung together into a list using linux kernel lists.
+ */
+struct hvcs_partner_info {
+ struct list_head node;
+ uint32_t unit_address;
+ uint32_t partition_ID;
+ char location_code[HVCS_CLC_LENGTH + 1]; /* CLC + 1 null-term char */
+};
+
+extern int hvcs_free_partner_info(struct list_head *head);
+extern int hvcs_get_partner_info(uint32_t unit_address,
+ struct list_head *head, unsigned long *pi_buff);
+extern int hvcs_register_connection(uint32_t unit_address,
+ uint32_t p_partition_ID, uint32_t p_unit_address);
+extern int hvcs_free_connection(uint32_t unit_address);
+
+#endif /* __KERNEL__ */
+#endif /* _PPC64_HVCSERVER_H */
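A sketch of consuming the list described in the hvcs_partner_info comment above; allocation of pi_buff and error handling are simplified, and the function itself is hypothetical.

/* Illustrative only: print every partner's converged location code. */
#include <linux/kernel.h>
#include <linux/list.h>

static void example_dump_partners(uint32_t unit_address, unsigned long *pi_buff)
{
	struct list_head head;
	struct hvcs_partner_info *pi;

	INIT_LIST_HEAD(&head);
	if (hvcs_get_partner_info(unit_address, &head, pi_buff))
		return;

	list_for_each_entry(pi, &head, node)
		printk(KERN_INFO "partner %u: %s\n",
		       pi->partition_ID, pi->location_code);

	hvcs_free_partner_info(&head);
}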
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
new file mode 100644
index 0000000..f75a5fc
--- /dev/null
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ */
+#ifndef _ASM_POWERPC_HW_IRQ_H
+#define _ASM_POWERPC_HW_IRQ_H
+
+#ifdef __KERNEL__
+
+#include <linux/errno.h>
+#include <linux/compiler.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+
+extern void timer_interrupt(struct pt_regs *);
+
+#ifdef CONFIG_PPC64
+#include <asm/paca.h>
+
+static inline unsigned long local_get_flags(void)
+{
+ unsigned long flags;
+
+ __asm__ __volatile__("lbz %0,%1(13)"
+ : "=r" (flags)
+ : "i" (offsetof(struct paca_struct, soft_enabled)));
+
+ return flags;
+}
+
+static inline unsigned long raw_local_irq_disable(void)
+{
+ unsigned long flags, zero;
+
+ __asm__ __volatile__("li %1,0; lbz %0,%2(13); stb %1,%2(13)"
+ : "=r" (flags), "=&r" (zero)
+ : "i" (offsetof(struct paca_struct, soft_enabled))
+ : "memory");
+
+ return flags;
+}
+
+extern void raw_local_irq_restore(unsigned long);
+extern void iseries_handle_interrupts(void);
+
+#define raw_local_irq_enable() raw_local_irq_restore(1)
+#define raw_local_save_flags(flags) ((flags) = local_get_flags())
+#define raw_local_irq_save(flags) ((flags) = raw_local_irq_disable())
+
+#define raw_irqs_disabled() (local_get_flags() == 0)
+#define raw_irqs_disabled_flags(flags) ((flags) == 0)
+
+#define __hard_irq_enable() __mtmsrd(mfmsr() | MSR_EE, 1)
+#define __hard_irq_disable() __mtmsrd(mfmsr() & ~MSR_EE, 1)
+
+#define hard_irq_disable() \
+ do { \
+ __hard_irq_disable(); \
+ get_paca()->soft_enabled = 0; \
+ get_paca()->hard_enabled = 0; \
+ } while(0)
+
+static inline int irqs_disabled_flags(unsigned long flags)
+{
+ return flags == 0;
+}
+
+#else
+
+#if defined(CONFIG_BOOKE)
+#define SET_MSR_EE(x) mtmsr(x)
+#define local_irq_restore(flags) __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
+#else
+#define SET_MSR_EE(x) mtmsr(x)
+#define local_irq_restore(flags) mtmsr(flags)
+#endif
+
+static inline void local_irq_disable(void)
+{
+#ifdef CONFIG_BOOKE
+ __asm__ __volatile__("wrteei 0": : :"memory");
+#else
+ unsigned long msr;
+ __asm__ __volatile__("": : :"memory");
+ msr = mfmsr();
+ SET_MSR_EE(msr & ~MSR_EE);
+#endif
+}
+
+static inline void local_irq_enable(void)
+{
+#ifdef CONFIG_BOOKE
+ __asm__ __volatile__("wrteei 1": : :"memory");
+#else
+ unsigned long msr;
+ __asm__ __volatile__("": : :"memory");
+ msr = mfmsr();
+ SET_MSR_EE(msr | MSR_EE);
+#endif
+}
+
+static inline void local_irq_save_ptr(unsigned long *flags)
+{
+ unsigned long msr;
+ msr = mfmsr();
+ *flags = msr;
+#ifdef CONFIG_BOOKE
+ __asm__ __volatile__("wrteei 0": : :"memory");
+#else
+ SET_MSR_EE(msr & ~MSR_EE);
+#endif
+ __asm__ __volatile__("": : :"memory");
+}
+
+#define local_save_flags(flags) ((flags) = mfmsr())
+#define local_irq_save(flags) local_irq_save_ptr(&flags)
+#define irqs_disabled() ((mfmsr() & MSR_EE) == 0)
+
+#define hard_irq_enable() local_irq_enable()
+#define hard_irq_disable() local_irq_disable()
+
+static inline int irqs_disabled_flags(unsigned long flags)
+{
+ return (flags & MSR_EE) == 0;
+}
+
+#endif /* CONFIG_PPC64 */
+
+/*
+ * interrupt-retrigger: should we handle this via lost interrupts and IPIs
+ * or should we not care like we do now ? --BenH.
+ */
+struct hw_interrupt_type;
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_HW_IRQ_H */
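For completeness, the usual save/restore shape built on the primitives above; local_irq_save()/local_irq_restore() are the 32-bit macros defined here, while on 64-bit the equivalent generic wrappers sit on top of the raw_ variants.

/* Illustrative only: a short critical section with interrupts disabled. */
static void example_critical_section(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* disable, remembering the previous state */
	/* ... touch state shared with interrupt context ... */
	local_irq_restore(flags);	/* re-enable only if it was enabled before */
}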
diff --git a/arch/powerpc/include/asm/hydra.h b/arch/powerpc/include/asm/hydra.h
new file mode 100644
index 0000000..1ad4eed
--- /dev/null
+++ b/arch/powerpc/include/asm/hydra.h
@@ -0,0 +1,102 @@
+/*
+ * include/asm-ppc/hydra.h -- Mac I/O `Hydra' definitions
+ *
+ * Copyright (C) 1997 Geert Uytterhoeven
+ *
+ * This file is based on the following documentation:
+ *
+ * Macintosh Technology in the Common Hardware Reference Platform
+ * Apple Computer, Inc.
+ *
+ * © Copyright 1995 Apple Computer, Inc. All rights reserved.
+ *
+ * It's available online from http://chrp.apple.com/MacTech.pdf.
+ * You can obtain paper copies of this book from computer bookstores or by
+ * writing Morgan Kaufmann Publishers, Inc., 340 Pine Street, Sixth Floor, San
+ * Francisco, CA 94104. Reference ISBN 1-55860-393-X.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _ASMPPC_HYDRA_H
+#define _ASMPPC_HYDRA_H
+
+#ifdef __KERNEL__
+
+struct Hydra {
+ /* DBDMA Controller Register Space */
+ char Pad1[0x30];
+ u_int CachePD;
+ u_int IDs;
+ u_int Feature_Control;
+ char Pad2[0x7fc4];
+ /* DBDMA Channel Register Space */
+ char SCSI_DMA[0x100];
+ char Pad3[0x300];
+ char SCCA_Tx_DMA[0x100];
+ char SCCA_Rx_DMA[0x100];
+ char SCCB_Tx_DMA[0x100];
+ char SCCB_Rx_DMA[0x100];
+ char Pad4[0x7800];
+ /* Device Register Space */
+ char SCSI[0x1000];
+ char ADB[0x1000];
+ char SCC_Legacy[0x1000];
+ char SCC[0x1000];
+ char Pad9[0x2000];
+ char VIA[0x2000];
+ char Pad10[0x28000];
+ char OpenPIC[0x40000];
+};
+
+extern volatile struct Hydra __iomem *Hydra;
+
+
+ /*
+ * Feature Control Register
+ */
+
+#define HYDRA_FC_SCC_CELL_EN 0x00000001 /* Enable SCC Clock */
+#define HYDRA_FC_SCSI_CELL_EN 0x00000002 /* Enable SCSI Clock */
+#define HYDRA_FC_SCCA_ENABLE 0x00000004 /* Enable SCC A Lines */
+#define HYDRA_FC_SCCB_ENABLE 0x00000008 /* Enable SCC B Lines */
+#define HYDRA_FC_ARB_BYPASS 0x00000010 /* Bypass Internal Arbiter */
+#define HYDRA_FC_RESET_SCC 0x00000020 /* Reset SCC */
+#define HYDRA_FC_MPIC_ENABLE 0x00000040 /* Enable OpenPIC */
+#define HYDRA_FC_SLOW_SCC_PCLK 0x00000080 /* 1=15.6672, 0=25 MHz */
+#define HYDRA_FC_MPIC_IS_MASTER 0x00000100 /* OpenPIC Master Mode */
+
+
+ /*
+ * OpenPIC Interrupt Sources
+ */
+
+#define HYDRA_INT_SIO 0
+#define HYDRA_INT_SCSI_DMA 1
+#define HYDRA_INT_SCCA_TX_DMA 2
+#define HYDRA_INT_SCCA_RX_DMA 3
+#define HYDRA_INT_SCCB_TX_DMA 4
+#define HYDRA_INT_SCCB_RX_DMA 5
+#define HYDRA_INT_SCSI 6
+#define HYDRA_INT_SCCA 7
+#define HYDRA_INT_SCCB 8
+#define HYDRA_INT_VIA 9
+#define HYDRA_INT_ADB 10
+#define HYDRA_INT_ADB_NMI 11
+#define HYDRA_INT_EXT1 12 /* PCI IRQW */
+#define HYDRA_INT_EXT2 13 /* PCI IRQX */
+#define HYDRA_INT_EXT3 14 /* PCI IRQY */
+#define HYDRA_INT_EXT4 15 /* PCI IRQZ */
+#define HYDRA_INT_EXT5 16 /* IDE Primary/Secondary */
+#define HYDRA_INT_EXT6 17 /* IDE Secondary */
+#define HYDRA_INT_EXT7 18 /* Power Off Request */
+#define HYDRA_INT_SPARE 19
+
+extern int hydra_init(void);
+extern void macio_adb_init(void);
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASMPPC_HYDRA_H */
diff --git a/arch/powerpc/include/asm/i8259.h b/arch/powerpc/include/asm/i8259.h
new file mode 100644
index 0000000..105ade2
--- /dev/null
+++ b/arch/powerpc/include/asm/i8259.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_POWERPC_I8259_H
+#define _ASM_POWERPC_I8259_H
+#ifdef __KERNEL__
+
+#include <linux/irq.h>
+
+extern void i8259_init(struct device_node *node, unsigned long intack_addr);
+extern unsigned int i8259_irq(void);
+extern struct irq_host *i8259_get_host(void);
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_I8259_H */
diff --git a/arch/powerpc/include/asm/ibmebus.h b/arch/powerpc/include/asm/ibmebus.h
new file mode 100644
index 0000000..1a9d9ae
--- /dev/null
+++ b/arch/powerpc/include/asm/ibmebus.h
@@ -0,0 +1,60 @@
+/*
+ * IBM PowerPC eBus Infrastructure Support.
+ *
+ * Copyright (c) 2005 IBM Corporation
+ * Joachim Fenkes <fenkes@de.ibm.com>
+ * Heiko J Schick <schickhj@de.ibm.com>
+ *
+ * All rights reserved.
+ *
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ASM_EBUS_H
+#define _ASM_EBUS_H
+#ifdef __KERNEL__
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+
+extern struct bus_type ibmebus_bus_type;
+
+int ibmebus_register_driver(struct of_platform_driver *drv);
+void ibmebus_unregister_driver(struct of_platform_driver *drv);
+
+int ibmebus_request_irq(u32 ist, irq_handler_t handler,
+ unsigned long irq_flags, const char *devname,
+ void *dev_id);
+void ibmebus_free_irq(u32 ist, void *dev_id);
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_EBUS_H */
diff --git a/arch/powerpc/include/asm/ide.h b/arch/powerpc/include/asm/ide.h
new file mode 100644
index 0000000..048480e
--- /dev/null
+++ b/arch/powerpc/include/asm/ide.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 1994-1996 Linus Torvalds & authors
+ *
+ * This file contains the powerpc architecture specific IDE code.
+ */
+#ifndef _ASM_POWERPC_IDE_H
+#define _ASM_POWERPC_IDE_H
+
+#ifdef __KERNEL__
+
+#ifndef __powerpc64__
+#include <linux/sched.h>
+#include <asm/mpc8xx.h>
+#endif
+#include <asm/io.h>
+
+#define __ide_mm_insw(p, a, c) readsw((void __iomem *)(p), (a), (c))
+#define __ide_mm_insl(p, a, c) readsl((void __iomem *)(p), (a), (c))
+#define __ide_mm_outsw(p, a, c) writesw((void __iomem *)(p), (a), (c))
+#define __ide_mm_outsl(p, a, c) writesl((void __iomem *)(p), (a), (c))
+
+#ifndef __powerpc64__
+#include <linux/ioport.h>
+
+/* FIXME: use ide_platform host driver */
+static __inline__ int ide_default_irq(unsigned long base)
+{
+#ifdef CONFIG_PPLUS
+ switch (base) {
+ case 0x1f0: return 14;
+ case 0x170: return 15;
+ }
+#endif
+ return 0;
+}
+
+/* FIXME: use ide_platform host driver */
+static __inline__ unsigned long ide_default_io_base(int index)
+{
+#ifdef CONFIG_PPLUS
+ switch (index) {
+ case 0: return 0x1f0;
+ case 1: return 0x170;
+ }
+#endif
+ return 0;
+}
+
+#ifdef CONFIG_BLK_DEV_MPC8xx_IDE
+#define IDE_ARCH_ACK_INTR 1
+#define ide_ack_intr(hwif) ((hwif)->ack_intr ? (hwif)->ack_intr(hwif) : 1)
+#endif
+
+#endif /* __powerpc64__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_POWERPC_IDE_H */
diff --git a/arch/powerpc/include/asm/immap_86xx.h b/arch/powerpc/include/asm/immap_86xx.h
new file mode 100644
index 0000000..0f165e5
--- /dev/null
+++ b/arch/powerpc/include/asm/immap_86xx.h
@@ -0,0 +1,156 @@
+/**
+ * MPC86xx Internal Memory Map
+ *
+ * Authors: Jeff Brown
+ * Timur Tabi <timur@freescale.com>
+ *
+ * Copyright 2004,2007 Freescale Semiconductor, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This header file defines structures for various 86xx SOC devices that are
+ * used by multiple source files.
+ */
+
+#ifndef __ASM_POWERPC_IMMAP_86XX_H__
+#define __ASM_POWERPC_IMMAP_86XX_H__
+#ifdef __KERNEL__
+
+/* Global Utility Registers */
+struct ccsr_guts {
+ __be32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */
+ __be32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */
+ __be32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and Control Register */
+ __be32 pordevsr; /* 0x.000c - POR I/O Device Status Register */
+ __be32 pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */
+ u8 res1[0x20 - 0x14];
+ __be32 porcir; /* 0x.0020 - POR Configuration Information Register */
+ u8 res2[0x30 - 0x24];
+ __be32 gpiocr; /* 0x.0030 - GPIO Control Register */
+ u8 res3[0x40 - 0x34];
+ __be32 gpoutdr; /* 0x.0040 - General-Purpose Output Data Register */
+ u8 res4[0x50 - 0x44];
+ __be32 gpindr; /* 0x.0050 - General-Purpose Input Data Register */
+ u8 res5[0x60 - 0x54];
+ __be32 pmuxcr; /* 0x.0060 - Alternate Function Signal Multiplex Control */
+ u8 res6[0x70 - 0x64];
+ __be32 devdisr; /* 0x.0070 - Device Disable Control */
+ __be32 devdisr2; /* 0x.0074 - Device Disable Control 2 */
+ u8 res7[0x80 - 0x78];
+ __be32 powmgtcsr; /* 0x.0080 - Power Management Status and Control Register */
+ u8 res8[0x90 - 0x84];
+ __be32 mcpsumr; /* 0x.0090 - Machine Check Summary Register */
+ __be32 rstrscr; /* 0x.0094 - Reset Request Status and Control Register */
+ u8 res9[0xA0 - 0x98];
+ __be32 pvr; /* 0x.00a0 - Processor Version Register */
+ __be32 svr; /* 0x.00a4 - System Version Register */
+ u8 res10[0xB0 - 0xA8];
+ __be32 rstcr; /* 0x.00b0 - Reset Control Register */
+ u8 res11[0xC0 - 0xB4];
+ __be32 elbcvselcr; /* 0x.00c0 - eLBC Voltage Select Ctrl Reg */
+ u8 res12[0x800 - 0xC4];
+ __be32 clkdvdr; /* 0x.0800 - Clock Divide Register */
+ u8 res13[0x900 - 0x804];
+ __be32 ircr; /* 0x.0900 - Infrared Control Register */
+ u8 res14[0x908 - 0x904];
+ __be32 dmacr; /* 0x.0908 - DMA Control Register */
+ u8 res15[0x914 - 0x90C];
+ __be32 elbccr; /* 0x.0914 - eLBC Control Register */
+ u8 res16[0xB20 - 0x918];
+ __be32 ddr1clkdr; /* 0x.0b20 - DDR1 Clock Disable Register */
+ __be32 ddr2clkdr; /* 0x.0b24 - DDR2 Clock Disable Register */
+ __be32 ddrclkdr; /* 0x.0b28 - DDR Clock Disable Register */
+ u8 res17[0xE00 - 0xB2C];
+ __be32 clkocr; /* 0x.0e00 - Clock Out Select Register */
+ u8 res18[0xE10 - 0xE04];
+ __be32 ddrdllcr; /* 0x.0e10 - DDR DLL Control Register */
+ u8 res19[0xE20 - 0xE14];
+ __be32 lbcdllcr; /* 0x.0e20 - LBC DLL Control Register */
+ u8 res20[0xF04 - 0xE24];
+ __be32 srds1cr0; /* 0x.0f04 - SerDes1 Control Register 0 */
+ __be32 srds1cr1; /* 0x.0f08 - SerDes1 Control Register 1 */
+ u8 res21[0xF40 - 0xF0C];
+ __be32 srds2cr0; /* 0x.0f40 - SerDes2 Control Register 0 */
+ __be32 srds2cr1; /* 0x.0f44 - SerDes2 Control Register 1 */
+} __attribute__ ((packed));
+
+#define CCSR_GUTS_DMACR_DEV_SSI 0 /* DMA controller/channel set to SSI */
+#define CCSR_GUTS_DMACR_DEV_IR 1 /* DMA controller/channel set to IR */
+
+/*
+ * Set the DMACR register in the GUTS
+ *
+ * The DMACR register determines the source of initiated transfers for each
+ * channel on each DMA controller. Rather than have a bunch of repetitive
+ * macros for the bit patterns, we just have a function that calculates
+ * them.
+ *
+ * guts: Pointer to GUTS structure
+ * co: The DMA controller (0 or 1)
+ * ch: The channel on the DMA controller (0, 1, 2, or 3)
+ * device: The device to set as the source (CCSR_GUTS_DMACR_DEV_xx)
+ */
+static inline void guts_set_dmacr(struct ccsr_guts __iomem *guts,
+ unsigned int co, unsigned int ch, unsigned int device)
+{
+ unsigned int shift = 16 + (8 * (1 - co) + 2 * (3 - ch));
+
+ clrsetbits_be32(&guts->dmacr, 3 << shift, device << shift);
+}
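A one-call usage sketch matching the shift computed above; the controller and channel numbers are illustrative, and 'guts' is assumed to be an already-mapped pointer to the GUTS block.

/* Illustrative only: route DMA controller 0, channel 1 to the SSI.
 * shift = 16 + (8 * (1 - 0) + 2 * (3 - 1)) = 28, i.e. bits 29:28 of DMACR. */
static void example_route_dma_to_ssi(struct ccsr_guts __iomem *guts)
{
	guts_set_dmacr(guts, 0, 1, CCSR_GUTS_DMACR_DEV_SSI);
}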
+
+#define CCSR_GUTS_PMUXCR_LDPSEL 0x00010000
+#define CCSR_GUTS_PMUXCR_SSI1_MASK 0x0000C000 /* Bitmask for SSI1 */
+#define CCSR_GUTS_PMUXCR_SSI1_LA 0x00000000 /* Latched address */
+#define CCSR_GUTS_PMUXCR_SSI1_HI 0x00004000 /* High impedance */
+#define CCSR_GUTS_PMUXCR_SSI1_SSI 0x00008000 /* Used for SSI1 */
+#define CCSR_GUTS_PMUXCR_SSI2_MASK 0x00003000 /* Bitmask for SSI2 */
+#define CCSR_GUTS_PMUXCR_SSI2_LA 0x00000000 /* Latched address */
+#define CCSR_GUTS_PMUXCR_SSI2_HI 0x00001000 /* High impedance */
+#define CCSR_GUTS_PMUXCR_SSI2_SSI 0x00002000 /* Used for SSI2 */
+#define CCSR_GUTS_PMUXCR_LA_22_25_LA 0x00000000 /* Latched Address */
+#define CCSR_GUTS_PMUXCR_LA_22_25_HI 0x00000400 /* High impedance */
+#define CCSR_GUTS_PMUXCR_DBGDRV 0x00000200 /* Signals not driven */
+#define CCSR_GUTS_PMUXCR_DMA2_0 0x00000008
+#define CCSR_GUTS_PMUXCR_DMA2_3 0x00000004
+#define CCSR_GUTS_PMUXCR_DMA1_0 0x00000002
+#define CCSR_GUTS_PMUXCR_DMA1_3 0x00000001
+
+/*
+ * Set the DMA external control bits in the GUTS
+ *
+ * The DMA external control bits in the PMUXCR are only meaningful for
+ * channels 0 and 3. Any other channels are ignored.
+ *
+ * guts: Pointer to GUTS structure
+ * co: The DMA controller (0 or 1)
+ * ch: The channel on the DMA controller (0, 1, 2, or 3)
+ * value: the new value for the bit (0 or 1)
+ */
+static inline void guts_set_pmuxcr_dma(struct ccsr_guts __iomem *guts,
+ unsigned int co, unsigned int ch, unsigned int value)
+{
+ if ((ch == 0) || (ch == 3)) {
+ unsigned int shift = 2 * (co + 1) - (ch & 1) - 1;
+
+ clrsetbits_be32(&guts->pmuxcr, 1 << shift, value << shift);
+ }
+}
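And the matching external-control sketch; as the comment above notes, only channels 0 and 3 have a PMUXCR bit, so other channels would be silently ignored.

/* Illustrative only: set the DMA2 channel 0 external control bit.
 * shift = 2 * (1 + 1) - (0 & 1) - 1 = 3, i.e. CCSR_GUTS_PMUXCR_DMA2_0. */
static void example_enable_dma2_ch0_external(struct ccsr_guts __iomem *guts)
{
	guts_set_pmuxcr_dma(guts, 1, 0, 1);
}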
+
+#define CCSR_GUTS_CLKDVDR_PXCKEN 0x80000000
+#define CCSR_GUTS_CLKDVDR_SSICKEN 0x20000000
+#define CCSR_GUTS_CLKDVDR_PXCKINV 0x10000000
+#define CCSR_GUTS_CLKDVDR_PXCKDLY_SHIFT 25
+#define CCSR_GUTS_CLKDVDR_PXCKDLY_MASK 0x06000000
+#define CCSR_GUTS_CLKDVDR_PXCKDLY(x) \
+ (((x) & 3) << CCSR_GUTS_CLKDVDR_PXCKDLY_SHIFT)
+#define CCSR_GUTS_CLKDVDR_PXCLK_SHIFT 16
+#define CCSR_GUTS_CLKDVDR_PXCLK_MASK 0x001F0000
+#define CCSR_GUTS_CLKDVDR_PXCLK(x) (((x) & 31) << CCSR_GUTS_CLKDVDR_PXCLK_SHIFT)
+#define CCSR_GUTS_CLKDVDR_SSICLK_MASK 0x000000FF
+#define CCSR_GUTS_CLKDVDR_SSICLK(x) ((x) & CCSR_GUTS_CLKDVDR_SSICLK_MASK)
+
+#endif /* __ASM_POWERPC_IMMAP_86XX_H__ */
+#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/immap_cpm2.h b/arch/powerpc/include/asm/immap_cpm2.h
new file mode 100644
index 0000000..4080bab
--- /dev/null
+++ b/arch/powerpc/include/asm/immap_cpm2.h
@@ -0,0 +1,650 @@
+/*
+ * CPM2 Internal Memory Map
+ * Copyright (c) 1999 Dan Malek (dmalek@jlc.net)
+ *
+ * The Internal Memory Map for devices with CPM2 on them. This
+ * is the superset of all CPM2 devices (8260, 8266, 8280, 8272,
+ * 8560).
+ */
+#ifdef __KERNEL__
+#ifndef __IMMAP_CPM2__
+#define __IMMAP_CPM2__
+
+#include <linux/types.h>
+
+/* System configuration registers.
+*/
+typedef struct sys_82xx_conf {
+ u32 sc_siumcr;
+ u32 sc_sypcr;
+ u8 res1[6];
+ u16 sc_swsr;
+ u8 res2[20];
+ u32 sc_bcr;
+ u8 sc_ppc_acr;
+ u8 res3[3];
+ u32 sc_ppc_alrh;
+ u32 sc_ppc_alrl;
+ u8 sc_lcl_acr;
+ u8 res4[3];
+ u32 sc_lcl_alrh;
+ u32 sc_lcl_alrl;
+ u32 sc_tescr1;
+ u32 sc_tescr2;
+ u32 sc_ltescr1;
+ u32 sc_ltescr2;
+ u32 sc_pdtea;
+ u8 sc_pdtem;
+ u8 res5[3];
+ u32 sc_ldtea;
+ u8 sc_ldtem;
+ u8 res6[163];
+} sysconf_82xx_cpm2_t;
+
+typedef struct sys_85xx_conf {
+ u32 sc_cear;
+ u16 sc_ceer;
+ u16 sc_cemr;
+ u8 res1[70];
+ u32 sc_smaer;
+ u8 res2[4];
+ u32 sc_smevr;
+ u32 sc_smctr;
+ u32 sc_lmaer;
+ u8 res3[4];
+ u32 sc_lmevr;
+ u32 sc_lmctr;
+ u8 res4[144];
+} sysconf_85xx_cpm2_t;
+
+typedef union sys_conf {
+ sysconf_82xx_cpm2_t siu_82xx;
+ sysconf_85xx_cpm2_t siu_85xx;
+} sysconf_cpm2_t;
+
+
+
+/* Memory controller registers.
+*/
+typedef struct mem_ctlr {
+ u32 memc_br0;
+ u32 memc_or0;
+ u32 memc_br1;
+ u32 memc_or1;
+ u32 memc_br2;
+ u32 memc_or2;
+ u32 memc_br3;
+ u32 memc_or3;
+ u32 memc_br4;
+ u32 memc_or4;
+ u32 memc_br5;
+ u32 memc_or5;
+ u32 memc_br6;
+ u32 memc_or6;
+ u32 memc_br7;
+ u32 memc_or7;
+ u32 memc_br8;
+ u32 memc_or8;
+ u32 memc_br9;
+ u32 memc_or9;
+ u32 memc_br10;
+ u32 memc_or10;
+ u32 memc_br11;
+ u32 memc_or11;
+ u8 res1[8];
+ u32 memc_mar;
+ u8 res2[4];
+ u32 memc_mamr;
+ u32 memc_mbmr;
+ u32 memc_mcmr;
+ u8 res3[8];
+ u16 memc_mptpr;
+ u8 res4[2];
+ u32 memc_mdr;
+ u8 res5[4];
+ u32 memc_psdmr;
+ u32 memc_lsdmr;
+ u8 memc_purt;
+ u8 res6[3];
+ u8 memc_psrt;
+ u8 res7[3];
+ u8 memc_lurt;
+ u8 res8[3];
+ u8 memc_lsrt;
+ u8 res9[3];
+ u32 memc_immr;
+ u32 memc_pcibr0;
+ u32 memc_pcibr1;
+ u8 res10[16];
+ u32 memc_pcimsk0;
+ u32 memc_pcimsk1;
+ u8 res11[52];
+} memctl_cpm2_t;
+
+/* System Integration Timers.
+*/
+typedef struct sys_int_timers {
+ u8 res1[32];
+ u16 sit_tmcntsc;
+ u8 res2[2];
+ u32 sit_tmcnt;
+ u8 res3[4];
+ u32 sit_tmcntal;
+ u8 res4[16];
+ u16 sit_piscr;
+ u8 res5[2];
+ u32 sit_pitc;
+ u32 sit_pitr;
+ u8 res6[94];
+ u8 res7[390];
+} sit_cpm2_t;
+
+#define PISCR_PIRQ_MASK ((u16)0xff00)
+#define PISCR_PS ((u16)0x0080)
+#define PISCR_PIE ((u16)0x0004)
+#define PISCR_PTF ((u16)0x0002)
+#define PISCR_PTE ((u16)0x0001)
+
+/* PCI Controller.
+*/
+typedef struct pci_ctlr {
+ u32 pci_omisr;
+ u32 pci_omimr;
+ u8 res1[8];
+ u32 pci_ifqpr;
+ u32 pci_ofqpr;
+ u8 res2[8];
+ u32 pci_imr0;
+ u32 pci_imr1;
+ u32 pci_omr0;
+ u32 pci_omr1;
+ u32 pci_odr;
+ u8 res3[4];
+ u32 pci_idr;
+ u8 res4[20];
+ u32 pci_imisr;
+ u32 pci_imimr;
+ u8 res5[24];
+ u32 pci_ifhpr;
+ u8 res6[4];
+ u32 pci_iftpr;
+ u8 res7[4];
+ u32 pci_iphpr;
+ u8 res8[4];
+ u32 pci_iptpr;
+ u8 res9[4];
+ u32 pci_ofhpr;
+ u8 res10[4];
+ u32 pci_oftpr;
+ u8 res11[4];
+ u32 pci_ophpr;
+ u8 res12[4];
+ u32 pci_optpr;
+ u8 res13[8];
+ u32 pci_mucr;
+ u8 res14[8];
+ u32 pci_qbar;
+ u8 res15[12];
+ u32 pci_dmamr0;
+ u32 pci_dmasr0;
+ u32 pci_dmacdar0;
+ u8 res16[4];
+ u32 pci_dmasar0;
+ u8 res17[4];
+ u32 pci_dmadar0;
+ u8 res18[4];
+ u32 pci_dmabcr0;
+ u32 pci_dmandar0;
+ u8 res19[86];
+ u32 pci_dmamr1;
+ u32 pci_dmasr1;
+ u32 pci_dmacdar1;
+ u8 res20[4];
+ u32 pci_dmasar1;
+ u8 res21[4];
+ u32 pci_dmadar1;
+ u8 res22[4];
+ u32 pci_dmabcr1;
+ u32 pci_dmandar1;
+ u8 res23[88];
+ u32 pci_dmamr2;
+ u32 pci_dmasr2;
+ u32 pci_dmacdar2;
+ u8 res24[4];
+ u32 pci_dmasar2;
+ u8 res25[4];
+ u32 pci_dmadar2;
+ u8 res26[4];
+ u32 pci_dmabcr2;
+ u32 pci_dmandar2;
+ u8 res27[88];
+ u32 pci_dmamr3;
+ u32 pci_dmasr3;
+ u32 pci_dmacdar3;
+ u8 res28[4];
+ u32 pci_dmasar3;
+ u8 res29[4];
+ u32 pci_dmadar3;
+ u8 res30[4];
+ u32 pci_dmabcr3;
+ u32 pci_dmandar3;
+ u8 res31[344];
+ u32 pci_potar0;
+ u8 res32[4];
+ u32 pci_pobar0;
+ u8 res33[4];
+ u32 pci_pocmr0;
+ u8 res34[4];
+ u32 pci_potar1;
+ u8 res35[4];
+ u32 pci_pobar1;
+ u8 res36[4];
+ u32 pci_pocmr1;
+ u8 res37[4];
+ u32 pci_potar2;
+ u8 res38[4];
+ u32 pci_pobar2;
+ u8 res39[4];
+ u32 pci_pocmr2;
+ u8 res40[50];
+ u32 pci_ptcr;
+ u32 pci_gpcr;
+ u32 pci_gcr;
+ u32 pci_esr;
+ u32 pci_emr;
+ u32 pci_ecr;
+ u32 pci_eacr;
+ u8 res41[4];
+ u32 pci_edcr;
+ u8 res42[4];
+ u32 pci_eccr;
+ u8 res43[44];
+ u32 pci_pitar1;
+ u8 res44[4];
+ u32 pci_pibar1;
+ u8 res45[4];
+ u32 pci_picmr1;
+ u8 res46[4];
+ u32 pci_pitar0;
+ u8 res47[4];
+ u32 pci_pibar0;
+ u8 res48[4];
+ u32 pci_picmr0;
+ u8 res49[4];
+ u32 pci_cfg_addr;
+ u32 pci_cfg_data;
+ u32 pci_int_ack;
+ u8 res50[756];
+} pci_cpm2_t;
+
+/* Interrupt Controller.
+*/
+typedef struct interrupt_controller {
+ u16 ic_sicr;
+ u8 res1[2];
+ u32 ic_sivec;
+ u32 ic_sipnrh;
+ u32 ic_sipnrl;
+ u32 ic_siprr;
+ u32 ic_scprrh;
+ u32 ic_scprrl;
+ u32 ic_simrh;
+ u32 ic_simrl;
+ u32 ic_siexr;
+ u8 res2[88];
+} intctl_cpm2_t;
+
+/* Clocks and Reset.
+*/
+typedef struct clk_and_reset {
+ u32 car_sccr;
+ u8 res1[4];
+ u32 car_scmr;
+ u8 res2[4];
+ u32 car_rsr;
+ u32 car_rmr;
+ u8 res[104];
+} car_cpm2_t;
+
+/* Input/Output Port control/status registers.
+ * Names consistent with processor manual, although they are different
+ * from the original 8xx names.......
+ */
+typedef struct io_port {
+ u32 iop_pdira;
+ u32 iop_ppara;
+ u32 iop_psora;
+ u32 iop_podra;
+ u32 iop_pdata;
+ u8 res1[12];
+ u32 iop_pdirb;
+ u32 iop_pparb;
+ u32 iop_psorb;
+ u32 iop_podrb;
+ u32 iop_pdatb;
+ u8 res2[12];
+ u32 iop_pdirc;
+ u32 iop_pparc;
+ u32 iop_psorc;
+ u32 iop_podrc;
+ u32 iop_pdatc;
+ u8 res3[12];
+ u32 iop_pdird;
+ u32 iop_ppard;
+ u32 iop_psord;
+ u32 iop_podrd;
+ u32 iop_pdatd;
+ u8 res4[12];
+} iop_cpm2_t;
+
+/* Communication Processor Module Timers
+*/
+typedef struct cpm_timers {
+ u8 cpmt_tgcr1;
+ u8 res1[3];
+ u8 cpmt_tgcr2;
+ u8 res2[11];
+ u16 cpmt_tmr1;
+ u16 cpmt_tmr2;
+ u16 cpmt_trr1;
+ u16 cpmt_trr2;
+ u16 cpmt_tcr1;
+ u16 cpmt_tcr2;
+ u16 cpmt_tcn1;
+ u16 cpmt_tcn2;
+ u16 cpmt_tmr3;
+ u16 cpmt_tmr4;
+ u16 cpmt_trr3;
+ u16 cpmt_trr4;
+ u16 cpmt_tcr3;
+ u16 cpmt_tcr4;
+ u16 cpmt_tcn3;
+ u16 cpmt_tcn4;
+ u16 cpmt_ter1;
+ u16 cpmt_ter2;
+ u16 cpmt_ter3;
+ u16 cpmt_ter4;
+ u8 res3[584];
+} cpmtimer_cpm2_t;
+
+/* DMA control/status registers.
+*/
+typedef struct sdma_csr {
+ u8 res0[24];
+ u8 sdma_sdsr;
+ u8 res1[3];
+ u8 sdma_sdmr;
+ u8 res2[3];
+ u8 sdma_idsr1;
+ u8 res3[3];
+ u8 sdma_idmr1;
+ u8 res4[3];
+ u8 sdma_idsr2;
+ u8 res5[3];
+ u8 sdma_idmr2;
+ u8 res6[3];
+ u8 sdma_idsr3;
+ u8 res7[3];
+ u8 sdma_idmr3;
+ u8 res8[3];
+ u8 sdma_idsr4;
+ u8 res9[3];
+ u8 sdma_idmr4;
+ u8 res10[707];
+} sdma_cpm2_t;
+
+/* Fast controllers
+*/
+typedef struct fcc {
+ u32 fcc_gfmr;
+ u32 fcc_fpsmr;
+ u16 fcc_ftodr;
+ u8 res1[2];
+ u16 fcc_fdsr;
+ u8 res2[2];
+ u16 fcc_fcce;
+ u8 res3[2];
+ u16 fcc_fccm;
+ u8 res4[2];
+ u8 fcc_fccs;
+ u8 res5[3];
+ u8 fcc_ftirr_phy[4];
+} fcc_t;
+
+/* Fast controllers continued
+ */
+typedef struct fcc_c {
+ u32 fcc_firper;
+ u32 fcc_firer;
+ u32 fcc_firsr_hi;
+ u32 fcc_firsr_lo;
+ u8 fcc_gfemr;
+ u8 res1[15];
+} fcc_c_t;
+
+/* TC Layer
+ */
+typedef struct tclayer {
+ u16 tc_tcmode;
+ u16 tc_cdsmr;
+ u16 tc_tcer;
+ u16 tc_rcc;
+ u16 tc_tcmr;
+ u16 tc_fcc;
+ u16 tc_ccc;
+ u16 tc_icc;
+ u16 tc_tcc;
+ u16 tc_ecc;
+ u8 res1[12];
+} tclayer_t;
+
+
+/* I2C
+*/
+typedef struct i2c {
+ u8 i2c_i2mod;
+ u8 res1[3];
+ u8 i2c_i2add;
+ u8 res2[3];
+ u8 i2c_i2brg;
+ u8 res3[3];
+ u8 i2c_i2com;
+ u8 res4[3];
+ u8 i2c_i2cer;
+ u8 res5[3];
+ u8 i2c_i2cmr;
+ u8 res6[331];
+} i2c_cpm2_t;
+
+typedef struct scc { /* Serial communication channels */
+ u32 scc_gsmrl;
+ u32 scc_gsmrh;
+ u16 scc_psmr;
+ u8 res1[2];
+ u16 scc_todr;
+ u16 scc_dsr;
+ u16 scc_scce;
+ u8 res2[2];
+ u16 scc_sccm;
+ u8 res3;
+ u8 scc_sccs;
+ u8 res4[8];
+} scc_t;
+
+typedef struct smc { /* Serial management channels */
+ u8 res1[2];
+ u16 smc_smcmr;
+ u8 res2[2];
+ u8 smc_smce;
+ u8 res3[3];
+ u8 smc_smcm;
+ u8 res4[5];
+} smc_t;
+
+/* Serial Peripheral Interface.
+*/
+typedef struct spi_ctrl {
+ u16 spi_spmode;
+ u8 res1[4];
+ u8 spi_spie;
+ u8 res2[3];
+ u8 spi_spim;
+ u8 res3[2];
+ u8 spi_spcom;
+ u8 res4[82];
+} spictl_cpm2_t;
+
+/* CPM Mux.
+*/
+typedef struct cpmux {
+ u8 cmx_si1cr;
+ u8 res1;
+ u8 cmx_si2cr;
+ u8 res2;
+ u32 cmx_fcr;
+ u32 cmx_scr;
+ u8 cmx_smr;
+ u8 res3;
+ u16 cmx_uar;
+ u8 res4[16];
+} cpmux_t;
+
+/* SIRAM control
+*/
+typedef struct siram {
+ u16 si_amr;
+ u16 si_bmr;
+ u16 si_cmr;
+ u16 si_dmr;
+ u8 si_gmr;
+ u8 res1;
+ u8 si_cmdr;
+ u8 res2;
+ u8 si_str;
+ u8 res3;
+ u16 si_rsr;
+} siramctl_t;
+
+typedef struct mcc {
+ u16 mcc_mcce;
+ u8 res1[2];
+ u16 mcc_mccm;
+ u8 res2[2];
+ u8 mcc_mccf;
+ u8 res3[7];
+} mcc_t;
+
+typedef struct comm_proc {
+ u32 cp_cpcr;
+ u32 cp_rccr;
+ u8 res1[14];
+ u16 cp_rter;
+ u8 res2[2];
+ u16 cp_rtmr;
+ u16 cp_rtscr;
+ u8 res3[2];
+ u32 cp_rtsr;
+ u8 res4[12];
+} cpm_cpm2_t;
+
+/* USB Controller.
+*/
+typedef struct usb_ctlr {
+ u8 usb_usmod;
+ u8 usb_usadr;
+ u8 usb_uscom;
+ u8 res1[1];
+ u16 usb_usep1;
+ u16 usb_usep2;
+ u16 usb_usep3;
+ u16 usb_usep4;
+ u8 res2[4];
+ u16 usb_usber;
+ u8 res3[2];
+ u16 usb_usbmr;
+ u8 usb_usbs;
+ u8 res4[7];
+} usb_cpm2_t;
+
+/* ...and the whole thing wrapped up....
+*/
+
+typedef struct immap {
+ /* Some references are into the unique and known dpram spaces,
+ * others are from the generic base.
+ */
+#define im_dprambase im_dpram1
+ u8 im_dpram1[16*1024];
+ u8 res1[16*1024];
+ u8 im_dpram2[4*1024];
+ u8 res2[8*1024];
+ u8 im_dpram3[4*1024];
+ u8 res3[16*1024];
+
+ sysconf_cpm2_t im_siu_conf; /* SIU Configuration */
+ memctl_cpm2_t im_memctl; /* Memory Controller */
+ sit_cpm2_t im_sit; /* System Integration Timers */
+ pci_cpm2_t im_pci; /* PCI Controller */
+ intctl_cpm2_t im_intctl; /* Interrupt Controller */
+ car_cpm2_t im_clkrst; /* Clocks and reset */
+ iop_cpm2_t im_ioport; /* IO Port control/status */
+ cpmtimer_cpm2_t im_cpmtimer; /* CPM timers */
+ sdma_cpm2_t im_sdma; /* SDMA control/status */
+
+ fcc_t im_fcc[3]; /* Three FCCs */
+ u8 res4z[32];
+ fcc_c_t im_fcc_c[3]; /* Continued FCCs */
+
+ u8 res4[32];
+
+ tclayer_t im_tclayer[8]; /* Eight TCLayers */
+ u16 tc_tcgsr;
+ u16 tc_tcger;
+
+ /* First set of baud rate generators.
+ */
+ u8 res[236];
+ u32 im_brgc5;
+ u32 im_brgc6;
+ u32 im_brgc7;
+ u32 im_brgc8;
+
+ u8 res5[608];
+
+ i2c_cpm2_t im_i2c; /* I2C control/status */
+ cpm_cpm2_t im_cpm; /* Communication processor */
+
+ /* Second set of baud rate generators.
+ */
+ u32 im_brgc1;
+ u32 im_brgc2;
+ u32 im_brgc3;
+ u32 im_brgc4;
+
+ scc_t im_scc[4]; /* Four SCCs */
+ smc_t im_smc[2]; /* Couple of SMCs */
+ spictl_cpm2_t im_spi; /* A SPI */
+ cpmux_t im_cpmux; /* CPM clock route mux */
+ siramctl_t im_siramctl1; /* First SI RAM Control */
+ mcc_t im_mcc1; /* First MCC */
+ siramctl_t im_siramctl2; /* Second SI RAM Control */
+ mcc_t im_mcc2; /* Second MCC */
+ usb_cpm2_t im_usb; /* USB Controller */
+
+ u8 res6[1153];
+
+ u16 im_si1txram[256];
+ u8 res7[512];
+ u16 im_si1rxram[256];
+ u8 res8[512];
+ u16 im_si2txram[256];
+ u8 res9[512];
+ u16 im_si2rxram[256];
+ u8 res10[512];
+ u8 res11[4096];
+} cpm2_map_t;
+
+extern cpm2_map_t __iomem *cpm2_immr;
+
+#endif /* __IMMAP_CPM2__ */
+#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/immap_qe.h b/arch/powerpc/include/asm/immap_qe.h
new file mode 100644
index 0000000..3c2fced
--- /dev/null
+++ b/arch/powerpc/include/asm/immap_qe.h
@@ -0,0 +1,483 @@
+/*
+ * QUICC Engine (QE) Internal Memory Map.
+ * The Internal Memory Map for devices with QE on them. This
+ * is the superset of all QE devices (8360, etc.).
+ *
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors: Shlomi Gridish <gridish@freescale.com>
+ * Li Yang <leoli@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#ifndef _ASM_POWERPC_IMMAP_QE_H
+#define _ASM_POWERPC_IMMAP_QE_H
+#ifdef __KERNEL__
+
+#include <linux/kernel.h>
+#include <asm/io.h>
+
+#define QE_IMMAP_SIZE (1024 * 1024) /* 1MB from 1MB+IMMR */
+
+/* QE I-RAM */
+struct qe_iram {
+ __be32 iadd; /* I-RAM Address Register */
+ __be32 idata; /* I-RAM Data Register */
+ u8 res0[0x78];
+} __attribute__ ((packed));
+
+/* QE Interrupt Controller */
+struct qe_ic_regs {
+ __be32 qicr;
+ __be32 qivec;
+ __be32 qripnr;
+ __be32 qipnr;
+ __be32 qipxcc;
+ __be32 qipycc;
+ __be32 qipwcc;
+ __be32 qipzcc;
+ __be32 qimr;
+ __be32 qrimr;
+ __be32 qicnr;
+ u8 res0[0x4];
+ __be32 qiprta;
+ __be32 qiprtb;
+ u8 res1[0x4];
+ __be32 qricr;
+ u8 res2[0x20];
+ __be32 qhivec;
+ u8 res3[0x1C];
+} __attribute__ ((packed));
+
+/* Communications Processor */
+struct cp_qe {
+ __be32 cecr; /* QE command register */
+ __be32 ceccr; /* QE controller configuration register */
+ __be32 cecdr; /* QE command data register */
+ u8 res0[0xA];
+ __be16 ceter; /* QE timer event register */
+ u8 res1[0x2];
+ __be16 cetmr; /* QE timers mask register */
+ __be32 cetscr; /* QE time-stamp timer control register */
+ __be32 cetsr1; /* QE time-stamp register 1 */
+ __be32 cetsr2; /* QE time-stamp register 2 */
+ u8 res2[0x8];
+ __be32 cevter; /* QE virtual tasks event register */
+ __be32 cevtmr; /* QE virtual tasks mask register */
+ __be16 cercr; /* QE RAM control register */
+ u8 res3[0x2];
+ u8 res4[0x24];
+ __be16 ceexe1; /* QE external request 1 event register */
+ u8 res5[0x2];
+ __be16 ceexm1; /* QE external request 1 mask register */
+ u8 res6[0x2];
+ __be16 ceexe2; /* QE external request 2 event register */
+ u8 res7[0x2];
+ __be16 ceexm2; /* QE external request 2 mask register */
+ u8 res8[0x2];
+ __be16 ceexe3; /* QE external request 3 event register */
+ u8 res9[0x2];
+ __be16 ceexm3; /* QE external request 3 mask register */
+ u8 res10[0x2];
+ __be16 ceexe4; /* QE external request 4 event register */
+ u8 res11[0x2];
+ __be16 ceexm4; /* QE external request 4 mask register */
+ u8 res12[0x3A];
+ __be32 ceurnr; /* QE microcode revision number register */
+ u8 res13[0x244];
+} __attribute__ ((packed));
+
+/* QE Multiplexer */
+struct qe_mux {
+ __be32 cmxgcr; /* CMX general clock route register */
+ __be32 cmxsi1cr_l; /* CMX SI1 clock route low register */
+ __be32 cmxsi1cr_h; /* CMX SI1 clock route high register */
+ __be32 cmxsi1syr; /* CMX SI1 SYNC route register */
+ __be32 cmxucr[4]; /* CMX UCCx clock route registers */
+ __be32 cmxupcr; /* CMX UPC clock route register */
+ u8 res0[0x1C];
+} __attribute__ ((packed));
+
+/* QE Timers */
+struct qe_timers {
+ u8 gtcfr1; /* Timer 1 and Timer 2 global config register*/
+ u8 res0[0x3];
+ u8 gtcfr2; /* Timer 3 and timer 4 global config register*/
+ u8 res1[0xB];
+ __be16 gtmdr1; /* Timer 1 mode register */
+ __be16 gtmdr2; /* Timer 2 mode register */
+ __be16 gtrfr1; /* Timer 1 reference register */
+ __be16 gtrfr2; /* Timer 2 reference register */
+ __be16 gtcpr1; /* Timer 1 capture register */
+ __be16 gtcpr2; /* Timer 2 capture register */
+ __be16 gtcnr1; /* Timer 1 counter */
+ __be16 gtcnr2; /* Timer 2 counter */
+ __be16 gtmdr3; /* Timer 3 mode register */
+ __be16 gtmdr4; /* Timer 4 mode register */
+ __be16 gtrfr3; /* Timer 3 reference register */
+ __be16 gtrfr4; /* Timer 4 reference register */
+ __be16 gtcpr3; /* Timer 3 capture register */
+ __be16 gtcpr4; /* Timer 4 capture register */
+ __be16 gtcnr3; /* Timer 3 counter */
+ __be16 gtcnr4; /* Timer 4 counter */
+ __be16 gtevr1; /* Timer 1 event register */
+ __be16 gtevr2; /* Timer 2 event register */
+ __be16 gtevr3; /* Timer 3 event register */
+ __be16 gtevr4; /* Timer 4 event register */
+ __be16 gtps; /* Timer 1 prescale register */
+ u8 res2[0x46];
+} __attribute__ ((packed));
+
+/* BRG */
+struct qe_brg {
+ __be32 brgc[16]; /* BRG configuration registers */
+ u8 res0[0x40];
+} __attribute__ ((packed));
+
+/* SPI */
+struct spi {
+ u8 res0[0x20];
+ __be32 spmode; /* SPI mode register */
+ u8 res1[0x2];
+ u8 spie; /* SPI event register */
+ u8 res2[0x1];
+ u8 res3[0x2];
+ u8 spim; /* SPI mask register */
+ u8 res4[0x1];
+ u8 res5[0x1];
+ u8 spcom; /* SPI command register */
+ u8 res6[0x2];
+ __be32 spitd; /* SPI transmit data register (cpu mode) */
+ __be32 spird; /* SPI receive data register (cpu mode) */
+ u8 res7[0x8];
+} __attribute__ ((packed));
+
+/* SI */
+struct si1 {
+ __be16 siamr1; /* SI1 TDMA mode register */
+ __be16 sibmr1; /* SI1 TDMB mode register */
+ __be16 sicmr1; /* SI1 TDMC mode register */
+ __be16 sidmr1; /* SI1 TDMD mode register */
+ u8 siglmr1_h; /* SI1 global mode register high */
+ u8 res0[0x1];
+ u8 sicmdr1_h; /* SI1 command register high */
+ u8 res2[0x1];
+ u8 sistr1_h; /* SI1 status register high */
+ u8 res3[0x1];
+ __be16 sirsr1_h; /* SI1 RAM shadow address register high */
+ u8 sitarc1; /* SI1 RAM counter Tx TDMA */
+ u8 sitbrc1; /* SI1 RAM counter Tx TDMB */
+ u8 sitcrc1; /* SI1 RAM counter Tx TDMC */
+ u8 sitdrc1; /* SI1 RAM counter Tx TDMD */
+ u8 sirarc1; /* SI1 RAM counter Rx TDMA */
+ u8 sirbrc1; /* SI1 RAM counter Rx TDMB */
+ u8 sircrc1; /* SI1 RAM counter Rx TDMC */
+ u8 sirdrc1; /* SI1 RAM counter Rx TDMD */
+ u8 res4[0x8];
+ __be16 siemr1; /* SI1 TDME mode register 16 bits */
+ __be16 sifmr1; /* SI1 TDMF mode register 16 bits */
+ __be16 sigmr1; /* SI1 TDMG mode register 16 bits */
+ __be16 sihmr1; /* SI1 TDMH mode register 16 bits */
+ u8 siglmg1_l; /* SI1 global mode register low 8 bits */
+ u8 res5[0x1];
+ u8 sicmdr1_l; /* SI1 command register low 8 bits */
+ u8 res6[0x1];
+ u8 sistr1_l; /* SI1 status register low 8 bits */
+ u8 res7[0x1];
+ __be16 sirsr1_l; /* SI1 RAM shadow address register low 16 bits*/
+ u8 siterc1; /* SI1 RAM counter Tx TDME 8 bits */
+ u8 sitfrc1; /* SI1 RAM counter Tx TDMF 8 bits */
+ u8 sitgrc1; /* SI1 RAM counter Tx TDMG 8 bits */
+ u8 sithrc1; /* SI1 RAM counter Tx TDMH 8 bits */
+ u8 sirerc1; /* SI1 RAM counter Rx TDME 8 bits */
+ u8 sirfrc1; /* SI1 RAM counter Rx TDMF 8 bits */
+ u8 sirgrc1; /* SI1 RAM counter Rx TDMG 8 bits */
+ u8 sirhrc1; /* SI1 RAM counter Rx TDMH 8 bits */
+ u8 res8[0x8];
+ __be32 siml1; /* SI1 multiframe limit register */
+ u8 siedm1; /* SI1 extended diagnostic mode register */
+ u8 res9[0xBB];
+} __attribute__ ((packed));
+
+/* SI Routing Tables */
+struct sir {
+ u8 tx[0x400];
+ u8 rx[0x400];
+ u8 res0[0x800];
+} __attribute__ ((packed));
+
+/* USB Controller */
+struct usb_ctlr {
+ u8 usb_usmod;
+ u8 usb_usadr;
+ u8 usb_uscom;
+ u8 res1[1];
+ __be16 usb_usep1;
+ __be16 usb_usep2;
+ __be16 usb_usep3;
+ __be16 usb_usep4;
+ u8 res2[4];
+ __be16 usb_usber;
+ u8 res3[2];
+ __be16 usb_usbmr;
+ u8 res4[1];
+ u8 usb_usbs;
+ __be16 usb_ussft;
+ u8 res5[2];
+ __be16 usb_usfrn;
+ u8 res6[0x22];
+} __attribute__ ((packed));
+
+/* MCC */
+struct mcc {
+ __be32 mcce; /* MCC event register */
+ __be32 mccm; /* MCC mask register */
+ __be32 mccf; /* MCC configuration register */
+ __be32 merl; /* MCC emergency request level register */
+ u8 res0[0xF0];
+} __attribute__ ((packed));
+
+/* QE UCC Slow */
+struct ucc_slow {
+ __be32 gumr_l; /* UCCx general mode register (low) */
+ __be32 gumr_h; /* UCCx general mode register (high) */
+ __be16 upsmr; /* UCCx protocol-specific mode register */
+ u8 res0[0x2];
+ __be16 utodr; /* UCCx transmit on demand register */
+ __be16 udsr; /* UCCx data synchronization register */
+ __be16 ucce; /* UCCx event register */
+ u8 res1[0x2];
+ __be16 uccm; /* UCCx mask register */
+ u8 res2[0x1];
+ u8 uccs; /* UCCx status register */
+ u8 res3[0x24];
+ __be16 utpt;
+ u8 res4[0x52];
+ u8 guemr; /* UCC general extended mode register */
+} __attribute__ ((packed));
+
+/* QE UCC Fast */
+struct ucc_fast {
+ __be32 gumr; /* UCCx general mode register */
+ __be32 upsmr; /* UCCx protocol-specific mode register */
+ __be16 utodr; /* UCCx transmit on demand register */
+ u8 res0[0x2];
+ __be16 udsr; /* UCCx data synchronization register */
+ u8 res1[0x2];
+ __be32 ucce; /* UCCx event register */
+ __be32 uccm; /* UCCx mask register */
+ u8 uccs; /* UCCx status register */
+ u8 res2[0x7];
+ __be32 urfb; /* UCC receive FIFO base */
+ __be16 urfs; /* UCC receive FIFO size */
+ u8 res3[0x2];
+ __be16 urfet; /* UCC receive FIFO emergency threshold */
+ __be16 urfset; /* UCC receive FIFO special emergency
+ threshold */
+ __be32 utfb; /* UCC transmit FIFO base */
+ __be16 utfs; /* UCC transmit FIFO size */
+ u8 res4[0x2];
+ __be16 utfet; /* UCC transmit FIFO emergency threshold */
+ u8 res5[0x2];
+ __be16 utftt; /* UCC transmit FIFO transmit threshold */
+ u8 res6[0x2];
+ __be16 utpt; /* UCC transmit polling timer */
+ u8 res7[0x2];
+ __be32 urtry; /* UCC retry counter register */
+ u8 res8[0x4C];
+ u8 guemr; /* UCC general extended mode register */
+} __attribute__ ((packed));
+
+struct ucc {
+ union {
+ struct ucc_slow slow;
+ struct ucc_fast fast;
+ u8 res[0x200]; /* UCC blocks are 512 bytes each */
+ };
+} __attribute__ ((packed));
+
+/* MultiPHY UTOPIA POS Controllers (UPC) */
+struct upc {
+ __be32 upgcr; /* UTOPIA/POS general configuration register */
+ __be32 uplpa; /* UTOPIA/POS last PHY address */
+ __be32 uphec; /* ATM HEC register */
+ __be32 upuc; /* UTOPIA/POS UCC configuration */
+ __be32 updc1; /* UTOPIA/POS device 1 configuration */
+ __be32 updc2; /* UTOPIA/POS device 2 configuration */
+ __be32 updc3; /* UTOPIA/POS device 3 configuration */
+ __be32 updc4; /* UTOPIA/POS device 4 configuration */
+ __be32 upstpa; /* UTOPIA/POS STPA threshold */
+ u8 res0[0xC];
+ __be32 updrs1_h; /* UTOPIA/POS device 1 rate select */
+ __be32 updrs1_l; /* UTOPIA/POS device 1 rate select */
+ __be32 updrs2_h; /* UTOPIA/POS device 2 rate select */
+ __be32 updrs2_l; /* UTOPIA/POS device 2 rate select */
+ __be32 updrs3_h; /* UTOPIA/POS device 3 rate select */
+ __be32 updrs3_l; /* UTOPIA/POS device 3 rate select */
+ __be32 updrs4_h; /* UTOPIA/POS device 4 rate select */
+ __be32 updrs4_l; /* UTOPIA/POS device 4 rate select */
+ __be32 updrp1; /* UTOPIA/POS device 1 receive priority low */
+ __be32 updrp2; /* UTOPIA/POS device 2 receive priority low */
+ __be32 updrp3; /* UTOPIA/POS device 3 receive priority low */
+ __be32 updrp4; /* UTOPIA/POS device 4 receive priority low */
+ __be32 upde1; /* UTOPIA/POS device 1 event */
+ __be32 upde2; /* UTOPIA/POS device 2 event */
+ __be32 upde3; /* UTOPIA/POS device 3 event */
+ __be32 upde4; /* UTOPIA/POS device 4 event */
+ __be16 uprp1;
+ __be16 uprp2;
+ __be16 uprp3;
+ __be16 uprp4;
+ u8 res1[0x8];
+ __be16 uptirr1_0; /* Device 1 transmit internal rate 0 */
+ __be16 uptirr1_1; /* Device 1 transmit internal rate 1 */
+ __be16 uptirr1_2; /* Device 1 transmit internal rate 2 */
+ __be16 uptirr1_3; /* Device 1 transmit internal rate 3 */
+ __be16 uptirr2_0; /* Device 2 transmit internal rate 0 */
+ __be16 uptirr2_1; /* Device 2 transmit internal rate 1 */
+ __be16 uptirr2_2; /* Device 2 transmit internal rate 2 */
+ __be16 uptirr2_3; /* Device 2 transmit internal rate 3 */
+ __be16 uptirr3_0; /* Device 3 transmit internal rate 0 */
+ __be16 uptirr3_1; /* Device 3 transmit internal rate 1 */
+ __be16 uptirr3_2; /* Device 3 transmit internal rate 2 */
+ __be16 uptirr3_3; /* Device 3 transmit internal rate 3 */
+ __be16 uptirr4_0; /* Device 4 transmit internal rate 0 */
+ __be16 uptirr4_1; /* Device 4 transmit internal rate 1 */
+ __be16 uptirr4_2; /* Device 4 transmit internal rate 2 */
+ __be16 uptirr4_3; /* Device 4 transmit internal rate 3 */
+ __be32 uper1; /* Device 1 port enable register */
+ __be32 uper2; /* Device 2 port enable register */
+ __be32 uper3; /* Device 3 port enable register */
+ __be32 uper4; /* Device 4 port enable register */
+ u8 res2[0x150];
+} __attribute__ ((packed));
+
+/* SDMA */
+struct sdma {
+ __be32 sdsr; /* Serial DMA status register */
+ __be32 sdmr; /* Serial DMA mode register */
+ __be32 sdtr1; /* SDMA system bus threshold register */
+ __be32 sdtr2; /* SDMA secondary bus threshold register */
+ __be32 sdhy1; /* SDMA system bus hysteresis register */
+ __be32 sdhy2; /* SDMA secondary bus hysteresis register */
+ __be32 sdta1; /* SDMA system bus address register */
+ __be32 sdta2; /* SDMA secondary bus address register */
+ __be32 sdtm1; /* SDMA system bus MSNUM register */
+ __be32 sdtm2; /* SDMA secondary bus MSNUM register */
+ u8 res0[0x10];
+ __be32 sdaqr; /* SDMA address bus qualify register */
+ __be32 sdaqmr; /* SDMA address bus qualify mask register */
+ u8 res1[0x4];
+ __be32 sdebcr; /* SDMA CAM entries base register */
+ u8 res2[0x38];
+} __attribute__ ((packed));
+
+/* Debug Space */
+struct dbg {
+ __be32 bpdcr; /* Breakpoint debug command register */
+ __be32 bpdsr; /* Breakpoint debug status register */
+ __be32 bpdmr; /* Breakpoint debug mask register */
+ __be32 bprmrr0; /* Breakpoint request mode risc register 0 */
+ __be32 bprmrr1; /* Breakpoint request mode risc register 1 */
+ u8 res0[0x8];
+ __be32 bprmtr0; /* Breakpoint request mode trb register 0 */
+ __be32 bprmtr1; /* Breakpoint request mode trb register 1 */
+ u8 res1[0x8];
+ __be32 bprmir; /* Breakpoint request mode immediate register */
+ __be32 bprmsr; /* Breakpoint request mode serial register */
+ __be32 bpemr; /* Breakpoint exit mode register */
+ u8 res2[0x48];
+} __attribute__ ((packed));
+
+/*
+ * RISC Special Registers (Trap and Breakpoint). These are described in
+ * the QE Developer's Handbook.
+ */
+struct rsp {
+ __be32 tibcr[16]; /* Trap/instruction breakpoint control regs */
+ u8 res0[64];
+ __be32 ibcr0;
+ __be32 ibs0;
+ __be32 ibcnr0;
+ u8 res1[4];
+ __be32 ibcr1;
+ __be32 ibs1;
+ __be32 ibcnr1;
+ __be32 npcr;
+ __be32 dbcr;
+ __be32 dbar;
+ __be32 dbamr;
+ __be32 dbsr;
+ __be32 dbcnr;
+ u8 res2[12];
+ __be32 dbdr_h;
+ __be32 dbdr_l;
+ __be32 dbdmr_h;
+ __be32 dbdmr_l;
+ __be32 bsr;
+ __be32 bor;
+ __be32 bior;
+ u8 res3[4];
+ __be32 iatr[4];
+ __be32 eccr; /* Exception control configuration register */
+ __be32 eicr;
+ u8 res4[0x100-0xf8];
+} __attribute__ ((packed));
+
+struct qe_immap {
+ struct qe_iram iram; /* I-RAM */
+ struct qe_ic_regs ic; /* Interrupt Controller */
+ struct cp_qe cp; /* Communications Processor */
+ struct qe_mux qmx; /* QE Multiplexer */
+ struct qe_timers qet; /* QE Timers */
+ struct spi spi[0x2]; /* spi */
+ struct mcc mcc; /* mcc */
+ struct qe_brg brg; /* brg */
+ struct usb_ctlr usb; /* USB */
+ struct si1 si1; /* SI */
+ u8 res11[0x800];
+ struct sir sir; /* SI Routing Tables */
+ struct ucc ucc1; /* ucc1 */
+ struct ucc ucc3; /* ucc3 */
+ struct ucc ucc5; /* ucc5 */
+ struct ucc ucc7; /* ucc7 */
+ u8 res12[0x600];
+ struct upc upc1; /* MultiPHY UTOPIA POS Ctrlr 1*/
+ struct ucc ucc2; /* ucc2 */
+ struct ucc ucc4; /* ucc4 */
+ struct ucc ucc6; /* ucc6 */
+ struct ucc ucc8; /* ucc8 */
+ u8 res13[0x600];
+ struct upc upc2; /* MultiPHY UTOPIA POS Ctrlr 2*/
+ struct sdma sdma; /* SDMA */
+ struct dbg dbg; /* 0x104080 - 0x1040FF
+ Debug Space */
+ struct rsp rsp[0x2]; /* 0x104100 - 0x1042FF
+ RISC Special Registers
+ (Trap and Breakpoint) */
+ u8 res14[0x300]; /* 0x104300 - 0x1045FF */
+ u8 res15[0x3A00]; /* 0x104600 - 0x107FFF */
+ u8 res16[0x8000]; /* 0x108000 - 0x110000 */
+ u8 muram[0xC000]; /* 0x110000 - 0x11C000
+ Multi-user RAM */
+ u8 res17[0x24000]; /* 0x11C000 - 0x140000 */
+ u8 res18[0xC0000]; /* 0x140000 - 0x200000 */
+} __attribute__ ((packed));
+
+extern struct qe_immap __iomem *qe_immr;
+extern phys_addr_t get_qe_base(void);
+
+static inline unsigned long immrbar_virt_to_phys(void *address)
+{
+ if ( ((u32)address >= (u32)qe_immr) &&
+ ((u32)address < ((u32)qe_immr + QE_IMMAP_SIZE)) )
+ return (unsigned long)(address - (u32)qe_immr +
+ (u32)get_qe_base());
+ return (unsigned long)virt_to_phys(address);
+}
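+/*
+ * For illustration only: a pointer inside the ioremap'd QE register block
+ * translates to its offset from qe_immr plus the physical base returned by
+ * get_qe_base(); any other pointer falls back to virt_to_phys(). E.g.:
+ *
+ *	unsigned long phys = immrbar_virt_to_phys(&qe_immr->brg.brgc[0]);
+ */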
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_IMMAP_QE_H */
diff --git a/arch/powerpc/include/asm/io-defs.h b/arch/powerpc/include/asm/io-defs.h
new file mode 100644
index 0000000..44d7927
--- /dev/null
+++ b/arch/powerpc/include/asm/io-defs.h
@@ -0,0 +1,60 @@
+/* This file is meant to be included multiple times by other headers */
+/* The last 2 arguments are used by platforms/cell/io-workarounds.[ch] */
+
+DEF_PCI_AC_RET(readb, u8, (const PCI_IO_ADDR addr), (addr), mem, addr)
+DEF_PCI_AC_RET(readw, u16, (const PCI_IO_ADDR addr), (addr), mem, addr)
+DEF_PCI_AC_RET(readl, u32, (const PCI_IO_ADDR addr), (addr), mem, addr)
+DEF_PCI_AC_RET(readw_be, u16, (const PCI_IO_ADDR addr), (addr), mem, addr)
+DEF_PCI_AC_RET(readl_be, u32, (const PCI_IO_ADDR addr), (addr), mem, addr)
+DEF_PCI_AC_NORET(writeb, (u8 val, PCI_IO_ADDR addr), (val, addr), mem, addr)
+DEF_PCI_AC_NORET(writew, (u16 val, PCI_IO_ADDR addr), (val, addr), mem, addr)
+DEF_PCI_AC_NORET(writel, (u32 val, PCI_IO_ADDR addr), (val, addr), mem, addr)
+DEF_PCI_AC_NORET(writew_be, (u16 val, PCI_IO_ADDR addr), (val, addr), mem, addr)
+DEF_PCI_AC_NORET(writel_be, (u32 val, PCI_IO_ADDR addr), (val, addr), mem, addr)
+
+#ifdef __powerpc64__
+DEF_PCI_AC_RET(readq, u64, (const PCI_IO_ADDR addr), (addr), mem, addr)
+DEF_PCI_AC_RET(readq_be, u64, (const PCI_IO_ADDR addr), (addr), mem, addr)
+DEF_PCI_AC_NORET(writeq, (u64 val, PCI_IO_ADDR addr), (val, addr), mem, addr)
+DEF_PCI_AC_NORET(writeq_be, (u64 val, PCI_IO_ADDR addr), (val, addr), mem, addr)
+#endif /* __powerpc64__ */
+
+DEF_PCI_AC_RET(inb, u8, (unsigned long port), (port), pio, port)
+DEF_PCI_AC_RET(inw, u16, (unsigned long port), (port), pio, port)
+DEF_PCI_AC_RET(inl, u32, (unsigned long port), (port), pio, port)
+DEF_PCI_AC_NORET(outb, (u8 val, unsigned long port), (val, port), pio, port)
+DEF_PCI_AC_NORET(outw, (u16 val, unsigned long port), (val, port), pio, port)
+DEF_PCI_AC_NORET(outl, (u32 val, unsigned long port), (val, port), pio, port)
+
+DEF_PCI_AC_NORET(readsb, (const PCI_IO_ADDR a, void *b, unsigned long c),
+ (a, b, c), mem, a)
+DEF_PCI_AC_NORET(readsw, (const PCI_IO_ADDR a, void *b, unsigned long c),
+ (a, b, c), mem, a)
+DEF_PCI_AC_NORET(readsl, (const PCI_IO_ADDR a, void *b, unsigned long c),
+ (a, b, c), mem, a)
+DEF_PCI_AC_NORET(writesb, (PCI_IO_ADDR a, const void *b, unsigned long c),
+ (a, b, c), mem, a)
+DEF_PCI_AC_NORET(writesw, (PCI_IO_ADDR a, const void *b, unsigned long c),
+ (a, b, c), mem, a)
+DEF_PCI_AC_NORET(writesl, (PCI_IO_ADDR a, const void *b, unsigned long c),
+ (a, b, c), mem, a)
+
+DEF_PCI_AC_NORET(insb, (unsigned long p, void *b, unsigned long c),
+ (p, b, c), pio, p)
+DEF_PCI_AC_NORET(insw, (unsigned long p, void *b, unsigned long c),
+ (p, b, c), pio, p)
+DEF_PCI_AC_NORET(insl, (unsigned long p, void *b, unsigned long c),
+ (p, b, c), pio, p)
+DEF_PCI_AC_NORET(outsb, (unsigned long p, const void *b, unsigned long c),
+ (p, b, c), pio, p)
+DEF_PCI_AC_NORET(outsw, (unsigned long p, const void *b, unsigned long c),
+ (p, b, c), pio, p)
+DEF_PCI_AC_NORET(outsl, (unsigned long p, const void *b, unsigned long c),
+ (p, b, c), pio, p)
+
+DEF_PCI_AC_NORET(memset_io, (PCI_IO_ADDR a, int c, unsigned long n),
+ (a, c, n), mem, a)
+DEF_PCI_AC_NORET(memcpy_fromio, (void *d, const PCI_IO_ADDR s, unsigned long n),
+ (d, s, n), mem, s)
+DEF_PCI_AC_NORET(memcpy_toio, (PCI_IO_ADDR d, const void *s, unsigned long n),
+ (d, s, n), mem, d)
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
new file mode 100644
index 0000000..77c7fa0
--- /dev/null
+++ b/arch/powerpc/include/asm/io.h
@@ -0,0 +1,787 @@
+#ifndef _ASM_POWERPC_IO_H
+#define _ASM_POWERPC_IO_H
+#ifdef __KERNEL__
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/* Check for the existence of legacy devices */
+extern int check_legacy_ioport(unsigned long base_port);
+#define I8042_DATA_REG 0x60
+#define FDC_BASE 0x3f0
+/* only relevant for PReP */
+#define _PIDXR 0x279
+#define _PNPWRP 0xa79
+#define PNPBIOS_BASE 0xf000
+
+#include <linux/device.h>
+#include <linux/io.h>
+
+#include <linux/compiler.h>
+#include <asm/page.h>
+#include <asm/byteorder.h>
+#include <asm/synch.h>
+#include <asm/delay.h>
+#include <asm/mmu.h>
+
+#include <asm-generic/iomap.h>
+
+#ifdef CONFIG_PPC64
+#include <asm/paca.h>
+#endif
+
+#define SIO_CONFIG_RA 0x398
+#define SIO_CONFIG_RD 0x399
+
+#define SLOW_DOWN_IO
+
+/* 32 bits uses slightly different variables for the various IO
+ * bases. Most of this file only uses _IO_BASE, though, which we
+ * define properly based on the platform.
+ */
+#ifndef CONFIG_PCI
+#define _IO_BASE 0
+#define _ISA_MEM_BASE 0
+#define PCI_DRAM_OFFSET 0
+#elif defined(CONFIG_PPC32)
+#define _IO_BASE isa_io_base
+#define _ISA_MEM_BASE isa_mem_base
+#define PCI_DRAM_OFFSET pci_dram_offset
+#else
+#define _IO_BASE pci_io_base
+#define _ISA_MEM_BASE isa_mem_base
+#define PCI_DRAM_OFFSET 0
+#endif
+
+extern unsigned long isa_io_base;
+extern unsigned long pci_io_base;
+extern unsigned long pci_dram_offset;
+
+extern resource_size_t isa_mem_base;
+
+#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_INDIRECT_IO)
+#error CONFIG_PPC_INDIRECT_IO is not yet supported on 32 bits
+#endif
+
+/*
+ *
+ * Low level MMIO accessors
+ *
+ * This provides the non-bus specific accessors to MMIO. Those are PowerPC
+ * specific and thus shouldn't be used in generic code. The accessors
+ * provided here are:
+ *
+ * in_8, in_le16, in_be16, in_le32, in_be32, in_le64, in_be64
+ * out_8, out_le16, out_be16, out_le32, out_be32, out_le64, out_be64
+ * _insb, _insw_ns, _insl_ns, _outsb, _outsw_ns, _outsl_ns
+ *
+ * Those operate directly on a kernel virtual address. Note that the prototype
+ * for the out_* accessors has the arguments in opposite order from the usual
+ * linux PCI accessors. Unlike those, they take the address first and the value
+ * next.
+ *
+ * Note: I might drop the _ns suffix on the stream operations soon, as it is
+ * simply normal for stream operations not to swap in the first place.
+ *
+ */
+
+#ifdef CONFIG_PPC64
+#define IO_SET_SYNC_FLAG() do { local_paca->io_sync = 1; } while(0)
+#else
+#define IO_SET_SYNC_FLAG()
+#endif
+
+/* gcc 4.0 and older don't have the 'Z' constraint */
+#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ == 0)
+#define DEF_MMIO_IN_LE(name, size, insn) \
+static inline u##size name(const volatile u##size __iomem *addr) \
+{ \
+ u##size ret; \
+ __asm__ __volatile__("sync;"#insn" %0,0,%1;twi 0,%0,0;isync" \
+ : "=r" (ret) : "r" (addr), "m" (*addr) : "memory"); \
+ return ret; \
+}
+
+#define DEF_MMIO_OUT_LE(name, size, insn) \
+static inline void name(volatile u##size __iomem *addr, u##size val) \
+{ \
+ __asm__ __volatile__("sync;"#insn" %1,0,%2" \
+ : "=m" (*addr) : "r" (val), "r" (addr) : "memory"); \
+ IO_SET_SYNC_FLAG(); \
+}
+#else /* newer gcc */
+#define DEF_MMIO_IN_LE(name, size, insn) \
+static inline u##size name(const volatile u##size __iomem *addr) \
+{ \
+ u##size ret; \
+ __asm__ __volatile__("sync;"#insn" %0,%y1;twi 0,%0,0;isync" \
+ : "=r" (ret) : "Z" (*addr) : "memory"); \
+ return ret; \
+}
+
+#define DEF_MMIO_OUT_LE(name, size, insn) \
+static inline void name(volatile u##size __iomem *addr, u##size val) \
+{ \
+ __asm__ __volatile__("sync;"#insn" %1,%y0" \
+ : "=Z" (*addr) : "r" (val) : "memory"); \
+ IO_SET_SYNC_FLAG(); \
+}
+#endif
+
+#define DEF_MMIO_IN_BE(name, size, insn) \
+static inline u##size name(const volatile u##size __iomem *addr) \
+{ \
+ u##size ret; \
+ __asm__ __volatile__("sync;"#insn"%U1%X1 %0,%1;twi 0,%0,0;isync"\
+ : "=r" (ret) : "m" (*addr) : "memory"); \
+ return ret; \
+}
+
+#define DEF_MMIO_OUT_BE(name, size, insn) \
+static inline void name(volatile u##size __iomem *addr, u##size val) \
+{ \
+ __asm__ __volatile__("sync;"#insn"%U0%X0 %1,%0" \
+ : "=m" (*addr) : "r" (val) : "memory"); \
+ IO_SET_SYNC_FLAG(); \
+}
+
+
+DEF_MMIO_IN_BE(in_8, 8, lbz);
+DEF_MMIO_IN_BE(in_be16, 16, lhz);
+DEF_MMIO_IN_BE(in_be32, 32, lwz);
+DEF_MMIO_IN_LE(in_le16, 16, lhbrx);
+DEF_MMIO_IN_LE(in_le32, 32, lwbrx);
+
+DEF_MMIO_OUT_BE(out_8, 8, stb);
+DEF_MMIO_OUT_BE(out_be16, 16, sth);
+DEF_MMIO_OUT_BE(out_be32, 32, stw);
+DEF_MMIO_OUT_LE(out_le16, 16, sthbrx);
+DEF_MMIO_OUT_LE(out_le32, 32, stwbrx);
+
+#ifdef __powerpc64__
+DEF_MMIO_OUT_BE(out_be64, 64, std);
+DEF_MMIO_IN_BE(in_be64, 64, ld);
+
+/* There are no asm instructions for 64-bit reverse loads and stores */
+static inline u64 in_le64(const volatile u64 __iomem *addr)
+{
+ return swab64(in_be64(addr));
+}
+
+static inline void out_le64(volatile u64 __iomem *addr, u64 val)
+{
+ out_be64(addr, swab64(val));
+}
+#endif /* __powerpc64__ */
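+/*
+ * Illustrative usage sketch only; "regs" points at an ioremap'd device and
+ * the register offsets are hypothetical:
+ *
+ *	u32 stat = in_be32(regs + 0x10);
+ *	out_le16(regs + 0x14, 0x1234);
+ *
+ * Note the argument order of the out_* accessors: address first, value next.
+ */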
+
+/*
+ * Low level IO stream instructions are defined out of line for now
+ */
+extern void _insb(const volatile u8 __iomem *addr, void *buf, long count);
+extern void _outsb(volatile u8 __iomem *addr,const void *buf,long count);
+extern void _insw_ns(const volatile u16 __iomem *addr, void *buf, long count);
+extern void _outsw_ns(volatile u16 __iomem *addr, const void *buf, long count);
+extern void _insl_ns(const volatile u32 __iomem *addr, void *buf, long count);
+extern void _outsl_ns(volatile u32 __iomem *addr, const void *buf, long count);
+
+/* The _ns naming is historical and will be removed. For now, just #define
+ * the non _ns equivalent names
+ */
+#define _insw _insw_ns
+#define _insl _insl_ns
+#define _outsw _outsw_ns
+#define _outsl _outsl_ns
+
+
+/*
+ * memset_io, memcpy_toio, memcpy_fromio base implementations are out of line
+ */
+
+extern void _memset_io(volatile void __iomem *addr, int c, unsigned long n);
+extern void _memcpy_fromio(void *dest, const volatile void __iomem *src,
+ unsigned long n);
+extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
+ unsigned long n);
+
+/*
+ *
+ * PCI and standard ISA accessors
+ *
+ * Those are globally defined linux accessors for devices on PCI or ISA
+ * busses. They follow the Linux defined semantics. The current implementation
+ * for PowerPC is as close as possible to the x86 version of these, and thus
+ * provides fairly heavyweight barriers for the non-raw versions.
+ *
+ * In addition, they support a hook mechanism when CONFIG_PPC_INDIRECT_IO is
+ * set, allowing the platform to provide its own implementation of some or all
+ * of the accessors.
+ */
+
+/*
+ * Include the EEH definitions only when EEH is enabled so they don't get
+ * in the way when building for 32 bits
+ */
+#ifdef CONFIG_EEH
+#include <asm/eeh.h>
+#endif
+
+/* Shortcut to the MMIO argument pointer */
+#define PCI_IO_ADDR volatile void __iomem *
+
+/* Indirect IO address tokens:
+ *
+ * When CONFIG_PPC_INDIRECT_IO is set, the platform can provide hooks
+ * on all IOs. (Note that this is 64-bit only for now)
+ *
+ * To help platforms that may need to differentiate MMIO addresses in
+ * their hooks, a bitfield is reserved for use by the platform near the
+ * top of MMIO addresses (not PIO, those have to cope the hard way).
+ *
+ * This bit field is 12 bits and is at the top of the IO virtual
+ * addresses PCI_IO_INDIRECT_TOKEN_MASK.
+ *
+ * The kernel virtual space is thus:
+ *
+ * 0xD000000000000000 : vmalloc
+ * 0xD000080000000000 : PCI PHB IO space
+ * 0xD000080080000000 : ioremap
+ * 0xD0000fffffffffff : end of ioremap region
+ *
+ * Since the top 4 bits are reserved as the region ID, we thus use
+ * the next 12 bits and keep 4 bits available for the future if the
+ * virtual address space is ever to be extended.
+ *
+ * The direct IO mapping operations will then mask off those bits
+ * before doing the actual access, though that only happens when
+ * CONFIG_PPC_INDIRECT_IO is set, so be careful when you use that
+ * mechanism.
+ */
+
+#ifdef CONFIG_PPC_INDIRECT_IO
+#define PCI_IO_IND_TOKEN_MASK 0x0fff000000000000ul
+#define PCI_IO_IND_TOKEN_SHIFT 48
+#define PCI_FIX_ADDR(addr) \
+ ((PCI_IO_ADDR)(((unsigned long)(addr)) & ~PCI_IO_IND_TOKEN_MASK))
+#define PCI_GET_ADDR_TOKEN(addr) \
+ (((unsigned long)(addr) & PCI_IO_IND_TOKEN_MASK) >> \
+ PCI_IO_IND_TOKEN_SHIFT)
+#define PCI_SET_ADDR_TOKEN(addr, token) \
+do { \
+ unsigned long __a = (unsigned long)(addr); \
+ __a &= ~PCI_IO_IND_TOKEN_MASK; \
+ __a |= ((unsigned long)(token)) << PCI_IO_IND_TOKEN_SHIFT; \
+ (addr) = (void __iomem *)__a; \
+} while(0)
+#else
+#define PCI_FIX_ADDR(addr) (addr)
+#endif
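+/*
+ * Illustrative sketch only (the token value 1 is arbitrary): a platform
+ * setting up an indirect-IO region can tag the cookie it hands out, and
+ * its hooks can later recover that tag:
+ *
+ *	PCI_SET_ADDR_TOKEN(addr, 1);
+ *	...
+ *	token = PCI_GET_ADDR_TOKEN(addr);
+ *
+ * The __do_* implementations below strip the token with PCI_FIX_ADDR()
+ * before performing the real access.
+ */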
+
+
+/*
+ * Non ordered and non-swapping "raw" accessors
+ */
+
+static inline unsigned char __raw_readb(const volatile void __iomem *addr)
+{
+ return *(volatile unsigned char __force *)PCI_FIX_ADDR(addr);
+}
+static inline unsigned short __raw_readw(const volatile void __iomem *addr)
+{
+ return *(volatile unsigned short __force *)PCI_FIX_ADDR(addr);
+}
+static inline unsigned int __raw_readl(const volatile void __iomem *addr)
+{
+ return *(volatile unsigned int __force *)PCI_FIX_ADDR(addr);
+}
+static inline void __raw_writeb(unsigned char v, volatile void __iomem *addr)
+{
+ *(volatile unsigned char __force *)PCI_FIX_ADDR(addr) = v;
+}
+static inline void __raw_writew(unsigned short v, volatile void __iomem *addr)
+{
+ *(volatile unsigned short __force *)PCI_FIX_ADDR(addr) = v;
+}
+static inline void __raw_writel(unsigned int v, volatile void __iomem *addr)
+{
+ *(volatile unsigned int __force *)PCI_FIX_ADDR(addr) = v;
+}
+
+#ifdef __powerpc64__
+static inline unsigned long __raw_readq(const volatile void __iomem *addr)
+{
+ return *(volatile unsigned long __force *)PCI_FIX_ADDR(addr);
+}
+static inline void __raw_writeq(unsigned long v, volatile void __iomem *addr)
+{
+ *(volatile unsigned long __force *)PCI_FIX_ADDR(addr) = v;
+}
+#endif /* __powerpc64__ */
+
+/*
+ *
+ * PCI PIO and MMIO accessors.
+ *
+ *
+ * On 32 bits, PIO operations have a recovery mechanism in case they trigger
+ * machine checks (which they occasionally do when probing non-existent
+ * IO ports on some platforms, like PowerMac and 8xx).
+ * I always found it to be of dubious reliability and I am tempted to get
+ * rid of it one of these days. So if you think it's important to keep it,
+ * please speak up asap. We never had it for 64 bits and I do not intend
+ * to port it over.
+ */
+
+#ifdef CONFIG_PPC32
+
+#define __do_in_asm(name, op) \
+static inline unsigned int name(unsigned int port) \
+{ \
+ unsigned int x; \
+ __asm__ __volatile__( \
+ "sync\n" \
+ "0:" op " %0,0,%1\n" \
+ "1: twi 0,%0,0\n" \
+ "2: isync\n" \
+ "3: nop\n" \
+ "4:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "5: li %0,-1\n" \
+ " b 4b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 2\n" \
+ " .long 0b,5b\n" \
+ " .long 1b,5b\n" \
+ " .long 2b,5b\n" \
+ " .long 3b,5b\n" \
+ ".previous" \
+ : "=&r" (x) \
+ : "r" (port + _IO_BASE) \
+ : "memory"); \
+ return x; \
+}
+
+#define __do_out_asm(name, op) \
+static inline void name(unsigned int val, unsigned int port) \
+{ \
+ __asm__ __volatile__( \
+ "sync\n" \
+ "0:" op " %0,0,%1\n" \
+ "1: sync\n" \
+ "2:\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 2\n" \
+ " .long 0b,2b\n" \
+ " .long 1b,2b\n" \
+ ".previous" \
+ : : "r" (val), "r" (port + _IO_BASE) \
+ : "memory"); \
+}
+
+__do_in_asm(_rec_inb, "lbzx")
+__do_in_asm(_rec_inw, "lhbrx")
+__do_in_asm(_rec_inl, "lwbrx")
+__do_out_asm(_rec_outb, "stbx")
+__do_out_asm(_rec_outw, "sthbrx")
+__do_out_asm(_rec_outl, "stwbrx")
+
+#endif /* CONFIG_PPC32 */
+
+/* The "__do_*" operations below provide the actual "base" implementation
+ * for each of the defined accessors. Some of them use the out_* functions
+ * directly, some of them still use EEH, though we might change that in the
+ * future. Those macros below provide the necessary argument swapping and
+ * handling of the IO base for PIO.
+ *
+ * They are themselves used by the macros that define the actual accessors
+ * and can be used by the hooks if any.
+ *
+ * Note that PIO operations are always defined in terms of their corresponding
+ * MMIO operations. That allows platforms like iSeries that want to modify the
+ * behaviour of both to only hook the MMIO version and get both. It's also
+ * possible to hook directly at the top-level PIO operation if they have to
+ * be handled differently.
+ */
+#define __do_writeb(val, addr) out_8(PCI_FIX_ADDR(addr), val)
+#define __do_writew(val, addr) out_le16(PCI_FIX_ADDR(addr), val)
+#define __do_writel(val, addr) out_le32(PCI_FIX_ADDR(addr), val)
+#define __do_writeq(val, addr) out_le64(PCI_FIX_ADDR(addr), val)
+#define __do_writew_be(val, addr) out_be16(PCI_FIX_ADDR(addr), val)
+#define __do_writel_be(val, addr) out_be32(PCI_FIX_ADDR(addr), val)
+#define __do_writeq_be(val, addr) out_be64(PCI_FIX_ADDR(addr), val)
+
+#ifdef CONFIG_EEH
+#define __do_readb(addr) eeh_readb(PCI_FIX_ADDR(addr))
+#define __do_readw(addr) eeh_readw(PCI_FIX_ADDR(addr))
+#define __do_readl(addr) eeh_readl(PCI_FIX_ADDR(addr))
+#define __do_readq(addr) eeh_readq(PCI_FIX_ADDR(addr))
+#define __do_readw_be(addr) eeh_readw_be(PCI_FIX_ADDR(addr))
+#define __do_readl_be(addr) eeh_readl_be(PCI_FIX_ADDR(addr))
+#define __do_readq_be(addr) eeh_readq_be(PCI_FIX_ADDR(addr))
+#else /* CONFIG_EEH */
+#define __do_readb(addr) in_8(PCI_FIX_ADDR(addr))
+#define __do_readw(addr) in_le16(PCI_FIX_ADDR(addr))
+#define __do_readl(addr) in_le32(PCI_FIX_ADDR(addr))
+#define __do_readq(addr) in_le64(PCI_FIX_ADDR(addr))
+#define __do_readw_be(addr) in_be16(PCI_FIX_ADDR(addr))
+#define __do_readl_be(addr) in_be32(PCI_FIX_ADDR(addr))
+#define __do_readq_be(addr) in_be64(PCI_FIX_ADDR(addr))
+#endif /* !defined(CONFIG_EEH) */
+
+#ifdef CONFIG_PPC32
+#define __do_outb(val, port) _rec_outb(val, port)
+#define __do_outw(val, port) _rec_outw(val, port)
+#define __do_outl(val, port) _rec_outl(val, port)
+#define __do_inb(port) _rec_inb(port)
+#define __do_inw(port) _rec_inw(port)
+#define __do_inl(port) _rec_inl(port)
+#else /* CONFIG_PPC32 */
+#define __do_outb(val, port) writeb(val,(PCI_IO_ADDR)_IO_BASE+port);
+#define __do_outw(val, port) writew(val,(PCI_IO_ADDR)_IO_BASE+port);
+#define __do_outl(val, port) writel(val,(PCI_IO_ADDR)_IO_BASE+port);
+#define __do_inb(port) readb((PCI_IO_ADDR)_IO_BASE + port);
+#define __do_inw(port) readw((PCI_IO_ADDR)_IO_BASE + port);
+#define __do_inl(port) readl((PCI_IO_ADDR)_IO_BASE + port);
+#endif /* !CONFIG_PPC32 */
+
+#ifdef CONFIG_EEH
+#define __do_readsb(a, b, n) eeh_readsb(PCI_FIX_ADDR(a), (b), (n))
+#define __do_readsw(a, b, n) eeh_readsw(PCI_FIX_ADDR(a), (b), (n))
+#define __do_readsl(a, b, n) eeh_readsl(PCI_FIX_ADDR(a), (b), (n))
+#else /* CONFIG_EEH */
+#define __do_readsb(a, b, n) _insb(PCI_FIX_ADDR(a), (b), (n))
+#define __do_readsw(a, b, n) _insw(PCI_FIX_ADDR(a), (b), (n))
+#define __do_readsl(a, b, n) _insl(PCI_FIX_ADDR(a), (b), (n))
+#endif /* !CONFIG_EEH */
+#define __do_writesb(a, b, n) _outsb(PCI_FIX_ADDR(a),(b),(n))
+#define __do_writesw(a, b, n) _outsw(PCI_FIX_ADDR(a),(b),(n))
+#define __do_writesl(a, b, n) _outsl(PCI_FIX_ADDR(a),(b),(n))
+
+#define __do_insb(p, b, n) readsb((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
+#define __do_insw(p, b, n) readsw((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
+#define __do_insl(p, b, n) readsl((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
+#define __do_outsb(p, b, n) writesb((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))
+#define __do_outsw(p, b, n) writesw((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))
+#define __do_outsl(p, b, n) writesl((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))
+
+#define __do_memset_io(addr, c, n) \
+ _memset_io(PCI_FIX_ADDR(addr), c, n)
+#define __do_memcpy_toio(dst, src, n) \
+ _memcpy_toio(PCI_FIX_ADDR(dst), src, n)
+
+#ifdef CONFIG_EEH
+#define __do_memcpy_fromio(dst, src, n) \
+ eeh_memcpy_fromio(dst, PCI_FIX_ADDR(src), n)
+#else /* CONFIG_EEH */
+#define __do_memcpy_fromio(dst, src, n) \
+ _memcpy_fromio(dst,PCI_FIX_ADDR(src),n)
+#endif /* !CONFIG_EEH */
+
+#ifdef CONFIG_PPC_INDIRECT_IO
+#define DEF_PCI_HOOK(x) x
+#else
+#define DEF_PCI_HOOK(x) NULL
+#endif
+
+/* Structure containing all the hooks */
+extern struct ppc_pci_io {
+
+#define DEF_PCI_AC_RET(name, ret, at, al, space, aa) ret (*name) at;
+#define DEF_PCI_AC_NORET(name, at, al, space, aa) void (*name) at;
+
+#include <asm/io-defs.h>
+
+#undef DEF_PCI_AC_RET
+#undef DEF_PCI_AC_NORET
+
+} ppc_pci_io;
+
+/* The inline wrappers */
+#define DEF_PCI_AC_RET(name, ret, at, al, space, aa) \
+static inline ret name at \
+{ \
+ if (DEF_PCI_HOOK(ppc_pci_io.name) != NULL) \
+ return ppc_pci_io.name al; \
+ return __do_##name al; \
+}
+
+#define DEF_PCI_AC_NORET(name, at, al, space, aa) \
+static inline void name at \
+{ \
+ if (DEF_PCI_HOOK(ppc_pci_io.name) != NULL) \
+ ppc_pci_io.name al; \
+ else \
+ __do_##name al; \
+}
+
+#include <asm/io-defs.h>
+
+#undef DEF_PCI_AC_RET
+#undef DEF_PCI_AC_NORET
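+/*
+ * Illustrative sketch only (function name is hypothetical): a platform
+ * selecting CONFIG_PPC_INDIRECT_IO can override individual accessors by
+ * filling in the corresponding ppc_pci_io slot at boot time:
+ *
+ *	static u32 my_plat_readl(const PCI_IO_ADDR addr)
+ *	{
+ *		return in_le32(PCI_FIX_ADDR(addr));
+ *	}
+ *	...
+ *	ppc_pci_io.readl = my_plat_readl;
+ */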
+
+/* Some drivers check for the presence of readq & writeq with
+ * a #ifdef, so we make them happy here.
+ */
+#ifdef __powerpc64__
+#define readq readq
+#define writeq writeq
+#endif
+
+/*
+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
+ * access
+ */
+#define xlate_dev_mem_ptr(p) __va(p)
+
+/*
+ * Convert a virtual cached pointer to an uncached pointer
+ */
+#define xlate_dev_kmem_ptr(p) p
+
+/*
+ * We don't do relaxed operations yet, at least not with these semantics
+ */
+#define readb_relaxed(addr) readb(addr)
+#define readw_relaxed(addr) readw(addr)
+#define readl_relaxed(addr) readl(addr)
+#define readq_relaxed(addr) readq(addr)
+
+#ifdef CONFIG_PPC32
+#define mmiowb()
+#else
+/*
+ * Enforce synchronisation of stores vs. spin_unlock
+ * (this does it explicitly, though our implementation of spin_unlock
+ * does it implicitly too)
+ */
+static inline void mmiowb(void)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__("sync; li %0,0; stb %0,%1(13)"
+ : "=&r" (tmp) : "i" (offsetof(struct paca_struct, io_sync))
+ : "memory");
+}
+#endif /* !CONFIG_PPC32 */
+
+static inline void iosync(void)
+{
+ __asm__ __volatile__ ("sync" : : : "memory");
+}
+
+/* Enforce in-order execution of data I/O.
+ * No distinction between read/write on PPC; use eieio for all three.
+ * Those are fairly weak though. They don't provide a barrier between
+ * MMIO and cacheable storage, nor do they provide a barrier vs. locks;
+ * they only provide barriers between 2 __raw MMIO operations and
+ * possibly break write combining.
+ */
+#define iobarrier_rw() eieio()
+#define iobarrier_r() eieio()
+#define iobarrier_w() eieio()
+
+
+/*
+ * output pause versions need a delay at least for the
+ * w83c105 ide controller in a p610.
+ */
+#define inb_p(port) inb(port)
+#define outb_p(val, port) (udelay(1), outb((val), (port)))
+#define inw_p(port) inw(port)
+#define outw_p(val, port) (udelay(1), outw((val), (port)))
+#define inl_p(port) inl(port)
+#define outl_p(val, port) (udelay(1), outl((val), (port)))
+
+
+#define IO_SPACE_LIMIT ~(0UL)
+
+
+/**
+ * ioremap - map bus memory into CPU space
+ * @address: bus address of the memory
+ * @size: size of the resource to map
+ *
+ * ioremap performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ *
+ * We provide a few variations of it:
+ *
+ * * ioremap is the standard one and provides non-cacheable guarded mappings
+ * and can be hooked by the platform via ppc_md
+ *
+ * * ioremap_flags allows the page flags to be specified as an argument and
+ * can also be hooked by the platform via ppc_md. ioremap_prot is the exact
+ * same thing as ioremap_flags.
+ *
+ * * ioremap_nocache is identical to ioremap
+ *
+ * * iounmap undoes such a mapping and can be hooked
+ *
+ * * __ioremap_at (and the pending __iounmap_at) are low level functions to
+ * create hand-made mappings for use only by the PCI code and cannot
+ * currently be hooked. Must be page aligned.
+ *
+ * * __ioremap is the low level implementation used by ioremap and
+ * ioremap_flags and cannot be hooked (but can be used by a hook on one
+ * of the previous ones)
+ *
+ * * __iounmap is the low level implementation used by iounmap and cannot
+ * be hooked (but can be used by a hook on iounmap)
+ *
+ */
+extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
+extern void __iomem *ioremap_flags(phys_addr_t address, unsigned long size,
+ unsigned long flags);
+#define ioremap_nocache(addr, size) ioremap((addr), (size))
+#define ioremap_prot(addr, size, prot) ioremap_flags((addr), (size), (prot))
+
+extern void iounmap(volatile void __iomem *addr);
+
+extern void __iomem *__ioremap(phys_addr_t, unsigned long size,
+ unsigned long flags);
+extern void __iounmap(volatile void __iomem *addr);
+
+extern void __iomem * __ioremap_at(phys_addr_t pa, void *ea,
+ unsigned long size, unsigned long flags);
+extern void __iounmap_at(void *ea, unsigned long size);
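+/*
+ * Typical driver-side usage, for illustration only ("dev_phys" and the
+ * register offset are hypothetical):
+ *
+ *	void __iomem *regs = ioremap(dev_phys, 0x1000);
+ *	if (!regs)
+ *		return -ENOMEM;
+ *	writel(1, regs + 0x4);
+ *	...
+ *	iounmap(regs);
+ */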
+
+/*
+ * When CONFIG_PPC_INDIRECT_IO is set, we use the generic iomap implementation
+ * which needs some additional definitions here. They basically allow PIO
+ * space overall to be 1GB. This will work as long as we never try to use
+ * iomap to map MMIO below 1GB, which should be fine on ppc64.
+ */
+#define HAVE_ARCH_PIO_SIZE 1
+#define PIO_OFFSET 0x00000000UL
+#define PIO_MASK (FULL_IO_SIZE - 1)
+#define PIO_RESERVED (FULL_IO_SIZE)
+
+#define mmio_read16be(addr) readw_be(addr)
+#define mmio_read32be(addr) readl_be(addr)
+#define mmio_write16be(val, addr) writew_be(val, addr)
+#define mmio_write32be(val, addr) writel_be(val, addr)
+#define mmio_insb(addr, dst, count) readsb(addr, dst, count)
+#define mmio_insw(addr, dst, count) readsw(addr, dst, count)
+#define mmio_insl(addr, dst, count) readsl(addr, dst, count)
+#define mmio_outsb(addr, src, count) writesb(addr, src, count)
+#define mmio_outsw(addr, src, count) writesw(addr, src, count)
+#define mmio_outsl(addr, src, count) writesl(addr, src, count)
+
+/**
+ * virt_to_phys - map virtual addresses to physical
+ * @address: address to remap
+ *
+ * The returned physical address is the physical (CPU) mapping for
+ * the memory address given. It is only valid to use this function on
+ * addresses directly mapped or allocated via kmalloc.
+ *
+ * This function does not give bus mappings for DMA transfers. In
+ * almost all conceivable cases a device driver should not be using
+ * this function
+ */
+static inline unsigned long virt_to_phys(volatile void * address)
+{
+ return __pa((unsigned long)address);
+}
+
+/**
+ * phys_to_virt - map physical address to virtual
+ * @address: address to remap
+ *
+ * The returned virtual address is a current CPU mapping for
+ * the memory address given. It is only valid to use this function on
+ * addresses that have a kernel mapping
+ *
+ * This function does not handle bus mappings for DMA transfers. In
+ * almost all conceivable cases a device driver should not be using
+ * this function
+ */
+static inline void * phys_to_virt(unsigned long address)
+{
+ return (void *)__va(address);
+}
+
+/*
+ * Change "struct page" to physical address.
+ */
+#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
+
+/* We do NOT want virtual merging; it would put too much pressure on
+ * our iommu allocator. Instead, we want drivers to be smart enough
+ * to coalesce sglists that happen to have been mapped in a contiguous
+ * way by the iommu
+ */
+#define BIO_VMERGE_BOUNDARY 0
+
+/*
+ * 32-bit still uses virt_to_bus() for its implementation of DMA
+ * mappings so we have to keep it defined here. We also have some old
+ * drivers (shame shame shame) that use bus_to_virt() and haven't been
+ * fixed yet, so we need to define it here as well.
+ */
+#ifdef CONFIG_PPC32
+
+static inline unsigned long virt_to_bus(volatile void * address)
+{
+ if (address == NULL)
+ return 0;
+ return __pa(address) + PCI_DRAM_OFFSET;
+}
+
+static inline void * bus_to_virt(unsigned long address)
+{
+ if (address == 0)
+ return NULL;
+ return __va(address - PCI_DRAM_OFFSET);
+}
+
+#define page_to_bus(page) (page_to_phys(page) + PCI_DRAM_OFFSET)
+
+#endif /* CONFIG_PPC32 */
+
+/* access ports */
+#define setbits32(_addr, _v) out_be32((_addr), in_be32(_addr) | (_v))
+#define clrbits32(_addr, _v) out_be32((_addr), in_be32(_addr) & ~(_v))
+
+#define setbits16(_addr, _v) out_be16((_addr), in_be16(_addr) | (_v))
+#define clrbits16(_addr, _v) out_be16((_addr), in_be16(_addr) & ~(_v))
+
+#define setbits8(_addr, _v) out_8((_addr), in_8(_addr) | (_v))
+#define clrbits8(_addr, _v) out_8((_addr), in_8(_addr) & ~(_v))
+
+/* Clear and set bits in one shot. These macros can be used to clear and
+ * set multiple bits in a register using a single read-modify-write. These
+ * macros can also be used to set a multiple-bit bit pattern using a mask,
+ * by specifying the mask in the 'clear' parameter and the new bit pattern
+ * in the 'set' parameter.
+ */
+
+#define clrsetbits(type, addr, clear, set) \
+ out_##type((addr), (in_##type(addr) & ~(clear)) | (set))
+
+#ifdef __powerpc64__
+#define clrsetbits_be64(addr, clear, set) clrsetbits(be64, addr, clear, set)
+#define clrsetbits_le64(addr, clear, set) clrsetbits(le64, addr, clear, set)
+#endif
+
+#define clrsetbits_be32(addr, clear, set) clrsetbits(be32, addr, clear, set)
+#define clrsetbits_le32(addr, clear, set) clrsetbits(le32, addr, clear, set)
+
+#define clrsetbits_be16(addr, clear, set) clrsetbits(be16, addr, clear, set)
+#define clrsetbits_le16(addr, clear, set) clrsetbits(le16, addr, clear, set)
+
+#define clrsetbits_8(addr, clear, set) clrsetbits(8, addr, clear, set)
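+/*
+ * Example (illustrative register and field): update a 3-bit divider field
+ * in the low bits of a big-endian 32-bit register without disturbing the
+ * other bits:
+ *
+ *	clrsetbits_be32(&regs->clkcr, 0x7, div & 0x7);
+ *
+ * which expands to a single in_be32()/out_be32() read-modify-write.
+ */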
+
+void __iomem *devm_ioremap_prot(struct device *dev, resource_size_t offset,
+ size_t size, unsigned long flags);
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_POWERPC_IO_H */
diff --git a/arch/powerpc/include/asm/ioctl.h b/arch/powerpc/include/asm/ioctl.h
new file mode 100644
index 0000000..57d6830
--- /dev/null
+++ b/arch/powerpc/include/asm/ioctl.h
@@ -0,0 +1,13 @@
+#ifndef _ASM_POWERPC_IOCTL_H
+#define _ASM_POWERPC_IOCTL_H
+
+#define _IOC_SIZEBITS 13
+#define _IOC_DIRBITS 3
+
+#define _IOC_NONE 1U
+#define _IOC_READ 2U
+#define _IOC_WRITE 4U
+
+#include <asm-generic/ioctl.h>
+
+#endif /* _ASM_POWERPC_IOCTL_H */
diff --git a/arch/powerpc/include/asm/ioctls.h b/arch/powerpc/include/asm/ioctls.h
new file mode 100644
index 0000000..279a622
--- /dev/null
+++ b/arch/powerpc/include/asm/ioctls.h
@@ -0,0 +1,110 @@
+#ifndef _ASM_POWERPC_IOCTLS_H
+#define _ASM_POWERPC_IOCTLS_H
+
+#include <asm/ioctl.h>
+
+#define FIOCLEX _IO('f', 1)
+#define FIONCLEX _IO('f', 2)
+#define FIOASYNC _IOW('f', 125, int)
+#define FIONBIO _IOW('f', 126, int)
+#define FIONREAD _IOR('f', 127, int)
+#define TIOCINQ FIONREAD
+#define FIOQSIZE _IOR('f', 128, loff_t)
+
+#define TIOCGETP _IOR('t', 8, struct sgttyb)
+#define TIOCSETP _IOW('t', 9, struct sgttyb)
+#define TIOCSETN _IOW('t', 10, struct sgttyb) /* TIOCSETP wo flush */
+
+#define TIOCSETC _IOW('t', 17, struct tchars)
+#define TIOCGETC _IOR('t', 18, struct tchars)
+#define TCGETS _IOR('t', 19, struct termios)
+#define TCSETS _IOW('t', 20, struct termios)
+#define TCSETSW _IOW('t', 21, struct termios)
+#define TCSETSF _IOW('t', 22, struct termios)
+
+#define TCGETA _IOR('t', 23, struct termio)
+#define TCSETA _IOW('t', 24, struct termio)
+#define TCSETAW _IOW('t', 25, struct termio)
+#define TCSETAF _IOW('t', 28, struct termio)
+
+#define TCSBRK _IO('t', 29)
+#define TCXONC _IO('t', 30)
+#define TCFLSH _IO('t', 31)
+
+#define TIOCSWINSZ _IOW('t', 103, struct winsize)
+#define TIOCGWINSZ _IOR('t', 104, struct winsize)
+#define TIOCSTART _IO('t', 110) /* start output, like ^Q */
+#define TIOCSTOP _IO('t', 111) /* stop output, like ^S */
+#define TIOCOUTQ _IOR('t', 115, int) /* output queue size */
+
+#define TIOCGLTC _IOR('t', 116, struct ltchars)
+#define TIOCSLTC _IOW('t', 117, struct ltchars)
+#define TIOCSPGRP _IOW('t', 118, int)
+#define TIOCGPGRP _IOR('t', 119, int)
+
+#define TIOCEXCL 0x540C
+#define TIOCNXCL 0x540D
+#define TIOCSCTTY 0x540E
+
+#define TIOCSTI 0x5412
+#define TIOCMGET 0x5415
+#define TIOCMBIS 0x5416
+#define TIOCMBIC 0x5417
+#define TIOCMSET 0x5418
+# define TIOCM_LE 0x001
+# define TIOCM_DTR 0x002
+# define TIOCM_RTS 0x004
+# define TIOCM_ST 0x008
+# define TIOCM_SR 0x010
+# define TIOCM_CTS 0x020
+# define TIOCM_CAR 0x040
+# define TIOCM_RNG 0x080
+# define TIOCM_DSR 0x100
+# define TIOCM_CD TIOCM_CAR
+# define TIOCM_RI TIOCM_RNG
+#define TIOCM_OUT1 0x2000
+#define TIOCM_OUT2 0x4000
+#define TIOCM_LOOP 0x8000
+
+#define TIOCGSOFTCAR 0x5419
+#define TIOCSSOFTCAR 0x541A
+#define TIOCLINUX 0x541C
+#define TIOCCONS 0x541D
+#define TIOCGSERIAL 0x541E
+#define TIOCSSERIAL 0x541F
+#define TIOCPKT 0x5420
+# define TIOCPKT_DATA 0
+# define TIOCPKT_FLUSHREAD 1
+# define TIOCPKT_FLUSHWRITE 2
+# define TIOCPKT_STOP 4
+# define TIOCPKT_START 8
+# define TIOCPKT_NOSTOP 16
+# define TIOCPKT_DOSTOP 32
+
+
+#define TIOCNOTTY 0x5422
+#define TIOCSETD 0x5423
+#define TIOCGETD 0x5424
+#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
+#define TIOCSBRK 0x5427 /* BSD compatibility */
+#define TIOCCBRK 0x5428 /* BSD compatibility */
+#define TIOCGSID 0x5429 /* Return the session ID of FD */
+#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
+#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
+
+#define TIOCSERCONFIG 0x5453
+#define TIOCSERGWILD 0x5454
+#define TIOCSERSWILD 0x5455
+#define TIOCGLCKTRMIOS 0x5456
+#define TIOCSLCKTRMIOS 0x5457
+#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
+#define TIOCSERGETLSR 0x5459 /* Get line status register */
+ /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
+# define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
+#define TIOCSERGETMULTI 0x545A /* Get multiport config */
+#define TIOCSERSETMULTI 0x545B /* Set multiport config */
+
+#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
+#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
+
+#endif /* _ASM_POWERPC_IOCTLS_H */
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
new file mode 100644
index 0000000..51ecfef
--- /dev/null
+++ b/arch/powerpc/include/asm/iommu.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
+ * Rewrite, cleanup:
+ * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_IOMMU_H
+#define _ASM_IOMMU_H
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+#include <asm/machdep.h>
+#include <asm/types.h>
+
+#define IOMMU_PAGE_SHIFT 12
+#define IOMMU_PAGE_SIZE (ASM_CONST(1) << IOMMU_PAGE_SHIFT)
+#define IOMMU_PAGE_MASK (~((1 << IOMMU_PAGE_SHIFT) - 1))
+#define IOMMU_PAGE_ALIGN(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE)
+
+/* Boot time flags */
+extern int iommu_is_off;
+extern int iommu_force_on;
+
+/* Pure 2^n version of get_order */
+static __inline__ __attribute_const__ int get_iommu_order(unsigned long size)
+{
+ return __ilog2((size - 1) >> IOMMU_PAGE_SHIFT) + 1;
+}
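+/*
+ * For example, with the 4k IOMMU page size above, a 12k request spans
+ * three IOMMU pages and thus rounds up to order 2 (four pages):
+ *
+ *	get_iommu_order(0x3000) == 2
+ */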
+
+
+/*
+ * IOMAP_MAX_ORDER defines the largest contiguous block
+ * of dma space we can get. IOMAP_MAX_ORDER = 13
+ * allows up to 2**12 pages (4096 pages of 4096 bytes) = 16 MB
+ */
+#define IOMAP_MAX_ORDER 13
+
+struct iommu_table {
+ unsigned long it_busno; /* Bus number this table belongs to */
+ unsigned long it_size; /* Size of iommu table in entries */
+ unsigned long it_offset; /* Offset into global table */
+ unsigned long it_base; /* mapped address of tce table */
+ unsigned long it_index; /* which iommu table this is */
+ unsigned long it_type; /* type: PCI or Virtual Bus */
+ unsigned long it_blocksize; /* Entries in each block (cacheline) */
+ unsigned long it_hint; /* Hint for next alloc */
+ unsigned long it_largehint; /* Hint for large allocs */
+ unsigned long it_halfpoint; /* Breaking point for small/large allocs */
+ spinlock_t it_lock; /* Protects it_map */
+ unsigned long *it_map; /* A simple allocation bitmap for now */
+};
+
+struct scatterlist;
+
+/* Frees table for an individual device node */
+extern void iommu_free_table(struct iommu_table *tbl, const char *node_name);
+
+/* Initializes an iommu_table based on values set in the passed-in
+ * structure
+ */
+extern struct iommu_table *iommu_init_table(struct iommu_table * tbl,
+ int nid);
+
+extern int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
+ struct scatterlist *sglist, int nelems,
+ unsigned long mask, enum dma_data_direction direction,
+ struct dma_attrs *attrs);
+extern void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
+ int nelems, enum dma_data_direction direction,
+ struct dma_attrs *attrs);
+
+extern void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
+ size_t size, dma_addr_t *dma_handle,
+ unsigned long mask, gfp_t flag, int node);
+extern void iommu_free_coherent(struct iommu_table *tbl, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
+extern dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
+ void *vaddr, size_t size, unsigned long mask,
+ enum dma_data_direction direction,
+ struct dma_attrs *attrs);
+extern void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction direction,
+ struct dma_attrs *attrs);
+
+extern void iommu_init_early_pSeries(void);
+extern void iommu_init_early_iSeries(void);
+extern void iommu_init_early_dart(void);
+extern void iommu_init_early_pasemi(void);
+
+#ifdef CONFIG_PCI
+extern void pci_iommu_init(void);
+extern void pci_direct_iommu_init(void);
+#else
+static inline void pci_iommu_init(void) { }
+#endif
+
+extern void alloc_dart_table(void);
+#if defined(CONFIG_PPC64) && defined(CONFIG_PM)
+static inline void iommu_save(void)
+{
+ if (ppc_md.iommu_save)
+ ppc_md.iommu_save();
+}
+
+static inline void iommu_restore(void)
+{
+ if (ppc_md.iommu_restore)
+ ppc_md.iommu_restore();
+}
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_IOMMU_H */
diff --git a/arch/powerpc/include/asm/ipcbuf.h b/arch/powerpc/include/asm/ipcbuf.h
new file mode 100644
index 0000000..2c3e1d9
--- /dev/null
+++ b/arch/powerpc/include/asm/ipcbuf.h
@@ -0,0 +1,34 @@
+#ifndef _ASM_POWERPC_IPCBUF_H
+#define _ASM_POWERPC_IPCBUF_H
+
+/*
+ * The ipc64_perm structure for the powerpc is identical to
+ * kern_ipc_perm as we have always had 32-bit UIDs and GIDs in the
+ * kernel. Note extra padding because this structure is passed back
+ * and forth between kernel and user space. Pad space is left for:
+ * - 1 32-bit value to fill up for 8-byte alignment
+ * - 2 miscellaneous 64-bit values
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/types.h>
+
+struct ipc64_perm
+{
+ __kernel_key_t key;
+ __kernel_uid_t uid;
+ __kernel_gid_t gid;
+ __kernel_uid_t cuid;
+ __kernel_gid_t cgid;
+ __kernel_mode_t mode;
+ unsigned int seq;
+ unsigned int __pad1;
+ unsigned long long __unused1;
+ unsigned long long __unused2;
+};
+
+#endif /* _ASM_POWERPC_IPCBUF_H */
diff --git a/arch/powerpc/include/asm/ipic.h b/arch/powerpc/include/asm/ipic.h
new file mode 100644
index 0000000..fb59829
--- /dev/null
+++ b/arch/powerpc/include/asm/ipic.h
@@ -0,0 +1,84 @@
+/*
+ * IPIC external definitions and structure.
+ *
+ * Maintainer: Kumar Gala <galak@kernel.crashing.org>
+ *
+ * Copyright 2005 Freescale Semiconductor, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#ifdef __KERNEL__
+#ifndef __ASM_IPIC_H__
+#define __ASM_IPIC_H__
+
+#include <linux/irq.h>
+
+/* Flags when we init the IPIC */
+#define IPIC_SPREADMODE_GRP_A 0x00000001
+#define IPIC_SPREADMODE_GRP_B 0x00000002
+#define IPIC_SPREADMODE_GRP_C 0x00000004
+#define IPIC_SPREADMODE_GRP_D 0x00000008
+#define IPIC_SPREADMODE_MIX_A 0x00000010
+#define IPIC_SPREADMODE_MIX_B 0x00000020
+#define IPIC_DISABLE_MCP_OUT 0x00000040
+#define IPIC_IRQ0_MCP 0x00000080
+
+/* IPIC registers offsets */
+#define IPIC_SICFR 0x00 /* System Global Interrupt Configuration Register */
+#define IPIC_SIVCR 0x04 /* System Global Interrupt Vector Register */
+#define IPIC_SIPNR_H 0x08 /* System Internal Interrupt Pending Register (HIGH) */
+#define IPIC_SIPNR_L 0x0C /* System Internal Interrupt Pending Register (LOW) */
+#define IPIC_SIPRR_A 0x10 /* System Internal Interrupt group A Priority Register */
+#define IPIC_SIPRR_B 0x14 /* System Internal Interrupt group B Priority Register */
+#define IPIC_SIPRR_C 0x18 /* System Internal Interrupt group C Priority Register */
+#define IPIC_SIPRR_D 0x1C /* System Internal Interrupt group D Priority Register */
+#define IPIC_SIMSR_H 0x20 /* System Internal Interrupt Mask Register (HIGH) */
+#define IPIC_SIMSR_L 0x24 /* System Internal Interrupt Mask Register (LOW) */
+#define IPIC_SICNR 0x28 /* System Internal Interrupt Control Register */
+#define IPIC_SEPNR 0x2C /* System External Interrupt Pending Register */
+#define IPIC_SMPRR_A 0x30 /* System Mixed Interrupt group A Priority Register */
+#define IPIC_SMPRR_B 0x34 /* System Mixed Interrupt group B Priority Register */
+#define IPIC_SEMSR 0x38 /* System External Interrupt Mask Register */
+#define IPIC_SECNR 0x3C /* System External Interrupt Control Register */
+#define IPIC_SERSR 0x40 /* System Error Status Register */
+#define IPIC_SERMR 0x44 /* System Error Mask Register */
+#define IPIC_SERCR 0x48 /* System Error Control Register */
+#define IPIC_SIFCR_H 0x50 /* System Internal Interrupt Force Register (HIGH) */
+#define IPIC_SIFCR_L 0x54 /* System Internal Interrupt Force Register (LOW) */
+#define IPIC_SEFCR 0x58 /* System External Interrupt Force Register */
+#define IPIC_SERFR 0x5C /* System Error Force Register */
+#define IPIC_SCVCR 0x60 /* System Critical Interrupt Vector Register */
+#define IPIC_SMVCR 0x64 /* System Management Interrupt Vector Register */
+
+enum ipic_prio_grp {
+ IPIC_INT_GRP_A = IPIC_SIPRR_A,
+ IPIC_INT_GRP_D = IPIC_SIPRR_D,
+ IPIC_MIX_GRP_A = IPIC_SMPRR_A,
+ IPIC_MIX_GRP_B = IPIC_SMPRR_B,
+};
+
+enum ipic_mcp_irq {
+ IPIC_MCP_IRQ0 = 0,
+ IPIC_MCP_WDT = 1,
+ IPIC_MCP_SBA = 2,
+ IPIC_MCP_PCI1 = 5,
+ IPIC_MCP_PCI2 = 6,
+ IPIC_MCP_MU = 7,
+};
+
+extern int ipic_set_priority(unsigned int irq, unsigned int priority);
+extern void ipic_set_highest_priority(unsigned int irq);
+extern void ipic_set_default_priority(void);
+extern void ipic_enable_mcp(enum ipic_mcp_irq mcp_irq);
+extern void ipic_disable_mcp(enum ipic_mcp_irq mcp_irq);
+extern u32 ipic_get_mcp_status(void);
+extern void ipic_clear_mcp_status(u32 mask);
+
+extern struct ipic * ipic_init(struct device_node *node, unsigned int flags);
+extern unsigned int ipic_get_irq(void);
+
+#endif /* __ASM_IPIC_H__ */
+#endif /* __KERNEL__ */
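Illustrative board-setup use of the IPIC interface above (editor's sketch, not part of the patch; the "fsl,ipic" compatible string and the hook name are assumptions):

#include <linux/init.h>
#include <asm/prom.h>
#include <asm/ipic.h>

static void __init board_init_IRQ(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "fsl,ipic");
	if (!np)
		return;

	ipic_init(np, 0);		/* no spread mode, MCP output left enabled */
	ipic_set_default_priority();	/* restore default group priorities */
	of_node_put(np);

	/* the platform would then point ppc_md.get_irq at ipic_get_irq */
}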
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
new file mode 100644
index 0000000..a372f76
--- /dev/null
+++ b/arch/powerpc/include/asm/irq.h
@@ -0,0 +1,366 @@
+#ifdef __KERNEL__
+#ifndef _ASM_POWERPC_IRQ_H
+#define _ASM_POWERPC_IRQ_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/threads.h>
+#include <linux/list.h>
+#include <linux/radix-tree.h>
+
+#include <asm/types.h>
+#include <asm/atomic.h>
+
+
+#define get_irq_desc(irq) (&irq_desc[(irq)])
+
+/* Define a way to iterate across irqs. */
+#define for_each_irq(i) \
+ for ((i) = 0; (i) < NR_IRQS; ++(i))
+
+extern atomic_t ppc_n_lost_interrupts;
+
+/* This number is used when no interrupt has been assigned */
+#define NO_IRQ (0)
+
+/* This is a special irq number returned from get_irq() to indicate that no
+ * interrupt happened _and_ that it should be ignored (not counted as bad).
+ * Some platforms like iSeries rely on that.
+ */
+#define NO_IRQ_IGNORE ((unsigned int)-1)
+
+/* Total number of virqs in the platform (make it a CONFIG_* option?) */
+#define NR_IRQS 512
+
+/* Number of irqs reserved for the legacy controller */
+#define NUM_ISA_INTERRUPTS 16
+
+/* This type is the placeholder for a hardware interrupt number. It has to
+ * be big enough to enclose whatever representation is used by a given
+ * platform.
+ */
+typedef unsigned long irq_hw_number_t;
+
+/* Interrupt controller "host" data structure. This could be defined as an
+ * irq domain controller. That is, it handles the mapping between hardware
+ * and virtual interrupt numbers for a given interrupt domain. The host
+ * structure is generally created by the PIC code for a given PIC instance
+ * (though a host can cover more than one PIC if they have a flat number
+ * model). It's the host callbacks that are responsible for setting the
+ * irq_chip on a given irq_desc after it's been mapped.
+ *
+ * The host code and data structures are fairly agnostic to the fact that
+ * we use an open firmware device-tree. We do have references to struct
+ * device_node in two places: in irq_find_host() to find the host matching
+ * a given interrupt controller node, and of course as an argument to its
+ * counterpart host->ops->match() callback. However, those are treated as
+ * generic pointers by the core and the fact that it's actually a device-node
+ * pointer is purely a convention between callers and implementation. This
+ * code could thus be used on other architectures by replacing those two
+ * by some sort of arch-specific void * "token" used to identify interrupt
+ * controllers.
+ */
+struct irq_host;
+struct radix_tree_root;
+
+/* Functions below are provided by the host and called whenever a new mapping
+ * is created or an old mapping is disposed. The host can then proceed to
+ * whatever internal data structure management is required. It also needs
+ * to set up the irq_desc when returning from map().
+ */
+struct irq_host_ops {
+ /* Match an interrupt controller device node to a host, returns
+ * 1 on a match
+ */
+ int (*match)(struct irq_host *h, struct device_node *node);
+
+ /* Create or update a mapping between a virtual irq number and a hw
+ * irq number. This is called only once for a given mapping.
+ */
+ int (*map)(struct irq_host *h, unsigned int virq, irq_hw_number_t hw);
+
+ /* Dispose of such a mapping */
+ void (*unmap)(struct irq_host *h, unsigned int virq);
+
+ /* Update of such a mapping */
+ void (*remap)(struct irq_host *h, unsigned int virq, irq_hw_number_t hw);
+
+ /* Translate device-tree interrupt specifier from raw format coming
+ * from the firmware to an irq_hw_number_t (interrupt line number) and
+ * type (sense) that can be passed to set_irq_type(). In the absence
+ * of this callback, irq_create_of_mapping() and irq_of_parse_and_map()
+ * will return the hw number in the first cell and IRQ_TYPE_NONE for
+ * the type (which amounts to keeping whatever default value the
+ * interrupt controller has for that line).
+ */
+ int (*xlate)(struct irq_host *h, struct device_node *ctrler,
+ u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type);
+};
+
+struct irq_host {
+ struct list_head link;
+
+ /* type of reverse mapping technique */
+ unsigned int revmap_type;
+#define IRQ_HOST_MAP_LEGACY 0 /* legacy 8259, gets irqs 1..15 */
+#define IRQ_HOST_MAP_NOMAP 1 /* no fast reverse mapping */
+#define IRQ_HOST_MAP_LINEAR 2 /* linear map of interrupts */
+#define IRQ_HOST_MAP_TREE 3 /* radix tree */
+ union {
+ struct {
+ unsigned int size;
+ unsigned int *revmap;
+ } linear;
+ struct radix_tree_root tree;
+ } revmap_data;
+ struct irq_host_ops *ops;
+ void *host_data;
+ irq_hw_number_t inval_irq;
+
+ /* Optional device node pointer */
+ struct device_node *of_node;
+};
+
+/* The main irq map itself is an array of NR_IRQS entries containing the
+ * associated host and irq number. An entry with a host of NULL is free.
+ * An entry can be allocated if it's free; the allocator then sets
+ * hwirq first to the host's invalid irq number and then fills ops.
+ */
+struct irq_map_entry {
+ irq_hw_number_t hwirq;
+ struct irq_host *host;
+};
+
+extern struct irq_map_entry irq_map[NR_IRQS];
+
+extern irq_hw_number_t virq_to_hw(unsigned int virq);
+
+/**
+ * irq_alloc_host - Allocate a new irq_host data structure
+ * @of_node: optional device-tree node of the interrupt controller
+ * @revmap_type: type of reverse mapping to use
+ * @revmap_arg: for IRQ_HOST_MAP_LINEAR only: size of the map
+ * @ops: map/unmap host callbacks
+ * @inval_irq: provide a hw number in that host space that is always invalid
+ *
+ * Allocates and initializes an irq_host structure. Note that in the case of
+ * IRQ_HOST_MAP_LEGACY, the map() callback will be called before this returns
+ * for all legacy interrupts except 0 (which is always the invalid irq for
+ * a legacy controller). For an IRQ_HOST_MAP_LINEAR, the map is allocated by
+ * this call as well. For an IRQ_HOST_MAP_TREE, the radix tree will be allocated
+ * later during boot automatically (the reverse mapping will use the slow path
+ * until that happens).
+ */
+extern struct irq_host *irq_alloc_host(struct device_node *of_node,
+ unsigned int revmap_type,
+ unsigned int revmap_arg,
+ struct irq_host_ops *ops,
+ irq_hw_number_t inval_irq);
+
+
+/**
+ * irq_find_host - Locates a host for a given device node
+ * @node: device-tree node of the interrupt controller
+ */
+extern struct irq_host *irq_find_host(struct device_node *node);
+
+
+/**
+ * irq_set_default_host - Set a "default" host
+ * @host: default host pointer
+ *
+ * For convenience, it's possible to set a "default" host that will be used
+ * whenever NULL is passed to irq_create_mapping(). It makes life easier for
+ * platforms that want to manipulate a few hard coded interrupt numbers that
+ * aren't properly represented in the device-tree.
+ */
+extern void irq_set_default_host(struct irq_host *host);
+
+
+/**
+ * irq_set_virq_count - Set the maximum number of virt irqs
+ * @count: number of linux virtual irqs, capped with NR_IRQS
+ *
+ * This is mainly for use by platforms like iSeries that want to program
+ * the virtual irq number in the controller to avoid the reverse mapping.
+ */
+extern void irq_set_virq_count(unsigned int count);
+
+
+/**
+ * irq_create_mapping - Map a hardware interrupt into linux virq space
+ * @host: host owning this hardware interrupt or NULL for default host
+ * @hwirq: hardware irq number in that host space
+ *
+ * Only one mapping per hardware interrupt is permitted. Returns a linux
+ * virq number.
+ * If the sense/trigger is to be specified, set_irq_type() should be called
+ * on the number returned from that call.
+ */
+extern unsigned int irq_create_mapping(struct irq_host *host,
+ irq_hw_number_t hwirq);
+
+
+/**
+ * irq_dispose_mapping - Unmap an interrupt
+ * @virq: linux virq number of the interrupt to unmap
+ */
+extern void irq_dispose_mapping(unsigned int virq);
+
+/**
+ * irq_find_mapping - Find a linux virq from a hw irq number.
+ * @host: host owning this hardware interrupt
+ * @hwirq: hardware irq number in that host space
+ *
+ * This is a slow path, for use by generic code. It's expected that an
+ * irq controller implementation directly calls the appropriate low level
+ * mapping function.
+ */
+extern unsigned int irq_find_mapping(struct irq_host *host,
+ irq_hw_number_t hwirq);
+
+/**
+ * irq_create_direct_mapping - Allocate a virq for direct mapping
+ * @host: host to allocate the virq for or NULL for default host
+ *
+ * This routine is used for irq controllers which can choose the hardware
+ * interrupt numbers they generate. In such a case it's simplest to use
+ * the linux virq as the hardware interrupt number.
+ */
+extern unsigned int irq_create_direct_mapping(struct irq_host *host);
+
+/**
+ * irq_radix_revmap - Find a linux virq from a hw irq number.
+ * @host: host owning this hardware interrupt
+ * @hwirq: hardware irq number in that host space
+ *
+ * This is a fast path, for use by irq controller code that uses radix tree
+ * revmaps
+ */
+extern unsigned int irq_radix_revmap(struct irq_host *host,
+ irq_hw_number_t hwirq);
+
+/**
+ * irq_linear_revmap - Find a linux virq from a hw irq number.
+ * @host: host owning this hardware interrupt
+ * @hwirq: hardware irq number in that host space
+ *
+ * This is a fast path, for use by irq controller code that uses linear
+ * revmaps. It falls back to the slow path if the revmap doesn't exist
+ * yet and will create the revmap entry with appropriate locking.
+ */
+
+extern unsigned int irq_linear_revmap(struct irq_host *host,
+ irq_hw_number_t hwirq);
+
+
+
+/**
+ * irq_alloc_virt - Allocate virtual irq numbers
+ * @host: host owning these new virtual irqs
+ * @count: number of consecutive numbers to allocate
+ * @hint: pass a hint number; the allocator will try to use a 1:1 mapping
+ *
+ * This is a low level function that is used internally by irq_create_mapping()
+ * and that can be used by some irq controller implementations for things
+ * like allocating ranges of numbers for MSIs. The revmaps are left untouched.
+ */
+extern unsigned int irq_alloc_virt(struct irq_host *host,
+ unsigned int count,
+ unsigned int hint);
+
+/**
+ * irq_free_virt - Free virtual irq numbers
+ * @virq: virtual irq number of the first interrupt to free
+ * @count: number of interrupts to free
+ *
+ * This function is the opposite of irq_alloc_virt. It will not clear reverse
+ * maps; this should be done beforehand by unmapping the interrupt. In fact,
+ * all interrupts covered by the range being freed should have been unmapped
+ * prior to calling this.
+ */
+extern void irq_free_virt(unsigned int virq, unsigned int count);
+
+
+/* -- OF helpers -- */
+
+/* irq_create_of_mapping - Map a hardware interrupt into linux virq space
+ * @controller: Device node of the interrupt controller
+ * @intspec: Interrupt specifier from the device-tree
+ * @intsize: Size of the interrupt specifier from the device-tree
+ *
+ * This function is identical to irq_create_mapping except that it takes
+ * as input information straight from the device-tree (typically the results
+ * of the of_irq_map_*() functions).
+ */
+extern unsigned int irq_create_of_mapping(struct device_node *controller,
+ u32 *intspec, unsigned int intsize);
+
+
+/* irq_of_parse_and_map - Parse and map an interrupt into linux virq space
+ * @device: Device node of the device whose interrupt is to be mapped
+ * @index: Index of the interrupt to map
+ *
+ * This function is a wrapper that chains of_irq_map_one() and
+ * irq_create_of_mapping() to make things easier for callers.
+ */
+extern unsigned int irq_of_parse_and_map(struct device_node *dev, int index);
+
+/* -- End OF helpers -- */
+
+/**
+ * irq_early_init - Init irq remapping subsystem
+ */
+extern void irq_early_init(void);
+
+static __inline__ int irq_canonicalize(int irq)
+{
+ return irq;
+}
+
+extern int distribute_irqs;
+
+struct irqaction;
+struct pt_regs;
+
+#define __ARCH_HAS_DO_SOFTIRQ
+
+#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+/*
+ * Per-cpu stacks for handling critical, debug and machine check
+ * level interrupts.
+ */
+extern struct thread_info *critirq_ctx[NR_CPUS];
+extern struct thread_info *dbgirq_ctx[NR_CPUS];
+extern struct thread_info *mcheckirq_ctx[NR_CPUS];
+extern void exc_lvl_ctx_init(void);
+#else
+#define exc_lvl_ctx_init()
+#endif
+
+#ifdef CONFIG_IRQSTACKS
+/*
+ * Per-cpu stacks for handling hard and soft interrupts.
+ */
+extern struct thread_info *hardirq_ctx[NR_CPUS];
+extern struct thread_info *softirq_ctx[NR_CPUS];
+
+extern void irq_ctx_init(void);
+extern void call_do_softirq(struct thread_info *tp);
+extern int call_handle_irq(int irq, void *p1,
+ struct thread_info *tp, void *func);
+#else
+#define irq_ctx_init()
+
+#endif /* CONFIG_IRQSTACKS */
+
+extern void do_IRQ(struct pt_regs *regs);
+
+#endif /* _ASM_IRQ_H */
+#endif /* __KERNEL__ */
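A sketch of how a PIC driver would use the irq_host interface above (editor's illustration, not part of the patch; the my_pic_* names are placeholders and the irq_chip operations are omitted):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <asm/prom.h>
#include <asm/irq.h>

static struct irq_chip my_pic_chip;	/* mask/unmask/eoi ops omitted */

static int my_pic_map(struct irq_host *h, unsigned int virq, irq_hw_number_t hw)
{
	/* called once per new mapping: attach the chip and a flow handler */
	set_irq_chip_and_handler(virq, &my_pic_chip, handle_level_irq);
	return 0;
}

static struct irq_host_ops my_pic_ops = {
	.map = my_pic_map,
};

static void __init my_pic_init(struct device_node *np, unsigned int nr_hwirqs)
{
	struct irq_host *host;
	unsigned int virq;

	host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, nr_hwirqs,
			      &my_pic_ops, nr_hwirqs /* invalid hwirq */);
	if (!host)
		return;

	/* hardware irq 3 becomes a Linux virq usable with request_irq() */
	virq = irq_create_mapping(host, 3);
	if (virq == NO_IRQ)
		printk(KERN_ERR "my_pic: failed to map hwirq 3\n");
}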
diff --git a/arch/powerpc/include/asm/irq_regs.h b/arch/powerpc/include/asm/irq_regs.h
new file mode 100644
index 0000000..ba94b51
--- /dev/null
+++ b/arch/powerpc/include/asm/irq_regs.h
@@ -0,0 +1,2 @@
+#include <asm-generic/irq_regs.h>
+
diff --git a/arch/powerpc/include/asm/irqflags.h b/arch/powerpc/include/asm/irqflags.h
new file mode 100644
index 0000000..17ba3a8
--- /dev/null
+++ b/arch/powerpc/include/asm/irqflags.h
@@ -0,0 +1,42 @@
+/*
+ * IRQ flags handling
+ */
+#ifndef _ASM_IRQFLAGS_H
+#define _ASM_IRQFLAGS_H
+
+#ifndef __ASSEMBLY__
+/*
+ * Get definitions for raw_local_save_flags(x), etc.
+ */
+#include <asm/hw_irq.h>
+
+#else
+#ifdef CONFIG_TRACE_IRQFLAGS
+/*
+ * Most of the CPU's IRQ-state tracing is done from assembly code; we
+ * have to call a C function so call a wrapper that saves all the
+ * C-clobbered registers.
+ */
+#define TRACE_ENABLE_INTS bl .trace_hardirqs_on
+#define TRACE_DISABLE_INTS bl .trace_hardirqs_off
+#define TRACE_AND_RESTORE_IRQ_PARTIAL(en,skip) \
+ cmpdi en, 0; \
+ bne 95f; \
+ stb en,PACASOFTIRQEN(r13); \
+ bl .trace_hardirqs_off; \
+ b skip; \
+95: bl .trace_hardirqs_on; \
+ li en,1;
+#define TRACE_AND_RESTORE_IRQ(en) \
+ TRACE_AND_RESTORE_IRQ_PARTIAL(en,96f); \
+96: stb en,PACASOFTIRQEN(r13)
+#else
+#define TRACE_ENABLE_INTS
+#define TRACE_DISABLE_INTS
+#define TRACE_AND_RESTORE_IRQ_PARTIAL(en,skip)
+#define TRACE_AND_RESTORE_IRQ(en) \
+ stb en,PACASOFTIRQEN(r13)
+#endif
+#endif
+
+#endif
diff --git a/arch/powerpc/include/asm/iseries/alpaca.h b/arch/powerpc/include/asm/iseries/alpaca.h
new file mode 100644
index 0000000..c0cce67
--- /dev/null
+++ b/arch/powerpc/include/asm/iseries/alpaca.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright © 2008 Stephen Rothwell IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _ASM_POWERPC_ISERIES_ALPACA_H
+#define _ASM_POWERPC_ISERIES_ALPACA_H
+
+/*
+ * This is the part of the paca that the iSeries hypervisor
+ * needs to be statically initialised. Immediately after boot
+ * we switch to the normal Linux paca.
+ */
+struct alpaca {
+ struct lppaca *lppaca_ptr; /* Pointer to LpPaca for PLIC */
+ const void *reg_save_ptr; /* Pointer to LpRegSave for PLIC */
+};
+
+#endif /* _ASM_POWERPC_ISERIES_ALPACA_H */
diff --git a/arch/powerpc/include/asm/iseries/hv_call.h b/arch/powerpc/include/asm/iseries/hv_call.h
new file mode 100644
index 0000000..162d653
--- /dev/null
+++ b/arch/powerpc/include/asm/iseries/hv_call.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * This file contains the "hypervisor call" interface which is used to
+ * drive the hypervisor from the OS.
+ */
+#ifndef _ASM_POWERPC_ISERIES_HV_CALL_H
+#define _ASM_POWERPC_ISERIES_HV_CALL_H
+
+#include <asm/iseries/hv_call_sc.h>
+#include <asm/iseries/hv_types.h>
+#include <asm/paca.h>
+
+/* Type of yield for HvCallBaseYieldProcessor */
+#define HvCall_YieldTimed 0 /* Yield until specified time (tb) */
+#define HvCall_YieldToActive 1 /* Yield until all active procs have run */
+#define HvCall_YieldToProc 2 /* Yield until the specified processor has run */
+
+/* interrupt masks for setEnabledInterrupts */
+#define HvCall_MaskIPI 0x00000001
+#define HvCall_MaskLpEvent 0x00000002
+#define HvCall_MaskLpProd 0x00000004
+#define HvCall_MaskTimeout 0x00000008
+
+/* Log buffer formats */
+#define HvCall_LogBuffer_ASCII 0
+#define HvCall_LogBuffer_EBCDIC 1
+
+#define HvCallBaseAckDeferredInts HvCallBase + 0
+#define HvCallBaseCpmPowerOff HvCallBase + 1
+#define HvCallBaseGetHwPatch HvCallBase + 2
+#define HvCallBaseReIplSpAttn HvCallBase + 3
+#define HvCallBaseSetASR HvCallBase + 4
+#define HvCallBaseSetASRAndRfi HvCallBase + 5
+#define HvCallBaseSetIMR HvCallBase + 6
+#define HvCallBaseSendIPI HvCallBase + 7
+#define HvCallBaseTerminateMachine HvCallBase + 8
+#define HvCallBaseTerminateMachineSrc HvCallBase + 9
+#define HvCallBaseProcessPlicInterrupts HvCallBase + 10
+#define HvCallBaseIsPrimaryCpmOrMsdIpl HvCallBase + 11
+#define HvCallBaseSetVirtualSIT HvCallBase + 12
+#define HvCallBaseVaryOffThisProcessor HvCallBase + 13
+#define HvCallBaseVaryOffMemoryChunk HvCallBase + 14
+#define HvCallBaseVaryOffInteractivePercentage HvCallBase + 15
+#define HvCallBaseSendLpProd HvCallBase + 16
+#define HvCallBaseSetEnabledInterrupts HvCallBase + 17
+#define HvCallBaseYieldProcessor HvCallBase + 18
+#define HvCallBaseVaryOffSharedProcUnits HvCallBase + 19
+#define HvCallBaseSetVirtualDecr HvCallBase + 20
+#define HvCallBaseClearLogBuffer HvCallBase + 21
+#define HvCallBaseGetLogBufferCodePage HvCallBase + 22
+#define HvCallBaseGetLogBufferFormat HvCallBase + 23
+#define HvCallBaseGetLogBufferLength HvCallBase + 24
+#define HvCallBaseReadLogBuffer HvCallBase + 25
+#define HvCallBaseSetLogBufferFormatAndCodePage HvCallBase + 26
+#define HvCallBaseWriteLogBuffer HvCallBase + 27
+#define HvCallBaseRouter28 HvCallBase + 28
+#define HvCallBaseRouter29 HvCallBase + 29
+#define HvCallBaseRouter30 HvCallBase + 30
+#define HvCallBaseSetDebugBus HvCallBase + 31
+
+#define HvCallCcSetDABR HvCallCc + 7
+
+static inline void HvCall_setVirtualDecr(void)
+{
+ /*
+ * Ignore any error return codes - most likely means that the
+ * target value for the LP has been increased and this vary off
+ * would bring us below the new target.
+ */
+ HvCall0(HvCallBaseSetVirtualDecr);
+}
+
+static inline void HvCall_yieldProcessor(unsigned typeOfYield, u64 yieldParm)
+{
+ HvCall2(HvCallBaseYieldProcessor, typeOfYield, yieldParm);
+}
+
+static inline void HvCall_setEnabledInterrupts(u64 enabledInterrupts)
+{
+ HvCall1(HvCallBaseSetEnabledInterrupts, enabledInterrupts);
+}
+
+static inline void HvCall_setLogBufferFormatAndCodepage(int format,
+ u32 codePage)
+{
+ HvCall2(HvCallBaseSetLogBufferFormatAndCodePage, format, codePage);
+}
+
+extern void HvCall_writeLogBuffer(const void *buffer, u64 bufLen);
+
+static inline void HvCall_sendIPI(struct paca_struct *targetPaca)
+{
+ HvCall1(HvCallBaseSendIPI, targetPaca->paca_index);
+}
+
+#endif /* _ASM_POWERPC_ISERIES_HV_CALL_H */
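Illustrative use of the wrappers above (editor's sketch, not part of the patch):

#include <asm/iseries/hv_call.h>

static void my_enable_hv_interrupts(void)
{
	/* let PLIC present IPIs and LP events to this processor */
	HvCall_setEnabledInterrupts(HvCall_MaskIPI | HvCall_MaskLpEvent);

	/* give the processor back until all active partitions have run */
	HvCall_yieldProcessor(HvCall_YieldToActive, 0);
}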
diff --git a/arch/powerpc/include/asm/iseries/hv_call_event.h b/arch/powerpc/include/asm/iseries/hv_call_event.h
new file mode 100644
index 0000000..cc029d3
--- /dev/null
+++ b/arch/powerpc/include/asm/iseries/hv_call_event.h
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * This file contains the "hypervisor call" interface which is used to
+ * drive the hypervisor from the OS.
+ */
+#ifndef _ASM_POWERPC_ISERIES_HV_CALL_EVENT_H
+#define _ASM_POWERPC_ISERIES_HV_CALL_EVENT_H
+
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/iseries/hv_call_sc.h>
+#include <asm/iseries/hv_types.h>
+#include <asm/abs_addr.h>
+
+struct HvLpEvent;
+
+typedef u8 HvLpEvent_Type;
+typedef u8 HvLpEvent_AckInd;
+typedef u8 HvLpEvent_AckType;
+
+typedef u8 HvLpDma_Direction;
+typedef u8 HvLpDma_AddressType;
+
+typedef u64 HvLpEvent_Rc;
+typedef u64 HvLpDma_Rc;
+
+#define HvCallEventAckLpEvent HvCallEvent + 0
+#define HvCallEventCancelLpEvent HvCallEvent + 1
+#define HvCallEventCloseLpEventPath HvCallEvent + 2
+#define HvCallEventDmaBufList HvCallEvent + 3
+#define HvCallEventDmaSingle HvCallEvent + 4
+#define HvCallEventDmaToSp HvCallEvent + 5
+#define HvCallEventGetOverflowLpEvents HvCallEvent + 6
+#define HvCallEventGetSourceLpInstanceId HvCallEvent + 7
+#define HvCallEventGetTargetLpInstanceId HvCallEvent + 8
+#define HvCallEventOpenLpEventPath HvCallEvent + 9
+#define HvCallEventSetLpEventStack HvCallEvent + 10
+#define HvCallEventSignalLpEvent HvCallEvent + 11
+#define HvCallEventSignalLpEventParms HvCallEvent + 12
+#define HvCallEventSetInterLpQueueIndex HvCallEvent + 13
+#define HvCallEventSetLpEventQueueInterruptProc HvCallEvent + 14
+#define HvCallEventRouter15 HvCallEvent + 15
+
+static inline void HvCallEvent_getOverflowLpEvents(u8 queueIndex)
+{
+ HvCall1(HvCallEventGetOverflowLpEvents, queueIndex);
+}
+
+static inline void HvCallEvent_setInterLpQueueIndex(u8 queueIndex)
+{
+ HvCall1(HvCallEventSetInterLpQueueIndex, queueIndex);
+}
+
+static inline void HvCallEvent_setLpEventStack(u8 queueIndex,
+ char *eventStackAddr, u32 eventStackSize)
+{
+ HvCall3(HvCallEventSetLpEventStack, queueIndex,
+ virt_to_abs(eventStackAddr), eventStackSize);
+}
+
+static inline void HvCallEvent_setLpEventQueueInterruptProc(u8 queueIndex,
+ u16 lpLogicalProcIndex)
+{
+ HvCall2(HvCallEventSetLpEventQueueInterruptProc, queueIndex,
+ lpLogicalProcIndex);
+}
+
+static inline HvLpEvent_Rc HvCallEvent_signalLpEvent(struct HvLpEvent *event)
+{
+ return HvCall1(HvCallEventSignalLpEvent, virt_to_abs(event));
+}
+
+static inline HvLpEvent_Rc HvCallEvent_signalLpEventFast(HvLpIndex targetLp,
+ HvLpEvent_Type type, u16 subtype, HvLpEvent_AckInd ackInd,
+ HvLpEvent_AckType ackType, HvLpInstanceId sourceInstanceId,
+ HvLpInstanceId targetInstanceId, u64 correlationToken,
+ u64 eventData1, u64 eventData2, u64 eventData3,
+ u64 eventData4, u64 eventData5)
+{
+ /* Pack the misc bits into a single Dword to pass to PLIC */
+ union {
+ struct {
+ u8 ack_and_target;
+ u8 type;
+ u16 subtype;
+ HvLpInstanceId src_inst;
+ HvLpInstanceId target_inst;
+ } parms;
+ u64 dword;
+ } packed;
+
+ packed.parms.ack_and_target = (ackType << 7) | (ackInd << 6) | targetLp;
+ packed.parms.type = type;
+ packed.parms.subtype = subtype;
+ packed.parms.src_inst = sourceInstanceId;
+ packed.parms.target_inst = targetInstanceId;
+
+ return HvCall7(HvCallEventSignalLpEventParms, packed.dword,
+ correlationToken, eventData1, eventData2,
+ eventData3, eventData4, eventData5);
+}
+
+extern void *iseries_hv_alloc(size_t size, dma_addr_t *dma_handle, gfp_t flag);
+extern void iseries_hv_free(size_t size, void *vaddr, dma_addr_t dma_handle);
+extern dma_addr_t iseries_hv_map(void *vaddr, size_t size,
+ enum dma_data_direction direction);
+extern void iseries_hv_unmap(dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction);
+
+static inline HvLpEvent_Rc HvCallEvent_ackLpEvent(struct HvLpEvent *event)
+{
+ return HvCall1(HvCallEventAckLpEvent, virt_to_abs(event));
+}
+
+static inline HvLpEvent_Rc HvCallEvent_cancelLpEvent(struct HvLpEvent *event)
+{
+ return HvCall1(HvCallEventCancelLpEvent, virt_to_abs(event));
+}
+
+static inline HvLpInstanceId HvCallEvent_getSourceLpInstanceId(
+ HvLpIndex targetLp, HvLpEvent_Type type)
+{
+ return HvCall2(HvCallEventGetSourceLpInstanceId, targetLp, type);
+}
+
+static inline HvLpInstanceId HvCallEvent_getTargetLpInstanceId(
+ HvLpIndex targetLp, HvLpEvent_Type type)
+{
+ return HvCall2(HvCallEventGetTargetLpInstanceId, targetLp, type);
+}
+
+static inline void HvCallEvent_openLpEventPath(HvLpIndex targetLp,
+ HvLpEvent_Type type)
+{
+ HvCall2(HvCallEventOpenLpEventPath, targetLp, type);
+}
+
+static inline void HvCallEvent_closeLpEventPath(HvLpIndex targetLp,
+ HvLpEvent_Type type)
+{
+ HvCall2(HvCallEventCloseLpEventPath, targetLp, type);
+}
+
+static inline HvLpDma_Rc HvCallEvent_dmaBufList(HvLpEvent_Type type,
+ HvLpIndex remoteLp, HvLpDma_Direction direction,
+ HvLpInstanceId localInstanceId,
+ HvLpInstanceId remoteInstanceId,
+ HvLpDma_AddressType localAddressType,
+ HvLpDma_AddressType remoteAddressType,
+ /* Do these need to be converted to absolute addresses? */
+ u64 localBufList, u64 remoteBufList, u32 transferLength)
+{
+ /* Pack the misc bits into a single Dword to pass to PLIC */
+ union {
+ struct {
+ u8 flags;
+ HvLpIndex remote;
+ u8 type;
+ u8 reserved;
+ HvLpInstanceId local_inst;
+ HvLpInstanceId remote_inst;
+ } parms;
+ u64 dword;
+ } packed;
+
+ packed.parms.flags = (direction << 7) |
+ (localAddressType << 6) | (remoteAddressType << 5);
+ packed.parms.remote = remoteLp;
+ packed.parms.type = type;
+ packed.parms.reserved = 0;
+ packed.parms.local_inst = localInstanceId;
+ packed.parms.remote_inst = remoteInstanceId;
+
+ return HvCall4(HvCallEventDmaBufList, packed.dword, localBufList,
+ remoteBufList, transferLength);
+}
+
+static inline HvLpDma_Rc HvCallEvent_dmaToSp(void *local, u32 remote,
+ u32 length, HvLpDma_Direction dir)
+{
+ return HvCall4(HvCallEventDmaToSp, virt_to_abs(local), remote,
+ length, dir);
+}
+
+#endif /* _ASM_POWERPC_ISERIES_HV_CALL_EVENT_H */
diff --git a/arch/powerpc/include/asm/iseries/hv_call_sc.h b/arch/powerpc/include/asm/iseries/hv_call_sc.h
new file mode 100644
index 0000000..f5d2109
--- /dev/null
+++ b/arch/powerpc/include/asm/iseries/hv_call_sc.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _ASM_POWERPC_ISERIES_HV_CALL_SC_H
+#define _ASM_POWERPC_ISERIES_HV_CALL_SC_H
+
+#include <linux/types.h>
+
+#define HvCallBase 0x8000000000000000ul
+#define HvCallCc 0x8001000000000000ul
+#define HvCallCfg 0x8002000000000000ul
+#define HvCallEvent 0x8003000000000000ul
+#define HvCallHpt 0x8004000000000000ul
+#define HvCallPci 0x8005000000000000ul
+#define HvCallSm 0x8007000000000000ul
+#define HvCallXm 0x8009000000000000ul
+
+extern u64 HvCall0(u64);
+extern u64 HvCall1(u64, u64);
+extern u64 HvCall2(u64, u64, u64);
+extern u64 HvCall3(u64, u64, u64, u64);
+extern u64 HvCall4(u64, u64, u64, u64, u64);
+extern u64 HvCall5(u64, u64, u64, u64, u64, u64);
+extern u64 HvCall6(u64, u64, u64, u64, u64, u64, u64);
+extern u64 HvCall7(u64, u64, u64, u64, u64, u64, u64, u64);
+
+extern u64 HvCall0Ret16(u64, void *);
+extern u64 HvCall1Ret16(u64, void *, u64);
+extern u64 HvCall2Ret16(u64, void *, u64, u64);
+extern u64 HvCall3Ret16(u64, void *, u64, u64, u64);
+extern u64 HvCall4Ret16(u64, void *, u64, u64, u64, u64);
+extern u64 HvCall5Ret16(u64, void *, u64, u64, u64, u64, u64);
+extern u64 HvCall6Ret16(u64, void *, u64, u64, u64, u64, u64, u64);
+extern u64 HvCall7Ret16(u64, void *, u64, u64, u64, u64, u64, u64, u64);
+
+#endif /* _ASM_POWERPC_ISERIES_HV_CALL_SC_H */
diff --git a/arch/powerpc/include/asm/iseries/hv_call_xm.h b/arch/powerpc/include/asm/iseries/hv_call_xm.h
new file mode 100644
index 0000000..392ac3f
--- /dev/null
+++ b/arch/powerpc/include/asm/iseries/hv_call_xm.h
@@ -0,0 +1,61 @@
+/*
+ * This file contains the "hypervisor call" interface which is used to
+ * drive the hypervisor from SLIC.
+ */
+#ifndef _ASM_POWERPC_ISERIES_HV_CALL_XM_H
+#define _ASM_POWERPC_ISERIES_HV_CALL_XM_H
+
+#include <asm/iseries/hv_call_sc.h>
+#include <asm/iseries/hv_types.h>
+
+#define HvCallXmGetTceTableParms HvCallXm + 0
+#define HvCallXmTestBus HvCallXm + 1
+#define HvCallXmConnectBusUnit HvCallXm + 2
+#define HvCallXmLoadTod HvCallXm + 8
+#define HvCallXmTestBusUnit HvCallXm + 9
+#define HvCallXmSetTce HvCallXm + 11
+#define HvCallXmSetTces HvCallXm + 13
+
+static inline void HvCallXm_getTceTableParms(u64 cb)
+{
+ HvCall1(HvCallXmGetTceTableParms, cb);
+}
+
+static inline u64 HvCallXm_setTce(u64 tceTableToken, u64 tceOffset, u64 tce)
+{
+ return HvCall3(HvCallXmSetTce, tceTableToken, tceOffset, tce);
+}
+
+static inline u64 HvCallXm_setTces(u64 tceTableToken, u64 tceOffset,
+ u64 numTces, u64 tce1, u64 tce2, u64 tce3, u64 tce4)
+{
+ return HvCall7(HvCallXmSetTces, tceTableToken, tceOffset, numTces,
+ tce1, tce2, tce3, tce4);
+}
+
+static inline u64 HvCallXm_testBus(u16 busNumber)
+{
+ return HvCall1(HvCallXmTestBus, busNumber);
+}
+
+static inline u64 HvCallXm_testBusUnit(u16 busNumber, u8 subBusNumber,
+ u8 deviceId)
+{
+ return HvCall2(HvCallXmTestBusUnit, busNumber,
+ (subBusNumber << 8) | deviceId);
+}
+
+static inline u64 HvCallXm_connectBusUnit(u16 busNumber, u8 subBusNumber,
+ u8 deviceId, u64 interruptToken)
+{
+ return HvCall5(HvCallXmConnectBusUnit, busNumber,
+ (subBusNumber << 8) | deviceId, interruptToken, 0,
+ 0 /* HvLpConfig::mapDsaToQueueIndex(HvLpDSA(busNumber, xBoard, xCard)) */);
+}
+
+static inline u64 HvCallXm_loadTod(void)
+{
+ return HvCall0(HvCallXmLoadTod);
+}
+
+#endif /* _ASM_POWERPC_ISERIES_HV_CALL_XM_H */
diff --git a/arch/powerpc/include/asm/iseries/hv_lp_config.h b/arch/powerpc/include/asm/iseries/hv_lp_config.h
new file mode 100644
index 0000000..a006fd1
--- /dev/null
+++ b/arch/powerpc/include/asm/iseries/hv_lp_config.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _ASM_POWERPC_ISERIES_HV_LP_CONFIG_H
+#define _ASM_POWERPC_ISERIES_HV_LP_CONFIG_H
+
+/*
+ * This file contains the interface to the LPAR configuration data
+ * to determine which resources should be allocated to each partition.
+ */
+
+#include <asm/iseries/hv_call_sc.h>
+#include <asm/iseries/hv_types.h>
+
+enum {
+ HvCallCfg_Cur = 0,
+ HvCallCfg_Init = 1,
+ HvCallCfg_Max = 2,
+ HvCallCfg_Min = 3
+};
+
+#define HvCallCfgGetSystemPhysicalProcessors HvCallCfg + 6
+#define HvCallCfgGetPhysicalProcessors HvCallCfg + 7
+#define HvCallCfgGetMsChunks HvCallCfg + 9
+#define HvCallCfgGetSharedPoolIndex HvCallCfg + 20
+#define HvCallCfgGetSharedProcUnits HvCallCfg + 21
+#define HvCallCfgGetNumProcsInSharedPool HvCallCfg + 22
+#define HvCallCfgGetVirtualLanIndexMap HvCallCfg + 30
+#define HvCallCfgGetHostingLpIndex HvCallCfg + 32
+
+extern HvLpIndex HvLpConfig_getLpIndex_outline(void);
+extern HvLpIndex HvLpConfig_getLpIndex(void);
+extern HvLpIndex HvLpConfig_getPrimaryLpIndex(void);
+
+static inline u64 HvLpConfig_getMsChunks(void)
+{
+ return HvCall2(HvCallCfgGetMsChunks, HvLpConfig_getLpIndex(),
+ HvCallCfg_Cur);
+}
+
+static inline u64 HvLpConfig_getSystemPhysicalProcessors(void)
+{
+ return HvCall0(HvCallCfgGetSystemPhysicalProcessors);
+}
+
+static inline u64 HvLpConfig_getNumProcsInSharedPool(HvLpSharedPoolIndex sPI)
+{
+ return (u16)HvCall1(HvCallCfgGetNumProcsInSharedPool, sPI);
+}
+
+static inline u64 HvLpConfig_getPhysicalProcessors(void)
+{
+ return HvCall2(HvCallCfgGetPhysicalProcessors, HvLpConfig_getLpIndex(),
+ HvCallCfg_Cur);
+}
+
+static inline HvLpSharedPoolIndex HvLpConfig_getSharedPoolIndex(void)
+{
+ return HvCall1(HvCallCfgGetSharedPoolIndex, HvLpConfig_getLpIndex());
+}
+
+static inline u64 HvLpConfig_getSharedProcUnits(void)
+{
+ return HvCall2(HvCallCfgGetSharedProcUnits, HvLpConfig_getLpIndex(),
+ HvCallCfg_Cur);
+}
+
+static inline u64 HvLpConfig_getMaxSharedProcUnits(void)
+{
+ return HvCall2(HvCallCfgGetSharedProcUnits, HvLpConfig_getLpIndex(),
+ HvCallCfg_Max);
+}
+
+static inline u64 HvLpConfig_getMaxPhysicalProcessors(void)
+{
+ return HvCall2(HvCallCfgGetPhysicalProcessors, HvLpConfig_getLpIndex(),
+ HvCallCfg_Max);
+}
+
+static inline HvLpVirtualLanIndexMap HvLpConfig_getVirtualLanIndexMapForLp(
+ HvLpIndex lp)
+{
+ /*
+ * This is a new function in V5R1 so calls to this on older
+ * hypervisors will return -1
+ */
+ u64 retVal = HvCall1(HvCallCfgGetVirtualLanIndexMap, lp);
+ if (retVal == -1)
+ retVal = 0;
+ return retVal;
+}
+
+static inline HvLpVirtualLanIndexMap HvLpConfig_getVirtualLanIndexMap(void)
+{
+ return HvLpConfig_getVirtualLanIndexMapForLp(
+ HvLpConfig_getLpIndex_outline());
+}
+
+static inline int HvLpConfig_doLpsCommunicateOnVirtualLan(HvLpIndex lp1,
+ HvLpIndex lp2)
+{
+ HvLpVirtualLanIndexMap virtualLanIndexMap1 =
+ HvLpConfig_getVirtualLanIndexMapForLp(lp1);
+ HvLpVirtualLanIndexMap virtualLanIndexMap2 =
+ HvLpConfig_getVirtualLanIndexMapForLp(lp2);
+ return ((virtualLanIndexMap1 & virtualLanIndexMap2) != 0);
+}
+
+static inline HvLpIndex HvLpConfig_getHostingLpIndex(HvLpIndex lp)
+{
+ return HvCall1(HvCallCfgGetHostingLpIndex, lp);
+}
+
+#endif /* _ASM_POWERPC_ISERIES_HV_LP_CONFIG_H */
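A small worked example of the configuration queries above (editor's sketch, not part of the patch): the partition's main-store size follows from the chunk count, since HVCHUNKSIZE in hv_types.h is 256 KB.

#include <asm/iseries/hv_lp_config.h>

static u64 my_partition_mem_bytes(void)
{
	/* each main-store chunk is HVCHUNKSIZE (256 KB) */
	return HvLpConfig_getMsChunks() * HVCHUNKSIZE;
}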
diff --git a/arch/powerpc/include/asm/iseries/hv_lp_event.h b/arch/powerpc/include/asm/iseries/hv_lp_event.h
new file mode 100644
index 0000000..8f5da7d
--- /dev/null
+++ b/arch/powerpc/include/asm/iseries/hv_lp_event.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* This file contains the class for HV events in the system. */
+
+#ifndef _ASM_POWERPC_ISERIES_HV_LP_EVENT_H
+#define _ASM_POWERPC_ISERIES_HV_LP_EVENT_H
+
+#include <asm/types.h>
+#include <asm/ptrace.h>
+#include <asm/iseries/hv_types.h>
+#include <asm/iseries/hv_call_event.h>
+
+/*
+ * HvLpEvent is the structure for Lp Event messages passed between
+ * partitions through PLIC.
+ */
+
+struct HvLpEvent {
+ u8 flags; /* Event flags x00-x00 */
+ u8 xType; /* Type of message x01-x01 */
+ u16 xSubtype; /* Subtype for event x02-x03 */
+ u8 xSourceLp; /* Source LP x04-x04 */
+ u8 xTargetLp; /* Target LP x05-x05 */
+ u8 xSizeMinus1; /* Size of Derived class - 1 x06-x06 */
+ u8 xRc; /* RC for Ack flows x07-x07 */
+ u16 xSourceInstanceId; /* Source side's instance id x08-x09 */
+ u16 xTargetInstanceId; /* Target side's instance id x0A-x0B */
+ union {
+ u32 xSubtypeData; /* Data usable by the subtype x0C-x0F */
+ u16 xSubtypeDataShort[2]; /* Data as 2 shorts */
+ u8 xSubtypeDataChar[4]; /* Data as 4 chars */
+ } x;
+
+ u64 xCorrelationToken; /* Unique value for source/type x10-x17 */
+};
+
+typedef void (*LpEventHandler)(struct HvLpEvent *);
+
+/* Register a handler for an event type - returns 0 on success */
+extern int HvLpEvent_registerHandler(HvLpEvent_Type eventType,
+ LpEventHandler hdlr);
+
+/*
+ * Unregister a handler for an event type
+ *
+ * This call will sleep until the handler being removed is guaranteed to
+ * be no longer executing on any CPU. Do not call with locks held.
+ *
+ * returns 0 on success
+ * Unregister will fail if there are any paths open for the type
+ */
+extern int HvLpEvent_unregisterHandler(HvLpEvent_Type eventType);
+
+/*
+ * Open an Lp Event Path for an event type
+ * returns 0 on success
+ * openPath will fail if there is no handler registered for the event type.
+ * The lpIndex specified is the partition index for the target partition
+ * (for VirtualIo, VirtualLan and SessionMgr) other types specify zero)
+ */
+extern int HvLpEvent_openPath(HvLpEvent_Type eventType, HvLpIndex lpIndex);
+
+/*
+ * Close an Lp Event Path for a type and partition
+ * returns 0 on success
+ */
+extern int HvLpEvent_closePath(HvLpEvent_Type eventType, HvLpIndex lpIndex);
+
+#define HvLpEvent_Type_Hypervisor 0
+#define HvLpEvent_Type_MachineFac 1
+#define HvLpEvent_Type_SessionMgr 2
+#define HvLpEvent_Type_SpdIo 3
+#define HvLpEvent_Type_VirtualBus 4
+#define HvLpEvent_Type_PciIo 5
+#define HvLpEvent_Type_RioIo 6
+#define HvLpEvent_Type_VirtualLan 7
+#define HvLpEvent_Type_VirtualIo 8
+#define HvLpEvent_Type_NumTypes 9
+
+#define HvLpEvent_Rc_Good 0
+#define HvLpEvent_Rc_BufferNotAvailable 1
+#define HvLpEvent_Rc_Cancelled 2
+#define HvLpEvent_Rc_GenericError 3
+#define HvLpEvent_Rc_InvalidAddress 4
+#define HvLpEvent_Rc_InvalidPartition 5
+#define HvLpEvent_Rc_InvalidSize 6
+#define HvLpEvent_Rc_InvalidSubtype 7
+#define HvLpEvent_Rc_InvalidSubtypeData 8
+#define HvLpEvent_Rc_InvalidType 9
+#define HvLpEvent_Rc_PartitionDead 10
+#define HvLpEvent_Rc_PathClosed 11
+#define HvLpEvent_Rc_SubtypeError 12
+
+#define HvLpEvent_Function_Ack 0
+#define HvLpEvent_Function_Int 1
+
+#define HvLpEvent_AckInd_NoAck 0
+#define HvLpEvent_AckInd_DoAck 1
+
+#define HvLpEvent_AckType_ImmediateAck 0
+#define HvLpEvent_AckType_DeferredAck 1
+
+#define HV_LP_EVENT_INT 0x01
+#define HV_LP_EVENT_DO_ACK 0x02
+#define HV_LP_EVENT_DEFERRED_ACK 0x04
+#define HV_LP_EVENT_VALID 0x80
+
+#define HvLpDma_Direction_LocalToRemote 0
+#define HvLpDma_Direction_RemoteToLocal 1
+
+#define HvLpDma_AddressType_TceIndex 0
+#define HvLpDma_AddressType_RealAddress 1
+
+#define HvLpDma_Rc_Good 0
+#define HvLpDma_Rc_Error 1
+#define HvLpDma_Rc_PartitionDead 2
+#define HvLpDma_Rc_PathClosed 3
+#define HvLpDma_Rc_InvalidAddress 4
+#define HvLpDma_Rc_InvalidLength 5
+
+static inline int hvlpevent_is_valid(struct HvLpEvent *h)
+{
+ return h->flags & HV_LP_EVENT_VALID;
+}
+
+static inline void hvlpevent_invalidate(struct HvLpEvent *h)
+{
+ h->flags &= ~HV_LP_EVENT_VALID;
+}
+
+static inline int hvlpevent_is_int(struct HvLpEvent *h)
+{
+ return h->flags & HV_LP_EVENT_INT;
+}
+
+static inline int hvlpevent_is_ack(struct HvLpEvent *h)
+{
+ return !hvlpevent_is_int(h);
+}
+
+static inline int hvlpevent_need_ack(struct HvLpEvent *h)
+{
+ return h->flags & HV_LP_EVENT_DO_ACK;
+}
+
+#endif /* _ASM_POWERPC_ISERIES_HV_LP_EVENT_H */
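A sketch of a typical consumer of the LP event interface above (editor's illustration, not part of the patch; the my_* names and the VirtualIo type choice are placeholders):

#include <asm/iseries/hv_lp_event.h>

static void my_event_handler(struct HvLpEvent *event)
{
	/* ... dispatch on event->xSubtype here ... */

	if (hvlpevent_is_int(event) && hvlpevent_need_ack(event)) {
		event->xRc = HvLpEvent_Rc_Good;
		HvCallEvent_ackLpEvent(event);
	}
}

static int my_open_path(HvLpIndex target_lp)
{
	int rc;

	rc = HvLpEvent_registerHandler(HvLpEvent_Type_VirtualIo,
				       my_event_handler);
	if (rc)
		return rc;
	return HvLpEvent_openPath(HvLpEvent_Type_VirtualIo, target_lp);
}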
diff --git a/arch/powerpc/include/asm/iseries/hv_types.h b/arch/powerpc/include/asm/iseries/hv_types.h
new file mode 100644
index 0000000..c3e6d2a
--- /dev/null
+++ b/arch/powerpc/include/asm/iseries/hv_types.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _ASM_POWERPC_ISERIES_HV_TYPES_H
+#define _ASM_POWERPC_ISERIES_HV_TYPES_H
+
+/*
+ * General typedefs for the hypervisor.
+ */
+
+#include <asm/types.h>
+
+typedef u8 HvLpIndex;
+typedef u16 HvLpInstanceId;
+typedef u64 HvLpTOD;
+typedef u64 HvLpSystemSerialNum;
+typedef u8 HvLpDeviceSerialNum[12];
+typedef u16 HvLpSanHwSet;
+typedef u16 HvLpBus;
+typedef u16 HvLpBoard;
+typedef u16 HvLpCard;
+typedef u8 HvLpDeviceType[4];
+typedef u8 HvLpDeviceModel[3];
+typedef u64 HvIoToken;
+typedef u8 HvLpName[8];
+typedef u32 HvIoId;
+typedef u64 HvRealMemoryIndex;
+typedef u32 HvLpIndexMap; /* Must hold HVMAXARCHITECTEDLPS bits!!! */
+typedef u16 HvLpVrmIndex;
+typedef u32 HvXmGenerationId;
+typedef u8 HvLpBusPool;
+typedef u8 HvLpSharedPoolIndex;
+typedef u16 HvLpSharedProcUnitsX100;
+typedef u8 HvLpVirtualLanIndex;
+typedef u16 HvLpVirtualLanIndexMap; /* Must hold HVMAXARCHITECTEDVIRTUALLANS bits!!! */
+typedef u16 HvBusNumber; /* Hypervisor Bus Number */
+typedef u8 HvSubBusNumber; /* Hypervisor SubBus Number */
+typedef u8 HvAgentId; /* Hypervisor DevFn */
+
+
+#define HVMAXARCHITECTEDLPS 32
+#define HVMAXARCHITECTEDVIRTUALLANS 16
+#define HVMAXARCHITECTEDVIRTUALDISKS 32
+#define HVMAXARCHITECTEDVIRTUALCDROMS 8
+#define HVMAXARCHITECTEDVIRTUALTAPES 8
+#define HVCHUNKSIZE (256 * 1024)
+#define HVPAGESIZE (4 * 1024)
+#define HVLPMINMEGSPRIMARY 256
+#define HVLPMINMEGSSECONDARY 64
+#define HVCHUNKSPERMEG 4
+#define HVPAGESPERMEG 256
+#define HVPAGESPERCHUNK 64
+
+#define HvLpIndexInvalid ((HvLpIndex)0xff)
+
+/*
+ * Enums for the sub-components under PLIC
+ * Used in HvCall and HvPrimaryCall
+ */
+enum {
+ HvCallCompId = 0,
+ HvCallCpuCtlsCompId = 1,
+ HvCallCfgCompId = 2,
+ HvCallEventCompId = 3,
+ HvCallHptCompId = 4,
+ HvCallPciCompId = 5,
+ HvCallSlmCompId = 6,
+ HvCallSmCompId = 7,
+ HvCallSpdCompId = 8,
+ HvCallXmCompId = 9,
+ HvCallRioCompId = 10,
+ HvCallRsvd3CompId = 11,
+ HvCallRsvd2CompId = 12,
+ HvCallRsvd1CompId = 13,
+ HvCallMaxCompId = 14,
+ HvPrimaryCallCompId = 0,
+ HvPrimaryCallCfgCompId = 1,
+ HvPrimaryCallPciCompId = 2,
+ HvPrimaryCallSmCompId = 3,
+ HvPrimaryCallSpdCompId = 4,
+ HvPrimaryCallXmCompId = 5,
+ HvPrimaryCallRioCompId = 6,
+ HvPrimaryCallRsvd7CompId = 7,
+ HvPrimaryCallRsvd6CompId = 8,
+ HvPrimaryCallRsvd5CompId = 9,
+ HvPrimaryCallRsvd4CompId = 10,
+ HvPrimaryCallRsvd3CompId = 11,
+ HvPrimaryCallRsvd2CompId = 12,
+ HvPrimaryCallRsvd1CompId = 13,
+ HvPrimaryCallMaxCompId = HvCallMaxCompId
+};
+
+struct HvLpBufferList {
+ u64 addr;
+ u64 len;
+};
+
+#endif /* _ASM_POWERPC_ISERIES_HV_TYPES_H */
diff --git a/arch/powerpc/include/asm/iseries/iommu.h b/arch/powerpc/include/asm/iseries/iommu.h
new file mode 100644
index 0000000..c59ee7e
--- /dev/null
+++ b/arch/powerpc/include/asm/iseries/iommu.h
@@ -0,0 +1,41 @@
+#ifndef _ASM_POWERPC_ISERIES_IOMMU_H
+#define _ASM_POWERPC_ISERIES_IOMMU_H
+
+/*
+ * Copyright (C) 2005 Stephen Rothwell, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the:
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330,
+ * Boston, MA 02111-1307 USA
+ */
+
+struct pci_dev;
+struct vio_dev;
+struct device_node;
+struct iommu_table;
+
+/* Creates table for an individual device node */
+extern void iommu_devnode_init_iSeries(struct pci_dev *pdev,
+ struct device_node *dn);
+
+/* Get table parameters from HV */
+extern void iommu_table_getparms_iSeries(unsigned long busno,
+ unsigned char slotno, unsigned char virtbus,
+ struct iommu_table *tbl);
+
+extern struct iommu_table *vio_build_iommu_table_iseries(struct vio_dev *dev);
+extern void iommu_vio_init(void);
+
+#endif /* _ASM_POWERPC_ISERIES_IOMMU_H */
diff --git a/arch/powerpc/include/asm/iseries/it_lp_queue.h b/arch/powerpc/include/asm/iseries/it_lp_queue.h
new file mode 100644
index 0000000..4282788
--- /dev/null
+++ b/arch/powerpc/include/asm/iseries/it_lp_queue.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _ASM_POWERPC_ISERIES_IT_LP_QUEUE_H
+#define _ASM_POWERPC_ISERIES_IT_LP_QUEUE_H
+
+/*
+ * This control block defines the simple LP queue structure that is
+ * shared between the hypervisor (PLIC) and the OS in order to send
+ * events to an LP.
+ */
+
+#include <asm/types.h>
+#include <asm/ptrace.h>
+
+#define IT_LP_MAX_QUEUES 8
+
+#define IT_LP_NOT_USED 0 /* Queue will not be used by PLIC */
+#define IT_LP_DEDICATED_IO 1 /* Queue dedicated to IO processor specified */
+#define IT_LP_DEDICATED_LP 2 /* Queue dedicated to LP specified */
+#define IT_LP_SHARED 3 /* Queue shared for both IO and LP */
+
+#define IT_LP_EVENT_STACK_SIZE 4096
+#define IT_LP_EVENT_MAX_SIZE 256
+#define IT_LP_EVENT_ALIGN 64
+
+struct hvlpevent_queue {
+/*
+ * The hq_current_event is the pointer to the next event stack entry
+ * that will become valid. The OS must peek at this entry to determine
+ * if it is valid. PLIC will set the valid indicator as the very last
+ * store into that entry.
+ *
+ * When the OS has completed processing of the event then it will mark
+ * the event as invalid so that PLIC knows it can store into that event
+ * location again.
+ *
+ * If the event stack fills and there are overflow events, then PLIC
+ * will set the hq_overflow_pending flag in which case the OS will
+ * have to fetch the additional LP events once they have drained the
+ * event stack.
+ *
+ * The first 16 bytes are known by both the OS and PLIC. The remainder
+ * of the cache line is for use by the OS.
+ */
+ u8 hq_overflow_pending; /* 0x00 Overflow events are pending */
+ u8 hq_status; /* 0x01 DedicatedIo or DedicatedLp or NotUsed */
+ u16 hq_proc_index; /* 0x02 Logical Proc Index for correlation */
+ u8 hq_reserved1[12]; /* 0x04 */
+ char *hq_current_event; /* 0x10 */
+ char *hq_last_event; /* 0x18 */
+ char *hq_event_stack; /* 0x20 */
+ u8 hq_index; /* 0x28 unique sequential index. */
+ u8 hq_reserved2[3]; /* 0x29-2b */
+ spinlock_t hq_lock;
+};
+
+extern struct hvlpevent_queue hvlpevent_queue;
+
+extern int hvlpevent_is_pending(void);
+extern void process_hvlpevents(void);
+extern void setup_hvlpevent_queue(void);
+
+#endif /* _ASM_POWERPC_ISERIES_IT_LP_QUEUE_H */
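Illustrative polling of the shared queue above (editor's sketch, not part of the patch):

#include <asm/iseries/it_lp_queue.h>

static void my_poll_lp_events(void)
{
	/* drain anything PLIC has queued for this partition */
	if (hvlpevent_is_pending())
		process_hvlpevents();
}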
diff --git a/arch/powerpc/include/asm/iseries/lpar_map.h b/arch/powerpc/include/asm/iseries/lpar_map.h
new file mode 100644
index 0000000..5e9f3e1
--- /dev/null
+++ b/arch/powerpc/include/asm/iseries/lpar_map.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _ASM_POWERPC_ISERIES_LPAR_MAP_H
+#define _ASM_POWERPC_ISERIES_LPAR_MAP_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/types.h>
+
+#endif
+
+/*
+ * The iSeries hypervisor will set up mapping for one or more
+ * ESID/VSID pairs (in SLB/segment registers) and will set up
+ * mappings of one or more ranges of pages to VAs.
+ * We will have the hypervisor set up the ESID->VSID mapping
+ * for the four kernel segments (C-F). With shared processors,
+ * the hypervisor will clear all segment registers and reload
+ * these four whenever the processor is switched from one
+ * partition to another.
+ */
+
+/* The Vsid and Esid identified below will be used by the hypervisor
+ * to set up a memory mapping for part of the load area before giving
+ * control to the Linux kernel. The load area is 64 MB, but this must
+ * not attempt to map the whole load area. The Hashed Page Table may
+ * need to be located within the load area (if the total partition size
+ * is 64 MB), but cannot be mapped. Typically, this should specify
+ * to map half (32 MB) of the load area.
+ *
+ * The hypervisor will set up page table entries for the number of
+ * pages specified.
+ *
+ * In 32-bit mode, the hypervisor will load all four of the
+ * segment registers (identified by the low-order four bits of the
+ * Esid field. In 64-bit mode, the hypervisor will load one SLB
+ * entry to map the Esid to the Vsid.
+*/
+
+#define HvEsidsToMap 2
+#define HvRangesToMap 1
+
+/* Hypervisor initially maps 32MB of the load area */
+#define HvPagesToMap 8192
+
+#ifndef __ASSEMBLY__
+struct LparMap {
+ u64 xNumberEsids; // Number of ESID/VSID pairs
+ u64 xNumberRanges; // Number of VA ranges to map
+ u64 xSegmentTableOffs; // Page number within load area of seg table
+ u64 xRsvd[5];
+ struct {
+ u64 xKernelEsid; // Esid used to map kernel load
+ u64 xKernelVsid; // Vsid used to map kernel load
+ } xEsids[HvEsidsToMap];
+ struct {
+ u64 xPages; // Number of pages to be mapped
+ u64 xOffset; // Offset from start of load area
+ u64 xVPN; // Virtual Page Number
+ } xRanges[HvRangesToMap];
+};
+
+extern const struct LparMap xLparMap;
+
+#endif /* __ASSEMBLY__ */
+
+/* the fixed address where the LparMap exists */
+#define LPARMAP_PHYS 0x7000
+
+#endif /* _ASM_POWERPC_ISERIES_LPAR_MAP_H */
diff --git a/arch/powerpc/include/asm/iseries/mf.h b/arch/powerpc/include/asm/iseries/mf.h
new file mode 100644
index 0000000..eb851a9
--- /dev/null
+++ b/arch/powerpc/include/asm/iseries/mf.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2001 Troy D. Armstrong IBM Corporation
+ * Copyright (C) 2004 Stephen Rothwell IBM Corporation
+ *
+ * This module exists as an interface between a Linux secondary partition
+ * running on an iSeries and the primary partition's Virtual Service
+ * Processor (VSP) object. The VSP has final authority over powering on/off
+ * all partitions in the iSeries. It also provides miscellaneous low-level
+ * machine facility type operations.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _ASM_POWERPC_ISERIES_MF_H
+#define _ASM_POWERPC_ISERIES_MF_H
+
+#include <linux/types.h>
+
+#include <asm/iseries/hv_types.h>
+#include <asm/iseries/hv_call_event.h>
+
+struct rtc_time;
+
+typedef void (*MFCompleteHandler)(void *clientToken, int returnCode);
+
+extern void mf_allocate_lp_events(HvLpIndex targetLp, HvLpEvent_Type type,
+ unsigned size, unsigned amount, MFCompleteHandler hdlr,
+ void *userToken);
+extern void mf_deallocate_lp_events(HvLpIndex targetLp, HvLpEvent_Type type,
+ unsigned count, MFCompleteHandler hdlr, void *userToken);
+
+extern void mf_power_off(void);
+extern void mf_reboot(char *cmd);
+
+extern void mf_display_src(u32 word);
+extern void mf_display_progress(u16 value);
+
+extern void mf_init(void);
+
+#endif /* _ASM_POWERPC_ISERIES_MF_H */
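A short sketch of the asynchronous completion pattern implied by the MFCompleteHandler typedef above: request additional LP events and get called back when the VSP answers. The event type constant (HvLpEvent_Type_VirtualIo) and the remote LP index are assumptions taken from the iSeries HV headers, not values defined in mf.h itself.

#include <linux/kernel.h>
#include <asm/iseries/mf.h>

/* Called by the MF code once the VSP has answered the allocation request. */
static void my_alloc_complete(void *token, int rc)
{
	if (rc)
		printk(KERN_WARNING "LP event allocation failed: %d\n", rc);
}

/* Ask the primary partition for 4 more 256-byte events from remote_lp;
 * HvLpEvent_Type_VirtualIo is assumed to come from the HV type headers. */
static void request_more_events(HvLpIndex remote_lp)
{
	mf_allocate_lp_events(remote_lp, HvLpEvent_Type_VirtualIo,
			      256, 4, my_alloc_complete, NULL);
}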
diff --git a/arch/powerpc/include/asm/iseries/vio.h b/arch/powerpc/include/asm/iseries/vio.h
new file mode 100644
index 0000000..f9ac0d0
--- /dev/null
+++ b/arch/powerpc/include/asm/iseries/vio.h
@@ -0,0 +1,265 @@
+/* -*- linux-c -*-
+ *
+ * iSeries Virtual I/O Message Path header
+ *
+ * Authors: Dave Boutcher <boutcher@us.ibm.com>
+ * Ryan Arnold <ryanarn@us.ibm.com>
+ * Colin Devilbiss <devilbis@us.ibm.com>
+ *
+ * (C) Copyright 2000 IBM Corporation
+ *
+ * This header file is used by the iSeries virtual I/O device
+ * drivers. It defines the interfaces to the common functions
+ * (implemented in drivers/char/viopath.h) as well as defining
+ * common functions and structures. Currently (at the time I
+ * wrote this comment) the iSeries virtual I/O device drivers
+ * that use this are
+ * drivers/block/viodasd.c
+ * drivers/char/viocons.c
+ * drivers/char/viotape.c
+ * drivers/cdrom/viocd.c
+ *
+ * The iSeries virtual ethernet support (veth.c) uses a whole
+ * different set of functions.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef _ASM_POWERPC_ISERIES_VIO_H
+#define _ASM_POWERPC_ISERIES_VIO_H
+
+#include <asm/iseries/hv_types.h>
+#include <asm/iseries/hv_lp_event.h>
+
+/*
+ * iSeries virtual I/O events use the subtype field in
+ * HvLpEvent to figure out what kind of vio event is coming
+ * in. We use a table to route these, and this defines
+ * the maximum number of distinct subtypes.
+ */
+#define VIO_MAX_SUBTYPES 8
+
+#define VIOMAXBLOCKDMA 12
+
+struct open_data {
+ u64 disk_size;
+ u16 max_disk;
+ u16 cylinders;
+ u16 tracks;
+ u16 sectors;
+ u16 bytes_per_sector;
+};
+
+struct rw_data {
+ u64 offset;
+ struct {
+ u32 token;
+ u32 reserved;
+ u64 len;
+ } dma_info[VIOMAXBLOCKDMA];
+};
+
+struct vioblocklpevent {
+ struct HvLpEvent event;
+ u32 reserved;
+ u16 version;
+ u16 sub_result;
+ u16 disk;
+ u16 flags;
+ union {
+ struct open_data open_data;
+ struct rw_data rw_data;
+ u64 changed;
+ } u;
+};
+
+#define vioblockflags_ro 0x0001
+
+enum vioblocksubtype {
+ vioblockopen = 0x0001,
+ vioblockclose = 0x0002,
+ vioblockread = 0x0003,
+ vioblockwrite = 0x0004,
+ vioblockflush = 0x0005,
+ vioblockcheck = 0x0007
+};
+
+struct viocdlpevent {
+ struct HvLpEvent event;
+ u32 reserved;
+ u16 version;
+ u16 sub_result;
+ u16 disk;
+ u16 flags;
+ u32 token;
+ u64 offset; /* On open, max number of disks */
+ u64 len; /* On open, size of the disk */
+ u32 block_size; /* Only set on open */
+ u32 media_size; /* Only set on open */
+};
+
+enum viocdsubtype {
+ viocdopen = 0x0001,
+ viocdclose = 0x0002,
+ viocdread = 0x0003,
+ viocdwrite = 0x0004,
+ viocdlockdoor = 0x0005,
+ viocdgetinfo = 0x0006,
+ viocdcheck = 0x0007
+};
+
+struct viotapelpevent {
+ struct HvLpEvent event;
+ u32 reserved;
+ u16 version;
+ u16 sub_type_result;
+ u16 tape;
+ u16 flags;
+ u32 token;
+ u64 len;
+ union {
+ struct {
+ u32 tape_op;
+ u32 count;
+ } op;
+ struct {
+ u32 type;
+ u32 resid;
+ u32 dsreg;
+ u32 gstat;
+ u32 erreg;
+ u32 file_no;
+ u32 block_no;
+ } get_status;
+ struct {
+ u32 block_no;
+ } get_pos;
+ } u;
+};
+
+enum viotapesubtype {
+ viotapeopen = 0x0001,
+ viotapeclose = 0x0002,
+ viotaperead = 0x0003,
+ viotapewrite = 0x0004,
+ viotapegetinfo = 0x0005,
+ viotapeop = 0x0006,
+ viotapegetpos = 0x0007,
+ viotapesetpos = 0x0008,
+ viotapegetstatus = 0x0009
+};
+
+/*
+ * Each subtype can register a handler to process their events.
+ * The handler must have this interface.
+ */
+typedef void (vio_event_handler_t) (struct HvLpEvent * event);
+
+extern int viopath_open(HvLpIndex remoteLp, int subtype, int numReq);
+extern int viopath_close(HvLpIndex remoteLp, int subtype, int numReq);
+extern int vio_setHandler(int subtype, vio_event_handler_t * beh);
+extern int vio_clearHandler(int subtype);
+extern int viopath_isactive(HvLpIndex lp);
+extern HvLpInstanceId viopath_sourceinst(HvLpIndex lp);
+extern HvLpInstanceId viopath_targetinst(HvLpIndex lp);
+extern void vio_set_hostlp(void);
+extern void *vio_get_event_buffer(int subtype);
+extern void vio_free_event_buffer(int subtype, void *buffer);
+
+extern struct vio_dev *vio_create_viodasd(u32 unit);
+
+extern HvLpIndex viopath_hostLp;
+extern HvLpIndex viopath_ourLp;
+
+#define VIOCHAR_MAX_DATA 200
+
+#define VIOMAJOR_SUBTYPE_MASK 0xff00
+#define VIOMINOR_SUBTYPE_MASK 0x00ff
+#define VIOMAJOR_SUBTYPE_SHIFT 8
+
+#define VIOVERSION 0x0101
+
+/*
+ * This is the general structure for VIO errors; each module should have
+ * a table of them, and each table should be terminated by an entry of
+ * { 0, 0, NULL }. Then, to find a specific error message, a module
+ * should pass its local table and the return code.
+ */
+struct vio_error_entry {
+ u16 rc;
+ int errno;
+ const char *msg;
+};
+extern const struct vio_error_entry *vio_lookup_rc(
+ const struct vio_error_entry *local_table, u16 rc);
+
+enum viosubtypes {
+ viomajorsubtype_monitor = 0x0100,
+ viomajorsubtype_blockio = 0x0200,
+ viomajorsubtype_chario = 0x0300,
+ viomajorsubtype_config = 0x0400,
+ viomajorsubtype_cdio = 0x0500,
+ viomajorsubtype_tape = 0x0600,
+ viomajorsubtype_scsi = 0x0700
+};
+
+enum vioconfigsubtype {
+ vioconfigget = 0x0001,
+};
+
+enum viorc {
+ viorc_good = 0x0000,
+ viorc_noConnection = 0x0001,
+ viorc_noReceiver = 0x0002,
+ viorc_noBufferAvailable = 0x0003,
+ viorc_invalidMessageType = 0x0004,
+ viorc_invalidRange = 0x0201,
+ viorc_invalidToken = 0x0202,
+ viorc_DMAError = 0x0203,
+ viorc_useError = 0x0204,
+ viorc_releaseError = 0x0205,
+ viorc_invalidDisk = 0x0206,
+ viorc_openRejected = 0x0301
+};
+
+/*
+ * The structure of the events that flow between us and OS/400 for chario
+ * events. You can't mess with this unless the OS/400 side changes too.
+ */
+struct viocharlpevent {
+ struct HvLpEvent event;
+ u32 reserved;
+ u16 version;
+ u16 subtype_result_code;
+ u8 virtual_device;
+ u8 len;
+ u8 data[VIOCHAR_MAX_DATA];
+};
+
+#define VIOCHAR_WINDOW 10
+
+enum viocharsubtype {
+ viocharopen = 0x0001,
+ viocharclose = 0x0002,
+ viochardata = 0x0003,
+ viocharack = 0x0004,
+ viocharconfig = 0x0005
+};
+
+enum viochar_rc {
+ viochar_rc_ebusy = 1
+};
+
+#endif /* _ASM_POWERPC_ISERIES_VIO_H */
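The vio_error_entry comment above describes a per-driver table convention; here is a hedged sketch of what such a table and lookup might look like in a hypothetical driver. The return codes, messages and the "myvio" name are invented for illustration only.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/iseries/vio.h>

/* Hypothetical per-driver error table, terminated by { 0, 0, NULL }
 * as the comment above requires. */
static const struct vio_error_entry myvio_errors[] = {
	{ 0x0101, EINVAL, "partition not ready" },
	{ 0x0201, EIO,    "device error" },
	{ 0x0000, 0, NULL },
};

static void myvio_report_rc(u16 rc)
{
	const struct vio_error_entry *err = vio_lookup_rc(myvio_errors, rc);

	if (err && err->msg)
		printk(KERN_ERR "myvio: rc 0x%04x: %s (errno %d)\n",
		       rc, err->msg, err->errno);
}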
diff --git a/arch/powerpc/include/asm/kdebug.h b/arch/powerpc/include/asm/kdebug.h
new file mode 100644
index 0000000..ae6d206
--- /dev/null
+++ b/arch/powerpc/include/asm/kdebug.h
@@ -0,0 +1,15 @@
+#ifndef _ASM_POWERPC_KDEBUG_H
+#define _ASM_POWERPC_KDEBUG_H
+#ifdef __KERNEL__
+
+/* Grossly misnamed. */
+enum die_val {
+ DIE_OOPS = 1,
+ DIE_IABR_MATCH,
+ DIE_DABR_MATCH,
+ DIE_BPT,
+ DIE_SSTEP,
+};
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_KDEBUG_H */
diff --git a/arch/powerpc/include/asm/kdump.h b/arch/powerpc/include/asm/kdump.h
new file mode 100644
index 0000000..f6c93c7
--- /dev/null
+++ b/arch/powerpc/include/asm/kdump.h
@@ -0,0 +1,35 @@
+#ifndef _PPC64_KDUMP_H
+#define _PPC64_KDUMP_H
+
+/* Kdump kernel runs at 32 MB, change at your peril. */
+#define KDUMP_KERNELBASE 0x2000000
+
+/* How many bytes to reserve at zero for kdump. The reserve limit should
+ * be greater than or equal to the trampoline's end address.
+ * Reserve to the end of the FWNMI area, see head_64.S */
+#define KDUMP_RESERVE_LIMIT 0x10000 /* 64K */
+
+#ifdef CONFIG_CRASH_DUMP
+
+#define KDUMP_TRAMPOLINE_START 0x0100
+#define KDUMP_TRAMPOLINE_END 0x3000
+
+#define KDUMP_MIN_TCE_ENTRIES 2048
+
+#endif /* CONFIG_CRASH_DUMP */
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_CRASH_DUMP
+
+extern void reserve_kdump_trampoline(void);
+extern void setup_kdump_trampoline(void);
+
+#else /* !CONFIG_CRASH_DUMP */
+
+static inline void reserve_kdump_trampoline(void) { ; }
+static inline void setup_kdump_trampoline(void) { ; }
+
+#endif /* CONFIG_CRASH_DUMP */
+#endif /* __ASSEMBLY__ */
+
+#endif /* _PPC64_KDUMP_H */
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
new file mode 100644
index 0000000..acdcdc6
--- /dev/null
+++ b/arch/powerpc/include/asm/kexec.h
@@ -0,0 +1,160 @@
+#ifndef _ASM_POWERPC_KEXEC_H
+#define _ASM_POWERPC_KEXEC_H
+#ifdef __KERNEL__
+
+/*
+ * Maximum page that is mapped directly into kernel memory.
+ * XXX: Since we copy virt we can use any page we allocate
+ */
+#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
+
+/*
+ * Maximum address we can reach in physical address mode.
+ * XXX: I want to allow initrd in highmem. Otherwise set to rmo on LPAR.
+ */
+#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
+
+/* Maximum address we can use for the control code buffer */
+#ifdef __powerpc64__
+#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
+#else
+/* TASK_SIZE, probably left over from use_mm ?? */
+#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
+#endif
+
+#define KEXEC_CONTROL_CODE_SIZE 4096
+
+/* The native architecture */
+#ifdef __powerpc64__
+#define KEXEC_ARCH KEXEC_ARCH_PPC64
+#else
+#define KEXEC_ARCH KEXEC_ARCH_PPC
+#endif
+
+#ifndef __ASSEMBLY__
+#include <linux/cpumask.h>
+
+typedef void (*crash_shutdown_t)(void);
+
+#ifdef CONFIG_KEXEC
+
+#ifdef __powerpc64__
+/*
+ * This function is responsible for capturing register states if coming
+ * via panic or invoking dump using sysrq-trigger.
+ */
+static inline void crash_setup_regs(struct pt_regs *newregs,
+ struct pt_regs *oldregs)
+{
+ if (oldregs)
+ memcpy(newregs, oldregs, sizeof(*newregs));
+ else {
+ /* FIXME Merge this with xmon_save_regs ?? */
+ unsigned long tmp1, tmp2;
+ __asm__ __volatile__ (
+ "std 0,0(%2)\n"
+ "std 1,8(%2)\n"
+ "std 2,16(%2)\n"
+ "std 3,24(%2)\n"
+ "std 4,32(%2)\n"
+ "std 5,40(%2)\n"
+ "std 6,48(%2)\n"
+ "std 7,56(%2)\n"
+ "std 8,64(%2)\n"
+ "std 9,72(%2)\n"
+ "std 10,80(%2)\n"
+ "std 11,88(%2)\n"
+ "std 12,96(%2)\n"
+ "std 13,104(%2)\n"
+ "std 14,112(%2)\n"
+ "std 15,120(%2)\n"
+ "std 16,128(%2)\n"
+ "std 17,136(%2)\n"
+ "std 18,144(%2)\n"
+ "std 19,152(%2)\n"
+ "std 20,160(%2)\n"
+ "std 21,168(%2)\n"
+ "std 22,176(%2)\n"
+ "std 23,184(%2)\n"
+ "std 24,192(%2)\n"
+ "std 25,200(%2)\n"
+ "std 26,208(%2)\n"
+ "std 27,216(%2)\n"
+ "std 28,224(%2)\n"
+ "std 29,232(%2)\n"
+ "std 30,240(%2)\n"
+ "std 31,248(%2)\n"
+ "mfmsr %0\n"
+ "std %0, 264(%2)\n"
+ "mfctr %0\n"
+ "std %0, 280(%2)\n"
+ "mflr %0\n"
+ "std %0, 288(%2)\n"
+ "bl 1f\n"
+ "1: mflr %1\n"
+ "std %1, 256(%2)\n"
+ "mtlr %0\n"
+ "mfxer %0\n"
+ "std %0, 296(%2)\n"
+ : "=&r" (tmp1), "=&r" (tmp2)
+ : "b" (newregs)
+ : "memory");
+ }
+}
+#else
+/*
+ * Provide a dummy definition to avoid build failures. Will remain
+ * empty till crash dump support is enabled.
+ */
+static inline void crash_setup_regs(struct pt_regs *newregs,
+ struct pt_regs *oldregs) { }
+#endif /* !__powerpc64__ */
+
+extern void kexec_smp_wait(void); /* get and clear naca physid, wait for
+ master to copy new code to 0 */
+extern int crashing_cpu;
+extern void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *));
+extern cpumask_t cpus_in_sr;
+static inline int kexec_sr_activated(int cpu)
+{
+ return cpu_isset(cpu,cpus_in_sr);
+}
+
+struct kimage;
+struct pt_regs;
+extern void default_machine_kexec(struct kimage *image);
+extern int default_machine_kexec_prepare(struct kimage *image);
+extern void default_machine_crash_shutdown(struct pt_regs *regs);
+extern int crash_shutdown_register(crash_shutdown_t handler);
+extern int crash_shutdown_unregister(crash_shutdown_t handler);
+
+extern void machine_kexec_simple(struct kimage *image);
+extern void crash_kexec_secondary(struct pt_regs *regs);
+extern int overlaps_crashkernel(unsigned long start, unsigned long size);
+extern void reserve_crashkernel(void);
+
+#else /* !CONFIG_KEXEC */
+static inline int kexec_sr_activated(int cpu) { return 0; }
+static inline void crash_kexec_secondary(struct pt_regs *regs) { }
+
+static inline int overlaps_crashkernel(unsigned long start, unsigned long size)
+{
+ return 0;
+}
+
+static inline void reserve_crashkernel(void) { ; }
+
+static inline int crash_shutdown_register(crash_shutdown_t handler)
+{
+ return 0;
+}
+
+static inline int crash_shutdown_unregister(crash_shutdown_t handler)
+{
+ return 0;
+}
+
+#endif /* CONFIG_KEXEC */
+#endif /* ! __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_KEXEC_H */
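One typical use of the crash_shutdown_register()/crash_shutdown_unregister() pair above is to quiesce hardware before the crash kernel boots. The sketch below is illustrative only; my_device_stop_dma() and the "my_driver" names are hypothetical stand-ins for a real driver's quiesce path.

#include <linux/init.h>
#include <linux/module.h>
#include <asm/kexec.h>

/* Hypothetical helper standing in for real device quiescing. */
extern void my_device_stop_dma(void);

/* Keep this minimal: it may run from panic context with interrupts off
 * and the other CPUs already stopped. */
static void my_crash_shutdown(void)
{
	my_device_stop_dma();
}

static int __init my_driver_init(void)
{
	return crash_shutdown_register(my_crash_shutdown);
}

static void __exit my_driver_exit(void)
{
	crash_shutdown_unregister(my_crash_shutdown);
}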
diff --git a/arch/powerpc/include/asm/keylargo.h b/arch/powerpc/include/asm/keylargo.h
new file mode 100644
index 0000000..d8520ef
--- /dev/null
+++ b/arch/powerpc/include/asm/keylargo.h
@@ -0,0 +1,261 @@
+#ifndef _ASM_POWERPC_KEYLARGO_H
+#define _ASM_POWERPC_KEYLARGO_H
+#ifdef __KERNEL__
+/*
+ * keylargo.h: definitions for using the "KeyLargo" I/O controller chip.
+ *
+ */
+
+/* "Pangea" chipset has keylargo device-id 0x25 while core99
+ * has device-id 0x22. The rev. of the pangea one is 0, so we
+ * fake an artificial rev. in keylargo_rev by oring 0x100
+ */
+#define KL_PANGEA_REV 0x100
+
+/* offset from base for feature control registers */
+#define KEYLARGO_MBCR 0x34 /* KL Only, Media bay control/status */
+#define KEYLARGO_FCR0 0x38
+#define KEYLARGO_FCR1 0x3c
+#define KEYLARGO_FCR2 0x40
+#define KEYLARGO_FCR3 0x44
+#define KEYLARGO_FCR4 0x48
+#define KEYLARGO_FCR5 0x4c /* Pangea only */
+
+/* K2 additional FCRs */
+#define K2_FCR6 0x34
+#define K2_FCR7 0x30
+#define K2_FCR8 0x2c
+#define K2_FCR9 0x28
+#define K2_FCR10 0x24
+
+/* GPIO registers */
+#define KEYLARGO_GPIO_LEVELS0 0x50
+#define KEYLARGO_GPIO_LEVELS1 0x54
+#define KEYLARGO_GPIO_EXTINT_0 0x58
+#define KEYLARGO_GPIO_EXTINT_CNT 18
+#define KEYLARGO_GPIO_0 0x6A
+#define KEYLARGO_GPIO_CNT 17
+#define KEYLARGO_GPIO_EXTINT_DUAL_EDGE 0x80
+#define KEYLARGO_GPIO_OUTPUT_ENABLE 0x04
+#define KEYLARGO_GPIO_OUTOUT_DATA 0x01
+#define KEYLARGO_GPIO_INPUT_DATA 0x02
+
+/* K2 has only extint GPIOs, and it has 51 of them */
+#define K2_GPIO_EXTINT_0 0x58
+#define K2_GPIO_EXTINT_CNT 51
+
+/* Specific GPIO regs */
+
+#define KL_GPIO_MODEM_RESET (KEYLARGO_GPIO_0+0x03)
+#define KL_GPIO_MODEM_POWER (KEYLARGO_GPIO_0+0x02) /* Pangea */
+
+#define KL_GPIO_SOUND_POWER (KEYLARGO_GPIO_0+0x05)
+
+/* Hrm... this one is only to be used on Pismo. It seems to also
+ * control the timebase enable on other machines. Still to be
+ * experimented... --BenH.
+ */
+#define KL_GPIO_FW_CABLE_POWER (KEYLARGO_GPIO_0+0x09)
+#define KL_GPIO_TB_ENABLE (KEYLARGO_GPIO_0+0x09)
+
+#define KL_GPIO_ETH_PHY_RESET (KEYLARGO_GPIO_0+0x10)
+
+#define KL_GPIO_EXTINT_CPU1 (KEYLARGO_GPIO_0+0x0a)
+#define KL_GPIO_EXTINT_CPU1_ASSERT 0x04
+#define KL_GPIO_EXTINT_CPU1_RELEASE 0x38
+
+#define KL_GPIO_RESET_CPU0 (KEYLARGO_GPIO_EXTINT_0+0x03)
+#define KL_GPIO_RESET_CPU1 (KEYLARGO_GPIO_EXTINT_0+0x04)
+#define KL_GPIO_RESET_CPU2 (KEYLARGO_GPIO_EXTINT_0+0x0f)
+#define KL_GPIO_RESET_CPU3 (KEYLARGO_GPIO_EXTINT_0+0x10)
+
+#define KL_GPIO_PMU_MESSAGE_IRQ (KEYLARGO_GPIO_EXTINT_0+0x09)
+#define KL_GPIO_PMU_MESSAGE_BIT KEYLARGO_GPIO_INPUT_DATA
+
+#define KL_GPIO_MEDIABAY_IRQ (KEYLARGO_GPIO_EXTINT_0+0x0e)
+
+#define KL_GPIO_AIRPORT_0 (KEYLARGO_GPIO_EXTINT_0+0x0a)
+#define KL_GPIO_AIRPORT_1 (KEYLARGO_GPIO_EXTINT_0+0x0d)
+#define KL_GPIO_AIRPORT_2 (KEYLARGO_GPIO_0+0x0d)
+#define KL_GPIO_AIRPORT_3 (KEYLARGO_GPIO_0+0x0e)
+#define KL_GPIO_AIRPORT_4 (KEYLARGO_GPIO_0+0x0f)
+
+/*
+ * Bits in feature control register. Those bits different for K2 are
+ * listed separately
+ */
+#define KL_MBCR_MB0_PCI_ENABLE 0x00000800 /* exist ? */
+#define KL_MBCR_MB0_IDE_ENABLE 0x00001000
+#define KL_MBCR_MB0_FLOPPY_ENABLE 0x00002000 /* exist ? */
+#define KL_MBCR_MB0_SOUND_ENABLE 0x00004000 /* hrm... */
+#define KL_MBCR_MB0_DEV_MASK 0x00007800
+#define KL_MBCR_MB0_DEV_POWER 0x00000400
+#define KL_MBCR_MB0_DEV_RESET 0x00000200
+#define KL_MBCR_MB0_ENABLE 0x00000100
+#define KL_MBCR_MB1_PCI_ENABLE 0x08000000 /* exist ? */
+#define KL_MBCR_MB1_IDE_ENABLE 0x10000000
+#define KL_MBCR_MB1_FLOPPY_ENABLE 0x20000000 /* exist ? */
+#define KL_MBCR_MB1_SOUND_ENABLE 0x40000000 /* hrm... */
+#define KL_MBCR_MB1_DEV_MASK 0x78000000
+#define KL_MBCR_MB1_DEV_POWER 0x04000000
+#define KL_MBCR_MB1_DEV_RESET 0x02000000
+#define KL_MBCR_MB1_ENABLE 0x01000000
+
+#define KL0_SCC_B_INTF_ENABLE 0x00000001 /* (KL Only) */
+#define KL0_SCC_A_INTF_ENABLE 0x00000002
+#define KL0_SCC_SLOWPCLK 0x00000004
+#define KL0_SCC_RESET 0x00000008
+#define KL0_SCCA_ENABLE 0x00000010
+#define KL0_SCCB_ENABLE 0x00000020
+#define KL0_SCC_CELL_ENABLE 0x00000040
+#define KL0_IRDA_HIGH_BAND 0x00000100 /* (KL Only) */
+#define KL0_IRDA_SOURCE2_SEL 0x00000200 /* (KL Only) */
+#define KL0_IRDA_SOURCE1_SEL 0x00000400 /* (KL Only) */
+#define KL0_PG_USB0_PMI_ENABLE 0x00000400 /* (Pangea/Intrepid Only) */
+#define KL0_IRDA_RESET 0x00000800 /* (KL Only) */
+#define KL0_PG_USB0_REF_SUSPEND_SEL 0x00000800 /* (Pangea/Intrepid Only) */
+#define KL0_IRDA_DEFAULT1 0x00001000 /* (KL Only) */
+#define KL0_PG_USB0_REF_SUSPEND 0x00001000 /* (Pangea/Intrepid Only) */
+#define KL0_IRDA_DEFAULT0 0x00002000 /* (KL Only) */
+#define KL0_PG_USB0_PAD_SUSPEND 0x00002000 /* (Pangea/Intrepid Only) */
+#define KL0_IRDA_FAST_CONNECT 0x00004000 /* (KL Only) */
+#define KL0_PG_USB1_PMI_ENABLE 0x00004000 /* (Pangea/Intrepid Only) */
+#define KL0_IRDA_ENABLE 0x00008000 /* (KL Only) */
+#define KL0_PG_USB1_REF_SUSPEND_SEL 0x00008000 /* (Pangea/Intrepid Only) */
+#define KL0_IRDA_CLK32_ENABLE 0x00010000 /* (KL Only) */
+#define KL0_PG_USB1_REF_SUSPEND 0x00010000 /* (Pangea/Intrepid Only) */
+#define KL0_IRDA_CLK19_ENABLE 0x00020000 /* (KL Only) */
+#define KL0_PG_USB1_PAD_SUSPEND 0x00020000 /* (Pangea/Intrepid Only) */
+#define KL0_USB0_PAD_SUSPEND0 0x00040000
+#define KL0_USB0_PAD_SUSPEND1 0x00080000
+#define KL0_USB0_CELL_ENABLE 0x00100000
+#define KL0_USB1_PAD_SUSPEND0 0x00400000
+#define KL0_USB1_PAD_SUSPEND1 0x00800000
+#define KL0_USB1_CELL_ENABLE 0x01000000
+#define KL0_USB_REF_SUSPEND 0x10000000 /* (KL Only) */
+
+#define KL0_SERIAL_ENABLE (KL0_SCC_B_INTF_ENABLE | \
+ KL0_SCC_SLOWPCLK | \
+ KL0_SCC_CELL_ENABLE | KL0_SCCA_ENABLE)
+
+#define KL1_USB2_PMI_ENABLE 0x00000001 /* Intrepid only */
+#define KL1_AUDIO_SEL_22MCLK 0x00000002 /* KL/Pangea only */
+#define KL1_USB2_REF_SUSPEND_SEL 0x00000002 /* Intrepid only */
+#define KL1_USB2_REF_SUSPEND 0x00000004 /* Intrepid only */
+#define KL1_AUDIO_CLK_ENABLE_BIT 0x00000008 /* KL/Pangea only */
+#define KL1_USB2_PAD_SUSPEND_SEL 0x00000008 /* Intrepid only */
+#define KL1_USB2_PAD_SUSPEND0 0x00000010 /* Intrepid only */
+#define KL1_AUDIO_CLK_OUT_ENABLE 0x00000020 /* KL/Pangea only */
+#define KL1_USB2_PAD_SUSPEND1 0x00000020 /* Intrepid only */
+#define KL1_AUDIO_CELL_ENABLE 0x00000040 /* KL/Pangea only */
+#define KL1_USB2_CELL_ENABLE 0x00000040 /* Intrepid only */
+#define KL1_AUDIO_CHOOSE 0x00000080 /* KL/Pangea only */
+#define KL1_I2S0_CHOOSE 0x00000200 /* KL Only */
+#define KL1_I2S0_CELL_ENABLE 0x00000400
+#define KL1_I2S0_CLK_ENABLE_BIT 0x00001000
+#define KL1_I2S0_ENABLE 0x00002000
+#define KL1_I2S1_CELL_ENABLE 0x00020000
+#define KL1_I2S1_CLK_ENABLE_BIT 0x00080000
+#define KL1_I2S1_ENABLE 0x00100000
+#define KL1_EIDE0_ENABLE 0x00800000 /* KL/Intrepid Only */
+#define KL1_EIDE0_RESET_N 0x01000000 /* KL/Intrepid Only */
+#define KL1_EIDE1_ENABLE 0x04000000 /* KL Only */
+#define KL1_EIDE1_RESET_N 0x08000000 /* KL Only */
+#define KL1_UIDE_ENABLE 0x20000000 /* KL/Pangea Only */
+#define KL1_UIDE_RESET_N 0x40000000 /* KL/Pangea Only */
+
+#define KL2_IOBUS_ENABLE 0x00000002
+#define KL2_SLEEP_STATE_BIT 0x00000100 /* KL Only */
+#define KL2_PG_STOP_ALL_CLOCKS 0x00000100 /* Pangea Only */
+#define KL2_MPIC_ENABLE 0x00020000
+#define KL2_CARDSLOT_RESET 0x00040000 /* Pangea/Intrepid Only */
+#define KL2_ALT_DATA_OUT 0x02000000 /* KL Only ??? */
+#define KL2_MEM_IS_BIG 0x04000000
+#define KL2_CARDSEL_16 0x08000000
+
+#define KL3_SHUTDOWN_PLL_TOTAL 0x00000001 /* KL/Pangea only */
+#define KL3_SHUTDOWN_PLLKW6 0x00000002 /* KL/Pangea only */
+#define KL3_IT_SHUTDOWN_PLL3 0x00000002 /* Intrepid only */
+#define KL3_SHUTDOWN_PLLKW4 0x00000004 /* KL/Pangea only */
+#define KL3_IT_SHUTDOWN_PLL2 0x00000004 /* Intrepid only */
+#define KL3_SHUTDOWN_PLLKW35 0x00000008 /* KL/Pangea only */
+#define KL3_IT_SHUTDOWN_PLL1 0x00000008 /* Intrepid only */
+#define KL3_SHUTDOWN_PLLKW12 0x00000010 /* KL Only */
+#define KL3_IT_ENABLE_PLL3_SHUTDOWN 0x00000010 /* Intrepid only */
+#define KL3_PLL_RESET 0x00000020 /* KL/Pangea only */
+#define KL3_IT_ENABLE_PLL2_SHUTDOWN 0x00000020 /* Intrepid only */
+#define KL3_IT_ENABLE_PLL1_SHUTDOWN 0x00000010 /* Intrepid only */
+#define KL3_SHUTDOWN_PLL2X 0x00000080 /* KL Only */
+#define KL3_CLK66_ENABLE 0x00000100 /* KL Only */
+#define KL3_CLK49_ENABLE 0x00000200
+#define KL3_CLK45_ENABLE 0x00000400
+#define KL3_CLK31_ENABLE 0x00000800 /* KL/Pangea only */
+#define KL3_TIMER_CLK18_ENABLE 0x00001000
+#define KL3_I2S1_CLK18_ENABLE 0x00002000
+#define KL3_I2S0_CLK18_ENABLE 0x00004000
+#define KL3_VIA_CLK16_ENABLE 0x00008000 /* KL/Pangea only */
+#define KL3_IT_VIA_CLK32_ENABLE 0x00008000 /* Intrepid only */
+#define KL3_STOPPING33_ENABLED 0x00080000 /* KL Only */
+#define KL3_PG_PLL_ENABLE_TEST 0x00080000 /* Pangea Only */
+
+/* Intrepid USB bus 2, port 0,1 */
+#define KL3_IT_PORT_WAKEUP_ENABLE(p) (0x00080000 << ((p)<<3))
+#define KL3_IT_PORT_RESUME_WAKE_EN(p) (0x00040000 << ((p)<<3))
+#define KL3_IT_PORT_CONNECT_WAKE_EN(p) (0x00020000 << ((p)<<3))
+#define KL3_IT_PORT_DISCONNECT_WAKE_EN(p) (0x00010000 << ((p)<<3))
+#define KL3_IT_PORT_RESUME_STAT(p) (0x00300000 << ((p)<<3))
+#define KL3_IT_PORT_CONNECT_STAT(p) (0x00200000 << ((p)<<3))
+#define KL3_IT_PORT_DISCONNECT_STAT(p) (0x00100000 << ((p)<<3))
+
+/* Port 0,1 : bus 0, port 2,3 : bus 1 */
+#define KL4_PORT_WAKEUP_ENABLE(p) (0x00000008 << ((p)<<3))
+#define KL4_PORT_RESUME_WAKE_EN(p) (0x00000004 << ((p)<<3))
+#define KL4_PORT_CONNECT_WAKE_EN(p) (0x00000002 << ((p)<<3))
+#define KL4_PORT_DISCONNECT_WAKE_EN(p) (0x00000001 << ((p)<<3))
+#define KL4_PORT_RESUME_STAT(p) (0x00000040 << ((p)<<3))
+#define KL4_PORT_CONNECT_STAT(p) (0x00000020 << ((p)<<3))
+#define KL4_PORT_DISCONNECT_STAT(p) (0x00000010 << ((p)<<3))
+
+/* Pangea and Intrepid only */
+#define KL5_VIA_USE_CLK31		0x00000001 /* Pangea Only */
+#define KL5_SCC_USE_CLK31 0x00000002 /* Pangea Only */
+#define KL5_PWM_CLK32_EN 0x00000004
+#define KL5_CLK3_68_EN 0x00000010
+#define KL5_CLK32_EN 0x00000020
+
+
+/* K2 definitions */
+#define K2_FCR0_USB0_SWRESET 0x00200000
+#define K2_FCR0_USB1_SWRESET 0x02000000
+#define K2_FCR0_RING_PME_DISABLE 0x08000000
+
+#define K2_FCR1_PCI1_BUS_RESET_N 0x00000010
+#define K2_FCR1_PCI1_SLEEP_RESET_EN 0x00000020
+#define K2_FCR1_I2S0_CELL_ENABLE 0x00000400
+#define K2_FCR1_I2S0_RESET 0x00000800
+#define K2_FCR1_I2S0_CLK_ENABLE_BIT 0x00001000
+#define K2_FCR1_I2S0_ENABLE 0x00002000
+#define K2_FCR1_PCI1_CLK_ENABLE 0x00004000
+#define K2_FCR1_FW_CLK_ENABLE 0x00008000
+#define K2_FCR1_FW_RESET_N 0x00010000
+#define K2_FCR1_I2S1_CELL_ENABLE 0x00020000
+#define K2_FCR1_I2S1_CLK_ENABLE_BIT 0x00080000
+#define K2_FCR1_I2S1_ENABLE 0x00100000
+#define K2_FCR1_GMAC_CLK_ENABLE 0x00400000
+#define K2_FCR1_GMAC_POWER_DOWN 0x00800000
+#define K2_FCR1_GMAC_RESET_N 0x01000000
+#define K2_FCR1_SATA_CLK_ENABLE 0x02000000
+#define K2_FCR1_SATA_POWER_DOWN 0x04000000
+#define K2_FCR1_SATA_RESET_N 0x08000000
+#define K2_FCR1_UATA_CLK_ENABLE 0x10000000
+#define K2_FCR1_UATA_RESET_N 0x40000000
+#define K2_FCR1_UATA_CHOOSE_CLK66 0x80000000
+
+/* Shasta definitions */
+#define SH_FCR1_I2S2_CELL_ENABLE 0x00000010
+#define SH_FCR1_I2S2_CLK_ENABLE_BIT 0x00000040
+#define SH_FCR1_I2S2_ENABLE 0x00000080
+#define SH_FCR3_I2S2_CLK18_ENABLE 0x00008000
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_KEYLARGO_H */
diff --git a/arch/powerpc/include/asm/kgdb.h b/arch/powerpc/include/asm/kgdb.h
new file mode 100644
index 0000000..edd2170
--- /dev/null
+++ b/arch/powerpc/include/asm/kgdb.h
@@ -0,0 +1,63 @@
+/*
+ * The PowerPC (32/64) specific defines / externs for KGDB. Based on
+ * the previous 32bit and 64bit specific files, which had the following
+ * copyrights:
+ *
+ * PPC64 Mods (C) 2005 Frank Rowand (frowand@mvista.com)
+ * PPC Mods (C) 2004 Tom Rini (trini@mvista.com)
+ * PPC Mods (C) 2003 John Whitney (john.whitney@timesys.com)
+ * PPC Mods (C) 1998 Michael Tesch (tesch@cs.wisc.edu)
+ *
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Author: Tom Rini <trini@kernel.crashing.org>
+ *
+ * 2006 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#ifdef __KERNEL__
+#ifndef __POWERPC_KGDB_H__
+#define __POWERPC_KGDB_H__
+
+#ifndef __ASSEMBLY__
+
+#define BREAK_INSTR_SIZE 4
+#define BUFMAX ((NUMREGBYTES * 2) + 512)
+#define OUTBUFMAX ((NUMREGBYTES * 2) + 512)
+static inline void arch_kgdb_breakpoint(void)
+{
+ asm(".long 0x7d821008"); /* twge r2, r2 */
+}
+#define CACHE_FLUSH_IS_SAFE 1
+
+/* The number of bytes of registers we have to save depends on a few
+ * things. For 64-bit we default to not including vector registers and
+ * vector state registers. */
+#ifdef CONFIG_PPC64
+/*
+ * 64 bit (8 byte) registers:
+ * 32 gpr, 32 fpr, nip, msr, link, ctr
+ * 32 bit (4 byte) registers:
+ * ccr, xer, fpscr
+ */
+#define NUMREGBYTES ((68 * 8) + (3 * 4))
+#define NUMCRITREGBYTES 184
+#else /* CONFIG_PPC32 */
+/* On non-E500 family PPC32 we determine the size by picking the last
+ * register we need, but on E500 we skip sections so we list what we
+ * need to store, and add it up. */
+#ifndef CONFIG_E500
+#define MAXREG (PT_FPSCR+1)
+#else
+/* 32 GPRs (8 bytes), nip, msr, ccr, link, ctr, xer, acc (8 bytes), spefscr*/
+#define MAXREG ((32*2)+6+2+1)
+#endif
+#define NUMREGBYTES (MAXREG * sizeof(int))
+/* CR/LR, R1, R2, R13-R31 inclusive. */
+#define NUMCRITREGBYTES (23 * sizeof(int))
+#endif /* 32/64 */
+#endif /* !(__ASSEMBLY__) */
+#endif /* !__POWERPC_KGDB_H__ */
+#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
new file mode 100644
index 0000000..b6bac6f
--- /dev/null
+++ b/arch/powerpc/include/asm/kmap_types.h
@@ -0,0 +1,33 @@
+#ifndef _ASM_POWERPC_KMAP_TYPES_H
+#define _ASM_POWERPC_KMAP_TYPES_H
+
+#ifdef __KERNEL__
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+enum km_type {
+ KM_BOUNCE_READ,
+ KM_SKB_SUNRPC_DATA,
+ KM_SKB_DATA_SOFTIRQ,
+ KM_USER0,
+ KM_USER1,
+ KM_BIO_SRC_IRQ,
+ KM_BIO_DST_IRQ,
+ KM_PTE0,
+ KM_PTE1,
+ KM_IRQ0,
+ KM_IRQ1,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
+ KM_PPC_SYNC_PAGE,
+ KM_PPC_SYNC_ICACHE,
+ KM_TYPE_NR
+};
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_KMAP_TYPES_H */
diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h
new file mode 100644
index 0000000..d0e7701
--- /dev/null
+++ b/arch/powerpc/include/asm/kprobes.h
@@ -0,0 +1,118 @@
+#ifndef _ASM_POWERPC_KPROBES_H
+#define _ASM_POWERPC_KPROBES_H
+#ifdef __KERNEL__
+/*
+ * Kernel Probes (KProbes)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2004
+ *
+ * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
+ * Probes initial implementation ( includes suggestions from
+ * Rusty Russell).
+ * 2004-Nov Modified for PPC64 by Ananth N Mavinakayanahalli
+ * <ananth@in.ibm.com>
+ */
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/percpu.h>
+
+#define __ARCH_WANT_KPROBES_INSN_SLOT
+
+struct pt_regs;
+struct kprobe;
+
+typedef unsigned int kprobe_opcode_t;
+#define BREAKPOINT_INSTRUCTION 0x7fe00008 /* trap */
+#define MAX_INSN_SIZE 1
+
+#define IS_TW(instr) (((instr) & 0xfc0007fe) == 0x7c000008)
+#define IS_TD(instr) (((instr) & 0xfc0007fe) == 0x7c000088)
+#define IS_TDI(instr) (((instr) & 0xfc000000) == 0x08000000)
+#define IS_TWI(instr) (((instr) & 0xfc000000) == 0x0c000000)
+
+#ifdef CONFIG_PPC64
+/*
+ * 64bit powerpc uses function descriptors.
+ * Handle cases where:
+ * - User passes a <.symbol> or <module:.symbol>
+ * - User passes a <symbol> or <module:symbol>
+ * - User passes a non-existent symbol, kallsyms_lookup_name
+ * returns 0. Don't deref the NULL pointer in that case
+ */
+#define kprobe_lookup_name(name, addr) \
+{ \
+ addr = (kprobe_opcode_t *)kallsyms_lookup_name(name); \
+ if (addr) { \
+ char *colon; \
+ if ((colon = strchr(name, ':')) != NULL) { \
+ colon++; \
+ if (*colon != '\0' && *colon != '.') \
+ addr = *(kprobe_opcode_t **)addr; \
+ } else if (name[0] != '.') \
+ addr = *(kprobe_opcode_t **)addr; \
+ } else { \
+ char dot_name[KSYM_NAME_LEN]; \
+ dot_name[0] = '.'; \
+ dot_name[1] = '\0'; \
+ strncat(dot_name, name, KSYM_NAME_LEN - 2); \
+ addr = (kprobe_opcode_t *)kallsyms_lookup_name(dot_name); \
+ } \
+}
+
+#define is_trap(instr) (IS_TW(instr) || IS_TD(instr) || \
+ IS_TWI(instr) || IS_TDI(instr))
+#else
+/* Use stock kprobe_lookup_name since ppc32 doesn't use function descriptors */
+#define is_trap(instr) (IS_TW(instr) || IS_TWI(instr))
+#endif
+
+#define flush_insn_slot(p) do { } while (0)
+#define kretprobe_blacklist_size 0
+
+void kretprobe_trampoline(void);
+extern void arch_remove_kprobe(struct kprobe *p);
+
+/* Architecture specific copy of original instruction */
+struct arch_specific_insn {
+ /* copy of original instruction */
+ kprobe_opcode_t *insn;
+ /*
+ * Set in kprobes code, initially to 0. If the instruction can be
+	 * emulated, this is set to 1; if not, to -1.
+ */
+ int boostable;
+};
+
+struct prev_kprobe {
+ struct kprobe *kp;
+ unsigned long status;
+ unsigned long saved_msr;
+};
+
+/* per-cpu kprobe control block */
+struct kprobe_ctlblk {
+ unsigned long kprobe_status;
+ unsigned long kprobe_saved_msr;
+ struct pt_regs jprobe_saved_regs;
+ struct prev_kprobe prev_kprobe;
+};
+
+extern int kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data);
+extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_KPROBES_H */
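The kprobe_lookup_name() macro above hides the ppc64 function-descriptor handling described in its comment. A hedged sketch of how a caller sees it follows; the helper name is invented, and on ppc32 the generic one-line macro from linux/kprobes.h is assumed to apply instead.

#include <linux/kprobes.h>
#include <linux/kallsyms.h>
#include <linux/string.h>

/* Illustrative: resolve a symbol name to a probeable text address the
 * same way the kprobes core does.  On ppc64 a plain "do_fork" names a
 * function descriptor, so the macro either dereferences it or retries
 * with a leading '.' to reach the actual code. */
static kprobe_opcode_t *resolve_probe_addr(const char *name)
{
	kprobe_opcode_t *addr;

	kprobe_lookup_name(name, addr);
	return addr;		/* NULL if the symbol does not exist */
}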
diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h
new file mode 100644
index 0000000..f993e41
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm.h
@@ -0,0 +1,55 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __LINUX_KVM_POWERPC_H
+#define __LINUX_KVM_POWERPC_H
+
+#include <asm/types.h>
+
+struct kvm_regs {
+ __u64 pc;
+ __u64 cr;
+ __u64 ctr;
+ __u64 lr;
+ __u64 xer;
+ __u64 msr;
+ __u64 srr0;
+ __u64 srr1;
+ __u64 pid;
+
+ __u64 sprg0;
+ __u64 sprg1;
+ __u64 sprg2;
+ __u64 sprg3;
+ __u64 sprg4;
+ __u64 sprg5;
+ __u64 sprg6;
+ __u64 sprg7;
+
+ __u64 gpr[32];
+};
+
+struct kvm_sregs {
+};
+
+struct kvm_fpu {
+ __u64 fpr[32];
+};
+
+#endif /* __LINUX_KVM_POWERPC_H */
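struct kvm_regs above is the layout userspace sees through the standard KVM_GET_REGS/KVM_SET_REGS ioctls. A hedged userspace sketch is below; vcpu_fd is assumed to be an already-created KVM vcpu file descriptor, and whether a given kernel build actually services the ioctl is outside the header's scope.

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative userspace helper: read the guest program counter. */
static unsigned long long read_guest_pc(int vcpu_fd)
{
	struct kvm_regs regs;

	if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) < 0) {
		perror("KVM_GET_REGS");
		return 0;
	}
	return regs.pc;
}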
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
new file mode 100644
index 0000000..2197764
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -0,0 +1,55 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __POWERPC_KVM_ASM_H__
+#define __POWERPC_KVM_ASM_H__
+
+/* IVPR must be 64KiB-aligned. */
+#define VCPU_SIZE_ORDER 4
+#define VCPU_SIZE_LOG (VCPU_SIZE_ORDER + 12)
+#define VCPU_TLB_PGSZ PPC44x_TLB_64K
+#define VCPU_SIZE_BYTES (1<<VCPU_SIZE_LOG)
+
+#define BOOKE_INTERRUPT_CRITICAL 0
+#define BOOKE_INTERRUPT_MACHINE_CHECK 1
+#define BOOKE_INTERRUPT_DATA_STORAGE 2
+#define BOOKE_INTERRUPT_INST_STORAGE 3
+#define BOOKE_INTERRUPT_EXTERNAL 4
+#define BOOKE_INTERRUPT_ALIGNMENT 5
+#define BOOKE_INTERRUPT_PROGRAM 6
+#define BOOKE_INTERRUPT_FP_UNAVAIL 7
+#define BOOKE_INTERRUPT_SYSCALL 8
+#define BOOKE_INTERRUPT_AP_UNAVAIL 9
+#define BOOKE_INTERRUPT_DECREMENTER 10
+#define BOOKE_INTERRUPT_FIT 11
+#define BOOKE_INTERRUPT_WATCHDOG 12
+#define BOOKE_INTERRUPT_DTLB_MISS 13
+#define BOOKE_INTERRUPT_ITLB_MISS 14
+#define BOOKE_INTERRUPT_DEBUG 15
+#define BOOKE_MAX_INTERRUPT 15
+
+#define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */
+#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
+
+#define RESUME_GUEST 0
+#define RESUME_GUEST_NV RESUME_FLAG_NV
+#define RESUME_HOST RESUME_FLAG_HOST
+#define RESUME_HOST_NV (RESUME_FLAG_HOST|RESUME_FLAG_NV)
+
+#endif /* __POWERPC_KVM_ASM_H__ */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
new file mode 100644
index 0000000..2655e2a
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -0,0 +1,155 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __POWERPC_KVM_HOST_H__
+#define __POWERPC_KVM_HOST_H__
+
+#include <linux/mutex.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/kvm_types.h>
+#include <asm/kvm_asm.h>
+
+#define KVM_MAX_VCPUS 1
+#define KVM_MEMORY_SLOTS 32
+/* memory slots that are not exposed to userspace */
+#define KVM_PRIVATE_MEM_SLOTS 4
+
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+
+/* We don't currently support large pages. */
+#define KVM_PAGES_PER_HPAGE (1<<31)
+
+struct kvm;
+struct kvm_run;
+struct kvm_vcpu;
+
+struct kvm_vm_stat {
+ u32 remote_tlb_flush;
+};
+
+struct kvm_vcpu_stat {
+ u32 sum_exits;
+ u32 mmio_exits;
+ u32 dcr_exits;
+ u32 signal_exits;
+ u32 light_exits;
+ /* Account for special types of light exits: */
+ u32 itlb_real_miss_exits;
+ u32 itlb_virt_miss_exits;
+ u32 dtlb_real_miss_exits;
+ u32 dtlb_virt_miss_exits;
+ u32 syscall_exits;
+ u32 isi_exits;
+ u32 dsi_exits;
+ u32 emulated_inst_exits;
+ u32 dec_exits;
+ u32 ext_intr_exits;
+ u32 halt_wakeup;
+};
+
+struct tlbe {
+ u32 tid; /* Only the low 8 bits are used. */
+ u32 word0;
+ u32 word1;
+ u32 word2;
+};
+
+struct kvm_arch {
+};
+
+struct kvm_vcpu_arch {
+ /* Unmodified copy of the guest's TLB. */
+ struct tlbe guest_tlb[PPC44x_TLB_SIZE];
+ /* TLB that's actually used when the guest is running. */
+ struct tlbe shadow_tlb[PPC44x_TLB_SIZE];
+ /* Pages which are referenced in the shadow TLB. */
+ struct page *shadow_pages[PPC44x_TLB_SIZE];
+ /* Copy of the host's TLB. */
+ struct tlbe host_tlb[PPC44x_TLB_SIZE];
+
+ u32 host_stack;
+ u32 host_pid;
+
+ u64 fpr[32];
+ u32 gpr[32];
+
+ u32 pc;
+ u32 cr;
+ u32 ctr;
+ u32 lr;
+ u32 xer;
+
+ u32 msr;
+ u32 mmucr;
+ u32 sprg0;
+ u32 sprg1;
+ u32 sprg2;
+ u32 sprg3;
+ u32 sprg4;
+ u32 sprg5;
+ u32 sprg6;
+ u32 sprg7;
+ u32 srr0;
+ u32 srr1;
+ u32 csrr0;
+ u32 csrr1;
+ u32 dsrr0;
+ u32 dsrr1;
+ u32 dear;
+ u32 esr;
+ u32 dec;
+ u32 decar;
+ u32 tbl;
+ u32 tbu;
+ u32 tcr;
+ u32 tsr;
+ u32 ivor[16];
+ u32 ivpr;
+ u32 pir;
+ u32 pid;
+ u32 pvr;
+ u32 ccr0;
+ u32 ccr1;
+ u32 dbcr0;
+ u32 dbcr1;
+
+ u32 last_inst;
+ u32 fault_dear;
+ u32 fault_esr;
+ gpa_t paddr_accessed;
+
+ u8 io_gpr; /* GPR used as IO source/target */
+ u8 mmio_is_bigendian;
+ u8 dcr_needed;
+ u8 dcr_is_write;
+
+ u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */
+
+ struct timer_list dec_timer;
+ unsigned long pending_exceptions;
+};
+
+struct kvm_guest_debug {
+ int enabled;
+ unsigned long bp[4];
+ int singlestep;
+};
+
+#endif /* __POWERPC_KVM_HOST_H__ */
diff --git a/arch/powerpc/include/asm/kvm_para.h b/arch/powerpc/include/asm/kvm_para.h
new file mode 100644
index 0000000..2d48f6a
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_para.h
@@ -0,0 +1,37 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __POWERPC_KVM_PARA_H__
+#define __POWERPC_KVM_PARA_H__
+
+#ifdef __KERNEL__
+
+static inline int kvm_para_available(void)
+{
+ return 0;
+}
+
+static inline unsigned int kvm_arch_para_features(void)
+{
+ return 0;
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* __POWERPC_KVM_PARA_H__ */
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
new file mode 100644
index 0000000..a8b0687
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -0,0 +1,95 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __POWERPC_KVM_PPC_H__
+#define __POWERPC_KVM_PPC_H__
+
+/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
+ * dependencies. */
+
+#include <linux/mutex.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/kvm_types.h>
+#include <linux/kvm_host.h>
+
+struct kvm_tlb {
+ struct tlbe guest_tlb[PPC44x_TLB_SIZE];
+ struct tlbe shadow_tlb[PPC44x_TLB_SIZE];
+};
+
+enum emulation_result {
+ EMULATE_DONE, /* no further processing */
+ EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */
+ EMULATE_DO_DCR, /* kvm_run filled with DCR request */
+ EMULATE_FAIL, /* can't emulate this instruction */
+};
+
+extern const unsigned char exception_priority[];
+extern const unsigned char priority_exception[];
+
+extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
+extern char kvmppc_handlers_start[];
+extern unsigned long kvmppc_handler_len;
+
+extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
+extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int rt, unsigned int bytes,
+ int is_bigendian);
+extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ u32 val, unsigned int bytes, int is_bigendian);
+
+extern int kvmppc_emulate_instruction(struct kvm_run *run,
+ struct kvm_vcpu *vcpu);
+extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
+
+extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn,
+ u64 asid, u32 flags);
+extern void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
+ gva_t eend, u32 asid);
+extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
+
+extern void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu);
+
+static inline void kvmppc_queue_exception(struct kvm_vcpu *vcpu, int exception)
+{
+ unsigned int priority = exception_priority[exception];
+ set_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+static inline void kvmppc_clear_exception(struct kvm_vcpu *vcpu, int exception)
+{
+ unsigned int priority = exception_priority[exception];
+ clear_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+/* Helper function for "full" MSR writes. No need to call this if only EE is
+ * changing. */
+static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
+{
+ if ((new_msr & MSR_PR) != (vcpu->arch.msr & MSR_PR))
+ kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
+
+ vcpu->arch.msr = new_msr;
+
+ if (vcpu->arch.msr & MSR_WE)
+ kvm_vcpu_block(vcpu);
+}
+
+#endif /* __POWERPC_KVM_PPC_H__ */
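kvmppc_set_msr() above is the hook that emulation code is expected to use for full MSR updates, so privilege transitions and MSR_WE-driven idling are not missed. A minimal sketch of a caller is below; the decoded source register number "rs" is an assumption, and the extra asm/reg.h include is assumed to supply MSR_PR/MSR_WE.

#include <asm/reg.h>
#include <asm/kvm_ppc.h>

/* Illustrative mtmsr emulation: route the new value through
 * kvmppc_set_msr() so a user<->supervisor switch flips the shadow
 * mappings and an MSR_WE write blocks the vcpu. */
static void emulate_mtmsr(struct kvm_vcpu *vcpu, int rs)
{
	kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]);
}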
diff --git a/arch/powerpc/include/asm/libata-portmap.h b/arch/powerpc/include/asm/libata-portmap.h
new file mode 100644
index 0000000..4d85180
--- /dev/null
+++ b/arch/powerpc/include/asm/libata-portmap.h
@@ -0,0 +1,12 @@
+#ifndef __ASM_POWERPC_LIBATA_PORTMAP_H
+#define __ASM_POWERPC_LIBATA_PORTMAP_H
+
+#define ATA_PRIMARY_CMD 0x1F0
+#define ATA_PRIMARY_CTL 0x3F6
+#define ATA_PRIMARY_IRQ(dev) pci_get_legacy_ide_irq(dev, 0)
+
+#define ATA_SECONDARY_CMD 0x170
+#define ATA_SECONDARY_CTL 0x376
+#define ATA_SECONDARY_IRQ(dev) pci_get_legacy_ide_irq(dev, 1)
+
+#endif
diff --git a/arch/powerpc/include/asm/linkage.h b/arch/powerpc/include/asm/linkage.h
new file mode 100644
index 0000000..e1c4ac1
--- /dev/null
+++ b/arch/powerpc/include/asm/linkage.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_POWERPC_LINKAGE_H
+#define _ASM_POWERPC_LINKAGE_H
+
+/* Nothing to see here... */
+
+#endif /* _ASM_POWERPC_LINKAGE_H */
diff --git a/arch/powerpc/include/asm/lmb.h b/arch/powerpc/include/asm/lmb.h
new file mode 100644
index 0000000..6f5fdf0
--- /dev/null
+++ b/arch/powerpc/include/asm/lmb.h
@@ -0,0 +1,15 @@
+#ifndef _ASM_POWERPC_LMB_H
+#define _ASM_POWERPC_LMB_H
+
+#include <asm/udbg.h>
+
+#define LMB_DBG(fmt...) udbg_printf(fmt)
+
+#ifdef CONFIG_PPC32
+extern phys_addr_t lowmem_end_addr;
+#define LMB_REAL_LIMIT lowmem_end_addr
+#else
+#define LMB_REAL_LIMIT 0
+#endif
+
+#endif /* _ASM_POWERPC_LMB_H */
diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
new file mode 100644
index 0000000..612d832
--- /dev/null
+++ b/arch/powerpc/include/asm/local.h
@@ -0,0 +1,200 @@
+#ifndef _ARCH_POWERPC_LOCAL_H
+#define _ARCH_POWERPC_LOCAL_H
+
+#include <linux/percpu.h>
+#include <asm/atomic.h>
+
+typedef struct
+{
+ atomic_long_t a;
+} local_t;
+
+#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
+
+#define local_read(l) atomic_long_read(&(l)->a)
+#define local_set(l,i) atomic_long_set(&(l)->a, (i))
+
+#define local_add(i,l) atomic_long_add((i),(&(l)->a))
+#define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
+#define local_inc(l) atomic_long_inc(&(l)->a)
+#define local_dec(l) atomic_long_dec(&(l)->a)
+
+static __inline__ long local_add_return(long a, local_t *l)
+{
+ long t;
+
+ __asm__ __volatile__(
+"1:" PPC_LLARX "%0,0,%2 # local_add_return\n\
+ add %0,%1,%0\n"
+ PPC405_ERR77(0,%2)
+ PPC_STLCX "%0,0,%2 \n\
+ bne- 1b"
+ : "=&r" (t)
+ : "r" (a), "r" (&(l->a.counter))
+ : "cc", "memory");
+
+ return t;
+}
+
+#define local_add_negative(a, l) (local_add_return((a), (l)) < 0)
+
+static __inline__ long local_sub_return(long a, local_t *l)
+{
+ long t;
+
+ __asm__ __volatile__(
+"1:" PPC_LLARX "%0,0,%2 # local_sub_return\n\
+ subf %0,%1,%0\n"
+ PPC405_ERR77(0,%2)
+ PPC_STLCX "%0,0,%2 \n\
+ bne- 1b"
+ : "=&r" (t)
+ : "r" (a), "r" (&(l->a.counter))
+ : "cc", "memory");
+
+ return t;
+}
+
+static __inline__ long local_inc_return(local_t *l)
+{
+ long t;
+
+ __asm__ __volatile__(
+"1:" PPC_LLARX "%0,0,%1 # local_inc_return\n\
+ addic %0,%0,1\n"
+ PPC405_ERR77(0,%1)
+ PPC_STLCX "%0,0,%1 \n\
+ bne- 1b"
+ : "=&r" (t)
+ : "r" (&(l->a.counter))
+ : "cc", "memory");
+
+ return t;
+}
+
+/*
+ * local_inc_and_test - increment and test
+ * @l: pointer of type local_t
+ *
+ * Atomically increments @l by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define local_inc_and_test(l) (local_inc_return(l) == 0)
+
+static __inline__ long local_dec_return(local_t *l)
+{
+ long t;
+
+ __asm__ __volatile__(
+"1:" PPC_LLARX "%0,0,%1 # local_dec_return\n\
+ addic %0,%0,-1\n"
+ PPC405_ERR77(0,%1)
+ PPC_STLCX "%0,0,%1\n\
+ bne- 1b"
+ : "=&r" (t)
+ : "r" (&(l->a.counter))
+ : "cc", "memory");
+
+ return t;
+}
+
+#define local_cmpxchg(l, o, n) \
+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
+#define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
+
+/**
+ * local_add_unless - add unless the number is a given value
+ * @l: pointer of type local_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @l, so long as it was not @u.
+ * Returns non-zero if @l was not @u, and zero otherwise.
+ */
+static __inline__ int local_add_unless(local_t *l, long a, long u)
+{
+ long t;
+
+ __asm__ __volatile__ (
+"1:" PPC_LLARX "%0,0,%1 # local_add_unless\n\
+ cmpw 0,%0,%3 \n\
+ beq- 2f \n\
+ add %0,%2,%0 \n"
+ PPC405_ERR77(0,%2)
+ PPC_STLCX "%0,0,%1 \n\
+ bne- 1b \n"
+" subf %0,%2,%0 \n\
+2:"
+ : "=&r" (t)
+ : "r" (&(l->a.counter)), "r" (a), "r" (u)
+ : "cc", "memory");
+
+ return t != u;
+}
+
+#define local_inc_not_zero(l) local_add_unless((l), 1, 0)
+
+#define local_sub_and_test(a, l) (local_sub_return((a), (l)) == 0)
+#define local_dec_and_test(l) (local_dec_return((l)) == 0)
+
+/*
+ * Atomically test *l and decrement if it is greater than 0.
+ * The function returns the old value of *l minus 1.
+ */
+static __inline__ long local_dec_if_positive(local_t *l)
+{
+ long t;
+
+ __asm__ __volatile__(
+"1:" PPC_LLARX "%0,0,%1 # local_dec_if_positive\n\
+ cmpwi %0,1\n\
+ addi %0,%0,-1\n\
+ blt- 2f\n"
+ PPC405_ERR77(0,%1)
+ PPC_STLCX "%0,0,%1\n\
+ bne- 1b"
+ "\n\
+2:" : "=&b" (t)
+ : "r" (&(l->a.counter))
+ : "cc", "memory");
+
+ return t;
+}
+
+/* Use these for per-cpu local_t variables: on some archs they are
+ * much more efficient than these naive implementations. Note they take
+ * a variable, not an address.
+ */
+
+#define __local_inc(l) ((l)->a.counter++)
+#define __local_dec(l)		((l)->a.counter--)
+#define __local_add(i,l) ((l)->a.counter+=(i))
+#define __local_sub(i,l) ((l)->a.counter-=(i))
+
+/* Need to disable preemption for the cpu local counters, otherwise we could
+   still access a variable of a previous CPU in a non-atomic way. */
+#define cpu_local_wrap_v(l) \
+ ({ local_t res__; \
+ preempt_disable(); \
+ res__ = (l); \
+ preempt_enable(); \
+ res__; })
+#define cpu_local_wrap(l) \
+ ({ preempt_disable(); \
+ l; \
+ preempt_enable(); }) \
+
+#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
+#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
+#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
+#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
+#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
+#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
+
+#define __cpu_local_inc(l) cpu_local_inc(l)
+#define __cpu_local_dec(l) cpu_local_dec(l)
+#define __cpu_local_add(i, l) cpu_local_add((i), (l))
+#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
+
+#endif /* _ARCH_POWERPC_LOCAL_H */
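As a quick illustration of the per-CPU wrappers above, here is a hedged sketch of a per-CPU event counter; the names are made up for the example.

#include <linux/percpu.h>
#include <asm/local.h>

/* Illustrative per-CPU statistics counter.  The larx/stcx.-based local_t
 * ops make the increment safe against interrupts on the same CPU, and the
 * cpu_local_* wrappers add the preempt_disable() bracketing. */
static DEFINE_PER_CPU(local_t, my_event_count) = LOCAL_INIT(0);

static void note_event(void)
{
	cpu_local_inc(my_event_count);
}

static long events_on_this_cpu(void)
{
	return cpu_local_read(my_event_count);
}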
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
new file mode 100644
index 0000000..2fe268b
--- /dev/null
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -0,0 +1,159 @@
+/*
+ * lppaca.h
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _ASM_POWERPC_LPPACA_H
+#define _ASM_POWERPC_LPPACA_H
+#ifdef __KERNEL__
+
+//=============================================================================
+//
+// This control block contains the data that is shared between the
+// hypervisor (PLIC) and the OS.
+//
+//
+//----------------------------------------------------------------------------
+#include <linux/cache.h>
+#include <asm/types.h>
+#include <asm/mmu.h>
+
+/* The Hypervisor barfs if the lppaca crosses a page boundary. A 1k
+ * alignment is sufficient to prevent this */
+struct lppaca {
+//=============================================================================
+// CACHE_LINE_1 0x0000 - 0x007F Contains read-only data
+// NOTE: The xDynXyz fields will be dynamically changed by
+// PLIC when preparing to bring a processor online or when dispatching a
+// virtual processor!
+//=============================================================================
+ u32 desc; // Eye catcher 0xD397D781 x00-x03
+ u16 size; // Size of this struct x04-x05
+ u16 reserved1; // Reserved x06-x07
+ u16 reserved2:14; // Reserved x08-x09
+ u8 shared_proc:1; // Shared processor indicator ...
+ u8 secondary_thread:1; // Secondary thread indicator ...
+ volatile u8 dyn_proc_status:8; // Dynamic Status of this proc x0A-x0A
+ u8 secondary_thread_count; // Secondary thread count x0B-x0B
+	volatile u16 dyn_hv_phys_proc_index;	// Dynamic HV Physical Proc Index x0C-x0D
+	volatile u16 dyn_hv_log_proc_index;	// Dynamic HV Logical Proc Index x0E-x0F
+ u32 decr_val; // Value for Decr programming x10-x13
+ u32 pmc_val; // Value for PMC regs x14-x17
+ volatile u32 dyn_hw_node_id; // Dynamic Hardware Node id x18-x1B
+ volatile u32 dyn_hw_proc_id; // Dynamic Hardware Proc Id x1C-x1F
+ volatile u32 dyn_pir; // Dynamic ProcIdReg value x20-x23
+ u32 dsei_data; // DSEI data x24-x27
+ u64 sprg3; // SPRG3 value x28-x2F
+ u8 reserved3[80]; // Reserved x30-x7F
+
+//=============================================================================
+// CACHE_LINE_2 0x0080 - 0x00FF Contains local read-write data
+//=============================================================================
+ // This Dword contains a byte for each type of interrupt that can occur.
+ // The IPI is a count while the others are just a binary 1 or 0.
+ union {
+ u64 any_int;
+ struct {
+ u16 reserved; // Reserved - cleared by #mpasmbl
+ u8 xirr_int; // Indicates xXirrValue is valid or Immed IO
+ u8 ipi_cnt; // IPI Count
+ u8 decr_int; // DECR interrupt occurred
+ u8 pdc_int; // PDC interrupt occurred
+ u8 quantum_int; // Interrupt quantum reached
+ u8 old_plic_deferred_ext_int; // Old PLIC has a deferred XIRR pending
+ } fields;
+ } int_dword;
+
+ // Whenever any fields in this Dword are set then PLIC will defer the
+ // processing of external interrupts. Note that PLIC will store the
+ // XIRR directly into the xXirrValue field so that another XIRR will
+ // not be presented until this one clears. The layout of the low
+	// 4 bytes of this Dword is up to SLIC - PLIC just checks whether the
+	// entire Dword is zero or not.  A non-zero value in the low-order
+	// 2 bytes will result in SLIC being granted the highest thread
+ // priority upon return. A 0 will return to SLIC as medium priority.
+ u64 plic_defer_ints_area; // Entire Dword
+
+ // Used to pass the real SRR0/1 from PLIC to SLIC as well as to
+ // pass the target SRR0/1 from SLIC to PLIC on a SetAsrAndRfid.
+ u64 saved_srr0; // Saved SRR0 x10-x17
+ u64 saved_srr1; // Saved SRR1 x18-x1F
+
+ // Used to pass parms from the OS to PLIC for SetAsrAndRfid
+ u64 saved_gpr3; // Saved GPR3 x20-x27
+ u64 saved_gpr4; // Saved GPR4 x28-x2F
+ u64 saved_gpr5; // Saved GPR5 x30-x37
+
+ u8 reserved4; // Reserved x38-x38
+ u8 donate_dedicated_cpu; // Donate dedicated CPU cycles x39-x39
+ u8 fpregs_in_use; // FP regs in use x3A-x3A
+ u8 pmcregs_in_use; // PMC regs in use x3B-x3B
+ volatile u32 saved_decr; // Saved Decr Value x3C-x3F
+ volatile u64 emulated_time_base;// Emulated TB for this thread x40-x47
+ volatile u64 cur_plic_latency; // Unaccounted PLIC latency x48-x4F
+ u64 tot_plic_latency; // Accumulated PLIC latency x50-x57
+ u64 wait_state_cycles; // Wait cycles for this proc x58-x5F
+ u64 end_of_quantum; // TB at end of quantum x60-x67
+ u64 pdc_saved_sprg1; // Saved SPRG1 for PMC int x68-x6F
+ u64 pdc_saved_srr0; // Saved SRR0 for PMC int x70-x77
+	volatile u32 virtual_decr;	// Virtual DECR for shared procs x78-x7B
+ u16 slb_count; // # of SLBs to maintain x7C-x7D
+ u8 idle; // Indicate OS is idle x7E
+ u8 vmxregs_in_use; // VMX registers in use x7F
+
+
+//=============================================================================
+// CACHE_LINE_3 0x0100 - 0x017F: This line is shared with other processors
+//=============================================================================
+ // This is the yield_count. An "odd" value (low bit on) means that
+ // the processor is yielded (either because of an OS yield or a PLIC
+ // preempt). An even value implies that the processor is currently
+ // executing.
+ // NOTE: This value will ALWAYS be zero for dedicated processors and
+ // will NEVER be zero for shared processors (ie, initialized to a 1).
+ volatile u32 yield_count; // PLIC increments each dispatch x00-x03
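+ // For illustration only (not part of the defined layout): given the
+ // odd/even convention described above, a shared processor can be
+ // treated as currently yielded when (lppaca[cpu].yield_count & 1) is set.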
+ u32 reserved6;
+ volatile u64 cmo_faults; // CMO page fault count x08-x0F
+ volatile u64 cmo_fault_time; // CMO page fault time x10-x17
+ u8 reserved7[104]; // Reserved x18-x7F
+
+//=============================================================================
+// CACHE_LINE_4-5 0x0180 - 0x027F Contains PMC interrupt data
+//=============================================================================
+ u8 pmc_save_area[256]; // PMC interrupt Area x00-xFF
+} __attribute__((__aligned__(0x400)));
+
+extern struct lppaca lppaca[];
+
+/*
+ * SLB shadow buffer structure as defined in the PAPR. The save_area
+ * contains adjacent ESID and VSID pairs for each shadowed SLB. The
+ * ESID is stored in the lower 64 bits, then the VSID.
+ */
+struct slb_shadow {
+ u32 persistent; // Number of persistent SLBs x00-x03
+ u32 buffer_length; // Total shadow buffer length x04-x07
+ u64 reserved; // Alignment x08-x0f
+ struct {
+ u64 esid;
+ u64 vsid;
+ } save_area[SLB_NUM_BOLTED]; // x10-x40
+} ____cacheline_aligned;
+
+extern struct slb_shadow slb_shadow[];
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_LPPACA_H */
diff --git a/arch/powerpc/include/asm/lv1call.h b/arch/powerpc/include/asm/lv1call.h
new file mode 100644
index 0000000..81713ac
--- /dev/null
+++ b/arch/powerpc/include/asm/lv1call.h
@@ -0,0 +1,348 @@
+/*
+ * PS3 hvcall interface.
+ *
+ * Copyright (C) 2006 Sony Computer Entertainment Inc.
+ * Copyright 2006 Sony Corp.
+ * Copyright 2003, 2004 (c) MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#if !defined(_ASM_POWERPC_LV1CALL_H)
+#define _ASM_POWERPC_LV1CALL_H
+
+#if !defined(__ASSEMBLY__)
+
+#include <linux/types.h>
+
+/* lv1 call declaration macros */
+
+#define LV1_1_IN_ARG_DECL u64 in_1
+#define LV1_2_IN_ARG_DECL LV1_1_IN_ARG_DECL, u64 in_2
+#define LV1_3_IN_ARG_DECL LV1_2_IN_ARG_DECL, u64 in_3
+#define LV1_4_IN_ARG_DECL LV1_3_IN_ARG_DECL, u64 in_4
+#define LV1_5_IN_ARG_DECL LV1_4_IN_ARG_DECL, u64 in_5
+#define LV1_6_IN_ARG_DECL LV1_5_IN_ARG_DECL, u64 in_6
+#define LV1_7_IN_ARG_DECL LV1_6_IN_ARG_DECL, u64 in_7
+#define LV1_8_IN_ARG_DECL LV1_7_IN_ARG_DECL, u64 in_8
+#define LV1_1_OUT_ARG_DECL u64 *out_1
+#define LV1_2_OUT_ARG_DECL LV1_1_OUT_ARG_DECL, u64 *out_2
+#define LV1_3_OUT_ARG_DECL LV1_2_OUT_ARG_DECL, u64 *out_3
+#define LV1_4_OUT_ARG_DECL LV1_3_OUT_ARG_DECL, u64 *out_4
+#define LV1_5_OUT_ARG_DECL LV1_4_OUT_ARG_DECL, u64 *out_5
+#define LV1_6_OUT_ARG_DECL LV1_5_OUT_ARG_DECL, u64 *out_6
+#define LV1_7_OUT_ARG_DECL LV1_6_OUT_ARG_DECL, u64 *out_7
+
+#define LV1_0_IN_0_OUT_ARG_DECL void
+#define LV1_1_IN_0_OUT_ARG_DECL LV1_1_IN_ARG_DECL
+#define LV1_2_IN_0_OUT_ARG_DECL LV1_2_IN_ARG_DECL
+#define LV1_3_IN_0_OUT_ARG_DECL LV1_3_IN_ARG_DECL
+#define LV1_4_IN_0_OUT_ARG_DECL LV1_4_IN_ARG_DECL
+#define LV1_5_IN_0_OUT_ARG_DECL LV1_5_IN_ARG_DECL
+#define LV1_6_IN_0_OUT_ARG_DECL LV1_6_IN_ARG_DECL
+#define LV1_7_IN_0_OUT_ARG_DECL LV1_7_IN_ARG_DECL
+
+#define LV1_0_IN_1_OUT_ARG_DECL LV1_1_OUT_ARG_DECL
+#define LV1_1_IN_1_OUT_ARG_DECL LV1_1_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+#define LV1_2_IN_1_OUT_ARG_DECL LV1_2_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+#define LV1_3_IN_1_OUT_ARG_DECL LV1_3_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+#define LV1_4_IN_1_OUT_ARG_DECL LV1_4_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+#define LV1_5_IN_1_OUT_ARG_DECL LV1_5_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+#define LV1_6_IN_1_OUT_ARG_DECL LV1_6_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+#define LV1_7_IN_1_OUT_ARG_DECL LV1_7_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+#define LV1_8_IN_1_OUT_ARG_DECL LV1_8_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+
+#define LV1_0_IN_2_OUT_ARG_DECL LV1_2_OUT_ARG_DECL
+#define LV1_1_IN_2_OUT_ARG_DECL LV1_1_IN_ARG_DECL, LV1_2_OUT_ARG_DECL
+#define LV1_2_IN_2_OUT_ARG_DECL LV1_2_IN_ARG_DECL, LV1_2_OUT_ARG_DECL
+#define LV1_3_IN_2_OUT_ARG_DECL LV1_3_IN_ARG_DECL, LV1_2_OUT_ARG_DECL
+#define LV1_4_IN_2_OUT_ARG_DECL LV1_4_IN_ARG_DECL, LV1_2_OUT_ARG_DECL
+#define LV1_5_IN_2_OUT_ARG_DECL LV1_5_IN_ARG_DECL, LV1_2_OUT_ARG_DECL
+#define LV1_6_IN_2_OUT_ARG_DECL LV1_6_IN_ARG_DECL, LV1_2_OUT_ARG_DECL
+#define LV1_7_IN_2_OUT_ARG_DECL LV1_7_IN_ARG_DECL, LV1_2_OUT_ARG_DECL
+
+#define LV1_0_IN_3_OUT_ARG_DECL LV1_3_OUT_ARG_DECL
+#define LV1_1_IN_3_OUT_ARG_DECL LV1_1_IN_ARG_DECL, LV1_3_OUT_ARG_DECL
+#define LV1_2_IN_3_OUT_ARG_DECL LV1_2_IN_ARG_DECL, LV1_3_OUT_ARG_DECL
+#define LV1_3_IN_3_OUT_ARG_DECL LV1_3_IN_ARG_DECL, LV1_3_OUT_ARG_DECL
+#define LV1_4_IN_3_OUT_ARG_DECL LV1_4_IN_ARG_DECL, LV1_3_OUT_ARG_DECL
+#define LV1_5_IN_3_OUT_ARG_DECL LV1_5_IN_ARG_DECL, LV1_3_OUT_ARG_DECL
+#define LV1_6_IN_3_OUT_ARG_DECL LV1_6_IN_ARG_DECL, LV1_3_OUT_ARG_DECL
+#define LV1_7_IN_3_OUT_ARG_DECL LV1_7_IN_ARG_DECL, LV1_3_OUT_ARG_DECL
+
+#define LV1_0_IN_4_OUT_ARG_DECL LV1_4_OUT_ARG_DECL
+#define LV1_1_IN_4_OUT_ARG_DECL LV1_1_IN_ARG_DECL, LV1_4_OUT_ARG_DECL
+#define LV1_2_IN_4_OUT_ARG_DECL LV1_2_IN_ARG_DECL, LV1_4_OUT_ARG_DECL
+#define LV1_3_IN_4_OUT_ARG_DECL LV1_3_IN_ARG_DECL, LV1_4_OUT_ARG_DECL
+#define LV1_4_IN_4_OUT_ARG_DECL LV1_4_IN_ARG_DECL, LV1_4_OUT_ARG_DECL
+#define LV1_5_IN_4_OUT_ARG_DECL LV1_5_IN_ARG_DECL, LV1_4_OUT_ARG_DECL
+#define LV1_6_IN_4_OUT_ARG_DECL LV1_6_IN_ARG_DECL, LV1_4_OUT_ARG_DECL
+#define LV1_7_IN_4_OUT_ARG_DECL LV1_7_IN_ARG_DECL, LV1_4_OUT_ARG_DECL
+
+#define LV1_0_IN_5_OUT_ARG_DECL LV1_5_OUT_ARG_DECL
+#define LV1_1_IN_5_OUT_ARG_DECL LV1_1_IN_ARG_DECL, LV1_5_OUT_ARG_DECL
+#define LV1_2_IN_5_OUT_ARG_DECL LV1_2_IN_ARG_DECL, LV1_5_OUT_ARG_DECL
+#define LV1_3_IN_5_OUT_ARG_DECL LV1_3_IN_ARG_DECL, LV1_5_OUT_ARG_DECL
+#define LV1_4_IN_5_OUT_ARG_DECL LV1_4_IN_ARG_DECL, LV1_5_OUT_ARG_DECL
+#define LV1_5_IN_5_OUT_ARG_DECL LV1_5_IN_ARG_DECL, LV1_5_OUT_ARG_DECL
+#define LV1_6_IN_5_OUT_ARG_DECL LV1_6_IN_ARG_DECL, LV1_5_OUT_ARG_DECL
+#define LV1_7_IN_5_OUT_ARG_DECL LV1_7_IN_ARG_DECL, LV1_5_OUT_ARG_DECL
+
+#define LV1_0_IN_6_OUT_ARG_DECL LV1_6_OUT_ARG_DECL
+#define LV1_1_IN_6_OUT_ARG_DECL LV1_1_IN_ARG_DECL, LV1_6_OUT_ARG_DECL
+#define LV1_2_IN_6_OUT_ARG_DECL LV1_2_IN_ARG_DECL, LV1_6_OUT_ARG_DECL
+#define LV1_3_IN_6_OUT_ARG_DECL LV1_3_IN_ARG_DECL, LV1_6_OUT_ARG_DECL
+#define LV1_4_IN_6_OUT_ARG_DECL LV1_4_IN_ARG_DECL, LV1_6_OUT_ARG_DECL
+#define LV1_5_IN_6_OUT_ARG_DECL LV1_5_IN_ARG_DECL, LV1_6_OUT_ARG_DECL
+#define LV1_6_IN_6_OUT_ARG_DECL LV1_6_IN_ARG_DECL, LV1_6_OUT_ARG_DECL
+#define LV1_7_IN_6_OUT_ARG_DECL LV1_7_IN_ARG_DECL, LV1_6_OUT_ARG_DECL
+
+#define LV1_0_IN_7_OUT_ARG_DECL LV1_7_OUT_ARG_DECL
+#define LV1_1_IN_7_OUT_ARG_DECL LV1_1_IN_ARG_DECL, LV1_7_OUT_ARG_DECL
+#define LV1_2_IN_7_OUT_ARG_DECL LV1_2_IN_ARG_DECL, LV1_7_OUT_ARG_DECL
+#define LV1_3_IN_7_OUT_ARG_DECL LV1_3_IN_ARG_DECL, LV1_7_OUT_ARG_DECL
+#define LV1_4_IN_7_OUT_ARG_DECL LV1_4_IN_ARG_DECL, LV1_7_OUT_ARG_DECL
+#define LV1_5_IN_7_OUT_ARG_DECL LV1_5_IN_ARG_DECL, LV1_7_OUT_ARG_DECL
+#define LV1_6_IN_7_OUT_ARG_DECL LV1_6_IN_ARG_DECL, LV1_7_OUT_ARG_DECL
+#define LV1_7_IN_7_OUT_ARG_DECL LV1_7_IN_ARG_DECL, LV1_7_OUT_ARG_DECL
+
+#define LV1_1_IN_ARGS in_1
+#define LV1_2_IN_ARGS LV1_1_IN_ARGS, in_2
+#define LV1_3_IN_ARGS LV1_2_IN_ARGS, in_3
+#define LV1_4_IN_ARGS LV1_3_IN_ARGS, in_4
+#define LV1_5_IN_ARGS LV1_4_IN_ARGS, in_5
+#define LV1_6_IN_ARGS LV1_5_IN_ARGS, in_6
+#define LV1_7_IN_ARGS LV1_6_IN_ARGS, in_7
+#define LV1_8_IN_ARGS LV1_7_IN_ARGS, in_8
+
+#define LV1_1_OUT_ARGS out_1
+#define LV1_2_OUT_ARGS LV1_1_OUT_ARGS, out_2
+#define LV1_3_OUT_ARGS LV1_2_OUT_ARGS, out_3
+#define LV1_4_OUT_ARGS LV1_3_OUT_ARGS, out_4
+#define LV1_5_OUT_ARGS LV1_4_OUT_ARGS, out_5
+#define LV1_6_OUT_ARGS LV1_5_OUT_ARGS, out_6
+#define LV1_7_OUT_ARGS LV1_6_OUT_ARGS, out_7
+
+#define LV1_0_IN_0_OUT_ARGS
+#define LV1_1_IN_0_OUT_ARGS LV1_1_IN_ARGS
+#define LV1_2_IN_0_OUT_ARGS LV1_2_IN_ARGS
+#define LV1_3_IN_0_OUT_ARGS LV1_3_IN_ARGS
+#define LV1_4_IN_0_OUT_ARGS LV1_4_IN_ARGS
+#define LV1_5_IN_0_OUT_ARGS LV1_5_IN_ARGS
+#define LV1_6_IN_0_OUT_ARGS LV1_6_IN_ARGS
+#define LV1_7_IN_0_OUT_ARGS LV1_7_IN_ARGS
+
+#define LV1_0_IN_1_OUT_ARGS LV1_1_OUT_ARGS
+#define LV1_1_IN_1_OUT_ARGS LV1_1_IN_ARGS, LV1_1_OUT_ARGS
+#define LV1_2_IN_1_OUT_ARGS LV1_2_IN_ARGS, LV1_1_OUT_ARGS
+#define LV1_3_IN_1_OUT_ARGS LV1_3_IN_ARGS, LV1_1_OUT_ARGS
+#define LV1_4_IN_1_OUT_ARGS LV1_4_IN_ARGS, LV1_1_OUT_ARGS
+#define LV1_5_IN_1_OUT_ARGS LV1_5_IN_ARGS, LV1_1_OUT_ARGS
+#define LV1_6_IN_1_OUT_ARGS LV1_6_IN_ARGS, LV1_1_OUT_ARGS
+#define LV1_7_IN_1_OUT_ARGS LV1_7_IN_ARGS, LV1_1_OUT_ARGS
+#define LV1_8_IN_1_OUT_ARGS LV1_8_IN_ARGS, LV1_1_OUT_ARGS
+
+#define LV1_0_IN_2_OUT_ARGS LV1_2_OUT_ARGS
+#define LV1_1_IN_2_OUT_ARGS LV1_1_IN_ARGS, LV1_2_OUT_ARGS
+#define LV1_2_IN_2_OUT_ARGS LV1_2_IN_ARGS, LV1_2_OUT_ARGS
+#define LV1_3_IN_2_OUT_ARGS LV1_3_IN_ARGS, LV1_2_OUT_ARGS
+#define LV1_4_IN_2_OUT_ARGS LV1_4_IN_ARGS, LV1_2_OUT_ARGS
+#define LV1_5_IN_2_OUT_ARGS LV1_5_IN_ARGS, LV1_2_OUT_ARGS
+#define LV1_6_IN_2_OUT_ARGS LV1_6_IN_ARGS, LV1_2_OUT_ARGS
+#define LV1_7_IN_2_OUT_ARGS LV1_7_IN_ARGS, LV1_2_OUT_ARGS
+
+#define LV1_0_IN_3_OUT_ARGS LV1_3_OUT_ARGS
+#define LV1_1_IN_3_OUT_ARGS LV1_1_IN_ARGS, LV1_3_OUT_ARGS
+#define LV1_2_IN_3_OUT_ARGS LV1_2_IN_ARGS, LV1_3_OUT_ARGS
+#define LV1_3_IN_3_OUT_ARGS LV1_3_IN_ARGS, LV1_3_OUT_ARGS
+#define LV1_4_IN_3_OUT_ARGS LV1_4_IN_ARGS, LV1_3_OUT_ARGS
+#define LV1_5_IN_3_OUT_ARGS LV1_5_IN_ARGS, LV1_3_OUT_ARGS
+#define LV1_6_IN_3_OUT_ARGS LV1_6_IN_ARGS, LV1_3_OUT_ARGS
+#define LV1_7_IN_3_OUT_ARGS LV1_7_IN_ARGS, LV1_3_OUT_ARGS
+
+#define LV1_0_IN_4_OUT_ARGS LV1_4_OUT_ARGS
+#define LV1_1_IN_4_OUT_ARGS LV1_1_IN_ARGS, LV1_4_OUT_ARGS
+#define LV1_2_IN_4_OUT_ARGS LV1_2_IN_ARGS, LV1_4_OUT_ARGS
+#define LV1_3_IN_4_OUT_ARGS LV1_3_IN_ARGS, LV1_4_OUT_ARGS
+#define LV1_4_IN_4_OUT_ARGS LV1_4_IN_ARGS, LV1_4_OUT_ARGS
+#define LV1_5_IN_4_OUT_ARGS LV1_5_IN_ARGS, LV1_4_OUT_ARGS
+#define LV1_6_IN_4_OUT_ARGS LV1_6_IN_ARGS, LV1_4_OUT_ARGS
+#define LV1_7_IN_4_OUT_ARGS LV1_7_IN_ARGS, LV1_4_OUT_ARGS
+
+#define LV1_0_IN_5_OUT_ARGS LV1_5_OUT_ARGS
+#define LV1_1_IN_5_OUT_ARGS LV1_1_IN_ARGS, LV1_5_OUT_ARGS
+#define LV1_2_IN_5_OUT_ARGS LV1_2_IN_ARGS, LV1_5_OUT_ARGS
+#define LV1_3_IN_5_OUT_ARGS LV1_3_IN_ARGS, LV1_5_OUT_ARGS
+#define LV1_4_IN_5_OUT_ARGS LV1_4_IN_ARGS, LV1_5_OUT_ARGS
+#define LV1_5_IN_5_OUT_ARGS LV1_5_IN_ARGS, LV1_5_OUT_ARGS
+#define LV1_6_IN_5_OUT_ARGS LV1_6_IN_ARGS, LV1_5_OUT_ARGS
+#define LV1_7_IN_5_OUT_ARGS LV1_7_IN_ARGS, LV1_5_OUT_ARGS
+
+#define LV1_0_IN_6_OUT_ARGS LV1_6_OUT_ARGS
+#define LV1_1_IN_6_OUT_ARGS LV1_1_IN_ARGS, LV1_6_OUT_ARGS
+#define LV1_2_IN_6_OUT_ARGS LV1_2_IN_ARGS, LV1_6_OUT_ARGS
+#define LV1_3_IN_6_OUT_ARGS LV1_3_IN_ARGS, LV1_6_OUT_ARGS
+#define LV1_4_IN_6_OUT_ARGS LV1_4_IN_ARGS, LV1_6_OUT_ARGS
+#define LV1_5_IN_6_OUT_ARGS LV1_5_IN_ARGS, LV1_6_OUT_ARGS
+#define LV1_6_IN_6_OUT_ARGS LV1_6_IN_ARGS, LV1_6_OUT_ARGS
+#define LV1_7_IN_6_OUT_ARGS LV1_7_IN_ARGS, LV1_6_OUT_ARGS
+
+#define LV1_0_IN_7_OUT_ARGS LV1_7_OUT_ARGS
+#define LV1_1_IN_7_OUT_ARGS LV1_1_IN_ARGS, LV1_7_OUT_ARGS
+#define LV1_2_IN_7_OUT_ARGS LV1_2_IN_ARGS, LV1_7_OUT_ARGS
+#define LV1_3_IN_7_OUT_ARGS LV1_3_IN_ARGS, LV1_7_OUT_ARGS
+#define LV1_4_IN_7_OUT_ARGS LV1_4_IN_ARGS, LV1_7_OUT_ARGS
+#define LV1_5_IN_7_OUT_ARGS LV1_5_IN_ARGS, LV1_7_OUT_ARGS
+#define LV1_6_IN_7_OUT_ARGS LV1_6_IN_ARGS, LV1_7_OUT_ARGS
+#define LV1_7_IN_7_OUT_ARGS LV1_7_IN_ARGS, LV1_7_OUT_ARGS
+
+/*
+ * This LV1_CALL() macro is for use by callers. It expands into an
+ * inline call wrapper and an underscored HV call declaration. The
+ * wrapper can be used to instrument the lv1 call interface. The
+ * file lv1call.S defines its own LV1_CALL() macro to expand into
+ * the actual underscored call definition.
+ */
+
+#if !defined(LV1_CALL)
+#define LV1_CALL(name, in, out, num) \
+ extern s64 _lv1_##name(LV1_##in##_IN_##out##_OUT_ARG_DECL); \
+ static inline int lv1_##name(LV1_##in##_IN_##out##_OUT_ARG_DECL) \
+ {return _lv1_##name(LV1_##in##_IN_##out##_OUT_ARGS);}
+#endif
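+
+/*
+ * Illustrative use of a wrapper generated by LV1_CALL() (a sketch, not
+ * part of the interface): lv1_get_logical_ppe_id is declared in the
+ * table below with 0 inputs and 1 output, so a caller would write
+ * something like:
+ *
+ *	u64 ppe_id;
+ *	int result = lv1_get_logical_ppe_id(&ppe_id);
+ *	if (result)
+ *		;	// handle the hypervisor status code
+ */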
+
+#endif /* !defined(__ASSEMBLY__) */
+
+/* lv1 call table */
+
+LV1_CALL(allocate_memory, 4, 2, 0 )
+LV1_CALL(write_htab_entry, 4, 0, 1 )
+LV1_CALL(construct_virtual_address_space, 3, 2, 2 )
+LV1_CALL(invalidate_htab_entries, 5, 0, 3 )
+LV1_CALL(get_virtual_address_space_id_of_ppe, 1, 1, 4 )
+LV1_CALL(query_logical_partition_address_region_info, 1, 5, 6 )
+LV1_CALL(select_virtual_address_space, 1, 0, 7 )
+LV1_CALL(pause, 1, 0, 9 )
+LV1_CALL(destruct_virtual_address_space, 1, 0, 10 )
+LV1_CALL(configure_irq_state_bitmap, 3, 0, 11 )
+LV1_CALL(connect_irq_plug_ext, 5, 0, 12 )
+LV1_CALL(release_memory, 1, 0, 13 )
+LV1_CALL(put_iopte, 5, 0, 15 )
+LV1_CALL(disconnect_irq_plug_ext, 3, 0, 17 )
+LV1_CALL(construct_event_receive_port, 0, 1, 18 )
+LV1_CALL(destruct_event_receive_port, 1, 0, 19 )
+LV1_CALL(send_event_locally, 1, 0, 24 )
+LV1_CALL(end_of_interrupt, 1, 0, 27 )
+LV1_CALL(connect_irq_plug, 2, 0, 28 )
+LV1_CALL(disconnect_irq_plug, 1, 0, 29 )
+LV1_CALL(end_of_interrupt_ext, 3, 0, 30 )
+LV1_CALL(did_update_interrupt_mask, 2, 0, 31 )
+LV1_CALL(shutdown_logical_partition, 1, 0, 44 )
+LV1_CALL(destruct_logical_spe, 1, 0, 54 )
+LV1_CALL(construct_logical_spe, 7, 6, 57 )
+LV1_CALL(set_spe_interrupt_mask, 3, 0, 61 )
+LV1_CALL(set_spe_transition_notifier, 3, 0, 64 )
+LV1_CALL(disable_logical_spe, 2, 0, 65 )
+LV1_CALL(clear_spe_interrupt_status, 4, 0, 66 )
+LV1_CALL(get_spe_interrupt_status, 2, 1, 67 )
+LV1_CALL(get_logical_ppe_id, 0, 1, 69 )
+LV1_CALL(set_interrupt_mask, 5, 0, 73 )
+LV1_CALL(get_logical_partition_id, 0, 1, 74 )
+LV1_CALL(configure_execution_time_variable, 1, 0, 77 )
+LV1_CALL(get_spe_irq_outlet, 2, 1, 78 )
+LV1_CALL(set_spe_privilege_state_area_1_register, 3, 0, 79 )
+LV1_CALL(create_repository_node, 6, 0, 90 )
+LV1_CALL(get_repository_node_value, 5, 2, 91 )
+LV1_CALL(modify_repository_node_value, 6, 0, 92 )
+LV1_CALL(remove_repository_node, 4, 0, 93 )
+LV1_CALL(read_htab_entries, 2, 5, 95 )
+LV1_CALL(set_dabr, 2, 0, 96 )
+LV1_CALL(get_total_execution_time, 2, 1, 103 )
+LV1_CALL(allocate_io_segment, 3, 1, 116 )
+LV1_CALL(release_io_segment, 2, 0, 117 )
+LV1_CALL(construct_io_irq_outlet, 1, 1, 120 )
+LV1_CALL(destruct_io_irq_outlet, 1, 0, 121 )
+LV1_CALL(map_htab, 1, 1, 122 )
+LV1_CALL(unmap_htab, 1, 0, 123 )
+LV1_CALL(get_version_info, 0, 1, 127 )
+LV1_CALL(insert_htab_entry, 6, 3, 158 )
+LV1_CALL(read_virtual_uart, 3, 1, 162 )
+LV1_CALL(write_virtual_uart, 3, 1, 163 )
+LV1_CALL(set_virtual_uart_param, 3, 0, 164 )
+LV1_CALL(get_virtual_uart_param, 2, 1, 165 )
+LV1_CALL(configure_virtual_uart_irq, 1, 1, 166 )
+LV1_CALL(open_device, 3, 0, 170 )
+LV1_CALL(close_device, 2, 0, 171 )
+LV1_CALL(map_device_mmio_region, 5, 1, 172 )
+LV1_CALL(unmap_device_mmio_region, 3, 0, 173 )
+LV1_CALL(allocate_device_dma_region, 5, 1, 174 )
+LV1_CALL(free_device_dma_region, 3, 0, 175 )
+LV1_CALL(map_device_dma_region, 6, 0, 176 )
+LV1_CALL(unmap_device_dma_region, 4, 0, 177 )
+LV1_CALL(net_add_multicast_address, 4, 0, 185 )
+LV1_CALL(net_remove_multicast_address, 4, 0, 186 )
+LV1_CALL(net_start_tx_dma, 4, 0, 187 )
+LV1_CALL(net_stop_tx_dma, 3, 0, 188 )
+LV1_CALL(net_start_rx_dma, 4, 0, 189 )
+LV1_CALL(net_stop_rx_dma, 3, 0, 190 )
+LV1_CALL(net_set_interrupt_status_indicator, 4, 0, 191 )
+LV1_CALL(net_set_interrupt_mask, 4, 0, 193 )
+LV1_CALL(net_control, 6, 2, 194 )
+LV1_CALL(connect_interrupt_event_receive_port, 4, 0, 197 )
+LV1_CALL(disconnect_interrupt_event_receive_port, 4, 0, 198 )
+LV1_CALL(get_spe_all_interrupt_statuses, 1, 1, 199 )
+LV1_CALL(deconfigure_virtual_uart_irq, 0, 0, 202 )
+LV1_CALL(enable_logical_spe, 2, 0, 207 )
+LV1_CALL(gpu_open, 1, 0, 210 )
+LV1_CALL(gpu_close, 0, 0, 211 )
+LV1_CALL(gpu_device_map, 1, 2, 212 )
+LV1_CALL(gpu_device_unmap, 1, 0, 213 )
+LV1_CALL(gpu_memory_allocate, 5, 2, 214 )
+LV1_CALL(gpu_memory_free, 1, 0, 216 )
+LV1_CALL(gpu_context_allocate, 2, 5, 217 )
+LV1_CALL(gpu_context_free, 1, 0, 218 )
+LV1_CALL(gpu_context_iomap, 5, 0, 221 )
+LV1_CALL(gpu_context_attribute, 6, 0, 225 )
+LV1_CALL(gpu_context_intr, 1, 1, 227 )
+LV1_CALL(gpu_attribute, 5, 0, 228 )
+LV1_CALL(get_rtc, 0, 2, 232 )
+LV1_CALL(set_ppe_periodic_tracer_frequency, 1, 0, 240 )
+LV1_CALL(start_ppe_periodic_tracer, 5, 0, 241 )
+LV1_CALL(stop_ppe_periodic_tracer, 1, 1, 242 )
+LV1_CALL(storage_read, 6, 1, 245 )
+LV1_CALL(storage_write, 6, 1, 246 )
+LV1_CALL(storage_send_device_command, 6, 1, 248 )
+LV1_CALL(storage_get_async_status, 1, 2, 249 )
+LV1_CALL(storage_check_async_status, 2, 1, 254 )
+LV1_CALL(panic, 1, 0, 255 )
+LV1_CALL(construct_lpm, 6, 3, 140 )
+LV1_CALL(destruct_lpm, 1, 0, 141 )
+LV1_CALL(start_lpm, 1, 0, 142 )
+LV1_CALL(stop_lpm, 1, 1, 143 )
+LV1_CALL(copy_lpm_trace_buffer, 3, 1, 144 )
+LV1_CALL(add_lpm_event_bookmark, 5, 0, 145 )
+LV1_CALL(delete_lpm_event_bookmark, 3, 0, 146 )
+LV1_CALL(set_lpm_interrupt_mask, 3, 1, 147 )
+LV1_CALL(get_lpm_interrupt_status, 1, 1, 148 )
+LV1_CALL(set_lpm_general_control, 5, 2, 149 )
+LV1_CALL(set_lpm_interval, 3, 1, 150 )
+LV1_CALL(set_lpm_trigger_control, 3, 1, 151 )
+LV1_CALL(set_lpm_counter_control, 4, 1, 152 )
+LV1_CALL(set_lpm_group_control, 3, 1, 153 )
+LV1_CALL(set_lpm_debug_bus_control, 3, 1, 154 )
+LV1_CALL(set_lpm_counter, 5, 2, 155 )
+LV1_CALL(set_lpm_signal, 7, 0, 156 )
+LV1_CALL(set_lpm_spr_trigger, 2, 0, 157 )
+
+#endif
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
new file mode 100644
index 0000000..893aafd
--- /dev/null
+++ b/arch/powerpc/include/asm/machdep.h
@@ -0,0 +1,365 @@
+#ifndef _ASM_POWERPC_MACHDEP_H
+#define _ASM_POWERPC_MACHDEP_H
+#ifdef __KERNEL__
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/seq_file.h>
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/setup.h>
+
+/* We export this macro so that external modules such as ALSA can know
+ * whether ppc_md.feature_call is implemented or not.
+ */
+#define CONFIG_PPC_HAS_FEATURE_CALLS
+
+struct pt_regs;
+struct pci_bus;
+struct device_node;
+struct iommu_table;
+struct rtc_time;
+struct file;
+struct pci_controller;
+#ifdef CONFIG_KEXEC
+struct kimage;
+#endif
+
+#ifdef CONFIG_SMP
+struct smp_ops_t {
+ void (*message_pass)(int target, int msg);
+ int (*probe)(void);
+ void (*kick_cpu)(int nr);
+ void (*setup_cpu)(int nr);
+ void (*take_timebase)(void);
+ void (*give_timebase)(void);
+ int (*cpu_enable)(unsigned int nr);
+ int (*cpu_disable)(void);
+ void (*cpu_die)(unsigned int nr);
+ int (*cpu_bootable)(unsigned int nr);
+};
+#endif
+
+struct machdep_calls {
+ char *name;
+#ifdef CONFIG_PPC64
+ void (*hpte_invalidate)(unsigned long slot,
+ unsigned long va,
+ int psize, int ssize,
+ int local);
+ long (*hpte_updatepp)(unsigned long slot,
+ unsigned long newpp,
+ unsigned long va,
+ int psize, int ssize,
+ int local);
+ void (*hpte_updateboltedpp)(unsigned long newpp,
+ unsigned long ea,
+ int psize, int ssize);
+ long (*hpte_insert)(unsigned long hpte_group,
+ unsigned long va,
+ unsigned long prpn,
+ unsigned long rflags,
+ unsigned long vflags,
+ int psize, int ssize);
+ long (*hpte_remove)(unsigned long hpte_group);
+ void (*hpte_removebolted)(unsigned long ea,
+ int psize, int ssize);
+ void (*flush_hash_range)(unsigned long number, int local);
+
+ /* special for kexec, to be called in real mode, the linear mapping is
+ * destroyed as well */
+ void (*hpte_clear_all)(void);
+
+ int (*tce_build)(struct iommu_table *tbl,
+ long index,
+ long npages,
+ unsigned long uaddr,
+ enum dma_data_direction direction,
+ struct dma_attrs *attrs);
+ void (*tce_free)(struct iommu_table *tbl,
+ long index,
+ long npages);
+ unsigned long (*tce_get)(struct iommu_table *tbl,
+ long index);
+ void (*tce_flush)(struct iommu_table *tbl);
+ void (*pci_dma_dev_setup)(struct pci_dev *dev);
+ void (*pci_dma_bus_setup)(struct pci_bus *bus);
+
+ void __iomem * (*ioremap)(phys_addr_t addr, unsigned long size,
+ unsigned long flags);
+ void (*iounmap)(volatile void __iomem *token);
+
+#ifdef CONFIG_PM
+ void (*iommu_save)(void);
+ void (*iommu_restore)(void);
+#endif
+#endif /* CONFIG_PPC64 */
+
+ int (*probe)(void);
+ void (*setup_arch)(void); /* Optional, may be NULL */
+ void (*init_early)(void);
+ /* Optional, may be NULL. */
+ void (*show_cpuinfo)(struct seq_file *m);
+ void (*show_percpuinfo)(struct seq_file *m, int i);
+
+ void (*init_IRQ)(void);
+ unsigned int (*get_irq)(void);
+#ifdef CONFIG_KEXEC
+ void (*kexec_cpu_down)(int crash_shutdown, int secondary);
+#endif
+
+ /* PCI stuff */
+ /* Called after scanning the bus, before allocating resources */
+ void (*pcibios_fixup)(void);
+ int (*pci_probe_mode)(struct pci_bus *);
+ void (*pci_irq_fixup)(struct pci_dev *dev);
+
+ /* To setup PHBs when using automatic OF platform driver for PCI */
+ int (*pci_setup_phb)(struct pci_controller *host);
+
+#ifdef CONFIG_PCI_MSI
+ int (*msi_check_device)(struct pci_dev* dev,
+ int nvec, int type);
+ int (*setup_msi_irqs)(struct pci_dev *dev,
+ int nvec, int type);
+ void (*teardown_msi_irqs)(struct pci_dev *dev);
+#endif
+
+ void (*restart)(char *cmd);
+ void (*power_off)(void);
+ void (*halt)(void);
+ void (*panic)(char *str);
+ void (*cpu_die)(void);
+
+ long (*time_init)(void); /* Optional, may be NULL */
+
+ int (*set_rtc_time)(struct rtc_time *);
+ void (*get_rtc_time)(struct rtc_time *);
+ unsigned long (*get_boot_time)(void);
+ unsigned char (*rtc_read_val)(int addr);
+ void (*rtc_write_val)(int addr, unsigned char val);
+
+ void (*calibrate_decr)(void);
+
+ void (*progress)(char *, unsigned short);
+
+ /* Interface for platform error logging */
+ void (*log_error)(char *buf, unsigned int err_type, int fatal);
+
+ unsigned char (*nvram_read_val)(int addr);
+ void (*nvram_write_val)(int addr, unsigned char val);
+ ssize_t (*nvram_write)(char *buf, size_t count, loff_t *index);
+ ssize_t (*nvram_read)(char *buf, size_t count, loff_t *index);
+ ssize_t (*nvram_size)(void);
+ void (*nvram_sync)(void);
+
+ /* Exception handlers */
+ int (*system_reset_exception)(struct pt_regs *regs);
+ int (*machine_check_exception)(struct pt_regs *regs);
+
+ /* Motherboard/chipset features. This is a kind of general purpose
+ * hook used to control some machine specific features (like reset
+ * lines, chip power control, etc...).
+ */
+ long (*feature_call)(unsigned int feature, ...);
+
+ /* Get legacy PCI/IDE interrupt mapping */
+ int (*pci_get_legacy_ide_irq)(struct pci_dev *dev, int channel);
+
+ /* Get access protection for /dev/mem */
+ pgprot_t (*phys_mem_access_prot)(struct file *file,
+ unsigned long pfn,
+ unsigned long size,
+ pgprot_t vma_prot);
+
+ /* Idle loop for this platform, leave empty for default idle loop */
+ void (*idle_loop)(void);
+
+ /*
+ * Function for waiting for work with reduced power in idle loop;
+ * called with interrupts disabled.
+ */
+ void (*power_save)(void);
+
+ /* Function to enable performance monitor counters for this
+ platform, called once per cpu. */
+ void (*enable_pmcs)(void);
+
+ /* Set DABR for this platform, leave empty for default implementation */
+ int (*set_dabr)(unsigned long dabr);
+
+#ifdef CONFIG_PPC32 /* XXX for now */
+ /* A general init function, called by ppc_init in init/main.c.
+ May be NULL. */
+ void (*init)(void);
+
+ void (*kgdb_map_scc)(void);
+
+ /*
+ * optional PCI "hooks"
+ */
+ /* Called in indirect_* to avoid touching devices */
+ int (*pci_exclude_device)(struct pci_controller *, unsigned char, unsigned char);
+
+ /* Called at the very end of pcibios_init() */
+ void (*pcibios_after_init)(void);
+
+#endif /* CONFIG_PPC32 */
+
+ /* Called after PPC generic resource fixup to perform
+ machine specific fixups */
+ void (*pcibios_fixup_resources)(struct pci_dev *);
+
+ /* Called for each PCI bus in the system when it's probed */
+ void (*pcibios_fixup_bus)(struct pci_bus *);
+
+ /* Called when pci_enable_device() is called. Returns 0 to
+ * allow assignment/enabling of the device. */
+ int (*pcibios_enable_device_hook)(struct pci_dev *);
+
+ /* Called to shutdown machine specific hardware not already controlled
+ * by other drivers.
+ */
+ void (*machine_shutdown)(void);
+
+#ifdef CONFIG_KEXEC
+ /* Called to do the minimal shutdown needed for a kexec'd kernel
+ * to run successfully.
+ * XXX Should we move this one out of kexec scope?
+ */
+ void (*machine_crash_shutdown)(struct pt_regs *regs);
+
+ /* Called to do whatever setup is needed on the image and the
+ * reboot code buffer. Returns 0 on success.
+ * Provide your own (maybe dummy) implementation if your platform
+ * claims to support kexec.
+ */
+ int (*machine_kexec_prepare)(struct kimage *image);
+
+ /* Called to handle any machine specific cleanup on image */
+ void (*machine_kexec_cleanup)(struct kimage *image);
+
+ /* Called to perform the _real_ kexec.
+ * Do NOT allocate memory or fail here. We are past the point of
+ * no return.
+ */
+ void (*machine_kexec)(struct kimage *image);
+#endif /* CONFIG_KEXEC */
+
+#ifdef CONFIG_SUSPEND
+ /* These are called to disable and enable, respectively, IRQs when
+ * entering a suspend state. If NULL, then the generic versions
+ * will be called. The generic versions disable/enable the
+ * decrementer along with interrupts.
+ */
+ void (*suspend_disable_irqs)(void);
+ void (*suspend_enable_irqs)(void);
+#endif
+};
+
+extern void e500_idle(void);
+extern void power4_idle(void);
+extern void power4_cpu_offline_powersave(void);
+extern void ppc6xx_idle(void);
+
+/*
+ * ppc_md contains a copy of the machine description structure for the
+ * current platform. machine_id contains the initial address where the
+ * description was found during boot.
+ */
+extern struct machdep_calls ppc_md;
+extern struct machdep_calls *machine_id;
+
+#define __machine_desc __attribute__ ((__section__ (".machine.desc")))
+
+#define define_machine(name) \
+ extern struct machdep_calls mach_##name; \
+ EXPORT_SYMBOL(mach_##name); \
+ struct machdep_calls mach_##name __machine_desc =
+
+#define machine_is(name) \
+ ({ \
+ extern struct machdep_calls mach_##name \
+ __attribute__((weak)); \
+ machine_id == &mach_##name; \
+ })
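+
+/*
+ * Illustrative (hypothetical board and function names) use of
+ * define_machine() to register a platform description; only a minimal
+ * subset of the fields above is shown:
+ *
+ *	define_machine(example_board) {
+ *		.name		= "Example Board",
+ *		.probe		= example_board_probe,
+ *		.setup_arch	= example_board_setup_arch,
+ *		.init_IRQ	= example_board_init_IRQ,
+ *		.calibrate_decr	= example_board_calibrate_decr,
+ *	};
+ */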
+
+extern void probe_machine(void);
+
+extern char cmd_line[COMMAND_LINE_SIZE];
+
+#ifdef CONFIG_PPC_PMAC
+/*
+ * Power Macintoshes have either a CUDA, PMU or SMU controlling
+ * system reset, power, NVRAM, RTC.
+ */
+typedef enum sys_ctrler_kind {
+ SYS_CTRLER_UNKNOWN = 0,
+ SYS_CTRLER_CUDA = 1,
+ SYS_CTRLER_PMU = 2,
+ SYS_CTRLER_SMU = 3,
+} sys_ctrler_t;
+extern sys_ctrler_t sys_ctrler;
+
+#endif /* CONFIG_PPC_PMAC */
+
+extern void setup_pci_ptrs(void);
+
+#ifdef CONFIG_SMP
+/* Poor default implementations */
+extern void __devinit smp_generic_give_timebase(void);
+extern void __devinit smp_generic_take_timebase(void);
+#endif /* CONFIG_SMP */
+
+
+/* Functions to produce codes on the LEDs.
+ * The SRC code should be unique for the message category and should
+ * be limited to the lower 24 bits (the upper 8 are set by these funcs),
+ * and (for boot & dump) should be sorted numerically in the order
+ * the events occur.
+ */
+/* Print a boot progress message. */
+void ppc64_boot_msg(unsigned int src, const char *msg);
+/* Print a termination message (print only -- does not stop the kernel) */
+void ppc64_terminate_msg(unsigned int src, const char *msg);
+
+static inline void log_error(char *buf, unsigned int err_type, int fatal)
+{
+ if (ppc_md.log_error)
+ ppc_md.log_error(buf, err_type, fatal);
+}
+
+#define __define_machine_initcall(mach,level,fn,id) \
+ static int __init __machine_initcall_##mach##_##fn(void) { \
+ if (machine_is(mach)) return fn(); \
+ return 0; \
+ } \
+ __define_initcall(level,__machine_initcall_##mach##_##fn,id);
+
+#define machine_core_initcall(mach,fn) __define_machine_initcall(mach,"1",fn,1)
+#define machine_core_initcall_sync(mach,fn) __define_machine_initcall(mach,"1s",fn,1s)
+#define machine_postcore_initcall(mach,fn) __define_machine_initcall(mach,"2",fn,2)
+#define machine_postcore_initcall_sync(mach,fn) __define_machine_initcall(mach,"2s",fn,2s)
+#define machine_arch_initcall(mach,fn) __define_machine_initcall(mach,"3",fn,3)
+#define machine_arch_initcall_sync(mach,fn) __define_machine_initcall(mach,"3s",fn,3s)
+#define machine_subsys_initcall(mach,fn) __define_machine_initcall(mach,"4",fn,4)
+#define machine_subsys_initcall_sync(mach,fn) __define_machine_initcall(mach,"4s",fn,4s)
+#define machine_fs_initcall(mach,fn) __define_machine_initcall(mach,"5",fn,5)
+#define machine_fs_initcall_sync(mach,fn) __define_machine_initcall(mach,"5s",fn,5s)
+#define machine_rootfs_initcall(mach,fn) __define_machine_initcall(mach,"rootfs",fn,rootfs)
+#define machine_device_initcall(mach,fn) __define_machine_initcall(mach,"6",fn,6)
+#define machine_device_initcall_sync(mach,fn) __define_machine_initcall(mach,"6s",fn,6s)
+#define machine_late_initcall(mach,fn) __define_machine_initcall(mach,"7",fn,7)
+#define machine_late_initcall_sync(mach,fn) __define_machine_initcall(mach,"7s",fn,7s)
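+
+/*
+ * Illustrative (hypothetical names) use of the machine_*_initcall()
+ * helpers above: run example_setup() as a device-level initcall, but
+ * only when booting on the "example" platform:
+ *
+ *	static int __init example_setup(void)
+ *	{
+ *		return 0;
+ *	}
+ *	machine_device_initcall(example, example_setup);
+ */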
+
+void generic_suspend_disable_irqs(void);
+void generic_suspend_enable_irqs(void);
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_MACHDEP_H */
diff --git a/arch/powerpc/include/asm/macio.h b/arch/powerpc/include/asm/macio.h
new file mode 100644
index 0000000..079c06e
--- /dev/null
+++ b/arch/powerpc/include/asm/macio.h
@@ -0,0 +1,142 @@
+#ifndef __MACIO_ASIC_H__
+#define __MACIO_ASIC_H__
+#ifdef __KERNEL__
+
+#include <linux/of_device.h>
+
+extern struct bus_type macio_bus_type;
+
+/* MacIO device driver is defined later */
+struct macio_driver;
+struct macio_chip;
+
+#define MACIO_DEV_COUNT_RESOURCES 8
+#define MACIO_DEV_COUNT_IRQS 8
+
+/*
+ * the macio_bus structure is used to describe a "virtual" bus
+ * within a MacIO ASIC. It's typically provided by a macio_pci_asic
+ * PCI device, but could be provided differently as well (nubus
+ * machines using a fake OF tree).
+ *
+ * The pdev field can be NULL on non-PCI machines
+ */
+struct macio_bus
+{
+ struct macio_chip *chip; /* macio_chip (private use) */
+ int index; /* macio chip index in system */
+#ifdef CONFIG_PCI
+ struct pci_dev *pdev; /* PCI device hosting this bus */
+#endif
+};
+
+/*
+ * the macio_dev structure is used to describe a device
+ * within an Apple MacIO ASIC.
+ */
+struct macio_dev
+{
+ struct macio_bus *bus; /* macio bus this device is on */
+ struct macio_dev *media_bay; /* Device is part of a media bay */
+ struct of_device ofdev;
+ int n_resources;
+ struct resource resource[MACIO_DEV_COUNT_RESOURCES];
+ int n_interrupts;
+ struct resource interrupt[MACIO_DEV_COUNT_IRQS];
+};
+#define to_macio_device(d) container_of(d, struct macio_dev, ofdev.dev)
+#define of_to_macio_device(d) container_of(d, struct macio_dev, ofdev)
+
+extern struct macio_dev *macio_dev_get(struct macio_dev *dev);
+extern void macio_dev_put(struct macio_dev *dev);
+
+/*
+ * Accessors to resources & interrupts and other device
+ * fields
+ */
+
+static inline int macio_resource_count(struct macio_dev *dev)
+{
+ return dev->n_resources;
+}
+
+static inline unsigned long macio_resource_start(struct macio_dev *dev, int resource_no)
+{
+ return dev->resource[resource_no].start;
+}
+
+static inline unsigned long macio_resource_end(struct macio_dev *dev, int resource_no)
+{
+ return dev->resource[resource_no].end;
+}
+
+static inline unsigned long macio_resource_len(struct macio_dev *dev, int resource_no)
+{
+ struct resource *res = &dev->resource[resource_no];
+ if (res->start == 0 || res->end == 0 || res->end < res->start)
+ return 0;
+ return res->end - res->start + 1;
+}
+
+extern int macio_request_resource(struct macio_dev *dev, int resource_no, const char *name);
+extern void macio_release_resource(struct macio_dev *dev, int resource_no);
+extern int macio_request_resources(struct macio_dev *dev, const char *name);
+extern void macio_release_resources(struct macio_dev *dev);
+
+static inline int macio_irq_count(struct macio_dev *dev)
+{
+ return dev->n_interrupts;
+}
+
+static inline int macio_irq(struct macio_dev *dev, int irq_no)
+{
+ return dev->interrupt[irq_no].start;
+}
+
+static inline void macio_set_drvdata(struct macio_dev *dev, void *data)
+{
+ dev_set_drvdata(&dev->ofdev.dev, data);
+}
+
+static inline void* macio_get_drvdata(struct macio_dev *dev)
+{
+ return dev_get_drvdata(&dev->ofdev.dev);
+}
+
+static inline struct device_node *macio_get_of_node(struct macio_dev *mdev)
+{
+ return mdev->ofdev.node;
+}
+
+#ifdef CONFIG_PCI
+static inline struct pci_dev *macio_get_pci_dev(struct macio_dev *mdev)
+{
+ return mdev->bus->pdev;
+}
+#endif
+
+/*
+ * A driver for a mac-io chip based device
+ */
+struct macio_driver
+{
+ char *name;
+ struct of_device_id *match_table;
+ struct module *owner;
+
+ int (*probe)(struct macio_dev* dev, const struct of_device_id *match);
+ int (*remove)(struct macio_dev* dev);
+
+ int (*suspend)(struct macio_dev* dev, pm_message_t state);
+ int (*resume)(struct macio_dev* dev);
+ int (*shutdown)(struct macio_dev* dev);
+
+ struct device_driver driver;
+};
+#define to_macio_driver(drv) container_of(drv,struct macio_driver, driver)
+
+extern int macio_register_driver(struct macio_driver *);
+extern void macio_unregister_driver(struct macio_driver *);
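+
+/*
+ * Sketch of a minimal driver built on this interface (all names are
+ * illustrative, not an existing driver):
+ *
+ *	static int example_probe(struct macio_dev *mdev,
+ *				 const struct of_device_id *match)
+ *	{
+ *		if (macio_resource_count(mdev) < 1)
+ *			return -ENODEV;
+ *		return macio_request_resource(mdev, 0, "example");
+ *	}
+ *
+ *	static struct macio_driver example_driver = {
+ *		.name		= "example",
+ *		.match_table	= example_match_table,
+ *		.probe		= example_probe,
+ *	};
+ *
+ *	// registered from the module init path:
+ *	macio_register_driver(&example_driver);
+ */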
+
+#endif /* __KERNEL__ */
+#endif /* __MACIO_ASIC_H__ */
diff --git a/arch/powerpc/include/asm/mc146818rtc.h b/arch/powerpc/include/asm/mc146818rtc.h
new file mode 100644
index 0000000..f2741c8
--- /dev/null
+++ b/arch/powerpc/include/asm/mc146818rtc.h
@@ -0,0 +1,36 @@
+#ifndef _ASM_POWERPC_MC146818RTC_H
+#define _ASM_POWERPC_MC146818RTC_H
+
+/*
+ * Machine dependent access functions for RTC registers.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifdef __KERNEL__
+
+#include <asm/io.h>
+
+#ifndef RTC_PORT
+#define RTC_PORT(x) (0x70 + (x))
+#define RTC_ALWAYS_BCD 1 /* RTC operates in binary mode */
+#endif
+
+/*
+ * The machines supported so far all access the RTC index register via
+ * an ISA port access, but the way to access the data register differs ...
+ */
+#define CMOS_READ(addr) ({ \
+outb_p((addr),RTC_PORT(0)); \
+inb_p(RTC_PORT(1)); \
+})
+#define CMOS_WRITE(val, addr) ({ \
+outb_p((addr),RTC_PORT(0)); \
+outb_p((val),RTC_PORT(1)); \
+})
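+
+/*
+ * For example, reading the seconds register (index 0, RTC_SECONDS in
+ * <linux/mc146818rtc.h>) through the macros above looks like:
+ *
+ *	unsigned char sec = CMOS_READ(RTC_SECONDS);
+ */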
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_MC146818RTC_H */
diff --git a/arch/powerpc/include/asm/mediabay.h b/arch/powerpc/include/asm/mediabay.h
new file mode 100644
index 0000000..b2efb33
--- /dev/null
+++ b/arch/powerpc/include/asm/mediabay.h
@@ -0,0 +1,43 @@
+/*
+ * mediabay.h: definitions for using the media bay
+ * on PowerBook 3400 and similar computers.
+ *
+ * Copyright (C) 1997 Paul Mackerras.
+ */
+#ifndef _PPC_MEDIABAY_H
+#define _PPC_MEDIABAY_H
+
+#ifdef __KERNEL__
+
+#define MB_FD 0 /* media bay contains floppy drive (automatic eject ?) */
+#define MB_FD1 1 /* media bay contains floppy drive (manual eject ?) */
+#define MB_SOUND 2 /* sound device ? */
+#define MB_CD 3 /* media bay contains ATA drive such as CD or ZIP */
+#define MB_PCI 5 /* media bay contains a PCI device */
+#define MB_POWER 6 /* media bay contains a Power device (???) */
+#define MB_NO 7 /* media bay contains nothing */
+
+/* Number of bays in the machine or 0 */
+extern int media_bay_count;
+
+#ifdef CONFIG_BLK_DEV_IDE_PMAC
+#include <linux/ide.h>
+
+int check_media_bay_by_base(unsigned long base, int what);
+/* called by IDE PMAC host driver to register IDE controller for media bay */
+int media_bay_set_ide_infos(struct device_node *which_bay, unsigned long base,
+ int irq, ide_hwif_t *hwif);
+
+int check_media_bay(struct device_node *which_bay, int what);
+
+#else
+
+static inline int check_media_bay(struct device_node *which_bay, int what)
+{
+ return -ENODEV;
+}
+
+#endif
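+
+/*
+ * Sketch of typical use: a caller holding the bay's device_node could
+ * test whether the bay currently contains an ATA device with something
+ * like:
+ *
+ *	if (check_media_bay(bay_node, MB_CD) == 0)
+ *		;	// the bay holds an ATA (CD/ZIP) device
+ */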
+
+#endif /* __KERNEL__ */
+#endif /* _PPC_MEDIABAY_H */
diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
new file mode 100644
index 0000000..9209f75
--- /dev/null
+++ b/arch/powerpc/include/asm/mman.h
@@ -0,0 +1,63 @@
+#ifndef _ASM_POWERPC_MMAN_H
+#define _ASM_POWERPC_MMAN_H
+
+#include <asm-generic/mman.h>
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define PROT_SAO 0x10 /* Strong Access Ordering */
+
+#define MAP_RENAME MAP_ANONYMOUS /* In SunOS terminology */
+#define MAP_NORESERVE 0x40 /* don't reserve swap pages */
+#define MAP_LOCKED 0x80
+
+#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
+#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
+#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
+
+#define MCL_CURRENT 0x2000 /* lock all currently mapped pages */
+#define MCL_FUTURE 0x4000 /* lock all additions to address space */
+
+#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
+#define MAP_NONBLOCK 0x10000 /* do not block on IO */
+
+#ifdef __KERNEL__
+#ifdef CONFIG_PPC64
+
+#include <asm/cputable.h>
+#include <linux/mm.h>
+
+/*
+ * This file is included by linux/mman.h, so we can't use calc_vm_prot_bits()
+ * here. How important is the optimization?
+ */
+static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
+{
+ return (prot & PROT_SAO) ? VM_SAO : 0;
+}
+#define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
+
+static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
+{
+ return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : 0;
+}
+#define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)
+
+static inline int arch_validate_prot(unsigned long prot)
+{
+ if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_SAO))
+ return 0;
+ if ((prot & PROT_SAO) && !cpu_has_feature(CPU_FTR_SAO))
+ return 0;
+ return 1;
+}
+#define arch_validate_prot(prot) arch_validate_prot(prot)
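+
+/*
+ * For illustration, user space would request a Strong Access Ordering
+ * mapping (rejected by arch_validate_prot() above when the CPU lacks
+ * CPU_FTR_SAO) roughly like this:
+ *
+ *	p = mmap(NULL, len, PROT_READ | PROT_WRITE | PROT_SAO,
+ *		 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+ */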
+
+#endif /* CONFIG_PPC64 */
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_MMAN_H */
diff --git a/arch/powerpc/include/asm/mmu-40x.h b/arch/powerpc/include/asm/mmu-40x.h
new file mode 100644
index 0000000..3d10867
--- /dev/null
+++ b/arch/powerpc/include/asm/mmu-40x.h
@@ -0,0 +1,63 @@
+#ifndef _ASM_POWERPC_MMU_40X_H_
+#define _ASM_POWERPC_MMU_40X_H_
+
+/*
+ * PPC40x support
+ */
+
+#define PPC40X_TLB_SIZE 64
+
+/*
+ * TLB entries are defined by a "high" tag portion and a "low" data
+ * portion. On all architectures, the data portion is 32-bits.
+ *
+ * TLB entries are managed entirely under software control by reading,
+ * writing, and searching using the 4xx-specific tlbre, tlbwr, and tlbsx
+ * instructions.
+ */
+
+#define TLB_LO 1
+#define TLB_HI 0
+
+#define TLB_DATA TLB_LO
+#define TLB_TAG TLB_HI
+
+/* Tag portion */
+
+#define TLB_EPN_MASK 0xFFFFFC00 /* Effective Page Number */
+#define TLB_PAGESZ_MASK 0x00000380
+#define TLB_PAGESZ(x) (((x) & 0x7) << 7)
+#define PAGESZ_1K 0
+#define PAGESZ_4K 1
+#define PAGESZ_16K 2
+#define PAGESZ_64K 3
+#define PAGESZ_256K 4
+#define PAGESZ_1M 5
+#define PAGESZ_4M 6
+#define PAGESZ_16M 7
+#define TLB_VALID 0x00000040 /* Entry is valid */
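+
+/* As a sketch of how the tag fields combine, a valid 16K entry for an
+ * effective address "ea" could be composed as:
+ *	(ea & TLB_EPN_MASK) | TLB_PAGESZ(PAGESZ_16K) | TLB_VALID
+ */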
+
+/* Data portion */
+
+#define TLB_RPN_MASK 0xFFFFFC00 /* Real Page Number */
+#define TLB_PERM_MASK 0x00000300
+#define TLB_EX 0x00000200 /* Instruction execution allowed */
+#define TLB_WR 0x00000100 /* Writes permitted */
+#define TLB_ZSEL_MASK 0x000000F0
+#define TLB_ZSEL(x) (((x) & 0xF) << 4)
+#define TLB_ATTR_MASK 0x0000000F
+#define TLB_W 0x00000008 /* Caching is write-through */
+#define TLB_I 0x00000004 /* Caching is inhibited */
+#define TLB_M 0x00000002 /* Memory is coherent */
+#define TLB_G 0x00000001 /* Memory is guarded from prefetch */
+
+#ifndef __ASSEMBLY__
+
+typedef struct {
+ unsigned long id;
+ unsigned long vdso_base;
+} mm_context_t;
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_MMU_40X_H_ */
diff --git a/arch/powerpc/include/asm/mmu-44x.h b/arch/powerpc/include/asm/mmu-44x.h
new file mode 100644
index 0000000..a825524
--- /dev/null
+++ b/arch/powerpc/include/asm/mmu-44x.h
@@ -0,0 +1,76 @@
+#ifndef _ASM_POWERPC_MMU_44X_H_
+#define _ASM_POWERPC_MMU_44X_H_
+/*
+ * PPC440 support
+ */
+
+#define PPC44x_MMUCR_TID 0x000000ff
+#define PPC44x_MMUCR_STS 0x00010000
+
+#define PPC44x_TLB_PAGEID 0
+#define PPC44x_TLB_XLAT 1
+#define PPC44x_TLB_ATTRIB 2
+
+/* Page identification fields */
+#define PPC44x_TLB_EPN_MASK 0xfffffc00 /* Effective Page Number */
+#define PPC44x_TLB_VALID 0x00000200 /* Valid flag */
+#define PPC44x_TLB_TS 0x00000100 /* Translation address space */
+#define PPC44x_TLB_1K 0x00000000 /* Page sizes */
+#define PPC44x_TLB_4K 0x00000010
+#define PPC44x_TLB_16K 0x00000020
+#define PPC44x_TLB_64K 0x00000030
+#define PPC44x_TLB_256K 0x00000040
+#define PPC44x_TLB_1M 0x00000050
+#define PPC44x_TLB_16M 0x00000070
+#define PPC44x_TLB_256M 0x00000090
+
+/* Translation fields */
+#define PPC44x_TLB_RPN_MASK 0xfffffc00 /* Real Page Number */
+#define PPC44x_TLB_ERPN_MASK 0x0000000f
+
+/* Storage attribute and access control fields */
+#define PPC44x_TLB_ATTR_MASK 0x0000ff80
+#define PPC44x_TLB_U0 0x00008000 /* User 0 */
+#define PPC44x_TLB_U1 0x00004000 /* User 1 */
+#define PPC44x_TLB_U2 0x00002000 /* User 2 */
+#define PPC44x_TLB_U3 0x00001000 /* User 3 */
+#define PPC44x_TLB_W 0x00000800 /* Caching is write-through */
+#define PPC44x_TLB_I 0x00000400 /* Caching is inhibited */
+#define PPC44x_TLB_M 0x00000200 /* Memory is coherent */
+#define PPC44x_TLB_G 0x00000100 /* Memory is guarded */
+#define PPC44x_TLB_E 0x00000080 /* Memory is little endian */
+
+#define PPC44x_TLB_PERM_MASK 0x0000003f
+#define PPC44x_TLB_UX 0x00000020 /* User execution */
+#define PPC44x_TLB_UW 0x00000010 /* User write */
+#define PPC44x_TLB_UR 0x00000008 /* User read */
+#define PPC44x_TLB_SX 0x00000004 /* Super execution */
+#define PPC44x_TLB_SW 0x00000002 /* Super write */
+#define PPC44x_TLB_SR 0x00000001 /* Super read */
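+
+/* Sketch: a supervisor-only attribute/permission word for a coherent,
+ * executable mapping might combine the bits above as:
+ *	PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_M
+ */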
+
+/* Number of TLB entries */
+#define PPC44x_TLB_SIZE 64
+
+#ifndef __ASSEMBLY__
+
+extern unsigned int tlb_44x_hwater;
+
+typedef struct {
+ unsigned long id;
+ unsigned long vdso_base;
+} mm_context_t;
+
+#endif /* !__ASSEMBLY__ */
+
+#ifndef CONFIG_PPC_EARLY_DEBUG_44x
+#define PPC44x_EARLY_TLBS 1
+#else
+#define PPC44x_EARLY_TLBS 2
+#define PPC44x_EARLY_DEBUG_VIRTADDR (ASM_CONST(0xf0000000) \
+ | (ASM_CONST(CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW) & 0xffff))
+#endif
+
+/* Size of the TLBs used for pinning in lowmem */
+#define PPC_PIN_SIZE (1 << 28) /* 256M */
+
+#endif /* _ASM_POWERPC_MMU_44X_H_ */
diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h
new file mode 100644
index 0000000..9db877e
--- /dev/null
+++ b/arch/powerpc/include/asm/mmu-8xx.h
@@ -0,0 +1,145 @@
+#ifndef _ASM_POWERPC_MMU_8XX_H_
+#define _ASM_POWERPC_MMU_8XX_H_
+/*
+ * PPC8xx support
+ */
+
+/* Control/status registers for the MPC8xx.
+ * A write operation to these registers causes serialized access.
+ * During software tablewalk, the registers used perform mask/shift-add
+ * operations when written/read. A TLB entry is created when the Mx_RPN
+ * is written, and the contents of several registers are used to
+ * create the entry.
+ */
+#define SPRN_MI_CTR 784 /* Instruction TLB control register */
+#define MI_GPM 0x80000000 /* Set domain manager mode */
+#define MI_PPM 0x40000000 /* Set subpage protection */
+#define MI_CIDEF 0x20000000 /* Set cache inhibit when MMU dis */
+#define MI_RSV4I 0x08000000 /* Reserve 4 TLB entries */
+#define MI_PPCS 0x02000000 /* Use MI_RPN prob/priv state */
+#define MI_IDXMASK 0x00001f00 /* TLB index to be loaded */
+#define MI_RESETVAL 0x00000000 /* Value of register at reset */
+
+/* These are the Ks and Kp from the PowerPC books. For proper operation,
+ * Ks = 0, Kp = 1.
+ */
+#define SPRN_MI_AP 786
+#define MI_Ks 0x80000000 /* Should not be set */
+#define MI_Kp 0x40000000 /* Should always be set */
+
+/* The effective page number register. When read, contains the information
+ * about the last instruction TLB miss. When MI_RPN is written, bits in
+ * this register are used to create the TLB entry.
+ */
+#define SPRN_MI_EPN 787
+#define MI_EPNMASK 0xfffff000 /* Effective page number for entry */
+#define MI_EVALID 0x00000200 /* Entry is valid */
+#define MI_ASIDMASK 0x0000000f /* ASID match value */
+ /* Reset value is undefined */
+
+/* A "level 1" or "segment" or whatever you want to call it register.
+ * For the instruction TLB, it contains bits that get loaded into the
+ * TLB entry when the MI_RPN is written.
+ */
+#define SPRN_MI_TWC 789
+#define MI_APG 0x000001e0 /* Access protection group (0) */
+#define MI_GUARDED 0x00000010 /* Guarded storage */
+#define MI_PSMASK 0x0000000c /* Mask of page size bits */
+#define MI_PS8MEG 0x0000000c /* 8M page size */
+#define MI_PS512K 0x00000004 /* 512K page size */
+#define MI_PS4K_16K 0x00000000 /* 4K or 16K page size */
+#define MI_SVALID 0x00000001 /* Segment entry is valid */
+ /* Reset value is undefined */
+
+/* Real page number. Defined by the pte. Writing this register
+ * causes a TLB entry to be created for the instruction TLB, using
+ * additional information from the MI_EPN, and MI_TWC registers.
+ */
+#define SPRN_MI_RPN 790
+
+/* Define an RPN value for mapping kernel memory to large virtual
+ * pages for boot initialization. This has real page number of 0,
+ * large page size, shared page, cache enabled, and valid.
+ * Also mark all subpages valid and write access.
+ */
+#define MI_BOOTINIT 0x000001fd
+
+#define SPRN_MD_CTR 792 /* Data TLB control register */
+#define MD_GPM 0x80000000 /* Set domain manager mode */
+#define MD_PPM 0x40000000 /* Set subpage protection */
+#define MD_CIDEF 0x20000000 /* Set cache inhibit when MMU dis */
+#define MD_WTDEF 0x10000000 /* Set writethrough when MMU dis */
+#define MD_RSV4I 0x08000000 /* Reserve 4 TLB entries */
+#define MD_TWAM 0x04000000 /* Use 4K page hardware assist */
+#define MD_PPCS 0x02000000 /* Use MI_RPN prob/priv state */
+#define MD_IDXMASK 0x00001f00 /* TLB index to be loaded */
+#define MD_RESETVAL 0x04000000 /* Value of register at reset */
+
+#define SPRN_M_CASID 793 /* Address space ID (context) to match */
+#define MC_ASIDMASK 0x0000000f /* Bits used for ASID value */
+
+
+/* These are the Ks and Kp from the PowerPC books. For proper operation,
+ * Ks = 0, Kp = 1.
+ */
+#define SPRN_MD_AP 794
+#define MD_Ks 0x80000000 /* Should not be set */
+#define MD_Kp 0x40000000 /* Should always be set */
+
+/* The effective page number register. When read, contains the information
+ * about the last data TLB miss. When MD_RPN is written, bits in
+ * this register are used to create the TLB entry.
+ */
+#define SPRN_MD_EPN 795
+#define MD_EPNMASK 0xfffff000 /* Effective page number for entry */
+#define MD_EVALID 0x00000200 /* Entry is valid */
+#define MD_ASIDMASK 0x0000000f /* ASID match value */
+ /* Reset value is undefined */
+
+/* The pointer to the base address of the first level page table.
+ * During a software tablewalk, reading this register provides the address
+ * of the entry associated with MD_EPN.
+ */
+#define SPRN_M_TWB 796
+#define M_L1TB 0xfffff000 /* Level 1 table base address */
+#define M_L1INDX 0x00000ffc /* Level 1 index, when read */
+ /* Reset value is undefined */
+
+/* A "level 1" or "segment" or whatever you want to call it register.
+ * For the data TLB, it contains bits that get loaded into the TLB entry
+ * when the MD_RPN is written. It also provides the hardware assist
+ * for finding the PTE address during software tablewalk.
+ */
+#define SPRN_MD_TWC 797
+#define MD_L2TB 0xfffff000 /* Level 2 table base address */
+#define MD_L2INDX 0xfffffe00 /* Level 2 index (*pte), when read */
+#define MD_APG 0x000001e0 /* Access protection group (0) */
+#define MD_GUARDED 0x00000010 /* Guarded storage */
+#define MD_PSMASK 0x0000000c /* Mask of page size bits */
+#define MD_PS8MEG 0x0000000c /* 8M page size */
+#define MD_PS512K 0x00000004 /* 512K page size */
+#define MD_PS4K_16K 0x00000000 /* 4K or 16K page size */
+#define MD_WT 0x00000002 /* Use writethrough page attribute */
+#define MD_SVALID 0x00000001 /* Segment entry is valid */
+ /* Reset value is undefined */
+
+
+/* Real page number. Defined by the pte. Writing this register
+ * causes a TLB entry to be created for the data TLB, using
+ * additional information from the MD_EPN, and MD_TWC registers.
+ */
+#define SPRN_MD_RPN 798
+
+/* This is a temporary storage register that could be used to save
+ * a processor working register during a tablewalk.
+ */
+#define SPRN_M_TW 799
+
+#ifndef __ASSEMBLY__
+typedef struct {
+ unsigned long id;
+ unsigned long vdso_base;
+} mm_context_t;
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_MMU_8XX_H_ */
diff --git a/arch/powerpc/include/asm/mmu-fsl-booke.h b/arch/powerpc/include/asm/mmu-fsl-booke.h
new file mode 100644
index 0000000..925d93c
--- /dev/null
+++ b/arch/powerpc/include/asm/mmu-fsl-booke.h
@@ -0,0 +1,82 @@
+#ifndef _ASM_POWERPC_MMU_FSL_BOOKE_H_
+#define _ASM_POWERPC_MMU_FSL_BOOKE_H_
+/*
+ * Freescale Book-E MMU support
+ */
+
+/* Book-E defined page sizes */
+#define BOOKE_PAGESZ_1K 0
+#define BOOKE_PAGESZ_4K 1
+#define BOOKE_PAGESZ_16K 2
+#define BOOKE_PAGESZ_64K 3
+#define BOOKE_PAGESZ_256K 4
+#define BOOKE_PAGESZ_1M 5
+#define BOOKE_PAGESZ_4M 6
+#define BOOKE_PAGESZ_16M 7
+#define BOOKE_PAGESZ_64M 8
+#define BOOKE_PAGESZ_256M 9
+#define BOOKE_PAGESZ_1GB 10
+#define BOOKE_PAGESZ_4GB 11
+#define BOOKE_PAGESZ_16GB 12
+#define BOOKE_PAGESZ_64GB 13
+#define BOOKE_PAGESZ_256GB 14
+#define BOOKE_PAGESZ_1TB 15
+
+#define MAS0_TLBSEL(x) ((x << 28) & 0x30000000)
+#define MAS0_ESEL(x) ((x << 16) & 0x0FFF0000)
+#define MAS0_NV(x) ((x) & 0x00000FFF)
+
+#define MAS1_VALID 0x80000000
+#define MAS1_IPROT 0x40000000
+#define MAS1_TID(x) ((x << 16) & 0x3FFF0000)
+#define MAS1_TS 0x00001000
+#define MAS1_TSIZE(x) ((x << 8) & 0x00000F00)
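+
+/* Sketch: a valid, protected 16M entry would set up MAS1 roughly as:
+ *	MAS1_VALID | MAS1_IPROT | MAS1_TSIZE(BOOKE_PAGESZ_16M)
+ */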
+
+#define MAS2_EPN 0xFFFFF000
+#define MAS2_X0 0x00000040
+#define MAS2_X1 0x00000020
+#define MAS2_W 0x00000010
+#define MAS2_I 0x00000008
+#define MAS2_M 0x00000004
+#define MAS2_G 0x00000002
+#define MAS2_E 0x00000001
+
+#define MAS3_RPN 0xFFFFF000
+#define MAS3_U0 0x00000200
+#define MAS3_U1 0x00000100
+#define MAS3_U2 0x00000080
+#define MAS3_U3 0x00000040
+#define MAS3_UX 0x00000020
+#define MAS3_SX 0x00000010
+#define MAS3_UW 0x00000008
+#define MAS3_SW 0x00000004
+#define MAS3_UR 0x00000002
+#define MAS3_SR 0x00000001
+
+#define MAS4_TLBSELD(x) MAS0_TLBSEL(x)
+#define MAS4_TIDDSEL 0x000F0000
+#define MAS4_TSIZED(x) MAS1_TSIZE(x)
+#define MAS4_X0D 0x00000040
+#define MAS4_X1D 0x00000020
+#define MAS4_WD 0x00000010
+#define MAS4_ID 0x00000008
+#define MAS4_MD 0x00000004
+#define MAS4_GD 0x00000002
+#define MAS4_ED 0x00000001
+
+#define MAS6_SPID0 0x3FFF0000
+#define MAS6_SPID1 0x00007FFE
+#define MAS6_SAS 0x00000001
+#define MAS6_SPID MAS6_SPID0
+
+#define MAS7_RPN 0xFFFFFFFF
+
+#ifndef __ASSEMBLY__
+
+typedef struct {
+ unsigned long id;
+ unsigned long vdso_base;
+} mm_context_t;
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_MMU_FSL_BOOKE_H_ */
diff --git a/arch/powerpc/include/asm/mmu-hash32.h b/arch/powerpc/include/asm/mmu-hash32.h
new file mode 100644
index 0000000..16b1a1e
--- /dev/null
+++ b/arch/powerpc/include/asm/mmu-hash32.h
@@ -0,0 +1,83 @@
+#ifndef _ASM_POWERPC_MMU_HASH32_H_
+#define _ASM_POWERPC_MMU_HASH32_H_
+/*
+ * 32-bit hash table MMU support
+ */
+
+/*
+ * BATs
+ */
+
+/* Block size masks */
+#define BL_128K 0x000
+#define BL_256K 0x001
+#define BL_512K 0x003
+#define BL_1M 0x007
+#define BL_2M 0x00F
+#define BL_4M 0x01F
+#define BL_8M 0x03F
+#define BL_16M 0x07F
+#define BL_32M 0x0FF
+#define BL_64M 0x1FF
+#define BL_128M 0x3FF
+#define BL_256M 0x7FF
+
+/* BAT Access Protection */
+#define BPP_XX 0x00 /* No access */
+#define BPP_RX 0x01 /* Read only */
+#define BPP_RW 0x02 /* Read/write */
+
+#ifndef __ASSEMBLY__
+/* Contort a phys_addr_t into the right format/bits for a BAT */
+#ifdef CONFIG_PHYS_64BIT
+#define BAT_PHYS_ADDR(x) ((u32)((x & 0x00000000fffe0000ULL) | \
+ ((x & 0x0000000e00000000ULL) >> 24) | \
+ ((x & 0x0000000100000000ULL) >> 30)))
+#else
+#define BAT_PHYS_ADDR(x) (x)
+#endif
+
+struct ppc_bat {
+ u32 batu;
+ u32 batl;
+};
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * Hash table
+ */
+
+/* Values for PP (assumes Ks=0, Kp=1) */
+#define PP_RWXX 0 /* Supervisor read/write, User none */
+#define PP_RWRX 1 /* Supervisor read/write, User read */
+#define PP_RWRW 2 /* Supervisor read/write, User read/write */
+#define PP_RXRX 3 /* Supervisor read, User read */
+
+#ifndef __ASSEMBLY__
+
+/* Hardware Page Table Entry */
+struct hash_pte {
+ unsigned long v:1; /* Entry is valid */
+ unsigned long vsid:24; /* Virtual segment identifier */
+ unsigned long h:1; /* Hash algorithm indicator */
+ unsigned long api:6; /* Abbreviated page index */
+ unsigned long rpn:20; /* Real (physical) page number */
+ unsigned long :3; /* Unused */
+ unsigned long r:1; /* Referenced */
+ unsigned long c:1; /* Changed */
+ unsigned long w:1; /* Write-thru cache mode */
+ unsigned long i:1; /* Cache inhibited */
+ unsigned long m:1; /* Memory coherence */
+ unsigned long g:1; /* Guarded */
+ unsigned long :1; /* Unused */
+ unsigned long pp:2; /* Page protection */
+};
+
+typedef struct {
+ unsigned long id;
+ unsigned long vdso_base;
+} mm_context_t;
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_MMU_HASH32_H_ */
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
new file mode 100644
index 0000000..19c7a94
--- /dev/null
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -0,0 +1,478 @@
+#ifndef _ASM_POWERPC_MMU_HASH64_H_
+#define _ASM_POWERPC_MMU_HASH64_H_
+/*
+ * PowerPC64 memory management structures
+ *
+ * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
+ * PPC64 rework.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/asm-compat.h>
+#include <asm/page.h>
+
+/*
+ * Segment table
+ */
+
+#define STE_ESID_V 0x80
+#define STE_ESID_KS 0x20
+#define STE_ESID_KP 0x10
+#define STE_ESID_N 0x08
+
+#define STE_VSID_SHIFT 12
+
+/* Location of cpu0's segment table */
+#define STAB0_PAGE 0x6
+#define STAB0_OFFSET (STAB0_PAGE << 12)
+#define STAB0_PHYS_ADDR (STAB0_OFFSET + PHYSICAL_START)
+
+#ifndef __ASSEMBLY__
+extern char initial_stab[];
+#endif /* ! __ASSEMBLY */
+
+/*
+ * SLB
+ */
+
+#define SLB_NUM_BOLTED 3
+#define SLB_CACHE_ENTRIES 8
+
+/* Bits in the SLB ESID word */
+#define SLB_ESID_V ASM_CONST(0x0000000008000000) /* valid */
+
+/* Bits in the SLB VSID word */
+#define SLB_VSID_SHIFT 12
+#define SLB_VSID_SHIFT_1T 24
+#define SLB_VSID_SSIZE_SHIFT 62
+#define SLB_VSID_B ASM_CONST(0xc000000000000000)
+#define SLB_VSID_B_256M ASM_CONST(0x0000000000000000)
+#define SLB_VSID_B_1T ASM_CONST(0x4000000000000000)
+#define SLB_VSID_KS ASM_CONST(0x0000000000000800)
+#define SLB_VSID_KP ASM_CONST(0x0000000000000400)
+#define SLB_VSID_N ASM_CONST(0x0000000000000200) /* no-execute */
+#define SLB_VSID_L ASM_CONST(0x0000000000000100)
+#define SLB_VSID_C ASM_CONST(0x0000000000000080) /* class */
+#define SLB_VSID_LP ASM_CONST(0x0000000000000030)
+#define SLB_VSID_LP_00 ASM_CONST(0x0000000000000000)
+#define SLB_VSID_LP_01 ASM_CONST(0x0000000000000010)
+#define SLB_VSID_LP_10 ASM_CONST(0x0000000000000020)
+#define SLB_VSID_LP_11 ASM_CONST(0x0000000000000030)
+#define SLB_VSID_LLP (SLB_VSID_L|SLB_VSID_LP)
+
+#define SLB_VSID_KERNEL (SLB_VSID_KP)
+#define SLB_VSID_USER (SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)
+
+#define SLBIE_C (0x08000000)
+#define SLBIE_SSIZE_SHIFT 25
+
+/*
+ * Hash table
+ */
+
+#define HPTES_PER_GROUP 8
+
+#define HPTE_V_SSIZE_SHIFT 62
+#define HPTE_V_AVPN_SHIFT 7
+#define HPTE_V_AVPN ASM_CONST(0x3fffffffffffff80)
+#define HPTE_V_AVPN_VAL(x) (((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
+#define HPTE_V_COMPARE(x,y) (!(((x) ^ (y)) & 0xffffffffffffff80UL))
+#define HPTE_V_BOLTED ASM_CONST(0x0000000000000010)
+#define HPTE_V_LOCK ASM_CONST(0x0000000000000008)
+#define HPTE_V_LARGE ASM_CONST(0x0000000000000004)
+#define HPTE_V_SECONDARY ASM_CONST(0x0000000000000002)
+#define HPTE_V_VALID ASM_CONST(0x0000000000000001)
+
+#define HPTE_R_PP0 ASM_CONST(0x8000000000000000)
+#define HPTE_R_TS ASM_CONST(0x4000000000000000)
+#define HPTE_R_RPN_SHIFT 12
+#define HPTE_R_RPN ASM_CONST(0x3ffffffffffff000)
+#define HPTE_R_FLAGS ASM_CONST(0x00000000000003ff)
+#define HPTE_R_PP ASM_CONST(0x0000000000000003)
+#define HPTE_R_N ASM_CONST(0x0000000000000004)
+#define HPTE_R_C ASM_CONST(0x0000000000000080)
+#define HPTE_R_R ASM_CONST(0x0000000000000100)
+
+#define HPTE_V_1TB_SEG ASM_CONST(0x4000000000000000)
+#define HPTE_V_VRMA_MASK ASM_CONST(0x4001ffffff000000)
+
+/* Values for PP (assumes Ks=0, Kp=1) */
+/* pp0 will always be 0 for linux */
+#define PP_RWXX 0 /* Supervisor read/write, User none */
+#define PP_RWRX 1 /* Supervisor read/write, User read */
+#define PP_RWRW 2 /* Supervisor read/write, User read/write */
+#define PP_RXRX 3 /* Supervisor read, User read */
+
+#ifndef __ASSEMBLY__
+
+struct hash_pte {
+ unsigned long v;
+ unsigned long r;
+};
+
+extern struct hash_pte *htab_address;
+extern unsigned long htab_size_bytes;
+extern unsigned long htab_hash_mask;
+
+/*
+ * Page size definition
+ *
+ * shift : is the "PAGE_SHIFT" value for that page size
+ * sllp : is a bit mask with the value of SLB L || LP to be or'ed
+ * directly into a slbmte "vsid" value
+ * penc : is the HPTE encoding mask for the "LP" field
+ */
+struct mmu_psize_def
+{
+ unsigned int shift; /* number of bits */
+ unsigned int penc; /* HPTE encoding */
+ unsigned int tlbiel; /* tlbiel supported for that page size */
+ unsigned long avpnm; /* bits to mask out in AVPN in the HPTE */
+ unsigned long sllp; /* SLB L||LP (exact mask to use in slbmte) */
+};
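+/*
+ * Purely illustrative sketch (not the real table, which is filled in at
+ * boot from firmware/cputable data): a base 4K page size entry would
+ * plausibly look like this.
+ */
+#if 0
+static const struct mmu_psize_def example_4k_psize_def = {
+ .shift = 12, /* 4K pages */
+ .penc = 0, /* no LP encoding needed for base pages */
+ .tlbiel = 1, /* assume tlbiel can invalidate this size */
+ .avpnm = 0, /* no AVPN bits to mask out */
+ .sllp = 0, /* no L/LP bits set in the SLB entry */
+};
+#endif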
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * The kernel uses the constants below to index into the page size array.
+ * The use of fixed constants for this purpose is better for the performance
+ * of the low level hash refill handlers.
+ *
+ * An unsupported page size has its "shift" field set to 0.
+ *
+ * Any new page size being implemented can get a new entry in here. Whether
+ * the kernel will use it or not is a different matter though. The actual page
+ * size used by hugetlbfs is not defined here and may be made variable.
+ */
+
+#define MMU_PAGE_4K 0 /* 4K */
+#define MMU_PAGE_64K 1 /* 64K */
+#define MMU_PAGE_64K_AP 2 /* 64K Admixed (in a 4K segment) */
+#define MMU_PAGE_1M 3 /* 1M */
+#define MMU_PAGE_16M 4 /* 16M */
+#define MMU_PAGE_16G 5 /* 16G */
+#define MMU_PAGE_COUNT 6
+
+/*
+ * Segment sizes.
+ * These are the values used by hardware in the B field of
+ * SLB entries and the first dword of MMU hashtable entries.
+ * The B field is 2 bits; the values 2 and 3 are unused and reserved.
+ */
+#define MMU_SEGSIZE_256M 0
+#define MMU_SEGSIZE_1T 1
+
+
+#ifndef __ASSEMBLY__
+
+/*
+ * The current system page and segment sizes
+ */
+extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
+extern int mmu_linear_psize;
+extern int mmu_virtual_psize;
+extern int mmu_vmalloc_psize;
+extern int mmu_vmemmap_psize;
+extern int mmu_io_psize;
+extern int mmu_kernel_ssize;
+extern int mmu_highuser_ssize;
+extern u16 mmu_slb_size;
+extern unsigned long tce_alloc_start, tce_alloc_end;
+
+/*
+ * If the processor supports 64k normal pages but not 64k cache
+ * inhibited pages, we have to be prepared to switch processes
+ * to use 4k pages when they create cache-inhibited mappings.
+ * If this is the case, mmu_ci_restrictions will be set to 1.
+ */
+extern int mmu_ci_restrictions;
+
+#ifdef CONFIG_HUGETLB_PAGE
+/*
+ * The page size indexes of the huge pages for use by hugetlbfs
+ */
+extern unsigned int mmu_huge_psizes[MMU_PAGE_COUNT];
+
+#endif /* CONFIG_HUGETLB_PAGE */
+
+/*
+ * This function sets the AVPN and L fields of the HPTE appropriately
+ * for the page size
+ */
+static inline unsigned long hpte_encode_v(unsigned long va, int psize,
+ int ssize)
+{
+ unsigned long v;
+ v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm);
+ v <<= HPTE_V_AVPN_SHIFT;
+ if (psize != MMU_PAGE_4K)
+ v |= HPTE_V_LARGE;
+ v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
+ return v;
+}
+
+/*
+ * This function sets the ARPN and LP fields of the HPTE appropriately
+ * for the page size. We assume the pa is already "clean", that is,
+ * properly aligned for the requested page size.
+ */
+static inline unsigned long hpte_encode_r(unsigned long pa, int psize)
+{
+ /* A 4K page needs no special encoding */
+ if (psize == MMU_PAGE_4K)
+ return pa & HPTE_R_RPN;
+ else {
+ unsigned int penc = mmu_psize_defs[psize].penc;
+ unsigned int shift = mmu_psize_defs[psize].shift;
+ return (pa & ~((1ul << shift) - 1)) | (penc << 12);
+ }
+}
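+
+/*
+ * Illustrative sketch only (not the real insert path): the two halves of
+ * a hardware PTE are typically built from the two helpers above and then
+ * handed to the native/hypervisor insert routine, roughly as below. The
+ * rflags/vflags names and the function name are made up for the example.
+ */
+#if 0
+static inline void example_build_hpte(struct hash_pte *hptep,
+ unsigned long va, unsigned long pa,
+ unsigned long rflags, unsigned long vflags,
+ int psize, int ssize)
+{
+ hptep->r = hpte_encode_r(pa, psize) | rflags;
+ hptep->v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID;
+}
+#endif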
+
+/*
+ * Build a VA given VSID, EA and segment size
+ */
+static inline unsigned long hpt_va(unsigned long ea, unsigned long vsid,
+ int ssize)
+{
+ if (ssize == MMU_SEGSIZE_256M)
+ return (vsid << 28) | (ea & 0xfffffffUL);
+ return (vsid << 40) | (ea & 0xffffffffffUL);
+}
+
+/*
+ * This hashes a virtual address
+ */
+
+static inline unsigned long hpt_hash(unsigned long va, unsigned int shift,
+ int ssize)
+{
+ unsigned long hash, vsid;
+
+ if (ssize == MMU_SEGSIZE_256M) {
+ hash = (va >> 28) ^ ((va & 0x0fffffffUL) >> shift);
+ } else {
+ vsid = va >> 40;
+ hash = vsid ^ (vsid << 25) ^ ((va & 0xffffffffffUL) >> shift);
+ }
+ return hash & 0x7fffffffffUL;
+}
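+
+/*
+ * Illustrative sketch only: a hash computed above is masked with
+ * htab_hash_mask and scaled by HPTES_PER_GROUP to locate the primary
+ * PTE group in the hash table (the secondary group uses the bitwise
+ * complement of the hash). This mirrors what the low-level insert and
+ * lookup code does; the function name here is made up for the example.
+ */
+#if 0
+static inline struct hash_pte *example_hpte_group(unsigned long va,
+ unsigned int shift, int ssize)
+{
+ unsigned long hash = hpt_hash(va, shift, ssize);
+
+ return htab_address + (hash & htab_hash_mask) * HPTES_PER_GROUP;
+}
+#endif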
+
+extern int __hash_page_4K(unsigned long ea, unsigned long access,
+ unsigned long vsid, pte_t *ptep, unsigned long trap,
+ unsigned int local, int ssize, int subpage_prot);
+extern int __hash_page_64K(unsigned long ea, unsigned long access,
+ unsigned long vsid, pte_t *ptep, unsigned long trap,
+ unsigned int local, int ssize);
+struct mm_struct;
+extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap);
+extern int hash_huge_page(struct mm_struct *mm, unsigned long access,
+ unsigned long ea, unsigned long vsid, int local,
+ unsigned long trap);
+
+extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
+ unsigned long pstart, unsigned long mode,
+ int psize, int ssize);
+extern void set_huge_psize(int psize);
+extern void add_gpage(unsigned long addr, unsigned long page_size,
+ unsigned long number_of_pages);
+extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);
+
+extern void htab_initialize(void);
+extern void htab_initialize_secondary(void);
+extern void hpte_init_native(void);
+extern void hpte_init_lpar(void);
+extern void hpte_init_iSeries(void);
+extern void hpte_init_beat(void);
+extern void hpte_init_beat_v3(void);
+
+extern void stabs_alloc(void);
+extern void slb_initialize(void);
+extern void slb_flush_and_rebolt(void);
+extern void stab_initialize(unsigned long stab);
+
+extern void slb_vmalloc_update(void);
+#endif /* __ASSEMBLY__ */
+
+/*
+ * VSID allocation
+ *
+ * We first generate a 36-bit "proto-VSID". For kernel addresses this
+ * is equal to the ESID, for user addresses it is:
+ * (context << 16) | (esid & 0xffff)
+ *
+ * The two forms are distinguishable because the top bit is 0 for user
+ * addresses, whereas the top two bits are 1 for kernel addresses.
+ * Proto-VSIDs with the top two bits equal to 0b10 are reserved for
+ * now.
+ *
+ * The proto-VSIDs are then scrambled into real VSIDs with the
+ * multiplicative hash:
+ *
+ * VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
+ * where VSID_MULTIPLIER is a prime (VSID_MULTIPLIER_256M or _1T below)
+ * and VSID_MODULUS = 2^VSID_BITS - 1 (2^36 - 1 for 256M segments,
+ * 2^24 - 1 for 1T segments)
+ *
+ * This scramble is only well defined for proto-VSIDs below
+ * 0xFFFFFFFFF, so both proto-VSID and actual VSID 0xFFFFFFFFF are
+ * reserved. VSID_MULTIPLIER is prime, so in particular it is
+ * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
+ * Because the modulus is 2^n-1 we can compute it efficiently without
+ * a divide or extra multiply (see below).
+ *
+ * This scheme has several advantages over older methods:
+ *
+ * - We have VSIDs allocated for every kernel address
+ * (i.e. everything above 0xC000000000000000), except the very top
+ * segment, which simplifies several things.
+ *
+ * - We allow for 16 significant bits of ESID and 19 bits of
+ * context for user addresses. i.e. 16T (44 bits) of address space for
+ * up to 512K contexts (although the page table structure and context
+ * allocation will need changes to take advantage of this).
+ *
+ * - The scramble function gives robust scattering in the hash
+ * table (at least based on some initial results). The previous
+ * method was more susceptible to pathological cases giving excessive
+ * hash collisions.
+ */
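+/*
+ * Worked sketch of the "mod 2^n - 1 without a divide" trick used by both
+ * vsid_scramble() and ASM_VSID_SCRAMBLE below: after the multiply, fold
+ * the bits above bit n back into the low n bits, then fold once more in
+ * case that addition carried out of bit n. With small numbers for
+ * illustration (n = 4, modulus 15):
+ *
+ * x = 250 = 0xfa
+ * x = (x >> 4) + (x & 0xf) = 15 + 10 = 25
+ * x = (x + ((x + 1) >> 4)) & 0xf = (25 + 1) & 0xf = 10
+ *
+ * and indeed 250 % 15 == 10. The same two folds, with n = VSID_BITS,
+ * are what the macro and the C version compute.
+ */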
+/*
+ * WARNING - If you change these you must make sure the asm
+ * implementations in slb_allocate (slb_low.S), do_stab_bolted
+ * (head.S) and ASM_VSID_SCRAMBLE (below) are changed accordingly.
+ *
+ * You'll also need to change the precomputed VSID values in head.S
+ * which are used by the iSeries firmware.
+ */
+
+#define VSID_MULTIPLIER_256M ASM_CONST(200730139) /* 28-bit prime */
+#define VSID_BITS_256M 36
+#define VSID_MODULUS_256M ((1UL<<VSID_BITS_256M)-1)
+
+#define VSID_MULTIPLIER_1T ASM_CONST(12538073) /* 24-bit prime */
+#define VSID_BITS_1T 24
+#define VSID_MODULUS_1T ((1UL<<VSID_BITS_1T)-1)
+
+#define CONTEXT_BITS 19
+#define USER_ESID_BITS 16
+#define USER_ESID_BITS_1T 4
+
+#define USER_VSID_RANGE (1UL << (USER_ESID_BITS + SID_SHIFT))
+
+/*
+ * This macro generates asm code to compute the VSID scramble
+ * function. Used in slb_allocate() and do_stab_bolted. The function
+ * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
+ *
+ * rt = register containing the proto-VSID and into which the
+ * VSID will be stored
+ * rx = scratch register (clobbered)
+ *
+ * - rt and rx must be different registers
+ * - The answer will end up in the low VSID_BITS bits of rt. The higher
+ * bits may contain other garbage, so you may need to mask the
+ * result.
+ */
+#define ASM_VSID_SCRAMBLE(rt, rx, size) \
+ lis rx,VSID_MULTIPLIER_##size@h; \
+ ori rx,rx,VSID_MULTIPLIER_##size@l; \
+ mulld rt,rt,rx; /* rt = rt * MULTIPLIER */ \
+ \
+ srdi rx,rt,VSID_BITS_##size; \
+ clrldi rt,rt,(64-VSID_BITS_##size); \
+ add rt,rt,rx; /* add high and low bits */ \
+ /* Now, r3 == VSID (mod 2^36-1), and lies between 0 and \
+ * 2^36-1+2^28-1. That in particular means that if r3 >= \
+ * 2^36-1, then r3+1 has the 2^36 bit set. So, if r3+1 has \
+ * the bit clear, r3 already has the answer we want, if it \
+ * doesn't, the answer is the low 36 bits of r3+1. So in all \
+ * cases the answer is the low 36 bits of (r3 + ((r3+1) >> 36))*/\
+ addi rx,rt,1; \
+ srdi rx,rx,VSID_BITS_##size; /* extract 2^VSID_BITS bit */ \
+ add rt,rt,rx
+
+
+#ifndef __ASSEMBLY__
+
+typedef unsigned long mm_context_id_t;
+
+typedef struct {
+ mm_context_id_t id;
+ u16 user_psize; /* page size index */
+
+#ifdef CONFIG_PPC_MM_SLICES
+ u64 low_slices_psize; /* SLB page size encodings */
+ u64 high_slices_psize; /* 4 bits per slice for now */
+#else
+ u16 sllp; /* SLB page size encoding */
+#endif
+ unsigned long vdso_base;
+} mm_context_t;
+
+
+#if 0
+/*
+ * The code below is equivalent to this function for arguments
+ * < 2^VSID_BITS, which is all this should ever be called
+ * with. However gcc is not clever enough to compute the
+ * modulus (2^n-1) without a second multiply.
+ */
+#define vsid_scramble(protovsid, size) \
+ ((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))
+
+#else /* 1 */
+#define vsid_scramble(protovsid, size) \
+ ({ \
+ unsigned long x; \
+ x = (protovsid) * VSID_MULTIPLIER_##size; \
+ x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
+ (x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
+ })
+#endif /* 1 */
+
+/* This is only valid for addresses >= KERNELBASE */
+static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
+{
+ if (ssize == MMU_SEGSIZE_256M)
+ return vsid_scramble(ea >> SID_SHIFT, 256M);
+ return vsid_scramble(ea >> SID_SHIFT_1T, 1T);
+}
+
+/* Returns the segment size indicator for a user address */
+static inline int user_segment_size(unsigned long addr)
+{
+ /* Use 1T segments if possible for addresses >= 1T */
+ if (addr >= (1UL << SID_SHIFT_1T))
+ return mmu_highuser_ssize;
+ return MMU_SEGSIZE_256M;
+}
+
+/* This is only valid for user addresses (which are below 2^44) */
+static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
+ int ssize)
+{
+ if (ssize == MMU_SEGSIZE_256M)
+ return vsid_scramble((context << USER_ESID_BITS)
+ | (ea >> SID_SHIFT), 256M);
+ return vsid_scramble((context << USER_ESID_BITS_1T)
+ | (ea >> SID_SHIFT_1T), 1T);
+}
+
+/*
+ * This is only used on legacy iSeries in lparmap.c,
+ * hence the 256MB segment assumption.
+ */
+#define VSID_SCRAMBLE(pvsid) (((pvsid) * VSID_MULTIPLIER_256M) % \
+ VSID_MODULUS_256M)
+#define KERNEL_VSID(ea) VSID_SCRAMBLE(GET_ESID(ea))
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_MMU_HASH64_H_ */
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
new file mode 100644
index 0000000..4c0e1b4
--- /dev/null
+++ b/arch/powerpc/include/asm/mmu.h
@@ -0,0 +1,26 @@
+#ifndef _ASM_POWERPC_MMU_H_
+#define _ASM_POWERPC_MMU_H_
+#ifdef __KERNEL__
+
+#ifdef CONFIG_PPC64
+/* 64-bit classic hash table MMU */
+# include <asm/mmu-hash64.h>
+#elif defined(CONFIG_PPC_STD_MMU)
+/* 32-bit classic hash table MMU */
+# include <asm/mmu-hash32.h>
+#elif defined(CONFIG_40x)
+/* 40x-style software loaded TLB */
+# include <asm/mmu-40x.h>
+#elif defined(CONFIG_44x)
+/* 44x-style software loaded TLB */
+# include <asm/mmu-44x.h>
+#elif defined(CONFIG_FSL_BOOKE)
+/* Freescale Book-E software loaded TLB */
+# include <asm/mmu-fsl-booke.h>
+#elif defined (CONFIG_PPC_8xx)
+/* Motorola/Freescale 8xx software loaded TLB */
+# include <asm/mmu-8xx.h>
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_MMU_H_ */
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
new file mode 100644
index 0000000..9102b8b
--- /dev/null
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -0,0 +1,280 @@
+#ifndef __ASM_POWERPC_MMU_CONTEXT_H
+#define __ASM_POWERPC_MMU_CONTEXT_H
+#ifdef __KERNEL__
+
+#include <asm/mmu.h>
+#include <asm/cputable.h>
+#include <asm-generic/mm_hooks.h>
+
+#ifndef CONFIG_PPC64
+#include <asm/atomic.h>
+#include <linux/bitops.h>
+
+/*
+ * On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs
+ * (virtual segment identifiers) for each context. Although the
+ * hardware supports 24-bit VSIDs, and thus >1 million contexts,
+ * we only use 32,768 of them. That is ample, since there can be
+ * at most around 30,000 tasks in the system anyway, and it means
+ * that we can use a bitmap to indicate which contexts are in use.
+ * Using a bitmap means that we entirely avoid all of the problems
+ * that we used to have when the context number overflowed,
+ * particularly on SMP systems.
+ * -- paulus.
+ */
+
+/*
+ * This function defines the mapping from contexts to VSIDs (virtual
+ * segment IDs). We use a skew on both the context and the high 4 bits
+ * of the 32-bit virtual address (the "effective segment ID") in order
+ * to spread out the entries in the MMU hash table. Note, if this
+ * function is changed then arch/ppc/mm/hashtable.S will have to be
+ * changed to correspond.
+ */
+#define CTX_TO_VSID(ctx, va) (((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
+ & 0xffffff)
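+
+/*
+ * Purely illustrative: the 16 VSIDs belonging to one context are simply
+ * CTX_TO_VSID(ctx, va) evaluated for each 256MB segment, e.g. (the
+ * function name below is made up for the example):
+ */
+#if 0
+static inline void example_context_vsids(unsigned long ctx, unsigned long *vsid)
+{
+ int seg;
+
+ /* one VSID per 256MB segment covered by this context */
+ for (seg = 0; seg < 16; seg++)
+ vsid[seg] = CTX_TO_VSID(ctx, (unsigned long)seg << 28);
+}
+#endif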
+
+/*
+ The MPC8xx has only 16 contexts. We rotate through them on each
+ task switch. A better way would be to keep track of tasks that
+ own contexts, and implement an LRU usage. That way very active
+ tasks don't always have to pay the TLB reload overhead. The
+ kernel pages are mapped shared, so the kernel can run on behalf
+ of any task that makes a kernel entry. Shared does not mean they
+ are not protected, just that the ASID comparison is not performed.
+ -- Dan
+
+ The IBM4xx has 256 contexts, so we can just rotate through these
+ as a way of "switching" contexts. If the TID of the TLB is zero,
+ the PID/TID comparison is disabled, so we can use a TID of zero
+ to represent all kernel pages as shared among all contexts.
+ -- Dan
+ */
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+}
+
+#ifdef CONFIG_8xx
+#define NO_CONTEXT 16
+#define LAST_CONTEXT 15
+#define FIRST_CONTEXT 0
+
+#elif defined(CONFIG_4xx)
+#define NO_CONTEXT 256
+#define LAST_CONTEXT 255
+#define FIRST_CONTEXT 1
+
+#elif defined(CONFIG_E200) || defined(CONFIG_E500)
+#define NO_CONTEXT 256
+#define LAST_CONTEXT 255
+#define FIRST_CONTEXT 1
+
+#else
+
+/* PPC 6xx, 7xx CPUs */
+#define NO_CONTEXT ((unsigned long) -1)
+#define LAST_CONTEXT 32767
+#define FIRST_CONTEXT 1
+#endif
+
+/*
+ * Set the current MMU context.
+ * On 32-bit PowerPCs (other than the 8xx embedded chips), this is done by
+ * loading up the segment registers for the user part of the address space.
+ *
+ * Since the PGD is immediately available, it is much faster to simply
+ * pass this along as a second parameter, which is required for 8xx and
+ * can be used for debugging on all processors (if you happen to have
+ * an Abatron).
+ */
+extern void set_context(unsigned long contextid, pgd_t *pgd);
+
+/*
+ * Bitmap of contexts in use.
+ * The size of this bitmap is LAST_CONTEXT + 1 bits.
+ */
+extern unsigned long context_map[];
+
+/*
+ * This caches the next context number that we expect to be free.
+ * Its use is an optimization only, we can't rely on this context
+ * number to be free, but it usually will be.
+ */
+extern unsigned long next_mmu_context;
+
+/*
+ * If we don't have sufficient contexts to give one to every task
+ * that could be in the system, we need to be able to steal contexts.
+ * These variables support that.
+ */
+#if LAST_CONTEXT < 30000
+#define FEW_CONTEXTS 1
+extern atomic_t nr_free_contexts;
+extern struct mm_struct *context_mm[LAST_CONTEXT+1];
+extern void steal_context(void);
+#endif
+
+/*
+ * Get a new mmu context for the address space described by `mm'.
+ */
+static inline void get_mmu_context(struct mm_struct *mm)
+{
+ unsigned long ctx;
+
+ if (mm->context.id != NO_CONTEXT)
+ return;
+#ifdef FEW_CONTEXTS
+ while (atomic_dec_if_positive(&nr_free_contexts) < 0)
+ steal_context();
+#endif
+ ctx = next_mmu_context;
+ while (test_and_set_bit(ctx, context_map)) {
+ ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
+ if (ctx > LAST_CONTEXT)
+ ctx = 0;
+ }
+ next_mmu_context = (ctx + 1) & LAST_CONTEXT;
+ mm->context.id = ctx;
+#ifdef FEW_CONTEXTS
+ context_mm[ctx] = mm;
+#endif
+}
+
+/*
+ * Set up the context for a new address space.
+ */
+static inline int init_new_context(struct task_struct *t, struct mm_struct *mm)
+{
+ mm->context.id = NO_CONTEXT;
+ mm->context.vdso_base = 0;
+ return 0;
+}
+
+/*
+ * We're finished using the context for an address space.
+ */
+static inline void destroy_context(struct mm_struct *mm)
+{
+ preempt_disable();
+ if (mm->context.id != NO_CONTEXT) {
+ clear_bit(mm->context.id, context_map);
+ mm->context.id = NO_CONTEXT;
+#ifdef FEW_CONTEXTS
+ atomic_inc(&nr_free_contexts);
+#endif
+ }
+ preempt_enable();
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+#ifdef CONFIG_ALTIVEC
+ if (cpu_has_feature(CPU_FTR_ALTIVEC))
+ asm volatile ("dssall;\n"
+#ifndef CONFIG_POWER4
+ "sync;\n" /* G4 needs a sync here, G5 apparently not */
+#endif
+ : : );
+#endif /* CONFIG_ALTIVEC */
+
+ tsk->thread.pgdir = next->pgd;
+
+ /* No need to flush userspace segments if the mm doesn't change */
+ if (prev == next)
+ return;
+
+ /* Setup new userspace context */
+ get_mmu_context(next);
+ set_context(next->context.id, next->pgd);
+}
+
+#define deactivate_mm(tsk,mm) do { } while (0)
+
+/*
+ * After we have set current->mm to a new value, this activates
+ * the context for the new mm so we see the new mappings.
+ */
+#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, current)
+
+extern void mmu_context_init(void);
+
+
+#else
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+
+/*
+ * Copyright (C) 2001 PPC 64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+static inline void enter_lazy_tlb(struct mm_struct *mm,
+ struct task_struct *tsk)
+{
+}
+
+/*
+ * The proto-VSID space has 2^35 - 1 segments available for user mappings.
+ * Each segment contains 2^28 bytes. Each context maps 2^44 bytes,
+ * so we can support 2^19-1 contexts (19 == 35 + 28 - 44).
+ */
+#define NO_CONTEXT 0
+#define MAX_CONTEXT ((1UL << 19) - 1)
+
+extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+extern void destroy_context(struct mm_struct *mm);
+
+extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm);
+extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
+
+/*
+ * switch_mm is the entry point called from the architecture independent
+ * code in kernel/sched.c
+ */
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ if (!cpu_isset(smp_processor_id(), next->cpu_vm_mask))
+ cpu_set(smp_processor_id(), next->cpu_vm_mask);
+
+ /* No need to flush userspace segments if the mm doesn't change */
+ if (prev == next)
+ return;
+
+#ifdef CONFIG_ALTIVEC
+ if (cpu_has_feature(CPU_FTR_ALTIVEC))
+ asm volatile ("dssall");
+#endif /* CONFIG_ALTIVEC */
+
+ if (cpu_has_feature(CPU_FTR_SLB))
+ switch_slb(tsk, next);
+ else
+ switch_stab(tsk, next);
+}
+
+#define deactivate_mm(tsk,mm) do { } while (0)
+
+/*
+ * After we have set current->mm to a new value, this activates
+ * the context for the new mm so we see the new mappings.
+ */
+static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ switch_mm(prev, next, current);
+ local_irq_restore(flags);
+}
+
+#endif /* CONFIG_PPC64 */
+#endif /* __KERNEL__ */
+#endif /* __ASM_POWERPC_MMU_CONTEXT_H */
diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h
new file mode 100644
index 0000000..19f299b
--- /dev/null
+++ b/arch/powerpc/include/asm/mmzone.h
@@ -0,0 +1,47 @@
+/*
+ * Written by Kanoj Sarcar (kanoj@sgi.com) Aug 99
+ *
+ * PowerPC64 port:
+ * Copyright (C) 2002 Anton Blanchard, IBM Corp.
+ */
+#ifndef _ASM_MMZONE_H_
+#define _ASM_MMZONE_H_
+#ifdef __KERNEL__
+
+
+/*
+ * generic non-linear memory support:
+ *
+ * 1) we will not split memory into more chunks than will fit into the
+ * flags field of the struct page
+ */
+
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+
+extern struct pglist_data *node_data[];
+/*
+ * Return a pointer to the node data for node n.
+ */
+#define NODE_DATA(nid) (node_data[nid])
+
+/*
+ * Following are specific to this numa platform.
+ */
+
+extern int numa_cpu_lookup_table[];
+extern cpumask_t numa_cpumask_lookup_table[];
+#ifdef CONFIG_MEMORY_HOTPLUG
+extern unsigned long max_pfn;
+#endif
+
+/*
+ * Following are macros that each numa implementation must define.
+ */
+
+#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
+#define node_end_pfn(nid) (NODE_DATA(nid)->node_end_pfn)
+
+#endif /* CONFIG_NEED_MULTIPLE_NODES */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_MMZONE_H_ */
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
new file mode 100644
index 0000000..e5f14b1
--- /dev/null
+++ b/arch/powerpc/include/asm/module.h
@@ -0,0 +1,77 @@
+#ifndef _ASM_POWERPC_MODULE_H
+#define _ASM_POWERPC_MODULE_H
+#ifdef __KERNEL__
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/list.h>
+#include <asm/bug.h>
+
+
+#ifndef __powerpc64__
+/*
+ * Thanks to Paul M for explaining this.
+ *
+ * PPC can only do relative jumps of +/- 32MB, and often the kernel and
+ * other modules are further away than this. So, we jump to a table of
+ * trampolines attached to the module (the Procedure Linkage Table)
+ * whenever that happens.
+ */
+
+struct ppc_plt_entry {
+ /* 16 byte jump instruction sequence (4 instructions) */
+ unsigned int jump[4];
+};
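+
+/*
+ * Illustrative sketch only: one way the relocation code can fill such an
+ * entry is the four-instruction trampoline below, i.e. load the 32-bit
+ * target into a scratch register and make an indirect jump through CTR.
+ * The register choice and helper name here are for the example only.
+ */
+#if 0
+static inline void example_fill_plt(struct ppc_plt_entry *e, unsigned long val)
+{
+ e->jump[0] = 0x3d600000 + ((val + 0x8000) >> 16); /* lis r11,sym@ha */
+ e->jump[1] = 0x396b0000 + (val & 0xffff); /* addi r11,r11,sym@l */
+ e->jump[2] = 0x7d6903a6; /* mtctr r11 */
+ e->jump[3] = 0x4e800420; /* bctr */
+}
+#endif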
+#endif /* __powerpc64__ */
+
+
+struct mod_arch_specific {
+#ifdef __powerpc64__
+ unsigned int stubs_section; /* Index of stubs section in module */
+ unsigned int toc_section; /* What section is the TOC? */
+#else
+ /* Indices of PLT sections within module. */
+ unsigned int core_plt_section;
+ unsigned int init_plt_section;
+#endif
+
+ /* List of BUG addresses, source line numbers and filenames */
+ struct list_head bug_list;
+ struct bug_entry *bug_table;
+ unsigned int num_bugs;
+};
+
+/*
+ * Select ELF headers.
+ * Make empty section for module_frob_arch_sections to expand.
+ */
+
+#ifdef __powerpc64__
+# define Elf_Shdr Elf64_Shdr
+# define Elf_Sym Elf64_Sym
+# define Elf_Ehdr Elf64_Ehdr
+# ifdef MODULE
+ asm(".section .stubs,\"ax\",@nobits; .align 3; .previous");
+# endif
+#else
+# define Elf_Shdr Elf32_Shdr
+# define Elf_Sym Elf32_Sym
+# define Elf_Ehdr Elf32_Ehdr
+# ifdef MODULE
+ asm(".section .plt,\"ax\",@nobits; .align 3; .previous");
+ asm(".section .init.plt,\"ax\",@nobits; .align 3; .previous");
+# endif /* MODULE */
+#endif
+
+
+struct exception_table_entry;
+void sort_ex_table(struct exception_table_entry *start,
+ struct exception_table_entry *finish);
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_MODULE_H */
diff --git a/arch/powerpc/include/asm/mpc512x.h b/arch/powerpc/include/asm/mpc512x.h
new file mode 100644
index 0000000..c48a165
--- /dev/null
+++ b/arch/powerpc/include/asm/mpc512x.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: John Rigby, <jrigby@freescale.com>, Friday Apr 13 2007
+ *
+ * Description:
+ * MPC5121 Prototypes and definitions
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#ifndef __ASM_POWERPC_MPC512x_H__
+#define __ASM_POWERPC_MPC512x_H__
+
+extern unsigned long mpc512x_find_ips_freq(struct device_node *node);
+
+#endif /* __ASM_POWERPC_MPC512x_H__ */
+
diff --git a/arch/powerpc/include/asm/mpc52xx.h b/arch/powerpc/include/asm/mpc52xx.h
new file mode 100644
index 0000000..81ef10b
--- /dev/null
+++ b/arch/powerpc/include/asm/mpc52xx.h
@@ -0,0 +1,295 @@
+/*
+ * Prototypes, etc. for the Freescale MPC52xx embedded cpu chips
+ * May need to be cleaned as the port goes on ...
+ *
+ * Copyright (C) 2004-2005 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2003 MontaVista, Software, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef __ASM_POWERPC_MPC52xx_H__
+#define __ASM_POWERPC_MPC52xx_H__
+
+#ifndef __ASSEMBLY__
+#include <asm/types.h>
+#include <asm/prom.h>
+#endif /* __ASSEMBLY__ */
+
+#include <linux/suspend.h>
+
+/* Variants of the 5200(B) */
+#define MPC5200_SVR 0x80110010
+#define MPC5200_SVR_MASK 0xfffffff0
+#define MPC5200B_SVR 0x80110020
+#define MPC5200B_SVR_MASK 0xfffffff0
+
+/* ======================================================================== */
+/* Structures mapping of some unit register set */
+/* ======================================================================== */
+
+#ifndef __ASSEMBLY__
+
+/* Memory Mapping Control */
+struct mpc52xx_mmap_ctl {
+ u32 mbar; /* MMAP_CTRL + 0x00 */
+
+ u32 cs0_start; /* MMAP_CTRL + 0x04 */
+ u32 cs0_stop; /* MMAP_CTRL + 0x08 */
+ u32 cs1_start; /* MMAP_CTRL + 0x0c */
+ u32 cs1_stop; /* MMAP_CTRL + 0x10 */
+ u32 cs2_start; /* MMAP_CTRL + 0x14 */
+ u32 cs2_stop; /* MMAP_CTRL + 0x18 */
+ u32 cs3_start; /* MMAP_CTRL + 0x1c */
+ u32 cs3_stop; /* MMAP_CTRL + 0x20 */
+ u32 cs4_start; /* MMAP_CTRL + 0x24 */
+ u32 cs4_stop; /* MMAP_CTRL + 0x28 */
+ u32 cs5_start; /* MMAP_CTRL + 0x2c */
+ u32 cs5_stop; /* MMAP_CTRL + 0x30 */
+
+ u32 sdram0; /* MMAP_CTRL + 0x34 */
+ u32 sdram1; /* MMAP_CTRL + 0X38 */
+
+ u32 reserved[4]; /* MMAP_CTRL + 0x3c .. 0x48 */
+
+ u32 boot_start; /* MMAP_CTRL + 0x4c */
+ u32 boot_stop; /* MMAP_CTRL + 0x50 */
+
+ u32 ipbi_ws_ctrl; /* MMAP_CTRL + 0x54 */
+
+ u32 cs6_start; /* MMAP_CTRL + 0x58 */
+ u32 cs6_stop; /* MMAP_CTRL + 0x5c */
+ u32 cs7_start; /* MMAP_CTRL + 0x60 */
+ u32 cs7_stop; /* MMAP_CTRL + 0x64 */
+};
+
+/* SDRAM control */
+struct mpc52xx_sdram {
+ u32 mode; /* SDRAM + 0x00 */
+ u32 ctrl; /* SDRAM + 0x04 */
+ u32 config1; /* SDRAM + 0x08 */
+ u32 config2; /* SDRAM + 0x0c */
+};
+
+/* SDMA */
+struct mpc52xx_sdma {
+ u32 taskBar; /* SDMA + 0x00 */
+ u32 currentPointer; /* SDMA + 0x04 */
+ u32 endPointer; /* SDMA + 0x08 */
+ u32 variablePointer; /* SDMA + 0x0c */
+
+ u8 IntVect1; /* SDMA + 0x10 */
+ u8 IntVect2; /* SDMA + 0x11 */
+ u16 PtdCntrl; /* SDMA + 0x12 */
+
+ u32 IntPend; /* SDMA + 0x14 */
+ u32 IntMask; /* SDMA + 0x18 */
+
+ u16 tcr[16]; /* SDMA + 0x1c .. 0x3a */
+
+ u8 ipr[32]; /* SDMA + 0x3c .. 0x5b */
+
+ u32 cReqSelect; /* SDMA + 0x5c */
+ u32 task_size0; /* SDMA + 0x60 */
+ u32 task_size1; /* SDMA + 0x64 */
+ u32 MDEDebug; /* SDMA + 0x68 */
+ u32 ADSDebug; /* SDMA + 0x6c */
+ u32 Value1; /* SDMA + 0x70 */
+ u32 Value2; /* SDMA + 0x74 */
+ u32 Control; /* SDMA + 0x78 */
+ u32 Status; /* SDMA + 0x7c */
+ u32 PTDDebug; /* SDMA + 0x80 */
+};
+
+/* GPT */
+struct mpc52xx_gpt {
+ u32 mode; /* GPTx + 0x00 */
+ u32 count; /* GPTx + 0x04 */
+ u32 pwm; /* GPTx + 0x08 */
+ u32 status; /* GPTx + 0X0c */
+};
+
+/* GPIO */
+struct mpc52xx_gpio {
+ u32 port_config; /* GPIO + 0x00 */
+ u32 simple_gpioe; /* GPIO + 0x04 */
+ u32 simple_ode; /* GPIO + 0x08 */
+ u32 simple_ddr; /* GPIO + 0x0c */
+ u32 simple_dvo; /* GPIO + 0x10 */
+ u32 simple_ival; /* GPIO + 0x14 */
+ u8 outo_gpioe; /* GPIO + 0x18 */
+ u8 reserved1[3]; /* GPIO + 0x19 */
+ u8 outo_dvo; /* GPIO + 0x1c */
+ u8 reserved2[3]; /* GPIO + 0x1d */
+ u8 sint_gpioe; /* GPIO + 0x20 */
+ u8 reserved3[3]; /* GPIO + 0x21 */
+ u8 sint_ode; /* GPIO + 0x24 */
+ u8 reserved4[3]; /* GPIO + 0x25 */
+ u8 sint_ddr; /* GPIO + 0x28 */
+ u8 reserved5[3]; /* GPIO + 0x29 */
+ u8 sint_dvo; /* GPIO + 0x2c */
+ u8 reserved6[3]; /* GPIO + 0x2d */
+ u8 sint_inten; /* GPIO + 0x30 */
+ u8 reserved7[3]; /* GPIO + 0x31 */
+ u16 sint_itype; /* GPIO + 0x34 */
+ u16 reserved8; /* GPIO + 0x36 */
+ u8 gpio_control; /* GPIO + 0x38 */
+ u8 reserved9[3]; /* GPIO + 0x39 */
+ u8 sint_istat; /* GPIO + 0x3c */
+ u8 sint_ival; /* GPIO + 0x3d */
+ u8 bus_errs; /* GPIO + 0x3e */
+ u8 reserved10; /* GPIO + 0x3f */
+};
+
+#define MPC52xx_GPIO_PSC_CONFIG_UART_WITHOUT_CD 4
+#define MPC52xx_GPIO_PSC_CONFIG_UART_WITH_CD 5
+#define MPC52xx_GPIO_PCI_DIS (1<<15)
+
+/* GPIO with WakeUp*/
+struct mpc52xx_gpio_wkup {
+ u8 wkup_gpioe; /* GPIO_WKUP + 0x00 */
+ u8 reserved1[3]; /* GPIO_WKUP + 0x03 */
+ u8 wkup_ode; /* GPIO_WKUP + 0x04 */
+ u8 reserved2[3]; /* GPIO_WKUP + 0x05 */
+ u8 wkup_ddr; /* GPIO_WKUP + 0x08 */
+ u8 reserved3[3]; /* GPIO_WKUP + 0x09 */
+ u8 wkup_dvo; /* GPIO_WKUP + 0x0C */
+ u8 reserved4[3]; /* GPIO_WKUP + 0x0D */
+ u8 wkup_inten; /* GPIO_WKUP + 0x10 */
+ u8 reserved5[3]; /* GPIO_WKUP + 0x11 */
+ u8 wkup_iinten; /* GPIO_WKUP + 0x14 */
+ u8 reserved6[3]; /* GPIO_WKUP + 0x15 */
+ u16 wkup_itype; /* GPIO_WKUP + 0x18 */
+ u8 reserved7[2]; /* GPIO_WKUP + 0x1A */
+ u8 wkup_maste; /* GPIO_WKUP + 0x1C */
+ u8 reserved8[3]; /* GPIO_WKUP + 0x1D */
+ u8 wkup_ival; /* GPIO_WKUP + 0x20 */
+ u8 reserved9[3]; /* GPIO_WKUP + 0x21 */
+ u8 wkup_istat; /* GPIO_WKUP + 0x24 */
+ u8 reserved10[3]; /* GPIO_WKUP + 0x25 */
+};
+
+/* XLB Bus control */
+struct mpc52xx_xlb {
+ u8 reserved[0x40];
+ u32 config; /* XLB + 0x40 */
+ u32 version; /* XLB + 0x44 */
+ u32 status; /* XLB + 0x48 */
+ u32 int_enable; /* XLB + 0x4c */
+ u32 addr_capture; /* XLB + 0x50 */
+ u32 bus_sig_capture; /* XLB + 0x54 */
+ u32 addr_timeout; /* XLB + 0x58 */
+ u32 data_timeout; /* XLB + 0x5c */
+ u32 bus_act_timeout; /* XLB + 0x60 */
+ u32 master_pri_enable; /* XLB + 0x64 */
+ u32 master_priority; /* XLB + 0x68 */
+ u32 base_address; /* XLB + 0x6c */
+ u32 snoop_window; /* XLB + 0x70 */
+};
+
+#define MPC52xx_XLB_CFG_PLDIS (1 << 31)
+#define MPC52xx_XLB_CFG_SNOOP (1 << 15)
+
+/* Clock Distribution control */
+struct mpc52xx_cdm {
+ u32 jtag_id; /* CDM + 0x00 reg0 read only */
+ u32 rstcfg; /* CDM + 0x04 reg1 read only */
+ u32 breadcrumb; /* CDM + 0x08 reg2 */
+
+ u8 mem_clk_sel; /* CDM + 0x0c reg3 byte0 */
+ u8 xlb_clk_sel; /* CDM + 0x0d reg3 byte1 read only */
+ u8 ipb_clk_sel; /* CDM + 0x0e reg3 byte2 */
+ u8 pci_clk_sel; /* CDM + 0x0f reg3 byte3 */
+
+ u8 ext_48mhz_en; /* CDM + 0x10 reg4 byte0 */
+ u8 fd_enable; /* CDM + 0x11 reg4 byte1 */
+ u16 fd_counters; /* CDM + 0x12 reg4 byte2,3 */
+
+ u32 clk_enables; /* CDM + 0x14 reg5 */
+
+ u8 osc_disable; /* CDM + 0x18 reg6 byte0 */
+ u8 reserved0[3]; /* CDM + 0x19 reg6 byte1,2,3 */
+
+ u8 ccs_sleep_enable; /* CDM + 0x1c reg7 byte0 */
+ u8 osc_sleep_enable; /* CDM + 0x1d reg7 byte1 */
+ u8 reserved1; /* CDM + 0x1e reg7 byte2 */
+ u8 ccs_qreq_test; /* CDM + 0x1f reg7 byte3 */
+
+ u8 soft_reset; /* CDM + 0x20 reg8 byte0 */
+ u8 no_ckstp; /* CDM + 0x21 reg8 byte1 */
+ u8 reserved2[2]; /* CDM + 0x22 reg8 byte2,3 */
+
+ u8 pll_lock; /* CDM + 0x24 reg9 byte0 */
+ u8 pll_looselock; /* CDM + 0x25 reg9 byte1 */
+ u8 pll_sm_lockwin; /* CDM + 0x26 reg9 byte2 */
+ u8 reserved3; /* CDM + 0x27 reg9 byte3 */
+
+ u16 reserved4; /* CDM + 0x28 reg10 byte0,1 */
+ u16 mclken_div_psc1; /* CDM + 0x2a reg10 byte2,3 */
+
+ u16 reserved5; /* CDM + 0x2c reg11 byte0,1 */
+ u16 mclken_div_psc2; /* CDM + 0x2e reg11 byte2,3 */
+
+ u16 reserved6; /* CDM + 0x30 reg12 byte0,1 */
+ u16 mclken_div_psc3; /* CDM + 0x32 reg12 byte2,3 */
+
+ u16 reserved7; /* CDM + 0x34 reg13 byte0,1 */
+ u16 mclken_div_psc6; /* CDM + 0x36 reg13 byte2,3 */
+};
+
+#endif /* __ASSEMBLY__ */
+
+
+/* ========================================================================= */
+/* Prototypes for MPC52xx sysdev */
+/* ========================================================================= */
+
+#ifndef __ASSEMBLY__
+
+/* mpc52xx_common.c */
+extern unsigned int mpc52xx_find_ipb_freq(struct device_node *node);
+extern void mpc5200_setup_xlb_arbiter(void);
+extern void mpc52xx_declare_of_platform_devices(void);
+extern void mpc52xx_map_common_devices(void);
+extern int mpc52xx_set_psc_clkdiv(int psc_id, int clkdiv);
+extern void mpc52xx_restart(char *cmd);
+
+/* mpc52xx_pic.c */
+extern void mpc52xx_init_irq(void);
+extern unsigned int mpc52xx_get_irq(void);
+
+/* mpc52xx_pci.c */
+#ifdef CONFIG_PCI
+extern int __init mpc52xx_add_bridge(struct device_node *node);
+extern void __init mpc52xx_setup_pci(void);
+#else
+static inline void mpc52xx_setup_pci(void) { }
+#endif
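+
+/*
+ * Illustrative sketch only: a typical mpc52xx board port wires the
+ * helpers above into its setup_arch() roughly as follows. The function
+ * name is made up for the example and error handling is omitted.
+ */
+#if 0
+static void example_board_setup_arch(void)
+{
+ /* Map the common SoC devices and configure the XLB arbiter */
+ mpc52xx_map_common_devices();
+ mpc5200_setup_xlb_arbiter();
+
+ /* Probe the PCI host bridge from the device tree, if configured */
+ mpc52xx_setup_pci();
+}
+#endif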
+
+#endif /* __ASSEMBLY__ */
+
+#ifdef CONFIG_PM
+struct mpc52xx_suspend {
+ void (*board_suspend_prepare)(void __iomem *mbar);
+ void (*board_resume_finish)(void __iomem *mbar);
+};
+
+extern struct mpc52xx_suspend mpc52xx_suspend;
+extern int __init mpc52xx_pm_init(void);
+extern int mpc52xx_set_wakeup_gpio(u8 pin, u8 level);
+
+#ifdef CONFIG_PPC_LITE5200
+extern int __init lite5200_pm_init(void);
+
+/* lite5200 calls mpc5200 suspend functions, so here they are */
+extern int mpc52xx_pm_prepare(void);
+extern int mpc52xx_pm_enter(suspend_state_t);
+extern void mpc52xx_pm_finish(void);
+extern char saved_sram[0x4000]; /* reuse buffer from mpc52xx suspend */
+#endif
+#endif /* CONFIG_PM */
+
+#endif /* __ASM_POWERPC_MPC52xx_H__ */
+
diff --git a/arch/powerpc/include/asm/mpc52xx_psc.h b/arch/powerpc/include/asm/mpc52xx_psc.h
new file mode 100644
index 0000000..8917ed6
--- /dev/null
+++ b/arch/powerpc/include/asm/mpc52xx_psc.h
@@ -0,0 +1,276 @@
+/*
+ * include/asm-ppc/mpc52xx_psc.h
+ *
+ * Definitions of consts/structs to drive the Freescale MPC52xx OnChip
+ * PSCs. These are shared between multiple drivers since a PSC can be
+ * UART, AC97, IR, I2S, ... So this header is in asm-ppc.
+ *
+ *
+ * Maintainer : Sylvain Munaut <tnt@246tNt.com>
+ *
+ * Based/Extracted from some header of the 2.4 originally written by
+ * Dale Farnsworth <dfarnsworth@mvista.com>
+ *
+ * Copyright (C) 2004 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2003 MontaVista, Software, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef __ASM_MPC52xx_PSC_H__
+#define __ASM_MPC52xx_PSC_H__
+
+#include <asm/types.h>
+
+/* Max number of PSCs */
+#define MPC52xx_PSC_MAXNUM 6
+
+/* Programmable Serial Controller (PSC) status register bits */
+#define MPC52xx_PSC_SR_CDE 0x0080
+#define MPC52xx_PSC_SR_RXRDY 0x0100
+#define MPC52xx_PSC_SR_RXFULL 0x0200
+#define MPC52xx_PSC_SR_TXRDY 0x0400
+#define MPC52xx_PSC_SR_TXEMP 0x0800
+#define MPC52xx_PSC_SR_OE 0x1000
+#define MPC52xx_PSC_SR_PE 0x2000
+#define MPC52xx_PSC_SR_FE 0x4000
+#define MPC52xx_PSC_SR_RB 0x8000
+
+/* PSC Command values */
+#define MPC52xx_PSC_RX_ENABLE 0x0001
+#define MPC52xx_PSC_RX_DISABLE 0x0002
+#define MPC52xx_PSC_TX_ENABLE 0x0004
+#define MPC52xx_PSC_TX_DISABLE 0x0008
+#define MPC52xx_PSC_SEL_MODE_REG_1 0x0010
+#define MPC52xx_PSC_RST_RX 0x0020
+#define MPC52xx_PSC_RST_TX 0x0030
+#define MPC52xx_PSC_RST_ERR_STAT 0x0040
+#define MPC52xx_PSC_RST_BRK_CHG_INT 0x0050
+#define MPC52xx_PSC_START_BRK 0x0060
+#define MPC52xx_PSC_STOP_BRK 0x0070
+
+/* PSC TxRx FIFO status bits */
+#define MPC52xx_PSC_RXTX_FIFO_ERR 0x0040
+#define MPC52xx_PSC_RXTX_FIFO_UF 0x0020
+#define MPC52xx_PSC_RXTX_FIFO_OF 0x0010
+#define MPC52xx_PSC_RXTX_FIFO_FR 0x0008
+#define MPC52xx_PSC_RXTX_FIFO_FULL 0x0004
+#define MPC52xx_PSC_RXTX_FIFO_ALARM 0x0002
+#define MPC52xx_PSC_RXTX_FIFO_EMPTY 0x0001
+
+/* PSC interrupt status/mask bits */
+#define MPC52xx_PSC_IMR_TXRDY 0x0100
+#define MPC52xx_PSC_IMR_RXRDY 0x0200
+#define MPC52xx_PSC_IMR_DB 0x0400
+#define MPC52xx_PSC_IMR_TXEMP 0x0800
+#define MPC52xx_PSC_IMR_ORERR 0x1000
+#define MPC52xx_PSC_IMR_IPC 0x8000
+
+/* PSC input port change bit */
+#define MPC52xx_PSC_CTS 0x01
+#define MPC52xx_PSC_DCD 0x02
+#define MPC52xx_PSC_D_CTS 0x10
+#define MPC52xx_PSC_D_DCD 0x20
+
+/* PSC mode fields */
+#define MPC52xx_PSC_MODE_5_BITS 0x00
+#define MPC52xx_PSC_MODE_6_BITS 0x01
+#define MPC52xx_PSC_MODE_7_BITS 0x02
+#define MPC52xx_PSC_MODE_8_BITS 0x03
+#define MPC52xx_PSC_MODE_BITS_MASK 0x03
+#define MPC52xx_PSC_MODE_PAREVEN 0x00
+#define MPC52xx_PSC_MODE_PARODD 0x04
+#define MPC52xx_PSC_MODE_PARFORCE 0x08
+#define MPC52xx_PSC_MODE_PARNONE 0x10
+#define MPC52xx_PSC_MODE_ERR 0x20
+#define MPC52xx_PSC_MODE_FFULL 0x40
+#define MPC52xx_PSC_MODE_RXRTS 0x80
+
+#define MPC52xx_PSC_MODE_ONE_STOP_5_BITS 0x00
+#define MPC52xx_PSC_MODE_ONE_STOP 0x07
+#define MPC52xx_PSC_MODE_TWO_STOP 0x0f
+
+#define MPC52xx_PSC_RFNUM_MASK 0x01ff
+
+#define MPC52xx_PSC_SICR_DTS1 (1 << 29)
+#define MPC52xx_PSC_SICR_SHDR (1 << 28)
+#define MPC52xx_PSC_SICR_SIM_MASK (0xf << 24)
+#define MPC52xx_PSC_SICR_SIM_UART (0x0 << 24)
+#define MPC52xx_PSC_SICR_SIM_UART_DCD (0x8 << 24)
+#define MPC52xx_PSC_SICR_SIM_CODEC_8 (0x1 << 24)
+#define MPC52xx_PSC_SICR_SIM_CODEC_16 (0x2 << 24)
+#define MPC52xx_PSC_SICR_SIM_AC97 (0x3 << 24)
+#define MPC52xx_PSC_SICR_SIM_SIR (0x8 << 24)
+#define MPC52xx_PSC_SICR_SIM_SIR_DCD (0xc << 24)
+#define MPC52xx_PSC_SICR_SIM_MIR (0x5 << 24)
+#define MPC52xx_PSC_SICR_SIM_FIR (0x6 << 24)
+#define MPC52xx_PSC_SICR_SIM_CODEC_24 (0x7 << 24)
+#define MPC52xx_PSC_SICR_SIM_CODEC_32 (0xf << 24)
+#define MPC52xx_PSC_SICR_GENCLK (1 << 23)
+#define MPC52xx_PSC_SICR_I2S (1 << 22)
+#define MPC52xx_PSC_SICR_CLKPOL (1 << 21)
+#define MPC52xx_PSC_SICR_SYNCPOL (1 << 20)
+#define MPC52xx_PSC_SICR_CELLSLAVE (1 << 19)
+#define MPC52xx_PSC_SICR_CELL2XCLK (1 << 18)
+#define MPC52xx_PSC_SICR_ESAI (1 << 17)
+#define MPC52xx_PSC_SICR_ENAC97 (1 << 16)
+#define MPC52xx_PSC_SICR_SPI (1 << 15)
+#define MPC52xx_PSC_SICR_MSTR (1 << 14)
+#define MPC52xx_PSC_SICR_CPOL (1 << 13)
+#define MPC52xx_PSC_SICR_CPHA (1 << 12)
+#define MPC52xx_PSC_SICR_USEEOF (1 << 11)
+#define MPC52xx_PSC_SICR_DISABLEEOF (1 << 10)
+
+/* Structure of the hardware registers */
+struct mpc52xx_psc {
+ u8 mode; /* PSC + 0x00 */
+ u8 reserved0[3];
+ union { /* PSC + 0x04 */
+ u16 status;
+ u16 clock_select;
+ } sr_csr;
+#define mpc52xx_psc_status sr_csr.status
+#define mpc52xx_psc_clock_select sr_csr.clock_select
+ u16 reserved1;
+ u8 command; /* PSC + 0x08 */
+ u8 reserved2[3];
+ union { /* PSC + 0x0c */
+ u8 buffer_8;
+ u16 buffer_16;
+ u32 buffer_32;
+ } buffer;
+#define mpc52xx_psc_buffer_8 buffer.buffer_8
+#define mpc52xx_psc_buffer_16 buffer.buffer_16
+#define mpc52xx_psc_buffer_32 buffer.buffer_32
+ union { /* PSC + 0x10 */
+ u8 ipcr;
+ u8 acr;
+ } ipcr_acr;
+#define mpc52xx_psc_ipcr ipcr_acr.ipcr
+#define mpc52xx_psc_acr ipcr_acr.acr
+ u8 reserved3[3];
+ union { /* PSC + 0x14 */
+ u16 isr;
+ u16 imr;
+ } isr_imr;
+#define mpc52xx_psc_isr isr_imr.isr
+#define mpc52xx_psc_imr isr_imr.imr
+ u16 reserved4;
+ u8 ctur; /* PSC + 0x18 */
+ u8 reserved5[3];
+ u8 ctlr; /* PSC + 0x1c */
+ u8 reserved6[3];
+ /* BitClkDiv field of CCR is byte swapped in
+ * the hardware for mpc5200/b compatibility */
+ u32 ccr; /* PSC + 0x20 */
+ u32 ac97_slots; /* PSC + 0x24 */
+ u32 ac97_cmd; /* PSC + 0x28 */
+ u32 ac97_data; /* PSC + 0x2c */
+ u8 ivr; /* PSC + 0x30 */
+ u8 reserved8[3];
+ u8 ip; /* PSC + 0x34 */
+ u8 reserved9[3];
+ u8 op1; /* PSC + 0x38 */
+ u8 reserved10[3];
+ u8 op0; /* PSC + 0x3c */
+ u8 reserved11[3];
+ u32 sicr; /* PSC + 0x40 */
+ u8 ircr1; /* PSC + 0x44 */
+ u8 reserved13[3];
+ u8 ircr2; /* PSC + 0x48 */
+ u8 reserved14[3];
+ u8 irsdr; /* PSC + 0x4c */
+ u8 reserved15[3];
+ u8 irmdr; /* PSC + 0x50 */
+ u8 reserved16[3];
+ u8 irfdr; /* PSC + 0x54 */
+ u8 reserved17[3];
+};
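+
+/*
+ * Illustrative sketch only: a minimal polled transmit on a PSC used as a
+ * UART waits for TXRDY in the status register, then writes a byte to the
+ * 8-bit buffer register. This assumes the asm/io.h accessors and that the
+ * caller already ioremap'ed the PSC; it is not the real serial driver.
+ */
+#if 0
+static void example_psc_putc(struct mpc52xx_psc __iomem *psc, char c)
+{
+ while (!(in_be16(&psc->mpc52xx_psc_status) & MPC52xx_PSC_SR_TXRDY))
+ ;
+ out_8(&psc->mpc52xx_psc_buffer_8, c);
+}
+#endif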
+
+struct mpc52xx_psc_fifo {
+ u16 rfnum; /* PSC + 0x58 */
+ u16 reserved18;
+ u16 tfnum; /* PSC + 0x5c */
+ u16 reserved19;
+ u32 rfdata; /* PSC + 0x60 */
+ u16 rfstat; /* PSC + 0x64 */
+ u16 reserved20;
+ u8 rfcntl; /* PSC + 0x68 */
+ u8 reserved21[5];
+ u16 rfalarm; /* PSC + 0x6e */
+ u16 reserved22;
+ u16 rfrptr; /* PSC + 0x72 */
+ u16 reserved23;
+ u16 rfwptr; /* PSC + 0x76 */
+ u16 reserved24;
+ u16 rflrfptr; /* PSC + 0x7a */
+ u16 reserved25;
+ u16 rflwfptr; /* PSC + 0x7e */
+ u32 tfdata; /* PSC + 0x80 */
+ u16 tfstat; /* PSC + 0x84 */
+ u16 reserved26;
+ u8 tfcntl; /* PSC + 0x88 */
+ u8 reserved27[5];
+ u16 tfalarm; /* PSC + 0x8e */
+ u16 reserved28;
+ u16 tfrptr; /* PSC + 0x92 */
+ u16 reserved29;
+ u16 tfwptr; /* PSC + 0x96 */
+ u16 reserved30;
+ u16 tflrfptr; /* PSC + 0x9a */
+ u16 reserved31;
+ u16 tflwfptr; /* PSC + 0x9e */
+};
+
+#define MPC512x_PSC_FIFO_RESET_SLICE 0x80
+#define MPC512x_PSC_FIFO_ENABLE_SLICE 0x01
+#define MPC512x_PSC_FIFO_ENABLE_DMA 0x04
+
+#define MPC512x_PSC_FIFO_EMPTY 0x1
+#define MPC512x_PSC_FIFO_FULL 0x2
+#define MPC512x_PSC_FIFO_ALARM 0x4
+#define MPC512x_PSC_FIFO_URERR 0x8
+#define MPC512x_PSC_FIFO_ORERR 0x01
+#define MPC512x_PSC_FIFO_MEMERROR 0x02
+
+struct mpc512x_psc_fifo {
+ u32 reserved1[10];
+ u32 txcmd; /* PSC + 0x80 */
+ u32 txalarm; /* PSC + 0x84 */
+ u32 txsr; /* PSC + 0x88 */
+ u32 txisr; /* PSC + 0x8c */
+ u32 tximr; /* PSC + 0x90 */
+ u32 txcnt; /* PSC + 0x94 */
+ u32 txptr; /* PSC + 0x98 */
+ u32 txsz; /* PSC + 0x9c */
+ u32 reserved2[7];
+ union {
+ u8 txdata_8;
+ u16 txdata_16;
+ u32 txdata_32;
+ } txdata; /* PSC + 0xbc */
+#define txdata_8 txdata.txdata_8
+#define txdata_16 txdata.txdata_16
+#define txdata_32 txdata.txdata_32
+ u32 rxcmd; /* PSC + 0xc0 */
+ u32 rxalarm; /* PSC + 0xc4 */
+ u32 rxsr; /* PSC + 0xc8 */
+ u32 rxisr; /* PSC + 0xcc */
+ u32 rximr; /* PSC + 0xd0 */
+ u32 rxcnt; /* PSC + 0xd4 */
+ u32 rxptr; /* PSC + 0xd8 */
+ u32 rxsz; /* PSC + 0xdc */
+ u32 reserved3[7];
+ union {
+ u8 rxdata_8;
+ u16 rxdata_16;
+ u32 rxdata_32;
+ } rxdata; /* PSC + 0xfc */
+#define rxdata_8 rxdata.rxdata_8
+#define rxdata_16 rxdata.rxdata_16
+#define rxdata_32 rxdata.rxdata_32
+};
+
+#endif /* __ASM_MPC52xx_PSC_H__ */
diff --git a/arch/powerpc/include/asm/mpc6xx.h b/arch/powerpc/include/asm/mpc6xx.h
new file mode 100644
index 0000000..effc229
--- /dev/null
+++ b/arch/powerpc/include/asm/mpc6xx.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_POWERPC_MPC6xx_H
+#define __ASM_POWERPC_MPC6xx_H
+
+void mpc6xx_enter_standby(void);
+
+#endif
diff --git a/arch/powerpc/include/asm/mpc8260.h b/arch/powerpc/include/asm/mpc8260.h
new file mode 100644
index 0000000..03317e1
--- /dev/null
+++ b/arch/powerpc/include/asm/mpc8260.h
@@ -0,0 +1,25 @@
+/*
+ * Since there are many different boards and no standard configuration,
+ * we have a unique include file for each. Rather than change every
+ * file that has to include MPC8260 configuration, they all include
+ * this one and the configuration switching is done here.
+ */
+#ifdef __KERNEL__
+#ifndef __ASM_POWERPC_MPC8260_H__
+#define __ASM_POWERPC_MPC8260_H__
+
+#define MPC82XX_BCR_PLDP 0x00800000 /* Pipeline Maximum Depth */
+
+#ifdef CONFIG_8260
+
+#if defined(CONFIG_PQ2ADS) || defined (CONFIG_PQ2FADS)
+#include <platforms/82xx/pq2ads.h>
+#endif
+
+#ifdef CONFIG_PCI_8260
+#include <platforms/82xx/m82xx_pci.h>
+#endif
+
+#endif /* CONFIG_8260 */
+#endif /* !__ASM_POWERPC_MPC8260_H__ */
+#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/mpc86xx.h b/arch/powerpc/include/asm/mpc86xx.h
new file mode 100644
index 0000000..15f650f
--- /dev/null
+++ b/arch/powerpc/include/asm/mpc86xx.h
@@ -0,0 +1,33 @@
+/*
+ * MPC86xx definitions
+ *
+ * Author: Jeff Brown
+ *
+ * Copyright 2004 Freescale Semiconductor, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifdef __KERNEL__
+#ifndef __ASM_POWERPC_MPC86xx_H__
+#define __ASM_POWERPC_MPC86xx_H__
+
+#include <asm/mmu.h>
+
+#ifdef CONFIG_PPC_86xx
+
+#define CPU0_BOOT_RELEASE 0x01000000
+#define CPU1_BOOT_RELEASE 0x02000000
+#define CPU_ALL_RELEASED (CPU0_BOOT_RELEASE | CPU1_BOOT_RELEASE)
+#define MCM_PORT_CONFIG_OFFSET 0x1010
+
+/* Offset from CCSRBAR */
+#define MPC86xx_MCM_OFFSET (0x00000)
+#define MPC86xx_MCM_SIZE (0x02000)
+
+#endif /* CONFIG_PPC_86xx */
+#endif /* __ASM_POWERPC_MPC86xx_H__ */
+#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/mpc8xx.h b/arch/powerpc/include/asm/mpc8xx.h
new file mode 100644
index 0000000..98f3c4f
--- /dev/null
+++ b/arch/powerpc/include/asm/mpc8xx.h
@@ -0,0 +1,12 @@
+/* This is the single file included by all MPC8xx build options.
+ * Since there are many different boards and no standard configuration,
+ * we have a unique include file for each. Rather than change every
+ * file that has to include MPC8xx configuration, they all include
+ * this one and the configuration switching is done here.
+ */
+#ifndef __CONFIG_8xx_DEFS
+#define __CONFIG_8xx_DEFS
+
+extern struct mpc8xx_pcmcia_ops m8xx_pcmcia_ops;
+
+#endif /* __CONFIG_8xx_DEFS */
diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h
new file mode 100644
index 0000000..fe566a3
--- /dev/null
+++ b/arch/powerpc/include/asm/mpic.h
@@ -0,0 +1,481 @@
+#ifndef _ASM_POWERPC_MPIC_H
+#define _ASM_POWERPC_MPIC_H
+#ifdef __KERNEL__
+
+#include <linux/irq.h>
+#include <linux/sysdev.h>
+#include <asm/dcr.h>
+
+/*
+ * Global registers
+ */
+
+#define MPIC_GREG_BASE 0x01000
+
+#define MPIC_GREG_FEATURE_0 0x00000
+#define MPIC_GREG_FEATURE_LAST_SRC_MASK 0x07ff0000
+#define MPIC_GREG_FEATURE_LAST_SRC_SHIFT 16
+#define MPIC_GREG_FEATURE_LAST_CPU_MASK 0x00001f00
+#define MPIC_GREG_FEATURE_LAST_CPU_SHIFT 8
+#define MPIC_GREG_FEATURE_VERSION_MASK 0xff
+#define MPIC_GREG_FEATURE_1 0x00010
+#define MPIC_GREG_GLOBAL_CONF_0 0x00020
+#define MPIC_GREG_GCONF_RESET 0x80000000
+#define MPIC_GREG_GCONF_8259_PTHROU_DIS 0x20000000
+#define MPIC_GREG_GCONF_NO_BIAS 0x10000000
+#define MPIC_GREG_GCONF_BASE_MASK 0x000fffff
+#define MPIC_GREG_GCONF_MCK 0x08000000
+#define MPIC_GREG_GLOBAL_CONF_1 0x00030
+#define MPIC_GREG_GLOBAL_CONF_1_SIE 0x08000000
+#define MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO_MASK 0x70000000
+#define MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO(r) \
+ (((r) << 28) & MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO_MASK)
+#define MPIC_GREG_VENDOR_0 0x00040
+#define MPIC_GREG_VENDOR_1 0x00050
+#define MPIC_GREG_VENDOR_2 0x00060
+#define MPIC_GREG_VENDOR_3 0x00070
+#define MPIC_GREG_VENDOR_ID 0x00080
+#define MPIC_GREG_VENDOR_ID_STEPPING_MASK 0x00ff0000
+#define MPIC_GREG_VENDOR_ID_STEPPING_SHIFT 16
+#define MPIC_GREG_VENDOR_ID_DEVICE_ID_MASK 0x0000ff00
+#define MPIC_GREG_VENDOR_ID_DEVICE_ID_SHIFT 8
+#define MPIC_GREG_VENDOR_ID_VENDOR_ID_MASK 0x000000ff
+#define MPIC_GREG_PROCESSOR_INIT 0x00090
+#define MPIC_GREG_IPI_VECTOR_PRI_0 0x000a0
+#define MPIC_GREG_IPI_VECTOR_PRI_1 0x000b0
+#define MPIC_GREG_IPI_VECTOR_PRI_2 0x000c0
+#define MPIC_GREG_IPI_VECTOR_PRI_3 0x000d0
+#define MPIC_GREG_IPI_STRIDE 0x10
+#define MPIC_GREG_SPURIOUS 0x000e0
+#define MPIC_GREG_TIMER_FREQ 0x000f0
+
+/*
+ * Timer registers
+ */
+#define MPIC_TIMER_BASE 0x01100
+#define MPIC_TIMER_STRIDE 0x40
+
+#define MPIC_TIMER_CURRENT_CNT 0x00000
+#define MPIC_TIMER_BASE_CNT 0x00010
+#define MPIC_TIMER_VECTOR_PRI 0x00020
+#define MPIC_TIMER_DESTINATION 0x00030
+
+/*
+ * Per-Processor registers
+ */
+
+#define MPIC_CPU_THISBASE 0x00000
+#define MPIC_CPU_BASE 0x20000
+#define MPIC_CPU_STRIDE 0x01000
+
+#define MPIC_CPU_IPI_DISPATCH_0 0x00040
+#define MPIC_CPU_IPI_DISPATCH_1 0x00050
+#define MPIC_CPU_IPI_DISPATCH_2 0x00060
+#define MPIC_CPU_IPI_DISPATCH_3 0x00070
+#define MPIC_CPU_IPI_DISPATCH_STRIDE 0x00010
+#define MPIC_CPU_CURRENT_TASK_PRI 0x00080
+#define MPIC_CPU_TASKPRI_MASK 0x0000000f
+#define MPIC_CPU_WHOAMI 0x00090
+#define MPIC_CPU_WHOAMI_MASK 0x0000001f
+#define MPIC_CPU_INTACK 0x000a0
+#define MPIC_CPU_EOI 0x000b0
+#define MPIC_CPU_MCACK 0x000c0
+
+/*
+ * Per-source registers
+ */
+
+#define MPIC_IRQ_BASE 0x10000
+#define MPIC_IRQ_STRIDE 0x00020
+#define MPIC_IRQ_VECTOR_PRI 0x00000
+#define MPIC_VECPRI_MASK 0x80000000
+#define MPIC_VECPRI_ACTIVITY 0x40000000 /* Read Only */
+#define MPIC_VECPRI_PRIORITY_MASK 0x000f0000
+#define MPIC_VECPRI_PRIORITY_SHIFT 16
+#define MPIC_VECPRI_VECTOR_MASK 0x000007ff
+#define MPIC_VECPRI_POLARITY_POSITIVE 0x00800000
+#define MPIC_VECPRI_POLARITY_NEGATIVE 0x00000000
+#define MPIC_VECPRI_POLARITY_MASK 0x00800000
+#define MPIC_VECPRI_SENSE_LEVEL 0x00400000
+#define MPIC_VECPRI_SENSE_EDGE 0x00000000
+#define MPIC_VECPRI_SENSE_MASK 0x00400000
+#define MPIC_IRQ_DESTINATION 0x00010
+
+#define MPIC_MAX_IRQ_SOURCES 2048
+#define MPIC_MAX_CPUS 32
+#define MPIC_MAX_ISU 32
+
+/*
+ * The Tsi108 implementation of the MPIC has many differences from the original one
+ */
+
+/*
+ * Global registers
+ */
+
+#define TSI108_GREG_BASE 0x00000
+#define TSI108_GREG_FEATURE_0 0x00000
+#define TSI108_GREG_GLOBAL_CONF_0 0x00004
+#define TSI108_GREG_VENDOR_ID 0x0000c
+#define TSI108_GREG_IPI_VECTOR_PRI_0 0x00204 /* Doorbell 0 */
+#define TSI108_GREG_IPI_STRIDE 0x0c
+#define TSI108_GREG_SPURIOUS 0x00010
+#define TSI108_GREG_TIMER_FREQ 0x00014
+
+/*
+ * Timer registers
+ */
+#define TSI108_TIMER_BASE 0x0030
+#define TSI108_TIMER_STRIDE 0x10
+#define TSI108_TIMER_CURRENT_CNT 0x00000
+#define TSI108_TIMER_BASE_CNT 0x00004
+#define TSI108_TIMER_VECTOR_PRI 0x00008
+#define TSI108_TIMER_DESTINATION 0x0000c
+
+/*
+ * Per-Processor registers
+ */
+#define TSI108_CPU_BASE 0x00300
+#define TSI108_CPU_STRIDE 0x00040
+#define TSI108_CPU_IPI_DISPATCH_0 0x00200
+#define TSI108_CPU_IPI_DISPATCH_STRIDE 0x00000
+#define TSI108_CPU_CURRENT_TASK_PRI 0x00000
+#define TSI108_CPU_WHOAMI 0xffffffff
+#define TSI108_CPU_INTACK 0x00004
+#define TSI108_CPU_EOI 0x00008
+#define TSI108_CPU_MCACK 0x00004 /* Doesn't really exist here */
+
+/*
+ * Per-source registers
+ */
+#define TSI108_IRQ_BASE 0x00100
+#define TSI108_IRQ_STRIDE 0x00008
+#define TSI108_IRQ_VECTOR_PRI 0x00000
+#define TSI108_VECPRI_VECTOR_MASK 0x000000ff
+#define TSI108_VECPRI_POLARITY_POSITIVE 0x01000000
+#define TSI108_VECPRI_POLARITY_NEGATIVE 0x00000000
+#define TSI108_VECPRI_SENSE_LEVEL 0x02000000
+#define TSI108_VECPRI_SENSE_EDGE 0x00000000
+#define TSI108_VECPRI_POLARITY_MASK 0x01000000
+#define TSI108_VECPRI_SENSE_MASK 0x02000000
+#define TSI108_IRQ_DESTINATION 0x00004
+
+/* weird mpic register indices and mask bits in the HW info array */
+enum {
+ MPIC_IDX_GREG_BASE = 0,
+ MPIC_IDX_GREG_FEATURE_0,
+ MPIC_IDX_GREG_GLOBAL_CONF_0,
+ MPIC_IDX_GREG_VENDOR_ID,
+ MPIC_IDX_GREG_IPI_VECTOR_PRI_0,
+ MPIC_IDX_GREG_IPI_STRIDE,
+ MPIC_IDX_GREG_SPURIOUS,
+ MPIC_IDX_GREG_TIMER_FREQ,
+
+ MPIC_IDX_TIMER_BASE,
+ MPIC_IDX_TIMER_STRIDE,
+ MPIC_IDX_TIMER_CURRENT_CNT,
+ MPIC_IDX_TIMER_BASE_CNT,
+ MPIC_IDX_TIMER_VECTOR_PRI,
+ MPIC_IDX_TIMER_DESTINATION,
+
+ MPIC_IDX_CPU_BASE,
+ MPIC_IDX_CPU_STRIDE,
+ MPIC_IDX_CPU_IPI_DISPATCH_0,
+ MPIC_IDX_CPU_IPI_DISPATCH_STRIDE,
+ MPIC_IDX_CPU_CURRENT_TASK_PRI,
+ MPIC_IDX_CPU_WHOAMI,
+ MPIC_IDX_CPU_INTACK,
+ MPIC_IDX_CPU_EOI,
+ MPIC_IDX_CPU_MCACK,
+
+ MPIC_IDX_IRQ_BASE,
+ MPIC_IDX_IRQ_STRIDE,
+ MPIC_IDX_IRQ_VECTOR_PRI,
+
+ MPIC_IDX_VECPRI_VECTOR_MASK,
+ MPIC_IDX_VECPRI_POLARITY_POSITIVE,
+ MPIC_IDX_VECPRI_POLARITY_NEGATIVE,
+ MPIC_IDX_VECPRI_SENSE_LEVEL,
+ MPIC_IDX_VECPRI_SENSE_EDGE,
+ MPIC_IDX_VECPRI_POLARITY_MASK,
+ MPIC_IDX_VECPRI_SENSE_MASK,
+ MPIC_IDX_IRQ_DESTINATION,
+ MPIC_IDX_END
+};
+
+
+#ifdef CONFIG_MPIC_U3_HT_IRQS
+/* Fixup table entry */
+struct mpic_irq_fixup
+{
+ u8 __iomem *base;
+ u8 __iomem *applebase;
+ u32 data;
+ unsigned int index;
+};
+#endif /* CONFIG_MPIC_U3_HT_IRQS */
+
+
+enum mpic_reg_type {
+ mpic_access_mmio_le,
+ mpic_access_mmio_be,
+#ifdef CONFIG_PPC_DCR
+ mpic_access_dcr
+#endif
+};
+
+struct mpic_reg_bank {
+ u32 __iomem *base;
+#ifdef CONFIG_PPC_DCR
+ dcr_host_t dhost;
+#endif /* CONFIG_PPC_DCR */
+};
+
+struct mpic_irq_save {
+ u32 vecprio,
+ dest;
+#ifdef CONFIG_MPIC_U3_HT_IRQS
+ u32 fixup_data;
+#endif
+};
+
+/* The instance data of a given MPIC */
+struct mpic
+{
+ /* The remapper for this MPIC */
+ struct irq_host *irqhost;
+
+ /* The "linux" controller struct */
+ struct irq_chip hc_irq;
+#ifdef CONFIG_MPIC_U3_HT_IRQS
+ struct irq_chip hc_ht_irq;
+#endif
+#ifdef CONFIG_SMP
+ struct irq_chip hc_ipi;
+#endif
+ const char *name;
+ /* Flags */
+ unsigned int flags;
+ /* How many irq sources in a given ISU */
+ unsigned int isu_size;
+ unsigned int isu_shift;
+ unsigned int isu_mask;
+ unsigned int irq_count;
+ /* Number of sources */
+ unsigned int num_sources;
+ /* Number of CPUs */
+ unsigned int num_cpus;
+ /* default senses array */
+ unsigned char *senses;
+ unsigned int senses_count;
+
+ /* vector numbers used for internal sources (ipi/timers) */
+ unsigned int ipi_vecs[4];
+ unsigned int timer_vecs[4];
+
+ /* Spurious vector to program into unused sources */
+ unsigned int spurious_vec;
+
+#ifdef CONFIG_MPIC_U3_HT_IRQS
+ /* The fixup table */
+ struct mpic_irq_fixup *fixups;
+ spinlock_t fixup_lock;
+#endif
+
+ /* Register access method */
+ enum mpic_reg_type reg_type;
+
+ /* The various ioremap'ed bases */
+ struct mpic_reg_bank gregs;
+ struct mpic_reg_bank tmregs;
+ struct mpic_reg_bank cpuregs[MPIC_MAX_CPUS];
+ struct mpic_reg_bank isus[MPIC_MAX_ISU];
+
+ /* Protected sources */
+ unsigned long *protected;
+
+#ifdef CONFIG_MPIC_WEIRD
+ /* Pointer to HW info array */
+ u32 *hw_set;
+#endif
+
+#ifdef CONFIG_PCI_MSI
+ spinlock_t bitmap_lock;
+ unsigned long *hwirq_bitmap;
+#endif
+
+#ifdef CONFIG_MPIC_BROKEN_REGREAD
+ u32 isu_reg0_shadow[MPIC_MAX_IRQ_SOURCES];
+#endif
+
+ /* link */
+ struct mpic *next;
+
+ struct sys_device sysdev;
+
+#ifdef CONFIG_PM
+ struct mpic_irq_save *save_data;
+#endif
+};
+
+/*
+ * MPIC flags (passed to mpic_alloc)
+ *
+ * The top 4 bits contain an MPIC bhw id that is used to index the
+ * register offsets and some masks when CONFIG_MPIC_WEIRD is set.
+ * Note that not setting any ID (leaving those bits at 0) means a standard MPIC
+ */
+
+/* This is the primary controller, only that one has IPIs and
+ * has affinity control. A non-primary MPIC always uses CPU0
+ * registers only
+ */
+#define MPIC_PRIMARY 0x00000001
+
+/* Set this for a big-endian MPIC */
+#define MPIC_BIG_ENDIAN 0x00000002
+/* Broken U3 MPIC */
+#define MPIC_U3_HT_IRQS 0x00000004
+/* Broken IPI registers (autodetected) */
+#define MPIC_BROKEN_IPI 0x00000008
+/* MPIC wants a reset */
+#define MPIC_WANTS_RESET 0x00000010
+/* Spurious vector requires EOI */
+#define MPIC_SPV_EOI 0x00000020
+/* No passthrough disable */
+#define MPIC_NO_PTHROU_DIS 0x00000040
+/* DCR based MPIC */
+#define MPIC_USES_DCR 0x00000080
+/* MPIC has 11-bit vector fields (or larger) */
+#define MPIC_LARGE_VECTORS 0x00000100
+/* Enable delivery of prio 15 interrupts as MCK instead of EE */
+#define MPIC_ENABLE_MCK 0x00000200
+/* Disable bias among target selection, spread interrupts evenly */
+#define MPIC_NO_BIAS 0x00000400
+/* Ignore NIRQS as reported by FRR */
+#define MPIC_BROKEN_FRR_NIRQS 0x00000800
+
+/* MPIC HW modification ID */
+#define MPIC_REGSET_MASK 0xf0000000
+#define MPIC_REGSET(val) (((val) & 0xf ) << 28)
+#define MPIC_GET_REGSET(flags) (((flags) >> 28) & 0xf)
+
+#define MPIC_REGSET_STANDARD MPIC_REGSET(0) /* Original MPIC */
+#define MPIC_REGSET_TSI108 MPIC_REGSET(1) /* Tsi108/109 PIC */
+
+/* Allocate the controller structure and setup the linux irq descs
+ * for the range of interrupts passed in. No HW initialization is
+ * actually performed.
+ *
+ * @phys_addr: physical base address of the MPIC
+ * @flags: flags, see constants above
+ * @isu_size: number of interrupts in an ISU. Use 0 to use a
+ * standard ISU-less setup (aka powermac)
+ * @irq_offset: first irq number to assign to this mpic
+ * @irq_count: number of irqs to use with this mpic's IRQ sources. Pass 0
+ * to match the number of sources
+ * @ipi_offset: first irq number to assign to this mpic IPI sources,
+ * used only on primary mpic
+ * @senses: array of sense values
+ * @senses_num: number of entries in the array
+ *
+ * Note about the sense array. If none is passed, all interrupts are
+ * set up to be level negative unless MPIC_U3_HT_IRQS is set, in which
+ * case they are edge positive (and the array is ignored anyway).
+ * The values in the array start at the first source of the MPIC,
+ * that is senses[0] corresponds to linux irq "irq_offset".
+ */
+extern struct mpic *mpic_alloc(struct device_node *node,
+ phys_addr_t phys_addr,
+ unsigned int flags,
+ unsigned int isu_size,
+ unsigned int irq_count,
+ const char *name);
+
+/* Assign ISUs; must be called before mpic_init()
+ *
+ * @mpic: controller structure as returned by mpic_alloc()
+ * @isu_num: ISU number
+ * @phys_addr: physical address of the ISU
+ */
+extern void mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
+ phys_addr_t phys_addr);
+
+/* Set default sense codes
+ *
+ * @mpic: controller
+ * @senses: array of sense codes
+ * @count: size of above array
+ *
+ * Optionally provide an array (indexed on hardware interrupt numbers
+ * for this MPIC) of default sense codes for the chip. Those are linux
+ * sense codes IRQ_TYPE_*
+ *
+ * The driver gets ownership of the pointer, don't dispose of it or
+ * anything like that. __init only.
+ */
+extern void mpic_set_default_senses(struct mpic *mpic, u8 *senses, int count);
+
+
+/* Initialize the controller. After this has been called, none of the above
+ * should be called again for this mpic
+ */
+extern void mpic_init(struct mpic *mpic);
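+
+/*
+ * A minimal bring-up sketch (illustrative only): a board file might use the
+ * calls above roughly like this. The device node "np", the register address,
+ * the irq count and the name below are made-up placeholders, not values from
+ * any particular platform.
+ *
+ *	struct mpic *mpic;
+ *
+ *	mpic = mpic_alloc(np, 0xf8040000, MPIC_PRIMARY | MPIC_BIG_ENDIAN,
+ *			  0, 256, " MPIC     ");
+ *	BUG_ON(mpic == NULL);
+ *	mpic_init(mpic);
+ *
+ * With external ISUs, pass a non-zero isu_size to mpic_alloc() and call
+ * mpic_assign_isu() once per ISU before mpic_init().
+ */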
+
+/*
+ * All of the following functions must only be used after the
+ * ISUs have been assigned and the controller fully initialized
+ * with mpic_init()
+ */
+
+
+/* Change the priority of an interrupt. Default is 8 for irqs and
+ * 10 for IPIs. You can call this on both IPIs and IRQ numbers, but the
+ * IPI number is then the offset one (i.e. the linux irq number the IPI is mapped to)
+ */
+extern void mpic_irq_set_priority(unsigned int irq, unsigned int pri);
+
+/* Setup a non-boot CPU */
+extern void mpic_setup_this_cpu(void);
+
+/* Clean up for kexec (or cpu offline or ...) */
+extern void mpic_teardown_this_cpu(int secondary);
+
+/* Get the current cpu priority for this cpu (0..15) */
+extern int mpic_cpu_get_priority(void);
+
+/* Set the current cpu priority for this cpu */
+extern void mpic_cpu_set_priority(int prio);
+
+/* Request IPIs on primary mpic */
+extern void mpic_request_ipis(void);
+
+/* Send an IPI (raw, non-offset IPI number 0..3) */
+extern void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask);
+
+/* Send a message (IPI) to a given target (cpu number or MSG_*) */
+void smp_mpic_message_pass(int target, int msg);
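+
+/*
+ * IPI usage sketch (illustrative only): on the primary mpic the platform's
+ * SMP setup requests the IPIs once, after which individual IPIs can be sent
+ * either directly or through the message-pass wrapper. "cpu" is just a
+ * placeholder CPU number here.
+ *
+ *	mpic_request_ipis();
+ *	...
+ *	mpic_send_ipi(0, 1 << cpu);
+ */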
+
+/* Unmask a specific virq */
+extern void mpic_unmask_irq(unsigned int irq);
+/* Mask a specific virq */
+extern void mpic_mask_irq(unsigned int irq);
+/* EOI a specific virq */
+extern void mpic_end_irq(unsigned int irq);
+
+/* Fetch interrupt from a given mpic */
+extern unsigned int mpic_get_one_irq(struct mpic *mpic);
+/* This one gets from the primary mpic */
+extern unsigned int mpic_get_irq(void);
+/* Fetch Machine Check interrupt from primary mpic */
+extern unsigned int mpic_get_mcirq(void);
+
+/* Set the EPIC clock ratio */
+void mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio);
+
+/* Enable/Disable EPIC serial interrupt mode */
+void mpic_set_serial_int(struct mpic *mpic, int enable);
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_MPIC_H */
diff --git a/arch/powerpc/include/asm/msgbuf.h b/arch/powerpc/include/asm/msgbuf.h
new file mode 100644
index 0000000..dd76743
--- /dev/null
+++ b/arch/powerpc/include/asm/msgbuf.h
@@ -0,0 +1,33 @@
+#ifndef _ASM_POWERPC_MSGBUF_H
+#define _ASM_POWERPC_MSGBUF_H
+
+/*
+ * The msqid64_ds structure for the PowerPC architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ */
+
+struct msqid64_ds {
+ struct ipc64_perm msg_perm;
+#ifndef __powerpc64__
+ unsigned int __unused1;
+#endif
+ __kernel_time_t msg_stime; /* last msgsnd time */
+#ifndef __powerpc64__
+ unsigned int __unused2;
+#endif
+ __kernel_time_t msg_rtime; /* last msgrcv time */
+#ifndef __powerpc64__
+ unsigned int __unused3;
+#endif
+ __kernel_time_t msg_ctime; /* last change time */
+ unsigned long msg_cbytes; /* current number of bytes on queue */
+ unsigned long msg_qnum; /* number of messages in queue */
+ unsigned long msg_qbytes; /* max number of bytes on queue */
+ __kernel_pid_t msg_lspid; /* pid of last msgsnd */
+ __kernel_pid_t msg_lrpid; /* last receive pid */
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+#endif /* _ASM_POWERPC_MSGBUF_H */
diff --git a/arch/powerpc/include/asm/mutex.h b/arch/powerpc/include/asm/mutex.h
new file mode 100644
index 0000000..458c1f7
--- /dev/null
+++ b/arch/powerpc/include/asm/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/arch/powerpc/include/asm/nvram.h b/arch/powerpc/include/asm/nvram.h
new file mode 100644
index 0000000..efde5ac
--- /dev/null
+++ b/arch/powerpc/include/asm/nvram.h
@@ -0,0 +1,139 @@
+/*
+ * NVRAM definitions and access functions.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_POWERPC_NVRAM_H
+#define _ASM_POWERPC_NVRAM_H
+
+#include <linux/errno.h>
+
+#define NVRW_CNT 0x20
+#define NVRAM_HEADER_LEN 16 /* sizeof(struct nvram_header) */
+#define NVRAM_BLOCK_LEN 16
+#define NVRAM_MAX_REQ (2080/NVRAM_BLOCK_LEN)
+#define NVRAM_MIN_REQ (1056/NVRAM_BLOCK_LEN)
+
+#define NVRAM_AS0 0x74
+#define NVRAM_AS1 0x75
+#define NVRAM_DATA 0x77
+
+
+/* RTC Offsets */
+
+#define MOTO_RTC_SECONDS 0x1FF9
+#define MOTO_RTC_MINUTES 0x1FFA
+#define MOTO_RTC_HOURS 0x1FFB
+#define MOTO_RTC_DAY_OF_WEEK 0x1FFC
+#define MOTO_RTC_DAY_OF_MONTH 0x1FFD
+#define MOTO_RTC_MONTH 0x1FFE
+#define MOTO_RTC_YEAR 0x1FFF
+#define MOTO_RTC_CONTROLA 0x1FF8
+#define MOTO_RTC_CONTROLB 0x1FF9
+
+#define NVRAM_SIG_SP 0x02 /* support processor */
+#define NVRAM_SIG_OF 0x50 /* open firmware config */
+#define NVRAM_SIG_FW 0x51 /* general firmware */
+#define NVRAM_SIG_HW 0x52 /* hardware (VPD) */
+#define NVRAM_SIG_FLIP 0x5a /* Apple flip/flop header */
+#define NVRAM_SIG_APPL 0x5f /* Apple "system" (???) */
+#define NVRAM_SIG_SYS 0x70 /* system env vars */
+#define NVRAM_SIG_CFG 0x71 /* config data */
+#define NVRAM_SIG_ELOG 0x72 /* error log */
+#define NVRAM_SIG_VEND 0x7e /* vendor defined */
+#define NVRAM_SIG_FREE 0x7f /* Free space */
+#define NVRAM_SIG_OS 0xa0 /* OS defined */
+#define NVRAM_SIG_PANIC 0xa1 /* Apple OSX "panic" */
+
+/* If you change this size, then change the size of NVNAME_LEN */
+struct nvram_header {
+ unsigned char signature;
+ unsigned char checksum;
+ unsigned short length;
+ char name[12];
+};
+
+#ifdef __KERNEL__
+
+#include <linux/list.h>
+
+struct nvram_partition {
+ struct list_head partition;
+ struct nvram_header header;
+ unsigned int index;
+};
+
+
+extern int nvram_write_error_log(char * buff, int length,
+ unsigned int err_type, unsigned int err_seq);
+extern int nvram_read_error_log(char * buff, int length,
+ unsigned int * err_type, unsigned int *err_seq);
+extern int nvram_clear_error_log(void);
+extern struct nvram_partition *nvram_find_partition(int sig, const char *name);
+
+extern int pSeries_nvram_init(void);
+
+#ifdef CONFIG_MMIO_NVRAM
+extern int mmio_nvram_init(void);
+#else
+static inline int mmio_nvram_init(void)
+{
+ return -ENODEV;
+}
+#endif
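+
+/*
+ * Usage sketch (illustrative only): platform error-log code reads and then
+ * clears the saved log roughly as below. The buffer size and local variable
+ * names are placeholders.
+ *
+ *	char buf[NVRAM_MAX_REQ * NVRAM_BLOCK_LEN];
+ *	unsigned int err_type, err_seq;
+ *
+ *	if (nvram_read_error_log(buf, sizeof(buf), &err_type, &err_seq) == 0) {
+ *		... hand the log off to its consumer ...
+ *		nvram_clear_error_log();
+ *	}
+ */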
+
+#endif /* __KERNEL__ */
+
+/* PowerMac specific nvram definitions */
+
+enum {
+ pmac_nvram_OF, /* Open Firmware partition */
+ pmac_nvram_XPRAM, /* MacOS XPRAM partition */
+ pmac_nvram_NR /* MacOS Name Registry partition */
+};
+
+#ifdef __KERNEL__
+/* Return partition offset in nvram */
+extern int pmac_get_partition(int partition);
+
+/* Direct access to XPRAM on PowerMacs */
+extern u8 pmac_xpram_read(int xpaddr);
+extern void pmac_xpram_write(int xpaddr, u8 data);
+
+/* Synchronize NVRAM */
+extern void nvram_sync(void);
+
+/* Normal access to NVRAM */
+extern unsigned char nvram_read_byte(int i);
+extern void nvram_write_byte(unsigned char c, int i);
+#endif
+
+/* Some offsets in XPRAM */
+#define PMAC_XPRAM_MACHINE_LOC 0xe4
+#define PMAC_XPRAM_SOUND_VOLUME 0x08
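+
+/*
+ * Usage sketch (illustrative only): reading and writing back the stored
+ * sound volume on a PowerMac, then flushing the NVRAM image. "vol" is a
+ * local placeholder.
+ *
+ *	u8 vol = pmac_xpram_read(PMAC_XPRAM_SOUND_VOLUME);
+ *	pmac_xpram_write(PMAC_XPRAM_SOUND_VOLUME, vol);
+ *	nvram_sync();
+ */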
+
+/* Machine location structure in PowerMac XPRAM */
+struct pmac_machine_location {
+ unsigned int latitude; /* 2+30 bit Fractional number */
+ unsigned int longitude; /* 2+30 bit Fractional number */
+ unsigned int delta; /* mix of GMT delta and DLS */
+};
+
+/*
+ * /dev/nvram ioctls
+ *
+ * Note that PMAC_NVRAM_GET_OFFSET is still supported, but is
+ * definitely obsolete. Do not use it if you can avoid it
+ */
+
+#define OBSOLETE_PMAC_NVRAM_GET_OFFSET \
+ _IOWR('p', 0x40, int)
+
+#define IOC_NVRAM_GET_OFFSET _IOWR('p', 0x42, int) /* Get NVRAM partition offset */
+#define IOC_NVRAM_SYNC _IO('p', 0x43) /* Sync NVRAM image */
+
+#endif /* _ASM_POWERPC_NVRAM_H */
diff --git a/arch/powerpc/include/asm/of_device.h b/arch/powerpc/include/asm/of_device.h
new file mode 100644
index 0000000..3c12399
--- /dev/null
+++ b/arch/powerpc/include/asm/of_device.h
@@ -0,0 +1,31 @@
+#ifndef _ASM_POWERPC_OF_DEVICE_H
+#define _ASM_POWERPC_OF_DEVICE_H
+#ifdef __KERNEL__
+
+#include <linux/device.h>
+#include <linux/of.h>
+
+/*
+ * The of_device is a kind of "base class" that is a superset of
+ * struct device for use by devices attached to an OF node and
+ * probed using OF properties.
+ */
+struct of_device
+{
+ struct device_node *node; /* to be obsoleted */
+ u64 dma_mask; /* DMA mask */
+ struct device dev; /* Generic device interface */
+};
+
+extern struct of_device *of_device_alloc(struct device_node *np,
+ const char *bus_id,
+ struct device *parent);
+
+extern int of_device_uevent(struct device *dev,
+ struct kobj_uevent_env *env);
+
+/* This is just here during the transition */
+#include <linux/of_device.h>
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_OF_DEVICE_H */
diff --git a/arch/powerpc/include/asm/of_platform.h b/arch/powerpc/include/asm/of_platform.h
new file mode 100644
index 0000000..18659ef
--- /dev/null
+++ b/arch/powerpc/include/asm/of_platform.h
@@ -0,0 +1,42 @@
+#ifndef _ASM_POWERPC_OF_PLATFORM_H
+#define _ASM_POWERPC_OF_PLATFORM_H
+/*
+ * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+/* This is just here during the transition */
+#include <linux/of_platform.h>
+
+/* Platform drivers register/unregister */
+static inline int of_register_platform_driver(struct of_platform_driver *drv)
+{
+ return of_register_driver(drv, &of_platform_bus_type);
+}
+static inline void of_unregister_platform_driver(struct of_platform_driver *drv)
+{
+ of_unregister_driver(drv);
+}
+
+/* Platform devices and busses creation */
+extern struct of_device *of_platform_device_create(struct device_node *np,
+ const char *bus_id,
+ struct device *parent);
+/* pseudo "matches" value to not do deep probe */
+#define OF_NO_DEEP_PROBE ((struct of_device_id *)-1)
+
+extern int of_platform_bus_probe(struct device_node *root,
+ const struct of_device_id *matches,
+ struct device *parent);
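+
+/*
+ * Usage sketch (illustrative only): a platform setup file typically publishes
+ * its devices by probing the tree with a small match table. The compatible
+ * string and the function name below are placeholders, not taken from a real
+ * platform.
+ *
+ *	static struct of_device_id my_bus_ids[] __initdata = {
+ *		{ .compatible = "simple-bus", },
+ *		{},
+ *	};
+ *
+ *	static int __init my_publish_devices(void)
+ *	{
+ *		return of_platform_bus_probe(NULL, my_bus_ids, NULL);
+ *	}
+ */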
+
+extern struct of_device *of_find_device_by_phandle(phandle ph);
+
+extern void of_instantiate_rtc(void);
+
+#endif /* _ASM_POWERPC_OF_PLATFORM_H */
diff --git a/arch/powerpc/include/asm/ohare.h b/arch/powerpc/include/asm/ohare.h
new file mode 100644
index 0000000..0d030f9
--- /dev/null
+++ b/arch/powerpc/include/asm/ohare.h
@@ -0,0 +1,54 @@
+#ifndef _ASM_POWERPC_OHARE_H
+#define _ASM_POWERPC_OHARE_H
+#ifdef __KERNEL__
+/*
+ * ohare.h: definitions for using the "O'Hare" I/O controller chip.
+ *
+ * Copyright (C) 1997 Paul Mackerras.
+ *
+ * BenH: Changed to match those of heathrow (but not all of them). Please
+ * check if I didn't break anything (especially the media bay).
+ */
+
+/* offset from ohare base for feature control register */
+#define OHARE_MBCR 0x34
+#define OHARE_FCR 0x38
+
+/*
+ * Bits in feature control register.
+ * These were mostly derived by experiment on a powerbook 3400
+ * and may differ for other machines.
+ */
+#define OH_SCC_RESET 1
+#define OH_BAY_POWER_N 2 /* a guess */
+#define OH_BAY_PCI_ENABLE 4 /* a guess */
+#define OH_BAY_IDE_ENABLE 8
+#define OH_BAY_FLOPPY_ENABLE 0x10
+#define OH_IDE0_ENABLE 0x20
+#define OH_IDE0_RESET_N 0x40 /* a guess */
+#define OH_BAY_DEV_MASK 0x1c
+#define OH_BAY_RESET_N 0x80
+#define OH_IOBUS_ENABLE 0x100 /* IOBUS seems to be IDE */
+#define OH_SCC_ENABLE 0x200
+#define OH_MESH_ENABLE 0x400
+#define OH_FLOPPY_ENABLE 0x800
+#define OH_SCCA_IO 0x4000
+#define OH_SCCB_IO 0x8000
+#define OH_VIA_ENABLE 0x10000 /* Is apparently wrong, to be verified */
+#define OH_IDE1_RESET_N 0x800000
+
+/*
+ * Bits to set in the feature control register on PowerBooks.
+ */
+#define PBOOK_FEATURES (OH_IDE_ENABLE | OH_SCC_ENABLE | \
+ OH_MESH_ENABLE | OH_SCCA_IO | OH_SCCB_IO)
+
+/*
+ * A magic value to put into the feature control register of the
+ * "ohare" I/O controller on Starmaxes to enable the IDE CD interface.
+ * Contributed by Harry Eaton.
+ */
+#define STARMAX_FEATURES 0xbeff7a
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_OHARE_H */
diff --git a/arch/powerpc/include/asm/oprofile_impl.h b/arch/powerpc/include/asm/oprofile_impl.h
new file mode 100644
index 0000000..95035c6
--- /dev/null
+++ b/arch/powerpc/include/asm/oprofile_impl.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
+ *
+ * Based on alpha version.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_POWERPC_OPROFILE_IMPL_H
+#define _ASM_POWERPC_OPROFILE_IMPL_H
+#ifdef __KERNEL__
+
+#define OP_MAX_COUNTER 8
+
+/* Per-counter configuration as set via oprofilefs. */
+struct op_counter_config {
+ unsigned long enabled;
+ unsigned long event;
+ unsigned long count;
+ /* Classic doesn't support per-counter user/kernel selection */
+ unsigned long kernel;
+ unsigned long user;
+ unsigned long unit_mask;
+};
+
+/* System-wide configuration as set via oprofilefs. */
+struct op_system_config {
+#ifdef CONFIG_PPC64
+ unsigned long mmcr0;
+ unsigned long mmcr1;
+ unsigned long mmcra;
+#endif
+ unsigned long enable_kernel;
+ unsigned long enable_user;
+};
+
+/* Per-arch configuration */
+struct op_powerpc_model {
+ int (*reg_setup) (struct op_counter_config *,
+ struct op_system_config *,
+ int num_counters);
+ int (*cpu_setup) (struct op_counter_config *);
+ int (*start) (struct op_counter_config *);
+ int (*global_start) (struct op_counter_config *);
+ void (*stop) (void);
+ void (*global_stop) (void);
+ int (*sync_start)(void);
+ int (*sync_stop)(void);
+ void (*handle_interrupt) (struct pt_regs *,
+ struct op_counter_config *);
+ int num_counters;
+};
+
+extern struct op_powerpc_model op_model_fsl_emb;
+extern struct op_powerpc_model op_model_rs64;
+extern struct op_powerpc_model op_model_power4;
+extern struct op_powerpc_model op_model_7450;
+extern struct op_powerpc_model op_model_cell;
+extern struct op_powerpc_model op_model_pa6t;
+
+
+/* All the classic PPC parts use these */
+static inline unsigned int classic_ctr_read(unsigned int i)
+{
+ switch(i) {
+ case 0:
+ return mfspr(SPRN_PMC1);
+ case 1:
+ return mfspr(SPRN_PMC2);
+ case 2:
+ return mfspr(SPRN_PMC3);
+ case 3:
+ return mfspr(SPRN_PMC4);
+ case 4:
+ return mfspr(SPRN_PMC5);
+ case 5:
+ return mfspr(SPRN_PMC6);
+
+/* No PPC32 chip has more than 6 so far */
+#ifdef CONFIG_PPC64
+ case 6:
+ return mfspr(SPRN_PMC7);
+ case 7:
+ return mfspr(SPRN_PMC8);
+#endif
+ default:
+ return 0;
+ }
+}
+
+static inline void classic_ctr_write(unsigned int i, unsigned int val)
+{
+ switch(i) {
+ case 0:
+ mtspr(SPRN_PMC1, val);
+ break;
+ case 1:
+ mtspr(SPRN_PMC2, val);
+ break;
+ case 2:
+ mtspr(SPRN_PMC3, val);
+ break;
+ case 3:
+ mtspr(SPRN_PMC4, val);
+ break;
+ case 4:
+ mtspr(SPRN_PMC5, val);
+ break;
+ case 5:
+ mtspr(SPRN_PMC6, val);
+ break;
+
+/* No PPC32 chip has more than 6, yet */
+#ifdef CONFIG_PPC64
+ case 6:
+ mtspr(SPRN_PMC7, val);
+ break;
+ case 7:
+ mtspr(SPRN_PMC8, val);
+ break;
+#endif
+ default:
+ break;
+ }
+}
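+
+/*
+ * Usage sketch (illustrative only): a model's interrupt handler typically
+ * walks the counters, records a sample for each counter that has overflowed
+ * (MSB set) and rewrites it with its reset value. "ctr", "reset_value",
+ * "pc", "regs" and "is_kernel" come from the surrounding handler and are
+ * placeholders here.
+ *
+ *	for (i = 0; i < num_counters; i++) {
+ *		val = classic_ctr_read(i);
+ *		if (ctr[i].enabled && val >= 0x80000000u) {
+ *			oprofile_add_ext_sample(pc, regs, i, is_kernel);
+ *			classic_ctr_write(i, reset_value[i]);
+ *		}
+ *	}
+ */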
+
+
+extern void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth);
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_OPROFILE_IMPL_H */
diff --git a/arch/powerpc/include/asm/pSeries_reconfig.h b/arch/powerpc/include/asm/pSeries_reconfig.h
new file mode 100644
index 0000000..e482e53
--- /dev/null
+++ b/arch/powerpc/include/asm/pSeries_reconfig.h
@@ -0,0 +1,29 @@
+#ifndef _PPC64_PSERIES_RECONFIG_H
+#define _PPC64_PSERIES_RECONFIG_H
+#ifdef __KERNEL__
+
+#include <linux/notifier.h>
+
+/*
+ * Use this API if your code needs to know about OF device nodes being
+ * added or removed on pSeries systems.
+ */
+
+#define PSERIES_RECONFIG_ADD 0x0001
+#define PSERIES_RECONFIG_REMOVE 0x0002
+#define PSERIES_DRCONF_MEM_ADD 0x0003
+#define PSERIES_DRCONF_MEM_REMOVE 0x0004
+
+#ifdef CONFIG_PPC_PSERIES
+extern int pSeries_reconfig_notifier_register(struct notifier_block *);
+extern void pSeries_reconfig_notifier_unregister(struct notifier_block *);
+#else /* !CONFIG_PPC_PSERIES */
+static inline int pSeries_reconfig_notifier_register(struct notifier_block *nb)
+{
+ return 0;
+}
+static inline void pSeries_reconfig_notifier_unregister(struct notifier_block *nb) { }
+#endif /* CONFIG_PPC_PSERIES */
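+
+/*
+ * Usage sketch (illustrative only): a consumer that caches device-tree state
+ * registers a notifier and handles the action codes above. The callback and
+ * notifier_block names are placeholders.
+ *
+ *	static int my_reconfig_notify(struct notifier_block *nb,
+ *				      unsigned long action, void *node)
+ *	{
+ *		switch (action) {
+ *		case PSERIES_RECONFIG_ADD:
+ *		case PSERIES_RECONFIG_REMOVE:
+ *			... update any cached state for the device_node ...
+ *			break;
+ *		}
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block my_reconfig_nb = {
+ *		.notifier_call = my_reconfig_notify,
+ *	};
+ *
+ *	pSeries_reconfig_notifier_register(&my_reconfig_nb);
+ */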
+
+#endif /* __KERNEL__ */
+#endif /* _PPC64_PSERIES_RECONFIG_H */
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
new file mode 100644
index 0000000..6493a39
--- /dev/null
+++ b/arch/powerpc/include/asm/paca.h
@@ -0,0 +1,112 @@
+/*
+ * This control block defines the PACA, which holds the processor-specific
+ * data for each logical processor on the system.
+ * There are some pointers defined that are utilized by PLIC.
+ *
+ * C 2001 PPC 64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_POWERPC_PACA_H
+#define _ASM_POWERPC_PACA_H
+#ifdef __KERNEL__
+
+#include <asm/types.h>
+#include <asm/lppaca.h>
+#include <asm/mmu.h>
+
+register struct paca_struct *local_paca asm("r13");
+
+#if defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_SMP)
+extern unsigned int debug_smp_processor_id(void); /* from linux/smp.h */
+/*
+ * Add standard checks that preemption cannot occur when using get_paca():
+ * otherwise the paca_struct it points to may be the wrong one just after.
+ */
+#define get_paca() ((void) debug_smp_processor_id(), local_paca)
+#else
+#define get_paca() local_paca
+#endif
+
+#define get_lppaca() (get_paca()->lppaca_ptr)
+#define get_slb_shadow() (get_paca()->slb_shadow_ptr)
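+
+/*
+ * Usage sketch (illustrative only): per-processor state is normally reached
+ * through the accessors above rather than by indexing paca[] directly, e.g.
+ *
+ *	int hwid = get_paca()->hw_cpu_id;
+ *	struct lppaca *lp = get_lppaca();
+ *
+ * Callers must keep preemption disabled around such accesses so that r13
+ * keeps pointing at the same processor's paca throughout.
+ */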
+
+struct task_struct;
+
+/*
+ * Defines the layout of the paca.
+ *
+ * This structure is not directly accessed by firmware or the service
+ * processor.
+ */
+struct paca_struct {
+ /*
+ * Because hw_cpu_id, unlike other paca fields, is accessed
+ * routinely from other CPUs (from the IRQ code), we stick to
+ * read-only (after boot) fields in the first cacheline to
+ * avoid cacheline bouncing.
+ */
+
+ struct lppaca *lppaca_ptr; /* Pointer to LpPaca for PLIC */
+
+ /*
+ * MAGIC: the spinlock functions in arch/powerpc/lib/locks.c
+ * load lock_token and paca_index with a single lwz
+ * instruction. They must travel together and be properly
+ * aligned.
+ */
+ u16 lock_token; /* Constant 0x8000, used in locks */
+ u16 paca_index; /* Logical processor number */
+
+ u64 kernel_toc; /* Kernel TOC address */
+ u64 stab_real; /* Absolute address of segment table */
+ u64 stab_addr; /* Virtual address of segment table */
+ void *emergency_sp; /* pointer to emergency stack */
+ u64 data_offset; /* per cpu data offset */
+ s16 hw_cpu_id; /* Physical processor number */
+ u8 cpu_start; /* At startup, processor spins until */
+ /* this becomes non-zero. */
+ struct slb_shadow *slb_shadow_ptr;
+
+ /*
+ * Now, starting in cacheline 2, the exception save areas
+ */
+ /* used for most interrupts/exceptions */
+ u64 exgen[10] __attribute__((aligned(0x80)));
+ u64 exmc[10]; /* used for machine checks */
+ u64 exslb[10]; /* used for SLB/segment table misses
+ * on the linear mapping */
+
+ mm_context_t context;
+ u16 vmalloc_sllp;
+ u16 slb_cache_ptr;
+ u16 slb_cache[SLB_CACHE_ENTRIES];
+
+ /*
+ * then miscellaneous read-write fields
+ */
+ struct task_struct *__current; /* Pointer to current */
+ u64 kstack; /* Saved Kernel stack addr */
+ u64 stab_rr; /* stab/slb round-robin counter */
+ u64 saved_r1; /* r1 save for RTAS calls */
+ u64 saved_msr; /* MSR saved here by enter_rtas */
+ u16 trap_save; /* Used when bad stack is encountered */
+ u8 soft_enabled; /* irq soft-enable flag */
+ u8 hard_enabled; /* set if irqs are enabled in MSR */
+ u8 io_sync; /* writel() needs spin_unlock sync */
+
+ /* Stuff for accurate time accounting */
+ u64 user_time; /* accumulated usermode TB ticks */
+ u64 system_time; /* accumulated system TB ticks */
+ u64 startpurr; /* PURR/TB value snapshot */
+ u64 startspurr; /* SPURR value snapshot */
+};
+
+extern struct paca_struct paca[];
+extern void initialise_pacas(void);
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_PACA_H */
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
new file mode 100644
index 0000000..e088545
--- /dev/null
+++ b/arch/powerpc/include/asm/page.h
@@ -0,0 +1,225 @@
+#ifndef _ASM_POWERPC_PAGE_H
+#define _ASM_POWERPC_PAGE_H
+
+/*
+ * Copyright (C) 2001,2005 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/asm-compat.h>
+#include <asm/kdump.h>
+#include <asm/types.h>
+
+/*
+ * On PPC32 the page size is 4K. For PPC64 we support either a 4K or a 64K software
+ * page size. When using 64K pages however, whether we are really supporting
+ * 64K pages in HW or not is irrelevant to those definitions.
+ */
+#ifdef CONFIG_PPC_64K_PAGES
+#define PAGE_SHIFT 16
+#else
+#define PAGE_SHIFT 12
+#endif
+
+#define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT)
+
+/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
+#define __HAVE_ARCH_GATE_AREA 1
+
+/*
+ * Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long. So if we
+ * assign PAGE_MASK to a larger type it gets extended the way we want
+ * (i.e. with 1s in the high bits)
+ */
+#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
+
+/*
+ * KERNELBASE is the virtual address of the start of the kernel, it's often
+ * the same as PAGE_OFFSET, but _might not be_.
+ *
+ * The kdump dump kernel is one example where KERNELBASE != PAGE_OFFSET.
+ *
+ * PAGE_OFFSET is the virtual address of the start of lowmem.
+ *
+ * PHYSICAL_START is the physical address of the start of the kernel.
+ *
+ * MEMORY_START is the physical address of the start of lowmem.
+ *
+ * KERNELBASE, PAGE_OFFSET, and PHYSICAL_START are all configurable on
+ * ppc32 and based on how they are set we determine MEMORY_START.
+ *
+ * For the linear mapping the following equation should be true:
+ * KERNELBASE - PAGE_OFFSET = PHYSICAL_START - MEMORY_START
+ *
+ * Also, KERNELBASE >= PAGE_OFFSET and PHYSICAL_START >= MEMORY_START
+ *
+ * There are two ways to determine a physical address from a virtual one:
+ * va = pa + PAGE_OFFSET - MEMORY_START
+ * va = pa + KERNELBASE - PHYSICAL_START
+ *
+ * If you want to know something's offset from the start of the kernel you
+ * should subtract KERNELBASE.
+ *
+ * If you want to test if something's a kernel address, use is_kernel_addr().
+ */
+
+#define KERNELBASE ASM_CONST(CONFIG_KERNEL_START)
+#define PAGE_OFFSET ASM_CONST(CONFIG_PAGE_OFFSET)
+#define LOAD_OFFSET ASM_CONST((CONFIG_KERNEL_START-CONFIG_PHYSICAL_START))
+
+#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_FLATMEM)
+#ifndef __ASSEMBLY__
+extern phys_addr_t memstart_addr;
+extern phys_addr_t kernstart_addr;
+#endif
+#define PHYSICAL_START kernstart_addr
+#define MEMORY_START memstart_addr
+#else
+#define PHYSICAL_START ASM_CONST(CONFIG_PHYSICAL_START)
+#define MEMORY_START (PHYSICAL_START + PAGE_OFFSET - KERNELBASE)
+#endif
+
+#ifdef CONFIG_FLATMEM
+#define ARCH_PFN_OFFSET (MEMORY_START >> PAGE_SHIFT)
+#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && (pfn) < (ARCH_PFN_OFFSET + max_mapnr))
+#endif
+
+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+
+#define __va(x) ((void *)((unsigned long)(x) - PHYSICAL_START + KERNELBASE))
+#define __pa(x) ((unsigned long)(x) + PHYSICAL_START - KERNELBASE)
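+
+/*
+ * Worked example (assuming the common non-relocatable ppc32 defaults of
+ * KERNELBASE = 0xc0000000 and PHYSICAL_START = 0):
+ *
+ *	__pa(0xc0001000) == 0x00001000
+ *	__va(0x00001000) == (void *)0xc0001000
+ */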
+
+/*
+ * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
+ * and needs to be executable. This means the whole heap ends
+ * up being executable.
+ */
+#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#ifdef __powerpc64__
+#include <asm/page_64.h>
+#else
+#include <asm/page_32.h>
+#endif
+
+/* align addr on a size boundary - adjust address up/down if needed */
+#define _ALIGN_UP(addr,size) (((addr)+((size)-1))&(~((size)-1)))
+#define _ALIGN_DOWN(addr,size) ((addr)&(~((size)-1)))
+
+/* align addr on a size boundary - adjust address up if needed */
+#define _ALIGN(addr,size) _ALIGN_UP(addr,size)
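+
+/*
+ * For example, aligning to a 4K boundary:
+ *
+ *	_ALIGN_UP(0x1234, 0x1000)   == 0x2000
+ *	_ALIGN_DOWN(0x1234, 0x1000) == 0x1000
+ */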
+
+/*
+ * Don't compare things with KERNELBASE or PAGE_OFFSET to test for
+ * "kernelness", use is_kernel_addr() - it should do what you want.
+ */
+#define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
+
+#ifndef __ASSEMBLY__
+
+#undef STRICT_MM_TYPECHECKS
+
+#ifdef STRICT_MM_TYPECHECKS
+/* These are used to make use of C type-checking. */
+
+/* PTE level */
+typedef struct { pte_basic_t pte; } pte_t;
+#define pte_val(x) ((x).pte)
+#define __pte(x) ((pte_t) { (x) })
+
+/* 64k pages additionally define a bigger "real PTE" type that gathers
+ * the "second half" part of the PTE for pseudo 64k pages
+ */
+#ifdef CONFIG_PPC_64K_PAGES
+typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
+#else
+typedef struct { pte_t pte; } real_pte_t;
+#endif
+
+/* PMD level */
+#ifdef CONFIG_PPC64
+typedef struct { unsigned long pmd; } pmd_t;
+#define pmd_val(x) ((x).pmd)
+#define __pmd(x) ((pmd_t) { (x) })
+
+/* The PUD level exists only with 4k pages */
+#ifndef CONFIG_PPC_64K_PAGES
+typedef struct { unsigned long pud; } pud_t;
+#define pud_val(x) ((x).pud)
+#define __pud(x) ((pud_t) { (x) })
+#endif /* !CONFIG_PPC_64K_PAGES */
+#endif /* CONFIG_PPC64 */
+
+/* PGD level */
+typedef struct { unsigned long pgd; } pgd_t;
+#define pgd_val(x) ((x).pgd)
+#define __pgd(x) ((pgd_t) { (x) })
+
+/* Page protection bits */
+typedef struct { unsigned long pgprot; } pgprot_t;
+#define pgprot_val(x) ((x).pgprot)
+#define __pgprot(x) ((pgprot_t) { (x) })
+
+#else
+
+/*
+ * .. while these make it easier on the compiler
+ */
+
+typedef pte_basic_t pte_t;
+#define pte_val(x) (x)
+#define __pte(x) (x)
+
+#ifdef CONFIG_PPC_64K_PAGES
+typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
+#else
+typedef unsigned long real_pte_t;
+#endif
+
+
+#ifdef CONFIG_PPC64
+typedef unsigned long pmd_t;
+#define pmd_val(x) (x)
+#define __pmd(x) (x)
+
+#ifndef CONFIG_PPC_64K_PAGES
+typedef unsigned long pud_t;
+#define pud_val(x) (x)
+#define __pud(x) (x)
+#endif /* !CONFIG_PPC_64K_PAGES */
+#endif /* CONFIG_PPC64 */
+
+typedef unsigned long pgd_t;
+#define pgd_val(x) (x)
+#define pgprot_val(x) (x)
+
+typedef unsigned long pgprot_t;
+#define __pgd(x) (x)
+#define __pgprot(x) (x)
+
+#endif
+
+struct page;
+extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
+extern void copy_user_page(void *to, void *from, unsigned long vaddr,
+ struct page *p);
+extern int page_is_ram(unsigned long pfn);
+
+struct vm_area_struct;
+
+typedef struct page *pgtable_t;
+
+#include <asm-generic/memory_model.h>
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_PAGE_H */
diff --git a/arch/powerpc/include/asm/page_32.h b/arch/powerpc/include/asm/page_32.h
new file mode 100644
index 0000000..ebfae53
--- /dev/null
+++ b/arch/powerpc/include/asm/page_32.h
@@ -0,0 +1,38 @@
+#ifndef _ASM_POWERPC_PAGE_32_H
+#define _ASM_POWERPC_PAGE_32_H
+
+#if defined(CONFIG_PHYSICAL_ALIGN) && (CONFIG_PHYSICAL_START != 0)
+#if (CONFIG_PHYSICAL_START % CONFIG_PHYSICAL_ALIGN) != 0
+#error "CONFIG_PHYSICAL_START must be a multiple of CONFIG_PHYSICAL_ALIGN"
+#endif
+#endif
+
+#define VM_DATA_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS32
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+#endif
+
+#ifndef __ASSEMBLY__
+/*
+ * The basic type of a PTE - 64 bits for those CPUs with > 32 bit
+ * physical addressing. For now this is just the IBM PPC440.
+ */
+#ifdef CONFIG_PTE_64BIT
+typedef unsigned long long pte_basic_t;
+#define PTE_SHIFT (PAGE_SHIFT - 3) /* 512 ptes per page */
+#else
+typedef unsigned long pte_basic_t;
+#define PTE_SHIFT (PAGE_SHIFT - 2) /* 1024 ptes per page */
+#endif
+
+struct page;
+extern void clear_pages(void *page, int order);
+static inline void clear_page(void *page) { clear_pages(page, 0); }
+extern void copy_page(void *to, void *from);
+
+#include <asm-generic/page.h>
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_PAGE_32_H */
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
new file mode 100644
index 0000000..043bfdf
--- /dev/null
+++ b/arch/powerpc/include/asm/page_64.h
@@ -0,0 +1,185 @@
+#ifndef _ASM_POWERPC_PAGE_64_H
+#define _ASM_POWERPC_PAGE_64_H
+
+/*
+ * Copyright (C) 2001 PPC64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/*
+ * We always define HW_PAGE_SHIFT to 12 as use of 64K pages remains Linux
+ * specific; every notion of a page number shared with the firmware (TCEs,
+ * iommu, etc.) still uses a page size of 4K.
+ */
+#define HW_PAGE_SHIFT 12
+#define HW_PAGE_SIZE (ASM_CONST(1) << HW_PAGE_SHIFT)
+#define HW_PAGE_MASK (~(HW_PAGE_SIZE-1))
+
+/*
+ * PAGE_FACTOR is the shift difference (in bits) between PAGE_SHIFT and
+ * HW_PAGE_SHIFT, i.e. relative to the 4K hardware page size.
+ */
+#define PAGE_FACTOR (PAGE_SHIFT - HW_PAGE_SHIFT)
+
+/* Segment size; normal 256M segments */
+#define SID_SHIFT 28
+#define SID_MASK ASM_CONST(0xfffffffff)
+#define ESID_MASK 0xfffffffff0000000UL
+#define GET_ESID(x) (((x) >> SID_SHIFT) & SID_MASK)
+
+/* 1T segments */
+#define SID_SHIFT_1T 40
+#define SID_MASK_1T 0xffffffUL
+#define ESID_MASK_1T 0xffffff0000000000UL
+#define GET_ESID_1T(x) (((x) >> SID_SHIFT_1T) & SID_MASK_1T)
+
+#ifndef __ASSEMBLY__
+#include <asm/cache.h>
+
+typedef unsigned long pte_basic_t;
+
+static __inline__ void clear_page(void *addr)
+{
+ unsigned long lines, line_size;
+
+ line_size = ppc64_caches.dline_size;
+ lines = ppc64_caches.dlines_per_page;
+
+ __asm__ __volatile__(
+ "mtctr %1 # clear_page\n\
+1: dcbz 0,%0\n\
+ add %0,%0,%3\n\
+ bdnz+ 1b"
+ : "=r" (addr)
+ : "r" (lines), "0" (addr), "r" (line_size)
+ : "ctr", "memory");
+}
+
+extern void copy_4K_page(void *to, void *from);
+
+#ifdef CONFIG_PPC_64K_PAGES
+static inline void copy_page(void *to, void *from)
+{
+ unsigned int i;
+ for (i=0; i < (1 << (PAGE_SHIFT - 12)); i++) {
+ copy_4K_page(to, from);
+ to += 4096;
+ from += 4096;
+ }
+}
+#else /* CONFIG_PPC_64K_PAGES */
+static inline void copy_page(void *to, void *from)
+{
+ copy_4K_page(to, from);
+}
+#endif /* CONFIG_PPC_64K_PAGES */
+
+/* Log 2 of page table size */
+extern u64 ppc64_pft_size;
+
+/* Large pages size */
+#ifdef CONFIG_HUGETLB_PAGE
+extern unsigned int HPAGE_SHIFT;
+#else
+#define HPAGE_SHIFT PAGE_SHIFT
+#endif
+#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+#define HUGE_MAX_HSTATE 3
+
+#endif /* __ASSEMBLY__ */
+
+#ifdef CONFIG_PPC_MM_SLICES
+
+#define SLICE_LOW_SHIFT 28
+#define SLICE_HIGH_SHIFT 40
+
+#define SLICE_LOW_TOP (0x100000000ul)
+#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
+#define SLICE_NUM_HIGH (PGTABLE_RANGE >> SLICE_HIGH_SHIFT)
+
+#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT)
+#define GET_HIGH_SLICE_INDEX(addr) ((addr) >> SLICE_HIGH_SHIFT)
+
+#ifndef __ASSEMBLY__
+
+struct slice_mask {
+ u16 low_slices;
+ u16 high_slices;
+};
+
+struct mm_struct;
+
+extern unsigned long slice_get_unmapped_area(unsigned long addr,
+ unsigned long len,
+ unsigned long flags,
+ unsigned int psize,
+ int topdown,
+ int use_cache);
+
+extern unsigned int get_slice_psize(struct mm_struct *mm,
+ unsigned long addr);
+
+extern void slice_init_context(struct mm_struct *mm, unsigned int psize);
+extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize);
+extern void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
+ unsigned long len, unsigned int psize);
+
+#define slice_mm_new_context(mm) ((mm)->context.id == 0)
+
+#endif /* __ASSEMBLY__ */
+#else
+#define slice_init()
+#define get_slice_psize(mm, addr) ((mm)->context.user_psize)
+#define slice_set_user_psize(mm, psize) \
+do { \
+ (mm)->context.user_psize = (psize); \
+ (mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \
+} while (0)
+#define slice_set_range_psize(mm, start, len, psize) \
+ slice_set_user_psize((mm), (psize))
+#define slice_mm_new_context(mm) 1
+#endif /* CONFIG_PPC_MM_SLICES */
+
+#ifdef CONFIG_HUGETLB_PAGE
+
+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+
+#endif /* CONFIG_HUGETLB_PAGE */
+
+#ifdef MODULE
+#define __page_aligned __attribute__((__aligned__(PAGE_SIZE)))
+#else
+#define __page_aligned \
+ __attribute__((__aligned__(PAGE_SIZE), \
+ __section__(".data.page_aligned")))
+#endif
+
+#define VM_DATA_DEFAULT_FLAGS \
+ (test_thread_flag(TIF_32BIT) ? \
+ VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)
+
+/*
+ * This is the default if a program doesn't have a PT_GNU_STACK
+ * program header entry. The PPC64 ELF ABI has a non-executable stack
+ * by default, so in the absence of a PT_GNU_STACK program header
+ * we turn execute permission off.
+ */
+#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#define VM_STACK_DEFAULT_FLAGS \
+ (test_thread_flag(TIF_32BIT) ? \
+ VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
+
+#include <asm-generic/page.h>
+
+#endif /* _ASM_POWERPC_PAGE_64_H */
diff --git a/arch/powerpc/include/asm/param.h b/arch/powerpc/include/asm/param.h
new file mode 100644
index 0000000..094f63d
--- /dev/null
+++ b/arch/powerpc/include/asm/param.h
@@ -0,0 +1,22 @@
+#ifndef _ASM_POWERPC_PARAM_H
+#define _ASM_POWERPC_PARAM_H
+
+#ifdef __KERNEL__
+#define HZ CONFIG_HZ /* internal kernel timer frequency */
+#define USER_HZ 100 /* for user interfaces in "ticks" */
+#define CLOCKS_PER_SEC (USER_HZ) /* frequency at which times() counts */
+#endif /* __KERNEL__ */
+
+#ifndef HZ
+#define HZ 100
+#endif
+
+#define EXEC_PAGESIZE 4096
+
+#ifndef NOGROUP
+#define NOGROUP (-1)
+#endif
+
+#define MAXHOSTNAMELEN 64 /* max length of hostname */
+
+#endif /* _ASM_POWERPC_PARAM_H */
diff --git a/arch/powerpc/include/asm/parport.h b/arch/powerpc/include/asm/parport.h
new file mode 100644
index 0000000..414c50e
--- /dev/null
+++ b/arch/powerpc/include/asm/parport.h
@@ -0,0 +1,39 @@
+/*
+ * parport.h: platform-specific PC-style parport initialisation
+ *
+ * Copyright (C) 1999, 2000 Tim Waugh <tim@cyberelk.demon.co.uk>
+ *
+ * This file should only be included by drivers/parport/parport_pc.c.
+ */
+
+#ifndef _ASM_POWERPC_PARPORT_H
+#define _ASM_POWERPC_PARPORT_H
+#ifdef __KERNEL__
+
+#include <asm/prom.h>
+
+static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
+{
+ struct device_node *np;
+ const u32 *prop;
+ u32 io1, io2;
+ int propsize;
+ int count = 0;
+ for (np = NULL; (np = of_find_compatible_node(np,
+ "parallel",
+ "pnpPNP,400")) != NULL;) {
+ prop = of_get_property(np, "reg", &propsize);
+ if (!prop || propsize > 6*sizeof(u32))
+ continue;
+ io1 = prop[1]; io2 = prop[2];
+ prop = of_get_property(np, "interrupts", NULL);
+ if (!prop)
+ continue;
+ if (parport_pc_probe_port(io1, io2, prop[0], autodma, NULL) != NULL)
+ count++;
+ }
+ return count;
+}
+
+#endif /* __KERNEL__ */
+#endif /* !(_ASM_POWERPC_PARPORT_H) */
diff --git a/arch/powerpc/include/asm/pasemi_dma.h b/arch/powerpc/include/asm/pasemi_dma.h
new file mode 100644
index 0000000..19fd793
--- /dev/null
+++ b/arch/powerpc/include/asm/pasemi_dma.h
@@ -0,0 +1,538 @@
+/*
+ * Copyright (C) 2006-2008 PA Semi, Inc
+ *
+ * Hardware register layout and descriptor formats for the on-board
+ * DMA engine on PA Semi PWRficient. Used by ethernet, function and security
+ * drivers.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef ASM_PASEMI_DMA_H
+#define ASM_PASEMI_DMA_H
+
+/* status register layout in IOB region, at 0xfb800000 */
+struct pasdma_status {
+ u64 rx_sta[64]; /* RX channel status */
+ u64 tx_sta[20]; /* TX channel status */
+};
+
+
+/* All these registers live in the PCI configuration space for the DMA PCI
+ * device. Use the normal PCI config access functions for them.
+ */
+enum {
+ PAS_DMA_CAP_TXCH = 0x44, /* Transmit Channel Info */
+ PAS_DMA_CAP_RXCH = 0x48, /* Receive Channel Info */
+ PAS_DMA_CAP_IFI = 0x4c, /* Interface Info */
+ PAS_DMA_COM_TXCMD = 0x100, /* Transmit Command Register */
+ PAS_DMA_COM_TXSTA = 0x104, /* Transmit Status Register */
+ PAS_DMA_COM_RXCMD = 0x108, /* Receive Command Register */
+ PAS_DMA_COM_RXSTA = 0x10c, /* Receive Status Register */
+ PAS_DMA_COM_CFG = 0x114, /* Common config reg */
+ PAS_DMA_TXF_SFLG0 = 0x140, /* Set flags */
+ PAS_DMA_TXF_SFLG1 = 0x144, /* Set flags */
+ PAS_DMA_TXF_CFLG0 = 0x148, /* Clear flags */
+ PAS_DMA_TXF_CFLG1 = 0x14c, /* Clear flags */
+};
+
+
+#define PAS_DMA_CAP_TXCH_TCHN_M 0x00ff0000 /* # of TX channels */
+#define PAS_DMA_CAP_TXCH_TCHN_S 16
+
+#define PAS_DMA_CAP_RXCH_RCHN_M 0x00ff0000 /* # of RX channels */
+#define PAS_DMA_CAP_RXCH_RCHN_S 16
+
+#define PAS_DMA_CAP_IFI_IOFF_M 0xff000000 /* Cfg reg for intf pointers */
+#define PAS_DMA_CAP_IFI_IOFF_S 24
+#define PAS_DMA_CAP_IFI_NIN_M 0x00ff0000 /* # of interfaces */
+#define PAS_DMA_CAP_IFI_NIN_S 16
+
+#define PAS_DMA_COM_TXCMD_EN 0x00000001 /* enable */
+#define PAS_DMA_COM_TXSTA_ACT 0x00000001 /* active */
+#define PAS_DMA_COM_RXCMD_EN 0x00000001 /* enable */
+#define PAS_DMA_COM_RXSTA_ACT 0x00000001 /* active */
+
+
+/* Per-interface and per-channel registers */
+#define _PAS_DMA_RXINT_STRIDE 0x20
+#define PAS_DMA_RXINT_RCMDSTA(i) (0x200+(i)*_PAS_DMA_RXINT_STRIDE)
+#define PAS_DMA_RXINT_RCMDSTA_EN 0x00000001
+#define PAS_DMA_RXINT_RCMDSTA_ST 0x00000002
+#define PAS_DMA_RXINT_RCMDSTA_MBT 0x00000008
+#define PAS_DMA_RXINT_RCMDSTA_MDR 0x00000010
+#define PAS_DMA_RXINT_RCMDSTA_MOO 0x00000020
+#define PAS_DMA_RXINT_RCMDSTA_MBP 0x00000040
+#define PAS_DMA_RXINT_RCMDSTA_BT 0x00000800
+#define PAS_DMA_RXINT_RCMDSTA_DR 0x00001000
+#define PAS_DMA_RXINT_RCMDSTA_OO 0x00002000
+#define PAS_DMA_RXINT_RCMDSTA_BP 0x00004000
+#define PAS_DMA_RXINT_RCMDSTA_TB 0x00008000
+#define PAS_DMA_RXINT_RCMDSTA_ACT 0x00010000
+#define PAS_DMA_RXINT_RCMDSTA_DROPS_M 0xfffe0000
+#define PAS_DMA_RXINT_RCMDSTA_DROPS_S 17
+#define PAS_DMA_RXINT_CFG(i) (0x204+(i)*_PAS_DMA_RXINT_STRIDE)
+#define PAS_DMA_RXINT_CFG_RBP 0x80000000
+#define PAS_DMA_RXINT_CFG_ITRR 0x40000000
+#define PAS_DMA_RXINT_CFG_DHL_M 0x07000000
+#define PAS_DMA_RXINT_CFG_DHL_S 24
+#define PAS_DMA_RXINT_CFG_DHL(x) (((x) << PAS_DMA_RXINT_CFG_DHL_S) & \
+ PAS_DMA_RXINT_CFG_DHL_M)
+#define PAS_DMA_RXINT_CFG_ITR 0x00400000
+#define PAS_DMA_RXINT_CFG_LW 0x00200000
+#define PAS_DMA_RXINT_CFG_L2 0x00100000
+#define PAS_DMA_RXINT_CFG_HEN 0x00080000
+#define PAS_DMA_RXINT_CFG_WIF 0x00000002
+#define PAS_DMA_RXINT_CFG_WIL 0x00000001
+
+#define PAS_DMA_RXINT_INCR(i) (0x210+(i)*_PAS_DMA_RXINT_STRIDE)
+#define PAS_DMA_RXINT_INCR_INCR_M 0x0000ffff
+#define PAS_DMA_RXINT_INCR_INCR_S 0
+#define PAS_DMA_RXINT_INCR_INCR(x) ((x) & 0x0000ffff)
+#define PAS_DMA_RXINT_BASEL(i) (0x218+(i)*_PAS_DMA_RXINT_STRIDE)
+#define PAS_DMA_RXINT_BASEL_BRBL(x) ((x) & ~0x3f)
+#define PAS_DMA_RXINT_BASEU(i) (0x21c+(i)*_PAS_DMA_RXINT_STRIDE)
+#define PAS_DMA_RXINT_BASEU_BRBH(x) ((x) & 0xfff)
+#define PAS_DMA_RXINT_BASEU_SIZ_M 0x3fff0000 /* # of cache lines worth of buffer ring */
+#define PAS_DMA_RXINT_BASEU_SIZ_S 16 /* 0 = 16K */
+#define PAS_DMA_RXINT_BASEU_SIZ(x) (((x) << PAS_DMA_RXINT_BASEU_SIZ_S) & \
+ PAS_DMA_RXINT_BASEU_SIZ_M)
+
+
+#define _PAS_DMA_TXCHAN_STRIDE 0x20 /* Size per channel */
+#define _PAS_DMA_TXCHAN_TCMDSTA 0x300 /* Command / Status */
+#define _PAS_DMA_TXCHAN_CFG 0x304 /* Configuration */
+#define _PAS_DMA_TXCHAN_DSCRBU 0x308 /* Descriptor BU Allocation */
+#define _PAS_DMA_TXCHAN_INCR 0x310 /* Descriptor increment */
+#define _PAS_DMA_TXCHAN_CNT 0x314 /* Descriptor count/offset */
+#define _PAS_DMA_TXCHAN_BASEL 0x318 /* Descriptor ring base (low) */
+#define _PAS_DMA_TXCHAN_BASEU 0x31c /* (high) */
+#define PAS_DMA_TXCHAN_TCMDSTA(c) (0x300+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define PAS_DMA_TXCHAN_TCMDSTA_EN 0x00000001 /* Enabled */
+#define PAS_DMA_TXCHAN_TCMDSTA_ST 0x00000002 /* Stop interface */
+#define PAS_DMA_TXCHAN_TCMDSTA_ACT 0x00010000 /* Active */
+#define PAS_DMA_TXCHAN_TCMDSTA_SZ 0x00000800
+#define PAS_DMA_TXCHAN_TCMDSTA_DB 0x00000400
+#define PAS_DMA_TXCHAN_TCMDSTA_DE 0x00000200
+#define PAS_DMA_TXCHAN_TCMDSTA_DA 0x00000100
+#define PAS_DMA_TXCHAN_CFG(c) (0x304+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define PAS_DMA_TXCHAN_CFG_TY_IFACE 0x00000000 /* Type = interface */
+#define PAS_DMA_TXCHAN_CFG_TY_COPY 0x00000001 /* Type = copy only */
+#define PAS_DMA_TXCHAN_CFG_TY_FUNC 0x00000002 /* Type = function */
+#define PAS_DMA_TXCHAN_CFG_TY_XOR 0x00000003 /* Type = xor only */
+#define PAS_DMA_TXCHAN_CFG_TATTR_M 0x0000003c
+#define PAS_DMA_TXCHAN_CFG_TATTR_S 2
+#define PAS_DMA_TXCHAN_CFG_TATTR(x) (((x) << PAS_DMA_TXCHAN_CFG_TATTR_S) & \
+ PAS_DMA_TXCHAN_CFG_TATTR_M)
+#define PAS_DMA_TXCHAN_CFG_LPDQ 0x00000800
+#define PAS_DMA_TXCHAN_CFG_LPSQ 0x00000400
+#define PAS_DMA_TXCHAN_CFG_WT_M 0x000003c0
+#define PAS_DMA_TXCHAN_CFG_WT_S 6
+#define PAS_DMA_TXCHAN_CFG_WT(x) (((x) << PAS_DMA_TXCHAN_CFG_WT_S) & \
+ PAS_DMA_TXCHAN_CFG_WT_M)
+#define PAS_DMA_TXCHAN_CFG_TRD 0x00010000 /* translate data */
+#define PAS_DMA_TXCHAN_CFG_TRR 0x00008000 /* translate rings */
+#define PAS_DMA_TXCHAN_CFG_UP 0x00004000 /* update tx descr when sent */
+#define PAS_DMA_TXCHAN_CFG_CL 0x00002000 /* Clean last line */
+#define PAS_DMA_TXCHAN_CFG_CF 0x00001000 /* Clean first line */
+#define PAS_DMA_TXCHAN_INCR(c) (0x310+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define PAS_DMA_TXCHAN_BASEL(c) (0x318+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define PAS_DMA_TXCHAN_BASEL_BRBL_M 0xffffffc0
+#define PAS_DMA_TXCHAN_BASEL_BRBL_S 0
+#define PAS_DMA_TXCHAN_BASEL_BRBL(x) (((x) << PAS_DMA_TXCHAN_BASEL_BRBL_S) & \
+ PAS_DMA_TXCHAN_BASEL_BRBL_M)
+#define PAS_DMA_TXCHAN_BASEU(c) (0x31c+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define PAS_DMA_TXCHAN_BASEU_BRBH_M 0x00000fff
+#define PAS_DMA_TXCHAN_BASEU_BRBH_S 0
+#define PAS_DMA_TXCHAN_BASEU_BRBH(x) (((x) << PAS_DMA_TXCHAN_BASEU_BRBH_S) & \
+ PAS_DMA_TXCHAN_BASEU_BRBH_M)
+/* # of cache lines worth of buffer ring */
+#define PAS_DMA_TXCHAN_BASEU_SIZ_M 0x3fff0000
+#define PAS_DMA_TXCHAN_BASEU_SIZ_S 16 /* 0 = 16K */
+#define PAS_DMA_TXCHAN_BASEU_SIZ(x) (((x) << PAS_DMA_TXCHAN_BASEU_SIZ_S) & \
+ PAS_DMA_TXCHAN_BASEU_SIZ_M)
+
+#define _PAS_DMA_RXCHAN_STRIDE 0x20 /* Size per channel */
+#define _PAS_DMA_RXCHAN_CCMDSTA 0x800 /* Command / Status */
+#define _PAS_DMA_RXCHAN_CFG 0x804 /* Configuration */
+#define _PAS_DMA_RXCHAN_INCR 0x810 /* Descriptor increment */
+#define _PAS_DMA_RXCHAN_CNT 0x814 /* Descriptor count/offset */
+#define _PAS_DMA_RXCHAN_BASEL 0x818 /* Descriptor ring base (low) */
+#define _PAS_DMA_RXCHAN_BASEU 0x81c /* (high) */
+#define PAS_DMA_RXCHAN_CCMDSTA(c) (0x800+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define PAS_DMA_RXCHAN_CCMDSTA_EN 0x00000001 /* Enabled */
+#define PAS_DMA_RXCHAN_CCMDSTA_ST 0x00000002 /* Stop interface */
+#define PAS_DMA_RXCHAN_CCMDSTA_ACT 0x00010000 /* Active */
+#define PAS_DMA_RXCHAN_CCMDSTA_DU 0x00020000
+#define PAS_DMA_RXCHAN_CCMDSTA_OD 0x00002000
+#define PAS_DMA_RXCHAN_CCMDSTA_FD 0x00001000
+#define PAS_DMA_RXCHAN_CCMDSTA_DT 0x00000800
+#define PAS_DMA_RXCHAN_CFG(c) (0x804+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define PAS_DMA_RXCHAN_CFG_CTR 0x00000400
+#define PAS_DMA_RXCHAN_CFG_HBU_M 0x00000380
+#define PAS_DMA_RXCHAN_CFG_HBU_S 7
+#define PAS_DMA_RXCHAN_CFG_HBU(x) (((x) << PAS_DMA_RXCHAN_CFG_HBU_S) & \
+ PAS_DMA_RXCHAN_CFG_HBU_M)
+#define PAS_DMA_RXCHAN_INCR(c) (0x810+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define PAS_DMA_RXCHAN_BASEL(c) (0x818+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define PAS_DMA_RXCHAN_BASEL_BRBL_M 0xffffffc0
+#define PAS_DMA_RXCHAN_BASEL_BRBL_S 0
+#define PAS_DMA_RXCHAN_BASEL_BRBL(x) (((x) << PAS_DMA_RXCHAN_BASEL_BRBL_S) & \
+ PAS_DMA_RXCHAN_BASEL_BRBL_M)
+#define PAS_DMA_RXCHAN_BASEU(c) (0x81c+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define PAS_DMA_RXCHAN_BASEU_BRBH_M 0x00000fff
+#define PAS_DMA_RXCHAN_BASEU_BRBH_S 0
+#define PAS_DMA_RXCHAN_BASEU_BRBH(x) (((x) << PAS_DMA_RXCHAN_BASEU_BRBH_S) & \
+ PAS_DMA_RXCHAN_BASEU_BRBH_M)
+/* # of cache lines worth of buffer ring */
+#define PAS_DMA_RXCHAN_BASEU_SIZ_M 0x3fff0000
+#define PAS_DMA_RXCHAN_BASEU_SIZ_S 16 /* 0 = 16K */
+#define PAS_DMA_RXCHAN_BASEU_SIZ(x) (((x) << PAS_DMA_RXCHAN_BASEU_SIZ_S) & \
+ PAS_DMA_RXCHAN_BASEU_SIZ_M)
+
+#define PAS_STATUS_PCNT_M 0x000000000000ffffull
+#define PAS_STATUS_PCNT_S 0
+#define PAS_STATUS_DCNT_M 0x00000000ffff0000ull
+#define PAS_STATUS_DCNT_S 16
+#define PAS_STATUS_BPCNT_M 0x0000ffff00000000ull
+#define PAS_STATUS_BPCNT_S 32
+#define PAS_STATUS_CAUSE_M 0xf000000000000000ull
+#define PAS_STATUS_TIMER 0x1000000000000000ull
+#define PAS_STATUS_ERROR 0x2000000000000000ull
+#define PAS_STATUS_SOFT 0x4000000000000000ull
+#define PAS_STATUS_INT 0x8000000000000000ull
+
+#define PAS_IOB_COM_PKTHDRCNT 0x120
+#define PAS_IOB_COM_PKTHDRCNT_PKTHDR1_M 0x0fff0000
+#define PAS_IOB_COM_PKTHDRCNT_PKTHDR1_S 16
+#define PAS_IOB_COM_PKTHDRCNT_PKTHDR0_M 0x00000fff
+#define PAS_IOB_COM_PKTHDRCNT_PKTHDR0_S 0
+
+#define PAS_IOB_DMA_RXCH_CFG(i) (0x1100 + (i)*4)
+#define PAS_IOB_DMA_RXCH_CFG_CNTTH_M 0x00000fff
+#define PAS_IOB_DMA_RXCH_CFG_CNTTH_S 0
+#define PAS_IOB_DMA_RXCH_CFG_CNTTH(x) (((x) << PAS_IOB_DMA_RXCH_CFG_CNTTH_S) & \
+ PAS_IOB_DMA_RXCH_CFG_CNTTH_M)
+#define PAS_IOB_DMA_TXCH_CFG(i) (0x1200 + (i)*4)
+#define PAS_IOB_DMA_TXCH_CFG_CNTTH_M 0x00000fff
+#define PAS_IOB_DMA_TXCH_CFG_CNTTH_S 0
+#define PAS_IOB_DMA_TXCH_CFG_CNTTH(x) (((x) << PAS_IOB_DMA_TXCH_CFG_CNTTH_S) & \
+ PAS_IOB_DMA_TXCH_CFG_CNTTH_M)
+#define PAS_IOB_DMA_RXCH_STAT(i) (0x1300 + (i)*4)
+#define PAS_IOB_DMA_RXCH_STAT_INTGEN 0x00001000
+#define PAS_IOB_DMA_RXCH_STAT_CNTDEL_M 0x00000fff
+#define PAS_IOB_DMA_RXCH_STAT_CNTDEL_S 0
+#define PAS_IOB_DMA_RXCH_STAT_CNTDEL(x) (((x) << PAS_IOB_DMA_RXCH_STAT_CNTDEL_S) &\
+ PAS_IOB_DMA_RXCH_STAT_CNTDEL_M)
+#define PAS_IOB_DMA_TXCH_STAT(i) (0x1400 + (i)*4)
+#define PAS_IOB_DMA_TXCH_STAT_INTGEN 0x00001000
+#define PAS_IOB_DMA_TXCH_STAT_CNTDEL_M 0x00000fff
+#define PAS_IOB_DMA_TXCH_STAT_CNTDEL_S 0
+#define PAS_IOB_DMA_TXCH_STAT_CNTDEL(x) (((x) << PAS_IOB_DMA_TXCH_STAT_CNTDEL_S) &\
+ PAS_IOB_DMA_TXCH_STAT_CNTDEL_M)
+#define PAS_IOB_DMA_RXCH_RESET(i) (0x1500 + (i)*4)
+#define PAS_IOB_DMA_RXCH_RESET_PCNT_M 0xffff0000
+#define PAS_IOB_DMA_RXCH_RESET_PCNT_S 16
+#define PAS_IOB_DMA_RXCH_RESET_PCNT(x) (((x) << PAS_IOB_DMA_RXCH_RESET_PCNT_S) & \
+ PAS_IOB_DMA_RXCH_RESET_PCNT_M)
+#define PAS_IOB_DMA_RXCH_RESET_PCNTRST 0x00000020
+#define PAS_IOB_DMA_RXCH_RESET_DCNTRST 0x00000010
+#define PAS_IOB_DMA_RXCH_RESET_TINTC 0x00000008
+#define PAS_IOB_DMA_RXCH_RESET_DINTC 0x00000004
+#define PAS_IOB_DMA_RXCH_RESET_SINTC 0x00000002
+#define PAS_IOB_DMA_RXCH_RESET_PINTC 0x00000001
+#define PAS_IOB_DMA_TXCH_RESET(i) (0x1600 + (i)*4)
+#define PAS_IOB_DMA_TXCH_RESET_PCNT_M 0xffff0000
+#define PAS_IOB_DMA_TXCH_RESET_PCNT_S 16
+#define PAS_IOB_DMA_TXCH_RESET_PCNT(x) (((x) << PAS_IOB_DMA_TXCH_RESET_PCNT_S) & \
+ PAS_IOB_DMA_TXCH_RESET_PCNT_M)
+#define PAS_IOB_DMA_TXCH_RESET_PCNTRST 0x00000020
+#define PAS_IOB_DMA_TXCH_RESET_DCNTRST 0x00000010
+#define PAS_IOB_DMA_TXCH_RESET_TINTC 0x00000008
+#define PAS_IOB_DMA_TXCH_RESET_DINTC 0x00000004
+#define PAS_IOB_DMA_TXCH_RESET_SINTC 0x00000002
+#define PAS_IOB_DMA_TXCH_RESET_PINTC 0x00000001
+
+#define PAS_IOB_DMA_COM_TIMEOUTCFG 0x1700
+#define PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M 0x00ffffff
+#define PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S 0
+#define PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(x) (((x) << PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S) & \
+ PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M)
+
+/* Transmit descriptor fields */
+#define XCT_MACTX_T 0x8000000000000000ull
+#define XCT_MACTX_ST 0x4000000000000000ull
+#define XCT_MACTX_NORES 0x0000000000000000ull
+#define XCT_MACTX_8BRES 0x1000000000000000ull
+#define XCT_MACTX_24BRES 0x2000000000000000ull
+#define XCT_MACTX_40BRES 0x3000000000000000ull
+#define XCT_MACTX_I 0x0800000000000000ull
+#define XCT_MACTX_O 0x0400000000000000ull
+#define XCT_MACTX_E 0x0200000000000000ull
+#define XCT_MACTX_VLAN_M 0x0180000000000000ull
+#define XCT_MACTX_VLAN_NOP 0x0000000000000000ull
+#define XCT_MACTX_VLAN_REMOVE 0x0080000000000000ull
+#define XCT_MACTX_VLAN_INSERT 0x0100000000000000ull
+#define XCT_MACTX_VLAN_REPLACE 0x0180000000000000ull
+#define XCT_MACTX_CRC_M 0x0060000000000000ull
+#define XCT_MACTX_CRC_NOP 0x0000000000000000ull
+#define XCT_MACTX_CRC_INSERT 0x0020000000000000ull
+#define XCT_MACTX_CRC_PAD 0x0040000000000000ull
+#define XCT_MACTX_CRC_REPLACE 0x0060000000000000ull
+#define XCT_MACTX_SS 0x0010000000000000ull
+#define XCT_MACTX_LLEN_M 0x00007fff00000000ull
+#define XCT_MACTX_LLEN_S 32ull
+#define XCT_MACTX_LLEN(x) ((((long)(x)) << XCT_MACTX_LLEN_S) & \
+ XCT_MACTX_LLEN_M)
+#define XCT_MACTX_IPH_M 0x00000000f8000000ull
+#define XCT_MACTX_IPH_S 27ull
+#define XCT_MACTX_IPH(x) ((((long)(x)) << XCT_MACTX_IPH_S) & \
+ XCT_MACTX_IPH_M)
+#define XCT_MACTX_IPO_M 0x0000000007c00000ull
+#define XCT_MACTX_IPO_S 22ull
+#define XCT_MACTX_IPO(x) ((((long)(x)) << XCT_MACTX_IPO_S) & \
+ XCT_MACTX_IPO_M)
+#define XCT_MACTX_CSUM_M 0x0000000000000060ull
+#define XCT_MACTX_CSUM_NOP 0x0000000000000000ull
+#define XCT_MACTX_CSUM_TCP 0x0000000000000040ull
+#define XCT_MACTX_CSUM_UDP 0x0000000000000060ull
+#define XCT_MACTX_V6 0x0000000000000010ull
+#define XCT_MACTX_C 0x0000000000000004ull
+#define XCT_MACTX_AL2 0x0000000000000002ull
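+
+/*
+ * Usage sketch (illustrative only): a driver builds the MAC word of a
+ * transmit descriptor by OR-ing flag bits with the encoded length, e.g.
+ *
+ *	u64 mactx = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_CRC_PAD |
+ *		    XCT_MACTX_LLEN(len);
+ *
+ * where "len" is a placeholder for the frame length; checksum and VLAN
+ * control bits are added the same way.
+ */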
+
+/* Receive descriptor fields */
+#define XCT_MACRX_T 0x8000000000000000ull
+#define XCT_MACRX_ST 0x4000000000000000ull
+#define XCT_MACRX_RR_M 0x3000000000000000ull
+#define XCT_MACRX_RR_NORES 0x0000000000000000ull
+#define XCT_MACRX_RR_8BRES 0x1000000000000000ull
+#define XCT_MACRX_O 0x0400000000000000ull
+#define XCT_MACRX_E 0x0200000000000000ull
+#define XCT_MACRX_FF 0x0100000000000000ull
+#define XCT_MACRX_PF 0x0080000000000000ull
+#define XCT_MACRX_OB 0x0040000000000000ull
+#define XCT_MACRX_OD 0x0020000000000000ull
+#define XCT_MACRX_FS 0x0010000000000000ull
+#define XCT_MACRX_NB_M 0x000fc00000000000ull
+#define XCT_MACRX_NB_S 46ULL
+#define XCT_MACRX_NB(x) ((((long)(x)) << XCT_MACRX_NB_S) & \
+ XCT_MACRX_NB_M)
+#define XCT_MACRX_LLEN_M 0x00003fff00000000ull
+#define XCT_MACRX_LLEN_S 32ULL
+#define XCT_MACRX_LLEN(x) ((((long)(x)) << XCT_MACRX_LLEN_S) & \
+ XCT_MACRX_LLEN_M)
+#define XCT_MACRX_CRC 0x0000000080000000ull
+#define XCT_MACRX_LEN_M 0x0000000060000000ull
+#define XCT_MACRX_LEN_TOOSHORT 0x0000000020000000ull
+#define XCT_MACRX_LEN_BELOWMIN 0x0000000040000000ull
+#define XCT_MACRX_LEN_TRUNC 0x0000000060000000ull
+#define XCT_MACRX_CAST_M 0x0000000018000000ull
+#define XCT_MACRX_CAST_UNI 0x0000000000000000ull
+#define XCT_MACRX_CAST_MULTI 0x0000000008000000ull
+#define XCT_MACRX_CAST_BROAD 0x0000000010000000ull
+#define XCT_MACRX_CAST_PAUSE 0x0000000018000000ull
+#define XCT_MACRX_VLC_M 0x0000000006000000ull
+#define XCT_MACRX_FM 0x0000000001000000ull
+#define XCT_MACRX_HTY_M 0x0000000000c00000ull
+#define XCT_MACRX_HTY_IPV4_OK 0x0000000000000000ull
+#define XCT_MACRX_HTY_IPV6 0x0000000000400000ull
+#define XCT_MACRX_HTY_IPV4_BAD 0x0000000000800000ull
+#define XCT_MACRX_HTY_NONIP 0x0000000000c00000ull
+#define XCT_MACRX_IPP_M 0x00000000003f0000ull
+#define XCT_MACRX_IPP_S 16
+#define XCT_MACRX_CSUM_M 0x000000000000ffffull
+#define XCT_MACRX_CSUM_S 0
+
+#define XCT_PTR_T 0x8000000000000000ull
+#define XCT_PTR_LEN_M 0x7ffff00000000000ull
+#define XCT_PTR_LEN_S 44
+#define XCT_PTR_LEN(x) ((((long)(x)) << XCT_PTR_LEN_S) & \
+ XCT_PTR_LEN_M)
+#define XCT_PTR_ADDR_M 0x00000fffffffffffull
+#define XCT_PTR_ADDR_S 0
+#define XCT_PTR_ADDR(x) ((((long)(x)) << XCT_PTR_ADDR_S) & \
+ XCT_PTR_ADDR_M)
+
+/* Receive interface 8byte result fields */
+#define XCT_RXRES_8B_L4O_M 0xff00000000000000ull
+#define XCT_RXRES_8B_L4O_S 56
+#define XCT_RXRES_8B_RULE_M 0x00ffff0000000000ull
+#define XCT_RXRES_8B_RULE_S 40
+#define XCT_RXRES_8B_EVAL_M 0x000000ffff000000ull
+#define XCT_RXRES_8B_EVAL_S 24
+#define XCT_RXRES_8B_HTYPE_M 0x0000000000f00000ull
+#define XCT_RXRES_8B_HASH_M 0x00000000000fffffull
+#define XCT_RXRES_8B_HASH_S 0
+
+/* Receive interface buffer fields */
+#define XCT_RXB_LEN_M 0x0ffff00000000000ull
+#define XCT_RXB_LEN_S 44
+#define XCT_RXB_LEN(x) ((((long)(x)) << XCT_RXB_LEN_S) & \
+ XCT_RXB_LEN_M)
+#define XCT_RXB_ADDR_M 0x00000fffffffffffull
+#define XCT_RXB_ADDR_S 0
+#define XCT_RXB_ADDR(x) ((((long)(x)) << XCT_RXB_ADDR_S) & \
+ XCT_RXB_ADDR_M)
+
+/* Copy descriptor fields */
+#define XCT_COPY_T 0x8000000000000000ull
+#define XCT_COPY_ST 0x4000000000000000ull
+#define XCT_COPY_RR_M 0x3000000000000000ull
+#define XCT_COPY_RR_NORES 0x0000000000000000ull
+#define XCT_COPY_RR_8BRES 0x1000000000000000ull
+#define XCT_COPY_RR_24BRES 0x2000000000000000ull
+#define XCT_COPY_RR_40BRES 0x3000000000000000ull
+#define XCT_COPY_I 0x0800000000000000ull
+#define XCT_COPY_O 0x0400000000000000ull
+#define XCT_COPY_E 0x0200000000000000ull
+#define XCT_COPY_STY_ZERO 0x01c0000000000000ull
+#define XCT_COPY_DTY_PREF 0x0038000000000000ull
+#define XCT_COPY_LLEN_M 0x0007ffff00000000ull
+#define XCT_COPY_LLEN_S 32
+#define XCT_COPY_LLEN(x) ((((long)(x)) << XCT_COPY_LLEN_S) & \
+ XCT_COPY_LLEN_M)
+#define XCT_COPY_SE 0x0000000000000001ull
+
+/* Function descriptor fields */
+#define XCT_FUN_T 0x8000000000000000ull
+#define XCT_FUN_ST 0x4000000000000000ull
+#define XCT_FUN_RR_M 0x3000000000000000ull
+#define XCT_FUN_RR_NORES 0x0000000000000000ull
+#define XCT_FUN_RR_8BRES 0x1000000000000000ull
+#define XCT_FUN_RR_24BRES 0x2000000000000000ull
+#define XCT_FUN_RR_40BRES 0x3000000000000000ull
+#define XCT_FUN_I 0x0800000000000000ull
+#define XCT_FUN_O 0x0400000000000000ull
+#define XCT_FUN_E 0x0200000000000000ull
+#define XCT_FUN_FUN_M 0x01c0000000000000ull
+#define XCT_FUN_FUN_S 54
+#define XCT_FUN_FUN(x) ((((long)(x)) << XCT_FUN_FUN_S) & XCT_FUN_FUN_M)
+#define XCT_FUN_CRM_M 0x0038000000000000ull
+#define XCT_FUN_CRM_NOP 0x0000000000000000ull
+#define XCT_FUN_CRM_SIG 0x0008000000000000ull
+#define XCT_FUN_LLEN_M 0x0007ffff00000000ull
+#define XCT_FUN_LLEN_S 32
+#define XCT_FUN_LLEN(x) ((((long)(x)) << XCT_FUN_LLEN_S) & XCT_FUN_LLEN_M)
+#define XCT_FUN_SHL_M 0x00000000f8000000ull
+#define XCT_FUN_SHL_S 27
+#define XCT_FUN_SHL(x) ((((long)(x)) << XCT_FUN_SHL_S) & XCT_FUN_SHL_M)
+#define XCT_FUN_CHL_M 0x0000000007c00000ull
+#define XCT_FUN_HSZ_M 0x00000000003c0000ull
+#define XCT_FUN_ALG_M 0x0000000000038000ull
+#define XCT_FUN_HP 0x0000000000004000ull
+#define XCT_FUN_BCM_M 0x0000000000003800ull
+#define XCT_FUN_BCP_M 0x0000000000000600ull
+#define XCT_FUN_SIG_M 0x00000000000001f0ull
+#define XCT_FUN_SIG_TCP4 0x0000000000000140ull
+#define XCT_FUN_SIG_TCP6 0x0000000000000150ull
+#define XCT_FUN_SIG_UDP4 0x0000000000000160ull
+#define XCT_FUN_SIG_UDP6 0x0000000000000170ull
+#define XCT_FUN_A 0x0000000000000008ull
+#define XCT_FUN_C 0x0000000000000004ull
+#define XCT_FUN_AL2 0x0000000000000002ull
+#define XCT_FUN_SE 0x0000000000000001ull
+
+/* Function descriptor 8-byte result fields */
+#define XCT_FUNRES_8B_CS_M 0x0000ffff00000000ull
+#define XCT_FUNRES_8B_CS_S 32
+#define XCT_FUNRES_8B_CRC_M 0x00000000ffffffffull
+#define XCT_FUNRES_8B_CRC_S 0
+
+/* Control descriptor fields */
+#define CTRL_CMD_T 0x8000000000000000ull
+#define CTRL_CMD_META_EVT 0x2000000000000000ull
+#define CTRL_CMD_O 0x0400000000000000ull
+#define CTRL_CMD_ETYPE_M 0x0038000000000000ull
+#define CTRL_CMD_ETYPE_EXT 0x0000000000000000ull
+#define CTRL_CMD_ETYPE_WSET 0x0020000000000000ull
+#define CTRL_CMD_ETYPE_WCLR 0x0028000000000000ull
+#define CTRL_CMD_ETYPE_SET 0x0030000000000000ull
+#define CTRL_CMD_ETYPE_CLR 0x0038000000000000ull
+#define CTRL_CMD_REG_M 0x000000000000007full
+#define CTRL_CMD_REG_S 0
+#define CTRL_CMD_REG(x) ((((long)(x)) << CTRL_CMD_REG_S) & \
+ CTRL_CMD_REG_M)
+
+
+
+/* Prototypes for the shared DMA functions in the platform code. */
+
+/* DMA TX Channel type. Right now the only restriction used is the event
+ * class (0/1) for event-triggered DMA transactions.
+ */
+
+enum pasemi_dmachan_type {
+ RXCHAN = 0, /* Any RX chan */
+ TXCHAN = 1, /* Any TX chan */
+ TXCHAN_EVT0 = 0x1001, /* TX chan in event class 0 (chan 0-9) */
+ TXCHAN_EVT1 = 0x2001, /* TX chan in event class 1 (chan 10-19) */
+};
+
+struct pasemi_dmachan {
+ int chno; /* Channel number */
+ enum pasemi_dmachan_type chan_type; /* TX / RX */
+ u64 *status; /* Ptr to cacheable status */
+ int irq; /* IRQ used by channel */
+ unsigned int ring_size; /* size of allocated ring */
+ dma_addr_t ring_dma; /* DMA address for ring */
+ u64 *ring_virt; /* Virt address for ring */
+ void *priv; /* Ptr to start of client struct */
+};
+
+/* Read/write the different registers in the I/O Bridge, Ethernet
+ * and DMA Controller
+ */
+extern unsigned int pasemi_read_iob_reg(unsigned int reg);
+extern void pasemi_write_iob_reg(unsigned int reg, unsigned int val);
+
+extern unsigned int pasemi_read_mac_reg(int intf, unsigned int reg);
+extern void pasemi_write_mac_reg(int intf, unsigned int reg, unsigned int val);
+
+extern unsigned int pasemi_read_dma_reg(unsigned int reg);
+extern void pasemi_write_dma_reg(unsigned int reg, unsigned int val);
+
+/* Channel management routines */
+
+extern void *pasemi_dma_alloc_chan(enum pasemi_dmachan_type type,
+ int total_size, int offset);
+extern void pasemi_dma_free_chan(struct pasemi_dmachan *chan);
+
+extern void pasemi_dma_start_chan(const struct pasemi_dmachan *chan,
+ const u32 cmdsta);
+extern int pasemi_dma_stop_chan(const struct pasemi_dmachan *chan);
+
+/* Common routines to allocate rings and buffers */
+
+extern int pasemi_dma_alloc_ring(struct pasemi_dmachan *chan, int ring_size);
+extern void pasemi_dma_free_ring(struct pasemi_dmachan *chan);
+
+extern void *pasemi_dma_alloc_buf(struct pasemi_dmachan *chan, int size,
+ dma_addr_t *handle);
+extern void pasemi_dma_free_buf(struct pasemi_dmachan *chan, int size,
+ dma_addr_t *handle);
+
+/* Routines to allocate flags (events) for channel synchronization */
+extern int pasemi_dma_alloc_flag(void);
+extern void pasemi_dma_free_flag(int flag);
+extern void pasemi_dma_set_flag(int flag);
+extern void pasemi_dma_clear_flag(int flag);
+
+/* Routines to allocate function engines */
+extern int pasemi_dma_alloc_fun(void);
+extern void pasemi_dma_free_fun(int fun);
+
+/* Initialize the library, must be called before any other functions */
+extern int pasemi_dma_init(void);
+
+#endif /* ASM_PASEMI_DMA_H */
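Editorial note: as a hedged usage sketch (illustration only, not part of the patch; the struct name, the 64-entry ring size and the zero CMDSTA value are placeholders), a client driver might wrap one of these channels roughly as follows. The embedded pasemi_dmachan is kept as the first member so the allocation offset can simply be 0:

struct example_txring {
	struct pasemi_dmachan chan;	/* kept first so offset == 0 */
	/* ... driver-private state would follow ... */
};

static struct example_txring *example_tx_setup(void)
{
	struct example_txring *ring;

	if (pasemi_dma_init())		/* library must be initialized first */
		return NULL;

	ring = pasemi_dma_alloc_chan(TXCHAN, sizeof(*ring), 0);
	if (!ring)
		return NULL;

	if (pasemi_dma_alloc_ring(&ring->chan, 64)) {	/* 64-entry ring */
		pasemi_dma_free_chan(&ring->chan);
		return NULL;
	}

	/* 0 stands in for whatever CMDSTA bits a real driver would set */
	pasemi_dma_start_chan(&ring->chan, 0);
	return ring;
}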
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
new file mode 100644
index 0000000..ae2ea80
--- /dev/null
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -0,0 +1,302 @@
+#ifndef _ASM_POWERPC_PCI_BRIDGE_H
+#define _ASM_POWERPC_PCI_BRIDGE_H
+#ifdef __KERNEL__
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/ioport.h>
+
+struct device_node;
+
+extern unsigned int ppc_pci_flags;
+enum {
+ /* Force re-assigning all resources (ignore firmware
+ * setup completely)
+ */
+ PPC_PCI_REASSIGN_ALL_RSRC = 0x00000001,
+
+ /* Re-assign all bus numbers */
+ PPC_PCI_REASSIGN_ALL_BUS = 0x00000002,
+
+ /* Do not try to assign, just use existing setup */
+ PPC_PCI_PROBE_ONLY = 0x00000004,
+
+ /* Don't bother with ISA alignment unless the bridge has
+ * ISA forwarding enabled
+ */
+ PPC_PCI_CAN_SKIP_ISA_ALIGN = 0x00000008,
+
+ /* Enable domain numbers in /proc */
+ PPC_PCI_ENABLE_PROC_DOMAINS = 0x00000010,
+ /* ... except for domain 0 */
+ PPC_PCI_COMPAT_DOMAIN_0 = 0x00000020,
+};
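Editorial note: these flags are global policy bits consulted by the common PCI code. As a minimal sketch (the function name is made up, not part of the patch), platform setup code would typically OR the ones it wants into ppc_pci_flags before probing:

static void __init example_platform_init_pci_flags(void)
{
	/* Trust the firmware's resource assignment and expose domains */
	ppc_pci_flags |= PPC_PCI_PROBE_ONLY | PPC_PCI_ENABLE_PROC_DOMAINS;
}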
+
+
+/*
+ * Structure of a PCI controller (host bridge)
+ */
+struct pci_controller {
+ struct pci_bus *bus;
+ char is_dynamic;
+#ifdef CONFIG_PPC64
+ int node;
+#endif
+ struct device_node *dn;
+ struct list_head list_node;
+ struct device *parent;
+
+ int first_busno;
+ int last_busno;
+#ifndef CONFIG_PPC64
+ int self_busno;
+#endif
+
+ void __iomem *io_base_virt;
+#ifdef CONFIG_PPC64
+ void *io_base_alloc;
+#endif
+ resource_size_t io_base_phys;
+#ifndef CONFIG_PPC64
+ resource_size_t pci_io_size;
+#endif
+
+ /* Some machines (PReP) have a non 1:1 mapping of
+ * the PCI memory space in the CPU bus space
+ */
+ resource_size_t pci_mem_offset;
+#ifdef CONFIG_PPC64
+ unsigned long pci_io_size;
+#endif
+
+ struct pci_ops *ops;
+ unsigned int __iomem *cfg_addr;
+ void __iomem *cfg_data;
+
+#ifndef CONFIG_PPC64
+ /*
+ * Used for variants of PCI indirect handling and possible quirks:
+ * SET_CFG_TYPE - used on 4xx or any PHB that does explicit type0/1
+ * EXT_REG - provides access to PCI-e extended registers
+ * SURPRESS_PRIMARY_BUS - we suppress the setting of PCI_PRIMARY_BUS
+ * on Freescale PCI-e controllers since they use the PCI_PRIMARY_BUS
+ * to determine which bus number to match on when generating type0
+ * config cycles
+ * NO_PCIE_LINK - the Freescale PCI-e controllers have issues with
+ * hanging if we don't have link and try to do config cycles to
+ * anything but the PHB. Only allow talking to the PHB if this is
+ * set.
+ * BIG_ENDIAN - cfg_addr is a big endian register
+ * BROKEN_MRM - the 440EPx/GRx chips have an erratum that causes hangs on
+ * the PLB4. Effectively disable MRM commands by setting this.
+ */
+#define PPC_INDIRECT_TYPE_SET_CFG_TYPE 0x00000001
+#define PPC_INDIRECT_TYPE_EXT_REG 0x00000002
+#define PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS 0x00000004
+#define PPC_INDIRECT_TYPE_NO_PCIE_LINK 0x00000008
+#define PPC_INDIRECT_TYPE_BIG_ENDIAN 0x00000010
+#define PPC_INDIRECT_TYPE_BROKEN_MRM 0x00000020
+ u32 indirect_type;
+#endif /* !CONFIG_PPC64 */
+ /* Currently, we limit ourselves to 1 IO range and 3 mem
+ * ranges since the common pci_bus structure can't handle more
+ */
+ struct resource io_resource;
+ struct resource mem_resources[3];
+ int global_number; /* PCI domain number */
+#ifdef CONFIG_PPC64
+ unsigned long buid;
+ unsigned long dma_window_base_cur;
+ unsigned long dma_window_size;
+
+ void *private_data;
+#endif /* CONFIG_PPC64 */
+};
+
+#ifndef CONFIG_PPC64
+
+static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus)
+{
+ return bus->sysdata;
+}
+
+static inline int isa_vaddr_is_ioport(void __iomem *address)
+{
+ /* No specific ISA handling on ppc32 at this stage, it
+ * all goes through PCI
+ */
+ return 0;
+}
+
+/* These are used for config access before all the PCI probing
+ has been done. */
+extern int early_read_config_byte(struct pci_controller *hose, int bus,
+ int dev_fn, int where, u8 *val);
+extern int early_read_config_word(struct pci_controller *hose, int bus,
+ int dev_fn, int where, u16 *val);
+extern int early_read_config_dword(struct pci_controller *hose, int bus,
+ int dev_fn, int where, u32 *val);
+extern int early_write_config_byte(struct pci_controller *hose, int bus,
+ int dev_fn, int where, u8 val);
+extern int early_write_config_word(struct pci_controller *hose, int bus,
+ int dev_fn, int where, u16 val);
+extern int early_write_config_dword(struct pci_controller *hose, int bus,
+ int dev_fn, int where, u32 val);
+
+extern int early_find_capability(struct pci_controller *hose, int bus,
+ int dev_fn, int cap);
+
+extern void setup_indirect_pci(struct pci_controller* hose,
+ resource_size_t cfg_addr,
+ resource_size_t cfg_data, u32 flags);
+extern void setup_grackle(struct pci_controller *hose);
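Editorial note: the early_* accessors let platform code touch config space through the hose before the generic PCI probe has created any pci_dev. A minimal sketch (illustrative only; the helper name is hypothetical) checking whether anything answers at devfn 0 of the hose's first bus:

static int __init example_device_present(struct pci_controller *hose)
{
	u16 vendor;

	if (early_read_config_word(hose, hose->first_busno, 0,
				   PCI_VENDOR_ID, &vendor))
		return 0;

	return vendor != 0xffff;	/* all-ones means nothing answered */
}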
+#else /* CONFIG_PPC64 */
+
+/*
+ * PCI stuff, for nodes representing PCI devices, pointed to
+ * by device_node->data.
+ */
+struct iommu_table;
+
+struct pci_dn {
+ int busno; /* pci bus number */
+ int devfn; /* pci device and function number */
+
+ struct pci_controller *phb; /* for pci devices */
+ struct iommu_table *iommu_table; /* for phb's or bridges */
+ struct device_node *node; /* back-pointer to the device_node */
+
+ int pci_ext_config_space; /* for pci devices */
+
+#ifdef CONFIG_EEH
+ struct pci_dev *pcidev; /* back-pointer to the pci device */
+ int class_code; /* pci device class */
+ int eeh_mode; /* See eeh.h for possible EEH_MODEs */
+ int eeh_config_addr;
+ int eeh_pe_config_addr; /* new-style partition endpoint address */
+ int eeh_check_count; /* # times driver ignored error */
+ int eeh_freeze_count; /* # times this device froze up. */
+ int eeh_false_positives; /* # times this device reported #ff's */
+ u32 config_space[16]; /* saved PCI config space */
+#endif
+};
+
+/* Get the pointer to a device_node's pci_dn */
+#define PCI_DN(dn) ((struct pci_dn *) (dn)->data)
+
+extern struct device_node *fetch_dev_dn(struct pci_dev *dev);
+
+/* Get a device_node from a pci_dev. This code must be fast except
+ * in the case where the sysdata is incorrect and needs to be fixed
+ * up (this will only happen once).
+ * In this case the sysdata will have been inherited from a PCI host
+ * bridge or a PCI-PCI bridge further up the tree, so it will point
+ * to a valid struct pci_dn, just not the one we want.
+ */
+static inline struct device_node *pci_device_to_OF_node(struct pci_dev *dev)
+{
+ struct device_node *dn = dev->sysdata;
+ struct pci_dn *pdn = dn->data;
+
+ if (pdn && pdn->devfn == dev->devfn && pdn->busno == dev->bus->number)
+ return dn; /* fast path. sysdata is good */
+ return fetch_dev_dn(dev);
+}
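Editorial note: a brief sketch of the intended use (illustration only, not part of the patch; assumes of_get_property() from <asm/prom.h>, and the property checked is just an example):

static int example_dev_has_dma_window(struct pci_dev *dev)
{
	struct device_node *dn = pci_device_to_OF_node(dev);

	return dn && of_get_property(dn, "ibm,dma-window", NULL) != NULL;
}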
+
+static inline int pci_device_from_OF_node(struct device_node *np,
+ u8 *bus, u8 *devfn)
+{
+ if (!PCI_DN(np))
+ return -ENODEV;
+ *bus = PCI_DN(np)->busno;
+ *devfn = PCI_DN(np)->devfn;
+ return 0;
+}
+
+static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
+{
+ if (bus->self)
+ return pci_device_to_OF_node(bus->self);
+ else
+ return bus->sysdata; /* Must be root bus (PHB) */
+}
+
+/** Find the bus corresponding to the indicated device node */
+extern struct pci_bus *pcibios_find_pci_bus(struct device_node *dn);
+
+/** Remove all of the PCI devices under this bus */
+extern void pcibios_remove_pci_devices(struct pci_bus *bus);
+
+/** Discover new pci devices under this bus, and add them */
+extern void pcibios_add_pci_devices(struct pci_bus *bus);
+extern void pcibios_fixup_new_pci_devices(struct pci_bus *bus);
+
+extern int pcibios_remove_root_bus(struct pci_controller *phb);
+
+static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus)
+{
+ struct device_node *busdn = bus->sysdata;
+
+ BUG_ON(busdn == NULL);
+ return PCI_DN(busdn)->phb;
+}
+
+
+extern void isa_bridge_find_early(struct pci_controller *hose);
+
+static inline int isa_vaddr_is_ioport(void __iomem *address)
+{
+ /* Check if address hits the reserved legacy IO range */
+ unsigned long ea = (unsigned long)address;
+ return ea >= ISA_IO_BASE && ea < ISA_IO_END;
+}
+
+extern int pcibios_unmap_io_space(struct pci_bus *bus);
+extern int pcibios_map_io_space(struct pci_bus *bus);
+
+/* Return values for ppc_md.pci_probe_mode function */
+#define PCI_PROBE_NONE -1 /* Don't look at this bus at all */
+#define PCI_PROBE_NORMAL 0 /* Do normal PCI probing */
+#define PCI_PROBE_DEVTREE 1 /* Instantiate from device tree */
+
+#ifdef CONFIG_NUMA
+#define PHB_SET_NODE(PHB, NODE) ((PHB)->node = (NODE))
+#else
+#define PHB_SET_NODE(PHB, NODE) ((PHB)->node = -1)
+#endif
+
+#endif /* CONFIG_PPC64 */
+
+/* Get the PCI host controller for an OF device */
+extern struct pci_controller *pci_find_hose_for_OF_device(
+ struct device_node* node);
+
+/* Fill up host controller resources from the OF node */
+extern void pci_process_bridge_OF_ranges(struct pci_controller *hose,
+ struct device_node *dev, int primary);
+
+/* Allocate & free a PCI host bridge structure */
+extern struct pci_controller *pcibios_alloc_controller(struct device_node *dev);
+extern void pcibios_free_controller(struct pci_controller *phb);
+
+#ifdef CONFIG_PCI
+extern unsigned long pci_address_to_pio(phys_addr_t address);
+extern int pcibios_vaddr_is_ioport(void __iomem *address);
+#else
+static inline unsigned long pci_address_to_pio(phys_addr_t address)
+{
+ return (unsigned long)-1;
+}
+static inline int pcibios_vaddr_is_ioport(void __iomem *address)
+{
+ return 0;
+}
+#endif /* CONFIG_PCI */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_PCI_BRIDGE_H */
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
new file mode 100644
index 0000000..a05a942
--- /dev/null
+++ b/arch/powerpc/include/asm/pci.h
@@ -0,0 +1,228 @@
+#ifndef __ASM_POWERPC_PCI_H
+#define __ASM_POWERPC_PCI_H
+#ifdef __KERNEL__
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/machdep.h>
+#include <asm/scatterlist.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+
+#include <asm-generic/pci-dma-compat.h>
+
+#define PCIBIOS_MIN_IO 0x1000
+#define PCIBIOS_MIN_MEM 0x10000000
+
+struct pci_dev;
+
+/* Values for the `which' argument to sys_pciconfig_iobase syscall. */
+#define IOBASE_BRIDGE_NUMBER 0
+#define IOBASE_MEMORY 1
+#define IOBASE_IO 2
+#define IOBASE_ISA_IO 3
+#define IOBASE_ISA_MEM 4
+
+/*
+ * Set this to 1 if you want the kernel to re-assign all PCI
+ * bus numbers (don't do that on ppc64 yet!)
+ */
+#define pcibios_assign_all_busses() (ppc_pci_flags & \
+ PPC_PCI_REASSIGN_ALL_BUS)
+#define pcibios_scan_all_fns(a, b) 0
+
+static inline void pcibios_set_master(struct pci_dev *dev)
+{
+ /* No special bus mastering setup handling */
+}
+
+static inline void pcibios_penalize_isa_irq(int irq, int active)
+{
+ /* We don't do dynamic PCI IRQ allocation */
+}
+
+#define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
+static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
+{
+ if (ppc_md.pci_get_legacy_ide_irq)
+ return ppc_md.pci_get_legacy_ide_irq(dev, channel);
+ return channel ? 15 : 14;
+}
+
+#ifdef CONFIG_PPC64
+
+/*
+ * We want to avoid touching the cacheline size or MWI bit.
+ * pSeries firmware sets the cacheline size (which is not the cpu cacheline
+ * size in all cases) and hardware treats MWI the same as memory write.
+ */
+#define PCI_DISABLE_MWI
+
+#ifdef CONFIG_PCI
+extern void set_pci_dma_ops(struct dma_mapping_ops *dma_ops);
+extern struct dma_mapping_ops *get_pci_dma_ops(void);
+
+static inline void pci_dma_burst_advice(struct pci_dev *pdev,
+ enum pci_dma_burst_strategy *strat,
+ unsigned long *strategy_parameter)
+{
+ unsigned long cacheline_size;
+ u8 byte;
+
+ pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
+ if (byte == 0)
+ cacheline_size = 1024;
+ else
+ cacheline_size = (int) byte * 4;
+
+ *strat = PCI_DMA_BURST_MULTIPLE;
+ *strategy_parameter = cacheline_size;
+}
+#else /* CONFIG_PCI */
+#define set_pci_dma_ops(d)
+#define get_pci_dma_ops() NULL
+#endif
+
+#else /* 32-bit */
+
+#ifdef CONFIG_PCI
+static inline void pci_dma_burst_advice(struct pci_dev *pdev,
+ enum pci_dma_burst_strategy *strat,
+ unsigned long *strategy_parameter)
+{
+ *strat = PCI_DMA_BURST_INFINITY;
+ *strategy_parameter = ~0UL;
+}
+#endif
+#endif /* CONFIG_PPC64 */
+
+extern int pci_domain_nr(struct pci_bus *bus);
+
+/* Decide whether to display the domain number in /proc */
+extern int pci_proc_domain(struct pci_bus *bus);
+
+
+struct vm_area_struct;
+/* Map a range of PCI memory or I/O space for a device into user space */
+int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state, int write_combine);
+
+/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
+#define HAVE_PCI_MMAP 1
+
+#if defined(CONFIG_PPC64) || defined(CONFIG_NOT_COHERENT_CACHE)
+/*
+ * For 64-bit kernels, pci_unmap_{single,page} is not a nop.
+ * For 32-bit non-coherent kernels, pci_dma_sync_single_for_cpu() and
+ * so on are not nops.
+ * and thus...
+ */
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
+ dma_addr_t ADDR_NAME;
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
+ __u32 LEN_NAME;
+#define pci_unmap_addr(PTR, ADDR_NAME) \
+ ((PTR)->ADDR_NAME)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
+ (((PTR)->ADDR_NAME) = (VAL))
+#define pci_unmap_len(PTR, LEN_NAME) \
+ ((PTR)->LEN_NAME)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
+ (((PTR)->LEN_NAME) = (VAL))
+
+#else /* 32-bit && coherent */
+
+/* pci_unmap_{page,single} is a nop so... */
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
+#define pci_unmap_addr(PTR, ADDR_NAME) (0)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
+#define pci_unmap_len(PTR, LEN_NAME) (0)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
+
+#endif /* CONFIG_PPC64 || CONFIG_NOT_COHERENT_CACHE */
+
+#ifdef CONFIG_PPC64
+
+/* The PCI address space does not equal the physical memory address
+ * space (we have an IOMMU). The IDE and SCSI device layers use
+ * this boolean for bounce buffer decisions.
+ */
+#define PCI_DMA_BUS_IS_PHYS (0)
+
+#else /* 32-bit */
+
+/* The PCI address space does equal the physical memory
+ * address space (no IOMMU). The IDE and SCSI device layers use
+ * this boolean for bounce buffer decisions.
+ */
+#define PCI_DMA_BUS_IS_PHYS (1)
+
+#endif /* CONFIG_PPC64 */
+
+extern void pcibios_resource_to_bus(struct pci_dev *dev,
+ struct pci_bus_region *region,
+ struct resource *res);
+
+extern void pcibios_bus_to_resource(struct pci_dev *dev,
+ struct resource *res,
+ struct pci_bus_region *region);
+
+static inline struct resource *pcibios_select_root(struct pci_dev *pdev,
+ struct resource *res)
+{
+ struct resource *root = NULL;
+
+ if (res->flags & IORESOURCE_IO)
+ root = &ioport_resource;
+ if (res->flags & IORESOURCE_MEM)
+ root = &iomem_resource;
+
+ return root;
+}
+
+extern void pcibios_setup_new_device(struct pci_dev *dev);
+
+extern void pcibios_claim_one_bus(struct pci_bus *b);
+
+extern void pcibios_resource_survey(void);
+
+extern struct pci_controller *init_phb_dynamic(struct device_node *dn);
+
+extern struct pci_dev *of_create_pci_dev(struct device_node *node,
+ struct pci_bus *bus, int devfn);
+
+extern void of_scan_pci_bridge(struct device_node *node,
+ struct pci_dev *dev);
+
+extern void of_scan_bus(struct device_node *node, struct pci_bus *bus);
+
+extern int pci_read_irq_line(struct pci_dev *dev);
+
+struct file;
+extern pgprot_t pci_phys_mem_access_prot(struct file *file,
+ unsigned long pfn,
+ unsigned long size,
+ pgprot_t prot);
+
+#define HAVE_ARCH_PCI_RESOURCE_TO_USER
+extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
+ const struct resource *rsrc,
+ resource_size_t *start, resource_size_t *end);
+
+extern void pcibios_do_bus_setup(struct pci_bus *bus);
+extern void pcibios_fixup_of_probed_bus(struct pci_bus *bus);
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_POWERPC_PCI_H */
diff --git a/arch/powerpc/include/asm/percpu.h b/arch/powerpc/include/asm/percpu.h
new file mode 100644
index 0000000..f879252
--- /dev/null
+++ b/arch/powerpc/include/asm/percpu.h
@@ -0,0 +1,24 @@
+#ifndef _ASM_POWERPC_PERCPU_H_
+#define _ASM_POWERPC_PERCPU_H_
+#ifdef __powerpc64__
+#include <linux/compiler.h>
+
+/*
+ * Same as asm-generic/percpu.h, except that we store the per cpu offset
+ * in the paca. Based on the x86-64 implementation.
+ */
+
+#ifdef CONFIG_SMP
+
+#include <asm/paca.h>
+
+#define __per_cpu_offset(cpu) (paca[cpu].data_offset)
+#define __my_cpu_offset local_paca->data_offset
+#define per_cpu_offset(x) (__per_cpu_offset(x))
+
+#endif /* CONFIG_SMP */
+#endif /* __powerpc64__ */
+
+#include <asm-generic/percpu.h>
+
+#endif /* _ASM_POWERPC_PERCPU_H_ */
diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h
new file mode 100644
index 0000000..58c0714
--- /dev/null
+++ b/arch/powerpc/include/asm/pgalloc-32.h
@@ -0,0 +1,43 @@
+#ifndef _ASM_POWERPC_PGALLOC_32_H
+#define _ASM_POWERPC_PGALLOC_32_H
+
+#include <linux/threads.h>
+
+extern void __bad_pte(pmd_t *pmd);
+
+extern pgd_t *pgd_alloc(struct mm_struct *mm);
+extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
+
+/*
+ * We don't have any real pmd's, and this code never triggers because
+ * the pgd will always be present.
+ */
+/* #define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); }) */
+#define pmd_free(mm, x) do { } while (0)
+#define __pmd_free_tlb(tlb,x) do { } while (0)
+/* #define pgd_populate(mm, pmd, pte) BUG() */
+
+#ifndef CONFIG_BOOKE
+#define pmd_populate_kernel(mm, pmd, pte) \
+ (pmd_val(*(pmd)) = __pa(pte) | _PMD_PRESENT)
+#define pmd_populate(mm, pmd, pte) \
+ (pmd_val(*(pmd)) = (page_to_pfn(pte) << PAGE_SHIFT) | _PMD_PRESENT)
+#define pmd_pgtable(pmd) pmd_page(pmd)
+#else
+#define pmd_populate_kernel(mm, pmd, pte) \
+ (pmd_val(*(pmd)) = (unsigned long)pte | _PMD_PRESENT)
+#define pmd_populate(mm, pmd, pte) \
+ (pmd_val(*(pmd)) = (unsigned long)lowmem_page_address(pte) | _PMD_PRESENT)
+#define pmd_pgtable(pmd) pmd_page(pmd)
+#endif
+
+extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
+extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
+extern void pte_free_kernel(struct mm_struct *mm, pte_t *pte);
+extern void pte_free(struct mm_struct *mm, pgtable_t pte);
+
+#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte))
+
+#define check_pgt_cache() do { } while (0)
+
+#endif /* _ASM_POWERPC_PGALLOC_32_H */
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
new file mode 100644
index 0000000..812a1d8
--- /dev/null
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -0,0 +1,166 @@
+#ifndef _ASM_POWERPC_PGALLOC_64_H
+#define _ASM_POWERPC_PGALLOC_64_H
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/cpumask.h>
+#include <linux/percpu.h>
+
+#ifndef CONFIG_PPC_SUBPAGE_PROT
+static inline void subpage_prot_free(pgd_t *pgd) {}
+#endif
+
+extern struct kmem_cache *pgtable_cache[];
+
+#define PGD_CACHE_NUM 0
+#define PUD_CACHE_NUM 1
+#define PMD_CACHE_NUM 1
+#define HUGEPTE_CACHE_NUM 2
+#define PTE_NONCACHE_NUM 7 /* from GFP rather than kmem_cache */
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ return kmem_cache_alloc(pgtable_cache[PGD_CACHE_NUM], GFP_KERNEL);
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ subpage_prot_free(pgd);
+ kmem_cache_free(pgtable_cache[PGD_CACHE_NUM], pgd);
+}
+
+#ifndef CONFIG_PPC_64K_PAGES
+
+#define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
+
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ return kmem_cache_alloc(pgtable_cache[PUD_CACHE_NUM],
+ GFP_KERNEL|__GFP_REPEAT);
+}
+
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+ kmem_cache_free(pgtable_cache[PUD_CACHE_NUM], pud);
+}
+
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+ pud_set(pud, (unsigned long)pmd);
+}
+
+#define pmd_populate(mm, pmd, pte_page) \
+ pmd_populate_kernel(mm, pmd, page_address(pte_page))
+#define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+
+#else /* CONFIG_PPC_64K_PAGES */
+
+#define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
+
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+ pte_t *pte)
+{
+ pmd_set(pmd, (unsigned long)pte);
+}
+
+#define pmd_populate(mm, pmd, pte_page) \
+ pmd_populate_kernel(mm, pmd, page_address(pte_page))
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+#endif /* CONFIG_PPC_64K_PAGES */
+
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ return kmem_cache_alloc(pgtable_cache[PMD_CACHE_NUM],
+ GFP_KERNEL|__GFP_REPEAT);
+}
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+ kmem_cache_free(pgtable_cache[PMD_CACHE_NUM], pmd);
+}
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+ unsigned long address)
+{
+ return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+}
+
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
+ unsigned long address)
+{
+ struct page *page;
+ pte_t *pte;
+
+ pte = pte_alloc_one_kernel(mm, address);
+ if (!pte)
+ return NULL;
+ page = virt_to_page(pte);
+ pgtable_page_ctor(page);
+ return page;
+}
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ free_page((unsigned long)pte);
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
+{
+ pgtable_page_dtor(ptepage);
+ __free_page(ptepage);
+}
+
+#define PGF_CACHENUM_MASK 0x7
+
+typedef struct pgtable_free {
+ unsigned long val;
+} pgtable_free_t;
+
+static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
+ unsigned long mask)
+{
+ BUG_ON(cachenum > PGF_CACHENUM_MASK);
+
+ return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
+}
+
+static inline void pgtable_free(pgtable_free_t pgf)
+{
+ void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
+ int cachenum = pgf.val & PGF_CACHENUM_MASK;
+
+ if (cachenum == PTE_NONCACHE_NUM)
+ free_page((unsigned long)p);
+ else
+ kmem_cache_free(pgtable_cache[cachenum], p);
+}
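Editorial note: the two helpers above rely on each page-table cache being aligned to its (power-of-two) table size, so the low three bits of a table address are always zero and can carry the cache index. A worked example, assuming that alignment holds for a PMD page:

/*
 *   pgf = pgtable_free_cache(pmd, PMD_CACHE_NUM, PMD_TABLE_SIZE - 1);
 *	-> pgf.val == (unsigned long)pmd | 1	(PMD_CACHE_NUM == 1)
 *
 *   pgtable_free(pgf) then recovers the pointer with
 *	p = (void *)(pgf.val & ~PGF_CACHENUM_MASK)	(clears the low 3 bits)
 *   and, since 1 != PTE_NONCACHE_NUM, frees p back to pgtable_cache[1].
 */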
+
+extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
+
+#define __pte_free_tlb(tlb,ptepage) \
+do { \
+ pgtable_page_dtor(ptepage); \
+ pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
+ PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \
+} while (0)
+#define __pmd_free_tlb(tlb, pmd) \
+ pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
+ PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
+#ifndef CONFIG_PPC_64K_PAGES
+#define __pud_free_tlb(tlb, pud) \
+ pgtable_free_tlb(tlb, pgtable_free_cache(pud, \
+ PUD_CACHE_NUM, PUD_TABLE_SIZE-1))
+#endif /* CONFIG_PPC_64K_PAGES */
+
+#define check_pgt_cache() do { } while (0)
+
+#endif /* _ASM_POWERPC_PGALLOC_64_H */
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
new file mode 100644
index 0000000..b4505ed
--- /dev/null
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_POWERPC_PGALLOC_H
+#define _ASM_POWERPC_PGALLOC_H
+#ifdef __KERNEL__
+
+#ifdef CONFIG_PPC64
+#include <asm/pgalloc-64.h>
+#else
+#include <asm/pgalloc-32.h>
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_PGALLOC_H */
diff --git a/arch/powerpc/include/asm/pgtable-4k.h b/arch/powerpc/include/asm/pgtable-4k.h
new file mode 100644
index 0000000..6b18ba9
--- /dev/null
+++ b/arch/powerpc/include/asm/pgtable-4k.h
@@ -0,0 +1,117 @@
+#ifndef _ASM_POWERPC_PGTABLE_4K_H
+#define _ASM_POWERPC_PGTABLE_4K_H
+/*
+ * Entries per page directory level. The PTE level must use a 64b record
+ * for each page table entry. The PMD and PGD level use a 32b record for
+ * each entry by assuming that each entry is page aligned.
+ */
+#define PTE_INDEX_SIZE 9
+#define PMD_INDEX_SIZE 7
+#define PUD_INDEX_SIZE 7
+#define PGD_INDEX_SIZE 9
+
+#ifndef __ASSEMBLY__
+#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE)
+#define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE)
+#define PUD_TABLE_SIZE (sizeof(pud_t) << PUD_INDEX_SIZE)
+#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
+#endif /* __ASSEMBLY__ */
+
+#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
+#define PTRS_PER_PMD (1 << PMD_INDEX_SIZE)
+#define PTRS_PER_PUD (1 << PMD_INDEX_SIZE)
+#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
+
+/* PMD_SHIFT determines what a second-level page table entry can map */
+#define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+
+/* With 4k base page size, hugepage PTEs go at the PMD level */
+#define MIN_HUGEPTE_SHIFT PMD_SHIFT
+
+/* PUD_SHIFT determines what a third-level page table entry can map */
+#define PUD_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE)
+#define PUD_SIZE (1UL << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE-1))
+
+/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
+#define PGDIR_SHIFT (PUD_SHIFT + PUD_INDEX_SIZE)
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
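Editorial note: putting the index sizes above together, a worked example assuming PAGE_SHIFT == 12 (4K base pages):

/*
 *   PMD_SHIFT   = 12 + 9 = 21   -> a PMD entry maps 2 MB
 *   PUD_SHIFT   = 21 + 7 = 28   -> a PUD entry maps 256 MB
 *   PGDIR_SHIFT = 28 + 7 = 35   -> a PGD entry maps 32 GB
 *
 * With 2^9 PGD entries, the four-level tree spans 35 + 9 = 44 bits of
 * virtual address space (16 TB).
 */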
+
+/* PTE bits */
+#define _PAGE_HASHPTE 0x0400 /* software: pte has an associated HPTE */
+#define _PAGE_SECONDARY 0x8000 /* software: HPTE is in secondary group */
+#define _PAGE_GROUP_IX 0x7000 /* software: HPTE index within group */
+#define _PAGE_F_SECOND _PAGE_SECONDARY
+#define _PAGE_F_GIX _PAGE_GROUP_IX
+#define _PAGE_SPECIAL 0x10000 /* software: special page */
+#define __HAVE_ARCH_PTE_SPECIAL
+
+/* PTE flags to conserve for HPTE identification */
+#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | \
+ _PAGE_SECONDARY | _PAGE_GROUP_IX)
+
+/* There is no 4K PFN hack on 4K pages */
+#define _PAGE_4K_PFN 0
+
+/* PAGE_MASK gives the right answer below, but only by accident */
+/* It should be preserving the high 48 bits and then specifically */
+/* preserving _PAGE_SECONDARY | _PAGE_GROUP_IX */
+#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
+ _PAGE_HPTEFLAGS)
+
+/* Bits to mask out from a PMD to get to the PTE page */
+#define PMD_MASKED_BITS 0
+/* Bits to mask out from a PUD to get to the PMD page */
+#define PUD_MASKED_BITS 0
+/* Bits to mask out from a PGD to get to the PUD page */
+#define PGD_MASKED_BITS 0
+
+/* shift to put page number into pte */
+#define PTE_RPN_SHIFT (17)
+
+#ifdef STRICT_MM_TYPECHECKS
+#define __real_pte(e,p) ((real_pte_t){(e)})
+#define __rpte_to_pte(r) ((r).pte)
+#else
+#define __real_pte(e,p) (e)
+#define __rpte_to_pte(r) (__pte(r))
+#endif
+#define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >> 12)
+
+#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \
+ do { \
+ index = 0; \
+ shift = mmu_psize_defs[psize].shift; \
+
+#define pte_iterate_hashed_end() } while(0)
+
+#ifdef CONFIG_PPC_HAS_HASH_64K
+#define pte_pagesize_index(mm, addr, pte) get_slice_psize(mm, addr)
+#else
+#define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K
+#endif
+
+/*
+ * 4-level page tables related bits
+ */
+
+#define pgd_none(pgd) (!pgd_val(pgd))
+#define pgd_bad(pgd) (pgd_val(pgd) == 0)
+#define pgd_present(pgd) (pgd_val(pgd) != 0)
+#define pgd_clear(pgdp) (pgd_val(*(pgdp)) = 0)
+#define pgd_page_vaddr(pgd) (pgd_val(pgd) & ~PGD_MASKED_BITS)
+#define pgd_page(pgd) virt_to_page(pgd_page_vaddr(pgd))
+
+#define pud_offset(pgdp, addr) \
+ (((pud_t *) pgd_page_vaddr(*(pgdp))) + \
+ (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
+
+#define pud_ERROR(e) \
+ printk("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
+
+#define remap_4k_pfn(vma, addr, pfn, prot) \
+ remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))
+#endif /* _ASM_POWERPC_PGTABLE_4K_H */
diff --git a/arch/powerpc/include/asm/pgtable-64k.h b/arch/powerpc/include/asm/pgtable-64k.h
new file mode 100644
index 0000000..07b0d8f
--- /dev/null
+++ b/arch/powerpc/include/asm/pgtable-64k.h
@@ -0,0 +1,155 @@
+#ifndef _ASM_POWERPC_PGTABLE_64K_H
+#define _ASM_POWERPC_PGTABLE_64K_H
+
+#include <asm-generic/pgtable-nopud.h>
+
+
+#define PTE_INDEX_SIZE 12
+#define PMD_INDEX_SIZE 12
+#define PUD_INDEX_SIZE 0
+#define PGD_INDEX_SIZE 4
+
+#ifndef __ASSEMBLY__
+#define PTE_TABLE_SIZE (sizeof(real_pte_t) << PTE_INDEX_SIZE)
+#define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE)
+#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
+
+#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
+#define PTRS_PER_PMD (1 << PMD_INDEX_SIZE)
+#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
+
+#ifdef CONFIG_PPC_SUBPAGE_PROT
+/*
+ * For the sub-page protection option, we extend the PGD with one of
+ * these. Basically we have a 3-level tree, with the top level being
+ * the protptrs array. To optimize speed and memory consumption when
+ * only addresses < 4GB are being protected, pointers to the first
+ * four pages of sub-page protection words are stored in the low_prot
+ * array.
+ * Each page of sub-page protection words protects 1GB (4 bytes
+ * protects 64k). For the 3-level tree, each page of pointers then
+ * protects 8TB.
+ */
+struct subpage_prot_table {
+ unsigned long maxaddr; /* only addresses < this are protected */
+ unsigned int **protptrs[2];
+ unsigned int *low_prot[4];
+};
+
+#undef PGD_TABLE_SIZE
+#define PGD_TABLE_SIZE ((sizeof(pgd_t) << PGD_INDEX_SIZE) + \
+ sizeof(struct subpage_prot_table))
+
+#define SBP_L1_BITS (PAGE_SHIFT - 2)
+#define SBP_L2_BITS (PAGE_SHIFT - 3)
+#define SBP_L1_COUNT (1 << SBP_L1_BITS)
+#define SBP_L2_COUNT (1 << SBP_L2_BITS)
+#define SBP_L2_SHIFT (PAGE_SHIFT + SBP_L1_BITS)
+#define SBP_L3_SHIFT (SBP_L2_SHIFT + SBP_L2_BITS)
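Editorial note: the arithmetic behind those constants, as a worked example assuming 64K pages (PAGE_SHIFT == 16) and 8-byte pointers:

/*
 *   SBP_L1_BITS  = 16 - 2 = 14  -> 16K protection words per page, each
 *				    covering 64K  => 1 GB per page of words
 *   SBP_L2_BITS  = 16 - 3 = 13  -> 8K pointers per page of pointers
 *   SBP_L2_SHIFT = 16 + 14 = 30 -> 1 GB, as stated in the comment above
 *   SBP_L3_SHIFT = 30 + 13 = 43 -> 8 TB covered by one page of pointers
 */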
+
+extern void subpage_prot_free(pgd_t *pgd);
+
+static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd)
+{
+ return (struct subpage_prot_table *)(pgd + PTRS_PER_PGD);
+}
+#endif /* CONFIG_PPC_SUBPAGE_PROT */
+#endif /* __ASSEMBLY__ */
+
+/* With 4k base page size, hugepage PTEs go at the PMD level */
+#define MIN_HUGEPTE_SHIFT PAGE_SHIFT
+
+/* PMD_SHIFT determines what a second-level page table entry can map */
+#define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+
+/* PGDIR_SHIFT determines what a third-level page table entry can map */
+#define PGDIR_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE)
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+/* Additional PTE bits (don't change without checking asm in hash_low.S) */
+#define __HAVE_ARCH_PTE_SPECIAL
+#define _PAGE_SPECIAL 0x00000400 /* software: special page */
+#define _PAGE_HPTE_SUB 0x0ffff000 /* combo only: sub pages HPTE bits */
+#define _PAGE_HPTE_SUB0 0x08000000 /* combo only: first sub page */
+#define _PAGE_COMBO 0x10000000 /* this is a combo 4k page */
+#define _PAGE_4K_PFN 0x20000000 /* PFN is for a single 4k page */
+
+/* For 64K page, we don't have a separate _PAGE_HASHPTE bit. Instead,
+ * we set that to be the whole sub-bits mask. The C code will only
+ * test this, so a multi-bit mask will work. For combo pages, this
+ * is equivalent since, effectively, the old _PAGE_HASHPTE was an OR of
+ * all the sub bits. For real 64k pages, we now have the assembly set
+ * _PAGE_HPTE_SUB0 in addition to setting the HIDX bits which overlap
+ * that mask. This is fine as long as the HIDX bits are never set on
+ * a PTE that isn't hashed, which is the case today.
+ *
+ * One small nit: the huge page code does its hashing in C, so we
+ * need to tell it which bit to use.
+ */
+#define _PAGE_HASHPTE _PAGE_HPTE_SUB
+
+/* Note the full page bits must be in the same location as for normal
+ * 4k pages as the same assembly will be used to insert 64K pages
+ * whether the kernel has CONFIG_PPC_64K_PAGES or not
+ */
+#define _PAGE_F_SECOND 0x00008000 /* full page: hidx bits */
+#define _PAGE_F_GIX 0x00007000 /* full page: hidx bits */
+
+/* PTE flags to conserve for HPTE identification */
+#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | _PAGE_COMBO)
+
+/* Shift to put page number into pte.
+ *
+ * That gives us a max RPN of 34 bits, which means a max of 50 bits
+ * of addressable physical space, or 46 bits for the special 4k PFNs.
+ */
+#define PTE_RPN_SHIFT (30)
+#define PTE_RPN_MAX (1UL << (64 - PTE_RPN_SHIFT))
+#define PTE_RPN_MASK (~((1UL<<PTE_RPN_SHIFT)-1))
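Editorial note: spelling out the arithmetic in the comment above (assuming 64K base pages, PAGE_SHIFT == 16):

/*
 *   RPN field    = 64 - PTE_RPN_SHIFT = 64 - 30 = 34 bits
 *   full pages   : physical address = RPN << 16  -> 34 + 16 = 50 bits
 *   _PAGE_4K_PFN : the RPN is a 4K PFN instead   -> 34 + 12 = 46 bits
 */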
+
+/* _PAGE_CHG_MASK is the mask of bits that are to be preserved across
+ * pgprot changes
+ */
+#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
+ _PAGE_ACCESSED)
+
+/* Bits to mask out from a PMD to get to the PTE page */
+#define PMD_MASKED_BITS 0x1ff
+/* Bits to mask out from a PGD/PUD to get to the PMD page */
+#define PUD_MASKED_BITS 0x1ff
+
+/* Manipulate "rpte" values */
+#define __real_pte(e,p) ((real_pte_t) { \
+ (e), pte_val(*((p) + PTRS_PER_PTE)) })
+#define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \
+ (((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
+#define __rpte_to_pte(r) ((r).pte)
+#define __rpte_sub_valid(rpte, index) \
+ (pte_val(rpte.pte) & (_PAGE_HPTE_SUB0 >> (index)))
+
+
+/* Trick: we set __end to va + 64k, which happens to work for
+ * a 16M page as well, since we want only one iteration
+ */
+#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \
+ do { \
+ unsigned long __end = va + PAGE_SIZE; \
+ unsigned __split = (psize == MMU_PAGE_4K || \
+ psize == MMU_PAGE_64K_AP); \
+ shift = mmu_psize_defs[psize].shift; \
+ for (index = 0; va < __end; index++, va += (1L << shift)) { \
+ if (!__split || __rpte_sub_valid(rpte, index)) do { \
+
+#define pte_iterate_hashed_end() } while(0); } } while(0)
+
+#define pte_pagesize_index(mm, addr, pte) \
+ (((pte) & _PAGE_COMBO)? MMU_PAGE_4K: MMU_PAGE_64K)
+
+#define remap_4k_pfn(vma, addr, pfn, prot) \
+ remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, \
+ __pgprot(pgprot_val((prot)) | _PAGE_4K_PFN))
+
+#endif /* _ASM_POWERPC_PGTABLE_64K_H */
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
new file mode 100644
index 0000000..6fe39e3
--- /dev/null
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -0,0 +1,802 @@
+#ifndef _ASM_POWERPC_PGTABLE_PPC32_H
+#define _ASM_POWERPC_PGTABLE_PPC32_H
+
+#include <asm-generic/pgtable-nopmd.h>
+
+#ifndef __ASSEMBLY__
+#include <linux/sched.h>
+#include <linux/threads.h>
+#include <asm/io.h> /* For sub-arch specific PPC_PIN_SIZE */
+
+extern unsigned long va_to_phys(unsigned long address);
+extern pte_t *va_to_pte(unsigned long address);
+extern unsigned long ioremap_bot, ioremap_base;
+
+#ifdef CONFIG_44x
+extern int icache_44x_need_flush;
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * The PowerPC MMU uses a hash table containing PTEs, together with
+ * a set of 16 segment registers (on 32-bit implementations), to define
+ * the virtual to physical address mapping.
+ *
+ * We use the hash table as an extended TLB, i.e. a cache of currently
+ * active mappings. We maintain a two-level page table tree, much
+ * like that used by the i386, for the sake of the Linux memory
+ * management code. Low-level assembler code in hashtable.S
+ * (procedure hash_page) is responsible for extracting ptes from the
+ * tree and putting them into the hash table when necessary, and
+ * updating the accessed and modified bits in the page table tree.
+ */
+
+/*
+ * The PowerPC MPC8xx uses a TLB with hardware assisted, software tablewalk.
+ * We also use the two level tables, but we can put the real bits in them
+ * needed for the TLB and tablewalk. These definitions require Mx_CTR.PPM = 0,
+ * Mx_CTR.PPCS = 0, and MD_CTR.TWAM = 1. The level 2 descriptor has
+ * additional page protection (when Mx_CTR.PPCS = 1) that allows TLB hit
+ * based upon user/super access. The TLB does not have accessed nor write
+ * protect. We assume that if the TLB gets loaded with an entry it is
+ * accessed, and overload the changed bit for write protect. We use
+ * two bits in the software pte that are supposed to be set to zero in
+ * the TLB entry (24 and 25) for these indicators. Although the level 1
+ * descriptor contains the guarded and writethrough/copyback bits, we can
+ * set these at the page level since they get copied from the Mx_TWC
+ * register when the TLB entry is loaded. We will use bit 27 for guard, since
+ * that is where it exists in the MD_TWC, and bit 26 for writethrough.
+ * These will get masked from the level 2 descriptor at TLB load time, and
+ * copied to the MD_TWC before it gets loaded.
+ * Large page sizes added. We currently support two sizes, 4K and 8M.
+ * This also allows a TLB handler optimization because we can directly
+ * load the PMD into MD_TWC. The 8M pages are only used for kernel
+ * mapping of well known areas. The PMD (PGD) entries contain control
+ * flags in addition to the address, so care must be taken that the
+ * software no longer assumes these are only pointers.
+ */
+
+/*
+ * At present, all PowerPC 400-class processors share a similar TLB
+ * architecture. The instruction and data sides share a unified,
+ * 64-entry, fully-associative TLB which is maintained totally under
+ * software control. In addition, the instruction side has a
+ * hardware-managed, 4-entry, fully-associative TLB which serves as a
+ * first level to the shared TLB. These two TLBs are known as the UTLB
+ * and ITLB, respectively (see "mmu.h" for definitions).
+ */
+
+/*
+ * The normal case is that PTEs are 32-bits and we have a 1-page
+ * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus
+ *
+ * For any >32-bit physical address platform, we can use the following
+ * two level page table layout where the pgdir is 8KB and the MS 13 bits
+ * are an index to the second level table. The combined pgdir/pmd first
+ * level has 2048 entries and the second level has 512 64-bit PTE entries.
+ * -Matt
+ */
+/* PGDIR_SHIFT determines what a top-level page table entry can map */
+#define PGDIR_SHIFT (PAGE_SHIFT + PTE_SHIFT)
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+/*
+ * entries per page directory level: our page-table tree is two-level, so
+ * we don't really have any PMD directory.
+ */
+#ifndef __ASSEMBLY__
+#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_SHIFT)
+#define PGD_TABLE_SIZE (sizeof(pgd_t) << (32 - PGDIR_SHIFT))
+#endif /* __ASSEMBLY__ */
+
+#define PTRS_PER_PTE (1 << PTE_SHIFT)
+#define PTRS_PER_PMD 1
+#define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT))
+
+#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
+#define FIRST_USER_ADDRESS 0
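Editorial note: for the common configuration described above (4K pages with 32-bit PTEs, so PTE_SHIFT is 10), the geometry works out as follows; a worked example, not a new definition:

/*
 *   PGDIR_SHIFT      = 12 + 10 = 22 -> each PGD entry maps 4 MB
 *   PTRS_PER_PGD     = 1 << (32 - 22) = 1024 entries, i.e. one 4K pgdir
 *			page covers the whole 32-bit address space
 *   USER_PTRS_PER_PGD = TASK_SIZE / 4 MB
 */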
+
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
+ (unsigned long long)pte_val(e))
+#define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+/*
+ * Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 16MB value just means that there will be a 16MB "hole" after the
+ * physical memory until the kernel virtual memory starts. That means that
+ * any out-of-bounds memory accesses will hopefully be caught.
+ * The vmalloc() routines leave a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+ *
+ * We no longer map larger than phys RAM with the BATs so we don't have
+ * to worry about the VMALLOC_OFFSET causing problems. We do have to worry
+ * about our early calls to ioremap(), which grow down from ioremap_base,
+ * running into the VM area allocations (growing upwards
+ * from VMALLOC_START). For this reason we have ioremap_bot to check when
+ * we actually run into our mappings setup in the early boot with the VM
+ * system. This really does become a problem for machines with good amounts
+ * of RAM. -- Cort
+ */
+#define VMALLOC_OFFSET (0x1000000) /* 16M */
+#ifdef PPC_PIN_SIZE
+#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
+#else
+#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
+#endif
+#define VMALLOC_END ioremap_bot
+
+/*
+ * Bits in a linux-style PTE. These match the bits in the
+ * (hardware-defined) PowerPC PTE as closely as possible.
+ */
+
+#if defined(CONFIG_40x)
+
+/* There are several potential gotchas here. The 40x hardware TLBLO
+ field looks like this:
+
+ 0 1 2 3 4 ... 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ RPN..................... 0 0 EX WR ZSEL....... W I M G
+
+ Where possible we make the Linux PTE bits match up with this
+
+ - bits 20 and 21 must be cleared, because we use 4k pages (40x can
+ support down to 1k pages), this is done in the TLBMiss exception
+ handler.
+ - We use only zones 0 (for kernel pages) and 1 (for user pages)
+ of the 16 available. Bits 24-26 of the TLB are cleared in the TLB
+ miss handler. Bit 27 is PAGE_USER, thus selecting the correct
+ zone.
+ - PRESENT *must* be in the bottom two bits because swap cache
+ entries use the top 30 bits. Because 40x doesn't support SMP
+ anyway, M is irrelevant so we borrow it for PAGE_PRESENT. Bit 30
+ is cleared in the TLB miss handler before the TLB entry is loaded.
+ - All other bits of the PTE are loaded into TLBLO without
+ modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for
+ software PTE bits. We actually use bits 21, 24, 25, and
+ 30 respectively for the software bits: ACCESSED, DIRTY, RW, and
+ PRESENT.
+*/
+
+/* Definitions for 40x embedded chips. */
+#define _PAGE_GUARDED 0x001 /* G: page is guarded from prefetch */
+#define _PAGE_FILE 0x001 /* when !present: nonlinear file mapping */
+#define _PAGE_PRESENT 0x002 /* software: PTE contains a translation */
+#define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */
+#define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */
+#define _PAGE_USER 0x010 /* matches one of the zone permission bits */
+#define _PAGE_RW 0x040 /* software: Writes permitted */
+#define _PAGE_DIRTY 0x080 /* software: dirty page */
+#define _PAGE_HWWRITE 0x100 /* hardware: Dirty & RW, set in exception */
+#define _PAGE_HWEXEC 0x200 /* hardware: EX permission */
+#define _PAGE_ACCESSED 0x400 /* software: R: page referenced */
+
+#define _PMD_PRESENT 0x400 /* PMD points to page of PTEs */
+#define _PMD_BAD 0x802
+#define _PMD_SIZE 0x0e0 /* size field, != 0 for large-page PMD entry */
+#define _PMD_SIZE_4M 0x0c0
+#define _PMD_SIZE_16M 0x0e0
+#define PMD_PAGE_SIZE(pmdval) (1024 << (((pmdval) & _PMD_SIZE) >> 4))
+
+/* Until my rework is finished, 40x still needs atomic PTE updates */
+#define PTE_ATOMIC_UPDATES 1
+
+#elif defined(CONFIG_44x)
+/*
+ * Definitions for PPC440
+ *
+ * Because of the 3 word TLB entries to support 36-bit addressing,
+ * the attributes are difficult to map in such a fashion that they
+ * are easily loaded during exception processing. I decided to
+ * organize the entry so the ERPN is the only portion in the
+ * upper word of the PTE and the attribute bits below are packed
+ * in as sensibly as they can be in the area below a 4KB page size
+ * oriented RPN. This at least makes it easy to load the RPN and
+ * ERPN fields in the TLB. -Matt
+ *
+ * Note that these bits preclude future use of a page size
+ * less than 4KB.
+ *
+ *
+ * PPC 440 core has following TLB attribute fields;
+ *
+ * TLB1:
+ * 0 1 2 3 4 ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ * RPN................................. - - - - - - ERPN.......
+ *
+ * TLB2:
+ * 0 1 2 3 4 ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ * - - - - - - U0 U1 U2 U3 W I M G E - UX UW UR SX SW SR
+ *
+ * Newer 440 cores (440x6 as used on AMCC 460EX/460GT) have additional
+ * TLB2 storage attribute fields. Those are:
+ *
+ * TLB2:
+ * 0...10 11 12 13 14 15 16...31
+ * no change WL1 IL1I IL1D IL2I IL2D no change
+ *
+ * There are some constraints and options to consider when deciding how to
+ * map the software bits into the TLB entry.
+ *
+ * - PRESENT *must* be in the bottom three bits because swap cache
+ * entries use the top 29 bits for TLB2.
+ *
+ * - FILE *must* be in the bottom three bits because swap cache
+ * entries use the top 29 bits for TLB2.
+ *
+ * - CACHE COHERENT bit (M) has no effect on PPC440 core, because it
+ * doesn't support SMP. So we can use this as software bit, like
+ * DIRTY.
+ *
+ * With the PPC 44x Linux implementation, the 0-11th LSBs of the PTE are used
+ * for memory protection related functions (see PTE structure in
+ * include/asm-ppc/mmu.h). The _PAGE_XXX definitions in this file map to the
+ * above bits. Note that the bit values are CPU specific, not architecture
+ * specific.
+ *
+ * The kernel PTE entry holds an arch-dependent swp_entry structure under
+ * certain situations. In other words, in such situations some portion of
+ * the PTE bits are used as a swp_entry. In the PPC implementation, the
+ * 3rd-24th LSBs are shared with the swp_entry, while the lowest three bits
+ * (0-2) still hold protection values. That means those three protection
+ * bits are reserved for both the PTE and the SWAP entry in the three
+ * least significant bits.
+ *
+ * There are three protection bits available for SWAP entry:
+ * _PAGE_PRESENT
+ * _PAGE_FILE
+ * _PAGE_HASHPTE (if HW has)
+ *
+ * So those three bits have to be inside of 0-2nd LSB of PTE.
+ *
+ */
+
+#define _PAGE_PRESENT 0x00000001 /* S: PTE valid */
+#define _PAGE_RW 0x00000002 /* S: Write permission */
+#define _PAGE_FILE 0x00000004 /* S: nonlinear file mapping */
+#define _PAGE_HWEXEC 0x00000004 /* H: Execute permission */
+#define _PAGE_ACCESSED 0x00000008 /* S: Page referenced */
+#define _PAGE_DIRTY 0x00000010 /* S: Page dirty */
+#define _PAGE_USER 0x00000040 /* S: User page */
+#define _PAGE_ENDIAN 0x00000080 /* H: E bit */
+#define _PAGE_GUARDED 0x00000100 /* H: G bit */
+#define _PAGE_COHERENT 0x00000200 /* H: M bit */
+#define _PAGE_NO_CACHE 0x00000400 /* H: I bit */
+#define _PAGE_WRITETHRU 0x00000800 /* H: W bit */
+
+/* TODO: Add large page lowmem mapping support */
+#define _PMD_PRESENT 0
+#define _PMD_PRESENT_MASK (PAGE_MASK)
+#define _PMD_BAD (~PAGE_MASK)
+
+/* ERPN in a PTE never gets cleared, ignore it */
+#define _PTE_NONE_MASK 0xffffffff00000000ULL
+
+
+#elif defined(CONFIG_FSL_BOOKE)
+/*
+ MMU Assist Register 3:
+
+ 32 33 34 35 36 ... 50 51 52 53 54 55 56 57 58 59 60 61 62 63
+ RPN...................... 0 0 U0 U1 U2 U3 UX SX UW SW UR SR
+
+ - PRESENT *must* be in the bottom three bits because swap cache
+ entries use the top 29 bits.
+
+ - FILE *must* be in the bottom three bits because swap cache
+ entries use the top 29 bits.
+*/
+
+/* Definitions for FSL Book-E Cores */
+#define _PAGE_PRESENT 0x00001 /* S: PTE contains a translation */
+#define _PAGE_USER 0x00002 /* S: User page (maps to UR) */
+#define _PAGE_FILE 0x00002 /* S: when !present: nonlinear file mapping */
+#define _PAGE_RW 0x00004 /* S: Write permission (SW) */
+#define _PAGE_DIRTY 0x00008 /* S: Page dirty */
+#define _PAGE_HWEXEC 0x00010 /* H: SX permission */
+#define _PAGE_ACCESSED 0x00020 /* S: Page referenced */
+
+#define _PAGE_ENDIAN 0x00040 /* H: E bit */
+#define _PAGE_GUARDED 0x00080 /* H: G bit */
+#define _PAGE_COHERENT 0x00100 /* H: M bit */
+#define _PAGE_NO_CACHE 0x00200 /* H: I bit */
+#define _PAGE_WRITETHRU 0x00400 /* H: W bit */
+
+#ifdef CONFIG_PTE_64BIT
+/* ERPN in a PTE never gets cleared, ignore it */
+#define _PTE_NONE_MASK 0xffffffffffff0000ULL
+#endif
+
+#define _PMD_PRESENT 0
+#define _PMD_PRESENT_MASK (PAGE_MASK)
+#define _PMD_BAD (~PAGE_MASK)
+
+#elif defined(CONFIG_8xx)
+/* Definitions for 8xx embedded chips. */
+#define _PAGE_PRESENT 0x0001 /* Page is valid */
+#define _PAGE_FILE 0x0002 /* when !present: nonlinear file mapping */
+#define _PAGE_NO_CACHE 0x0002 /* I: cache inhibit */
+#define _PAGE_SHARED 0x0004 /* No ASID (context) compare */
+
+/* These five software bits must be masked out when the entry is loaded
+ * into the TLB.
+ */
+#define _PAGE_EXEC 0x0008 /* software: i-cache coherency required */
+#define _PAGE_GUARDED 0x0010 /* software: guarded access */
+#define _PAGE_DIRTY 0x0020 /* software: page changed */
+#define _PAGE_RW 0x0040 /* software: user write access allowed */
+#define _PAGE_ACCESSED 0x0080 /* software: page referenced */
+
+/* Setting any bits in the nibble with the following two controls will
+ * require a TLB exception handler change. It is assumed unused bits
+ * are always zero.
+ */
+#define _PAGE_HWWRITE 0x0100 /* h/w write enable: never set in Linux PTE */
+#define _PAGE_USER 0x0800 /* One of the PP bits, the other is USER&~RW */
+
+#define _PMD_PRESENT 0x0001
+#define _PMD_BAD 0x0ff0
+#define _PMD_PAGE_MASK 0x000c
+#define _PMD_PAGE_8M 0x000c
+
+#define _PTE_NONE_MASK _PAGE_ACCESSED
+
+/* Until my rework is finished, 8xx still needs atomic PTE updates */
+#define PTE_ATOMIC_UPDATES 1
+
+#else /* CONFIG_6xx */
+/* Definitions for 60x, 740/750, etc. */
+#define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
+#define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
+#define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
+#define _PAGE_USER 0x004 /* usermode access allowed */
+#define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
+#define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
+#define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
+#define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
+#define _PAGE_DIRTY 0x080 /* C: page changed */
+#define _PAGE_ACCESSED 0x100 /* R: page referenced */
+#define _PAGE_EXEC 0x200 /* software: i-cache coherency required */
+#define _PAGE_RW 0x400 /* software: user write access allowed */
+
+#define _PTE_NONE_MASK _PAGE_HASHPTE
+
+#define _PMD_PRESENT 0
+#define _PMD_PRESENT_MASK (PAGE_MASK)
+#define _PMD_BAD (~PAGE_MASK)
+
+/* Hash table based platforms need atomic updates of the linux PTE */
+#define PTE_ATOMIC_UPDATES 1
+
+#endif
+
+/*
+ * Some bits are only used on some cpu families...
+ */
+#ifndef _PAGE_HASHPTE
+#define _PAGE_HASHPTE 0
+#endif
+#ifndef _PTE_NONE_MASK
+#define _PTE_NONE_MASK 0
+#endif
+#ifndef _PAGE_SHARED
+#define _PAGE_SHARED 0
+#endif
+#ifndef _PAGE_HWWRITE
+#define _PAGE_HWWRITE 0
+#endif
+#ifndef _PAGE_HWEXEC
+#define _PAGE_HWEXEC 0
+#endif
+#ifndef _PAGE_EXEC
+#define _PAGE_EXEC 0
+#endif
+#ifndef _PAGE_ENDIAN
+#define _PAGE_ENDIAN 0
+#endif
+#ifndef _PAGE_COHERENT
+#define _PAGE_COHERENT 0
+#endif
+#ifndef _PAGE_WRITETHRU
+#define _PAGE_WRITETHRU 0
+#endif
+#ifndef _PMD_PRESENT_MASK
+#define _PMD_PRESENT_MASK _PMD_PRESENT
+#endif
+#ifndef _PMD_SIZE
+#define _PMD_SIZE 0
+#define PMD_PAGE_SIZE(pmd) bad_call_to_PMD_PAGE_SIZE()
+#endif
+
+#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+
+
+#define PAGE_PROT_BITS __pgprot(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
+ _PAGE_WRITETHRU | _PAGE_ENDIAN | \
+ _PAGE_USER | _PAGE_ACCESSED | \
+ _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \
+ _PAGE_EXEC | _PAGE_HWEXEC)
+/*
+ * Note: the _PAGE_COHERENT bit automatically gets set in the hardware
+ * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
+ * to have it in the Linux PTE, and in fact the bit could be reused for
+ * another purpose. -- paulus.
+ */
+
+#ifdef CONFIG_44x
+#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_GUARDED)
+#else
+#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED)
+#endif
+#define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)
+#define _PAGE_KERNEL (_PAGE_BASE | _PAGE_SHARED | _PAGE_WRENABLE)
+
+#ifdef CONFIG_PPC_STD_MMU
+/* On standard PPC MMU, no user access implies kernel read/write access,
+ * so to write-protect kernel memory we must turn on user access */
+#define _PAGE_KERNEL_RO (_PAGE_BASE | _PAGE_SHARED | _PAGE_USER)
+#else
+#define _PAGE_KERNEL_RO (_PAGE_BASE | _PAGE_SHARED)
+#endif
+
+#define _PAGE_IO (_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED)
+#define _PAGE_RAM (_PAGE_KERNEL | _PAGE_HWEXEC)
+
+#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
+ defined(CONFIG_KPROBES)
+/* We want the debuggers to be able to set breakpoints anywhere, so
+ * don't write protect the kernel text */
+#define _PAGE_RAM_TEXT _PAGE_RAM
+#else
+#define _PAGE_RAM_TEXT (_PAGE_KERNEL_RO | _PAGE_HWEXEC)
+#endif
+
+#define PAGE_NONE __pgprot(_PAGE_BASE)
+#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
+#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+
+#define PAGE_KERNEL __pgprot(_PAGE_RAM)
+#define PAGE_KERNEL_NOCACHE __pgprot(_PAGE_IO)
+
+/*
+ * The PowerPC can only do execute protection on a segment (256MB) basis,
+ * not on a page basis. So we consider execute permission the same as read.
+ * Also, write permissions imply read permissions.
+ * This is the closest we can get..
+ */
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY_X
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY_X
+#define __P100 PAGE_READONLY
+#define __P101 PAGE_READONLY_X
+#define __P110 PAGE_COPY
+#define __P111 PAGE_COPY_X
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY_X
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED_X
+#define __S100 PAGE_READONLY
+#define __S101 PAGE_READONLY_X
+#define __S110 PAGE_SHARED
+#define __S111 PAGE_SHARED_X
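
A reading aid, not part of the patch: the __P*/__S* values above are consumed by
the generic mm layer, which builds a 16-entry protection map indexed by the low
VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits of vm_flags. A minimal sketch of that
lookup, assuming the standard vm_flags encoding:

    #include <linux/mm.h>

    /* Sketch only: mirrors how mm/mmap.c builds protection_map[] from the
     * __P000..__S111 values above and indexes it with the low vm_flags bits. */
    static pgprot_t example_protection_map[16] = {
            __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
            __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
    };

    static pgprot_t example_prot_for_flags(unsigned long vm_flags)
    {
            return example_protection_map[vm_flags &
                            (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
    }
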
+
+#ifndef __ASSEMBLY__
+/* Make sure we get a link error if PMD_PAGE_SIZE is ever called on a
+ * kernel without large page PMD support */
+extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
+
+/*
+ * Conversions between PTE values and page frame numbers.
+ */
+
+/* In some cases we want to additionally adjust where the pfn is in the pte to
+ * allow room for more flags */
+#if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
+#define PFN_SHIFT_OFFSET (PAGE_SHIFT + 8)
+#else
+#define PFN_SHIFT_OFFSET (PAGE_SHIFT)
+#endif
+
+#define pte_pfn(x) (pte_val(x) >> PFN_SHIFT_OFFSET)
+#define pte_page(x) pfn_to_page(pte_pfn(x))
+
+#define pfn_pte(pfn, prot) __pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) |\
+ pgprot_val(prot))
+#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
+#endif /* __ASSEMBLY__ */
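
Illustration only (not part of the patch): a pfn and a protection are packed
into a PTE by pfn_pte()/mk_pte() and recovered with pte_pfn(). The helper name
below is hypothetical.

    #include <asm/pgtable.h>
    #include <asm/bug.h>

    /* Sketch: pfn_pte() shifts the pfn up by PFN_SHIFT_OFFSET and ORs in the
     * protection bits; pte_pfn() undoes the shift. */
    static void example_pfn_roundtrip(unsigned long pfn)
    {
            pte_t pte = pfn_pte(pfn, PAGE_KERNEL);

            BUG_ON(pte_pfn(pte) != pfn);
    }
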
+
+#define pte_none(pte) ((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
+#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
+#define pte_clear(mm,addr,ptep) do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0)
+
+#define pmd_none(pmd) (!pmd_val(pmd))
+#define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD)
+#define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK)
+#define pmd_clear(pmdp) do { pmd_val(*(pmdp)) = 0; } while (0)
+
+#ifndef __ASSEMBLY__
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
+static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
+static inline int pte_special(pte_t pte) { return 0; }
+
+static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
+static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
+
+static inline pte_t pte_wrprotect(pte_t pte) {
+ pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
+static inline pte_t pte_mkclean(pte_t pte) {
+ pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
+static inline pte_t pte_mkold(pte_t pte) {
+ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
+
+static inline pte_t pte_mkwrite(pte_t pte) {
+ pte_val(pte) |= _PAGE_RW; return pte; }
+static inline pte_t pte_mkdirty(pte_t pte) {
+ pte_val(pte) |= _PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkyoung(pte_t pte) {
+ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkspecial(pte_t pte) {
+ return pte; }
+static inline unsigned long pte_pgprot(pte_t pte)
+{
+ return __pgprot(pte_val(pte)) & PAGE_PROT_BITS;
+}
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
+ return pte;
+}
+
+/*
+ * When flushing the tlb entry for a page, we also need to flush the hash
+ * table entry. flush_hash_pages is assembler (for speed) in hashtable.S.
+ */
+extern int flush_hash_pages(unsigned context, unsigned long va,
+ unsigned long pmdval, int count);
+
+/* Add an HPTE to the hash table */
+extern void add_hash_page(unsigned context, unsigned long va,
+ unsigned long pmdval);
+
+/*
+ * Atomic PTE updates.
+ *
+ * pte_update clears and sets bits atomically, and returns
+ * the old pte value. In the 64-bit PTE case we lock around the
+ * low PTE word since we expect ALL flag bits to be there
+ */
+#ifndef CONFIG_PTE_64BIT
+static inline unsigned long pte_update(pte_t *p,
+ unsigned long clr,
+ unsigned long set)
+{
+#ifdef PTE_ATOMIC_UPDATES
+ unsigned long old, tmp;
+
+ __asm__ __volatile__("\
+1: lwarx %0,0,%3\n\
+ andc %1,%0,%4\n\
+ or %1,%1,%5\n"
+ PPC405_ERR77(0,%3)
+" stwcx. %1,0,%3\n\
+ bne- 1b"
+ : "=&r" (old), "=&r" (tmp), "=m" (*p)
+ : "r" (p), "r" (clr), "r" (set), "m" (*p)
+ : "cc" );
+#else /* PTE_ATOMIC_UPDATES */
+ unsigned long old = pte_val(*p);
+ *p = __pte((old & ~clr) | set);
+#endif /* !PTE_ATOMIC_UPDATES */
+
+#ifdef CONFIG_44x
+ if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC))
+ icache_44x_need_flush = 1;
+#endif
+ return old;
+}
+#else /* CONFIG_PTE_64BIT */
+/* TODO: Change that to only modify the low word and move set_pte_at()
+ * out of line
+ */
+static inline unsigned long long pte_update(pte_t *p,
+ unsigned long clr,
+ unsigned long set)
+{
+#ifdef PTE_ATOMIC_UPDATES
+ unsigned long long old;
+ unsigned long tmp;
+
+ __asm__ __volatile__("\
+1: lwarx %L0,0,%4\n\
+ lwzx %0,0,%3\n\
+ andc %1,%L0,%5\n\
+ or %1,%1,%6\n"
+ PPC405_ERR77(0,%3)
+" stwcx. %1,0,%4\n\
+ bne- 1b"
+ : "=&r" (old), "=&r" (tmp), "=m" (*p)
+ : "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
+ : "cc" );
+#else /* PTE_ATOMIC_UPDATES */
+ unsigned long long old = pte_val(*p);
+ *p = __pte((old & ~(unsigned long long)clr) | set);
+#endif /* !PTE_ATOMIC_UPDATES */
+
+#ifdef CONFIG_44x
+ if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC))
+ icache_44x_need_flush = 1;
+#endif
+ return old;
+}
+#endif /* CONFIG_PTE_64BIT */
+
+/*
+ * set_pte stores a linux PTE into the linux page table.
+ * On machines which use an MMU hash table we avoid changing the
+ * _PAGE_HASHPTE bit.
+ */
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+#if _PAGE_HASHPTE != 0
+ pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte) & ~_PAGE_HASHPTE);
+#else
+ *ptep = pte;
+#endif
+}
+
+/*
+ * 2.6 calls this without flushing the TLB entry; this is wrong
+ * for our hash-based implementation, so we fix that up here.
+ */
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
+{
+ unsigned long old;
+ old = pte_update(ptep, _PAGE_ACCESSED, 0);
+#if _PAGE_HASHPTE != 0
+ if (old & _PAGE_HASHPTE) {
+ unsigned long ptephys = __pa(ptep) & PAGE_MASK;
+ flush_hash_pages(context, addr, ptephys, 1);
+ }
+#endif
+ return (old & _PAGE_ACCESSED) != 0;
+}
+#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
+ __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
+}
+
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
+}
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ ptep_set_wrprotect(mm, addr, ptep);
+}
+
+
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
+{
+ unsigned long bits = pte_val(entry) &
+ (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW);
+ pte_update(ptep, 0, bits);
+}
+
+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
+({ \
+ int __changed = !pte_same(*(__ptep), __entry); \
+ if (__changed) { \
+ __ptep_set_access_flags(__ptep, __entry, __dirty); \
+ flush_tlb_page_nohash(__vma, __address); \
+ } \
+ __changed; \
+})
+
+/*
+ * Macro to mark a page protection value as "uncacheable".
+ */
+#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))
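
For illustration only (not part of the patch): the usual consumer of
pgprot_noncached() is a driver mmap() handler mapping device registers. A
minimal sketch, assuming a hypothetical driver that already knows the pfn of
its register block:

    #include <linux/mm.h>

    /* Sketch: map `size` bytes of MMIO at `pfn` into userspace, uncached and
     * guarded, using io_remap_pfn_range() from asm/pgtable.h. */
    static int example_mmap_regs(struct vm_area_struct *vma, unsigned long pfn,
                                 unsigned long size)
    {
            vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
            return io_remap_pfn_range(vma, vma->vm_start, pfn, size,
                                      vma->vm_page_prot);
    }
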
+
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t vma_prot);
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+
+#define __HAVE_ARCH_PTE_SAME
+#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
+
+/*
+ * Note that on Book E processors, the pmd contains the kernel virtual
+ * (lowmem) address of the pte page. The physical address is less useful
+ * because everything runs with translation enabled (even the TLB miss
+ * handler). On everything else the pmd contains the physical address
+ * of the pte page. -- paulus
+ */
+#ifndef CONFIG_BOOKE
+#define pmd_page_vaddr(pmd) \
+ ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+#define pmd_page(pmd) \
+ (mem_map + (pmd_val(pmd) >> PAGE_SHIFT))
+#else
+#define pmd_page_vaddr(pmd) \
+ ((unsigned long) (pmd_val(pmd) & PAGE_MASK))
+#define pmd_page(pmd) \
+ pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
+#endif
+
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/* to find an entry in a page-table-directory */
+#define pgd_index(address) ((address) >> PGDIR_SHIFT)
+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
+
+/* Find an entry in the third-level page table.. */
+#define pte_index(address) \
+ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, addr) \
+ ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
+#define pte_offset_map(dir, addr) \
+ ((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
+#define pte_offset_map_nested(dir, addr) \
+ ((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))
+
+#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
+#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
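
Not part of the patch, just a usage sketch: pte_offset_map() kmaps the PTE page
(relevant with HIGHPTE), so every lookup must be paired with pte_unmap(). The
`pmd` argument is assumed to point at a valid, present pmd entry.

    /* Sketch: read one user PTE through the temporary KM_PTE0 mapping. */
    static pte_t example_read_pte(pmd_t *pmd, unsigned long addr)
    {
            pte_t *ptep = pte_offset_map(pmd, addr);
            pte_t pte = *ptep;

            pte_unmap(ptep);
            return pte;
    }
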
+
+/*
+ * Encode and decode a swap entry.
+ * Note that the bits we use in a PTE for representing a swap entry
+ * must not include the _PAGE_PRESENT bit, the _PAGE_FILE bit, or the
+ * _PAGE_HASHPTE bit (if used). -- paulus
+ */
+#define __swp_type(entry) ((entry).val & 0x1f)
+#define __swp_offset(entry) ((entry).val >> 5)
+#define __swp_entry(type, offset) ((swp_entry_t) { (type) | ((offset) << 5) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 })
+
+/* Encode and decode a nonlinear file mapping entry */
+#define PTE_FILE_MAX_BITS 29
+#define pte_to_pgoff(pte) (pte_val(pte) >> 3)
+#define pgoff_to_pte(off) ((pte_t) { ((off) << 3) | _PAGE_FILE })
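
A sketch (not in the patch) of how the 32-bit swap encoding above keeps clear of
_PAGE_PRESENT, _PAGE_FILE and _PAGE_HASHPTE: the type lives in the low 5 bits of
the swp_entry_t, the offset starts at bit 5, and the whole value is shifted left
by 3 when stored in a PTE.

    #include <linux/swap.h>
    #include <asm/bug.h>

    /* Sketch: encode a (type, offset) pair, store it as a non-present PTE,
     * and decode it again. */
    static void example_swap_roundtrip(unsigned long type, unsigned long offset)
    {
            swp_entry_t entry = __swp_entry(type, offset);
            pte_t pte = __swp_entry_to_pte(entry);

            BUG_ON(__swp_type(__pte_to_swp_entry(pte)) != type);
            BUG_ON(__swp_offset(__pte_to_swp_entry(pte)) != offset);
    }
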
+
+/*
+ * No page table caches to initialise
+ */
+#define pgtable_cache_init() do { } while (0)
+
+extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
+ pmd_t **pmdp);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_PGTABLE_PPC32_H */
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
new file mode 100644
index 0000000..db0b8f3
--- /dev/null
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -0,0 +1,468 @@
+#ifndef _ASM_POWERPC_PGTABLE_PPC64_H_
+#define _ASM_POWERPC_PGTABLE_PPC64_H_
+/*
+ * This file contains the functions and defines necessary to modify and use
+ * the ppc64 hashed page table.
+ */
+
+#ifndef __ASSEMBLY__
+#include <linux/stddef.h>
+#include <asm/tlbflush.h>
+#endif /* __ASSEMBLY__ */
+
+#ifdef CONFIG_PPC_64K_PAGES
+#include <asm/pgtable-64k.h>
+#else
+#include <asm/pgtable-4k.h>
+#endif
+
+#define FIRST_USER_ADDRESS 0
+
+/*
+ * Size of EA range mapped by our pagetables.
+ */
+#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
+ PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
+#define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)
+
+#if TASK_SIZE_USER64 > PGTABLE_RANGE
+#error TASK_SIZE_USER64 exceeds pagetable range
+#endif
+
+#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
+#error TASK_SIZE_USER64 exceeds user VSID range
+#endif
+
+
+/*
+ * Define the address range of the vmalloc VM area.
+ */
+#define VMALLOC_START ASM_CONST(0xD000000000000000)
+#define VMALLOC_SIZE (PGTABLE_RANGE >> 1)
+#define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE)
+
+/*
+ * Define the address ranges for MMIO and IO space:
+ *
+ * ISA_IO_BASE = VMALLOC_END, 64K reserved area
+ * PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
+ * IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
+ */
+#define FULL_IO_SIZE 0x80000000ul
+#define ISA_IO_BASE (VMALLOC_END)
+#define ISA_IO_END (VMALLOC_END + 0x10000ul)
+#define PHB_IO_BASE (ISA_IO_END)
+#define PHB_IO_END (VMALLOC_END + FULL_IO_SIZE)
+#define IOREMAP_BASE (PHB_IO_END)
+#define IOREMAP_END (VMALLOC_START + PGTABLE_RANGE)
+
+/*
+ * Region IDs
+ */
+#define REGION_SHIFT 60UL
+#define REGION_MASK (0xfUL << REGION_SHIFT)
+#define REGION_ID(ea) (((unsigned long)(ea)) >> REGION_SHIFT)
+
+#define VMALLOC_REGION_ID (REGION_ID(VMALLOC_START))
+#define KERNEL_REGION_ID (REGION_ID(PAGE_OFFSET))
+#define VMEMMAP_REGION_ID (0xfUL)
+#define USER_REGION_ID (0UL)
+
+/*
+ * Defines the address of the vmemmap area, in its own region
+ */
+#define VMEMMAP_BASE (VMEMMAP_REGION_ID << REGION_SHIFT)
+#define vmemmap ((struct page *)VMEMMAP_BASE)
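
Illustration only (not part of the patch): with the layout above, the top nibble
of an effective address selects its region, so classifying an address is a
single shift and compare. The helper name is made up.

    /* Sketch: vmalloc space sits in region REGION_ID(VMALLOC_START) (0xD from
     * the constant above), the linear mapping in REGION_ID(PAGE_OFFSET), and
     * vmemmap in region 0xF. */
    static inline int example_is_vmalloc_ea(unsigned long ea)
    {
            return REGION_ID(ea) == VMALLOC_REGION_ID;
    }
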
+
+
+/*
+ * Common bits in a linux-style PTE. These match the bits in the
+ * (hardware-defined) PowerPC PTE as closely as possible. Additional
+ * bits may be defined in pgtable-*.h
+ */
+#define _PAGE_PRESENT 0x0001 /* software: pte contains a translation */
+#define _PAGE_USER 0x0002 /* matches one of the PP bits */
+#define _PAGE_FILE 0x0002 /* (!present only) software: pte holds file offset */
+#define _PAGE_EXEC 0x0004 /* No execute on POWER4 and newer (we invert) */
+#define _PAGE_GUARDED 0x0008
+#define _PAGE_COHERENT 0x0010 /* M: enforce memory coherence (SMP systems) */
+#define _PAGE_NO_CACHE 0x0020 /* I: cache inhibit */
+#define _PAGE_WRITETHRU 0x0040 /* W: cache write-through */
+#define _PAGE_DIRTY 0x0080 /* C: page changed */
+#define _PAGE_ACCESSED 0x0100 /* R: page referenced */
+#define _PAGE_RW 0x0200 /* software: user write access allowed */
+#define _PAGE_BUSY 0x0800 /* software: PTE & hash are busy */
+
+/* Strong Access Ordering */
+#define _PAGE_SAO (_PAGE_WRITETHRU | _PAGE_NO_CACHE | _PAGE_COHERENT)
+
+#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)
+
+#define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY)
+
+/* __pgprot defined in arch/powerpc/include/asm/page.h */
+#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
+
+#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER)
+#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_WRENABLE)
+#define PAGE_KERNEL_CI __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
+ _PAGE_WRENABLE | _PAGE_NO_CACHE | _PAGE_GUARDED)
+#define PAGE_KERNEL_EXEC __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_EXEC)
+
+#define PAGE_AGP __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE)
+#define HAVE_PAGE_AGP
+
+#define PAGE_PROT_BITS __pgprot(_PAGE_GUARDED | _PAGE_COHERENT | \
+ _PAGE_NO_CACHE | _PAGE_WRITETHRU | \
+ _PAGE_4K_PFN | _PAGE_RW | _PAGE_USER | \
+ _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_EXEC)
+/* PTEIDX nibble */
+#define _PTEIDX_SECONDARY 0x8
+#define _PTEIDX_GROUP_IX 0x7
+
+
+/*
+ * POWER4 and newer have per page execute protection, older chips can only
+ * do this on a segment (256MB) basis.
+ *
+ * Also, write permissions imply read permissions.
+ * This is the closest we can get..
+ *
+ * Note due to the way vm flags are laid out, the bits are XWR
+ */
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY
+#define __P100 PAGE_READONLY_X
+#define __P101 PAGE_READONLY_X
+#define __P110 PAGE_COPY_X
+#define __P111 PAGE_COPY_X
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_READONLY_X
+#define __S101 PAGE_READONLY_X
+#define __S110 PAGE_SHARED_X
+#define __S111 PAGE_SHARED_X
+
+#ifdef CONFIG_HUGETLB_PAGE
+
+#define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+
+#endif
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ *
+ * mk_pte takes a (struct page *) as input
+ */
+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
+{
+ pte_t pte;
+
+
+ pte_val(pte) = (pfn << PTE_RPN_SHIFT) | pgprot_val(pgprot);
+ return pte;
+}
+
+#define pte_modify(_pte, newprot) \
+ (__pte((pte_val(_pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)))
+
+#define pte_none(pte) ((pte_val(pte) & ~_PAGE_HPTEFLAGS) == 0)
+#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
+
+/* pte_clear moved to later in this file */
+
+#define pte_pfn(x) ((unsigned long)((pte_val(x)>>PTE_RPN_SHIFT)))
+#define pte_page(x) pfn_to_page(pte_pfn(x))
+
+#define PMD_BAD_BITS (PTE_TABLE_SIZE-1)
+#define PUD_BAD_BITS (PMD_TABLE_SIZE-1)
+
+#define pmd_set(pmdp, pmdval) (pmd_val(*(pmdp)) = (pmdval))
+#define pmd_none(pmd) (!pmd_val(pmd))
+#define pmd_bad(pmd) (!is_kernel_addr(pmd_val(pmd)) \
+ || (pmd_val(pmd) & PMD_BAD_BITS))
+#define pmd_present(pmd) (pmd_val(pmd) != 0)
+#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0)
+#define pmd_page_vaddr(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS)
+#define pmd_page(pmd) virt_to_page(pmd_page_vaddr(pmd))
+
+#define pud_set(pudp, pudval) (pud_val(*(pudp)) = (pudval))
+#define pud_none(pud) (!pud_val(pud))
+#define pud_bad(pud) (!is_kernel_addr(pud_val(pud)) \
+ || (pud_val(pud) & PUD_BAD_BITS))
+#define pud_present(pud) (pud_val(pud) != 0)
+#define pud_clear(pudp) (pud_val(*(pudp)) = 0)
+#define pud_page_vaddr(pud) (pud_val(pud) & ~PUD_MASKED_BITS)
+#define pud_page(pud) virt_to_page(pud_page_vaddr(pud))
+
+#define pgd_set(pgdp, pudp) ({pgd_val(*(pgdp)) = (unsigned long)(pudp);})
+
+/*
+ * Find an entry in a page-table-directory. We combine the address region
+ * (the high order N bits) and the pgd portion of the address.
+ */
+/* to avoid overflow in free_pgtables we don't use PTRS_PER_PGD here */
+#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & 0x1ff)
+
+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
+
+#define pmd_offset(pudp,addr) \
+ (((pmd_t *) pud_page_vaddr(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
+
+#define pte_offset_kernel(dir,addr) \
+ (((pte_t *) pmd_page_vaddr(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
+
+#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
+#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
+#define pte_unmap(pte) do { } while(0)
+#define pte_unmap_nested(pte) do { } while(0)
+
+/* to find an entry in a kernel page-table-directory */
+/* This now only contains the vmalloc pages */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW;}
+static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY;}
+static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;}
+static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;}
+static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
+
+static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
+static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
+
+static inline pte_t pte_wrprotect(pte_t pte) {
+ pte_val(pte) &= ~(_PAGE_RW); return pte; }
+static inline pte_t pte_mkclean(pte_t pte) {
+ pte_val(pte) &= ~(_PAGE_DIRTY); return pte; }
+static inline pte_t pte_mkold(pte_t pte) {
+ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkwrite(pte_t pte) {
+ pte_val(pte) |= _PAGE_RW; return pte; }
+static inline pte_t pte_mkdirty(pte_t pte) {
+ pte_val(pte) |= _PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkyoung(pte_t pte) {
+ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkhuge(pte_t pte) {
+ return pte; }
+static inline pte_t pte_mkspecial(pte_t pte) {
+ pte_val(pte) |= _PAGE_SPECIAL; return pte; }
+static inline unsigned long pte_pgprot(pte_t pte)
+{
+ return __pgprot(pte_val(pte)) & PAGE_PROT_BITS;
+}
+
+/* Atomic PTE updates */
+static inline unsigned long pte_update(struct mm_struct *mm,
+ unsigned long addr,
+ pte_t *ptep, unsigned long clr,
+ int huge)
+{
+ unsigned long old, tmp;
+
+ __asm__ __volatile__(
+ "1: ldarx %0,0,%3 # pte_update\n\
+ andi. %1,%0,%6\n\
+ bne- 1b \n\
+ andc %1,%0,%4 \n\
+ stdcx. %1,0,%3 \n\
+ bne- 1b"
+ : "=&r" (old), "=&r" (tmp), "=m" (*ptep)
+ : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY)
+ : "cc" );
+
+ if (old & _PAGE_HASHPTE)
+ hpte_need_flush(mm, addr, ptep, old, huge);
+ return old;
+}
+
+static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ unsigned long old;
+
+ if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
+ return 0;
+ old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0);
+ return (old & _PAGE_ACCESSED) != 0;
+}
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
+({ \
+ int __r; \
+ __r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
+ __r; \
+})
+
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ unsigned long old;
+
+ if ((pte_val(*ptep) & _PAGE_RW) == 0)
+ return;
+ old = pte_update(mm, addr, ptep, _PAGE_RW, 0);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ unsigned long old;
+
+ if ((pte_val(*ptep) & _PAGE_RW) == 0)
+ return;
+ old = pte_update(mm, addr, ptep, _PAGE_RW, 1);
+}
+
+/*
+ * We currently remove entries from the hashtable regardless of whether
+ * the entry was young or dirty. The generic routines only flush if the
+ * entry was young or dirty, which is not good enough.
+ *
+ * We should be more intelligent about this but for the moment we override
+ * these functions and force a tlb flush unconditionally
+ */
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+#define ptep_clear_flush_young(__vma, __address, __ptep) \
+({ \
+ int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \
+ __ptep); \
+ __young; \
+})
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0);
+ return __pte(old);
+}
+
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t * ptep)
+{
+ pte_update(mm, addr, ptep, ~0UL, 0);
+}
+
+/*
+ * set_pte stores a linux PTE into the linux page table.
+ */
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ if (pte_present(*ptep))
+ pte_clear(mm, addr, ptep);
+ pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
+ *ptep = pte;
+}
+
+/* Set the dirty and/or accessed bits atomically in a linux PTE; this
+ * function doesn't need to flush the hash entry
+ */
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
+{
+ unsigned long bits = pte_val(entry) &
+ (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
+ unsigned long old, tmp;
+
+ __asm__ __volatile__(
+ "1: ldarx %0,0,%4\n\
+ andi. %1,%0,%6\n\
+ bne- 1b \n\
+ or %0,%3,%0\n\
+ stdcx. %0,0,%4\n\
+ bne- 1b"
+ :"=&r" (old), "=&r" (tmp), "=m" (*ptep)
+ :"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY)
+ :"cc");
+}
+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
+({ \
+ int __changed = !pte_same(*(__ptep), __entry); \
+ if (__changed) { \
+ __ptep_set_access_flags(__ptep, __entry, __dirty); \
+ flush_tlb_page_nohash(__vma, __address); \
+ } \
+ __changed; \
+})
+
+/*
+ * Macro to mark a page protection value as "uncacheable".
+ */
+#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))
+
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t vma_prot);
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+
+#define __HAVE_ARCH_PTE_SAME
+#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
+
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pmd_ERROR(e) \
+ printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
+#define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+/* Encode and de-code a swap entry */
+#define __swp_type(entry) (((entry).val >> 1) & 0x3f)
+#define __swp_offset(entry) ((entry).val >> 8)
+#define __swp_entry(type, offset) ((swp_entry_t){((type)<< 1)|((offset)<<8)})
+#define __pte_to_swp_entry(pte) ((swp_entry_t){pte_val(pte) >> PTE_RPN_SHIFT})
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val << PTE_RPN_SHIFT })
+#define pte_to_pgoff(pte) (pte_val(pte) >> PTE_RPN_SHIFT)
+#define pgoff_to_pte(off) ((pte_t) {((off) << PTE_RPN_SHIFT)|_PAGE_FILE})
+#define PTE_FILE_MAX_BITS (BITS_PER_LONG - PTE_RPN_SHIFT)
+
+void pgtable_cache_init(void);
+
+/*
+ * find_linux_pte returns the address of a linux pte for a given
+ * effective address and directory. If not found, it returns zero.
+ */
+static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
+{
+ pgd_t *pg;
+ pud_t *pu;
+ pmd_t *pm;
+ pte_t *pt = NULL;
+
+ pg = pgdir + pgd_index(ea);
+ if (!pgd_none(*pg)) {
+ pu = pud_offset(pg, ea);
+ if (!pud_none(*pu)) {
+ pm = pmd_offset(pu, ea);
+ if (pmd_present(*pm))
+ pt = pte_offset_kernel(pm, ea);
+ }
+ }
+ return pt;
+}
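
A usage sketch, not part of the patch: resolving the Linux PTE behind a kernel
virtual address. Callers must ensure the mapping cannot disappear under them
(or hold the appropriate locks), huge pages need huge_pte_offset() instead, and
init_mm is assumed to be visible to the caller.

    /* Sketch: look up the PTE for a kernel address and read its pfn. */
    static unsigned long example_kernel_pfn(unsigned long vaddr)
    {
            pte_t *ptep = find_linux_pte(init_mm.pgd, vaddr);

            if (ptep && pte_present(*ptep))
                    return pte_pfn(*ptep);
            return 0;
    }
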
+
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long address);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_PGTABLE_PPC64_H_ */
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
new file mode 100644
index 0000000..dbb8ca1
--- /dev/null
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -0,0 +1,57 @@
+#ifndef _ASM_POWERPC_PGTABLE_H
+#define _ASM_POWERPC_PGTABLE_H
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+#include <asm/processor.h> /* For TASK_SIZE */
+#include <asm/mmu.h>
+#include <asm/page.h>
+struct mm_struct;
+#endif /* !__ASSEMBLY__ */
+
+#if defined(CONFIG_PPC64)
+# include <asm/pgtable-ppc64.h>
+#else
+# include <asm/pgtable-ppc32.h>
+#endif
+
+#ifndef __ASSEMBLY__
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern unsigned long empty_zero_page[];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+extern pgd_t swapper_pg_dir[];
+
+extern void paging_init(void);
+
+/*
+ * kern_addr_valid is intended to indicate whether an address is a valid
+ * kernel address. Most 32-bit archs define it as always true (like this)
+ * but most 64-bit archs actually perform a test. What should we do here?
+ */
+#define kern_addr_valid(addr) (1)
+
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+#include <asm-generic/pgtable.h>
+
+
+/*
+ * This gets called at the end of handling a page fault, when
+ * the kernel has put a new PTE into the page table for the process.
+ * We use it to ensure coherency between the i-cache and d-cache
+ * for the page which has just been mapped in.
+ * On machines which use an MMU hash table, we use this to put a
+ * corresponding HPTE into the hash table ahead of time, instead of
+ * waiting for the inevitable extra hash-table miss exception.
+ */
+extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_PGTABLE_H */
diff --git a/arch/powerpc/include/asm/phyp_dump.h b/arch/powerpc/include/asm/phyp_dump.h
new file mode 100644
index 0000000..fa74c6c
--- /dev/null
+++ b/arch/powerpc/include/asm/phyp_dump.h
@@ -0,0 +1,47 @@
+/*
+ * Hypervisor-assisted dump
+ *
+ * Linas Vepstas, Manish Ahuja 2008
+ * Copyright 2008 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _PPC64_PHYP_DUMP_H
+#define _PPC64_PHYP_DUMP_H
+
+#ifdef CONFIG_PHYP_DUMP
+
+/* The RMR region will be saved for later dumping
+ * whenever the kernel crashes. Set this to 256MB. */
+#define PHYP_DUMP_RMR_START 0x0
+#define PHYP_DUMP_RMR_END (1UL<<28)
+
+struct phyp_dump {
+ /* Memory that is reserved during very early boot. */
+ unsigned long init_reserve_start;
+ unsigned long init_reserve_size;
+ /* cmd line options during boot */
+ unsigned long reserve_bootvar;
+ unsigned long phyp_dump_at_boot;
+ /* Check status during boot if dump supported, active & present*/
+ unsigned long phyp_dump_configured;
+ unsigned long phyp_dump_is_active;
+ /* store cpu & hpte size */
+ unsigned long cpu_state_size;
+ unsigned long hpte_region_size;
+ /* previous scratch area values */
+ unsigned long reserved_scratch_addr;
+ unsigned long reserved_scratch_size;
+};
+
+extern struct phyp_dump *phyp_dump_info;
+
+int early_init_dt_scan_phyp_dump(unsigned long node,
+ const char *uname, int depth, void *data);
+
+#endif /* CONFIG_PHYP_DUMP */
+#endif /* _PPC64_PHYP_DUMP_H */
diff --git a/arch/powerpc/include/asm/pmac_feature.h b/arch/powerpc/include/asm/pmac_feature.h
new file mode 100644
index 0000000..877c35a
--- /dev/null
+++ b/arch/powerpc/include/asm/pmac_feature.h
@@ -0,0 +1,405 @@
+/*
+ * Definition of platform feature hooks for PowerMacs
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Paul Mackerras &
+ * Ben. Herrenschmidt.
+ *
+ *
+ * Note: I removed media-bay details from the feature stuff, I believe it's
+ * not worth it, the media-bay driver can directly use the mac-io
+ * ASIC registers.
+ *
+ * Implementation note: Currently, none of these functions will block.
+ * However, they may internally protect themselves with a spinlock
+ * for way too long. Be prepared for at least some of these to block
+ * in the future.
+ *
+ * Unless specifically defined, the result code is assumed to be an
+ * error when negative, 0 is the default success result. Some functions
+ * may return additional positive result values.
+ *
+ * To keep implementation simple, all feature calls are assumed to have
+ * the prototype parameters (struct device_node* node, int value).
+ * When either is not used, pass 0.
+ */
+
+#ifdef __KERNEL__
+#ifndef __ASM_POWERPC_PMAC_FEATURE_H
+#define __ASM_POWERPC_PMAC_FEATURE_H
+
+#include <asm/macio.h>
+#include <asm/machdep.h>
+
+/*
+ * Known Mac motherboard models
+ *
+ * Please, report any error here to benh@kernel.crashing.org, thanks !
+ *
+ * Note that I don't fully maintain this list for Core99 & MacRISC2
+ * and I'm considering removing all NewWorld entries from it and
+ * entirely rely on the model string.
+ */
+
+/* PowerSurge are the first generation of PCI Pmacs. This includes
+ * all of the Grand-Central based machines. We currently don't
+ * differentiate most of them.
+ */
+#define PMAC_TYPE_PSURGE 0x10 /* PowerSurge */
+#define PMAC_TYPE_ANS 0x11 /* Apple Network Server */
+
+/* Here is the infamous series of OHare based machines
+ */
+#define PMAC_TYPE_COMET 0x20 /* Believed to be PowerBook 2400 */
+#define PMAC_TYPE_HOOPER 0x21 /* Believed to be PowerBook 3400 */
+#define PMAC_TYPE_KANGA 0x22 /* PowerBook 3500 (first G3) */
+#define PMAC_TYPE_ALCHEMY 0x23 /* Alchemy motherboard base */
+#define PMAC_TYPE_GAZELLE 0x24 /* Spartacus, some 5xxx/6xxx */
+#define PMAC_TYPE_UNKNOWN_OHARE 0x2f /* Unknown, but OHare based */
+
+/* Here are the Heathrow based machines
+ * FIXME: Differentiate wallstreet,mainstreet,wallstreetII
+ */
+#define PMAC_TYPE_GOSSAMER 0x30 /* Gossamer motherboard */
+#define PMAC_TYPE_SILK 0x31 /* Desktop PowerMac G3 */
+#define PMAC_TYPE_WALLSTREET 0x32 /* Wallstreet/Mainstreet PowerBook*/
+#define PMAC_TYPE_UNKNOWN_HEATHROW 0x3f /* Unknown but heathrow based */
+
+/* Here are newworld machines based on Paddington (heathrow derivative)
+ */
+#define PMAC_TYPE_101_PBOOK 0x40 /* 101 PowerBook (aka Lombard) */
+#define PMAC_TYPE_ORIG_IMAC 0x41 /* First generation iMac */
+#define PMAC_TYPE_YOSEMITE 0x42 /* B&W G3 */
+#define PMAC_TYPE_YIKES 0x43 /* Yikes G4 (PCI graphics) */
+#define PMAC_TYPE_UNKNOWN_PADDINGTON 0x4f /* Unknown but paddington based */
+
+/* Core99 machines based on UniNorth 1.0 and 1.5
+ *
+ * Note: A single entry here may cover several actual models according
+ * to the device-tree. (Sawtooth is most tower G4s, FW_IMAC is most
+ * FireWire based iMacs, etc...). Those machines are too similar to be
+ * distinguished here; when they need to be differentiated, use the
+ * device-tree "model" or "compatible" property.
+ */
+#define PMAC_TYPE_ORIG_IBOOK 0x40 /* First iBook model (no firewire) */
+#define PMAC_TYPE_SAWTOOTH 0x41 /* Desktop G4s */
+#define PMAC_TYPE_FW_IMAC 0x42 /* FireWire iMacs (except Pangea based) */
+#define PMAC_TYPE_FW_IBOOK 0x43 /* FireWire iBooks (except iBook2) */
+#define PMAC_TYPE_CUBE 0x44 /* Cube PowerMac */
+#define PMAC_TYPE_QUICKSILVER 0x45 /* QuickSilver G4s */
+#define PMAC_TYPE_PISMO 0x46 /* Pismo PowerBook */
+#define PMAC_TYPE_TITANIUM 0x47 /* Titanium PowerBook */
+#define PMAC_TYPE_TITANIUM2 0x48 /* Titanium II PowerBook (no L3, M6) */
+#define PMAC_TYPE_TITANIUM3 0x49 /* Titanium III PowerBook (with L3 & M7) */
+#define PMAC_TYPE_TITANIUM4 0x50 /* Titanium IV PowerBook (with L3 & M9) */
+#define PMAC_TYPE_EMAC 0x50 /* eMac */
+#define PMAC_TYPE_UNKNOWN_CORE99 0x5f
+
+/* MacRisc2 with UniNorth 2.0 */
+#define PMAC_TYPE_RACKMAC 0x80 /* XServe */
+#define PMAC_TYPE_WINDTUNNEL 0x81
+
+/* MacRISC2 machines based on the Pangea chipset
+ */
+#define PMAC_TYPE_PANGEA_IMAC 0x100 /* Flower Power iMac */
+#define PMAC_TYPE_IBOOK2 0x101 /* iBook2 (polycarbonate) */
+#define PMAC_TYPE_FLAT_PANEL_IMAC 0x102 /* Flat panel iMac */
+#define PMAC_TYPE_UNKNOWN_PANGEA 0x10f
+
+/* MacRISC2 machines based on the Intrepid chipset
+ */
+#define PMAC_TYPE_UNKNOWN_INTREPID 0x11f /* Generic */
+
+/* MacRISC4 / G5 machines. We don't have per-machine selection here anymore,
+ * but rather machine families
+ */
+#define PMAC_TYPE_POWERMAC_G5 0x150 /* U3 & U3H based */
+#define PMAC_TYPE_POWERMAC_G5_U3L 0x151 /* U3L based desktop */
+#define PMAC_TYPE_IMAC_G5 0x152 /* iMac G5 */
+#define PMAC_TYPE_XSERVE_G5 0x153 /* Xserve G5 */
+#define PMAC_TYPE_UNKNOWN_K2 0x19f /* Any other K2 based */
+#define PMAC_TYPE_UNKNOWN_SHASTA 0x19e /* Any other Shasta based */
+
+/*
+ * Motherboard flags
+ */
+
+#define PMAC_MB_CAN_SLEEP 0x00000001
+#define PMAC_MB_HAS_FW_POWER 0x00000002
+#define PMAC_MB_OLD_CORE99 0x00000004
+#define PMAC_MB_MOBILE 0x00000008
+#define PMAC_MB_MAY_SLEEP 0x00000010
+
+/*
+ * Feature calls supported on pmac
+ *
+ */
+
+/*
+ * Use this inline wrapper
+ */
+struct device_node;
+
+static inline long pmac_call_feature(int selector, struct device_node* node,
+ long param, long value)
+{
+ if (!ppc_md.feature_call || !machine_is(powermac))
+ return -ENODEV;
+ return ppc_md.feature_call(selector, node, param, value);
+}
+
+/* PMAC_FTR_SERIAL_ENABLE (struct device_node* node, int param, int value)
+ * enable/disable an SCC side. Pass the node corresponding to the
+ * channel side as a parameter.
+ * param is the type of port
+ * if param is ORed with PMAC_SCC_FLAG_XMON, then the SCC is locked in the
+ * enabled state for use by xmon.
+ */
+#define PMAC_FTR_SCC_ENABLE PMAC_FTR_DEF(0)
+ #define PMAC_SCC_ASYNC 0
+ #define PMAC_SCC_IRDA 1
+ #define PMAC_SCC_I2S1 2
+ #define PMAC_SCC_FLAG_XMON 0x00001000
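
Usage sketch (not part of the patch), following the convention documented above:
enabling the async SCC channel behind a given device node and locking it for
xmon. `np` is assumed to be the SCC channel node, and the helper name is made up.

    #include <linux/errno.h>
    #include <asm/pmac_feature.h>

    /* Sketch: power up the serial port cell; pass 0 as the value to disable it. */
    static int example_enable_scc(struct device_node *np)
    {
            long rc = pmac_call_feature(PMAC_FTR_SCC_ENABLE, np,
                                        PMAC_SCC_ASYNC | PMAC_SCC_FLAG_XMON, 1);

            return rc < 0 ? -ENODEV : 0;
    }
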
+
+/* PMAC_FTR_MODEM_ENABLE (struct device_node* node, 0, int value)
+ * enable/disable the internal modem.
+ */
+#define PMAC_FTR_MODEM_ENABLE PMAC_FTR_DEF(1)
+
+/* PMAC_FTR_SWIM3_ENABLE (struct device_node* node, 0,int value)
+ * enable/disable the swim3 (floppy) cell of a mac-io ASIC
+ */
+#define PMAC_FTR_SWIM3_ENABLE PMAC_FTR_DEF(2)
+
+/* PMAC_FTR_MESH_ENABLE (struct device_node* node, 0, int value)
+ * enable/disable the mesh (scsi) cell of a mac-io ASIC
+ */
+#define PMAC_FTR_MESH_ENABLE PMAC_FTR_DEF(3)
+
+/* PMAC_FTR_IDE_ENABLE (struct device_node* node, int busID, int value)
+ * enable/disable an IDE port of a mac-io ASIC
+ * pass the busID parameter
+ */
+#define PMAC_FTR_IDE_ENABLE PMAC_FTR_DEF(4)
+
+/* PMAC_FTR_IDE_RESET (struct device_node* node, int busID, int value)
+ * assert(1)/release(0) an IDE reset line (mac-io IDE only)
+ */
+#define PMAC_FTR_IDE_RESET PMAC_FTR_DEF(5)
+
+/* PMAC_FTR_BMAC_ENABLE (struct device_node* node, 0, int value)
+ * enable/disable the bmac (ethernet) cell of a mac-io ASIC, also drive
+ * its reset line
+ */
+#define PMAC_FTR_BMAC_ENABLE PMAC_FTR_DEF(6)
+
+/* PMAC_FTR_GMAC_ENABLE (struct device_node* node, 0, int value)
+ * enable/disable the gmac (ethernet) cell of an uninorth ASIC. This
+ * controls the cell's clock.
+ */
+#define PMAC_FTR_GMAC_ENABLE PMAC_FTR_DEF(7)
+
+/* PMAC_FTR_GMAC_PHY_RESET (struct device_node* node, 0, 0)
+ * Perform a HW reset of the PHY connected to a gmac controller.
+ * Pass the gmac device node, not the PHY node.
+ */
+#define PMAC_FTR_GMAC_PHY_RESET PMAC_FTR_DEF(8)
+
+/* PMAC_FTR_SOUND_CHIP_ENABLE (struct device_node* node, 0, int value)
+ * enable/disable the sound chip, whatever it is and provided it can
+ * actually be controlled
+ */
+#define PMAC_FTR_SOUND_CHIP_ENABLE PMAC_FTR_DEF(9)
+
+/* -- add various tweaks related to sound routing -- */
+
+/* PMAC_FTR_AIRPORT_ENABLE (struct device_node* node, 0, int value)
+ * enable/disable the airport card
+ */
+#define PMAC_FTR_AIRPORT_ENABLE PMAC_FTR_DEF(10)
+
+/* PMAC_FTR_RESET_CPU (NULL, int cpu_nr, 0)
+ * toggle the reset line of a CPU on an uninorth-based SMP machine
+ */
+#define PMAC_FTR_RESET_CPU PMAC_FTR_DEF(11)
+
+/* PMAC_FTR_USB_ENABLE (struct device_node* node, 0, int value)
+ * enable/disable a USB cell, along with the power of the USB "pad"
+ * on keylargo based machines
+ */
+#define PMAC_FTR_USB_ENABLE PMAC_FTR_DEF(12)
+
+/* PMAC_FTR_1394_ENABLE (struct device_node* node, 0, int value)
+ * enable/disable the firewire cell of an uninorth ASIC.
+ */
+#define PMAC_FTR_1394_ENABLE PMAC_FTR_DEF(13)
+
+/* PMAC_FTR_1394_CABLE_POWER (struct device_node* node, 0, int value)
+ * enable/disable the firewire cable power supply of the uninorth
+ * firewire cell
+ */
+#define PMAC_FTR_1394_CABLE_POWER PMAC_FTR_DEF(14)
+
+/* PMAC_FTR_SLEEP_STATE (struct device_node* node, 0, int value)
+ * set the sleep state of the motherboard.
+ *
+ * Pass -1 as value to query for sleep capability
+ * Pass 1 to set IOs to sleep
+ * Pass 0 to set IOs to wake
+ */
+#define PMAC_FTR_SLEEP_STATE PMAC_FTR_DEF(15)
+
+/* PMAC_FTR_GET_MB_INFO (NULL, selector, 0)
+ *
+ * returns some motherboard info.
+ * selector: 0 - model id
+ * 1 - model flags (capabilities)
+ * 2 - model name (cast to const char *)
+ */
+#define PMAC_FTR_GET_MB_INFO PMAC_FTR_DEF(16)
+#define PMAC_MB_INFO_MODEL 0
+#define PMAC_MB_INFO_FLAGS 1
+#define PMAC_MB_INFO_NAME 2
+
+/* PMAC_FTR_READ_GPIO (NULL, int index, 0)
+ *
+ * read a GPIO from a mac-io controller of type KeyLargo or Pangea.
+ * the value returned is a byte (positive), or a negative error code
+ */
+#define PMAC_FTR_READ_GPIO PMAC_FTR_DEF(17)
+
+/* PMAC_FTR_WRITE_GPIO (NULL, int index, int value)
+ *
+ * write a GPIO of a mac-io controller of type KeyLargo or Pangea.
+ */
+#define PMAC_FTR_WRITE_GPIO PMAC_FTR_DEF(18)
+
+/* PMAC_FTR_ENABLE_MPIC
+ *
+ * Enable the MPIC cell
+ */
+#define PMAC_FTR_ENABLE_MPIC PMAC_FTR_DEF(19)
+
+/* PMAC_FTR_AACK_DELAY_ENABLE (NULL, int enable, 0)
+ *
+ * Enable/disable the AACK delay on the northbridge for systems using DFS
+ */
+#define PMAC_FTR_AACK_DELAY_ENABLE PMAC_FTR_DEF(20)
+
+/* PMAC_FTR_DEVICE_CAN_WAKE
+ *
+ * Used by video drivers to inform the system that they can actually perform
+ * wakeup from sleep
+ */
+#define PMAC_FTR_DEVICE_CAN_WAKE PMAC_FTR_DEF(22)
+
+
+/* Don't use those directly, they are for the sake of pmac_setup.c */
+extern long pmac_do_feature_call(unsigned int selector, ...);
+extern void pmac_feature_init(void);
+
+/* Video suspend tweak */
+extern void pmac_set_early_video_resume(void (*proc)(void *data), void *data);
+extern void pmac_call_early_video_resume(void);
+
+#define PMAC_FTR_DEF(x) ((0x6660000) | (x))
+
+/* The AGP driver registers itself here */
+extern void pmac_register_agp_pm(struct pci_dev *bridge,
+ int (*suspend)(struct pci_dev *bridge),
+ int (*resume)(struct pci_dev *bridge));
+
+/* Those are meant to be used by video drivers to deal with AGP
+ * suspend resume properly
+ */
+extern void pmac_suspend_agp_for_card(struct pci_dev *dev);
+extern void pmac_resume_agp_for_card(struct pci_dev *dev);
+
+/*
+ * The part below is for use by macio_asic.c only; do not rely
+ * on the data structures or constants below in a normal driver
+ *
+ */
+
+#define MAX_MACIO_CHIPS 2
+
+enum {
+ macio_unknown = 0,
+ macio_grand_central,
+ macio_ohare,
+ macio_ohareII,
+ macio_heathrow,
+ macio_gatwick,
+ macio_paddington,
+ macio_keylargo,
+ macio_pangea,
+ macio_intrepid,
+ macio_keylargo2,
+ macio_shasta,
+};
+
+struct macio_chip
+{
+ struct device_node *of_node;
+ int type;
+ const char *name;
+ int rev;
+ volatile u32 __iomem *base;
+ unsigned long flags;
+
+ /* For use by macio_asic PCI driver */
+ struct macio_bus lbus;
+};
+
+extern struct macio_chip macio_chips[MAX_MACIO_CHIPS];
+
+#define MACIO_FLAG_SCCA_ON 0x00000001
+#define MACIO_FLAG_SCCB_ON 0x00000002
+#define MACIO_FLAG_SCC_LOCKED 0x00000004
+#define MACIO_FLAG_AIRPORT_ON 0x00000010
+#define MACIO_FLAG_FW_SUPPORTED 0x00000020
+
+extern struct macio_chip* macio_find(struct device_node* child, int type);
+
+#define MACIO_FCR32(macio, r) ((macio)->base + ((r) >> 2))
+#define MACIO_FCR8(macio, r) (((volatile u8 __iomem *)((macio)->base)) + (r))
+
+#define MACIO_IN32(r) (in_le32(MACIO_FCR32(macio,r)))
+#define MACIO_OUT32(r,v) (out_le32(MACIO_FCR32(macio,r), (v)))
+#define MACIO_BIS(r,v) (MACIO_OUT32((r), MACIO_IN32(r) | (v)))
+#define MACIO_BIC(r,v) (MACIO_OUT32((r), MACIO_IN32(r) & ~(v)))
+#define MACIO_IN8(r) (in_8(MACIO_FCR8(macio,r)))
+#define MACIO_OUT8(r,v) (out_8(MACIO_FCR8(macio,r), (v)))
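
Illustration only (not part of the patch): the MACIO_* accessors implicitly use
a local variable named `macio`, so a typical read-modify-write looks like the
sketch below. The register offset and bit names are hypothetical placeholders.

    /* Sketch: set then clear a feature-control bit on the first mac-io chip. */
    #define EXAMPLE_FCR_OFFSET      0x38    /* hypothetical FCR offset */
    #define EXAMPLE_ENABLE_BIT      0x0001  /* hypothetical enable bit */

    static void example_toggle_cell(void)
    {
            struct macio_chip *macio = &macio_chips[0];

            MACIO_BIS(EXAMPLE_FCR_OFFSET, EXAMPLE_ENABLE_BIT);  /* set the bit */
            MACIO_BIC(EXAMPLE_FCR_OFFSET, EXAMPLE_ENABLE_BIT);  /* clear it again */
    }
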
+
+/*
+ * Those are exported by pmac feature for internal use by arch code
+ * only, like the platform function callbacks; do not use them directly in drivers
+ */
+extern spinlock_t feature_lock;
+extern struct device_node *uninorth_node;
+extern u32 __iomem *uninorth_base;
+
+/*
+ * Uninorth reg. access. Note that Uni-N regs are big endian
+ */
+
+#define UN_REG(r) (uninorth_base + ((r) >> 2))
+#define UN_IN(r) (in_be32(UN_REG(r)))
+#define UN_OUT(r,v) (out_be32(UN_REG(r), (v)))
+#define UN_BIS(r,v) (UN_OUT((r), UN_IN(r) | (v)))
+#define UN_BIC(r,v) (UN_OUT((r), UN_IN(r) & ~(v)))
+
+/* Uninorth variant:
+ *
+ * 0 = not uninorth
+ * 1 = U1.x or U2.x
+ * 3 = U3
+ * 4 = U4
+ */
+extern int pmac_get_uninorth_variant(void);
+
+#endif /* __ASM_POWERPC_PMAC_FEATURE_H */
+#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/pmac_low_i2c.h b/arch/powerpc/include/asm/pmac_low_i2c.h
new file mode 100644
index 0000000..131011b
--- /dev/null
+++ b/arch/powerpc/include/asm/pmac_low_i2c.h
@@ -0,0 +1,107 @@
+/*
+ * include/asm-ppc/pmac_low_i2c.h
+ *
+ * Copyright (C) 2003 Ben. Herrenschmidt (benh@kernel.crashing.org)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+#ifndef __PMAC_LOW_I2C_H__
+#define __PMAC_LOW_I2C_H__
+#ifdef __KERNEL__
+
+/* i2c mode (based on the platform functions format) */
+enum {
+ pmac_i2c_mode_dumb = 1,
+ pmac_i2c_mode_std = 2,
+ pmac_i2c_mode_stdsub = 3,
+ pmac_i2c_mode_combined = 4,
+};
+
+/* RW bit in address */
+enum {
+ pmac_i2c_read = 0x01,
+ pmac_i2c_write = 0x00
+};
+
+/* i2c bus type */
+enum {
+ pmac_i2c_bus_keywest = 0,
+ pmac_i2c_bus_pmu = 1,
+ pmac_i2c_bus_smu = 2,
+};
+
+/* i2c bus features */
+enum {
+ /* can_largesub : supports >1 byte subaddresses (SMU only) */
+ pmac_i2c_can_largesub = 0x00000001u,
+
+ /* multibus : device node holds multiple busses, bus number is
+ * encoded in bits 0xff00 of "reg" of a given device
+ */
+ pmac_i2c_multibus = 0x00000002u,
+};
+
+/* i2c busses in the system */
+struct pmac_i2c_bus;
+struct i2c_adapter;
+
+/* Init, called early during boot */
+extern int pmac_i2c_init(void);
+
+/* Lookup an i2c bus for a device-node. The node can be either the bus
+ * node itself or a device below it. In the case of a multibus, the bus
+ * node itself is the controller node, else, it's a child of the controller
+ * node
+ */
+extern struct pmac_i2c_bus *pmac_i2c_find_bus(struct device_node *node);
+
+/* Get the address for an i2c device. This strips the bus number if
+ * necessary. The 7-bit address is returned 1 bit right shifted so that the
+ * direction bit can be directly ORed in
+ */
+extern u8 pmac_i2c_get_dev_addr(struct device_node *device);
+
+/* Get info about a bus */
+extern struct device_node *pmac_i2c_get_controller(struct pmac_i2c_bus *bus);
+extern struct device_node *pmac_i2c_get_bus_node(struct pmac_i2c_bus *bus);
+extern int pmac_i2c_get_type(struct pmac_i2c_bus *bus);
+extern int pmac_i2c_get_flags(struct pmac_i2c_bus *bus);
+extern int pmac_i2c_get_channel(struct pmac_i2c_bus *bus);
+
+/* i2c layer adapter attach/detach */
+extern void pmac_i2c_attach_adapter(struct pmac_i2c_bus *bus,
+ struct i2c_adapter *adapter);
+extern void pmac_i2c_detach_adapter(struct pmac_i2c_bus *bus,
+ struct i2c_adapter *adapter);
+extern struct i2c_adapter *pmac_i2c_get_adapter(struct pmac_i2c_bus *bus);
+extern struct pmac_i2c_bus *pmac_i2c_adapter_to_bus(struct i2c_adapter *adapter);
+
+/* Match a device or bus with an i2c adapter structure, to be used by drivers
+ * to match device-tree nodes with i2c adapters during adapter discovery
+ * callbacks
+ */
+extern int pmac_i2c_match_adapter(struct device_node *dev,
+ struct i2c_adapter *adapter);
+
+
+/* (legacy) Locking functions exposed to i2c-keywest */
+extern int pmac_low_i2c_lock(struct device_node *np);
+extern int pmac_low_i2c_unlock(struct device_node *np);
+
+/* Access functions for platform code */
+extern int pmac_i2c_open(struct pmac_i2c_bus *bus, int polled);
+extern void pmac_i2c_close(struct pmac_i2c_bus *bus);
+extern int pmac_i2c_setmode(struct pmac_i2c_bus *bus, int mode);
+extern int pmac_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
+ u32 subaddr, u8 *data, int len);
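
A hedged usage sketch (not part of the patch): a combined-mode read from a
device node `np`, following the open/setmode/xfer/close sequence above and the
documented convention that the direction bit is ORed into the address. The
subaddress and the helper name are arbitrary example values, and error handling
is trimmed.

    static int example_i2c_read(struct device_node *np, u8 *buf, int len)
    {
            struct pmac_i2c_bus *bus = pmac_i2c_find_bus(np);
            u8 addrdir;
            int rc;

            if (bus == NULL)
                    return -ENODEV;
            addrdir = pmac_i2c_get_dev_addr(np) | pmac_i2c_read;
            rc = pmac_i2c_open(bus, 0);             /* 0 = interrupt driven */
            if (rc)
                    return rc;
            pmac_i2c_setmode(bus, pmac_i2c_mode_combined);
            rc = pmac_i2c_xfer(bus, addrdir, 1, 0x10 /* example subaddr */,
                               buf, len);
            pmac_i2c_close(bus);
            return rc;
    }
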
+
+/* Suspend/resume code called by via-pmu directly for now */
+extern void pmac_pfunc_i2c_suspend(void);
+extern void pmac_pfunc_i2c_resume(void);
+
+#endif /* __KERNEL__ */
+#endif /* __PMAC_LOW_I2C_H__ */
diff --git a/arch/powerpc/include/asm/pmac_pfunc.h b/arch/powerpc/include/asm/pmac_pfunc.h
new file mode 100644
index 0000000..1330d6a
--- /dev/null
+++ b/arch/powerpc/include/asm/pmac_pfunc.h
@@ -0,0 +1,252 @@
+#ifndef __PMAC_PFUNC_H__
+#define __PMAC_PFUNC_H__
+
+#include <linux/types.h>
+#include <linux/list.h>
+
+/* Flags in command lists */
+#define PMF_FLAGS_ON_INIT 0x80000000u
+#define PMF_FLGAS_ON_TERM 0x40000000u
+#define PMF_FLAGS_ON_SLEEP 0x20000000u
+#define PMF_FLAGS_ON_WAKE 0x10000000u
+#define PMF_FLAGS_ON_DEMAND 0x08000000u
+#define PMF_FLAGS_INT_GEN 0x04000000u
+#define PMF_FLAGS_HIGH_SPEED 0x02000000u
+#define PMF_FLAGS_LOW_SPEED 0x01000000u
+#define PMF_FLAGS_SIDE_EFFECTS 0x00800000u
+
+/*
+ * Arguments to a platform function call.
+ *
+ * NOTE: By convention, pointer arguments point to a u32
+ */
+struct pmf_args {
+ union {
+ u32 v;
+ u32 *p;
+ } u[4];
+ unsigned int count;
+};
+
+/*
+ * A driver capable of interpreting commands provides a handlers
+ * structure filled with whatever handlers are implemented by this
+ * driver. Non-implemented handlers are left NULL.
+ *
+ * PMF_STD_ARGS are the same arguments that are passed to the parser
+ * and that gets passed back to the various handlers.
+ *
+ * Interpreting a given function always starts with a begin() call which
+ * returns instance data to be passed to subsequent calls, and
+ * ends with an end() call. This allows the low level driver to implement
+ * locking policy or per-function instance data.
+ *
+ * For interrupt capable functions, irq_enable() is called when a client
+ * registers, and irq_disable() is called when the last client unregisters.
+ * Note that irq_enable & irq_disable are called within a semaphore held
+ * by the core, thus you should not try to register yourself with some other
+ * pmf interrupt during those calls.
+ */
+
+#define PMF_STD_ARGS struct pmf_function *func, void *instdata, \
+ struct pmf_args *args
+
+struct pmf_function;
+
+struct pmf_handlers {
+ void * (*begin)(struct pmf_function *func, struct pmf_args *args);
+ void (*end)(struct pmf_function *func, void *instdata);
+
+ int (*irq_enable)(struct pmf_function *func);
+ int (*irq_disable)(struct pmf_function *func);
+
+ int (*write_gpio)(PMF_STD_ARGS, u8 value, u8 mask);
+ int (*read_gpio)(PMF_STD_ARGS, u8 mask, int rshift, u8 xor);
+
+ int (*write_reg32)(PMF_STD_ARGS, u32 offset, u32 value, u32 mask);
+ int (*read_reg32)(PMF_STD_ARGS, u32 offset);
+ int (*write_reg16)(PMF_STD_ARGS, u32 offset, u16 value, u16 mask);
+ int (*read_reg16)(PMF_STD_ARGS, u32 offset);
+ int (*write_reg8)(PMF_STD_ARGS, u32 offset, u8 value, u8 mask);
+ int (*read_reg8)(PMF_STD_ARGS, u32 offset);
+
+ int (*delay)(PMF_STD_ARGS, u32 duration);
+
+ int (*wait_reg32)(PMF_STD_ARGS, u32 offset, u32 value, u32 mask);
+ int (*wait_reg16)(PMF_STD_ARGS, u32 offset, u16 value, u16 mask);
+ int (*wait_reg8)(PMF_STD_ARGS, u32 offset, u8 value, u8 mask);
+
+ int (*read_i2c)(PMF_STD_ARGS, u32 len);
+ int (*write_i2c)(PMF_STD_ARGS, u32 len, const u8 *data);
+ int (*rmw_i2c)(PMF_STD_ARGS, u32 masklen, u32 valuelen, u32 totallen,
+ const u8 *maskdata, const u8 *valuedata);
+
+ int (*read_cfg)(PMF_STD_ARGS, u32 offset, u32 len);
+ int (*write_cfg)(PMF_STD_ARGS, u32 offset, u32 len, const u8 *data);
+ int (*rmw_cfg)(PMF_STD_ARGS, u32 offset, u32 masklen, u32 valuelen,
+ u32 totallen, const u8 *maskdata, const u8 *valuedata);
+
+ int (*read_i2c_sub)(PMF_STD_ARGS, u8 subaddr, u32 len);
+ int (*write_i2c_sub)(PMF_STD_ARGS, u8 subaddr, u32 len, const u8 *data);
+ int (*set_i2c_mode)(PMF_STD_ARGS, int mode);
+ int (*rmw_i2c_sub)(PMF_STD_ARGS, u8 subaddr, u32 masklen, u32 valuelen,
+ u32 totallen, const u8 *maskdata,
+ const u8 *valuedata);
+
+ int (*read_reg32_msrx)(PMF_STD_ARGS, u32 offset, u32 mask, u32 shift,
+ u32 xor);
+ int (*read_reg16_msrx)(PMF_STD_ARGS, u32 offset, u32 mask, u32 shift,
+ u32 xor);
+ int (*read_reg8_msrx)(PMF_STD_ARGS, u32 offset, u32 mask, u32 shift,
+ u32 xor);
+
+ int (*write_reg32_slm)(PMF_STD_ARGS, u32 offset, u32 shift, u32 mask);
+ int (*write_reg16_slm)(PMF_STD_ARGS, u32 offset, u32 shift, u32 mask);
+ int (*write_reg8_slm)(PMF_STD_ARGS, u32 offset, u32 shift, u32 mask);
+
+ int (*mask_and_compare)(PMF_STD_ARGS, u32 len, const u8 *maskdata,
+ const u8 *valuedata);
+
+ struct module *owner;
+};
+
+
+/*
+ * Drivers who expose platform functions register at init time, this
+ * causes the platform functions for that device node to be parsed in
+ * advance and associated with the device. The data structures are
+ * partially public so a driver can walk the list of platform functions
+ * and eventually inspect the flags
+ */
+struct pmf_device;
+
+struct pmf_function {
+ /* All functions for a given driver are linked */
+ struct list_head link;
+
+ /* Function node & driver data */
+ struct device_node *node;
+ void *driver_data;
+
+ /* For internal use by core */
+ struct pmf_device *dev;
+
+ /* The name is the "xxx" in "platform-do-xxx"; this is how
+ * platform functions are identified by this code. Some functions
+ * only operate for a given target, in which case the phandle is
+ * here (or 0 if the filter doesn't apply)
+ */
+ const char *name;
+ u32 phandle;
+
+ /* The flags for that function. You can have several functions
+ * with the same name and different flags
+ */
+ u32 flags;
+
+ /* The actual tokenized function blob */
+ const void *data;
+ unsigned int length;
+
+ /* Interrupt clients */
+ struct list_head irq_clients;
+
+ /* Refcounting */
+ struct kref ref;
+};
+
+/*
+ * For platform functions that are interrupts, one can register
+ * irq_client structures. You canNOT use the same structure twice
+ * as it contains a link member. Also, the callback is called with
+ * a spinlock held, you must not call back into any of the pmf_* functions
+ * from within that callback
+ */
+struct pmf_irq_client {
+ void (*handler)(void *data);
+ void *data;
+ struct module *owner;
+ struct list_head link;
+ struct pmf_function *func;
+};
+
+
+/*
+ * Register/Unregister a function-capable driver and its handlers
+ */
+extern int pmf_register_driver(struct device_node *np,
+ struct pmf_handlers *handlers,
+ void *driverdata);
+
+extern void pmf_unregister_driver(struct device_node *np);
+
+
+/*
+ * Register/Unregister interrupt clients
+ */
+extern int pmf_register_irq_client(struct device_node *np,
+ const char *name,
+ struct pmf_irq_client *client);
+
+extern void pmf_unregister_irq_client(struct pmf_irq_client *client);
+
+/*
+ * Called by the handlers when an irq happens
+ */
+extern void pmf_do_irq(struct pmf_function *func);
+
+
+/*
+ * Low level call to platform functions.
+ *
+ * The phandle can filter on the target object for functions that have
+ * multiple targets, the flags allow you to restrict the call to a given
+ * combination of flags.
+ *
+ * The args array contains as many arguments as are required by the function;
+ * this depends on the function you are calling. Unfortunately Apple's
+ * mechanism provides no way to encode that, so you have to get it right at
+ * the call site. Some functions require no args, in which case, you can
+ * pass NULL.
+ *
+ * You can also pass NULL to the name. This will match any function that has
+ * the appropriate combination of flags & phandle or you can pass 0 to the
+ * phandle to match any
+ */
+extern int pmf_do_functions(struct device_node *np, const char *name,
+ u32 phandle, u32 flags, struct pmf_args *args);
+
+
+
+/*
+ * High level call to a platform function.
+ *
+ * This one looks for the platform-xxx first, so you should call it on the
+ * actual target node if any. It will fall back to platform-do-xxx if it
+ * can't find one. It will also exclusively target functions that have
+ * the "OnDemand" flag.
+ */
+
+extern int pmf_call_function(struct device_node *target, const char *name,
+ struct pmf_args *args);
+
+
+/*
+ * For low latency interrupt usage, you can look up on-demand functions
+ * using the functions below.
+ */
+
+extern struct pmf_function *pmf_find_function(struct device_node *target,
+ const char *name);
+
+extern struct pmf_function *pmf_get_function(struct pmf_function *func);
+extern void pmf_put_function(struct pmf_function *func);
+
+extern int pmf_call_one(struct pmf_function *func, struct pmf_args *args);
+
+
+/* Suspend/resume code called by via-pmu directly for now */
+extern void pmac_pfunc_base_suspend(void);
+extern void pmac_pfunc_base_resume(void);
+
+#endif /* __PMAC_PFUNC_H__ */
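/*
 * Illustrative sketch, not part of the diff above: roughly how a driver
 * could invoke an "on demand" platform function and register for a
 * platform-function interrupt with the API declared in pmac_pfunc.h. The
 * function names "cpu-vcore-select" and "slewing-done" and the handler
 * body are hypothetical examples.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/prom.h>
#include <asm/pmac_pfunc.h>

static void example_pfunc_irq(void *data)
{
	/* Called with a spinlock held: do not call back into pmf_* here */
	pr_debug("platform function interrupt fired\n");
}

static struct pmf_irq_client example_client = {
	.handler = example_pfunc_irq,
	.owner   = THIS_MODULE,
};

static int example_use_pfunc(struct device_node *target)
{
	int rc;

	/* No-argument call; functions that need args take a pmf_args */
	rc = pmf_call_function(target, "cpu-vcore-select", NULL);
	if (rc)
		return rc;

	/* Get called back when the matching interrupt function fires */
	return pmf_register_irq_client(target, "slewing-done",
				       &example_client);
}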
diff --git a/arch/powerpc/include/asm/pmc.h b/arch/powerpc/include/asm/pmc.h
new file mode 100644
index 0000000..d6a616a
--- /dev/null
+++ b/arch/powerpc/include/asm/pmc.h
@@ -0,0 +1,37 @@
+/*
+ * pmc.h
+ * Copyright (C) 2004 David Gibson, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _POWERPC_PMC_H
+#define _POWERPC_PMC_H
+#ifdef __KERNEL__
+
+#include <asm/ptrace.h>
+
+typedef void (*perf_irq_t)(struct pt_regs *);
+extern perf_irq_t perf_irq;
+
+int reserve_pmc_hardware(perf_irq_t new_perf_irq);
+void release_pmc_hardware(void);
+
+#ifdef CONFIG_PPC64
+void power4_enable_pmcs(void);
+void pasemi_enable_pmcs(void);
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _POWERPC_PMC_H */
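/*
 * Illustrative sketch, not part of the diff above: how a performance
 * monitor user claims the PMC interrupt through the interface declared in
 * pmc.h. The handler body and usage are hypothetical; a real client would
 * program and read the PMC SPRs between reserve and release.
 */
#include <linux/kernel.h>
#include <asm/pmc.h>

static void example_pmc_handler(struct pt_regs *regs)
{
	pr_debug("PMC exception at nip %lx\n", regs->nip);
}

static int example_claim_pmcs(void)
{
	int err;

	err = reserve_pmc_hardware(example_pmc_handler);
	if (err)
		return err;	/* another subsystem already owns the PMCs */

	/* ... program and use the counters ... */

	release_pmc_hardware();
	return 0;
}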
diff --git a/arch/powerpc/include/asm/pmi.h b/arch/powerpc/include/asm/pmi.h
new file mode 100644
index 0000000..b4e91fb
--- /dev/null
+++ b/arch/powerpc/include/asm/pmi.h
@@ -0,0 +1,66 @@
+#ifndef _POWERPC_PMI_H
+#define _POWERPC_PMI_H
+
+/*
+ * Definitions for talking with PMI device on PowerPC
+ *
+ * PMI (Platform Management Interrupt) is a way to communicate
+ * with the BMC (Baseboard Management Controller) via interrupts.
+ * Unlike IPMI it is bidirectional and has a low latency.
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Christian Krafft <krafft@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifdef __KERNEL__
+
+#define PMI_TYPE_FREQ_CHANGE 0x01
+#define PMI_TYPE_POWER_BUTTON 0x02
+#define PMI_READ_TYPE 0
+#define PMI_READ_DATA0 1
+#define PMI_READ_DATA1 2
+#define PMI_READ_DATA2 3
+#define PMI_WRITE_TYPE 4
+#define PMI_WRITE_DATA0 5
+#define PMI_WRITE_DATA1 6
+#define PMI_WRITE_DATA2 7
+
+#define PMI_ACK 0x80
+
+#define PMI_TIMEOUT 100
+
+typedef struct {
+ u8 type;
+ u8 data0;
+ u8 data1;
+ u8 data2;
+} pmi_message_t;
+
+struct pmi_handler {
+ struct list_head node;
+ u8 type;
+ void (*handle_pmi_message) (pmi_message_t);
+};
+
+int pmi_register_handler(struct pmi_handler *);
+void pmi_unregister_handler(struct pmi_handler *);
+
+int pmi_send_message(pmi_message_t);
+
+#endif /* __KERNEL__ */
+#endif /* _POWERPC_PMI_H */
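/*
 * Illustrative sketch, not part of the diff above: registering a PMI
 * message handler and sending a message with the interface declared in
 * pmi.h. The frequency-change payload values are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <asm/pmi.h>

static void example_handle_freq_change(pmi_message_t msg)
{
	/* msg.type is PMI_TYPE_FREQ_CHANGE; data0..data2 carry the payload */
	pr_debug("PMI freq change: %d %d %d\n", msg.data0, msg.data1, msg.data2);
}

static struct pmi_handler example_pmi_handler = {
	.type = PMI_TYPE_FREQ_CHANGE,
	.handle_pmi_message = example_handle_freq_change,
};

static int example_pmi_use(void)
{
	pmi_message_t msg = {
		.type  = PMI_TYPE_FREQ_CHANGE,
		.data0 = 0,	/* hypothetical requested frequency index */
	};
	int rc;

	rc = pmi_register_handler(&example_pmi_handler);
	if (rc)
		return rc;

	return pmi_send_message(msg);
}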
diff --git a/arch/powerpc/include/asm/poll.h b/arch/powerpc/include/asm/poll.h
new file mode 100644
index 0000000..c98509d
--- /dev/null
+++ b/arch/powerpc/include/asm/poll.h
@@ -0,0 +1 @@
+#include <asm-generic/poll.h>
diff --git a/arch/powerpc/include/asm/posix_types.h b/arch/powerpc/include/asm/posix_types.h
new file mode 100644
index 0000000..c4e396b
--- /dev/null
+++ b/arch/powerpc/include/asm/posix_types.h
@@ -0,0 +1,128 @@
+#ifndef _ASM_POWERPC_POSIX_TYPES_H
+#define _ASM_POWERPC_POSIX_TYPES_H
+
+/*
+ * This file is generally used by user-level software, so you need to
+ * be a little careful about namespace pollution etc. Also, we cannot
+ * assume GCC is being used.
+ */
+
+typedef unsigned long __kernel_ino_t;
+typedef unsigned int __kernel_mode_t;
+typedef long __kernel_off_t;
+typedef int __kernel_pid_t;
+typedef unsigned int __kernel_uid_t;
+typedef unsigned int __kernel_gid_t;
+typedef long __kernel_ptrdiff_t;
+typedef long __kernel_time_t;
+typedef long __kernel_clock_t;
+typedef int __kernel_timer_t;
+typedef int __kernel_clockid_t;
+typedef long __kernel_suseconds_t;
+typedef int __kernel_daddr_t;
+typedef char * __kernel_caddr_t;
+typedef unsigned short __kernel_uid16_t;
+typedef unsigned short __kernel_gid16_t;
+typedef unsigned int __kernel_uid32_t;
+typedef unsigned int __kernel_gid32_t;
+typedef unsigned int __kernel_old_uid_t;
+typedef unsigned int __kernel_old_gid_t;
+
+#ifdef __powerpc64__
+typedef unsigned long __kernel_nlink_t;
+typedef int __kernel_ipc_pid_t;
+typedef unsigned long __kernel_size_t;
+typedef long __kernel_ssize_t;
+typedef unsigned long __kernel_old_dev_t;
+#else
+typedef unsigned short __kernel_nlink_t;
+typedef short __kernel_ipc_pid_t;
+typedef unsigned int __kernel_size_t;
+typedef int __kernel_ssize_t;
+typedef unsigned int __kernel_old_dev_t;
+#endif
+
+#ifdef __powerpc64__
+typedef long long __kernel_loff_t;
+#else
+#ifdef __GNUC__
+typedef long long __kernel_loff_t;
+#endif
+#endif
+
+typedef struct {
+ int val[2];
+} __kernel_fsid_t;
+
+#ifndef __GNUC__
+
+#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d))
+#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
+#define __FD_ISSET(d, set) (((set)->fds_bits[__FDELT(d)] & __FDMASK(d)) != 0)
+#define __FD_ZERO(set) \
+ ((void) memset ((void *) (set), 0, sizeof (__kernel_fd_set)))
+
+#else /* __GNUC__ */
+
+#if defined(__KERNEL__)
+/* With GNU C, use inline functions instead so args are evaluated only once: */
+
+#undef __FD_SET
+static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
+{
+ unsigned long _tmp = fd / __NFDBITS;
+ unsigned long _rem = fd % __NFDBITS;
+ fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
+}
+
+#undef __FD_CLR
+static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
+{
+ unsigned long _tmp = fd / __NFDBITS;
+ unsigned long _rem = fd % __NFDBITS;
+ fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
+}
+
+#undef __FD_ISSET
+static __inline__ int __FD_ISSET(unsigned long fd, __kernel_fd_set *p)
+{
+ unsigned long _tmp = fd / __NFDBITS;
+ unsigned long _rem = fd % __NFDBITS;
+ return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
+}
+
+/*
+ * This will unroll the loop for the normal constant case (8 ints,
+ * for a 256-bit fd_set)
+ */
+#undef __FD_ZERO
+static __inline__ void __FD_ZERO(__kernel_fd_set *p)
+{
+ unsigned long *tmp = (unsigned long *)p->fds_bits;
+ int i;
+
+ if (__builtin_constant_p(__FDSET_LONGS)) {
+ switch (__FDSET_LONGS) {
+ case 16:
+ tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
+ tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
+
+ case 8:
+ tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
+
+ case 4:
+ tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
+ return;
+ }
+ }
+ i = __FDSET_LONGS;
+ while (i) {
+ i--;
+ *tmp = 0;
+ tmp++;
+ }
+}
+
+#endif /* defined(__KERNEL__) */
+#endif /* __GNUC__ */
+#endif /* _ASM_POWERPC_POSIX_TYPES_H */
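/*
 * Illustrative sketch, not part of the diff above: what the __FD_* helpers
 * above compute. __kernel_fd_set, __NFDBITS and __FDSET_LONGS come from
 * <linux/posix_types.h>; descriptor fd lives in word fd / __NFDBITS, at
 * bit fd % __NFDBITS, and __FD_ZERO simply clears every word.
 */
#include <linux/posix_types.h>

static __inline__ int example_fd_roundtrip(unsigned long fd)
{
	__kernel_fd_set set;

	__FD_ZERO(&set);
	__FD_SET(fd, &set);		/* word fd/__NFDBITS |= 1UL << (fd%__NFDBITS) */
	return __FD_ISSET(fd, &set);	/* returns 1 */
}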
diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h
new file mode 100644
index 0000000..854ab71
--- /dev/null
+++ b/arch/powerpc/include/asm/ppc-pci.h
@@ -0,0 +1,149 @@
+/*
+ * c 2001 PPC 64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_POWERPC_PPC_PCI_H
+#define _ASM_POWERPC_PPC_PCI_H
+#ifdef __KERNEL__
+
+#ifdef CONFIG_PCI
+
+#include <linux/pci.h>
+#include <asm/pci-bridge.h>
+
+extern unsigned long isa_io_base;
+
+extern void pci_setup_phb_io(struct pci_controller *hose, int primary);
+extern void pci_setup_phb_io_dynamic(struct pci_controller *hose, int primary);
+
+
+extern struct list_head hose_list;
+
+extern void find_and_init_phbs(void);
+
+extern struct pci_dev *isa_bridge_pcidev; /* may be NULL if no ISA bus */
+
+/** Bus Unit ID macros; get low and hi 32-bits of the 64-bit BUID */
+#define BUID_HI(buid) ((buid) >> 32)
+#define BUID_LO(buid) ((buid) & 0xffffffff)
+
+/* PCI device_node operations */
+struct device_node;
+typedef void *(*traverse_func)(struct device_node *me, void *data);
+void *traverse_pci_devices(struct device_node *start, traverse_func pre,
+ void *data);
+
+extern void pci_devs_phb_init(void);
+extern void pci_devs_phb_init_dynamic(struct pci_controller *phb);
+extern void scan_phb(struct pci_controller *hose);
+
+/* From rtas_pci.h */
+extern void init_pci_config_tokens (void);
+extern unsigned long get_phb_buid (struct device_node *);
+extern int rtas_setup_phb(struct pci_controller *phb);
+
+extern unsigned long pci_probe_only;
+
+/* ---- EEH internal-use-only related routines ---- */
+#ifdef CONFIG_EEH
+
+void pci_addr_cache_insert_device(struct pci_dev *dev);
+void pci_addr_cache_remove_device(struct pci_dev *dev);
+void pci_addr_cache_build(void);
+struct pci_dev *pci_get_device_by_addr(unsigned long addr);
+
+/**
+ * eeh_slot_error_detail -- record an EEH error condition to the log
+ * @pdn: pci device node
+ * @severity: EEH_LOG_TEMP_FAILURE or EEH_LOG_PERM_FAILURE
+ *
+ * Obtains the EEH error details from the RTAS subsystem,
+ * and then logs these details with the RTAS error log system.
+ */
+#define EEH_LOG_TEMP_FAILURE 1
+#define EEH_LOG_PERM_FAILURE 2
+void eeh_slot_error_detail (struct pci_dn *pdn, int severity);
+
+/**
+ * rtas_pci_enable - enable IO transfers for this slot
+ * @pdn: pci device node
+ * @function: either EEH_THAW_MMIO or EEH_THAW_DMA
+ *
+ * Enable I/O transfers to this slot
+ */
+#define EEH_THAW_MMIO 2
+#define EEH_THAW_DMA 3
+int rtas_pci_enable(struct pci_dn *pdn, int function);
+
+/**
+ * rtas_set_slot_reset -- unfreeze a frozen slot
+ * @pdn: pci device node
+ *
+ * Clear the EEH-frozen condition on a slot. This routine
+ * does this by asserting the PCI #RST line for 1/8th of
+ * a second; this routine will sleep while the adapter is
+ * being reset.
+ *
+ * Returns a non-zero value if the reset failed.
+ */
+int rtas_set_slot_reset (struct pci_dn *);
+int eeh_wait_for_slot_status(struct pci_dn *pdn, int max_wait_msecs);
+
+/**
+ * eeh_restore_bars - Restore device configuration info.
+ * @pdn: pci device node
+ *
+ * A reset of a PCI device will clear out its config space.
+ * This routine will restore the config space for this
+ * device, and its children, to values previously obtained
+ * from the firmware.
+ */
+void eeh_restore_bars(struct pci_dn *);
+
+/**
+ * rtas_configure_bridge -- firmware initialization of pci bridge
+ * @pdn: pci device node
+ *
+ * Ask the firmware to configure all PCI bridge devices
+ * located behind the indicated node. Required after a
+ * pci device reset. Does essentially the same thing as
+ * eeh_restore_bars, but for bridges, and lets firmware
+ * do the work.
+ */
+void rtas_configure_bridge(struct pci_dn *);
+
+int rtas_write_config(struct pci_dn *, int where, int size, u32 val);
+int rtas_read_config(struct pci_dn *, int where, int size, u32 *val);
+
+/**
+ * eeh_mark_slot -- set mode flags for partition endpoint
+ * @pdn: pci device node
+ *
+ * mark and clear slots: find "partition endpoint" PE and set or
+ * clear the flags for each subnode of the PE.
+ */
+void eeh_mark_slot (struct device_node *dn, int mode_flag);
+void eeh_clear_slot (struct device_node *dn, int mode_flag);
+
+/**
+ * find_device_pe -- Find the associated "Partitionable Endpoint" PE
+ * @pdn: pci device node
+ */
+struct device_node * find_device_pe(struct device_node *dn);
+
+void eeh_sysfs_add_device(struct pci_dev *pdev);
+void eeh_sysfs_remove_device(struct pci_dev *pdev);
+
+#endif /* CONFIG_EEH */
+
+#else /* CONFIG_PCI */
+static inline void find_and_init_phbs(void) { }
+static inline void init_pci_config_tokens(void) { }
+#endif /* !CONFIG_PCI */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_PPC_PCI_H */
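/*
 * Illustrative sketch, not part of the diff above: the order in which the
 * EEH helpers declared in ppc-pci.h are typically combined to recover a
 * frozen slot (assumes CONFIG_EEH). Real recovery lives in the EEH event
 * handler; this is a simplified outline with hypothetical error handling.
 */
#include <asm/ppc-pci.h>

static int example_recover_frozen_slot(struct pci_dn *pdn)
{
	int rc;

	/* Log what the firmware knows about the failure */
	eeh_slot_error_detail(pdn, EEH_LOG_TEMP_FAILURE);

	/* Assert #RST on the slot; this call sleeps while the reset runs */
	rc = rtas_set_slot_reset(pdn);
	if (rc)
		return rc;

	/* The reset cleared config space; put the BARs back */
	eeh_restore_bars(pdn);

	/* Re-enable MMIO, then DMA, on the thawed slot */
	rc = rtas_pci_enable(pdn, EEH_THAW_MMIO);
	if (!rc)
		rc = rtas_pci_enable(pdn, EEH_THAW_DMA);
	return rc;
}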
diff --git a/arch/powerpc/include/asm/ppc4xx.h b/arch/powerpc/include/asm/ppc4xx.h
new file mode 100644
index 0000000..033039a
--- /dev/null
+++ b/arch/powerpc/include/asm/ppc4xx.h
@@ -0,0 +1,18 @@
+/*
+ * PPC4xx Prototypes and definitions
+ *
+ * Copyright 2008 DENX Software Engineering, Stefan Roese <sr@denx.de>
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#ifndef __ASM_POWERPC_PPC4xx_H__
+#define __ASM_POWERPC_PPC4xx_H__
+
+extern void ppc4xx_reset_system(char *cmd);
+
+#endif /* __ASM_POWERPC_PPC4xx_H__ */
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
new file mode 100644
index 0000000..0966899
--- /dev/null
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -0,0 +1,689 @@
+/*
+ * Copyright (C) 1995-1999 Gary Thomas, Paul Mackerras, Cort Dougan.
+ */
+#ifndef _ASM_POWERPC_PPC_ASM_H
+#define _ASM_POWERPC_PPC_ASM_H
+
+#include <linux/stringify.h>
+#include <asm/asm-compat.h>
+#include <asm/processor.h>
+
+#ifndef __ASSEMBLY__
+#error __FILE__ should only be used in assembler files
+#else
+
+#define SZL (BITS_PER_LONG/8)
+
+/*
+ * Stuff for accurate CPU time accounting.
+ * These macros handle transitions between user and system state
+ * in exception entry and exit and accumulate time to the
+ * user_time and system_time fields in the paca.
+ */
+
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+#define ACCOUNT_CPU_USER_ENTRY(ra, rb)
+#define ACCOUNT_CPU_USER_EXIT(ra, rb)
+#else
+#define ACCOUNT_CPU_USER_ENTRY(ra, rb) \
+ beq 2f; /* if from kernel mode */ \
+BEGIN_FTR_SECTION; \
+ mfspr ra,SPRN_PURR; /* get processor util. reg */ \
+END_FTR_SECTION_IFSET(CPU_FTR_PURR); \
+BEGIN_FTR_SECTION; \
+ MFTB(ra); /* or get TB if no PURR */ \
+END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \
+ ld rb,PACA_STARTPURR(r13); \
+ std ra,PACA_STARTPURR(r13); \
+ subf rb,rb,ra; /* subtract start value */ \
+ ld ra,PACA_USER_TIME(r13); \
+ add ra,ra,rb; /* add on to user time */ \
+ std ra,PACA_USER_TIME(r13); \
+2:
+
+#define ACCOUNT_CPU_USER_EXIT(ra, rb) \
+BEGIN_FTR_SECTION; \
+ mfspr ra,SPRN_PURR; /* get processor util. reg */ \
+END_FTR_SECTION_IFSET(CPU_FTR_PURR); \
+BEGIN_FTR_SECTION; \
+ MFTB(ra); /* or get TB if no PURR */ \
+END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \
+ ld rb,PACA_STARTPURR(r13); \
+ std ra,PACA_STARTPURR(r13); \
+ subf rb,rb,ra; /* subtract start value */ \
+ ld ra,PACA_SYSTEM_TIME(r13); \
+ add ra,ra,rb; /* add on to system time */ \
+ std ra,PACA_SYSTEM_TIME(r13);
+#endif
+
+/*
+ * Macros for storing registers into and loading registers from
+ * exception frames.
+ */
+#ifdef __powerpc64__
+#define SAVE_GPR(n, base) std n,GPR0+8*(n)(base)
+#define REST_GPR(n, base) ld n,GPR0+8*(n)(base)
+#define SAVE_NVGPRS(base) SAVE_8GPRS(14, base); SAVE_10GPRS(22, base)
+#define REST_NVGPRS(base) REST_8GPRS(14, base); REST_10GPRS(22, base)
+#else
+#define SAVE_GPR(n, base) stw n,GPR0+4*(n)(base)
+#define REST_GPR(n, base) lwz n,GPR0+4*(n)(base)
+#define SAVE_NVGPRS(base) SAVE_GPR(13, base); SAVE_8GPRS(14, base); \
+ SAVE_10GPRS(22, base)
+#define REST_NVGPRS(base) REST_GPR(13, base); REST_8GPRS(14, base); \
+ REST_10GPRS(22, base)
+#endif
+
+/*
+ * Define what the VSX XX1 form instructions will look like, then add
+ * the 128 bit load store instructions based on that.
+ */
+#define VSX_XX1(xs, ra, rb) (((xs) & 0x1f) << 21 | ((ra) << 16) | \
+ ((rb) << 11) | (((xs) >> 5)))
+
+#define STXVD2X(xs, ra, rb) .long (0x7c000798 | VSX_XX1((xs), (ra), (rb)))
+#define LXVD2X(xs, ra, rb) .long (0x7c000698 | VSX_XX1((xs), (ra), (rb)))
+
+#define SAVE_2GPRS(n, base) SAVE_GPR(n, base); SAVE_GPR(n+1, base)
+#define SAVE_4GPRS(n, base) SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base)
+#define SAVE_8GPRS(n, base) SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base)
+#define SAVE_10GPRS(n, base) SAVE_8GPRS(n, base); SAVE_2GPRS(n+8, base)
+#define REST_2GPRS(n, base) REST_GPR(n, base); REST_GPR(n+1, base)
+#define REST_4GPRS(n, base) REST_2GPRS(n, base); REST_2GPRS(n+2, base)
+#define REST_8GPRS(n, base) REST_4GPRS(n, base); REST_4GPRS(n+4, base)
+#define REST_10GPRS(n, base) REST_8GPRS(n, base); REST_2GPRS(n+8, base)
+
+#define SAVE_FPR(n, base) stfd n,THREAD_FPR0+8*TS_FPRWIDTH*(n)(base)
+#define SAVE_2FPRS(n, base) SAVE_FPR(n, base); SAVE_FPR(n+1, base)
+#define SAVE_4FPRS(n, base) SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base)
+#define SAVE_8FPRS(n, base) SAVE_4FPRS(n, base); SAVE_4FPRS(n+4, base)
+#define SAVE_16FPRS(n, base) SAVE_8FPRS(n, base); SAVE_8FPRS(n+8, base)
+#define SAVE_32FPRS(n, base) SAVE_16FPRS(n, base); SAVE_16FPRS(n+16, base)
+#define REST_FPR(n, base) lfd n,THREAD_FPR0+8*TS_FPRWIDTH*(n)(base)
+#define REST_2FPRS(n, base) REST_FPR(n, base); REST_FPR(n+1, base)
+#define REST_4FPRS(n, base) REST_2FPRS(n, base); REST_2FPRS(n+2, base)
+#define REST_8FPRS(n, base) REST_4FPRS(n, base); REST_4FPRS(n+4, base)
+#define REST_16FPRS(n, base) REST_8FPRS(n, base); REST_8FPRS(n+8, base)
+#define REST_32FPRS(n, base) REST_16FPRS(n, base); REST_16FPRS(n+16, base)
+
+#define SAVE_VR(n,b,base) li b,THREAD_VR0+(16*(n)); stvx n,b,base
+#define SAVE_2VRS(n,b,base) SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)
+#define SAVE_4VRS(n,b,base) SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base)
+#define SAVE_8VRS(n,b,base) SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base)
+#define SAVE_16VRS(n,b,base) SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base)
+#define SAVE_32VRS(n,b,base) SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base)
+#define REST_VR(n,b,base) li b,THREAD_VR0+(16*(n)); lvx n,b,base
+#define REST_2VRS(n,b,base) REST_VR(n,b,base); REST_VR(n+1,b,base)
+#define REST_4VRS(n,b,base) REST_2VRS(n,b,base); REST_2VRS(n+2,b,base)
+#define REST_8VRS(n,b,base) REST_4VRS(n,b,base); REST_4VRS(n+4,b,base)
+#define REST_16VRS(n,b,base) REST_8VRS(n,b,base); REST_8VRS(n+8,b,base)
+#define REST_32VRS(n,b,base) REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)
+
+/* Save the lower 32 VSRs in the thread VSR region */
+#define SAVE_VSR(n,b,base) li b,THREAD_VSR0+(16*(n)); STXVD2X(n,b,base)
+#define SAVE_2VSRS(n,b,base) SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base)
+#define SAVE_4VSRS(n,b,base) SAVE_2VSRS(n,b,base); SAVE_2VSRS(n+2,b,base)
+#define SAVE_8VSRS(n,b,base) SAVE_4VSRS(n,b,base); SAVE_4VSRS(n+4,b,base)
+#define SAVE_16VSRS(n,b,base) SAVE_8VSRS(n,b,base); SAVE_8VSRS(n+8,b,base)
+#define SAVE_32VSRS(n,b,base) SAVE_16VSRS(n,b,base); SAVE_16VSRS(n+16,b,base)
+#define REST_VSR(n,b,base) li b,THREAD_VSR0+(16*(n)); LXVD2X(n,b,base)
+#define REST_2VSRS(n,b,base) REST_VSR(n,b,base); REST_VSR(n+1,b,base)
+#define REST_4VSRS(n,b,base) REST_2VSRS(n,b,base); REST_2VSRS(n+2,b,base)
+#define REST_8VSRS(n,b,base) REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base)
+#define REST_16VSRS(n,b,base) REST_8VSRS(n,b,base); REST_8VSRS(n+8,b,base)
+#define REST_32VSRS(n,b,base) REST_16VSRS(n,b,base); REST_16VSRS(n+16,b,base)
+/* Save the upper 32 VSRs (32-63) in the thread VSX region (0-31) */
+#define SAVE_VSRU(n,b,base) li b,THREAD_VR0+(16*(n)); STXVD2X(n+32,b,base)
+#define SAVE_2VSRSU(n,b,base) SAVE_VSRU(n,b,base); SAVE_VSRU(n+1,b,base)
+#define SAVE_4VSRSU(n,b,base) SAVE_2VSRSU(n,b,base); SAVE_2VSRSU(n+2,b,base)
+#define SAVE_8VSRSU(n,b,base) SAVE_4VSRSU(n,b,base); SAVE_4VSRSU(n+4,b,base)
+#define SAVE_16VSRSU(n,b,base) SAVE_8VSRSU(n,b,base); SAVE_8VSRSU(n+8,b,base)
+#define SAVE_32VSRSU(n,b,base) SAVE_16VSRSU(n,b,base); SAVE_16VSRSU(n+16,b,base)
+#define REST_VSRU(n,b,base) li b,THREAD_VR0+(16*(n)); LXVD2X(n+32,b,base)
+#define REST_2VSRSU(n,b,base) REST_VSRU(n,b,base); REST_VSRU(n+1,b,base)
+#define REST_4VSRSU(n,b,base) REST_2VSRSU(n,b,base); REST_2VSRSU(n+2,b,base)
+#define REST_8VSRSU(n,b,base) REST_4VSRSU(n,b,base); REST_4VSRSU(n+4,b,base)
+#define REST_16VSRSU(n,b,base) REST_8VSRSU(n,b,base); REST_8VSRSU(n+8,b,base)
+#define REST_32VSRSU(n,b,base) REST_16VSRSU(n,b,base); REST_16VSRSU(n+16,b,base)
+
+#define SAVE_EVR(n,s,base) evmergehi s,s,n; stw s,THREAD_EVR0+4*(n)(base)
+#define SAVE_2EVRS(n,s,base) SAVE_EVR(n,s,base); SAVE_EVR(n+1,s,base)
+#define SAVE_4EVRS(n,s,base) SAVE_2EVRS(n,s,base); SAVE_2EVRS(n+2,s,base)
+#define SAVE_8EVRS(n,s,base) SAVE_4EVRS(n,s,base); SAVE_4EVRS(n+4,s,base)
+#define SAVE_16EVRS(n,s,base) SAVE_8EVRS(n,s,base); SAVE_8EVRS(n+8,s,base)
+#define SAVE_32EVRS(n,s,base) SAVE_16EVRS(n,s,base); SAVE_16EVRS(n+16,s,base)
+#define REST_EVR(n,s,base) lwz s,THREAD_EVR0+4*(n)(base); evmergelo n,s,n
+#define REST_2EVRS(n,s,base) REST_EVR(n,s,base); REST_EVR(n+1,s,base)
+#define REST_4EVRS(n,s,base) REST_2EVRS(n,s,base); REST_2EVRS(n+2,s,base)
+#define REST_8EVRS(n,s,base) REST_4EVRS(n,s,base); REST_4EVRS(n+4,s,base)
+#define REST_16EVRS(n,s,base) REST_8EVRS(n,s,base); REST_8EVRS(n+8,s,base)
+#define REST_32EVRS(n,s,base) REST_16EVRS(n,s,base); REST_16EVRS(n+16,s,base)
+
+/* Macros to adjust thread priority for hardware multithreading */
+#define HMT_VERY_LOW or 31,31,31 # very low priority
+#define HMT_LOW or 1,1,1
+#define HMT_MEDIUM_LOW or 6,6,6 # medium low priority
+#define HMT_MEDIUM or 2,2,2
+#define HMT_MEDIUM_HIGH or 5,5,5 # medium high priority
+#define HMT_HIGH or 3,3,3
+
+/* handle instructions that older assemblers may not know */
+#define RFCI .long 0x4c000066 /* rfci instruction */
+#define RFDI .long 0x4c00004e /* rfdi instruction */
+#define RFMCI .long 0x4c00004c /* rfmci instruction */
+
+#ifdef __KERNEL__
+#ifdef CONFIG_PPC64
+
+#define XGLUE(a,b) a##b
+#define GLUE(a,b) XGLUE(a,b)
+
+#define _GLOBAL(name) \
+ .section ".text"; \
+ .align 2 ; \
+ .globl name; \
+ .globl GLUE(.,name); \
+ .section ".opd","aw"; \
+name: \
+ .quad GLUE(.,name); \
+ .quad .TOC.@tocbase; \
+ .quad 0; \
+ .previous; \
+ .type GLUE(.,name),@function; \
+GLUE(.,name):
+
+#define _INIT_GLOBAL(name) \
+ .section ".text.init.refok"; \
+ .align 2 ; \
+ .globl name; \
+ .globl GLUE(.,name); \
+ .section ".opd","aw"; \
+name: \
+ .quad GLUE(.,name); \
+ .quad .TOC.@tocbase; \
+ .quad 0; \
+ .previous; \
+ .type GLUE(.,name),@function; \
+GLUE(.,name):
+
+#define _KPROBE(name) \
+ .section ".kprobes.text","a"; \
+ .align 2 ; \
+ .globl name; \
+ .globl GLUE(.,name); \
+ .section ".opd","aw"; \
+name: \
+ .quad GLUE(.,name); \
+ .quad .TOC.@tocbase; \
+ .quad 0; \
+ .previous; \
+ .type GLUE(.,name),@function; \
+GLUE(.,name):
+
+#define _STATIC(name) \
+ .section ".text"; \
+ .align 2 ; \
+ .section ".opd","aw"; \
+name: \
+ .quad GLUE(.,name); \
+ .quad .TOC.@tocbase; \
+ .quad 0; \
+ .previous; \
+ .type GLUE(.,name),@function; \
+GLUE(.,name):
+
+#define _INIT_STATIC(name) \
+ .section ".text.init.refok"; \
+ .align 2 ; \
+ .section ".opd","aw"; \
+name: \
+ .quad GLUE(.,name); \
+ .quad .TOC.@tocbase; \
+ .quad 0; \
+ .previous; \
+ .type GLUE(.,name),@function; \
+GLUE(.,name):
+
+#else /* 32-bit */
+
+#define _ENTRY(n) \
+ .globl n; \
+n:
+
+#define _GLOBAL(n) \
+ .text; \
+ .stabs __stringify(n:F-1),N_FUN,0,0,n;\
+ .globl n; \
+n:
+
+#define _KPROBE(n) \
+ .section ".kprobes.text","a"; \
+ .globl n; \
+n:
+
+#endif
+
+/*
+ * LOAD_REG_IMMEDIATE(rn, expr)
+ * Loads the value of the constant expression 'expr' into register 'rn'
+ * using immediate instructions only. Use this when it's important not
+ * to reference other data (i.e. on ppc64 when the TOC pointer is not
+ * valid).
+ *
+ * LOAD_REG_ADDR(rn, name)
+ * Loads the address of label 'name' into register 'rn'. Use this when
+ * you don't particularly need immediate instructions only, but you need
+ * the whole address in one register (e.g. it's a structure address and
+ * you want to access various offsets within it). On ppc32 this is
+ * identical to LOAD_REG_IMMEDIATE.
+ *
+ * LOAD_REG_ADDRBASE(rn, name)
+ * ADDROFF(name)
+ * LOAD_REG_ADDRBASE loads part of the address of label 'name' into
+ * register 'rn'. ADDROFF(name) returns the remainder of the address as
+ * a constant expression. ADDROFF(name) is a signed expression < 16 bits
+ * in size, so is suitable for use directly as an offset in load and store
+ * instructions. Use this when loading/storing a single word or less as:
+ * LOAD_REG_ADDRBASE(rX, name)
+ * ld rY,ADDROFF(name)(rX)
+ */
+#ifdef __powerpc64__
+#define LOAD_REG_IMMEDIATE(reg,expr) \
+ lis (reg),(expr)@highest; \
+ ori (reg),(reg),(expr)@higher; \
+ rldicr (reg),(reg),32,31; \
+ oris (reg),(reg),(expr)@h; \
+ ori (reg),(reg),(expr)@l;
+
+#define LOAD_REG_ADDR(reg,name) \
+ ld (reg),name@got(r2)
+
+#define LOAD_REG_ADDRBASE(reg,name) LOAD_REG_ADDR(reg,name)
+#define ADDROFF(name) 0
+
+/* offsets for stack frame layout */
+#define LRSAVE 16
+
+#else /* 32-bit */
+
+#define LOAD_REG_IMMEDIATE(reg,expr) \
+ lis (reg),(expr)@ha; \
+ addi (reg),(reg),(expr)@l;
+
+#define LOAD_REG_ADDR(reg,name) LOAD_REG_IMMEDIATE(reg, name)
+
+#define LOAD_REG_ADDRBASE(reg, name) lis (reg),name@ha
+#define ADDROFF(name) name@l
+
+/* offsets for stack frame layout */
+#define LRSAVE 4
+
+#endif
+
+/* various errata or part fixups */
+#ifdef CONFIG_PPC601_SYNC_FIX
+#define SYNC \
+BEGIN_FTR_SECTION \
+ sync; \
+ isync; \
+END_FTR_SECTION_IFSET(CPU_FTR_601)
+#define SYNC_601 \
+BEGIN_FTR_SECTION \
+ sync; \
+END_FTR_SECTION_IFSET(CPU_FTR_601)
+#define ISYNC_601 \
+BEGIN_FTR_SECTION \
+ isync; \
+END_FTR_SECTION_IFSET(CPU_FTR_601)
+#else
+#define SYNC
+#define SYNC_601
+#define ISYNC_601
+#endif
+
+#ifdef CONFIG_PPC_CELL
+#define MFTB(dest) \
+90: mftb dest; \
+BEGIN_FTR_SECTION_NESTED(96); \
+ cmpwi dest,0; \
+ beq- 90b; \
+END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
+#else
+#define MFTB(dest) mftb dest
+#endif
+
+#ifndef CONFIG_SMP
+#define TLBSYNC
+#else /* CONFIG_SMP */
+/* tlbsync is not implemented on 601 */
+#define TLBSYNC \
+BEGIN_FTR_SECTION \
+ tlbsync; \
+ sync; \
+END_FTR_SECTION_IFCLR(CPU_FTR_601)
+#endif
+
+
+/*
+ * This instruction is not implemented on the PPC 603 or 601; however, on
+ * the 403GCX and 405GP tlbia IS defined and tlbie is not.
+ * All of these instructions exist in the 8xx, they have magical powers,
+ * and they must be used.
+ */
+
+#if !defined(CONFIG_4xx) && !defined(CONFIG_8xx)
+#define tlbia \
+ li r4,1024; \
+ mtctr r4; \
+ lis r4,KERNELBASE@h; \
+0: tlbie r4; \
+ addi r4,r4,0x1000; \
+ bdnz 0b
+#endif
+
+
+#ifdef CONFIG_IBM440EP_ERR42
+#define PPC440EP_ERR42 isync
+#else
+#define PPC440EP_ERR42
+#endif
+
+
+#if defined(CONFIG_BOOKE)
+#define toreal(rd)
+#define fromreal(rd)
+
+/*
+ * We use addis to ensure compatibility with the "classic" ppc versions of
+ * these macros, which use rs = 0 to get the tophys offset in rd, rather than
+ * converting the address in r0, and so this version has to do that too
+ * (i.e. set register rd to 0 when rs == 0).
+ */
+#define tophys(rd,rs) \
+ addis rd,rs,0
+
+#define tovirt(rd,rs) \
+ addis rd,rs,0
+
+#elif defined(CONFIG_PPC64)
+#define toreal(rd) /* we can access c000... in real mode */
+#define fromreal(rd)
+
+#define tophys(rd,rs) \
+ clrldi rd,rs,2
+
+#define tovirt(rd,rs) \
+ rotldi rd,rs,16; \
+ ori rd,rd,((KERNELBASE>>48)&0xFFFF);\
+ rotldi rd,rd,48
+#else
+/*
+ * On APUS (Amiga PowerPC cpu upgrade board), we don't know the
+ * physical base address of RAM at compile time.
+ */
+#define toreal(rd) tophys(rd,rd)
+#define fromreal(rd) tovirt(rd,rd)
+
+#define tophys(rd,rs) \
+0: addis rd,rs,-KERNELBASE@h; \
+ .section ".vtop_fixup","aw"; \
+ .align 1; \
+ .long 0b; \
+ .previous
+
+#define tovirt(rd,rs) \
+0: addis rd,rs,KERNELBASE@h; \
+ .section ".ptov_fixup","aw"; \
+ .align 1; \
+ .long 0b; \
+ .previous
+#endif
+
+#ifdef CONFIG_PPC64
+#define RFI rfid
+#define MTMSRD(r) mtmsrd r
+
+#else
+#define FIX_SRR1(ra, rb)
+#ifndef CONFIG_40x
+#define RFI rfi
+#else
+#define RFI rfi; b . /* Prevent prefetch past rfi */
+#endif
+#define MTMSRD(r) mtmsr r
+#define CLR_TOP32(r)
+#endif
+
+#endif /* __KERNEL__ */
+
+/* The boring bits... */
+
+/* Condition Register Bit Fields */
+
+#define cr0 0
+#define cr1 1
+#define cr2 2
+#define cr3 3
+#define cr4 4
+#define cr5 5
+#define cr6 6
+#define cr7 7
+
+
+/* General Purpose Registers (GPRs) */
+
+#define r0 0
+#define r1 1
+#define r2 2
+#define r3 3
+#define r4 4
+#define r5 5
+#define r6 6
+#define r7 7
+#define r8 8
+#define r9 9
+#define r10 10
+#define r11 11
+#define r12 12
+#define r13 13
+#define r14 14
+#define r15 15
+#define r16 16
+#define r17 17
+#define r18 18
+#define r19 19
+#define r20 20
+#define r21 21
+#define r22 22
+#define r23 23
+#define r24 24
+#define r25 25
+#define r26 26
+#define r27 27
+#define r28 28
+#define r29 29
+#define r30 30
+#define r31 31
+
+
+/* Floating Point Registers (FPRs) */
+
+#define fr0 0
+#define fr1 1
+#define fr2 2
+#define fr3 3
+#define fr4 4
+#define fr5 5
+#define fr6 6
+#define fr7 7
+#define fr8 8
+#define fr9 9
+#define fr10 10
+#define fr11 11
+#define fr12 12
+#define fr13 13
+#define fr14 14
+#define fr15 15
+#define fr16 16
+#define fr17 17
+#define fr18 18
+#define fr19 19
+#define fr20 20
+#define fr21 21
+#define fr22 22
+#define fr23 23
+#define fr24 24
+#define fr25 25
+#define fr26 26
+#define fr27 27
+#define fr28 28
+#define fr29 29
+#define fr30 30
+#define fr31 31
+
+/* AltiVec Registers (VPRs) */
+
+#define vr0 0
+#define vr1 1
+#define vr2 2
+#define vr3 3
+#define vr4 4
+#define vr5 5
+#define vr6 6
+#define vr7 7
+#define vr8 8
+#define vr9 9
+#define vr10 10
+#define vr11 11
+#define vr12 12
+#define vr13 13
+#define vr14 14
+#define vr15 15
+#define vr16 16
+#define vr17 17
+#define vr18 18
+#define vr19 19
+#define vr20 20
+#define vr21 21
+#define vr22 22
+#define vr23 23
+#define vr24 24
+#define vr25 25
+#define vr26 26
+#define vr27 27
+#define vr28 28
+#define vr29 29
+#define vr30 30
+#define vr31 31
+
+/* VSX Registers (VSRs) */
+
+#define vsr0 0
+#define vsr1 1
+#define vsr2 2
+#define vsr3 3
+#define vsr4 4
+#define vsr5 5
+#define vsr6 6
+#define vsr7 7
+#define vsr8 8
+#define vsr9 9
+#define vsr10 10
+#define vsr11 11
+#define vsr12 12
+#define vsr13 13
+#define vsr14 14
+#define vsr15 15
+#define vsr16 16
+#define vsr17 17
+#define vsr18 18
+#define vsr19 19
+#define vsr20 20
+#define vsr21 21
+#define vsr22 22
+#define vsr23 23
+#define vsr24 24
+#define vsr25 25
+#define vsr26 26
+#define vsr27 27
+#define vsr28 28
+#define vsr29 29
+#define vsr30 30
+#define vsr31 31
+#define vsr32 32
+#define vsr33 33
+#define vsr34 34
+#define vsr35 35
+#define vsr36 36
+#define vsr37 37
+#define vsr38 38
+#define vsr39 39
+#define vsr40 40
+#define vsr41 41
+#define vsr42 42
+#define vsr43 43
+#define vsr44 44
+#define vsr45 45
+#define vsr46 46
+#define vsr47 47
+#define vsr48 48
+#define vsr49 49
+#define vsr50 50
+#define vsr51 51
+#define vsr52 52
+#define vsr53 53
+#define vsr54 54
+#define vsr55 55
+#define vsr56 56
+#define vsr57 57
+#define vsr58 58
+#define vsr59 59
+#define vsr60 60
+#define vsr61 61
+#define vsr62 62
+#define vsr63 63
+
+/* SPE Registers (EVPRs) */
+
+#define evr0 0
+#define evr1 1
+#define evr2 2
+#define evr3 3
+#define evr4 4
+#define evr5 5
+#define evr6 6
+#define evr7 7
+#define evr8 8
+#define evr9 9
+#define evr10 10
+#define evr11 11
+#define evr12 12
+#define evr13 13
+#define evr14 14
+#define evr15 15
+#define evr16 16
+#define evr17 17
+#define evr18 18
+#define evr19 19
+#define evr20 20
+#define evr21 21
+#define evr22 22
+#define evr23 23
+#define evr24 24
+#define evr25 25
+#define evr26 26
+#define evr27 27
+#define evr28 28
+#define evr29 29
+#define evr30 30
+#define evr31 31
+
+/* some stab codes */
+#define N_FUN 36
+#define N_RSYM 64
+#define N_SLINE 68
+#define N_SO 100
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_PPC_ASM_H */
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
new file mode 100644
index 0000000..101ed87
--- /dev/null
+++ b/arch/powerpc/include/asm/processor.h
@@ -0,0 +1,314 @@
+#ifndef _ASM_POWERPC_PROCESSOR_H
+#define _ASM_POWERPC_PROCESSOR_H
+
+/*
+ * Copyright (C) 2001 PPC 64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/reg.h>
+
+#ifdef CONFIG_VSX
+#define TS_FPRWIDTH 2
+#else
+#define TS_FPRWIDTH 1
+#endif
+
+#ifndef __ASSEMBLY__
+#include <linux/compiler.h>
+#include <asm/ptrace.h>
+#include <asm/types.h>
+
+/* We do _not_ want to define new machine types at all, those must die
+ * in favor of using the device-tree
+ * -- BenH.
+ */
+
+/* PREP sub-platform types see residual.h for these */
+#define _PREP_Motorola 0x01 /* motorola prep */
+#define _PREP_Firm 0x02 /* firmworks prep */
+#define _PREP_IBM 0x00 /* ibm prep */
+#define _PREP_Bull 0x03 /* bull prep */
+
+/* CHRP sub-platform types. These are arbitrary */
+#define _CHRP_Motorola 0x04 /* motorola chrp, the cobra */
+#define _CHRP_IBM 0x05 /* IBM chrp, the longtrail and longtrail 2 */
+#define _CHRP_Pegasos 0x06 /* Genesi/bplan's Pegasos and Pegasos2 */
+#define _CHRP_briq 0x07 /* TotalImpact's briQ */
+
+#if defined(__KERNEL__) && defined(CONFIG_PPC32)
+
+extern int _chrp_type;
+
+#ifdef CONFIG_PPC_PREP
+
+/* what kind of prep workstation we are */
+extern int _prep_type;
+
+#endif /* CONFIG_PPC_PREP */
+
+#endif /* defined(__KERNEL__) && defined(CONFIG_PPC32) */
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ __label__ _l; _l: &&_l;})
+
+/* Macros for adjusting thread priority (hardware multi-threading) */
+#define HMT_very_low() asm volatile("or 31,31,31 # very low priority")
+#define HMT_low() asm volatile("or 1,1,1 # low priority")
+#define HMT_medium_low() asm volatile("or 6,6,6 # medium low priority")
+#define HMT_medium() asm volatile("or 2,2,2 # medium priority")
+#define HMT_medium_high() asm volatile("or 5,5,5 # medium high priority")
+#define HMT_high() asm volatile("or 3,3,3 # high priority")
+
+#ifdef __KERNEL__
+
+extern int have_of;
+
+struct task_struct;
+void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
+void release_thread(struct task_struct *);
+
+/* Prepare to copy thread state - unlazy all lazy status */
+extern void prepare_to_copy(struct task_struct *tsk);
+
+/* Create a new kernel thread. */
+extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
+
+/* Lazy FPU handling on uni-processor */
+extern struct task_struct *last_task_used_math;
+extern struct task_struct *last_task_used_altivec;
+extern struct task_struct *last_task_used_vsx;
+extern struct task_struct *last_task_used_spe;
+
+#ifdef CONFIG_PPC32
+
+#if CONFIG_TASK_SIZE > CONFIG_KERNEL_START
+#error User TASK_SIZE overlaps with KERNEL_START address
+#endif
+#define TASK_SIZE (CONFIG_TASK_SIZE)
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE (TASK_SIZE / 8 * 3)
+#endif
+
+#ifdef CONFIG_PPC64
+/* 64-bit user address space is 44-bits (16TB user VM) */
+#define TASK_SIZE_USER64 (0x0000100000000000UL)
+
+/*
+ * 32-bit user address space is 4GB - 1 page
+ * (this 1 page is needed so referencing of 0xFFFFFFFF generates EFAULT
+ */
+#define TASK_SIZE_USER32 (0x0000000100000000UL - (1*PAGE_SIZE))
+
+#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
+ TASK_SIZE_USER32 : TASK_SIZE_USER64)
+#define TASK_SIZE TASK_SIZE_OF(current)
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
+#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_USER64 / 4))
+
+#define TASK_UNMAPPED_BASE ((test_thread_flag(TIF_32BIT)) ? \
+ TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
+#endif
+
+#ifdef __KERNEL__
+#ifdef __powerpc64__
+
+#define STACK_TOP_USER64 TASK_SIZE_USER64
+#define STACK_TOP_USER32 TASK_SIZE_USER32
+
+#define STACK_TOP (test_thread_flag(TIF_32BIT) ? \
+ STACK_TOP_USER32 : STACK_TOP_USER64)
+
+#define STACK_TOP_MAX STACK_TOP_USER64
+
+#else /* __powerpc64__ */
+
+#define STACK_TOP TASK_SIZE
+#define STACK_TOP_MAX STACK_TOP
+
+#endif /* __powerpc64__ */
+#endif /* __KERNEL__ */
+
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
+
+#define TS_FPROFFSET 0
+#define TS_VSRLOWOFFSET 1
+#define TS_FPR(i) fpr[i][TS_FPROFFSET]
+
+struct thread_struct {
+ unsigned long ksp; /* Kernel stack pointer */
+ unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */
+
+#ifdef CONFIG_PPC64
+ unsigned long ksp_vsid;
+#endif
+ struct pt_regs *regs; /* Pointer to saved register state */
+ mm_segment_t fs; /* for get_fs() validation */
+#ifdef CONFIG_PPC32
+ void *pgdir; /* root of page-table tree */
+#endif
+#if defined(CONFIG_4xx) || defined (CONFIG_BOOKE)
+ unsigned long dbcr0; /* debug control register values */
+ unsigned long dbcr1;
+#endif
+ /* FP and VSX 0-31 register set */
+ double fpr[32][TS_FPRWIDTH];
+ struct {
+
+ unsigned int pad;
+ unsigned int val; /* Floating point status */
+ } fpscr;
+ int fpexc_mode; /* floating-point exception mode */
+ unsigned int align_ctl; /* alignment handling control */
+#ifdef CONFIG_PPC64
+ unsigned long start_tb; /* Start purr when proc switched in */
+ unsigned long accum_tb; /* Total accumulated purr for process */
+#endif
+ unsigned long dabr; /* Data address breakpoint register */
+#ifdef CONFIG_ALTIVEC
+ /* Complete AltiVec register set */
+ vector128 vr[32] __attribute__((aligned(16)));
+ /* AltiVec status */
+ vector128 vscr __attribute__((aligned(16)));
+ unsigned long vrsave;
+ int used_vr; /* set if process has used altivec */
+#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_VSX
+ /* VSR status */
+ int used_vsr; /* set if process has used VSX */
+#endif /* CONFIG_VSX */
+#ifdef CONFIG_SPE
+ unsigned long evr[32]; /* upper 32-bits of SPE regs */
+ u64 acc; /* Accumulator */
+ unsigned long spefscr; /* SPE & eFP status */
+ int used_spe; /* set if process has used spe */
+#endif /* CONFIG_SPE */
+};
+
+#define ARCH_MIN_TASKALIGN 16
+
+#define INIT_SP (sizeof(init_stack) + (unsigned long) &init_stack)
+#define INIT_SP_LIMIT \
+ (_ALIGN_UP(sizeof(init_thread_info), 16) + (unsigned long) &init_stack)
+
+
+#ifdef CONFIG_PPC32
+#define INIT_THREAD { \
+ .ksp = INIT_SP, \
+ .ksp_limit = INIT_SP_LIMIT, \
+ .fs = KERNEL_DS, \
+ .pgdir = swapper_pg_dir, \
+ .fpexc_mode = MSR_FE0 | MSR_FE1, \
+}
+#else
+#define INIT_THREAD { \
+ .ksp = INIT_SP, \
+ .ksp_limit = INIT_SP_LIMIT, \
+ .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
+ .fs = KERNEL_DS, \
+ .fpr = {{0}}, \
+ .fpscr = { .val = 0, }, \
+ .fpexc_mode = 0, \
+}
+#endif
+
+/*
+ * Return saved PC of a blocked thread. For now, this is the "user" PC
+ */
+#define thread_saved_pc(tsk) \
+ ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
+
+#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.regs)
+
+unsigned long get_wchan(struct task_struct *p);
+
+#define KSTK_EIP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
+#define KSTK_ESP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->gpr[1]: 0)
+
+/* Get/set floating-point exception mode */
+#define GET_FPEXC_CTL(tsk, adr) get_fpexc_mode((tsk), (adr))
+#define SET_FPEXC_CTL(tsk, val) set_fpexc_mode((tsk), (val))
+
+extern int get_fpexc_mode(struct task_struct *tsk, unsigned long adr);
+extern int set_fpexc_mode(struct task_struct *tsk, unsigned int val);
+
+#define GET_ENDIAN(tsk, adr) get_endian((tsk), (adr))
+#define SET_ENDIAN(tsk, val) set_endian((tsk), (val))
+
+extern int get_endian(struct task_struct *tsk, unsigned long adr);
+extern int set_endian(struct task_struct *tsk, unsigned int val);
+
+#define GET_UNALIGN_CTL(tsk, adr) get_unalign_ctl((tsk), (adr))
+#define SET_UNALIGN_CTL(tsk, val) set_unalign_ctl((tsk), (val))
+
+extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
+extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
+
+static inline unsigned int __unpack_fe01(unsigned long msr_bits)
+{
+ return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8);
+}
+
+static inline unsigned long __pack_fe01(unsigned int fpmode)
+{
+ return ((fpmode << 10) & MSR_FE0) | ((fpmode << 8) & MSR_FE1);
+}
+
+#ifdef CONFIG_PPC64
+#define cpu_relax() do { HMT_low(); HMT_medium(); barrier(); } while (0)
+#else
+#define cpu_relax() barrier()
+#endif
+
+/* Check that a certain kernel stack pointer is valid in task_struct p */
+int validate_sp(unsigned long sp, struct task_struct *p,
+ unsigned long nbytes);
+
+/*
+ * Prefetch macros.
+ */
+#define ARCH_HAS_PREFETCH
+#define ARCH_HAS_PREFETCHW
+#define ARCH_HAS_SPINLOCK_PREFETCH
+
+static inline void prefetch(const void *x)
+{
+ if (unlikely(!x))
+ return;
+
+ __asm__ __volatile__ ("dcbt 0,%0" : : "r" (x));
+}
+
+static inline void prefetchw(const void *x)
+{
+ if (unlikely(!x))
+ return;
+
+ __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (x));
+}
+
+#define spin_lock_prefetch(x) prefetchw(x)
+
+#ifdef CONFIG_PPC64
+#define HAVE_ARCH_PICK_MMAP_LAYOUT
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_POWERPC_PROCESSOR_H */
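/*
 * Illustrative sketch, not part of the diff above: using the prefetch()
 * helper declared in processor.h while walking a singly linked list, so
 * the dcbt for the next node overlaps with work on the current one. The
 * node type and payload are hypothetical.
 */
#include <asm/processor.h>

struct example_node {
	struct example_node *next;
	unsigned long payload;
};

static unsigned long example_sum(struct example_node *n)
{
	unsigned long sum = 0;

	while (n) {
		prefetch(n->next);	/* prefetch() ignores a NULL pointer */
		sum += n->payload;
		n = n->next;
	}
	return sum;
}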
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
new file mode 100644
index 0000000..eb3bd2e
--- /dev/null
+++ b/arch/powerpc/include/asm/prom.h
@@ -0,0 +1,356 @@
+#ifndef _POWERPC_PROM_H
+#define _POWERPC_PROM_H
+#ifdef __KERNEL__
+
+/*
+ * Definitions for talking to the Open Firmware PROM on
+ * Power Macintosh computers.
+ *
+ * Copyright (C) 1996-2005 Paul Mackerras.
+ *
+ * Updates for PPC64 by Peter Bergner & David Engebretsen, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/platform_device.h>
+#include <asm/irq.h>
+#include <asm/atomic.h>
+
+#define OF_ROOT_NODE_ADDR_CELLS_DEFAULT 1
+#define OF_ROOT_NODE_SIZE_CELLS_DEFAULT 1
+
+#define of_compat_cmp(s1, s2, l) strcasecmp((s1), (s2))
+#define of_prop_cmp(s1, s2) strcmp((s1), (s2))
+#define of_node_cmp(s1, s2) strcasecmp((s1), (s2))
+
+/* Definitions used by the flattened device tree */
+#define OF_DT_HEADER 0xd00dfeed /* marker */
+#define OF_DT_BEGIN_NODE 0x1 /* Start of node, full name */
+#define OF_DT_END_NODE 0x2 /* End node */
+#define OF_DT_PROP 0x3 /* Property: name off, size,
+ * content */
+#define OF_DT_NOP 0x4 /* nop */
+#define OF_DT_END 0x9
+
+#define OF_DT_VERSION 0x10
+
+/*
+ * This is what gets passed to the kernel by prom_init or kexec
+ *
+ * The dt struct contains the device tree structure, full paths and
+ * property contents. The dt strings block contains just the strings for
+ * the property names; it is fully page aligned and self contained in a
+ * page, so that it can be kept around by the kernel. Each property name
+ * appears only once in this block (cheap compression).
+ *
+ * The mem_rsvmap contains a map of reserved ranges of physical memory;
+ * passing it here instead of in the device-tree itself greatly simplifies
+ * the job of everybody. It's just a list of u64 pairs (base/size) that
+ * ends when size is 0.
+ */
+struct boot_param_header
+{
+ u32 magic; /* magic word OF_DT_HEADER */
+ u32 totalsize; /* total size of DT block */
+ u32 off_dt_struct; /* offset to structure */
+ u32 off_dt_strings; /* offset to strings */
+ u32 off_mem_rsvmap; /* offset to memory reserve map */
+ u32 version; /* format version */
+ u32 last_comp_version; /* last compatible version */
+ /* version 2 fields below */
+ u32 boot_cpuid_phys; /* Physical CPU id we're booting on */
+ /* version 3 fields below */
+ u32 dt_strings_size; /* size of the DT strings block */
+ /* version 17 fields below */
+ u32 dt_struct_size; /* size of the DT structure block */
+};
+
+
+
+typedef u32 phandle;
+typedef u32 ihandle;
+
+struct property {
+ char *name;
+ int length;
+ void *value;
+ struct property *next;
+};
+
+struct device_node {
+ const char *name;
+ const char *type;
+ phandle node;
+ phandle linux_phandle;
+ char *full_name;
+
+ struct property *properties;
+ struct property *deadprops; /* removed properties */
+ struct device_node *parent;
+ struct device_node *child;
+ struct device_node *sibling;
+ struct device_node *next; /* next device of same type */
+ struct device_node *allnext; /* next in list of all nodes */
+ struct proc_dir_entry *pde; /* this node's proc directory */
+ struct kref kref;
+ unsigned long _flags;
+ void *data;
+};
+
+extern struct device_node *of_chosen;
+
+static inline int of_node_check_flag(struct device_node *n, unsigned long flag)
+{
+ return test_bit(flag, &n->_flags);
+}
+
+static inline void of_node_set_flag(struct device_node *n, unsigned long flag)
+{
+ set_bit(flag, &n->_flags);
+}
+
+
+#define HAVE_ARCH_DEVTREE_FIXUPS
+
+static inline void set_node_proc_entry(struct device_node *dn, struct proc_dir_entry *de)
+{
+ dn->pde = de;
+}
+
+
+extern struct device_node *of_find_all_nodes(struct device_node *prev);
+extern struct device_node *of_node_get(struct device_node *node);
+extern void of_node_put(struct device_node *node);
+
+/* For scanning the flat device-tree at boot time */
+extern int __init of_scan_flat_dt(int (*it)(unsigned long node,
+ const char *uname, int depth,
+ void *data),
+ void *data);
+extern void* __init of_get_flat_dt_prop(unsigned long node, const char *name,
+ unsigned long *size);
+extern int __init of_flat_dt_is_compatible(unsigned long node, const char *name);
+extern unsigned long __init of_get_flat_dt_root(void);
+
+/* For updating the device tree at runtime */
+extern void of_attach_node(struct device_node *);
+extern void of_detach_node(struct device_node *);
+
+/* Other Prototypes */
+extern void finish_device_tree(void);
+extern void unflatten_device_tree(void);
+extern void early_init_devtree(void *);
+extern int machine_is_compatible(const char *compat);
+extern void print_properties(struct device_node *node);
+extern int prom_n_intr_cells(struct device_node* np);
+extern void prom_get_irq_senses(unsigned char *senses, int off, int max);
+extern int prom_add_property(struct device_node* np, struct property* prop);
+extern int prom_remove_property(struct device_node *np, struct property *prop);
+extern int prom_update_property(struct device_node *np,
+ struct property *newprop,
+ struct property *oldprop);
+
+#ifdef CONFIG_PPC32
+/*
+ * PCI <-> OF matching functions
+ * (XXX should these be here?)
+ */
+struct pci_bus;
+struct pci_dev;
+extern int pci_device_from_OF_node(struct device_node *node,
+ u8* bus, u8* devfn);
+extern struct device_node* pci_busdev_to_OF_node(struct pci_bus *, int);
+extern struct device_node* pci_device_to_OF_node(struct pci_dev *);
+extern void pci_create_OF_bus_map(void);
+#endif
+
+extern struct resource *request_OF_resource(struct device_node* node,
+ int index, const char* name_postfix);
+extern int release_OF_resource(struct device_node* node, int index);
+
+
+/*
+ * OF address retrieval & translation
+ */
+
+
+/* Helper to read a big number; size is in cells (not bytes) */
+static inline u64 of_read_number(const u32 *cell, int size)
+{
+ u64 r = 0;
+ while (size--)
+ r = (r << 32) | *(cell++);
+ return r;
+}
+
+/* Like of_read_number, but we want an unsigned long result */
+#ifdef CONFIG_PPC32
+static inline unsigned long of_read_ulong(const u32 *cell, int size)
+{
+ return cell[size-1];
+}
+#else
+#define of_read_ulong(cell, size) of_read_number(cell, size)
+#endif
+
+/* Translate an OF address block into a CPU physical address
+ */
+extern u64 of_translate_address(struct device_node *np, const u32 *addr);
+
+/* Translate a DMA address from device space to CPU space */
+extern u64 of_translate_dma_address(struct device_node *dev,
+ const u32 *in_addr);
+
+/* Extract an address from a device; returns the region size and
+ * the address space flags too. The PCI version uses a BAR number
+ * instead of an absolute index.
+ */
+extern const u32 *of_get_address(struct device_node *dev, int index,
+ u64 *size, unsigned int *flags);
+#ifdef CONFIG_PCI
+extern const u32 *of_get_pci_address(struct device_node *dev, int bar_no,
+ u64 *size, unsigned int *flags);
+#else
+static inline const u32 *of_get_pci_address(struct device_node *dev,
+ int bar_no, u64 *size, unsigned int *flags)
+{
+ return NULL;
+}
+#endif /* CONFIG_PCI */
+
+/* Get an address as a resource. Note that if your address is
+ * a PIO address, the conversion will fail if the physical address
+ * can't be internally converted to an IO token with
+ * pci_address_to_pio(); that is because it's either called too early
+ * or it can't be matched to any host bridge IO space.
+ */
+extern int of_address_to_resource(struct device_node *dev, int index,
+ struct resource *r);
+#ifdef CONFIG_PCI
+extern int of_pci_address_to_resource(struct device_node *dev, int bar,
+ struct resource *r);
+#else
+static inline int of_pci_address_to_resource(struct device_node *dev, int bar,
+ struct resource *r)
+{
+ return -ENOSYS;
+}
+#endif /* CONFIG_PCI */
+
+/* Parse the ibm,dma-window property of an OF node into the busno, phys and
+ * size parameters.
+ */
+void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
+ unsigned long *busno, unsigned long *phys, unsigned long *size);
+
+extern void kdump_move_device_tree(void);
+
+/* CPU OF node matching */
+struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
+
+/* Get the MAC address */
+extern const void *of_get_mac_address(struct device_node *np);
+
+/*
+ * OF interrupt mapping
+ */
+
+/* This structure is returned when an interrupt is mapped. The controller
+ * field needs to be put() after use
+ */
+
+#define OF_MAX_IRQ_SPEC 4 /* We handle specifiers of at most 4 cells */
+
+struct of_irq {
+ struct device_node *controller; /* Interrupt controller node */
+ u32 size; /* Specifier size */
+ u32 specifier[OF_MAX_IRQ_SPEC]; /* Specifier copy */
+};
+
+/**
+ * of_irq_map_init - Initialize the irq remapper
+ * @flags: flags defining workarounds to enable
+ *
+ * Some machines have bugs in the device-tree which require certain workarounds
+ * to be applied. Call this before any interrupt mapping attempts to enable
+ * those workarounds.
+ */
+#define OF_IMAP_OLDWORLD_MAC 0x00000001
+#define OF_IMAP_NO_PHANDLE 0x00000002
+
+extern void of_irq_map_init(unsigned int flags);
+
+/**
+ * of_irq_map_raw - Low level interrupt tree parsing
+ * @parent: the device interrupt parent
+ * @intspec: interrupt specifier ("interrupts" property of the device)
+ * @ointsize: size of the passed in interrupt specifier
+ * @addr: address specifier (start of "reg" property of the device)
+ * @out_irq: structure of_irq filled by this function
+ *
+ * Returns 0 on success and a negative number on error
+ *
+ * This function is a low-level interrupt tree walking function. It
+ * can be used to do a partial walk with synthesized reg and interrupts
+ * properties, for example when resolving PCI interrupts when no device
+ * node exists for the parent.
+ *
+ */
+
+extern int of_irq_map_raw(struct device_node *parent, const u32 *intspec,
+ u32 ointsize, const u32 *addr,
+ struct of_irq *out_irq);
+
+
+/**
+ * of_irq_map_one - Resolve an interrupt for a device
+ * @device: the device whose interrupt is to be resolved
+ * @index: index of the interrupt to resolve
+ * @out_irq: structure of_irq filled by this function
+ *
+ * This function resolves an interrupt, walking the tree, for a given
+ * device-tree node. It's the high level counterpart to of_irq_map_raw().
+ * It also implements the workarounds for OldWorld Macs.
+ */
+extern int of_irq_map_one(struct device_node *device, int index,
+ struct of_irq *out_irq);
+
+/**
+ * of_irq_map_pci - Resolve the interrupt for a PCI device
+ * @pdev: the device whose interrupt is to be resolved
+ * @out_irq: structure of_irq filled by this function
+ *
+ * This function resolves the PCI interrupt for a given PCI device. If a
+ * device-node exists for a given pci_dev, it will use normal OF tree
+ * walking. If not, it will implement standard swizzling and walk up the
+ * PCI tree until a device-node is found, at which point it will finish
+ * resolving using the OF tree walking.
+ */
+struct pci_dev;
+extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq);
+
+extern int of_irq_to_resource(struct device_node *dev, int index,
+ struct resource *r);
+
+/**
+ * of_iomap - Maps the memory mapped IO for a given device_node
+ * @device: the device whose io range will be mapped
+ * @index: index of the io range
+ *
+ * Returns a pointer to the mapped memory
+ */
+extern void __iomem *of_iomap(struct device_node *device, int index);
+
+/*
+ * NB: This is here while we transition from using asm/prom.h
+ * to linux/of.h
+ */
+#include <linux/of.h>
+
+#endif /* __KERNEL__ */
+#endif /* _POWERPC_PROM_H */
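/*
 * Illustrative sketch, not part of the diff above: the usual pattern for
 * picking up a device's registers and interrupt with the helpers declared
 * in prom.h. The compatible string "example,uart" and the error handling
 * are hypothetical.
 */
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <asm/prom.h>

static int example_probe(void)
{
	struct device_node *np;
	struct resource res;
	struct of_irq oirq;
	void __iomem *regs;
	int ret = -ENODEV;

	np = of_find_compatible_node(NULL, NULL, "example,uart");
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		goto out;

	regs = of_iomap(np, 0);			/* maps "reg" entry 0 */
	if (!regs)
		goto out;

	/* of_irq_map_one() returns 0 on success; controller needs a put() */
	if (of_irq_map_one(np, 0, &oirq) == 0)
		of_node_put(oirq.controller);

	/* ... register the device, using regs and res ... */
	ret = 0;
out:
	of_node_put(np);
	return ret;
}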
diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h
new file mode 100644
index 0000000..f9e34c4
--- /dev/null
+++ b/arch/powerpc/include/asm/ps3.h
@@ -0,0 +1,519 @@
+/*
+ * PS3 platform declarations.
+ *
+ * Copyright (C) 2006 Sony Computer Entertainment Inc.
+ * Copyright 2006 Sony Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#if !defined(_ASM_POWERPC_PS3_H)
+#define _ASM_POWERPC_PS3_H
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include "cell-pmu.h"
+
+union ps3_firmware_version {
+ u64 raw;
+ struct {
+ u16 pad;
+ u16 major;
+ u16 minor;
+ u16 rev;
+ };
+};
+
+void ps3_get_firmware_version(union ps3_firmware_version *v);
+int ps3_compare_firmware_version(u16 major, u16 minor, u16 rev);
+
+/* 'Other OS' area */
+
+enum ps3_param_av_multi_out {
+ PS3_PARAM_AV_MULTI_OUT_NTSC = 0,
+ PS3_PARAM_AV_MULTI_OUT_PAL_RGB = 1,
+ PS3_PARAM_AV_MULTI_OUT_PAL_YCBCR = 2,
+ PS3_PARAM_AV_MULTI_OUT_SECAM = 3,
+};
+
+enum ps3_param_av_multi_out ps3_os_area_get_av_multi_out(void);
+
+/* dma routines */
+
+enum ps3_dma_page_size {
+ PS3_DMA_4K = 12U,
+ PS3_DMA_64K = 16U,
+ PS3_DMA_1M = 20U,
+ PS3_DMA_16M = 24U,
+};
+
+enum ps3_dma_region_type {
+ PS3_DMA_OTHER = 0,
+ PS3_DMA_INTERNAL = 2,
+};
+
+struct ps3_dma_region_ops;
+
+/**
+ * struct ps3_dma_region - A per device dma state variables structure
+ * @did: The HV device id.
+ * @page_size: The ioc pagesize.
+ * @region_type: The HV region type.
+ * @bus_addr: The 'translated' bus address of the region.
+ * @len: The length in bytes of the region.
+ * @offset: The offset from the start of memory of the region.
+ * @ioid: The IOID of the device that owns this region.
+ * @chunk_list: Opaque variable used by the ioc page manager.
+ * @region_ops: struct ps3_dma_region_ops - dma region operations
+ */
+
+struct ps3_dma_region {
+ struct ps3_system_bus_device *dev;
+ /* device variables */
+ const struct ps3_dma_region_ops *region_ops;
+ unsigned char ioid;
+ enum ps3_dma_page_size page_size;
+ enum ps3_dma_region_type region_type;
+ unsigned long len;
+ unsigned long offset;
+
+ /* driver variables (set by ps3_dma_region_create) */
+ unsigned long bus_addr;
+ struct {
+ spinlock_t lock;
+ struct list_head head;
+ } chunk_list;
+};
+
+struct ps3_dma_region_ops {
+ int (*create)(struct ps3_dma_region *);
+ int (*free)(struct ps3_dma_region *);
+ int (*map)(struct ps3_dma_region *,
+ unsigned long virt_addr,
+ unsigned long len,
+ unsigned long *bus_addr,
+ u64 iopte_pp);
+ int (*unmap)(struct ps3_dma_region *,
+ unsigned long bus_addr,
+ unsigned long len);
+};
+/**
+ * ps3_dma_region_init - Helper to initialize structure variables
+ *
+ * Helper to properly initialize variables prior to calling
+ * ps3_system_bus_device_register.
+ */
+
+struct ps3_system_bus_device;
+
+int ps3_dma_region_init(struct ps3_system_bus_device *dev,
+ struct ps3_dma_region *r, enum ps3_dma_page_size page_size,
+ enum ps3_dma_region_type region_type, void *addr, unsigned long len);
+int ps3_dma_region_create(struct ps3_dma_region *r);
+int ps3_dma_region_free(struct ps3_dma_region *r);
+int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr,
+ unsigned long len, unsigned long *bus_addr,
+ u64 iopte_pp);
+int ps3_dma_unmap(struct ps3_dma_region *r, unsigned long bus_addr,
+ unsigned long len);
+
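The calls above are typically used in sequence: describe the region, create it in the hypervisor, then map kernel buffers into it. A hedged sketch follows; the page size, region type and IOPTE protection bits are chosen arbitrarily for illustration and would be device-specific in practice.

#include <asm/ps3.h>

static int example_setup_dma(struct ps3_system_bus_device *dev,
        struct ps3_dma_region *r, void *buf, unsigned long len,
        u64 iopte_pp)
{
        unsigned long bus_addr;
        int error;

        error = ps3_dma_region_init(dev, r, PS3_DMA_64K, PS3_DMA_OTHER,
                buf, len);
        if (error)
                return error;

        error = ps3_dma_region_create(r);
        if (error)
                return error;

        /* hand the device a bus address for 'buf' */
        error = ps3_dma_map(r, (unsigned long)buf, len, &bus_addr, iopte_pp);
        if (error)
                ps3_dma_region_free(r);
        return error;
}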
+/* mmio routines */
+
+enum ps3_mmio_page_size {
+ PS3_MMIO_4K = 12U,
+ PS3_MMIO_64K = 16U
+};
+
+struct ps3_mmio_region_ops;
+/**
+ * struct ps3_mmio_region - Per-device MMIO state.
+ *
+ * Current systems can be supported with a single region per device.
+ */
+
+struct ps3_mmio_region {
+ struct ps3_system_bus_device *dev;
+ const struct ps3_mmio_region_ops *mmio_ops;
+ unsigned long bus_addr;
+ unsigned long len;
+ enum ps3_mmio_page_size page_size;
+ unsigned long lpar_addr;
+};
+
+struct ps3_mmio_region_ops {
+ int (*create)(struct ps3_mmio_region *);
+ int (*free)(struct ps3_mmio_region *);
+};
+/**
+ * ps3_mmio_region_init - Helper to initialize structure variables
+ *
+ * Helper to properly initialize variables prior to calling
+ * ps3_system_bus_device_register.
+ */
+
+int ps3_mmio_region_init(struct ps3_system_bus_device *dev,
+ struct ps3_mmio_region *r, unsigned long bus_addr, unsigned long len,
+ enum ps3_mmio_page_size page_size);
+int ps3_mmio_region_create(struct ps3_mmio_region *r);
+int ps3_free_mmio_region(struct ps3_mmio_region *r);
+unsigned long ps3_mm_phys_to_lpar(unsigned long phys_addr);
+
+/* interrupt routines */
+
+enum ps3_cpu_binding {
+ PS3_BINDING_CPU_ANY = -1,
+ PS3_BINDING_CPU_0 = 0,
+ PS3_BINDING_CPU_1 = 1,
+};
+
+int ps3_irq_plug_setup(enum ps3_cpu_binding cpu, unsigned long outlet,
+ unsigned int *virq);
+int ps3_irq_plug_destroy(unsigned int virq);
+int ps3_event_receive_port_setup(enum ps3_cpu_binding cpu, unsigned int *virq);
+int ps3_event_receive_port_destroy(unsigned int virq);
+int ps3_send_event_locally(unsigned int virq);
+
+int ps3_io_irq_setup(enum ps3_cpu_binding cpu, unsigned int interrupt_id,
+ unsigned int *virq);
+int ps3_io_irq_destroy(unsigned int virq);
+int ps3_vuart_irq_setup(enum ps3_cpu_binding cpu, void* virt_addr_bmp,
+ unsigned int *virq);
+int ps3_vuart_irq_destroy(unsigned int virq);
+int ps3_spe_irq_setup(enum ps3_cpu_binding cpu, unsigned long spe_id,
+ unsigned int class, unsigned int *virq);
+int ps3_spe_irq_destroy(unsigned int virq);
+
+int ps3_sb_event_receive_port_setup(struct ps3_system_bus_device *dev,
+ enum ps3_cpu_binding cpu, unsigned int *virq);
+int ps3_sb_event_receive_port_destroy(struct ps3_system_bus_device *dev,
+ unsigned int virq);
+
+/* lv1 result codes */
+
+enum lv1_result {
+ LV1_SUCCESS = 0,
+ /* not used -1 */
+ LV1_RESOURCE_SHORTAGE = -2,
+ LV1_NO_PRIVILEGE = -3,
+ LV1_DENIED_BY_POLICY = -4,
+ LV1_ACCESS_VIOLATION = -5,
+ LV1_NO_ENTRY = -6,
+ LV1_DUPLICATE_ENTRY = -7,
+ LV1_TYPE_MISMATCH = -8,
+ LV1_BUSY = -9,
+ LV1_EMPTY = -10,
+ LV1_WRONG_STATE = -11,
+ /* not used -12 */
+ LV1_NO_MATCH = -13,
+ LV1_ALREADY_CONNECTED = -14,
+ LV1_UNSUPPORTED_PARAMETER_VALUE = -15,
+ LV1_CONDITION_NOT_SATISFIED = -16,
+ LV1_ILLEGAL_PARAMETER_VALUE = -17,
+ LV1_BAD_OPTION = -18,
+ LV1_IMPLEMENTATION_LIMITATION = -19,
+ LV1_NOT_IMPLEMENTED = -20,
+ LV1_INVALID_CLASS_ID = -21,
+ LV1_CONSTRAINT_NOT_SATISFIED = -22,
+ LV1_ALIGNMENT_ERROR = -23,
+ LV1_HARDWARE_ERROR = -24,
+ LV1_INVALID_DATA_FORMAT = -25,
+ LV1_INVALID_OPERATION = -26,
+ LV1_INTERNAL_ERROR = -32768,
+};
+
+static inline const char* ps3_result(int result)
+{
+#if defined(DEBUG)
+ switch (result) {
+ case LV1_SUCCESS:
+ return "LV1_SUCCESS (0)";
+ case -1:
+ return "** unknown result ** (-1)";
+ case LV1_RESOURCE_SHORTAGE:
+ return "LV1_RESOURCE_SHORTAGE (-2)";
+ case LV1_NO_PRIVILEGE:
+ return "LV1_NO_PRIVILEGE (-3)";
+ case LV1_DENIED_BY_POLICY:
+ return "LV1_DENIED_BY_POLICY (-4)";
+ case LV1_ACCESS_VIOLATION:
+ return "LV1_ACCESS_VIOLATION (-5)";
+ case LV1_NO_ENTRY:
+ return "LV1_NO_ENTRY (-6)";
+ case LV1_DUPLICATE_ENTRY:
+ return "LV1_DUPLICATE_ENTRY (-7)";
+ case LV1_TYPE_MISMATCH:
+ return "LV1_TYPE_MISMATCH (-8)";
+ case LV1_BUSY:
+ return "LV1_BUSY (-9)";
+ case LV1_EMPTY:
+ return "LV1_EMPTY (-10)";
+ case LV1_WRONG_STATE:
+ return "LV1_WRONG_STATE (-11)";
+ case -12:
+ return "** unknown result ** (-12)";
+ case LV1_NO_MATCH:
+ return "LV1_NO_MATCH (-13)";
+ case LV1_ALREADY_CONNECTED:
+ return "LV1_ALREADY_CONNECTED (-14)";
+ case LV1_UNSUPPORTED_PARAMETER_VALUE:
+ return "LV1_UNSUPPORTED_PARAMETER_VALUE (-15)";
+ case LV1_CONDITION_NOT_SATISFIED:
+ return "LV1_CONDITION_NOT_SATISFIED (-16)";
+ case LV1_ILLEGAL_PARAMETER_VALUE:
+ return "LV1_ILLEGAL_PARAMETER_VALUE (-17)";
+ case LV1_BAD_OPTION:
+ return "LV1_BAD_OPTION (-18)";
+ case LV1_IMPLEMENTATION_LIMITATION:
+ return "LV1_IMPLEMENTATION_LIMITATION (-19)";
+ case LV1_NOT_IMPLEMENTED:
+ return "LV1_NOT_IMPLEMENTED (-20)";
+ case LV1_INVALID_CLASS_ID:
+ return "LV1_INVALID_CLASS_ID (-21)";
+ case LV1_CONSTRAINT_NOT_SATISFIED:
+ return "LV1_CONSTRAINT_NOT_SATISFIED (-22)";
+ case LV1_ALIGNMENT_ERROR:
+ return "LV1_ALIGNMENT_ERROR (-23)";
+ case LV1_HARDWARE_ERROR:
+ return "LV1_HARDWARE_ERROR (-24)";
+ case LV1_INVALID_DATA_FORMAT:
+ return "LV1_INVALID_DATA_FORMAT (-25)";
+ case LV1_INVALID_OPERATION:
+ return "LV1_INVALID_OPERATION (-26)";
+ case LV1_INTERNAL_ERROR:
+ return "LV1_INTERNAL_ERROR (-32768)";
+ default:
+ BUG();
+ return "** unknown result **";
+ }
+#else
+ return "";
+#endif
+}
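A typical caller-side pattern for the lv1 result codes above; note that ps3_result() only returns a descriptive string when DEBUG is defined, and the mapping of a hypervisor status to -EIO here is only an assumption.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <asm/ps3.h>

static int example_check_lv1(const char *what, int result)
{
        if (result) {
                pr_debug("%s failed: %s\n", what, ps3_result(result));
                return -EIO;    /* errno mapping is driver-specific */
        }
        return 0;
}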
+
+/* system bus routines */
+
+enum ps3_match_id {
+ PS3_MATCH_ID_EHCI = 1,
+ PS3_MATCH_ID_OHCI = 2,
+ PS3_MATCH_ID_GELIC = 3,
+ PS3_MATCH_ID_AV_SETTINGS = 4,
+ PS3_MATCH_ID_SYSTEM_MANAGER = 5,
+ PS3_MATCH_ID_STOR_DISK = 6,
+ PS3_MATCH_ID_STOR_ROM = 7,
+ PS3_MATCH_ID_STOR_FLASH = 8,
+ PS3_MATCH_ID_SOUND = 9,
+ PS3_MATCH_ID_GRAPHICS = 10,
+ PS3_MATCH_ID_LPM = 11,
+};
+
+#define PS3_MODULE_ALIAS_EHCI "ps3:1"
+#define PS3_MODULE_ALIAS_OHCI "ps3:2"
+#define PS3_MODULE_ALIAS_GELIC "ps3:3"
+#define PS3_MODULE_ALIAS_AV_SETTINGS "ps3:4"
+#define PS3_MODULE_ALIAS_SYSTEM_MANAGER "ps3:5"
+#define PS3_MODULE_ALIAS_STOR_DISK "ps3:6"
+#define PS3_MODULE_ALIAS_STOR_ROM "ps3:7"
+#define PS3_MODULE_ALIAS_STOR_FLASH "ps3:8"
+#define PS3_MODULE_ALIAS_SOUND "ps3:9"
+#define PS3_MODULE_ALIAS_GRAPHICS "ps3:10"
+#define PS3_MODULE_ALIAS_LPM "ps3:11"
+
+enum ps3_system_bus_device_type {
+ PS3_DEVICE_TYPE_IOC0 = 1,
+ PS3_DEVICE_TYPE_SB,
+ PS3_DEVICE_TYPE_VUART,
+ PS3_DEVICE_TYPE_LPM,
+};
+
+enum ps3_match_sub_id {
+ /* for PS3_MATCH_ID_GRAPHICS */
+ PS3_MATCH_SUB_ID_FB = 1,
+};
+
+/**
+ * struct ps3_system_bus_device - a device on the system bus
+ */
+
+struct ps3_system_bus_device {
+ enum ps3_match_id match_id;
+ enum ps3_match_sub_id match_sub_id;
+ enum ps3_system_bus_device_type dev_type;
+
+ u64 bus_id; /* SB */
+ u64 dev_id; /* SB */
+ unsigned int interrupt_id; /* SB */
+ struct ps3_dma_region *d_region; /* SB, IOC0 */
+ struct ps3_mmio_region *m_region; /* SB, IOC0*/
+ unsigned int port_number; /* VUART */
+ struct { /* LPM */
+ u64 node_id;
+ u64 pu_id;
+ u64 rights;
+ } lpm;
+
+/* struct iommu_table *iommu_table; -- waiting for BenH's cleanups */
+ struct device core;
+ void *driver_priv; /* private driver variables */
+};
+
+int ps3_open_hv_device(struct ps3_system_bus_device *dev);
+int ps3_close_hv_device(struct ps3_system_bus_device *dev);
+
+/**
+ * struct ps3_system_bus_driver - a driver for a device on the system bus
+ */
+
+struct ps3_system_bus_driver {
+ enum ps3_match_id match_id;
+ enum ps3_match_sub_id match_sub_id;
+ struct device_driver core;
+ int (*probe)(struct ps3_system_bus_device *);
+ int (*remove)(struct ps3_system_bus_device *);
+ int (*shutdown)(struct ps3_system_bus_device *);
+/* int (*suspend)(struct ps3_system_bus_device *, pm_message_t); */
+/* int (*resume)(struct ps3_system_bus_device *); */
+};
+
+int ps3_system_bus_device_register(struct ps3_system_bus_device *dev);
+int ps3_system_bus_driver_register(struct ps3_system_bus_driver *drv);
+void ps3_system_bus_driver_unregister(struct ps3_system_bus_driver *drv);
+
+static inline struct ps3_system_bus_driver *ps3_drv_to_system_bus_drv(
+ struct device_driver *_drv)
+{
+ return container_of(_drv, struct ps3_system_bus_driver, core);
+}
+static inline struct ps3_system_bus_device *ps3_dev_to_system_bus_dev(
+ struct device *_dev)
+{
+ return container_of(_dev, struct ps3_system_bus_device, core);
+}
+static inline struct ps3_system_bus_driver *
+ ps3_system_bus_dev_to_system_bus_drv(struct ps3_system_bus_device *_dev)
+{
+ BUG_ON(!_dev);
+ BUG_ON(!_dev->core.driver);
+ return ps3_drv_to_system_bus_drv(_dev->core.driver);
+}
+
+/**
+ * ps3_system_bus_set_driver_data - Set the driver's private data pointer
+ * @dev: device structure
+ * @data: Data to set
+ */
+
+static inline void ps3_system_bus_set_driver_data(
+ struct ps3_system_bus_device *dev, void *data)
+{
+ dev->core.driver_data = data;
+}
+static inline void *ps3_system_bus_get_driver_data(
+ struct ps3_system_bus_device *dev)
+{
+ return dev->core.driver_data;
+}
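Putting the registration calls and the driver-data helpers together, a minimal driver skeleton might look like the sketch below; the match id, name and private structure are placeholders, not part of this header.

#include <linux/errno.h>
#include <linux/slab.h>
#include <asm/ps3.h>

struct example_priv {
        int opened;
};

static int example_probe(struct ps3_system_bus_device *dev)
{
        struct example_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

        if (!priv)
                return -ENOMEM;
        ps3_system_bus_set_driver_data(dev, priv);
        return 0;
}

static int example_remove(struct ps3_system_bus_device *dev)
{
        kfree(ps3_system_bus_get_driver_data(dev));
        ps3_system_bus_set_driver_data(dev, NULL);
        return 0;
}

static struct ps3_system_bus_driver example_driver = {
        .match_id = PS3_MATCH_ID_SOUND, /* placeholder id */
        .core.name = "example",
        .probe = example_probe,
        .remove = example_remove,
};

/* registered from module init with
 * ps3_system_bus_driver_register(&example_driver); */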
+
+/* This needs global scope for get_dma_ops(). */
+
+extern struct bus_type ps3_system_bus_type;
+
+/* system manager */
+
+struct ps3_sys_manager_ops {
+ struct ps3_system_bus_device *dev;
+ void (*power_off)(struct ps3_system_bus_device *dev);
+ void (*restart)(struct ps3_system_bus_device *dev);
+};
+
+void ps3_sys_manager_register_ops(const struct ps3_sys_manager_ops *ops);
+void __noreturn ps3_sys_manager_power_off(void);
+void __noreturn ps3_sys_manager_restart(void);
+void __noreturn ps3_sys_manager_halt(void);
+int ps3_sys_manager_get_wol(void);
+void ps3_sys_manager_set_wol(int state);
+
+struct ps3_prealloc {
+ const char *name;
+ void *address;
+ unsigned long size;
+ unsigned long align;
+};
+
+extern struct ps3_prealloc ps3fb_videomemory;
+extern struct ps3_prealloc ps3flash_bounce_buffer;
+
+/* logical performance monitor */
+
+/**
+ * enum ps3_lpm_rights - Rights granted by the system policy module.
+ *
+ * @PS3_LPM_RIGHTS_USE_LPM: The right to use the lpm.
+ * @PS3_LPM_RIGHTS_USE_TB: The right to use the internal trace buffer.
+ */
+
+enum ps3_lpm_rights {
+ PS3_LPM_RIGHTS_USE_LPM = 0x001,
+ PS3_LPM_RIGHTS_USE_TB = 0x100,
+};
+
+/**
+ * enum ps3_lpm_tb_type - Type of trace buffer lv1 should use.
+ *
+ * @PS3_LPM_TB_TYPE_NONE: Do not use a trace buffer.
+ * @PS3_LPM_TB_TYPE_INTERNAL: Use the lv1 internal trace buffer. Requires
+ * the @PS3_LPM_RIGHTS_USE_TB right.
+ */
+
+enum ps3_lpm_tb_type {
+ PS3_LPM_TB_TYPE_NONE = 0,
+ PS3_LPM_TB_TYPE_INTERNAL = 1,
+};
+
+int ps3_lpm_open(enum ps3_lpm_tb_type tb_type, void *tb_cache,
+ u64 tb_cache_size);
+int ps3_lpm_close(void);
+int ps3_lpm_copy_tb(unsigned long offset, void *buf, unsigned long count,
+ unsigned long *bytes_copied);
+int ps3_lpm_copy_tb_to_user(unsigned long offset, void __user *buf,
+ unsigned long count, unsigned long *bytes_copied);
+void ps3_set_bookmark(u64 bookmark);
+void ps3_set_pm_bookmark(u64 tag, u64 incident, u64 th_id);
+int ps3_set_signal(u64 rtas_signal_group, u8 signal_bit, u16 sub_unit,
+ u8 bus_word);
+
+u32 ps3_read_phys_ctr(u32 cpu, u32 phys_ctr);
+void ps3_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val);
+u32 ps3_read_ctr(u32 cpu, u32 ctr);
+void ps3_write_ctr(u32 cpu, u32 ctr, u32 val);
+
+u32 ps3_read_pm07_control(u32 cpu, u32 ctr);
+void ps3_write_pm07_control(u32 cpu, u32 ctr, u32 val);
+u32 ps3_read_pm(u32 cpu, enum pm_reg_name reg);
+void ps3_write_pm(u32 cpu, enum pm_reg_name reg, u32 val);
+
+u32 ps3_get_ctr_size(u32 cpu, u32 phys_ctr);
+void ps3_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size);
+
+void ps3_enable_pm(u32 cpu);
+void ps3_disable_pm(u32 cpu);
+void ps3_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask);
+void ps3_disable_pm_interrupts(u32 cpu);
+
+u32 ps3_get_and_clear_pm_interrupts(u32 cpu);
+void ps3_sync_irq(int node);
+u32 ps3_get_hw_thread_id(int cpu);
+u64 ps3_get_spe_id(void *arg);
+
+#endif
diff --git a/arch/powerpc/include/asm/ps3av.h b/arch/powerpc/include/asm/ps3av.h
new file mode 100644
index 0000000..fda9871
--- /dev/null
+++ b/arch/powerpc/include/asm/ps3av.h
@@ -0,0 +1,744 @@
+/*
+ * PS3 AV backend support.
+ *
+ * Copyright (C) 2007 Sony Computer Entertainment Inc.
+ * Copyright 2007 Sony Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_POWERPC_PS3AV_H_
+#define _ASM_POWERPC_PS3AV_H_
+
+/** command for ioctl() **/
+#define PS3AV_VERSION 0x205 /* version of ps3av command */
+
+#define PS3AV_CID_AV_INIT 0x00000001
+#define PS3AV_CID_AV_FIN 0x00000002
+#define PS3AV_CID_AV_GET_HW_CONF 0x00000003
+#define PS3AV_CID_AV_GET_MONITOR_INFO 0x00000004
+#define PS3AV_CID_AV_ENABLE_EVENT 0x00000006
+#define PS3AV_CID_AV_DISABLE_EVENT 0x00000007
+#define PS3AV_CID_AV_TV_MUTE 0x0000000a
+
+#define PS3AV_CID_AV_VIDEO_CS 0x00010001
+#define PS3AV_CID_AV_VIDEO_MUTE 0x00010002
+#define PS3AV_CID_AV_VIDEO_DISABLE_SIG 0x00010003
+#define PS3AV_CID_AV_AUDIO_PARAM 0x00020001
+#define PS3AV_CID_AV_AUDIO_MUTE 0x00020002
+#define PS3AV_CID_AV_HDMI_MODE 0x00040001
+
+#define PS3AV_CID_VIDEO_INIT 0x01000001
+#define PS3AV_CID_VIDEO_MODE 0x01000002
+#define PS3AV_CID_VIDEO_FORMAT 0x01000004
+#define PS3AV_CID_VIDEO_PITCH 0x01000005
+
+#define PS3AV_CID_AUDIO_INIT 0x02000001
+#define PS3AV_CID_AUDIO_MODE 0x02000002
+#define PS3AV_CID_AUDIO_MUTE 0x02000003
+#define PS3AV_CID_AUDIO_ACTIVE 0x02000004
+#define PS3AV_CID_AUDIO_INACTIVE 0x02000005
+#define PS3AV_CID_AUDIO_SPDIF_BIT 0x02000006
+#define PS3AV_CID_AUDIO_CTRL 0x02000007
+
+#define PS3AV_CID_EVENT_UNPLUGGED 0x10000001
+#define PS3AV_CID_EVENT_PLUGGED 0x10000002
+#define PS3AV_CID_EVENT_HDCP_DONE 0x10000003
+#define PS3AV_CID_EVENT_HDCP_FAIL 0x10000004
+#define PS3AV_CID_EVENT_HDCP_AUTH 0x10000005
+#define PS3AV_CID_EVENT_HDCP_ERROR 0x10000006
+
+#define PS3AV_CID_AVB_PARAM 0x04000001
+
+/* max backend ports */
+#define PS3AV_HDMI_MAX 2 /* HDMI_0 HDMI_1 */
+#define PS3AV_AVMULTI_MAX 1 /* AVMULTI_0 */
+#define PS3AV_AV_PORT_MAX (PS3AV_HDMI_MAX + PS3AV_AVMULTI_MAX)
+#define PS3AV_OPT_PORT_MAX 1 /* SPDIF0 */
+#define PS3AV_HEAD_MAX 2 /* HEAD_A HEAD_B */
+
+/* num of pkt for PS3AV_CID_AVB_PARAM */
+#define PS3AV_AVB_NUM_VIDEO PS3AV_HEAD_MAX
+#define PS3AV_AVB_NUM_AUDIO 0 /* not supported */
+#define PS3AV_AVB_NUM_AV_VIDEO PS3AV_AV_PORT_MAX
+#define PS3AV_AVB_NUM_AV_AUDIO PS3AV_HDMI_MAX
+
+#define PS3AV_MUTE_PORT_MAX 1 /* num of ports in mute pkt */
+
+/* event_bit */
+#define PS3AV_CMD_EVENT_BIT_UNPLUGGED (1 << 0)
+#define PS3AV_CMD_EVENT_BIT_PLUGGED (1 << 1)
+#define PS3AV_CMD_EVENT_BIT_HDCP_DONE (1 << 2)
+#define PS3AV_CMD_EVENT_BIT_HDCP_FAIL (1 << 3)
+#define PS3AV_CMD_EVENT_BIT_HDCP_REAUTH (1 << 4)
+#define PS3AV_CMD_EVENT_BIT_HDCP_TOPOLOGY (1 << 5)
+
+/* common params */
+/* mute */
+#define PS3AV_CMD_MUTE_OFF 0x0000
+#define PS3AV_CMD_MUTE_ON 0x0001
+/* avport */
+#define PS3AV_CMD_AVPORT_HDMI_0 0x0000
+#define PS3AV_CMD_AVPORT_HDMI_1 0x0001
+#define PS3AV_CMD_AVPORT_AVMULTI_0 0x0010
+#define PS3AV_CMD_AVPORT_SPDIF_0 0x0020
+#define PS3AV_CMD_AVPORT_SPDIF_1 0x0021
+
+/* for av backend */
+/* av_mclk */
+#define PS3AV_CMD_AV_MCLK_128 0x0000
+#define PS3AV_CMD_AV_MCLK_256 0x0001
+#define PS3AV_CMD_AV_MCLK_512 0x0003
+/* av_inputlen */
+#define PS3AV_CMD_AV_INPUTLEN_16 0x02
+#define PS3AV_CMD_AV_INPUTLEN_20 0x0a
+#define PS3AV_CMD_AV_INPUTLEN_24 0x0b
+/* alayout */
+#define PS3AV_CMD_AV_LAYOUT_32 (1 << 0)
+#define PS3AV_CMD_AV_LAYOUT_44 (1 << 1)
+#define PS3AV_CMD_AV_LAYOUT_48 (1 << 2)
+#define PS3AV_CMD_AV_LAYOUT_88 (1 << 3)
+#define PS3AV_CMD_AV_LAYOUT_96 (1 << 4)
+#define PS3AV_CMD_AV_LAYOUT_176 (1 << 5)
+#define PS3AV_CMD_AV_LAYOUT_192 (1 << 6)
+/* hdmi_mode */
+#define PS3AV_CMD_AV_HDMI_MODE_NORMAL 0xff
+#define PS3AV_CMD_AV_HDMI_HDCP_OFF 0x01
+#define PS3AV_CMD_AV_HDMI_EDID_PASS 0x80
+#define PS3AV_CMD_AV_HDMI_DVI 0x40
+
+/* for video module */
+/* video_head */
+#define PS3AV_CMD_VIDEO_HEAD_A 0x0000
+#define PS3AV_CMD_VIDEO_HEAD_B 0x0001
+/* video_cs_out video_cs_in */
+#define PS3AV_CMD_VIDEO_CS_NONE 0x0000
+#define PS3AV_CMD_VIDEO_CS_RGB_8 0x0001
+#define PS3AV_CMD_VIDEO_CS_YUV444_8 0x0002
+#define PS3AV_CMD_VIDEO_CS_YUV422_8 0x0003
+#define PS3AV_CMD_VIDEO_CS_XVYCC_8 0x0004
+#define PS3AV_CMD_VIDEO_CS_RGB_10 0x0005
+#define PS3AV_CMD_VIDEO_CS_YUV444_10 0x0006
+#define PS3AV_CMD_VIDEO_CS_YUV422_10 0x0007
+#define PS3AV_CMD_VIDEO_CS_XVYCC_10 0x0008
+#define PS3AV_CMD_VIDEO_CS_RGB_12 0x0009
+#define PS3AV_CMD_VIDEO_CS_YUV444_12 0x000a
+#define PS3AV_CMD_VIDEO_CS_YUV422_12 0x000b
+#define PS3AV_CMD_VIDEO_CS_XVYCC_12 0x000c
+/* video_vid */
+#define PS3AV_CMD_VIDEO_VID_NONE 0x0000
+#define PS3AV_CMD_VIDEO_VID_480I 0x0001
+#define PS3AV_CMD_VIDEO_VID_576I 0x0003
+#define PS3AV_CMD_VIDEO_VID_480P 0x0005
+#define PS3AV_CMD_VIDEO_VID_576P 0x0006
+#define PS3AV_CMD_VIDEO_VID_1080I_60HZ 0x0007
+#define PS3AV_CMD_VIDEO_VID_1080I_50HZ 0x0008
+#define PS3AV_CMD_VIDEO_VID_720P_60HZ 0x0009
+#define PS3AV_CMD_VIDEO_VID_720P_50HZ 0x000a
+#define PS3AV_CMD_VIDEO_VID_1080P_60HZ 0x000b
+#define PS3AV_CMD_VIDEO_VID_1080P_50HZ 0x000c
+#define PS3AV_CMD_VIDEO_VID_WXGA 0x000d
+#define PS3AV_CMD_VIDEO_VID_SXGA 0x000e
+#define PS3AV_CMD_VIDEO_VID_WUXGA 0x000f
+#define PS3AV_CMD_VIDEO_VID_480I_A 0x0010
+/* video_format */
+#define PS3AV_CMD_VIDEO_FORMAT_BLACK 0x0000
+#define PS3AV_CMD_VIDEO_FORMAT_ARGB_8BIT 0x0007
+/* video_order */
+#define PS3AV_CMD_VIDEO_ORDER_RGB 0x0000
+#define PS3AV_CMD_VIDEO_ORDER_BGR 0x0001
+/* video_fmt */
+#define PS3AV_CMD_VIDEO_FMT_X8R8G8B8 0x0000
+/* video_out_format */
+#define PS3AV_CMD_VIDEO_OUT_FORMAT_RGB_12BIT 0x0000
+/* video_cl_cnv */
+#define PS3AV_CMD_VIDEO_CL_CNV_ENABLE_LUT 0x0000
+#define PS3AV_CMD_VIDEO_CL_CNV_DISABLE_LUT 0x0010
+/* video_sync */
+#define PS3AV_CMD_VIDEO_SYNC_VSYNC 0x0001
+#define PS3AV_CMD_VIDEO_SYNC_CSYNC 0x0004
+#define PS3AV_CMD_VIDEO_SYNC_HSYNC 0x0010
+
+/* for audio module */
+/* num_of_ch */
+#define PS3AV_CMD_AUDIO_NUM_OF_CH_2 0x0000
+#define PS3AV_CMD_AUDIO_NUM_OF_CH_3 0x0001
+#define PS3AV_CMD_AUDIO_NUM_OF_CH_4 0x0002
+#define PS3AV_CMD_AUDIO_NUM_OF_CH_5 0x0003
+#define PS3AV_CMD_AUDIO_NUM_OF_CH_6 0x0004
+#define PS3AV_CMD_AUDIO_NUM_OF_CH_7 0x0005
+#define PS3AV_CMD_AUDIO_NUM_OF_CH_8 0x0006
+/* audio_fs */
+#define PS3AV_CMD_AUDIO_FS_32K 0x0001
+#define PS3AV_CMD_AUDIO_FS_44K 0x0002
+#define PS3AV_CMD_AUDIO_FS_48K 0x0003
+#define PS3AV_CMD_AUDIO_FS_88K 0x0004
+#define PS3AV_CMD_AUDIO_FS_96K 0x0005
+#define PS3AV_CMD_AUDIO_FS_176K 0x0006
+#define PS3AV_CMD_AUDIO_FS_192K 0x0007
+/* audio_word_bits */
+#define PS3AV_CMD_AUDIO_WORD_BITS_16 0x0001
+#define PS3AV_CMD_AUDIO_WORD_BITS_20 0x0002
+#define PS3AV_CMD_AUDIO_WORD_BITS_24 0x0003
+/* audio_format */
+#define PS3AV_CMD_AUDIO_FORMAT_PCM 0x0001
+#define PS3AV_CMD_AUDIO_FORMAT_BITSTREAM 0x00ff
+/* audio_source */
+#define PS3AV_CMD_AUDIO_SOURCE_SERIAL 0x0000
+#define PS3AV_CMD_AUDIO_SOURCE_SPDIF 0x0001
+/* audio_swap */
+#define PS3AV_CMD_AUDIO_SWAP_0 0x0000
+#define PS3AV_CMD_AUDIO_SWAP_1 0x0000
+/* audio_map */
+#define PS3AV_CMD_AUDIO_MAP_OUTPUT_0 0x0000
+#define PS3AV_CMD_AUDIO_MAP_OUTPUT_1 0x0001
+#define PS3AV_CMD_AUDIO_MAP_OUTPUT_2 0x0002
+#define PS3AV_CMD_AUDIO_MAP_OUTPUT_3 0x0003
+/* audio_layout */
+#define PS3AV_CMD_AUDIO_LAYOUT_2CH 0x0000
+#define PS3AV_CMD_AUDIO_LAYOUT_6CH 0x000b /* LREClr */
+#define PS3AV_CMD_AUDIO_LAYOUT_8CH 0x001f /* LREClrXY */
+/* audio_downmix */
+#define PS3AV_CMD_AUDIO_DOWNMIX_PERMITTED 0x0000
+#define PS3AV_CMD_AUDIO_DOWNMIX_PROHIBITED 0x0001
+
+/* audio_port */
+#define PS3AV_CMD_AUDIO_PORT_HDMI_0 ( 1 << 0 )
+#define PS3AV_CMD_AUDIO_PORT_HDMI_1 ( 1 << 1 )
+#define PS3AV_CMD_AUDIO_PORT_AVMULTI_0 ( 1 << 10 )
+#define PS3AV_CMD_AUDIO_PORT_SPDIF_0 ( 1 << 20 )
+#define PS3AV_CMD_AUDIO_PORT_SPDIF_1 ( 1 << 21 )
+
+/* audio_ctrl_id */
+#define PS3AV_CMD_AUDIO_CTRL_ID_DAC_RESET 0x0000
+#define PS3AV_CMD_AUDIO_CTRL_ID_DAC_DE_EMPHASIS 0x0001
+#define PS3AV_CMD_AUDIO_CTRL_ID_AVCLK 0x0002
+/* audio_ctrl_data[0] reset */
+#define PS3AV_CMD_AUDIO_CTRL_RESET_NEGATE 0x0000
+#define PS3AV_CMD_AUDIO_CTRL_RESET_ASSERT 0x0001
+/* audio_ctrl_data[0] de-emphasis */
+#define PS3AV_CMD_AUDIO_CTRL_DE_EMPHASIS_OFF 0x0000
+#define PS3AV_CMD_AUDIO_CTRL_DE_EMPHASIS_ON 0x0001
+/* audio_ctrl_data[0] avclk */
+#define PS3AV_CMD_AUDIO_CTRL_AVCLK_22 0x0000
+#define PS3AV_CMD_AUDIO_CTRL_AVCLK_18 0x0001
+
+/* av_vid */
+/* do not use these params directly, use vid_video2av */
+#define PS3AV_CMD_AV_VID_480I 0x0000
+#define PS3AV_CMD_AV_VID_480P 0x0001
+#define PS3AV_CMD_AV_VID_720P_60HZ 0x0002
+#define PS3AV_CMD_AV_VID_1080I_60HZ 0x0003
+#define PS3AV_CMD_AV_VID_1080P_60HZ 0x0004
+#define PS3AV_CMD_AV_VID_576I 0x0005
+#define PS3AV_CMD_AV_VID_576P 0x0006
+#define PS3AV_CMD_AV_VID_720P_50HZ 0x0007
+#define PS3AV_CMD_AV_VID_1080I_50HZ 0x0008
+#define PS3AV_CMD_AV_VID_1080P_50HZ 0x0009
+#define PS3AV_CMD_AV_VID_WXGA 0x000a
+#define PS3AV_CMD_AV_VID_SXGA 0x000b
+#define PS3AV_CMD_AV_VID_WUXGA 0x000c
+/* av_cs_out av_cs_in */
+/* use cs_video2av() */
+#define PS3AV_CMD_AV_CS_RGB_8 0x0000
+#define PS3AV_CMD_AV_CS_YUV444_8 0x0001
+#define PS3AV_CMD_AV_CS_YUV422_8 0x0002
+#define PS3AV_CMD_AV_CS_XVYCC_8 0x0003
+#define PS3AV_CMD_AV_CS_RGB_10 0x0004
+#define PS3AV_CMD_AV_CS_YUV444_10 0x0005
+#define PS3AV_CMD_AV_CS_YUV422_10 0x0006
+#define PS3AV_CMD_AV_CS_XVYCC_10 0x0007
+#define PS3AV_CMD_AV_CS_RGB_12 0x0008
+#define PS3AV_CMD_AV_CS_YUV444_12 0x0009
+#define PS3AV_CMD_AV_CS_YUV422_12 0x000a
+#define PS3AV_CMD_AV_CS_XVYCC_12 0x000b
+#define PS3AV_CMD_AV_CS_8 0x0000
+#define PS3AV_CMD_AV_CS_10 0x0001
+#define PS3AV_CMD_AV_CS_12 0x0002
+/* dither */
+#define PS3AV_CMD_AV_DITHER_OFF 0x0000
+#define PS3AV_CMD_AV_DITHER_ON 0x0001
+#define PS3AV_CMD_AV_DITHER_8BIT 0x0000
+#define PS3AV_CMD_AV_DITHER_10BIT 0x0002
+#define PS3AV_CMD_AV_DITHER_12BIT 0x0004
+/* super_white */
+#define PS3AV_CMD_AV_SUPER_WHITE_OFF 0x0000
+#define PS3AV_CMD_AV_SUPER_WHITE_ON 0x0001
+/* aspect */
+#define PS3AV_CMD_AV_ASPECT_16_9 0x0000
+#define PS3AV_CMD_AV_ASPECT_4_3 0x0001
+/* video_cs_cnv() */
+#define PS3AV_CMD_VIDEO_CS_RGB 0x0001
+#define PS3AV_CMD_VIDEO_CS_YUV422 0x0002
+#define PS3AV_CMD_VIDEO_CS_YUV444 0x0003
+
+/* for broadcast automode */
+#define PS3AV_RESBIT_720x480P 0x0003 /* 0x0001 | 0x0002 */
+#define PS3AV_RESBIT_720x576P 0x0003 /* 0x0001 | 0x0002 */
+#define PS3AV_RESBIT_1280x720P 0x0004
+#define PS3AV_RESBIT_1920x1080I 0x0008
+#define PS3AV_RESBIT_1920x1080P 0x4000
+#define PS3AV_RES_MASK_60 (PS3AV_RESBIT_720x480P \
+ | PS3AV_RESBIT_1280x720P \
+ | PS3AV_RESBIT_1920x1080I \
+ | PS3AV_RESBIT_1920x1080P)
+#define PS3AV_RES_MASK_50 (PS3AV_RESBIT_720x576P \
+ | PS3AV_RESBIT_1280x720P \
+ | PS3AV_RESBIT_1920x1080I \
+ | PS3AV_RESBIT_1920x1080P)
+
+/* for VESA automode */
+#define PS3AV_RESBIT_VGA 0x0001
+#define PS3AV_RESBIT_WXGA 0x0002
+#define PS3AV_RESBIT_SXGA 0x0004
+#define PS3AV_RESBIT_WUXGA 0x0008
+#define PS3AV_RES_MASK_VESA (PS3AV_RESBIT_WXGA |\
+ PS3AV_RESBIT_SXGA |\
+ PS3AV_RESBIT_WUXGA)
+
+#define PS3AV_MONITOR_TYPE_HDMI 1 /* HDMI */
+#define PS3AV_MONITOR_TYPE_DVI 2 /* DVI */
+
+
+/* for video mode */
+enum ps3av_mode_num {
+ PS3AV_MODE_AUTO = 0,
+ PS3AV_MODE_480I = 1,
+ PS3AV_MODE_480P = 2,
+ PS3AV_MODE_720P60 = 3,
+ PS3AV_MODE_1080I60 = 4,
+ PS3AV_MODE_1080P60 = 5,
+ PS3AV_MODE_576I = 6,
+ PS3AV_MODE_576P = 7,
+ PS3AV_MODE_720P50 = 8,
+ PS3AV_MODE_1080I50 = 9,
+ PS3AV_MODE_1080P50 = 10,
+ PS3AV_MODE_WXGA = 11,
+ PS3AV_MODE_SXGA = 12,
+ PS3AV_MODE_WUXGA = 13,
+};
+
+#define PS3AV_MODE_MASK 0x000F
+#define PS3AV_MODE_HDCP_OFF 0x1000 /* Retail PS3 product doesn't support this */
+#define PS3AV_MODE_DITHER 0x0800
+#define PS3AV_MODE_COLOR 0x0400
+#define PS3AV_MODE_WHITE 0x0200
+#define PS3AV_MODE_FULL 0x0080
+#define PS3AV_MODE_DVI 0x0040
+#define PS3AV_MODE_RGB 0x0020
+
+
+#define PS3AV_DEFAULT_HDMI_MODE_ID_REG_60 PS3AV_MODE_480P
+#define PS3AV_DEFAULT_AVMULTI_MODE_ID_REG_60 PS3AV_MODE_480I
+#define PS3AV_DEFAULT_HDMI_MODE_ID_REG_50 PS3AV_MODE_576P
+#define PS3AV_DEFAULT_AVMULTI_MODE_ID_REG_50 PS3AV_MODE_576I
+
+#define PS3AV_REGION_60 0x01
+#define PS3AV_REGION_50 0x02
+#define PS3AV_REGION_RGB 0x10
+
+#define get_status(buf) (((__u32 *)buf)[2])
+#define PS3AV_HDR_SIZE 4 /* version + size */
+
+
+/** command packet structure **/
+struct ps3av_send_hdr {
+ u16 version;
+ u16 size; /* size of command packet */
+ u32 cid; /* command id */
+};
+
+struct ps3av_reply_hdr {
+ u16 version;
+ u16 size;
+ u32 cid;
+ u32 status;
+};
+
+/* backend: initialization */
+struct ps3av_pkt_av_init {
+ struct ps3av_send_hdr send_hdr;
+ u32 event_bit;
+};
+
+/* backend: finalize */
+struct ps3av_pkt_av_fin {
+ struct ps3av_send_hdr send_hdr;
+ /* recv */
+ u32 reserved;
+};
+
+/* backend: get port */
+struct ps3av_pkt_av_get_hw_conf {
+ struct ps3av_send_hdr send_hdr;
+ /* recv */
+ u32 status;
+ u16 num_of_hdmi; /* out: number of hdmi */
+ u16 num_of_avmulti; /* out: number of avmulti */
+ u16 num_of_spdif; /* out: number of spdif */
+ u16 reserved;
+};
+
+/* backend: get monitor info */
+struct ps3av_info_resolution {
+ u32 res_bits;
+ u32 native;
+};
+
+struct ps3av_info_cs {
+ u8 rgb;
+ u8 yuv444;
+ u8 yuv422;
+ u8 reserved;
+};
+
+struct ps3av_info_color {
+ u16 red_x;
+ u16 red_y;
+ u16 green_x;
+ u16 green_y;
+ u16 blue_x;
+ u16 blue_y;
+ u16 white_x;
+ u16 white_y;
+ u32 gamma;
+};
+
+struct ps3av_info_audio {
+ u8 type;
+ u8 max_num_of_ch;
+ u8 fs;
+ u8 sbit;
+};
+
+struct ps3av_info_monitor {
+ u8 avport;
+ u8 monitor_id[10];
+ u8 monitor_type;
+ u8 monitor_name[16];
+ struct ps3av_info_resolution res_60;
+ struct ps3av_info_resolution res_50;
+ struct ps3av_info_resolution res_other;
+ struct ps3av_info_resolution res_vesa;
+ struct ps3av_info_cs cs;
+ struct ps3av_info_color color;
+ u8 supported_ai;
+ u8 speaker_info;
+ u8 num_of_audio_block;
+ struct ps3av_info_audio audio[0]; /* 0 or more audio blocks */
+ u8 reserved[169];
+} __attribute__ ((packed));
+
+struct ps3av_pkt_av_get_monitor_info {
+ struct ps3av_send_hdr send_hdr;
+ u16 avport; /* in: avport */
+ u16 reserved;
+ /* recv */
+ struct ps3av_info_monitor info; /* out: monitor info */
+};
+
+/* backend: enable/disable event */
+struct ps3av_pkt_av_event {
+ struct ps3av_send_hdr send_hdr;
+ u32 event_bit; /* in */
+};
+
+/* backend: video cs param */
+struct ps3av_pkt_av_video_cs {
+ struct ps3av_send_hdr send_hdr;
+ u16 avport; /* in: avport */
+ u16 av_vid; /* in: video resolution */
+ u16 av_cs_out; /* in: output color space */
+ u16 av_cs_in; /* in: input color space */
+ u8 dither; /* in: dither bit length */
+ u8 bitlen_out; /* in: bit length */
+ u8 super_white; /* in: super white */
+ u8 aspect; /* in: aspect ratio */
+};
+
+/* backend: video mute */
+struct ps3av_av_mute {
+ u16 avport; /* in: avport */
+ u16 mute; /* in: mute on/off */
+};
+
+struct ps3av_pkt_av_video_mute {
+ struct ps3av_send_hdr send_hdr;
+ struct ps3av_av_mute mute[PS3AV_MUTE_PORT_MAX];
+};
+
+/* backend: video disable signal */
+struct ps3av_pkt_av_video_disable_sig {
+ struct ps3av_send_hdr send_hdr;
+ u16 avport; /* in: avport */
+ u16 reserved;
+};
+
+/* backend: audio param */
+struct ps3av_audio_info_frame {
+ struct pb1_bit {
+ u8 ct:4;
+ u8 rsv:1;
+ u8 cc:3;
+ } pb1;
+ struct pb2_bit {
+ u8 rsv:3;
+ u8 sf:3;
+ u8 ss:2;
+ } pb2;
+ u8 pb3;
+ u8 pb4;
+ struct pb5_bit {
+ u8 dm:1;
+ u8 lsv:4;
+ u8 rsv:3;
+ } pb5;
+};
+
+struct ps3av_pkt_av_audio_param {
+ struct ps3av_send_hdr send_hdr;
+ u16 avport; /* in: avport */
+ u16 reserved;
+ u8 mclk; /* in: audio mclk */
+ u8 ns[3]; /* in: audio ns val */
+ u8 enable; /* in: audio enable */
+ u8 swaplr; /* in: audio swap */
+ u8 fifomap; /* in: audio fifomap */
+ u8 inputctrl; /* in: audio input ctrl */
+ u8 inputlen; /* in: sample bit size */
+ u8 layout; /* in: speaker layout param */
+ struct ps3av_audio_info_frame info; /* in: info */
+ u8 chstat[5]; /* in: ch stat */
+};
+
+/* backend: audio_mute */
+struct ps3av_pkt_av_audio_mute {
+ struct ps3av_send_hdr send_hdr;
+ struct ps3av_av_mute mute[PS3AV_MUTE_PORT_MAX];
+};
+
+/* backend: hdmi_mode */
+struct ps3av_pkt_av_hdmi_mode {
+ struct ps3av_send_hdr send_hdr;
+ u8 mode; /* in: hdmi_mode */
+ u8 reserved0;
+ u8 reserved1;
+ u8 reserved2;
+};
+
+/* backend: tv_mute */
+struct ps3av_pkt_av_tv_mute {
+ struct ps3av_send_hdr send_hdr;
+ u16 avport; /* in: avport HDMI only */
+ u16 mute; /* in: mute */
+};
+
+/* video: initialize */
+struct ps3av_pkt_video_init {
+ struct ps3av_send_hdr send_hdr;
+ /* recv */
+ u32 reserved;
+};
+
+/* video: mode setting */
+struct ps3av_pkt_video_mode {
+ struct ps3av_send_hdr send_hdr;
+ u32 video_head; /* in: head */
+ u32 reserved;
+ u32 video_vid; /* in: video resolution */
+ u16 reserved1;
+ u16 width; /* in: width in pixel */
+ u16 reserved2;
+ u16 height; /* in: height in pixel */
+ u32 pitch; /* in: line size in byte */
+ u32 video_out_format; /* in: out format */
+ u32 video_format; /* in: input frame buffer format */
+ u8 reserved3;
+ u8 video_cl_cnv; /* in: color conversion */
+ u16 video_order; /* in: input RGB order */
+ u32 reserved4;
+};
+
+/* video: format */
+struct ps3av_pkt_video_format {
+ struct ps3av_send_hdr send_hdr;
+ u32 video_head; /* in: head */
+ u32 video_format; /* in: frame buffer format */
+ u8 reserved;
+ u8 video_cl_cnv; /* in: color conversion */
+ u16 video_order; /* in: input RGB order */
+};
+
+/* video: pitch */
+struct ps3av_pkt_video_pitch {
+ u16 version;
+ u16 size; /* size of command packet */
+ u32 cid; /* command id */
+ u32 video_head; /* in: head */
+ u32 pitch; /* in: line size in byte */
+};
+
+/* audio: initialize */
+struct ps3av_pkt_audio_init {
+ struct ps3av_send_hdr send_hdr;
+ /* recv */
+ u32 reserved;
+};
+
+/* audio: mode setting */
+struct ps3av_pkt_audio_mode {
+ struct ps3av_send_hdr send_hdr;
+ u8 avport; /* in: avport */
+ u8 reserved0[3];
+ u32 mask; /* in: mask */
+ u32 audio_num_of_ch; /* in: number of ch */
+ u32 audio_fs; /* in: sampling freq */
+ u32 audio_word_bits; /* in: sample bit size */
+ u32 audio_format; /* in: audio output format */
+ u32 audio_source; /* in: audio source */
+ u8 audio_enable[4]; /* in: audio enable */
+ u8 audio_swap[4]; /* in: audio swap */
+ u8 audio_map[4]; /* in: audio map */
+ u32 audio_layout; /* in: speaker layout */
+ u32 audio_downmix; /* in: audio downmix permission */
+ u32 audio_downmix_level;
+ u8 audio_cs_info[8]; /* in: IEC channel status */
+};
+
+/* audio: mute */
+struct ps3av_audio_mute {
+ u8 avport; /* in: opt_port optical */
+ u8 reserved[3];
+ u32 mute; /* in: mute */
+};
+
+struct ps3av_pkt_audio_mute {
+ struct ps3av_send_hdr send_hdr;
+ struct ps3av_audio_mute mute[PS3AV_OPT_PORT_MAX];
+};
+
+/* audio: active/inactive */
+struct ps3av_pkt_audio_active {
+ struct ps3av_send_hdr send_hdr;
+ u32 audio_port; /* in: audio active/inactive port */
+};
+
+/* audio: SPDIF user bit */
+struct ps3av_pkt_audio_spdif_bit {
+ u16 version;
+ u16 size; /* size of command packet */
+ u32 cid; /* command id */
+ u8 avport; /* in: avport SPDIF only */
+ u8 reserved[3];
+ u32 audio_port; /* in: SPDIF only */
+ u32 spdif_bit_data[12]; /* in: user bit data */
+};
+
+/* audio: audio control */
+struct ps3av_pkt_audio_ctrl {
+ u16 version;
+ u16 size; /* size of command packet */
+ u32 cid; /* command id */
+ u32 audio_ctrl_id; /* in: control id */
+ u32 audio_ctrl_data[4]; /* in: control data */
+};
+
+/* avb:param */
+#define PS3AV_PKT_AVB_PARAM_MAX_BUF_SIZE \
+ (PS3AV_AVB_NUM_VIDEO*sizeof(struct ps3av_pkt_video_mode) + \
+ PS3AV_AVB_NUM_AUDIO*sizeof(struct ps3av_pkt_audio_mode) + \
+ PS3AV_AVB_NUM_AV_VIDEO*sizeof(struct ps3av_pkt_av_video_cs) + \
+ PS3AV_AVB_NUM_AV_AUDIO*sizeof(struct ps3av_pkt_av_audio_param))
+
+struct ps3av_pkt_avb_param {
+ struct ps3av_send_hdr send_hdr;
+ u16 num_of_video_pkt;
+ u16 num_of_audio_pkt;
+ u16 num_of_av_video_pkt;
+ u16 num_of_av_audio_pkt;
+ /*
+ * The actual buffer layout depends on the fields above:
+ *
+ * struct ps3av_pkt_video_mode video[num_of_video_pkt];
+ * struct ps3av_pkt_audio_mode audio[num_of_audio_pkt];
+ * struct ps3av_pkt_av_video_cs av_video[num_of_av_video_pkt];
+ * struct ps3av_pkt_av_audio_param av_audio[num_of_av_audio_pkt];
+ */
+ u8 buf[PS3AV_PKT_AVB_PARAM_MAX_BUF_SIZE];
+};
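As the layout comment above describes, the variable-length buffer is packed in a fixed order. Below is a sketch of filling it with a single video-mode packet and no other packets; the helper name is hypothetical.

#include <linux/string.h>
#include <asm/ps3av.h>

static void example_fill_avb(struct ps3av_pkt_avb_param *avb,
        const struct ps3av_pkt_video_mode *video)
{
        avb->num_of_video_pkt = 1;
        avb->num_of_audio_pkt = 0;
        avb->num_of_av_video_pkt = 0;
        avb->num_of_av_audio_pkt = 0;

        /* video packets come first; audio, av_video and av_audio follow */
        memcpy(avb->buf, video, sizeof(*video));
}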
+
+
+/** command status **/
+#define PS3AV_STATUS_SUCCESS 0x0000 /* success */
+#define PS3AV_STATUS_RECEIVE_VUART_ERROR 0x0001 /* receive vuart error */
+#define PS3AV_STATUS_SYSCON_COMMUNICATE_FAIL 0x0002 /* syscon communication error */
+#define PS3AV_STATUS_INVALID_COMMAND 0x0003 /* obsolete invalid CID */
+#define PS3AV_STATUS_INVALID_PORT 0x0004 /* invalid port number */
+#define PS3AV_STATUS_INVALID_VID 0x0005 /* invalid video format */
+#define PS3AV_STATUS_INVALID_COLOR_SPACE 0x0006 /* invalid video color space */
+#define PS3AV_STATUS_INVALID_FS 0x0007 /* invalid audio sampling freq */
+#define PS3AV_STATUS_INVALID_AUDIO_CH 0x0008 /* invalid audio channel number */
+#define PS3AV_STATUS_UNSUPPORTED_VERSION 0x0009 /* version mismatch */
+#define PS3AV_STATUS_INVALID_SAMPLE_SIZE 0x000a /* invalid audio sample bit size */
+#define PS3AV_STATUS_FAILURE 0x000b /* other failures */
+#define PS3AV_STATUS_UNSUPPORTED_COMMAND 0x000c /* unsupported cid */
+#define PS3AV_STATUS_BUFFER_OVERFLOW 0x000d /* write buffer overflow */
+#define PS3AV_STATUS_INVALID_VIDEO_PARAM 0x000e /* invalid video param */
+#define PS3AV_STATUS_NO_SEL 0x000f /* selector does not exist */
+#define PS3AV_STATUS_INVALID_AV_PARAM 0x0010 /* invalid backend param */
+#define PS3AV_STATUS_INVALID_AUDIO_PARAM 0x0011 /* invalid audio param */
+#define PS3AV_STATUS_UNSUPPORTED_HDMI_MODE 0x0012 /* unsupported hdmi mode */
+#define PS3AV_STATUS_NO_SYNC_HEAD 0x0013 /* sync head failed */
+
+extern void ps3av_set_hdr(u32, u16, struct ps3av_send_hdr *);
+extern int ps3av_do_pkt(u32, u16, size_t, struct ps3av_send_hdr *);
+
+extern int ps3av_cmd_init(void);
+extern int ps3av_cmd_fin(void);
+extern int ps3av_cmd_av_video_mute(int, u32 *, u32);
+extern int ps3av_cmd_av_video_disable_sig(u32);
+extern int ps3av_cmd_av_tv_mute(u32, u32);
+extern int ps3av_cmd_enable_event(void);
+extern int ps3av_cmd_av_hdmi_mode(u8);
+extern u32 ps3av_cmd_set_av_video_cs(void *, u32, int, int, int, u32);
+extern u32 ps3av_cmd_set_video_mode(void *, u32, int, int, u32);
+extern int ps3av_cmd_video_format_black(u32, u32, u32);
+extern int ps3av_cmd_av_audio_mute(int, u32 *, u32);
+extern u32 ps3av_cmd_set_av_audio_param(void *, u32,
+ const struct ps3av_pkt_audio_mode *,
+ u32);
+extern void ps3av_cmd_set_audio_mode(struct ps3av_pkt_audio_mode *, u32, u32,
+ u32, u32, u32, u32);
+extern int ps3av_cmd_audio_mode(struct ps3av_pkt_audio_mode *);
+extern int ps3av_cmd_audio_mute(int, u32 *, u32);
+extern int ps3av_cmd_audio_active(int, u32);
+extern int ps3av_cmd_avb_param(struct ps3av_pkt_avb_param *, u32);
+extern int ps3av_cmd_av_get_hw_conf(struct ps3av_pkt_av_get_hw_conf *);
+extern int ps3av_cmd_video_get_monitor_info(struct ps3av_pkt_av_get_monitor_info *,
+ u32);
+
+extern int ps3av_set_video_mode(u32);
+extern int ps3av_set_audio_mode(u32, u32, u32, u32, u32);
+extern int ps3av_get_auto_mode(void);
+extern int ps3av_get_mode(void);
+extern int ps3av_video_mode2res(u32, u32 *, u32 *);
+extern int ps3av_video_mute(int);
+extern int ps3av_audio_mute(int);
+extern int ps3av_dev_open(void);
+extern int ps3av_dev_close(void);
+extern void ps3av_register_flip_ctl(void (*flip_ctl)(int on, void *data),
+ void *flip_data);
+extern void ps3av_flip_ctl(int on);
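A hedged sketch of typical mode selection using the exported helpers; the fall-back mode and the assumption that ps3av_get_auto_mode() returns a usable mode id (or a non-positive value on failure) are illustrative.

#include <linux/errno.h>
#include <asm/ps3av.h>

static int example_pick_mode(u32 *xres, u32 *yres)
{
        int mode = ps3av_get_auto_mode();

        if (mode <= 0)
                mode = PS3AV_DEFAULT_HDMI_MODE_ID_REG_60; /* assumed fall-back */

        if (ps3av_set_video_mode(mode))
                return -EIO;

        return ps3av_video_mode2res(mode, xres, yres);
}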
+
+#endif /* _ASM_POWERPC_PS3AV_H_ */
diff --git a/arch/powerpc/include/asm/ps3fb.h b/arch/powerpc/include/asm/ps3fb.h
new file mode 100644
index 0000000..3f121fe
--- /dev/null
+++ b/arch/powerpc/include/asm/ps3fb.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2006 Sony Computer Entertainment Inc.
+ * Copyright 2006, 2007 Sony Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _ASM_POWERPC_PS3FB_H_
+#define _ASM_POWERPC_PS3FB_H_
+
+#include <linux/ioctl.h>
+
+/* ioctl */
+#define PS3FB_IOCTL_SETMODE _IOW('r', 1, int) /* set video mode */
+#define PS3FB_IOCTL_GETMODE _IOR('r', 2, int) /* get video mode */
+#define PS3FB_IOCTL_SCREENINFO _IOR('r', 3, int) /* get screen info */
+#define PS3FB_IOCTL_ON _IO('r', 4) /* use IOCTL_FSEL */
+#define PS3FB_IOCTL_OFF _IO('r', 5) /* return to normal-flip */
+#define PS3FB_IOCTL_FSEL _IOW('r', 6, int) /* blit and flip request */
+
+#ifndef FBIO_WAITFORVSYNC
+#define FBIO_WAITFORVSYNC _IOW('F', 0x20, __u32) /* wait for vsync */
+#endif
+
+struct ps3fb_ioctl_res {
+ __u32 xres; /* frame buffer x_size */
+ __u32 yres; /* frame buffer y_size */
+ __u32 xoff; /* margin x */
+ __u32 yoff; /* margin y */
+ __u32 num_frames; /* num of frame buffers */
+};
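From user space the ioctls above are issued against the frame buffer device; a minimal sketch follows, with the device path assumed to be /dev/fb0.

/* user-space example */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/ps3fb.h>

int main(void)
{
        struct ps3fb_ioctl_res res;
        int fd = open("/dev/fb0", O_RDWR);      /* assumed device node */

        if (fd < 0)
                return 1;
        if (ioctl(fd, PS3FB_IOCTL_SCREENINFO, &res) == 0)
                printf("%ux%u, %u frame(s)\n", res.xres, res.yres,
                       res.num_frames);
        close(fd);
        return 0;
}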
+
+#endif /* _ASM_POWERPC_PS3FB_H_ */
diff --git a/arch/powerpc/include/asm/ps3stor.h b/arch/powerpc/include/asm/ps3stor.h
new file mode 100644
index 0000000..6fcaf71
--- /dev/null
+++ b/arch/powerpc/include/asm/ps3stor.h
@@ -0,0 +1,71 @@
+/*
+ * PS3 Storage Devices
+ *
+ * Copyright (C) 2007 Sony Computer Entertainment Inc.
+ * Copyright 2007 Sony Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _ASM_POWERPC_PS3STOR_H_
+#define _ASM_POWERPC_PS3STOR_H_
+
+#include <linux/interrupt.h>
+
+#include <asm/ps3.h>
+
+
+struct ps3_storage_region {
+ unsigned int id;
+ u64 start;
+ u64 size;
+};
+
+struct ps3_storage_device {
+ struct ps3_system_bus_device sbd;
+
+ struct ps3_dma_region dma_region;
+ unsigned int irq;
+ u64 blk_size;
+
+ u64 tag;
+ u64 lv1_status;
+ struct completion done;
+
+ unsigned long bounce_size;
+ void *bounce_buf;
+ u64 bounce_lpar;
+ dma_addr_t bounce_dma;
+
+ unsigned int num_regions;
+ unsigned long accessible_regions;
+ unsigned int region_idx; /* first accessible region */
+ struct ps3_storage_region regions[0]; /* Must be last */
+};
+
+static inline struct ps3_storage_device *to_ps3_storage_device(struct device *dev)
+{
+ return container_of(dev, struct ps3_storage_device, sbd.core);
+}
+
+extern int ps3stor_setup(struct ps3_storage_device *dev,
+ irq_handler_t handler);
+extern void ps3stor_teardown(struct ps3_storage_device *dev);
+extern u64 ps3stor_read_write_sectors(struct ps3_storage_device *dev, u64 lpar,
+ u64 start_sector, u64 sectors,
+ int write);
+extern u64 ps3stor_send_command(struct ps3_storage_device *dev, u64 cmd,
+ u64 arg1, u64 arg2, u64 arg3, u64 arg4);
+
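A sketch of a read through the preallocated bounce buffer, assuming ps3stor_read_write_sectors() returns the raw lv1 status (zero on success) and that the last argument selects read (0) or write (non-zero); the errno mapping is an assumption.

#include <linux/errno.h>
#include <linux/string.h>
#include <asm/ps3stor.h>

static int example_read_sectors(struct ps3_storage_device *dev,
        u64 start_sector, u64 sectors, void *out)
{
        u64 status;

        if (sectors * dev->blk_size > dev->bounce_size)
                return -EINVAL;

        status = ps3stor_read_write_sectors(dev, dev->bounce_lpar,
                start_sector, sectors, 0 /* read */);
        if (status)
                return -EIO;

        memcpy(out, dev->bounce_buf, sectors * dev->blk_size);
        return 0;
}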
+#endif /* _ASM_POWERPC_PS3STOR_H_ */
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
new file mode 100644
index 0000000..734e075
--- /dev/null
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -0,0 +1,293 @@
+#ifndef _ASM_POWERPC_PTRACE_H
+#define _ASM_POWERPC_PTRACE_H
+
+/*
+ * Copyright (C) 2001 PPC64 Team, IBM Corp
+ *
+ * This struct defines the way the registers are stored on the
+ * kernel stack during a system call or other kernel entry.
+ *
+ * This should contain only the volatile regs, since the non-volatile
+ * regs can be kept in the thread_struct; it should be set up this way
+ * once only the volatile regs are saved by the interrupt entry code.
+ *
+ * Since this is going on the stack, *CARE MUST BE TAKEN* to ensure
+ * that the overall structure is a multiple of 16 bytes in length.
+ *
+ * Note that the offsets of the fields in this struct correspond with
+ * the PT_* values below. This simplifies arch/powerpc/kernel/ptrace.c.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __ASSEMBLY__
+
+struct pt_regs {
+ unsigned long gpr[32];
+ unsigned long nip;
+ unsigned long msr;
+ unsigned long orig_gpr3; /* Used for restarting system calls */
+ unsigned long ctr;
+ unsigned long link;
+ unsigned long xer;
+ unsigned long ccr;
+#ifdef __powerpc64__
+ unsigned long softe; /* Soft enabled/disabled */
+#else
+ unsigned long mq; /* 601 only (not used at present) */
+ /* Used on APUS to hold IPL value. */
+#endif
+ unsigned long trap; /* Reason for being here */
+ /* N.B. for critical exceptions on 4xx, the dar and dsisr
+ fields are overloaded to hold srr0 and srr1. */
+ unsigned long dar; /* Fault registers */
+ unsigned long dsisr; /* on 4xx/Book-E used for ESR */
+ unsigned long result; /* Result of a system call */
+};
+
+#endif /* __ASSEMBLY__ */
+
+#ifdef __KERNEL__
+
+#ifdef __powerpc64__
+
+#define __ARCH_WANT_COMPAT_SYS_PTRACE
+
+#define STACK_FRAME_OVERHEAD 112 /* size of minimum stack frame */
+#define STACK_FRAME_LR_SAVE 2 /* Location of LR in stack frame */
+#define STACK_FRAME_REGS_MARKER ASM_CONST(0x7265677368657265)
+#define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + \
+ STACK_FRAME_OVERHEAD + 288)
+#define STACK_FRAME_MARKER 12
+
+/* Size of dummy stack frame allocated when calling signal handler. */
+#define __SIGNAL_FRAMESIZE 128
+#define __SIGNAL_FRAMESIZE32 64
+
+#else /* __powerpc64__ */
+
+#define STACK_FRAME_OVERHEAD 16 /* size of minimum stack frame */
+#define STACK_FRAME_LR_SAVE 1 /* Location of LR in stack frame */
+#define STACK_FRAME_REGS_MARKER ASM_CONST(0x72656773)
+#define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD)
+#define STACK_FRAME_MARKER 2
+
+/* Size of stack frame allocated when calling signal handler. */
+#define __SIGNAL_FRAMESIZE 64
+
+#endif /* __powerpc64__ */
+
+#ifndef __ASSEMBLY__
+
+#define instruction_pointer(regs) ((regs)->nip)
+#define user_stack_pointer(regs) ((regs)->gpr[1])
+#define regs_return_value(regs) ((regs)->gpr[3])
+
+#ifdef CONFIG_SMP
+extern unsigned long profile_pc(struct pt_regs *regs);
+#else
+#define profile_pc(regs) instruction_pointer(regs)
+#endif
+
+#ifdef __powerpc64__
+#define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
+#else
+#define user_mode(regs) (((regs)->msr & MSR_PR) != 0)
+#endif
+
+#define force_successful_syscall_return() \
+ do { \
+ set_thread_flag(TIF_NOERROR); \
+ } while(0)
+
+struct task_struct;
+extern unsigned long ptrace_get_reg(struct task_struct *task, int regno);
+extern int ptrace_put_reg(struct task_struct *task, int regno,
+ unsigned long data);
+
+/*
+ * We use the least-significant bit of the trap field to indicate
+ * whether we have saved the full set of registers, or only a
+ * partial set. A 1 there means the partial set.
+ * On 4xx we use the next bit to indicate whether the exception
+ * is a critical exception (1 means it is).
+ */
+#define FULL_REGS(regs) (((regs)->trap & 1) == 0)
+#ifndef __powerpc64__
+#define IS_CRITICAL_EXC(regs) (((regs)->trap & 2) != 0)
+#define IS_MCHECK_EXC(regs) (((regs)->trap & 4) != 0)
+#define IS_DEBUG_EXC(regs) (((regs)->trap & 8) != 0)
+#endif /* ! __powerpc64__ */
+#define TRAP(regs) ((regs)->trap & ~0xF)
+#ifdef __powerpc64__
+#define CHECK_FULL_REGS(regs) BUG_ON(regs->trap & 1)
+#else
+#define CHECK_FULL_REGS(regs) \
+do { \
+ if ((regs)->trap & 1) \
+ printk(KERN_CRIT "%s: partial register set\n", __FUNCTION__); \
+} while (0)
+#endif /* __powerpc64__ */
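A short sketch showing how the helpers above are typically used when reporting an exception; the message text is illustrative.

#include <linux/kernel.h>
#include <asm/ptrace.h>
#include <asm/reg.h>

static void example_report(struct pt_regs *regs)
{
        CHECK_FULL_REGS(regs);  /* warn (32-bit) or BUG (64-bit) on partial regs */

        if (!user_mode(regs))
                printk(KERN_ERR "trap %lx at %lx in kernel mode\n",
                       TRAP(regs), instruction_pointer(regs));
}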
+
+/*
+ * These are defined as per linux/ptrace.h; see that file for details.
+ */
+#define arch_has_single_step() (1)
+extern void user_enable_single_step(struct task_struct *);
+extern void user_disable_single_step(struct task_struct *);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+/*
+ * Offsets used by 'ptrace' system call interface.
+ * These can't be changed without breaking binary compatibility
+ * with MkLinux, etc.
+ */
+#define PT_R0 0
+#define PT_R1 1
+#define PT_R2 2
+#define PT_R3 3
+#define PT_R4 4
+#define PT_R5 5
+#define PT_R6 6
+#define PT_R7 7
+#define PT_R8 8
+#define PT_R9 9
+#define PT_R10 10
+#define PT_R11 11
+#define PT_R12 12
+#define PT_R13 13
+#define PT_R14 14
+#define PT_R15 15
+#define PT_R16 16
+#define PT_R17 17
+#define PT_R18 18
+#define PT_R19 19
+#define PT_R20 20
+#define PT_R21 21
+#define PT_R22 22
+#define PT_R23 23
+#define PT_R24 24
+#define PT_R25 25
+#define PT_R26 26
+#define PT_R27 27
+#define PT_R28 28
+#define PT_R29 29
+#define PT_R30 30
+#define PT_R31 31
+
+#define PT_NIP 32
+#define PT_MSR 33
+#define PT_ORIG_R3 34
+#define PT_CTR 35
+#define PT_LNK 36
+#define PT_XER 37
+#define PT_CCR 38
+#ifndef __powerpc64__
+#define PT_MQ 39
+#else
+#define PT_SOFTE 39
+#endif
+#define PT_TRAP 40
+#define PT_DAR 41
+#define PT_DSISR 42
+#define PT_RESULT 43
+#define PT_REGS_COUNT 44
+
+#define PT_FPR0 48 /* each FP reg occupies 2 slots in this space */
+
+#ifndef __powerpc64__
+
+#define PT_FPR31 (PT_FPR0 + 2*31)
+#define PT_FPSCR (PT_FPR0 + 2*32 + 1)
+
+#else /* __powerpc64__ */
+
+#define PT_FPSCR (PT_FPR0 + 32) /* each FP reg occupies 1 slot in 64-bit space */
+
+#ifdef __KERNEL__
+#define PT_FPSCR32 (PT_FPR0 + 2*32 + 1) /* each FP reg occupies 2 32-bit userspace slots */
+#endif
+
+#define PT_VR0 82 /* each Vector reg occupies 2 slots in 64-bit */
+#define PT_VSCR (PT_VR0 + 32*2 + 1)
+#define PT_VRSAVE (PT_VR0 + 33*2)
+
+#ifdef __KERNEL__
+#define PT_VR0_32 164 /* each Vector reg occupies 4 slots in 32-bit */
+#define PT_VSCR_32 (PT_VR0 + 32*4 + 3)
+#define PT_VRSAVE_32 (PT_VR0 + 33*4)
+#endif
+
+/*
+ * Only the first 32 VSRs are stored here; the second 32 VSRs overlap VR0-31.
+ */
+#define PT_VSR0 150 /* each VSR reg occupies 2 slots in 64-bit */
+#define PT_VSR31 (PT_VSR0 + 2*31)
+#ifdef __KERNEL__
+#define PT_VSR0_32 300 /* each VSR reg occupies 4 slots in 32-bit */
+#endif
+#endif /* __powerpc64__ */
+
+/*
+ * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
+ * The transfer totals 34 quadwords. Quadwords 0-31 contain the
+ * corresponding vector registers. Quadword 32 contains the vscr as the
+ * last word (offset 12) within that quadword. Quadword 33 contains the
+ * vrsave as the first word (offset 0) within the quadword.
+ *
+ * This definition of the VMX state is compatible with the current PPC32
+ * ptrace interface. This allows signal handling and ptrace to use the same
+ * structures. This also simplifies the implementation of a bi-arch
+ * (combined 32- and 64-bit) gdb.
+ */
+#define PTRACE_GETVRREGS 18
+#define PTRACE_SETVRREGS 19
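The quadword layout described above can be pictured as the following hypothetical user-space structure for the PTRACE_GETVRREGS transfer (544 bytes = 34 quadwords, assuming 4-byte unsigned int).

struct example_vrregs {
        unsigned char   vr[32][16];     /* quadwords 0-31: vr0..vr31 */
        unsigned char   vscr_pad[12];   /* quadword 32, words 0-2 */
        unsigned int    vscr;           /* quadword 32, last word */
        unsigned int    vrsave;         /* quadword 33, first word */
        unsigned char   vrsave_pad[12]; /* quadword 33, words 1-3 */
};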
+
+/* Get/set all the upper 32-bits of the SPE registers, accumulator, and
+ * spefscr, in one go */
+#define PTRACE_GETEVRREGS 20
+#define PTRACE_SETEVRREGS 21
+
+/* Get the first 32 128bit VSX registers */
+#define PTRACE_GETVSRREGS 27
+#define PTRACE_SETVSRREGS 28
+
+/*
+ * Get or set a debug register. The first 16 are DABR registers and the
+ * second 16 are IABR registers.
+ */
+#define PTRACE_GET_DEBUGREG 25
+#define PTRACE_SET_DEBUGREG 26
+
+/* (new) PTRACE requests using the same numbers as x86 and the same
+ * argument ordering. Additionally, they support more registers.
+ */
+#define PTRACE_GETREGS 12
+#define PTRACE_SETREGS 13
+#define PTRACE_GETFPREGS 14
+#define PTRACE_SETFPREGS 15
+#define PTRACE_GETREGS64 22
+#define PTRACE_SETREGS64 23
+
+/* (old) PTRACE requests with inverted arguments */
+#define PPC_PTRACE_GETREGS 0x99 /* Get GPRs 0 - 31 */
+#define PPC_PTRACE_SETREGS 0x98 /* Set GPRs 0 - 31 */
+#define PPC_PTRACE_GETFPREGS 0x97 /* Get FPRs 0 - 31 */
+#define PPC_PTRACE_SETFPREGS 0x96 /* Set FPRs 0 - 31 */
+
+/* Calls to trace a 64bit program from a 32bit program */
+#define PPC_PTRACE_PEEKTEXT_3264 0x95
+#define PPC_PTRACE_PEEKDATA_3264 0x94
+#define PPC_PTRACE_POKETEXT_3264 0x93
+#define PPC_PTRACE_POKEDATA_3264 0x92
+#define PPC_PTRACE_PEEKUSR_3264 0x91
+#define PPC_PTRACE_POKEUSR_3264 0x90
+
+#endif /* _ASM_POWERPC_PTRACE_H */
diff --git a/arch/powerpc/include/asm/qe.h b/arch/powerpc/include/asm/qe.h
new file mode 100644
index 0000000..edee15d
--- /dev/null
+++ b/arch/powerpc/include/asm/qe.h
@@ -0,0 +1,642 @@
+/*
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors: Shlomi Gridish <gridish@freescale.com>
+ * Li Yang <leoli@freescale.com>
+ *
+ * Description:
+ * QUICC Engine (QE) external definitions and structure.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#ifndef _ASM_POWERPC_QE_H
+#define _ASM_POWERPC_QE_H
+#ifdef __KERNEL__
+
+#include <linux/spinlock.h>
+#include <asm/cpm.h>
+#include <asm/immap_qe.h>
+
+#define QE_NUM_OF_SNUM 28
+#define QE_NUM_OF_BRGS 16
+#define QE_NUM_OF_PORTS 1024
+
+/* Memory partitions
+*/
+#define MEM_PART_SYSTEM 0
+#define MEM_PART_SECONDARY 1
+#define MEM_PART_MURAM 2
+
+/* Clocks and BRGs */
+enum qe_clock {
+ QE_CLK_NONE = 0,
+ QE_BRG1, /* Baud Rate Generator 1 */
+ QE_BRG2, /* Baud Rate Generator 2 */
+ QE_BRG3, /* Baud Rate Generator 3 */
+ QE_BRG4, /* Baud Rate Generator 4 */
+ QE_BRG5, /* Baud Rate Generator 5 */
+ QE_BRG6, /* Baud Rate Generator 6 */
+ QE_BRG7, /* Baud Rate Generator 7 */
+ QE_BRG8, /* Baud Rate Generator 8 */
+ QE_BRG9, /* Baud Rate Generator 9 */
+ QE_BRG10, /* Baud Rate Generator 10 */
+ QE_BRG11, /* Baud Rate Generator 11 */
+ QE_BRG12, /* Baud Rate Generator 12 */
+ QE_BRG13, /* Baud Rate Generator 13 */
+ QE_BRG14, /* Baud Rate Generator 14 */
+ QE_BRG15, /* Baud Rate Generator 15 */
+ QE_BRG16, /* Baud Rate Generator 16 */
+ QE_CLK1, /* Clock 1 */
+ QE_CLK2, /* Clock 2 */
+ QE_CLK3, /* Clock 3 */
+ QE_CLK4, /* Clock 4 */
+ QE_CLK5, /* Clock 5 */
+ QE_CLK6, /* Clock 6 */
+ QE_CLK7, /* Clock 7 */
+ QE_CLK8, /* Clock 8 */
+ QE_CLK9, /* Clock 9 */
+ QE_CLK10, /* Clock 10 */
+ QE_CLK11, /* Clock 11 */
+ QE_CLK12, /* Clock 12 */
+ QE_CLK13, /* Clock 13 */
+ QE_CLK14, /* Clock 14 */
+ QE_CLK15, /* Clock 15 */
+ QE_CLK16, /* Clock 16 */
+ QE_CLK17, /* Clock 17 */
+ QE_CLK18, /* Clock 18 */
+ QE_CLK19, /* Clock 19 */
+ QE_CLK20, /* Clock 20 */
+ QE_CLK21, /* Clock 21 */
+ QE_CLK22, /* Clock 22 */
+ QE_CLK23, /* Clock 23 */
+ QE_CLK24, /* Clock 24 */
+ QE_CLK_DUMMY
+};
+
+static inline bool qe_clock_is_brg(enum qe_clock clk)
+{
+ return clk >= QE_BRG1 && clk <= QE_BRG16;
+}
+
+extern spinlock_t cmxgcr_lock;
+
+/* Export QE common operations */
+extern void __init qe_reset(void);
+
+/* QE PIO */
+#define QE_PIO_PINS 32
+
+struct qe_pio_regs {
+ __be32 cpodr; /* Open drain register */
+ __be32 cpdata; /* Data register */
+ __be32 cpdir1; /* Direction register */
+ __be32 cpdir2; /* Direction register */
+ __be32 cppar1; /* Pin assignment register */
+ __be32 cppar2; /* Pin assignment register */
+#ifdef CONFIG_PPC_85xx
+ u8 pad[8];
+#endif
+};
+
+extern int par_io_init(struct device_node *np);
+extern int par_io_of_config(struct device_node *np);
+#define QE_PIO_DIR_IN 2
+#define QE_PIO_DIR_OUT 1
+extern void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin,
+ int dir, int open_drain, int assignment,
+ int has_irq);
+extern int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain,
+ int assignment, int has_irq);
+extern int par_io_data_set(u8 port, u8 pin, u8 val);
+
+/* QE internal API */
+int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input);
+enum qe_clock qe_clock_source(const char *source);
+unsigned int qe_get_brg_clk(void);
+int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier);
+int qe_get_snum(void);
+void qe_put_snum(u8 snum);
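A sketch of programming a baud-rate generator with the helpers above; the clock-name string and the 16x divider are assumptions chosen for illustration.

#include <linux/errno.h>
#include <asm/qe.h>

/* program a BRG for a 115200 baud UART with 16x oversampling */
static int example_set_baud(void)
{
        enum qe_clock clk = qe_clock_source("brg2");    /* name string assumed */

        if (!qe_clock_is_brg(clk))
                return -EINVAL;

        return qe_setbrg(clk, 115200, 16);
}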
+/* we actually use cpm_muram implementation, define this for convenience */
+#define qe_muram_init cpm_muram_init
+#define qe_muram_alloc cpm_muram_alloc
+#define qe_muram_alloc_fixed cpm_muram_alloc_fixed
+#define qe_muram_free cpm_muram_free
+#define qe_muram_addr cpm_muram_addr
+#define qe_muram_offset cpm_muram_offset
+
+/* Structure that defines QE firmware binary files.
+ *
+ * See Documentation/powerpc/qe-firmware.txt for a description of these
+ * fields.
+ */
+struct qe_firmware {
+ struct qe_header {
+ __be32 length; /* Length of the entire structure, in bytes */
+ u8 magic[3]; /* Set to { 'Q', 'E', 'F' } */
+ u8 version; /* Version of this layout. First ver is '1' */
+ } header;
+ u8 id[62]; /* Null-terminated identifier string */
+ u8 split; /* 0 = shared I-RAM, 1 = split I-RAM */
+ u8 count; /* Number of microcode[] structures */
+ struct {
+ __be16 model; /* The SOC model */
+ u8 major; /* The SOC revision major */
+ u8 minor; /* The SOC revision minor */
+ } __attribute__ ((packed)) soc;
+ u8 padding[4]; /* Reserved, for alignment */
+ __be64 extended_modes; /* Extended modes */
+ __be32 vtraps[8]; /* Virtual trap addresses */
+ u8 reserved[4]; /* Reserved, for future expansion */
+ struct qe_microcode {
+ u8 id[32]; /* Null-terminated identifier */
+ __be32 traps[16]; /* Trap addresses, 0 == ignore */
+ __be32 eccr; /* The value for the ECCR register */
+ __be32 iram_offset; /* Offset into I-RAM for the code */
+ __be32 count; /* Number of 32-bit words of the code */
+ __be32 code_offset; /* Offset of the actual microcode */
+ u8 major; /* The microcode version major */
+ u8 minor; /* The microcode version minor */
+ u8 revision; /* The microcode version revision */
+ u8 padding; /* Reserved, for alignment */
+ u8 reserved[4]; /* Reserved, for future expansion */
+ } __attribute__ ((packed)) microcode[1];
+ /* All microcode binaries should be located here */
+ /* CRC32 should be located here, after the microcode binaries */
+} __attribute__ ((packed));
+
+struct qe_firmware_info {
+ char id[64]; /* Firmware name */
+ u32 vtraps[8]; /* Virtual trap addresses */
+ u64 extended_modes; /* Extended modes */
+};
+
+/* Upload a firmware to the QE */
+int qe_upload_firmware(const struct qe_firmware *firmware);
+
+/* Obtain information on the uploaded firmware */
+struct qe_firmware_info *qe_get_firmware_info(void);
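A hedged sketch of validating a firmware blob (e.g. obtained via request_firmware()) against the header layout above before handing it to qe_upload_firmware(); the checks shown are a minimal subset.

#include <linux/errno.h>
#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/qe.h>

static int example_upload(const u8 *blob, size_t size)
{
        const struct qe_firmware *fw = (const struct qe_firmware *)blob;

        if (size < sizeof(struct qe_header) ||
            fw->header.magic[0] != 'Q' || fw->header.magic[1] != 'E' ||
            fw->header.magic[2] != 'F' ||
            size < be32_to_cpu(fw->header.length))
                return -EINVAL;

        return qe_upload_firmware(fw);
}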
+
+/* QE USB */
+int qe_usb_clock_set(enum qe_clock clk, int rate);
+
+/* Buffer descriptors */
+struct qe_bd {
+ __be16 status;
+ __be16 length;
+ __be32 buf;
+} __attribute__ ((packed));
+
+#define BD_STATUS_MASK 0xffff0000
+#define BD_LENGTH_MASK 0x0000ffff
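
Buffer descriptors sit in big-endian MURAM, so drivers fill them through the byte-swapping I/O accessors. A minimal sketch (the helper name and the write ordering reflect common practice, not a requirement of this header):

    static void example_fill_bd(struct qe_bd __iomem *bd, u32 buf_phys,
                                u16 len, u16 status)
    {
            out_be32(&bd->buf, buf_phys);   /* physical buffer address */
            out_be16(&bd->length, len);
            out_be16(&bd->status, status);  /* status word written last */
    }
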
+
+/* Alignment */
+#define QE_INTR_TABLE_ALIGN 16 /* ??? */
+#define QE_ALIGNMENT_OF_BD 8
+#define QE_ALIGNMENT_OF_PRAM 64
+
+/* RISC allocation */
+enum qe_risc_allocation {
+ QE_RISC_ALLOCATION_RISC1 = 1, /* RISC 1 */
+ QE_RISC_ALLOCATION_RISC2 = 2, /* RISC 2 */
+ QE_RISC_ALLOCATION_RISC1_AND_RISC2 = 3 /* Dynamically choose
+ RISC 1 or RISC 2 */
+};
+
+/* QE extended filtering Table Lookup Key Size */
+enum qe_fltr_tbl_lookup_key_size {
+ QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES
+ = 0x3f, /* LookupKey parsed by the Generate LookupKey
+ CMD is truncated to 8 bytes */
+ QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES
+ = 0x5f, /* LookupKey parsed by the Generate LookupKey
+ CMD is truncated to 16 bytes */
+};
+
+/* QE FLTR extended filtering Largest External Table Lookup Key Size */
+enum qe_fltr_largest_external_tbl_lookup_key_size {
+ QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE
+ = 0x0,/* not used */
+ QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES
+ = QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES, /* 8 bytes */
+ QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES
+ = QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES, /* 16 bytes */
+};
+
+/* structure representing QE parameter RAM */
+struct qe_timer_tables {
+ u16 tm_base; /* QE timer table base adr */
+ u16 tm_ptr; /* QE timer table pointer */
+ u16 r_tmr; /* QE timer mode register */
+ u16 r_tmv; /* QE timer valid register */
+ u32 tm_cmd; /* QE timer cmd register */
+ u32 tm_cnt; /* QE timer internal cnt */
+} __attribute__ ((packed));
+
+#define QE_FLTR_TAD_SIZE 8
+
+/* QE extended filtering Termination Action Descriptor (TAD) */
+struct qe_fltr_tad {
+ u8 serialized[QE_FLTR_TAD_SIZE];
+} __attribute__ ((packed));
+
+/* Communication Direction */
+enum comm_dir {
+ COMM_DIR_NONE = 0,
+ COMM_DIR_RX = 1,
+ COMM_DIR_TX = 2,
+ COMM_DIR_RX_AND_TX = 3
+};
+
+/* QE CMXUCR Registers.
+ * There are two UCCs represented in each of the four CMXUCR registers.
+ * These values are for the UCC in the LSBs
+ */
+#define QE_CMXUCR_MII_ENET_MNG 0x00007000
+#define QE_CMXUCR_MII_ENET_MNG_SHIFT 12
+#define QE_CMXUCR_GRANT 0x00008000
+#define QE_CMXUCR_TSA 0x00004000
+#define QE_CMXUCR_BKPT 0x00000100
+#define QE_CMXUCR_TX_CLK_SRC_MASK 0x0000000F
+
+/* QE CMXGCR Registers.
+*/
+#define QE_CMXGCR_MII_ENET_MNG 0x00007000
+#define QE_CMXGCR_MII_ENET_MNG_SHIFT 12
+#define QE_CMXGCR_USBCS 0x0000000f
+#define QE_CMXGCR_USBCS_CLK3 0x1
+#define QE_CMXGCR_USBCS_CLK5 0x2
+#define QE_CMXGCR_USBCS_CLK7 0x3
+#define QE_CMXGCR_USBCS_CLK9 0x4
+#define QE_CMXGCR_USBCS_CLK13 0x5
+#define QE_CMXGCR_USBCS_CLK17 0x6
+#define QE_CMXGCR_USBCS_CLK19 0x7
+#define QE_CMXGCR_USBCS_CLK21 0x8
+#define QE_CMXGCR_USBCS_BRG9 0x9
+#define QE_CMXGCR_USBCS_BRG10 0xa
+
+/* QE CECR Commands.
+*/
+#define QE_CR_FLG 0x00010000
+#define QE_RESET 0x80000000
+#define QE_INIT_TX_RX 0x00000000
+#define QE_INIT_RX 0x00000001
+#define QE_INIT_TX 0x00000002
+#define QE_ENTER_HUNT_MODE 0x00000003
+#define QE_STOP_TX 0x00000004
+#define QE_GRACEFUL_STOP_TX 0x00000005
+#define QE_RESTART_TX 0x00000006
+#define QE_CLOSE_RX_BD 0x00000007
+#define QE_SWITCH_COMMAND 0x00000007
+#define QE_SET_GROUP_ADDRESS 0x00000008
+#define QE_START_IDMA 0x00000009
+#define QE_MCC_STOP_RX 0x00000009
+#define QE_ATM_TRANSMIT 0x0000000a
+#define QE_HPAC_CLEAR_ALL 0x0000000b
+#define QE_GRACEFUL_STOP_RX 0x0000001a
+#define QE_RESTART_RX 0x0000001b
+#define QE_HPAC_SET_PRIORITY 0x0000010b
+#define QE_HPAC_STOP_TX 0x0000020b
+#define QE_HPAC_STOP_RX 0x0000030b
+#define QE_HPAC_GRACEFUL_STOP_TX 0x0000040b
+#define QE_HPAC_GRACEFUL_STOP_RX 0x0000050b
+#define QE_HPAC_START_TX 0x0000060b
+#define QE_HPAC_START_RX 0x0000070b
+#define QE_USB_STOP_TX 0x0000000a
+#define QE_USB_RESTART_TX 0x0000000c
+#define QE_QMC_STOP_TX 0x0000000c
+#define QE_QMC_STOP_RX 0x0000000d
+#define QE_SS7_SU_FIL_RESET 0x0000000e
+/* jonathbr added from here down for 83xx */
+#define QE_RESET_BCS 0x0000000a
+#define QE_MCC_INIT_TX_RX_16 0x00000003
+#define QE_MCC_STOP_TX 0x00000004
+#define QE_MCC_INIT_TX_1 0x00000005
+#define QE_MCC_INIT_RX_1 0x00000006
+#define QE_MCC_RESET 0x00000007
+#define QE_SET_TIMER 0x00000008
+#define QE_RANDOM_NUMBER 0x0000000c
+#define QE_ATM_MULTI_THREAD_INIT 0x00000011
+#define QE_ASSIGN_PAGE 0x00000012
+#define QE_ADD_REMOVE_HASH_ENTRY 0x00000013
+#define QE_START_FLOW_CONTROL 0x00000014
+#define QE_STOP_FLOW_CONTROL 0x00000015
+#define QE_ASSIGN_PAGE_TO_DEVICE 0x00000016
+
+#define QE_ASSIGN_RISC 0x00000010
+#define QE_CR_MCN_NORMAL_SHIFT 6
+#define QE_CR_MCN_USB_SHIFT 4
+#define QE_CR_MCN_RISC_ASSIGN_SHIFT 8
+#define QE_CR_SNUM_SHIFT 17
+
+/* QE CECR Sub Block - sub block of QE command.
+*/
+#define QE_CR_SUBBLOCK_INVALID 0x00000000
+#define QE_CR_SUBBLOCK_USB 0x03200000
+#define QE_CR_SUBBLOCK_UCCFAST1 0x02000000
+#define QE_CR_SUBBLOCK_UCCFAST2 0x02200000
+#define QE_CR_SUBBLOCK_UCCFAST3 0x02400000
+#define QE_CR_SUBBLOCK_UCCFAST4 0x02600000
+#define QE_CR_SUBBLOCK_UCCFAST5 0x02800000
+#define QE_CR_SUBBLOCK_UCCFAST6 0x02a00000
+#define QE_CR_SUBBLOCK_UCCFAST7 0x02c00000
+#define QE_CR_SUBBLOCK_UCCFAST8 0x02e00000
+#define QE_CR_SUBBLOCK_UCCSLOW1 0x00000000
+#define QE_CR_SUBBLOCK_UCCSLOW2 0x00200000
+#define QE_CR_SUBBLOCK_UCCSLOW3 0x00400000
+#define QE_CR_SUBBLOCK_UCCSLOW4 0x00600000
+#define QE_CR_SUBBLOCK_UCCSLOW5 0x00800000
+#define QE_CR_SUBBLOCK_UCCSLOW6 0x00a00000
+#define QE_CR_SUBBLOCK_UCCSLOW7 0x00c00000
+#define QE_CR_SUBBLOCK_UCCSLOW8 0x00e00000
+#define QE_CR_SUBBLOCK_MCC1 0x03800000
+#define QE_CR_SUBBLOCK_MCC2 0x03a00000
+#define QE_CR_SUBBLOCK_MCC3 0x03000000
+#define QE_CR_SUBBLOCK_IDMA1 0x02800000
+#define QE_CR_SUBBLOCK_IDMA2 0x02a00000
+#define QE_CR_SUBBLOCK_IDMA3 0x02c00000
+#define QE_CR_SUBBLOCK_IDMA4 0x02e00000
+#define QE_CR_SUBBLOCK_HPAC 0x01e00000
+#define QE_CR_SUBBLOCK_SPI1 0x01400000
+#define QE_CR_SUBBLOCK_SPI2 0x01600000
+#define QE_CR_SUBBLOCK_RAND 0x01c00000
+#define QE_CR_SUBBLOCK_TIMER 0x01e00000
+#define QE_CR_SUBBLOCK_GENERAL 0x03c00000
+
+/* QE CECR Protocol - For non-MCC, specifies mode for QE CECR command */
+#define QE_CR_PROTOCOL_UNSPECIFIED 0x00 /* For all other protocols */
+#define QE_CR_PROTOCOL_HDLC_TRANSPARENT 0x00
+#define QE_CR_PROTOCOL_QMC 0x02
+#define QE_CR_PROTOCOL_UART 0x04
+#define QE_CR_PROTOCOL_ATM_POS 0x0A
+#define QE_CR_PROTOCOL_ETHERNET 0x0C
+#define QE_CR_PROTOCOL_L2_SWITCH 0x0D
+
+/* BRG configuration register */
+#define QE_BRGC_ENABLE 0x00010000
+#define QE_BRGC_DIVISOR_SHIFT 1
+#define QE_BRGC_DIVISOR_MAX 0xFFF
+#define QE_BRGC_DIV16 1
+
+/* QE Timers registers */
+#define QE_GTCFR1_PCAS 0x80
+#define QE_GTCFR1_STP2 0x20
+#define QE_GTCFR1_RST2 0x10
+#define QE_GTCFR1_GM2 0x08
+#define QE_GTCFR1_GM1 0x04
+#define QE_GTCFR1_STP1 0x02
+#define QE_GTCFR1_RST1 0x01
+
+/* SDMA registers */
+#define QE_SDSR_BER1 0x02000000
+#define QE_SDSR_BER2 0x01000000
+
+#define QE_SDMR_GLB_1_MSK 0x80000000
+#define QE_SDMR_ADR_SEL 0x20000000
+#define QE_SDMR_BER1_MSK 0x02000000
+#define QE_SDMR_BER2_MSK 0x01000000
+#define QE_SDMR_EB1_MSK 0x00800000
+#define QE_SDMR_ER1_MSK 0x00080000
+#define QE_SDMR_ER2_MSK 0x00040000
+#define QE_SDMR_CEN_MASK 0x0000E000
+#define QE_SDMR_SBER_1 0x00000200
+#define QE_SDMR_SBER_2 0x00000200
+#define QE_SDMR_EB1_PR_MASK 0x000000C0
+#define QE_SDMR_ER1_PR 0x00000008
+
+#define QE_SDMR_CEN_SHIFT 13
+#define QE_SDMR_EB1_PR_SHIFT 6
+
+#define QE_SDTM_MSNUM_SHIFT 24
+
+#define QE_SDEBCR_BA_MASK 0x01FFFFFF
+
+/* Communication Processor */
+#define QE_CP_CERCR_MEE 0x8000 /* Multi-user RAM ECC enable */
+#define QE_CP_CERCR_IEE 0x4000 /* Instruction RAM ECC enable */
+#define QE_CP_CERCR_CIR 0x0800 /* Common instruction RAM */
+
+/* I-RAM */
+#define QE_IRAM_IADD_AIE 0x80000000 /* Auto Increment Enable */
+#define QE_IRAM_IADD_BADDR 0x00080000 /* Base Address */
+
+/* UPC */
+#define UPGCR_PROTOCOL 0x80000000 /* protocol ul2 or pl2 */
+#define UPGCR_TMS 0x40000000 /* Transmit master/slave mode */
+#define UPGCR_RMS 0x20000000 /* Receive master/slave mode */
+#define UPGCR_ADDR 0x10000000 /* Master MPHY Addr multiplexing */
+#define UPGCR_DIAG 0x01000000 /* Diagnostic mode */
+
+/* UCC GUEMR register */
+#define UCC_GUEMR_MODE_MASK_RX 0x02
+#define UCC_GUEMR_MODE_FAST_RX 0x02
+#define UCC_GUEMR_MODE_SLOW_RX 0x00
+#define UCC_GUEMR_MODE_MASK_TX 0x01
+#define UCC_GUEMR_MODE_FAST_TX 0x01
+#define UCC_GUEMR_MODE_SLOW_TX 0x00
+#define UCC_GUEMR_MODE_MASK (UCC_GUEMR_MODE_MASK_RX | UCC_GUEMR_MODE_MASK_TX)
+#define UCC_GUEMR_SET_RESERVED3 0x10 /* Bit 3 in the guemr is reserved but
+ must be set to 1 */
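
As a small worked example of the GUEMR bits above, a fast-protocol UCC sets both mode bits plus the mandatory reserved bit (value shown for illustration; drivers write this into the UCC's guemr register):

    /* fast Rx + fast Tx, reserved bit 3 set: 0x10 | 0x02 | 0x01 == 0x13 */
    u8 guemr = UCC_GUEMR_SET_RESERVED3 |
               UCC_GUEMR_MODE_FAST_RX | UCC_GUEMR_MODE_FAST_TX;
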
+
+/* structure representing UCC SLOW parameter RAM */
+struct ucc_slow_pram {
+ __be16 rbase; /* RX BD base address */
+ __be16 tbase; /* TX BD base address */
+ u8 rbmr; /* RX bus mode register (same as CPM's RFCR) */
+ u8 tbmr; /* TX bus mode register (same as CPM's TFCR) */
+ __be16 mrblr; /* Rx buffer length */
+ __be32 rstate; /* Rx internal state */
+ __be32 rptr; /* Rx internal data pointer */
+ __be16 rbptr; /* rb BD Pointer */
+ __be16 rcount; /* Rx internal byte count */
+ __be32 rtemp; /* Rx temp */
+ __be32 tstate; /* Tx internal state */
+ __be32 tptr; /* Tx internal data pointer */
+ __be16 tbptr; /* Tx BD pointer */
+ __be16 tcount; /* Tx byte count */
+ __be32 ttemp; /* Tx temp */
+ __be32 rcrc; /* temp receive CRC */
+ __be32 tcrc; /* temp transmit CRC */
+} __attribute__ ((packed));
+
+/* General UCC SLOW Mode Register (GUMRH & GUMRL) */
+#define UCC_SLOW_GUMR_H_SAM_QMC 0x00000000
+#define UCC_SLOW_GUMR_H_SAM_SATM 0x00008000
+#define UCC_SLOW_GUMR_H_REVD 0x00002000
+#define UCC_SLOW_GUMR_H_TRX 0x00001000
+#define UCC_SLOW_GUMR_H_TTX 0x00000800
+#define UCC_SLOW_GUMR_H_CDP 0x00000400
+#define UCC_SLOW_GUMR_H_CTSP 0x00000200
+#define UCC_SLOW_GUMR_H_CDS 0x00000100
+#define UCC_SLOW_GUMR_H_CTSS 0x00000080
+#define UCC_SLOW_GUMR_H_TFL 0x00000040
+#define UCC_SLOW_GUMR_H_RFW 0x00000020
+#define UCC_SLOW_GUMR_H_TXSY 0x00000010
+#define UCC_SLOW_GUMR_H_4SYNC 0x00000004
+#define UCC_SLOW_GUMR_H_8SYNC 0x00000008
+#define UCC_SLOW_GUMR_H_16SYNC 0x0000000c
+#define UCC_SLOW_GUMR_H_RTSM 0x00000002
+#define UCC_SLOW_GUMR_H_RSYN 0x00000001
+
+#define UCC_SLOW_GUMR_L_TCI 0x10000000
+#define UCC_SLOW_GUMR_L_RINV 0x02000000
+#define UCC_SLOW_GUMR_L_TINV 0x01000000
+#define UCC_SLOW_GUMR_L_TEND 0x00040000
+#define UCC_SLOW_GUMR_L_TDCR_MASK 0x00030000
+#define UCC_SLOW_GUMR_L_TDCR_32 0x00030000
+#define UCC_SLOW_GUMR_L_TDCR_16 0x00020000
+#define UCC_SLOW_GUMR_L_TDCR_8 0x00010000
+#define UCC_SLOW_GUMR_L_TDCR_1 0x00000000
+#define UCC_SLOW_GUMR_L_RDCR_MASK 0x0000c000
+#define UCC_SLOW_GUMR_L_RDCR_32 0x0000c000
+#define UCC_SLOW_GUMR_L_RDCR_16 0x00008000
+#define UCC_SLOW_GUMR_L_RDCR_8 0x00004000
+#define UCC_SLOW_GUMR_L_RDCR_1 0x00000000
+#define UCC_SLOW_GUMR_L_RENC_NRZI 0x00000800
+#define UCC_SLOW_GUMR_L_RENC_NRZ 0x00000000
+#define UCC_SLOW_GUMR_L_TENC_NRZI 0x00000100
+#define UCC_SLOW_GUMR_L_TENC_NRZ 0x00000000
+#define UCC_SLOW_GUMR_L_DIAG_MASK 0x000000c0
+#define UCC_SLOW_GUMR_L_DIAG_LE 0x000000c0
+#define UCC_SLOW_GUMR_L_DIAG_ECHO 0x00000080
+#define UCC_SLOW_GUMR_L_DIAG_LOOP 0x00000040
+#define UCC_SLOW_GUMR_L_DIAG_NORM 0x00000000
+#define UCC_SLOW_GUMR_L_ENR 0x00000020
+#define UCC_SLOW_GUMR_L_ENT 0x00000010
+#define UCC_SLOW_GUMR_L_MODE_MASK 0x0000000F
+#define UCC_SLOW_GUMR_L_MODE_BISYNC 0x00000008
+#define UCC_SLOW_GUMR_L_MODE_AHDLC 0x00000006
+#define UCC_SLOW_GUMR_L_MODE_UART 0x00000004
+#define UCC_SLOW_GUMR_L_MODE_QMC 0x00000002
+
+/* General UCC FAST Mode Register */
+#define UCC_FAST_GUMR_TCI 0x20000000
+#define UCC_FAST_GUMR_TRX 0x10000000
+#define UCC_FAST_GUMR_TTX 0x08000000
+#define UCC_FAST_GUMR_CDP 0x04000000
+#define UCC_FAST_GUMR_CTSP 0x02000000
+#define UCC_FAST_GUMR_CDS 0x01000000
+#define UCC_FAST_GUMR_CTSS 0x00800000
+#define UCC_FAST_GUMR_TXSY 0x00020000
+#define UCC_FAST_GUMR_RSYN 0x00010000
+#define UCC_FAST_GUMR_RTSM 0x00002000
+#define UCC_FAST_GUMR_REVD 0x00000400
+#define UCC_FAST_GUMR_ENR 0x00000020
+#define UCC_FAST_GUMR_ENT 0x00000010
+
+/* UART Slow UCC Event Register (UCCE) */
+#define UCC_UART_UCCE_AB 0x0200
+#define UCC_UART_UCCE_IDLE 0x0100
+#define UCC_UART_UCCE_GRA 0x0080
+#define UCC_UART_UCCE_BRKE 0x0040
+#define UCC_UART_UCCE_BRKS 0x0020
+#define UCC_UART_UCCE_CCR 0x0008
+#define UCC_UART_UCCE_BSY 0x0004
+#define UCC_UART_UCCE_TX 0x0002
+#define UCC_UART_UCCE_RX 0x0001
+
+/* HDLC Slow UCC Event Register (UCCE) */
+#define UCC_HDLC_UCCE_GLR 0x1000
+#define UCC_HDLC_UCCE_GLT 0x0800
+#define UCC_HDLC_UCCE_IDLE 0x0100
+#define UCC_HDLC_UCCE_BRKE 0x0040
+#define UCC_HDLC_UCCE_BRKS 0x0020
+#define UCC_HDLC_UCCE_TXE 0x0010
+#define UCC_HDLC_UCCE_RXF 0x0008
+#define UCC_HDLC_UCCE_BSY 0x0004
+#define UCC_HDLC_UCCE_TXB 0x0002
+#define UCC_HDLC_UCCE_RXB 0x0001
+
+/* BISYNC Slow UCC Event Register (UCCE) */
+#define UCC_BISYNC_UCCE_GRA 0x0080
+#define UCC_BISYNC_UCCE_TXE 0x0010
+#define UCC_BISYNC_UCCE_RCH 0x0008
+#define UCC_BISYNC_UCCE_BSY 0x0004
+#define UCC_BISYNC_UCCE_TXB 0x0002
+#define UCC_BISYNC_UCCE_RXB 0x0001
+
+/* Gigabit Ethernet Fast UCC Event Register (UCCE) */
+#define UCC_GETH_UCCE_MPD 0x80000000
+#define UCC_GETH_UCCE_SCAR 0x40000000
+#define UCC_GETH_UCCE_GRA 0x20000000
+#define UCC_GETH_UCCE_CBPR 0x10000000
+#define UCC_GETH_UCCE_BSY 0x08000000
+#define UCC_GETH_UCCE_RXC 0x04000000
+#define UCC_GETH_UCCE_TXC 0x02000000
+#define UCC_GETH_UCCE_TXE 0x01000000
+#define UCC_GETH_UCCE_TXB7 0x00800000
+#define UCC_GETH_UCCE_TXB6 0x00400000
+#define UCC_GETH_UCCE_TXB5 0x00200000
+#define UCC_GETH_UCCE_TXB4 0x00100000
+#define UCC_GETH_UCCE_TXB3 0x00080000
+#define UCC_GETH_UCCE_TXB2 0x00040000
+#define UCC_GETH_UCCE_TXB1 0x00020000
+#define UCC_GETH_UCCE_TXB0 0x00010000
+#define UCC_GETH_UCCE_RXB7 0x00008000
+#define UCC_GETH_UCCE_RXB6 0x00004000
+#define UCC_GETH_UCCE_RXB5 0x00002000
+#define UCC_GETH_UCCE_RXB4 0x00001000
+#define UCC_GETH_UCCE_RXB3 0x00000800
+#define UCC_GETH_UCCE_RXB2 0x00000400
+#define UCC_GETH_UCCE_RXB1 0x00000200
+#define UCC_GETH_UCCE_RXB0 0x00000100
+#define UCC_GETH_UCCE_RXF7 0x00000080
+#define UCC_GETH_UCCE_RXF6 0x00000040
+#define UCC_GETH_UCCE_RXF5 0x00000020
+#define UCC_GETH_UCCE_RXF4 0x00000010
+#define UCC_GETH_UCCE_RXF3 0x00000008
+#define UCC_GETH_UCCE_RXF2 0x00000004
+#define UCC_GETH_UCCE_RXF1 0x00000002
+#define UCC_GETH_UCCE_RXF0 0x00000001
+
+/* UPSMR, when used as a UART */
+#define UCC_UART_UPSMR_FLC 0x8000
+#define UCC_UART_UPSMR_SL 0x4000
+#define UCC_UART_UPSMR_CL_MASK 0x3000
+#define UCC_UART_UPSMR_CL_8 0x3000
+#define UCC_UART_UPSMR_CL_7 0x2000
+#define UCC_UART_UPSMR_CL_6 0x1000
+#define UCC_UART_UPSMR_CL_5 0x0000
+#define UCC_UART_UPSMR_UM_MASK 0x0c00
+#define UCC_UART_UPSMR_UM_NORMAL 0x0000
+#define UCC_UART_UPSMR_UM_MAN_MULTI 0x0400
+#define UCC_UART_UPSMR_UM_AUTO_MULTI 0x0c00
+#define UCC_UART_UPSMR_FRZ 0x0200
+#define UCC_UART_UPSMR_RZS 0x0100
+#define UCC_UART_UPSMR_SYN 0x0080
+#define UCC_UART_UPSMR_DRT 0x0040
+#define UCC_UART_UPSMR_PEN 0x0010
+#define UCC_UART_UPSMR_RPM_MASK 0x000c
+#define UCC_UART_UPSMR_RPM_ODD 0x0000
+#define UCC_UART_UPSMR_RPM_LOW 0x0004
+#define UCC_UART_UPSMR_RPM_EVEN 0x0008
+#define UCC_UART_UPSMR_RPM_HIGH 0x000C
+#define UCC_UART_UPSMR_TPM_MASK 0x0003
+#define UCC_UART_UPSMR_TPM_ODD 0x0000
+#define UCC_UART_UPSMR_TPM_LOW 0x0001
+#define UCC_UART_UPSMR_TPM_EVEN 0x0002
+#define UCC_UART_UPSMR_TPM_HIGH 0x0003
+
+/* UCC Transmit On Demand Register (UTODR) */
+#define UCC_SLOW_TOD 0x8000
+#define UCC_FAST_TOD 0x8000
+
+/* UCC Bus Mode Register masks */
+/* Not to be confused with the Bundle Mode Register */
+#define UCC_BMR_GBL 0x20
+#define UCC_BMR_BO_BE 0x10
+#define UCC_BMR_CETM 0x04
+#define UCC_BMR_DTB 0x02
+#define UCC_BMR_BDB 0x01
+
+/* Function code masks */
+#define FC_GBL 0x20
+#define FC_DTB_LCL 0x02
+#define UCC_FAST_FUNCTION_CODE_GBL 0x20
+#define UCC_FAST_FUNCTION_CODE_DTB_LCL 0x02
+#define UCC_FAST_FUNCTION_CODE_BDB_LCL 0x01
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_QE_H */
diff --git a/arch/powerpc/include/asm/qe_ic.h b/arch/powerpc/include/asm/qe_ic.h
new file mode 100644
index 0000000..56a7745
--- /dev/null
+++ b/arch/powerpc/include/asm/qe_ic.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors: Shlomi Gridish <gridish@freescale.com>
+ * Li Yang <leoli@freescale.com>
+ *
+ * Description:
+ * QE IC external definitions and structure.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#ifndef _ASM_POWERPC_QE_IC_H
+#define _ASM_POWERPC_QE_IC_H
+
+#include <linux/irq.h>
+
+#define NUM_OF_QE_IC_GROUPS 6
+
+/* Flags when we init the QE IC */
+#define QE_IC_SPREADMODE_GRP_W 0x00000001
+#define QE_IC_SPREADMODE_GRP_X 0x00000002
+#define QE_IC_SPREADMODE_GRP_Y 0x00000004
+#define QE_IC_SPREADMODE_GRP_Z 0x00000008
+#define QE_IC_SPREADMODE_GRP_RISCA 0x00000010
+#define QE_IC_SPREADMODE_GRP_RISCB 0x00000020
+
+#define QE_IC_LOW_SIGNAL 0x00000100
+#define QE_IC_HIGH_SIGNAL 0x00000200
+
+#define QE_IC_GRP_W_PRI0_DEST_SIGNAL_HIGH 0x00001000
+#define QE_IC_GRP_W_PRI1_DEST_SIGNAL_HIGH 0x00002000
+#define QE_IC_GRP_X_PRI0_DEST_SIGNAL_HIGH 0x00004000
+#define QE_IC_GRP_X_PRI1_DEST_SIGNAL_HIGH 0x00008000
+#define QE_IC_GRP_Y_PRI0_DEST_SIGNAL_HIGH 0x00010000
+#define QE_IC_GRP_Y_PRI1_DEST_SIGNAL_HIGH 0x00020000
+#define QE_IC_GRP_Z_PRI0_DEST_SIGNAL_HIGH 0x00040000
+#define QE_IC_GRP_Z_PRI1_DEST_SIGNAL_HIGH 0x00080000
+#define QE_IC_GRP_RISCA_PRI0_DEST_SIGNAL_HIGH 0x00100000
+#define QE_IC_GRP_RISCA_PRI1_DEST_SIGNAL_HIGH 0x00200000
+#define QE_IC_GRP_RISCB_PRI0_DEST_SIGNAL_HIGH 0x00400000
+#define QE_IC_GRP_RISCB_PRI1_DEST_SIGNAL_HIGH 0x00800000
+#define QE_IC_GRP_W_DEST_SIGNAL_SHIFT (12)
+
+/* QE interrupt sources groups */
+enum qe_ic_grp_id {
+ QE_IC_GRP_W = 0, /* QE interrupt controller group W */
+ QE_IC_GRP_X, /* QE interrupt controller group X */
+ QE_IC_GRP_Y, /* QE interrupt controller group Y */
+ QE_IC_GRP_Z, /* QE interrupt controller group Z */
+ QE_IC_GRP_RISCA, /* QE interrupt controller RISC group A */
+ QE_IC_GRP_RISCB /* QE interrupt controller RISC group B */
+};
+
+void qe_ic_init(struct device_node *node, unsigned int flags,
+ void (*low_handler)(unsigned int irq, struct irq_desc *desc),
+ void (*high_handler)(unsigned int irq, struct irq_desc *desc));
+void qe_ic_set_highest_priority(unsigned int virq, int high);
+int qe_ic_set_priority(unsigned int virq, unsigned int priority);
+int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high);
+
+struct qe_ic;
+unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic);
+unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic);
+
+static inline void qe_ic_cascade_low_ipic(unsigned int irq,
+ struct irq_desc *desc)
+{
+ struct qe_ic *qe_ic = desc->handler_data;
+ unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
+
+ if (cascade_irq != NO_IRQ)
+ generic_handle_irq(cascade_irq);
+}
+
+static inline void qe_ic_cascade_high_ipic(unsigned int irq,
+ struct irq_desc *desc)
+{
+ struct qe_ic *qe_ic = desc->handler_data;
+ unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
+
+ if (cascade_irq != NO_IRQ)
+ generic_handle_irq(cascade_irq);
+}
+
+static inline void qe_ic_cascade_low_mpic(unsigned int irq,
+ struct irq_desc *desc)
+{
+ struct qe_ic *qe_ic = desc->handler_data;
+ unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
+
+ if (cascade_irq != NO_IRQ)
+ generic_handle_irq(cascade_irq);
+
+ desc->chip->eoi(irq);
+}
+
+static inline void qe_ic_cascade_high_mpic(unsigned int irq,
+ struct irq_desc *desc)
+{
+ struct qe_ic *qe_ic = desc->handler_data;
+ unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
+
+ if (cascade_irq != NO_IRQ)
+ generic_handle_irq(cascade_irq);
+
+ desc->chip->eoi(irq);
+}
+
+static inline void qe_ic_cascade_muxed_mpic(unsigned int irq,
+ struct irq_desc *desc)
+{
+ struct qe_ic *qe_ic = desc->handler_data;
+ unsigned int cascade_irq;
+
+ cascade_irq = qe_ic_get_high_irq(qe_ic);
+ if (cascade_irq == NO_IRQ)
+ cascade_irq = qe_ic_get_low_irq(qe_ic);
+
+ if (cascade_irq != NO_IRQ)
+ generic_handle_irq(cascade_irq);
+
+ desc->chip->eoi(irq);
+}
+
+#endif /* _ASM_POWERPC_QE_IC_H */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
new file mode 100644
index 0000000..c6d1ab6
--- /dev/null
+++ b/arch/powerpc/include/asm/reg.h
@@ -0,0 +1,788 @@
+/*
+ * Contains the definition of registers common to all PowerPC variants.
+ * If a register definition has been changed in a different PowerPC
+ * variant, we will guard it with #ifndef XXX ... #endif, and have the
+ * number used in the Programming Environments Manual For 32-Bit
+ * Implementations of the PowerPC Architecture (a.k.a. Green Book) here.
+ */
+
+#ifndef _ASM_POWERPC_REG_H
+#define _ASM_POWERPC_REG_H
+#ifdef __KERNEL__
+
+#include <linux/stringify.h>
+#include <asm/cputable.h>
+
+/* Pick up Book E specific registers. */
+#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+#include <asm/reg_booke.h>
+#endif /* CONFIG_BOOKE || CONFIG_40x */
+
+#ifdef CONFIG_FSL_EMB_PERFMON
+#include <asm/reg_fsl_emb.h>
+#endif
+
+#ifdef CONFIG_8xx
+#include <asm/reg_8xx.h>
+#endif /* CONFIG_8xx */
+
+#define MSR_SF_LG 63 /* Enable 64 bit mode */
+#define MSR_ISF_LG 61 /* Interrupt 64b mode valid on 630 */
+#define MSR_HV_LG 60 /* Hypervisor state */
+#define MSR_VEC_LG 25 /* Enable AltiVec */
+#define MSR_VSX_LG 23 /* Enable VSX */
+#define MSR_POW_LG 18 /* Enable Power Management */
+#define MSR_WE_LG 18 /* Wait State Enable */
+#define MSR_TGPR_LG 17 /* TLB Update registers in use */
+#define MSR_CE_LG 17 /* Critical Interrupt Enable */
+#define MSR_ILE_LG 16 /* Interrupt Little Endian */
+#define MSR_EE_LG 15 /* External Interrupt Enable */
+#define MSR_PR_LG 14 /* Problem State / Privilege Level */
+#define MSR_FP_LG 13 /* Floating Point enable */
+#define MSR_ME_LG 12 /* Machine Check Enable */
+#define MSR_FE0_LG 11 /* Floating Exception mode 0 */
+#define MSR_SE_LG 10 /* Single Step */
+#define MSR_BE_LG 9 /* Branch Trace */
+#define MSR_DE_LG 9 /* Debug Exception Enable */
+#define MSR_FE1_LG 8 /* Floating Exception mode 1 */
+#define MSR_IP_LG 6 /* Exception prefix 0x000/0xFFF */
+#define MSR_IR_LG 5 /* Instruction Relocate */
+#define MSR_DR_LG 4 /* Data Relocate */
+#define MSR_PE_LG 3 /* Protection Enable */
+#define MSR_PX_LG 2 /* Protection Exclusive Mode */
+#define MSR_PMM_LG 2 /* Performance monitor */
+#define MSR_RI_LG 1 /* Recoverable Exception */
+#define MSR_LE_LG 0 /* Little Endian */
+
+#ifdef __ASSEMBLY__
+#define __MASK(X) (1<<(X))
+#else
+#define __MASK(X) (1UL<<(X))
+#endif
+
+#ifdef CONFIG_PPC64
+#define MSR_SF __MASK(MSR_SF_LG) /* Enable 64 bit mode */
+#define MSR_ISF __MASK(MSR_ISF_LG) /* Interrupt 64b mode valid on 630 */
+#define MSR_HV __MASK(MSR_HV_LG) /* Hypervisor state */
+#else
+/* so tests for these bits fail on 32-bit */
+#define MSR_SF 0
+#define MSR_ISF 0
+#define MSR_HV 0
+#endif
+
+#define MSR_VEC __MASK(MSR_VEC_LG) /* Enable AltiVec */
+#define MSR_VSX __MASK(MSR_VSX_LG) /* Enable VSX */
+#define MSR_POW __MASK(MSR_POW_LG) /* Enable Power Management */
+#define MSR_WE __MASK(MSR_WE_LG) /* Wait State Enable */
+#define MSR_TGPR __MASK(MSR_TGPR_LG) /* TLB Update registers in use */
+#define MSR_CE __MASK(MSR_CE_LG) /* Critical Interrupt Enable */
+#define MSR_ILE __MASK(MSR_ILE_LG) /* Interrupt Little Endian */
+#define MSR_EE __MASK(MSR_EE_LG) /* External Interrupt Enable */
+#define MSR_PR __MASK(MSR_PR_LG) /* Problem State / Privilege Level */
+#define MSR_FP __MASK(MSR_FP_LG) /* Floating Point enable */
+#define MSR_ME __MASK(MSR_ME_LG) /* Machine Check Enable */
+#define MSR_FE0 __MASK(MSR_FE0_LG) /* Floating Exception mode 0 */
+#define MSR_SE __MASK(MSR_SE_LG) /* Single Step */
+#define MSR_BE __MASK(MSR_BE_LG) /* Branch Trace */
+#define MSR_DE __MASK(MSR_DE_LG) /* Debug Exception Enable */
+#define MSR_FE1 __MASK(MSR_FE1_LG) /* Floating Exception mode 1 */
+#define MSR_IP __MASK(MSR_IP_LG) /* Exception prefix 0x000/0xFFF */
+#define MSR_IR __MASK(MSR_IR_LG) /* Instruction Relocate */
+#define MSR_DR __MASK(MSR_DR_LG) /* Data Relocate */
+#define MSR_PE __MASK(MSR_PE_LG) /* Protection Enable */
+#define MSR_PX __MASK(MSR_PX_LG) /* Protection Exclusive Mode */
+#ifndef MSR_PMM
+#define MSR_PMM __MASK(MSR_PMM_LG) /* Performance monitor */
+#endif
+#define MSR_RI __MASK(MSR_RI_LG) /* Recoverable Exception */
+#define MSR_LE __MASK(MSR_LE_LG) /* Little Endian */
+
+#ifdef CONFIG_PPC64
+#define MSR_ MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV
+#define MSR_KERNEL MSR_ | MSR_SF
+
+#define MSR_USER32 MSR_ | MSR_PR | MSR_EE
+#define MSR_USER64 MSR_USER32 | MSR_SF
+
+#else /* 32-bit */
+/* Default MSR for kernel mode. */
+#ifndef MSR_KERNEL /* reg_booke.h also defines this */
+#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR)
+#endif
+
+#define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE)
+#endif
+
+/* Floating Point Status and Control Register (FPSCR) Fields */
+#define FPSCR_FX 0x80000000 /* FPU exception summary */
+#define FPSCR_FEX 0x40000000 /* FPU enabled exception summary */
+#define FPSCR_VX 0x20000000 /* Invalid operation summary */
+#define FPSCR_OX 0x10000000 /* Overflow exception summary */
+#define FPSCR_UX 0x08000000 /* Underflow exception summary */
+#define FPSCR_ZX 0x04000000 /* Zero-divide exception summary */
+#define FPSCR_XX 0x02000000 /* Inexact exception summary */
+#define FPSCR_VXSNAN 0x01000000 /* Invalid op for SNaN */
+#define FPSCR_VXISI 0x00800000 /* Invalid op for Inv - Inv */
+#define FPSCR_VXIDI 0x00400000 /* Invalid op for Inv / Inv */
+#define FPSCR_VXZDZ 0x00200000 /* Invalid op for Zero / Zero */
+#define FPSCR_VXIMZ 0x00100000 /* Invalid op for Inv * Zero */
+#define FPSCR_VXVC 0x00080000 /* Invalid op for Compare */
+#define FPSCR_FR 0x00040000 /* Fraction rounded */
+#define FPSCR_FI 0x00020000 /* Fraction inexact */
+#define FPSCR_FPRF 0x0001f000 /* FPU Result Flags */
+#define FPSCR_FPCC 0x0000f000 /* FPU Condition Codes */
+#define FPSCR_VXSOFT 0x00000400 /* Invalid op for software request */
+#define FPSCR_VXSQRT 0x00000200 /* Invalid op for square root */
+#define FPSCR_VXCVI 0x00000100 /* Invalid op for integer convert */
+#define FPSCR_VE 0x00000080 /* Invalid op exception enable */
+#define FPSCR_OE 0x00000040 /* IEEE overflow exception enable */
+#define FPSCR_UE 0x00000020 /* IEEE underflow exception enable */
+#define FPSCR_ZE 0x00000010 /* IEEE zero divide exception enable */
+#define FPSCR_XE 0x00000008 /* FP inexact exception enable */
+#define FPSCR_NI 0x00000004 /* FPU non IEEE-Mode */
+#define FPSCR_RN 0x00000003 /* FPU rounding control */
+
+/* Special Purpose Registers (SPRNs)*/
+#define SPRN_CTR 0x009 /* Count Register */
+#define SPRN_DSCR 0x11
+#define SPRN_CTRLF 0x088
+#define SPRN_CTRLT 0x098
+#define CTRL_CT 0xc0000000 /* current thread */
+#define CTRL_CT0 0x80000000 /* thread 0 */
+#define CTRL_CT1 0x40000000 /* thread 1 */
+#define CTRL_TE 0x00c00000 /* thread enable */
+#define CTRL_RUNLATCH 0x1
+#define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */
+#define DABR_TRANSLATION (1UL << 2)
+#define SPRN_DABR2 0x13D /* e300 */
+#define SPRN_DABRX 0x3F7 /* Data Address Breakpoint Register Extension */
+#define DABRX_USER (1UL << 0)
+#define DABRX_KERNEL (1UL << 1)
+#define SPRN_DAR 0x013 /* Data Address Register */
+#define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
+#define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
+#define DSISR_NOHPTE 0x40000000 /* no translation found */
+#define DSISR_PROTFAULT 0x08000000 /* protection fault */
+#define DSISR_ISSTORE 0x02000000 /* access was a store */
+#define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
+#define DSISR_NOSEGMENT 0x00200000 /* STAB/SLB miss */
+#define SPRN_TBRL 0x10C /* Time Base Read Lower Register (user, R/O) */
+#define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */
+#define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */
+#define SPRN_TBWU 0x11D /* Time Base Upper Register (super, R/W) */
+#define SPRN_SPURR 0x134 /* Scaled PURR */
+#define SPRN_HIOR 0x137 /* 970 Hypervisor interrupt offset */
+#define SPRN_LPCR 0x13E /* LPAR Control Register */
+#define SPRN_DBAT0L 0x219 /* Data BAT 0 Lower Register */
+#define SPRN_DBAT0U 0x218 /* Data BAT 0 Upper Register */
+#define SPRN_DBAT1L 0x21B /* Data BAT 1 Lower Register */
+#define SPRN_DBAT1U 0x21A /* Data BAT 1 Upper Register */
+#define SPRN_DBAT2L 0x21D /* Data BAT 2 Lower Register */
+#define SPRN_DBAT2U 0x21C /* Data BAT 2 Upper Register */
+#define SPRN_DBAT3L 0x21F /* Data BAT 3 Lower Register */
+#define SPRN_DBAT3U 0x21E /* Data BAT 3 Upper Register */
+#define SPRN_DBAT4L 0x239 /* Data BAT 4 Lower Register */
+#define SPRN_DBAT4U 0x238 /* Data BAT 4 Upper Register */
+#define SPRN_DBAT5L 0x23B /* Data BAT 5 Lower Register */
+#define SPRN_DBAT5U 0x23A /* Data BAT 5 Upper Register */
+#define SPRN_DBAT6L 0x23D /* Data BAT 6 Lower Register */
+#define SPRN_DBAT6U 0x23C /* Data BAT 6 Upper Register */
+#define SPRN_DBAT7L 0x23F /* Data BAT 7 Lower Register */
+#define SPRN_DBAT7U 0x23E /* Data BAT 7 Upper Register */
+
+#define SPRN_DEC 0x016 /* Decrement Register */
+#define SPRN_DER 0x095 /* Debug Enable Register */
+#define DER_RSTE 0x40000000 /* Reset Interrupt */
+#define DER_CHSTPE 0x20000000 /* Check Stop */
+#define DER_MCIE 0x10000000 /* Machine Check Interrupt */
+#define DER_EXTIE 0x02000000 /* External Interrupt */
+#define DER_ALIE 0x01000000 /* Alignment Interrupt */
+#define DER_PRIE 0x00800000 /* Program Interrupt */
+#define DER_FPUVIE 0x00400000 /* FP Unavailable Interrupt */
+#define DER_DECIE 0x00200000 /* Decrementer Interrupt */
+#define DER_SYSIE 0x00040000 /* System Call Interrupt */
+#define DER_TRE 0x00020000 /* Trace Interrupt */
+#define DER_SEIE 0x00004000 /* FP SW Emulation Interrupt */
+#define DER_ITLBMSE 0x00002000 /* Imp. Spec. Instruction TLB Miss */
+#define DER_ITLBERE 0x00001000 /* Imp. Spec. Instruction TLB Error */
+#define DER_DTLBMSE 0x00000800 /* Imp. Spec. Data TLB Miss */
+#define DER_DTLBERE 0x00000400 /* Imp. Spec. Data TLB Error */
+#define DER_LBRKE 0x00000008 /* Load/Store Breakpoint Interrupt */
+#define DER_IBRKE 0x00000004 /* Instruction Breakpoint Interrupt */
+#define DER_EBRKE 0x00000002 /* External Breakpoint Interrupt */
+#define DER_DPIE 0x00000001 /* Dev. Port Nonmaskable Request */
+#define SPRN_DMISS 0x3D0 /* Data TLB Miss Register */
+#define SPRN_EAR 0x11A /* External Address Register */
+#define SPRN_HASH1 0x3D2 /* Primary Hash Address Register */
+#define SPRN_HASH2 0x3D3 /* Secondary Hash Address Register */
+#define SPRN_HID0 0x3F0 /* Hardware Implementation Register 0 */
+#define HID0_EMCP (1<<31) /* Enable Machine Check pin */
+#define HID0_EBA (1<<29) /* Enable Bus Address Parity */
+#define HID0_EBD (1<<28) /* Enable Bus Data Parity */
+#define HID0_SBCLK (1<<27)
+#define HID0_EICE (1<<26)
+#define HID0_TBEN (1<<26) /* Timebase enable - 745x */
+#define HID0_ECLK (1<<25)
+#define HID0_PAR (1<<24)
+#define HID0_STEN (1<<24) /* Software table search enable - 745x */
+#define HID0_HIGH_BAT (1<<23) /* Enable high BATs - 7455 */
+#define HID0_DOZE (1<<23)
+#define HID0_NAP (1<<22)
+#define HID0_SLEEP (1<<21)
+#define HID0_DPM (1<<20)
+#define HID0_BHTCLR (1<<18) /* Clear branch history table - 7450 */
+#define HID0_XAEN (1<<17) /* Extended addressing enable - 7450 */
+#define HID0_NHR (1<<16) /* Not hard reset (software bit-7450)*/
+#define HID0_ICE (1<<15) /* Instruction Cache Enable */
+#define HID0_DCE (1<<14) /* Data Cache Enable */
+#define HID0_ILOCK (1<<13) /* Instruction Cache Lock */
+#define HID0_DLOCK (1<<12) /* Data Cache Lock */
+#define HID0_ICFI (1<<11) /* Instr. Cache Flash Invalidate */
+#define HID0_DCI (1<<10) /* Data Cache Invalidate */
+#define HID0_SPD (1<<9) /* Speculative disable */
+#define HID0_DAPUEN (1<<8) /* Debug APU enable */
+#define HID0_SGE (1<<7) /* Store Gathering Enable */
+#define HID0_SIED (1<<7) /* Serial Instr. Execution [Disable] */
+#define HID0_DCFA (1<<6) /* Data Cache Flush Assist */
+#define HID0_LRSTK (1<<4) /* Link register stack - 745x */
+#define HID0_BTIC (1<<5) /* Branch Target Instr Cache Enable */
+#define HID0_ABE (1<<3) /* Address Broadcast Enable */
+#define HID0_FOLD (1<<3) /* Branch Folding enable - 745x */
+#define HID0_BHTE (1<<2) /* Branch History Table Enable */
+#define HID0_BTCD (1<<1) /* Branch target cache disable */
+#define HID0_NOPDST (1<<1) /* No-op dst, dstt, etc. instr. */
+#define HID0_NOPTI (1<<0) /* No-op dcbt and dcbst instr. */
+
+#define SPRN_HID1 0x3F1 /* Hardware Implementation Register 1 */
+#define HID1_EMCP (1<<31) /* 7450 Machine Check Pin Enable */
+#define HID1_DFS (1<<22) /* 7447A Dynamic Frequency Scaling */
+#define HID1_PC0 (1<<16) /* 7450 PLL_CFG[0] */
+#define HID1_PC1 (1<<15) /* 7450 PLL_CFG[1] */
+#define HID1_PC2 (1<<14) /* 7450 PLL_CFG[2] */
+#define HID1_PC3 (1<<13) /* 7450 PLL_CFG[3] */
+#define HID1_SYNCBE (1<<11) /* 7450 ABE for sync, eieio */
+#define HID1_ABE (1<<10) /* 7450 Address Broadcast Enable */
+#define HID1_PS (1<<16) /* 750FX PLL selection */
+#define SPRN_HID2 0x3F8 /* Hardware Implementation Register 2 */
+#define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */
+#define SPRN_IABR2 0x3FA /* 83xx */
+#define SPRN_IBCR 0x135 /* 83xx Insn Breakpoint Control Reg */
+#define SPRN_HID4 0x3F4 /* 970 HID4 */
+#define SPRN_HID5 0x3F6 /* 970 HID5 */
+#define SPRN_HID6 0x3F9 /* BE HID 6 */
+#define HID6_LB (0x0F<<12) /* Concurrent Large Page Modes */
+#define HID6_DLP (1<<20) /* Disable all large page modes (4K only) */
+#define SPRN_TSC_CELL 0x399 /* Thread switch control on Cell */
+#define TSC_CELL_DEC_ENABLE_0 0x400000 /* Decrementer Interrupt */
+#define TSC_CELL_DEC_ENABLE_1 0x200000 /* Decrementer Interrupt */
+#define TSC_CELL_EE_ENABLE 0x100000 /* External Interrupt */
+#define TSC_CELL_EE_BOOST 0x080000 /* External Interrupt Boost */
+#define SPRN_TSC 0x3FD /* Thread switch control on others */
+#define SPRN_TST 0x3FC /* Thread switch timeout on others */
+#if !defined(SPRN_IAC1) && !defined(SPRN_IAC2)
+#define SPRN_IAC1 0x3F4 /* Instruction Address Compare 1 */
+#define SPRN_IAC2 0x3F5 /* Instruction Address Compare 2 */
+#endif
+#define SPRN_IBAT0L 0x211 /* Instruction BAT 0 Lower Register */
+#define SPRN_IBAT0U 0x210 /* Instruction BAT 0 Upper Register */
+#define SPRN_IBAT1L 0x213 /* Instruction BAT 1 Lower Register */
+#define SPRN_IBAT1U 0x212 /* Instruction BAT 1 Upper Register */
+#define SPRN_IBAT2L 0x215 /* Instruction BAT 2 Lower Register */
+#define SPRN_IBAT2U 0x214 /* Instruction BAT 2 Upper Register */
+#define SPRN_IBAT3L 0x217 /* Instruction BAT 3 Lower Register */
+#define SPRN_IBAT3U 0x216 /* Instruction BAT 3 Upper Register */
+#define SPRN_IBAT4L 0x231 /* Instruction BAT 4 Lower Register */
+#define SPRN_IBAT4U 0x230 /* Instruction BAT 4 Upper Register */
+#define SPRN_IBAT5L 0x233 /* Instruction BAT 5 Lower Register */
+#define SPRN_IBAT5U 0x232 /* Instruction BAT 5 Upper Register */
+#define SPRN_IBAT6L 0x235 /* Instruction BAT 6 Lower Register */
+#define SPRN_IBAT6U 0x234 /* Instruction BAT 6 Upper Register */
+#define SPRN_IBAT7L 0x237 /* Instruction BAT 7 Lower Register */
+#define SPRN_IBAT7U 0x236 /* Instruction BAT 7 Upper Register */
+#define SPRN_ICMP 0x3D5 /* Instruction TLB Compare Register */
+#define SPRN_ICTC 0x3FB /* Instruction Cache Throttling Control Reg */
+#define SPRN_ICTRL 0x3F3 /* 1011 7450 icache and interrupt ctrl */
+#define ICTRL_EICE 0x08000000 /* enable icache parity errs */
+#define ICTRL_EDC 0x04000000 /* enable dcache parity errs */
+#define ICTRL_EICP 0x00000100 /* enable icache par. check */
+#define SPRN_IMISS 0x3D4 /* Instruction TLB Miss Register */
+#define SPRN_IMMR 0x27E /* Internal Memory Map Register */
+#define SPRN_L2CR 0x3F9 /* Level 2 Cache Control Register */
+#define SPRN_L2CR2 0x3f8
+#define L2CR_L2E 0x80000000 /* L2 enable */
+#define L2CR_L2PE 0x40000000 /* L2 parity enable */
+#define L2CR_L2SIZ_MASK 0x30000000 /* L2 size mask */
+#define L2CR_L2SIZ_256KB 0x10000000 /* L2 size 256KB */
+#define L2CR_L2SIZ_512KB 0x20000000 /* L2 size 512KB */
+#define L2CR_L2SIZ_1MB 0x30000000 /* L2 size 1MB */
+#define L2CR_L2CLK_MASK 0x0e000000 /* L2 clock mask */
+#define L2CR_L2CLK_DISABLED 0x00000000 /* L2 clock disabled */
+#define L2CR_L2CLK_DIV1 0x02000000 /* L2 clock / 1 */
+#define L2CR_L2CLK_DIV1_5 0x04000000 /* L2 clock / 1.5 */
+#define L2CR_L2CLK_DIV2 0x08000000 /* L2 clock / 2 */
+#define L2CR_L2CLK_DIV2_5 0x0a000000 /* L2 clock / 2.5 */
+#define L2CR_L2CLK_DIV3 0x0c000000 /* L2 clock / 3 */
+#define L2CR_L2RAM_MASK 0x01800000 /* L2 RAM type mask */
+#define L2CR_L2RAM_FLOW 0x00000000 /* L2 RAM flow through */
+#define L2CR_L2RAM_PIPE 0x01000000 /* L2 RAM pipelined */
+#define L2CR_L2RAM_PIPE_LW 0x01800000 /* L2 RAM pipelined latewr */
+#define L2CR_L2DO 0x00400000 /* L2 data only */
+#define L2CR_L2I 0x00200000 /* L2 global invalidate */
+#define L2CR_L2CTL 0x00100000 /* L2 RAM control */
+#define L2CR_L2WT 0x00080000 /* L2 write-through */
+#define L2CR_L2TS 0x00040000 /* L2 test support */
+#define L2CR_L2OH_MASK 0x00030000 /* L2 output hold mask */
+#define L2CR_L2OH_0_5 0x00000000 /* L2 output hold 0.5 ns */
+#define L2CR_L2OH_1_0 0x00010000 /* L2 output hold 1.0 ns */
+#define L2CR_L2SL 0x00008000 /* L2 DLL slow */
+#define L2CR_L2DF 0x00004000 /* L2 differential clock */
+#define L2CR_L2BYP 0x00002000 /* L2 DLL bypass */
+#define L2CR_L2IP 0x00000001 /* L2 GI in progress */
+#define L2CR_L2IO_745x 0x00100000 /* L2 instr. only (745x) */
+#define L2CR_L2DO_745x 0x00010000 /* L2 data only (745x) */
+#define L2CR_L2REP_745x 0x00001000 /* L2 repl. algorithm (745x) */
+#define L2CR_L2HWF_745x 0x00000800 /* L2 hardware flush (745x) */
+#define SPRN_L3CR 0x3FA /* Level 3 Cache Control Register */
+#define L3CR_L3E 0x80000000 /* L3 enable */
+#define L3CR_L3PE 0x40000000 /* L3 data parity enable */
+#define L3CR_L3APE 0x20000000 /* L3 addr parity enable */
+#define L3CR_L3SIZ 0x10000000 /* L3 size */
+#define L3CR_L3CLKEN 0x08000000 /* L3 clock enable */
+#define L3CR_L3RES 0x04000000 /* L3 special reserved bit */
+#define L3CR_L3CLKDIV 0x03800000 /* L3 clock divisor */
+#define L3CR_L3IO 0x00400000 /* L3 instruction only */
+#define L3CR_L3SPO 0x00040000 /* L3 sample point override */
+#define L3CR_L3CKSP 0x00030000 /* L3 clock sample point */
+#define L3CR_L3PSP 0x0000e000 /* L3 P-clock sample point */
+#define L3CR_L3REP 0x00001000 /* L3 replacement algorithm */
+#define L3CR_L3HWF 0x00000800 /* L3 hardware flush */
+#define L3CR_L3I 0x00000400 /* L3 global invalidate */
+#define L3CR_L3RT 0x00000300 /* L3 SRAM type */
+#define L3CR_L3NIRCA 0x00000080 /* L3 non-integer ratio clock adj. */
+#define L3CR_L3DO 0x00000040 /* L3 data only mode */
+#define L3CR_PMEN 0x00000004 /* L3 private memory enable */
+#define L3CR_PMSIZ 0x00000001 /* L3 private memory size */
+
+#define SPRN_MSSCR0 0x3f6 /* Memory Subsystem Control Register 0 */
+#define SPRN_MSSSR0 0x3f7 /* Memory Subsystem Status Register 0 */
+#define SPRN_LDSTCR 0x3f8 /* Load/Store control register */
+#define SPRN_LDSTDB 0x3f4 /* */
+#define SPRN_LR 0x008 /* Link Register */
+#ifndef SPRN_PIR
+#define SPRN_PIR 0x3FF /* Processor Identification Register */
+#endif
+#define SPRN_PTEHI 0x3D5 /* 981 7450 PTE HI word (S/W TLB load) */
+#define SPRN_PTELO 0x3D6 /* 982 7450 PTE LO word (S/W TLB load) */
+#define SPRN_PURR 0x135 /* Processor Utilization of Resources Reg */
+#define SPRN_PVR 0x11F /* Processor Version Register */
+#define SPRN_RPA 0x3D6 /* Required Physical Address Register */
+#define SPRN_SDA 0x3BF /* Sampled Data Address Register */
+#define SPRN_SDR1 0x019 /* MMU Hash Base Register */
+#define SPRN_ASR 0x118 /* Address Space Register */
+#define SPRN_SIA 0x3BB /* Sampled Instruction Address Register */
+#define SPRN_SPRG0 0x110 /* Special Purpose Register General 0 */
+#define SPRN_SPRG1 0x111 /* Special Purpose Register General 1 */
+#define SPRN_SPRG2 0x112 /* Special Purpose Register General 2 */
+#define SPRN_SPRG3 0x113 /* Special Purpose Register General 3 */
+#define SPRN_SPRG4 0x114 /* Special Purpose Register General 4 */
+#define SPRN_SPRG5 0x115 /* Special Purpose Register General 5 */
+#define SPRN_SPRG6 0x116 /* Special Purpose Register General 6 */
+#define SPRN_SPRG7 0x117 /* Special Purpose Register General 7 */
+#define SPRN_SRR0 0x01A /* Save/Restore Register 0 */
+#define SPRN_SRR1 0x01B /* Save/Restore Register 1 */
+#define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */
+#define SRR1_WAKERESET 0x00380000 /* System reset */
+#define SRR1_WAKESYSERR 0x00300000 /* System error */
+#define SRR1_WAKEEE 0x00200000 /* External interrupt */
+#define SRR1_WAKEMT 0x00280000 /* mtctrl */
+#define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */
+#define SRR1_WAKETHERM 0x00100000 /* Thermal management interrupt */
+#define SPRN_HSRR0 0x13A /* Save/Restore Register 0 */
+#define SPRN_HSRR1 0x13B /* Save/Restore Register 1 */
+
+#define SPRN_TBCTL 0x35f /* PA6T Timebase control register */
+#define TBCTL_FREEZE 0x0000000000000000ull /* Freeze all tbs */
+#define TBCTL_RESTART 0x0000000100000000ull /* Restart all tbs */
+#define TBCTL_UPDATE_UPPER 0x0000000200000000ull /* Set upper 32 bits */
+#define TBCTL_UPDATE_LOWER 0x0000000300000000ull /* Set lower 32 bits */
+
+#ifndef SPRN_SVR
+#define SPRN_SVR 0x11E /* System Version Register */
+#endif
+#define SPRN_THRM1 0x3FC /* Thermal Management Register 1 */
+/* these bits were defined in inverted endian sense originally, ugh, confusing */
+#define THRM1_TIN (1 << 31)
+#define THRM1_TIV (1 << 30)
+#define THRM1_THRES(x) ((x&0x7f)<<23)
+#define THRM3_SITV(x) ((x&0x3fff)<<1)
+#define THRM1_TID (1<<2)
+#define THRM1_TIE (1<<1)
+#define THRM1_V (1<<0)
+#define SPRN_THRM2 0x3FD /* Thermal Management Register 2 */
+#define SPRN_THRM3 0x3FE /* Thermal Management Register 3 */
+#define THRM3_E (1<<0)
+#define SPRN_TLBMISS 0x3D4 /* 980 7450 TLB Miss Register */
+#define SPRN_UMMCR0 0x3A8 /* User Monitor Mode Control Register 0 */
+#define SPRN_UMMCR1 0x3AC /* User Monitor Mode Control Register 1 */
+#define SPRN_UPMC1 0x3A9 /* User Performance Counter Register 1 */
+#define SPRN_UPMC2 0x3AA /* User Performance Counter Register 2 */
+#define SPRN_UPMC3 0x3AD /* User Performance Counter Register 3 */
+#define SPRN_UPMC4 0x3AE /* User Performance Counter Register 4 */
+#define SPRN_USIA 0x3AB /* User Sampled Instruction Address Register */
+#define SPRN_VRSAVE 0x100 /* Vector Register Save Register */
+#define SPRN_XER 0x001 /* Fixed Point Exception Register */
+
+#define SPRN_SCOMC 0x114 /* SCOM Access Control */
+#define SPRN_SCOMD 0x115 /* SCOM Access DATA */
+
+/* Performance monitor SPRs */
+#ifdef CONFIG_PPC64
+#define SPRN_MMCR0 795
+#define MMCR0_FC 0x80000000UL /* freeze counters */
+#define MMCR0_FCS 0x40000000UL /* freeze in supervisor state */
+#define MMCR0_KERNEL_DISABLE MMCR0_FCS
+#define MMCR0_FCP 0x20000000UL /* freeze in problem state */
+#define MMCR0_PROBLEM_DISABLE MMCR0_FCP
+#define MMCR0_FCM1 0x10000000UL /* freeze counters while MSR mark = 1 */
+#define MMCR0_FCM0 0x08000000UL /* freeze counters while MSR mark = 0 */
+#define MMCR0_PMXE 0x04000000UL /* performance monitor exception enable */
+#define MMCR0_FCECE 0x02000000UL /* freeze ctrs on enabled cond or event */
+#define MMCR0_TBEE 0x00400000UL /* time base exception enable */
+#define MMCR0_PMC1CE 0x00008000UL /* PMC1 count enable*/
+#define MMCR0_PMCjCE 0x00004000UL /* PMCj count enable*/
+#define MMCR0_TRIGGER 0x00002000UL /* TRIGGER enable */
+#define MMCR0_PMAO 0x00000080UL /* performance monitor alert has occurred, set to 0 after handling exception */
+#define MMCR0_SHRFC 0x00000040UL /* Share freeze conditions between threads */
+#define MMCR0_FCTI 0x00000008UL /* freeze counters in tags inactive mode */
+#define MMCR0_FCTA 0x00000004UL /* freeze counters in tags active mode */
+#define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */
+#define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */
+#define SPRN_MMCR1 798
+#define SPRN_MMCRA 0x312
+#define MMCRA_SIHV 0x10000000UL /* state of MSR HV when SIAR set */
+#define MMCRA_SIPR 0x08000000UL /* state of MSR PR when SIAR set */
+#define MMCRA_SLOT 0x07000000UL /* SLOT bits (37-39) */
+#define MMCRA_SLOT_SHIFT 24
+#define MMCRA_SAMPLE_ENABLE 0x00000001UL /* enable sampling */
+#define POWER6_MMCRA_SIHV 0x0000040000000000ULL
+#define POWER6_MMCRA_SIPR 0x0000020000000000ULL
+#define POWER6_MMCRA_THRM 0x00000020UL
+#define POWER6_MMCRA_OTHER 0x0000000EUL
+#define SPRN_PMC1 787
+#define SPRN_PMC2 788
+#define SPRN_PMC3 789
+#define SPRN_PMC4 790
+#define SPRN_PMC5 791
+#define SPRN_PMC6 792
+#define SPRN_PMC7 793
+#define SPRN_PMC8 794
+#define SPRN_SIAR 780
+#define SPRN_SDAR 781
+
+#define SPRN_PA6T_MMCR0 795
+#define PA6T_MMCR0_EN0 0x0000000000000001UL
+#define PA6T_MMCR0_EN1 0x0000000000000002UL
+#define PA6T_MMCR0_EN2 0x0000000000000004UL
+#define PA6T_MMCR0_EN3 0x0000000000000008UL
+#define PA6T_MMCR0_EN4 0x0000000000000010UL
+#define PA6T_MMCR0_EN5 0x0000000000000020UL
+#define PA6T_MMCR0_SUPEN 0x0000000000000040UL
+#define PA6T_MMCR0_PREN 0x0000000000000080UL
+#define PA6T_MMCR0_HYPEN 0x0000000000000100UL
+#define PA6T_MMCR0_FCM0 0x0000000000000200UL
+#define PA6T_MMCR0_FCM1 0x0000000000000400UL
+#define PA6T_MMCR0_INTGEN 0x0000000000000800UL
+#define PA6T_MMCR0_INTEN0 0x0000000000001000UL
+#define PA6T_MMCR0_INTEN1 0x0000000000002000UL
+#define PA6T_MMCR0_INTEN2 0x0000000000004000UL
+#define PA6T_MMCR0_INTEN3 0x0000000000008000UL
+#define PA6T_MMCR0_INTEN4 0x0000000000010000UL
+#define PA6T_MMCR0_INTEN5 0x0000000000020000UL
+#define PA6T_MMCR0_DISCNT 0x0000000000040000UL
+#define PA6T_MMCR0_UOP 0x0000000000080000UL
+#define PA6T_MMCR0_TRG 0x0000000000100000UL
+#define PA6T_MMCR0_TRGEN 0x0000000000200000UL
+#define PA6T_MMCR0_TRGREG 0x0000000001600000UL
+#define PA6T_MMCR0_SIARLOG 0x0000000002000000UL
+#define PA6T_MMCR0_SDARLOG 0x0000000004000000UL
+#define PA6T_MMCR0_PROEN 0x0000000008000000UL
+#define PA6T_MMCR0_PROLOG 0x0000000010000000UL
+#define PA6T_MMCR0_DAMEN2 0x0000000020000000UL
+#define PA6T_MMCR0_DAMEN3 0x0000000040000000UL
+#define PA6T_MMCR0_DAMEN4 0x0000000080000000UL
+#define PA6T_MMCR0_DAMEN5 0x0000000100000000UL
+#define PA6T_MMCR0_DAMSEL2 0x0000000200000000UL
+#define PA6T_MMCR0_DAMSEL3 0x0000000400000000UL
+#define PA6T_MMCR0_DAMSEL4 0x0000000800000000UL
+#define PA6T_MMCR0_DAMSEL5 0x0000001000000000UL
+#define PA6T_MMCR0_HANDDIS 0x0000002000000000UL
+#define PA6T_MMCR0_PCTEN 0x0000004000000000UL
+#define PA6T_MMCR0_SOCEN 0x0000008000000000UL
+#define PA6T_MMCR0_SOCMOD 0x0000010000000000UL
+
+#define SPRN_PA6T_MMCR1 798
+#define PA6T_MMCR1_ES2 0x00000000000000ffUL
+#define PA6T_MMCR1_ES3 0x000000000000ff00UL
+#define PA6T_MMCR1_ES4 0x0000000000ff0000UL
+#define PA6T_MMCR1_ES5 0x00000000ff000000UL
+
+#define SPRN_PA6T_UPMC0 771 /* User PerfMon Counter 0 */
+#define SPRN_PA6T_UPMC1 772 /* ... */
+#define SPRN_PA6T_UPMC2 773
+#define SPRN_PA6T_UPMC3 774
+#define SPRN_PA6T_UPMC4 775
+#define SPRN_PA6T_UPMC5 776
+#define SPRN_PA6T_UMMCR0 779 /* User Monitor Mode Control Register 0 */
+#define SPRN_PA6T_SIAR 780 /* Sampled Instruction Address */
+#define SPRN_PA6T_UMMCR1 782 /* User Monitor Mode Control Register 1 */
+#define SPRN_PA6T_SIER 785 /* Sampled Instruction Event Register */
+#define SPRN_PA6T_PMC0 787
+#define SPRN_PA6T_PMC1 788
+#define SPRN_PA6T_PMC2 789
+#define SPRN_PA6T_PMC3 790
+#define SPRN_PA6T_PMC4 791
+#define SPRN_PA6T_PMC5 792
+#define SPRN_PA6T_TSR0 793 /* Timestamp Register 0 */
+#define SPRN_PA6T_TSR1 794 /* Timestamp Register 1 */
+#define SPRN_PA6T_TSR2 799 /* Timestamp Register 2 */
+#define SPRN_PA6T_TSR3 784 /* Timestamp Register 3 */
+
+#define SPRN_PA6T_IER 981 /* Icache Error Register */
+#define SPRN_PA6T_DER 982 /* Dcache Error Register */
+#define SPRN_PA6T_BER 862 /* BIU Error Address Register */
+#define SPRN_PA6T_MER 849 /* MMU Error Register */
+
+#define SPRN_PA6T_IMA0 880 /* Instruction Match Array 0 */
+#define SPRN_PA6T_IMA1 881 /* ... */
+#define SPRN_PA6T_IMA2 882
+#define SPRN_PA6T_IMA3 883
+#define SPRN_PA6T_IMA4 884
+#define SPRN_PA6T_IMA5 885
+#define SPRN_PA6T_IMA6 886
+#define SPRN_PA6T_IMA7 887
+#define SPRN_PA6T_IMA8 888
+#define SPRN_PA6T_IMA9 889
+#define SPRN_PA6T_BTCR 978 /* Breakpoint and Tagging Control Register */
+#define SPRN_PA6T_IMAAT 979 /* Instruction Match Array Action Table */
+#define SPRN_PA6T_PCCR 1019 /* Power Counter Control Register */
+#define SPRN_BKMK 1020 /* Cell Bookmark Register */
+#define SPRN_PA6T_RPCCR 1021 /* Retire PC Trace Control Register */
+
+
+#else /* 32-bit */
+#define SPRN_MMCR0 952 /* Monitor Mode Control Register 0 */
+#define MMCR0_FC 0x80000000UL /* freeze counters */
+#define MMCR0_FCS 0x40000000UL /* freeze in supervisor state */
+#define MMCR0_FCP 0x20000000UL /* freeze in problem state */
+#define MMCR0_FCM1 0x10000000UL /* freeze counters while MSR mark = 1 */
+#define MMCR0_FCM0 0x08000000UL /* freeze counters while MSR mark = 0 */
+#define MMCR0_PMXE 0x04000000UL /* performance monitor exception enable */
+#define MMCR0_FCECE 0x02000000UL /* freeze ctrs on enabled cond or event */
+#define MMCR0_TBEE 0x00400000UL /* time base exception enable */
+#define MMCR0_PMC1CE 0x00008000UL /* PMC1 count enable*/
+#define MMCR0_PMCnCE 0x00004000UL /* count enable for all but PMC 1*/
+#define MMCR0_TRIGGER 0x00002000UL /* TRIGGER enable */
+#define MMCR0_PMC1SEL 0x00001fc0UL /* PMC 1 Event */
+#define MMCR0_PMC2SEL 0x0000003fUL /* PMC 2 Event */
+
+#define SPRN_MMCR1 956
+#define MMCR1_PMC3SEL 0xf8000000UL /* PMC 3 Event */
+#define MMCR1_PMC4SEL 0x07c00000UL /* PMC 4 Event */
+#define MMCR1_PMC5SEL 0x003e0000UL /* PMC 5 Event */
+#define MMCR1_PMC6SEL 0x0001f800UL /* PMC 6 Event */
+#define SPRN_MMCR2 944
+#define SPRN_PMC1 953 /* Performance Counter Register 1 */
+#define SPRN_PMC2 954 /* Performance Counter Register 2 */
+#define SPRN_PMC3 957 /* Performance Counter Register 3 */
+#define SPRN_PMC4 958 /* Performance Counter Register 4 */
+#define SPRN_PMC5 945 /* Performance Counter Register 5 */
+#define SPRN_PMC6 946 /* Performance Counter Register 6 */
+
+#define SPRN_SIAR 955 /* Sampled Instruction Address Register */
+
+/* Bit definitions for MMCR0 and PMC1 / PMC2. */
+#define MMCR0_PMC1_CYCLES (1 << 7)
+#define MMCR0_PMC1_ICACHEMISS (5 << 7)
+#define MMCR0_PMC1_DTLB (6 << 7)
+#define MMCR0_PMC2_DCACHEMISS 0x6
+#define MMCR0_PMC2_CYCLES 0x1
+#define MMCR0_PMC2_ITLB 0x7
+#define MMCR0_PMC2_LOADMISSTIME 0x5
+#endif
+
+/*
+ * An mtfsf instruction with the L bit set. On CPUs that support this, a
+ * full 64 bits of FPSCR is restored; on other CPUs the L bit is ignored.
+ *
+ * Until binutils gets the new form of mtfsf, hardwire the instruction.
+ */
+#ifdef CONFIG_PPC64
+#define MTFSF_L(REG) \
+ .long (0xfc00058e | ((0xff) << 17) | ((REG) << 11) | (1 << 25))
+#else
+#define MTFSF_L(REG) mtfsf 0xff, (REG)
+#endif
+
+/* Processor Version Register (PVR) field extraction */
+
+#define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF) /* Version field */
+#define PVR_REV(pvr) (((pvr) >> 0) & 0xFFFF) /* Revision field */
+
+#define __is_processor(pv) (PVR_VER(mfspr(SPRN_PVR)) == (pv))
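
A short usage sketch for the PVR helpers above (mfspr() is defined further down in this header; the log text is illustrative):

    unsigned long pvr = mfspr(SPRN_PVR);

    printk(KERN_INFO "PVR %08lx: version 0x%04lx, revision 0x%04lx\n",
           pvr, PVR_VER(pvr), PVR_REV(pvr));

    if (__is_processor(PV_970FX))
            printk(KERN_INFO "Running on a 970FX\n");
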
+
+/*
+ * IBM has further subdivided the standard PowerPC 16-bit version and
+ * revision subfields of the PVR for the PowerPC 403s into the following:
+ */
+
+#define PVR_FAM(pvr) (((pvr) >> 20) & 0xFFF) /* Family field */
+#define PVR_MEM(pvr) (((pvr) >> 16) & 0xF) /* Member field */
+#define PVR_CORE(pvr) (((pvr) >> 12) & 0xF) /* Core field */
+#define PVR_CFG(pvr) (((pvr) >> 8) & 0xF) /* Configuration field */
+#define PVR_MAJ(pvr) (((pvr) >> 4) & 0xF) /* Major revision field */
+#define PVR_MIN(pvr) (((pvr) >> 0) & 0xF) /* Minor revision field */
+
+/* Processor Version Numbers */
+
+#define PVR_403GA 0x00200000
+#define PVR_403GB 0x00200100
+#define PVR_403GC 0x00200200
+#define PVR_403GCX 0x00201400
+#define PVR_405GP 0x40110000
+#define PVR_STB03XXX 0x40310000
+#define PVR_NP405H 0x41410000
+#define PVR_NP405L 0x41610000
+#define PVR_601 0x00010000
+#define PVR_602 0x00050000
+#define PVR_603 0x00030000
+#define PVR_603e 0x00060000
+#define PVR_603ev 0x00070000
+#define PVR_603r 0x00071000
+#define PVR_604 0x00040000
+#define PVR_604e 0x00090000
+#define PVR_604r 0x000A0000
+#define PVR_620 0x00140000
+#define PVR_740 0x00080000
+#define PVR_750 PVR_740
+#define PVR_740P 0x10080000
+#define PVR_750P PVR_740P
+#define PVR_7400 0x000C0000
+#define PVR_7410 0x800C0000
+#define PVR_7450 0x80000000
+#define PVR_8540 0x80200000
+#define PVR_8560 0x80200000
+/*
+ * For the 8xx processors, all of them report the same PVR family for
+ * the PowerPC core. The various versions of these processors must be
+ * differentiated by the version number in the Communication Processor
+ * Module (CPM).
+ */
+#define PVR_821 0x00500000
+#define PVR_823 PVR_821
+#define PVR_850 PVR_821
+#define PVR_860 PVR_821
+#define PVR_8240 0x00810100
+#define PVR_8245 0x80811014
+#define PVR_8260 PVR_8240
+
+/* 64-bit processors */
+/* XXX the prefix should be PVR_, we'll do a global sweep to fix it one day */
+#define PV_NORTHSTAR 0x0033
+#define PV_PULSAR 0x0034
+#define PV_POWER4 0x0035
+#define PV_ICESTAR 0x0036
+#define PV_SSTAR 0x0037
+#define PV_POWER4p 0x0038
+#define PV_970 0x0039
+#define PV_POWER5 0x003A
+#define PV_POWER5p 0x003B
+#define PV_970FX 0x003C
+#define PV_630 0x0040
+#define PV_630p 0x0041
+#define PV_970MP 0x0044
+#define PV_970GX 0x0045
+#define PV_BE 0x0070
+#define PV_PA6T 0x0090
+
+/* Macros for setting and retrieving special purpose registers */
+#ifndef __ASSEMBLY__
+#define mfmsr() ({unsigned long rval; \
+ asm volatile("mfmsr %0" : "=r" (rval)); rval;})
+#ifdef CONFIG_PPC64
+#define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \
+ : : "r" (v))
+#define mtmsrd(v) __mtmsrd((v), 0)
+#define mtmsr(v) mtmsrd(v)
+#else
+#define mtmsr(v) asm volatile("mtmsr %0" : : "r" (v))
+#endif
+
+#define mfspr(rn) ({unsigned long rval; \
+ asm volatile("mfspr %0," __stringify(rn) \
+ : "=r" (rval)); rval;})
+#define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v))
+
+#ifdef __powerpc64__
+#ifdef CONFIG_PPC_CELL
+#define mftb() ({unsigned long rval; \
+ asm volatile( \
+ "90: mftb %0;\n" \
+ "97: cmpwi %0,0;\n" \
+ " beq- 90b;\n" \
+ "99:\n" \
+ ".section __ftr_fixup,\"a\"\n" \
+ ".align 3\n" \
+ "98:\n" \
+ " .llong %1\n" \
+ " .llong %1\n" \
+ " .llong 97b-98b\n" \
+ " .llong 99b-98b\n" \
+ " .llong 0\n" \
+ " .llong 0\n" \
+ ".previous" \
+ : "=r" (rval) : "i" (CPU_FTR_CELL_TB_BUG)); rval;})
+#else
+#define mftb() ({unsigned long rval; \
+ asm volatile("mftb %0" : "=r" (rval)); rval;})
+#endif /* !CONFIG_PPC_CELL */
+
+#else /* __powerpc64__ */
+
+#define mftbl() ({unsigned long rval; \
+ asm volatile("mftbl %0" : "=r" (rval)); rval;})
+#define mftbu() ({unsigned long rval; \
+ asm volatile("mftbu %0" : "=r" (rval)); rval;})
+#endif /* !__powerpc64__ */
+
+#define mttbl(v) asm volatile("mttbl %0":: "r"(v))
+#define mttbu(v) asm volatile("mttbu %0":: "r"(v))
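
On 32-bit parts the 64-bit timebase must be read in two halves; the usual idiom rereads the upper word to guard against a carry between the two reads (sketch only, not part of this header):

    static inline u64 example_read_timebase(void)
    {
            unsigned long hi, lo;

            do {
                    hi = mftbu();
                    lo = mftbl();
            } while (mftbu() != hi);

            return ((u64)hi << 32) | lo;
    }
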
+
+#ifdef CONFIG_PPC32
+#define mfsrin(v) ({unsigned int rval; \
+ asm volatile("mfsrin %0,%1" : "=r" (rval) : "r" (v)); \
+ rval;})
+#endif
+
+#define proc_trap() asm volatile("trap")
+
+#ifdef CONFIG_PPC64
+
+extern void ppc64_runlatch_on(void);
+extern void ppc64_runlatch_off(void);
+
+extern unsigned long scom970_read(unsigned int address);
+extern void scom970_write(unsigned int address, unsigned long value);
+
+#else
+#define ppc64_runlatch_on()
+#define ppc64_runlatch_off()
+
+#endif /* CONFIG_PPC64 */
+
+#define __get_SP() ({unsigned long sp; \
+ asm volatile("mr %0,1": "=r" (sp)); sp;})
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_REG_H */
diff --git a/arch/powerpc/include/asm/reg_8xx.h b/arch/powerpc/include/asm/reg_8xx.h
new file mode 100644
index 0000000..e8ea346
--- /dev/null
+++ b/arch/powerpc/include/asm/reg_8xx.h
@@ -0,0 +1,42 @@
+/*
+ * Contains register definitions common to PowerPC 8xx CPUs.
+ */
+#ifndef _ASM_POWERPC_REG_8xx_H
+#define _ASM_POWERPC_REG_8xx_H
+
+/* Cache control on the MPC8xx is provided through some additional
+ * special purpose registers.
+ */
+#define SPRN_IC_CST 560 /* Instruction cache control/status */
+#define SPRN_IC_ADR 561 /* Address needed for some commands */
+#define SPRN_IC_DAT 562 /* Read-only data register */
+#define SPRN_DC_CST 568 /* Data cache control/status */
+#define SPRN_DC_ADR 569 /* Address needed for some commands */
+#define SPRN_DC_DAT 570 /* Read-only data register */
+
+/* Commands. Only the first few are available to the instruction cache.
+*/
+#define IDC_ENABLE 0x02000000 /* Cache enable */
+#define IDC_DISABLE 0x04000000 /* Cache disable */
+#define IDC_LDLCK 0x06000000 /* Load and lock */
+#define IDC_UNLINE 0x08000000 /* Unlock line */
+#define IDC_UNALL 0x0a000000 /* Unlock all */
+#define IDC_INVALL 0x0c000000 /* Invalidate all */
+
+#define DC_FLINE 0x0e000000 /* Flush data cache line */
+#define DC_SFWT 0x01000000 /* Set forced writethrough mode */
+#define DC_CFWT 0x03000000 /* Clear forced writethrough mode */
+#define DC_SLES 0x05000000 /* Set little endian swap mode */
+#define DC_CLES 0x07000000 /* Clear little endian swap mode */
+
+/* Status.
+*/
+#define IDC_ENABLED 0x80000000 /* Cache is enabled */
+#define IDC_CERR1 0x00200000 /* Cache error 1 */
+#define IDC_CERR2 0x00100000 /* Cache error 2 */
+#define IDC_CERR3 0x00080000 /* Cache error 3 */
+
+#define DC_DFWT 0x40000000 /* Data cache is forced write through */
+#define DC_LES 0x20000000 /* Caches are little endian mode */
+
+#endif /* _ASM_POWERPC_REG_8xx_H */
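
A short sketch of how the cache-control SPRs above are driven (illustrative only, not part of the header; mtspr() is assumed to come from asm/reg.h). A command word is simply written to the relevant control/status register:

static inline void example_8xx_invalidate_icache(void)
{
	mtspr(SPRN_IC_CST, IDC_INVALL);		/* issue the "invalidate all" command */
}

static inline void example_8xx_enable_dcache(void)
{
	mtspr(SPRN_DC_CST, IDC_ENABLE);		/* enable the data cache */
}
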
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
new file mode 100644
index 0000000..be980f4
--- /dev/null
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -0,0 +1,501 @@
+/*
+ * Contains register definitions common to the Book E PowerPC
+ * specification. Notice that while the IBM-40x series of CPUs
+ * are not true Book E PowerPCs, they borrowed a number of features
+ * before Book E was finalized, and are included here as well. Unfortunately,
+ * they sometimes used different locations than true Book E CPUs did.
+ */
+#ifdef __KERNEL__
+#ifndef __ASM_POWERPC_REG_BOOKE_H__
+#define __ASM_POWERPC_REG_BOOKE_H__
+
+/* Machine State Register (MSR) Fields */
+#define MSR_UCLE (1<<26) /* User-mode cache lock enable */
+#define MSR_SPE (1<<25) /* Enable SPE */
+#define MSR_DWE (1<<10) /* Debug Wait Enable */
+#define MSR_UBLE (1<<10) /* BTB lock enable (e500) */
+#define MSR_IS MSR_IR /* Instruction Space */
+#define MSR_DS MSR_DR /* Data Space */
+#define MSR_PMM (1<<2) /* Performance monitor mark bit */
+
+/* Default MSR for kernel mode. */
+#if defined (CONFIG_40x)
+#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR|MSR_CE)
+#elif defined(CONFIG_BOOKE)
+#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_CE)
+#endif
+
+/* Special Purpose Registers (SPRNs)*/
+#define SPRN_DECAR 0x036 /* Decrementer Auto Reload Register */
+#define SPRN_IVPR 0x03F /* Interrupt Vector Prefix Register */
+#define SPRN_USPRG0 0x100 /* User Special Purpose Register General 0 */
+#define SPRN_SPRG4R 0x104 /* Special Purpose Register General 4 Read */
+#define SPRN_SPRG5R 0x105 /* Special Purpose Register General 5 Read */
+#define SPRN_SPRG6R 0x106 /* Special Purpose Register General 6 Read */
+#define SPRN_SPRG7R 0x107 /* Special Purpose Register General 7 Read */
+#define SPRN_SPRG4W 0x114 /* Special Purpose Register General 4 Write */
+#define SPRN_SPRG5W 0x115 /* Special Purpose Register General 5 Write */
+#define SPRN_SPRG6W 0x116 /* Special Purpose Register General 6 Write */
+#define SPRN_SPRG7W 0x117 /* Special Purpose Register General 7 Write */
+#define SPRN_DBCR2 0x136 /* Debug Control Register 2 */
+#define SPRN_IAC3 0x13A /* Instruction Address Compare 3 */
+#define SPRN_IAC4 0x13B /* Instruction Address Compare 4 */
+#define SPRN_DVC1 0x13E /* Data Value Compare Register 1 */
+#define SPRN_DVC2 0x13F /* Data Value Compare Register 2 */
+#define SPRN_IVOR0 0x190 /* Interrupt Vector Offset Register 0 */
+#define SPRN_IVOR1 0x191 /* Interrupt Vector Offset Register 1 */
+#define SPRN_IVOR2 0x192 /* Interrupt Vector Offset Register 2 */
+#define SPRN_IVOR3 0x193 /* Interrupt Vector Offset Register 3 */
+#define SPRN_IVOR4 0x194 /* Interrupt Vector Offset Register 4 */
+#define SPRN_IVOR5 0x195 /* Interrupt Vector Offset Register 5 */
+#define SPRN_IVOR6 0x196 /* Interrupt Vector Offset Register 6 */
+#define SPRN_IVOR7 0x197 /* Interrupt Vector Offset Register 7 */
+#define SPRN_IVOR8 0x198 /* Interrupt Vector Offset Register 8 */
+#define SPRN_IVOR9 0x199 /* Interrupt Vector Offset Register 9 */
+#define SPRN_IVOR10 0x19A /* Interrupt Vector Offset Register 10 */
+#define SPRN_IVOR11 0x19B /* Interrupt Vector Offset Register 11 */
+#define SPRN_IVOR12 0x19C /* Interrupt Vector Offset Register 12 */
+#define SPRN_IVOR13 0x19D /* Interrupt Vector Offset Register 13 */
+#define SPRN_IVOR14 0x19E /* Interrupt Vector Offset Register 14 */
+#define SPRN_IVOR15 0x19F /* Interrupt Vector Offset Register 15 */
+#define SPRN_SPEFSCR 0x200 /* SPE & Embedded FP Status & Control */
+#define SPRN_BBEAR 0x201 /* Branch Buffer Entry Address Register */
+#define SPRN_BBTAR 0x202 /* Branch Buffer Target Address Register */
+#define SPRN_L1CFG0 0x203 /* L1 Cache Configure Register 0 */
+#define SPRN_L1CFG1 0x204 /* L1 Cache Configure Register 1 */
+#define SPRN_ATB 0x20E /* Alternate Time Base */
+#define SPRN_ATBL 0x20E /* Alternate Time Base Lower */
+#define SPRN_ATBU 0x20F /* Alternate Time Base Upper */
+#define SPRN_IVOR32 0x210 /* Interrupt Vector Offset Register 32 */
+#define SPRN_IVOR33 0x211 /* Interrupt Vector Offset Register 33 */
+#define SPRN_IVOR34 0x212 /* Interrupt Vector Offset Register 34 */
+#define SPRN_IVOR35 0x213 /* Interrupt Vector Offset Register 35 */
+#define SPRN_IVOR36 0x214 /* Interrupt Vector Offset Register 36 */
+#define SPRN_IVOR37 0x215 /* Interrupt Vector Offset Register 37 */
+#define SPRN_MCSRR0 0x23A /* Machine Check Save and Restore Register 0 */
+#define SPRN_MCSRR1 0x23B /* Machine Check Save and Restore Register 1 */
+#define SPRN_MCSR 0x23C /* Machine Check Status Register */
+#define SPRN_MCAR 0x23D /* Machine Check Address Register */
+#define SPRN_DSRR0 0x23E /* Debug Save and Restore Register 0 */
+#define SPRN_DSRR1 0x23F /* Debug Save and Restore Register 1 */
+#define SPRN_SPRG8 0x25C /* Special Purpose Register General 8 */
+#define SPRN_SPRG9 0x25D /* Special Purpose Register General 9 */
+#define SPRN_L1CSR2 0x25E /* L1 Cache Control and Status Register 2 */
+#define SPRN_MAS0 0x270 /* MMU Assist Register 0 */
+#define SPRN_MAS1 0x271 /* MMU Assist Register 1 */
+#define SPRN_MAS2 0x272 /* MMU Assist Register 2 */
+#define SPRN_MAS3 0x273 /* MMU Assist Register 3 */
+#define SPRN_MAS4 0x274 /* MMU Assist Register 4 */
+#define SPRN_MAS5 0x275 /* MMU Assist Register 5 */
+#define SPRN_MAS6 0x276 /* MMU Assist Register 6 */
+#define SPRN_PID1 0x279 /* Process ID Register 1 */
+#define SPRN_PID2 0x27A /* Process ID Register 2 */
+#define SPRN_TLB0CFG 0x2B0 /* TLB 0 Config Register */
+#define SPRN_TLB1CFG 0x2B1 /* TLB 1 Config Register */
+#define SPRN_EPR 0x2BE /* External Proxy Register */
+#define SPRN_CCR1 0x378 /* Core Configuration Register 1 */
+#define SPRN_ZPR 0x3B0 /* Zone Protection Register (40x) */
+#define SPRN_MAS7 0x3B0 /* MMU Assist Register 7 */
+#define SPRN_MMUCR 0x3B2 /* MMU Control Register */
+#define SPRN_CCR0 0x3B3 /* Core Configuration Register 0 */
+#define SPRN_EPLC 0x3B3 /* External Process ID Load Context */
+#define SPRN_EPSC 0x3B4 /* External Process ID Store Context */
+#define SPRN_SGR 0x3B9 /* Storage Guarded Register */
+#define SPRN_DCWR 0x3BA /* Data Cache Write-thru Register */
+#define SPRN_SLER 0x3BB /* Little-endian real mode */
+#define SPRN_SU0R 0x3BC /* "User 0" real mode (40x) */
+#define SPRN_DCMP 0x3D1 /* Data TLB Compare Register */
+#define SPRN_ICDBDR 0x3D3 /* Instruction Cache Debug Data Register */
+#define SPRN_EVPR 0x3D6 /* Exception Vector Prefix Register */
+#define SPRN_L1CSR0 0x3F2 /* L1 Cache Control and Status Register 0 */
+#define SPRN_L1CSR1 0x3F3 /* L1 Cache Control and Status Register 1 */
+#define SPRN_PIT 0x3DB /* Programmable Interval Timer */
+#define SPRN_BUCSR 0x3F5 /* Branch Unit Control and Status */
+#define SPRN_L2CSR0 0x3F9 /* L2 Data Cache Control and Status Register 0 */
+#define SPRN_L2CSR1 0x3FA /* L2 Data Cache Control and Status Register 1 */
+#define SPRN_DCCR 0x3FA /* Data Cache Cacheability Register */
+#define SPRN_ICCR 0x3FB /* Instruction Cache Cacheability Register */
+#define SPRN_SVR 0x3FF /* System Version Register */
+
+/*
+ * SPRs which have conflicting definitions on true Book E versus classic,
+ * or IBM 40x.
+ */
+#ifdef CONFIG_BOOKE
+#define SPRN_PID 0x030 /* Process ID */
+#define SPRN_PID0 SPRN_PID /* Process ID Register 0 */
+#define SPRN_CSRR0 0x03A /* Critical Save and Restore Register 0 */
+#define SPRN_CSRR1 0x03B /* Critical Save and Restore Register 1 */
+#define SPRN_DEAR 0x03D /* Data Error Address Register */
+#define SPRN_ESR 0x03E /* Exception Syndrome Register */
+#define SPRN_PIR 0x11E /* Processor Identification Register */
+#define SPRN_DBSR 0x130 /* Debug Status Register */
+#define SPRN_DBCR0 0x134 /* Debug Control Register 0 */
+#define SPRN_DBCR1 0x135 /* Debug Control Register 1 */
+#define SPRN_IAC1 0x138 /* Instruction Address Compare 1 */
+#define SPRN_IAC2 0x139 /* Instruction Address Compare 2 */
+#define SPRN_DAC1 0x13C /* Data Address Compare 1 */
+#define SPRN_DAC2 0x13D /* Data Address Compare 2 */
+#define SPRN_TSR 0x150 /* Timer Status Register */
+#define SPRN_TCR 0x154 /* Timer Control Register */
+#endif /* Book E */
+#ifdef CONFIG_40x
+#define SPRN_PID 0x3B1 /* Process ID */
+#define SPRN_DBCR1 0x3BD /* Debug Control Register 1 */
+#define SPRN_ESR 0x3D4 /* Exception Syndrome Register */
+#define SPRN_DEAR 0x3D5 /* Data Error Address Register */
+#define SPRN_TSR 0x3D8 /* Timer Status Register */
+#define SPRN_TCR 0x3DA /* Timer Control Register */
+#define SPRN_SRR2 0x3DE /* Save/Restore Register 2 */
+#define SPRN_SRR3 0x3DF /* Save/Restore Register 3 */
+#define SPRN_DBSR 0x3F0 /* Debug Status Register */
+#define SPRN_DBCR0 0x3F2 /* Debug Control Register 0 */
+#define SPRN_DAC1 0x3F6 /* Data Address Compare 1 */
+#define SPRN_DAC2 0x3F7 /* Data Address Compare 2 */
+#define SPRN_CSRR0 SPRN_SRR2 /* Critical Save and Restore Register 0 */
+#define SPRN_CSRR1 SPRN_SRR3 /* Critical Save and Restore Register 1 */
+#endif
+
+/* Bit definitions for CCR1. */
+#define CCR1_DPC 0x00000100 /* Disable L1 I-Cache/D-Cache parity checking */
+#define CCR1_TCS 0x00000080 /* Timer Clock Select */
+
+/* Bit definitions for the MCSR. */
+#define MCSR_MCS 0x80000000 /* Machine Check Summary */
+#define MCSR_IB 0x40000000 /* Instruction PLB Error */
+#define MCSR_DRB 0x20000000 /* Data Read PLB Error */
+#define MCSR_DWB 0x10000000 /* Data Write PLB Error */
+#define MCSR_TLBP 0x08000000 /* TLB Parity Error */
+#define MCSR_ICP 0x04000000 /* I-Cache Parity Error */
+#define MCSR_DCSP 0x02000000 /* D-Cache Search Parity Error */
+#define MCSR_DCFP 0x01000000 /* D-Cache Flush Parity Error */
+#define MCSR_IMPE 0x00800000 /* Imprecise Machine Check Exception */
+
+#ifdef CONFIG_E500
+#define MCSR_MCP 0x80000000UL /* Machine Check Input Pin */
+#define MCSR_ICPERR 0x40000000UL /* I-Cache Parity Error */
+#define MCSR_DCP_PERR 0x20000000UL /* D-Cache Push Parity Error */
+#define MCSR_DCPERR 0x10000000UL /* D-Cache Parity Error */
+#define MCSR_BUS_IAERR 0x00000080UL /* Instruction Address Error */
+#define MCSR_BUS_RAERR 0x00000040UL /* Read Address Error */
+#define MCSR_BUS_WAERR 0x00000020UL /* Write Address Error */
+#define MCSR_BUS_IBERR 0x00000010UL /* Instruction Data Error */
+#define MCSR_BUS_RBERR 0x00000008UL /* Read Data Bus Error */
+#define MCSR_BUS_WBERR 0x00000004UL /* Write Data Bus Error */
+#define MCSR_BUS_IPERR 0x00000002UL /* Instruction parity Error */
+#define MCSR_BUS_RPERR 0x00000001UL /* Read parity Error */
+
+/* e500 parts may set unused bits in MCSR; mask these off */
+#define MCSR_MASK (MCSR_MCP | MCSR_ICPERR | MCSR_DCP_PERR | \
+ MCSR_DCPERR | MCSR_BUS_IAERR | MCSR_BUS_RAERR | \
+ MCSR_BUS_WAERR | MCSR_BUS_IBERR | MCSR_BUS_RBERR | \
+ MCSR_BUS_WBERR | MCSR_BUS_IPERR | MCSR_BUS_RPERR)
+#endif
+#ifdef CONFIG_E200
+#define MCSR_MCP 0x80000000UL /* Machine Check Input Pin */
+#define MCSR_CP_PERR 0x20000000UL /* Cache Push Parity Error */
+#define MCSR_CPERR 0x10000000UL /* Cache Parity Error */
+#define MCSR_EXCP_ERR 0x08000000UL /* ISI, ITLB, or Bus Error on 1st insn
+ fetch for an exception handler */
+#define MCSR_BUS_IRERR 0x00000010UL /* Read Bus Error on instruction fetch*/
+#define MCSR_BUS_DRERR 0x00000008UL /* Read Bus Error on data load */
+#define MCSR_BUS_WRERR 0x00000004UL /* Write Bus Error on buffered
+ store or cache line push */
+
+/* e200 parts may set unused bits in MCSR; mask these off */
+#define MCSR_MASK (MCSR_MCP | MCSR_CP_PERR | MCSR_CPERR | \
+ MCSR_EXCP_ERR | MCSR_BUS_IRERR | MCSR_BUS_DRERR | \
+ MCSR_BUS_WRERR)
+#endif
+
+/* Bit definitions for the DBSR. */
+/*
+ * DBSR bits which have conflicting definitions on true Book E versus IBM 40x.
+ */
+#ifdef CONFIG_BOOKE
+#define DBSR_IC 0x08000000 /* Instruction Completion */
+#define DBSR_BT 0x04000000 /* Branch Taken */
+#define DBSR_IRPT 0x02000000 /* Exception Debug Event */
+#define DBSR_TIE 0x01000000 /* Trap Instruction Event */
+#define DBSR_IAC1 0x00800000 /* Instr Address Compare 1 Event */
+#define DBSR_IAC2 0x00400000 /* Instr Address Compare 2 Event */
+#define DBSR_IAC3 0x00200000 /* Instr Address Compare 3 Event */
+#define DBSR_IAC4 0x00100000 /* Instr Address Compare 4 Event */
+#define DBSR_DAC1R 0x00080000 /* Data Addr Compare 1 Read Event */
+#define DBSR_DAC1W 0x00040000 /* Data Addr Compare 1 Write Event */
+#define DBSR_DAC2R 0x00020000 /* Data Addr Compare 2 Read Event */
+#define DBSR_DAC2W 0x00010000 /* Data Addr Compare 2 Write Event */
+#define DBSR_RET 0x00008000 /* Return Debug Event */
+#define DBSR_CIRPT 0x00000040 /* Critical Interrupt Taken Event */
+#define DBSR_CRET 0x00000020 /* Critical Return Debug Event */
+#endif
+#ifdef CONFIG_40x
+#define DBSR_IC 0x80000000 /* Instruction Completion */
+#define DBSR_BT 0x40000000 /* Branch taken */
+#define DBSR_IRPT 0x20000000 /* Exception Debug Event */
+#define DBSR_TIE 0x10000000 /* Trap Instruction debug Event */
+#define DBSR_IAC1 0x04000000 /* Instruction Address Compare 1 Event */
+#define DBSR_IAC2 0x02000000 /* Instruction Address Compare 2 Event */
+#define DBSR_IAC3 0x00080000 /* Instruction Address Compare 3 Event */
+#define DBSR_IAC4 0x00040000 /* Instruction Address Compare 4 Event */
+#define DBSR_DAC1R 0x01000000 /* Data Address Compare 1 Read Event */
+#define DBSR_DAC1W 0x00800000 /* Data Address Compare 1 Write Event */
+#define DBSR_DAC2R 0x00400000 /* Data Address Compare 2 Read Event */
+#define DBSR_DAC2W 0x00200000 /* Data Address Compare 2 Write Event */
+#endif
+
+/* Bit definitions related to the ESR. */
+#define ESR_MCI 0x80000000 /* Machine Check - Instruction */
+#define ESR_IMCP 0x80000000 /* Instr. Machine Check - Protection */
+#define ESR_IMCN 0x40000000 /* Instr. Machine Check - Non-config */
+#define ESR_IMCB 0x20000000 /* Instr. Machine Check - Bus error */
+#define ESR_IMCT 0x10000000 /* Instr. Machine Check - Timeout */
+#define ESR_PIL 0x08000000 /* Program Exception - Illegal */
+#define ESR_PPR 0x04000000 /* Program Exception - Privileged */
+#define ESR_PTR 0x02000000 /* Program Exception - Trap */
+#define ESR_FP 0x01000000 /* Floating Point Operation */
+#define ESR_DST 0x00800000 /* Storage Exception - Data miss */
+#define ESR_DIZ 0x00400000 /* Storage Exception - Zone fault */
+#define ESR_ST 0x00800000 /* Store Operation */
+#define ESR_DLK 0x00200000 /* Data Cache Locking */
+#define ESR_ILK 0x00100000 /* Instr. Cache Locking */
+#define ESR_PUO 0x00040000 /* Unimplemented Operation exception */
+#define ESR_BO 0x00020000 /* Byte Ordering */
+
+/* Bit definitions related to the DBCR0. */
+#if defined(CONFIG_40x)
+#define DBCR0_EDM 0x80000000 /* External Debug Mode */
+#define DBCR0_IDM 0x40000000 /* Internal Debug Mode */
+#define DBCR0_RST 0x30000000 /* all the bits in the RST field */
+#define DBCR0_RST_SYSTEM 0x30000000 /* System Reset */
+#define DBCR0_RST_CHIP 0x20000000 /* Chip Reset */
+#define DBCR0_RST_CORE 0x10000000 /* Core Reset */
+#define DBCR0_RST_NONE 0x00000000 /* No Reset */
+#define DBCR0_IC 0x08000000 /* Instruction Completion */
+#define DBCR0_ICMP DBCR0_IC
+#define DBCR0_BT 0x04000000 /* Branch Taken */
+#define DBCR0_BRT DBCR0_BT
+#define DBCR0_EDE 0x02000000 /* Exception Debug Event */
+#define DBCR0_IRPT DBCR0_EDE
+#define DBCR0_TDE 0x01000000 /* TRAP Debug Event */
+#define DBCR0_IA1 0x00800000 /* Instr Addr compare 1 enable */
+#define DBCR0_IAC1 DBCR0_IA1
+#define DBCR0_IA2 0x00400000 /* Instr Addr compare 2 enable */
+#define DBCR0_IAC2 DBCR0_IA2
+#define DBCR0_IA12 0x00200000 /* Instr Addr 1-2 range enable */
+#define DBCR0_IA12X 0x00100000 /* Instr Addr 1-2 range eXclusive */
+#define DBCR0_IA3 0x00080000 /* Instr Addr compare 3 enable */
+#define DBCR0_IAC3 DBCR0_IA3
+#define DBCR0_IA4 0x00040000 /* Instr Addr compare 4 enable */
+#define DBCR0_IAC4 DBCR0_IA4
+#define DBCR0_IA34 0x00020000 /* Instr Addr 3-4 range Enable */
+#define DBCR0_IA34X 0x00010000 /* Instr Addr 3-4 range eXclusive */
+#define DBCR0_IA12T 0x00008000 /* Instr Addr 1-2 range Toggle */
+#define DBCR0_IA34T 0x00004000 /* Instr Addr 3-4 range Toggle */
+#define DBCR0_FT 0x00000001 /* Freeze Timers on debug event */
+#elif defined(CONFIG_BOOKE)
+#define DBCR0_EDM 0x80000000 /* External Debug Mode */
+#define DBCR0_IDM 0x40000000 /* Internal Debug Mode */
+#define DBCR0_RST 0x30000000 /* all the bits in the RST field */
+/* DBCR0_RST_* is 44x specific and not followed in fsl booke */
+#define DBCR0_RST_SYSTEM 0x30000000 /* System Reset */
+#define DBCR0_RST_CHIP 0x20000000 /* Chip Reset */
+#define DBCR0_RST_CORE 0x10000000 /* Core Reset */
+#define DBCR0_RST_NONE 0x00000000 /* No Reset */
+#define DBCR0_ICMP 0x08000000 /* Instruction Completion */
+#define DBCR0_IC DBCR0_ICMP
+#define DBCR0_BRT 0x04000000 /* Branch Taken */
+#define DBCR0_BT DBCR0_BRT
+#define DBCR0_IRPT 0x02000000 /* Exception Debug Event */
+#define DBCR0_TDE 0x01000000 /* TRAP Debug Event */
+#define DBCR0_TIE DBCR0_TDE
+#define DBCR0_IAC1 0x00800000 /* Instr Addr compare 1 enable */
+#define DBCR0_IAC2 0x00400000 /* Instr Addr compare 2 enable */
+#define DBCR0_IAC3 0x00200000 /* Instr Addr compare 3 enable */
+#define DBCR0_IAC4 0x00100000 /* Instr Addr compare 4 enable */
+#define DBCR0_DAC1R 0x00080000 /* DAC 1 Read enable */
+#define DBCR0_DAC1W 0x00040000 /* DAC 1 Write enable */
+#define DBCR0_DAC2R 0x00020000 /* DAC 2 Read enable */
+#define DBCR0_DAC2W 0x00010000 /* DAC 2 Write enable */
+#define DBCR0_RET 0x00008000 /* Return Debug Event */
+#define DBCR0_CIRPT 0x00000040 /* Critical Interrupt Taken Event */
+#define DBCR0_CRET 0x00000020 /* Critical Return Debug Event */
+#define DBCR0_FT 0x00000001 /* Freeze Timers on debug event */
+
+/* Bit definitions related to the DBCR1. */
+#define DBCR1_IAC12M 0x00800000 /* Instr Addr 1-2 range enable */
+#define DBCR1_IAC12MX 0x00C00000 /* Instr Addr 1-2 range eXclusive */
+#define DBCR1_IAC12AT 0x00010000 /* Instr Addr 1-2 range Toggle */
+#define DBCR1_IAC34M 0x00000080 /* Instr Addr 3-4 range enable */
+#define DBCR1_IAC34MX 0x000000C0 /* Instr Addr 3-4 range eXclusive */
+#define DBCR1_IAC34AT 0x00000001 /* Instr Addr 3-4 range Toggle */
+
+/* Bit definitions related to the DBCR2. */
+#define DBCR2_DAC12M 0x00800000 /* DAC 1-2 range enable */
+#define DBCR2_DAC12MX 0x00C00000 /* DAC 1-2 range eXclusive */
+#define DBCR2_DAC12A 0x00200000 /* DAC 1-2 Asynchronous */
+#endif
+
+/* Bit definitions related to the TCR. */
+#define TCR_WP(x) (((x)&0x3)<<30) /* WDT Period */
+#define TCR_WP_MASK TCR_WP(3)
+#define WP_2_17 0 /* 2^17 clocks */
+#define WP_2_21 1 /* 2^21 clocks */
+#define WP_2_25 2 /* 2^25 clocks */
+#define WP_2_29 3 /* 2^29 clocks */
+#define TCR_WRC(x) (((x)&0x3)<<28) /* WDT Reset Control */
+#define TCR_WRC_MASK TCR_WRC(3)
+#define WRC_NONE 0 /* No reset will occur */
+#define WRC_CORE 1 /* Core reset will occur */
+#define WRC_CHIP 2 /* Chip reset will occur */
+#define WRC_SYSTEM 3 /* System reset will occur */
+#define TCR_WIE 0x08000000 /* WDT Interrupt Enable */
+#define TCR_PIE 0x04000000 /* PIT Interrupt Enable */
+#define TCR_DIE TCR_PIE /* DEC Interrupt Enable */
+#define TCR_FP(x) (((x)&0x3)<<24) /* FIT Period */
+#define TCR_FP_MASK TCR_FP(3)
+#define FP_2_9 0 /* 2^9 clocks */
+#define FP_2_13 1 /* 2^13 clocks */
+#define FP_2_17 2 /* 2^17 clocks */
+#define FP_2_21 3 /* 2^21 clocks */
+#define TCR_FIE 0x00800000 /* FIT Interrupt Enable */
+#define TCR_ARE 0x00400000 /* Auto Reload Enable */
+
+/* Bit definitions for the TSR. */
+#define TSR_ENW 0x80000000 /* Enable Next Watchdog */
+#define TSR_WIS 0x40000000 /* WDT Interrupt Status */
+#define TSR_WRS(x) (((x)&0x3)<<28) /* WDT Reset Status */
+#define WRS_NONE 0 /* No WDT reset occurred */
+#define WRS_CORE 1 /* WDT forced core reset */
+#define WRS_CHIP 2 /* WDT forced chip reset */
+#define WRS_SYSTEM 3 /* WDT forced system reset */
+#define TSR_PIS 0x08000000 /* PIT Interrupt Status */
+#define TSR_DIS TSR_PIS /* DEC Interrupt Status */
+#define TSR_FIS 0x04000000 /* FIT Interrupt Status */
+
+/* Bit definitions for the DCCR. */
+#define DCCR_NOCACHE 0 /* Noncacheable */
+#define DCCR_CACHE 1 /* Cacheable */
+
+/* Bit definitions for DCWR. */
+#define DCWR_COPY 0 /* Copy-back */
+#define DCWR_WRITE 1 /* Write-through */
+
+/* Bit definitions for ICCR. */
+#define ICCR_NOCACHE 0 /* Noncacheable */
+#define ICCR_CACHE 1 /* Cacheable */
+
+/* Bit definitions for L1CSR0. */
+#define L1CSR0_CLFC 0x00000100 /* Cache Lock Bits Flash Clear */
+#define L1CSR0_DCFI 0x00000002 /* Data Cache Flash Invalidate */
+#define L1CSR0_CFI 0x00000002 /* Cache Flash Invalidate */
+#define L1CSR0_DCE 0x00000001 /* Data Cache Enable */
+
+/* Bit definitions for L1CSR1. */
+#define L1CSR1_ICLFR 0x00000100 /* Instr Cache Lock Bits Flash Reset */
+#define L1CSR1_ICFI 0x00000002 /* Instr Cache Flash Invalidate */
+#define L1CSR1_ICE 0x00000001 /* Instr Cache Enable */
+
+/* Bit definitions for L2CSR0. */
+#define L2CSR0_L2E 0x80000000 /* L2 Cache Enable */
+#define L2CSR0_L2PE 0x40000000 /* L2 Cache Parity/ECC Enable */
+#define L2CSR0_L2WP 0x1c000000 /* L2 I/D Way Partitioning */
+#define L2CSR0_L2CM 0x03000000 /* L2 Cache Coherency Mode */
+#define L2CSR0_L2FI 0x00200000 /* L2 Cache Flash Invalidate */
+#define L2CSR0_L2IO 0x00100000 /* L2 Cache Instruction Only */
+#define L2CSR0_L2DO 0x00010000 /* L2 Cache Data Only */
+#define L2CSR0_L2REP 0x00003000 /* L2 Line Replacement Algo */
+#define L2CSR0_L2FL 0x00000800 /* L2 Cache Flush */
+#define L2CSR0_L2LFC 0x00000400 /* L2 Cache Lock Flash Clear */
+#define L2CSR0_L2LOA 0x00000080 /* L2 Cache Lock Overflow Allocate */
+#define L2CSR0_L2LO 0x00000020 /* L2 Cache Lock Overflow */
+
+/* Bit definitions for SGR. */
+#define SGR_NORMAL 0 /* Speculative fetching allowed. */
+#define SGR_GUARDED 1 /* Speculative fetching disallowed. */
+
+/* Bit definitions for SPEFSCR. */
+#define SPEFSCR_SOVH 0x80000000 /* Summary integer overflow high */
+#define SPEFSCR_OVH 0x40000000 /* Integer overflow high */
+#define SPEFSCR_FGH 0x20000000 /* Embedded FP guard bit high */
+#define SPEFSCR_FXH 0x10000000 /* Embedded FP sticky bit high */
+#define SPEFSCR_FINVH 0x08000000 /* Embedded FP invalid operation high */
+#define SPEFSCR_FDBZH 0x04000000 /* Embedded FP div by zero high */
+#define SPEFSCR_FUNFH 0x02000000 /* Embedded FP underflow high */
+#define SPEFSCR_FOVFH 0x01000000 /* Embedded FP overflow high */
+#define SPEFSCR_FINXS 0x00200000 /* Embedded FP inexact sticky */
+#define SPEFSCR_FINVS 0x00100000 /* Embedded FP invalid op. sticky */
+#define SPEFSCR_FDBZS 0x00080000 /* Embedded FP div by zero sticky */
+#define SPEFSCR_FUNFS 0x00040000 /* Embedded FP underflow sticky */
+#define SPEFSCR_FOVFS 0x00020000 /* Embedded FP overflow sticky */
+#define SPEFSCR_MODE 0x00010000 /* Embedded FP mode */
+#define SPEFSCR_SOV 0x00008000 /* Integer summary overflow */
+#define SPEFSCR_OV 0x00004000 /* Integer overflow */
+#define SPEFSCR_FG 0x00002000 /* Embedded FP guard bit */
+#define SPEFSCR_FX 0x00001000 /* Embedded FP sticky bit */
+#define SPEFSCR_FINV 0x00000800 /* Embedded FP invalid operation */
+#define SPEFSCR_FDBZ 0x00000400 /* Embedded FP div by zero */
+#define SPEFSCR_FUNF 0x00000200 /* Embedded FP underflow */
+#define SPEFSCR_FOVF 0x00000100 /* Embedded FP overflow */
+#define SPEFSCR_FINXE 0x00000040 /* Embedded FP inexact enable */
+#define SPEFSCR_FINVE 0x00000020 /* Embedded FP invalid op. enable */
+#define SPEFSCR_FDBZE 0x00000010 /* Embedded FP div by zero enable */
+#define SPEFSCR_FUNFE 0x00000008 /* Embedded FP underflow enable */
+#define SPEFSCR_FOVFE 0x00000004 /* Embedded FP overflow enable */
+#define SPEFSCR_FRMC 0x00000003 /* Embedded FP rounding mode control */
+
+/*
+ * The IBM-403 is an even odder special case, as it is much
+ * older than the IBM-405 series. We put these down here in case someone
+ * wishes to support these machines again.
+ */
+#ifdef CONFIG_403GCX
+/* Special Purpose Registers (SPRNs)*/
+#define SPRN_TBHU 0x3CC /* Time Base High User-mode */
+#define SPRN_TBLU 0x3CD /* Time Base Low User-mode */
+#define SPRN_CDBCR 0x3D7 /* Cache Debug Control Register */
+#define SPRN_TBHI 0x3DC /* Time Base High */
+#define SPRN_TBLO 0x3DD /* Time Base Low */
+#define SPRN_DBCR 0x3F2 /* Debug Control Register */
+#define SPRN_PBL1 0x3FC /* Protection Bound Lower 1 */
+#define SPRN_PBL2 0x3FE /* Protection Bound Lower 2 */
+#define SPRN_PBU1 0x3FD /* Protection Bound Upper 1 */
+#define SPRN_PBU2 0x3FF /* Protection Bound Upper 2 */
+
+
+/* Bit definitions for the DBCR. */
+#define DBCR_EDM DBCR0_EDM
+#define DBCR_IDM DBCR0_IDM
+#define DBCR_RST(x) (((x) & 0x3) << 28)
+#define DBCR_RST_NONE 0
+#define DBCR_RST_CORE 1
+#define DBCR_RST_CHIP 2
+#define DBCR_RST_SYSTEM 3
+#define DBCR_IC DBCR0_IC /* Instruction Completion Debug Evnt */
+#define DBCR_BT DBCR0_BT /* Branch Taken Debug Event */
+#define DBCR_EDE DBCR0_EDE /* Exception Debug Event */
+#define DBCR_TDE DBCR0_TDE /* TRAP Debug Event */
+#define DBCR_FER 0x00F80000 /* First Events Remaining Mask */
+#define DBCR_FT 0x00040000 /* Freeze Timers on Debug Event */
+#define DBCR_IA1 0x00020000 /* Instr. Addr. Compare 1 Enable */
+#define DBCR_IA2 0x00010000 /* Instr. Addr. Compare 2 Enable */
+#define DBCR_D1R 0x00008000 /* Data Addr. Compare 1 Read Enable */
+#define DBCR_D1W 0x00004000 /* Data Addr. Compare 1 Write Enable */
+#define DBCR_D1S(x) (((x) & 0x3) << 12) /* Data Addr. Compare 1 Size */
+#define DAC_BYTE 0
+#define DAC_HALF 1
+#define DAC_WORD 2
+#define DAC_QUAD 3
+#define DBCR_D2R 0x00000800 /* Data Addr. Compare 2 Read Enable */
+#define DBCR_D2W 0x00000400 /* Data Addr. Compare 2 Write Enable */
+#define DBCR_D2S(x) (((x) & 0x3) << 8) /* Data Addr. Compare 2 Size */
+#define DBCR_SBT 0x00000040 /* Second Branch Taken Debug Event */
+#define DBCR_SED 0x00000020 /* Second Exception Debug Event */
+#define DBCR_STD 0x00000010 /* Second Trap Debug Event */
+#define DBCR_SIA 0x00000008 /* Second IAC Enable */
+#define DBCR_SDA 0x00000004 /* Second DAC Enable */
+#define DBCR_JOI 0x00000002 /* JTAG Serial Outbound Int. Enable */
+#define DBCR_JII 0x00000001 /* JTAG Serial Inbound Int. Enable */
+#endif /* 403GCX */
+#endif /* __ASM_POWERPC_REG_BOOKE_H__ */
+#endif /* __KERNEL__ */
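
To make the TCR/TSR watchdog encodings above concrete, a hedged sketch of arming the Book E watchdog (illustrative only, not part of the header; mfspr()/mtspr() are assumed to come from asm/reg.h, and the chosen period and reset-control values are examples, not recommendations):

static inline void example_booke_arm_watchdog(void)
{
	unsigned long tcr = mfspr(SPRN_TCR);

	tcr &= ~(TCR_WP_MASK | TCR_WRC_MASK);
	tcr |= TCR_WP(WP_2_29) | TCR_WRC(WRC_SYSTEM) | TCR_WIE;
	mtspr(SPRN_TCR, tcr);			/* 2^29 clocks, system reset, interrupt enabled */

	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS);	/* TSR bits are write-one-to-clear */
}
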
diff --git a/arch/powerpc/include/asm/reg_fsl_emb.h b/arch/powerpc/include/asm/reg_fsl_emb.h
new file mode 100644
index 0000000..1e180a5
--- /dev/null
+++ b/arch/powerpc/include/asm/reg_fsl_emb.h
@@ -0,0 +1,72 @@
+/*
+ * Contains register definitions for the Freescale Embedded Performance
+ * Monitor.
+ */
+#ifdef __KERNEL__
+#ifndef __ASM_POWERPC_REG_FSL_EMB_H__
+#define __ASM_POWERPC_REG_FSL_EMB_H__
+
+#ifndef __ASSEMBLY__
+/* Performance Monitor Registers */
+#define mfpmr(rn) ({unsigned int rval; \
+ asm volatile("mfpmr %0," __stringify(rn) \
+ : "=r" (rval)); rval;})
+#define mtpmr(rn, v) asm volatile("mtpmr " __stringify(rn) ",%0" : : "r" (v))
+#endif /* __ASSEMBLY__ */
+
+/* Freescale Book E Performance Monitor APU Registers */
+#define PMRN_PMC0 0x010 /* Performance Monitor Counter 0 */
+#define PMRN_PMC1 0x011 /* Performance Monitor Counter 1 */
+#define PMRN_PMC2 0x012 /* Performance Monitor Counter 2 */
+#define PMRN_PMC3 0x013 /* Performance Monitor Counter 3 */
+#define PMRN_PMLCA0 0x090 /* PM Local Control A0 */
+#define PMRN_PMLCA1 0x091 /* PM Local Control A1 */
+#define PMRN_PMLCA2 0x092 /* PM Local Control A2 */
+#define PMRN_PMLCA3 0x093 /* PM Local Control A3 */
+
+#define PMLCA_FC 0x80000000 /* Freeze Counter */
+#define PMLCA_FCS 0x40000000 /* Freeze in Supervisor */
+#define PMLCA_FCU 0x20000000 /* Freeze in User */
+#define PMLCA_FCM1 0x10000000 /* Freeze when PMM==1 */
+#define PMLCA_FCM0 0x08000000 /* Freeze when PMM==0 */
+#define PMLCA_CE 0x04000000 /* Condition Enable */
+
+#define PMLCA_EVENT_MASK 0x007f0000 /* Event field */
+#define PMLCA_EVENT_SHIFT 16
+
+#define PMRN_PMLCB0 0x110 /* PM Local Control B0 */
+#define PMRN_PMLCB1 0x111 /* PM Local Control B1 */
+#define PMRN_PMLCB2 0x112 /* PM Local Control B2 */
+#define PMRN_PMLCB3 0x113 /* PM Local Control B3 */
+
+#define PMLCB_THRESHMUL_MASK 0x0700 /* Threshold Multiple Field */
+#define PMLCB_THRESHMUL_SHIFT 8
+
+#define PMLCB_THRESHOLD_MASK 0x003f /* Threshold Field */
+#define PMLCB_THRESHOLD_SHIFT 0
+
+#define PMRN_PMGC0 0x190 /* PM Global Control 0 */
+
+#define PMGC0_FAC 0x80000000 /* Freeze all Counters */
+#define PMGC0_PMIE 0x40000000 /* Interrupt Enable */
+#define PMGC0_FCECE 0x20000000 /* Freeze counters on
+ Enabled Condition or
+ Event */
+
+#define PMRN_UPMC0 0x000 /* User Performance Monitor Counter 0 */
+#define PMRN_UPMC1 0x001 /* User Performance Monitor Counter 1 */
+#define PMRN_UPMC2 0x002 /* User Performance Monitor Counter 2 */
+#define PMRN_UPMC3 0x003 /* User Performance Monitor Counter 3 */
+#define PMRN_UPMLCA0 0x080 /* User PM Local Control A0 */
+#define PMRN_UPMLCA1 0x081 /* User PM Local Control A1 */
+#define PMRN_UPMLCA2 0x082 /* User PM Local Control A2 */
+#define PMRN_UPMLCA3 0x083 /* User PM Local Control A3 */
+#define PMRN_UPMLCB0 0x100 /* User PM Local Control B0 */
+#define PMRN_UPMLCB1 0x101 /* User PM Local Control B1 */
+#define PMRN_UPMLCB2 0x102 /* User PM Local Control B2 */
+#define PMRN_UPMLCB3 0x103 /* User PM Local Control B3 */
+#define PMRN_UPMGC0 0x180 /* User PM Global Control 0 */
+
+
+#endif /* __ASM_POWERPC_REG_FSL_EMB_H__ */
+#endif /* __KERNEL__ */
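
A small sketch of the mfpmr()/mtpmr() accessors defined above (illustrative only, not part of the header): freezing performance monitor counter 0 by setting the Freeze Counter bit in its local control register.

static inline void example_freeze_pmc0(void)
{
	unsigned int pmlca = mfpmr(PMRN_PMLCA0);	/* read PM Local Control A0 */

	mtpmr(PMRN_PMLCA0, pmlca | PMLCA_FC);		/* set the Freeze Counter bit */
}
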
diff --git a/arch/powerpc/include/asm/resource.h b/arch/powerpc/include/asm/resource.h
new file mode 100644
index 0000000..04bc4db
--- /dev/null
+++ b/arch/powerpc/include/asm/resource.h
@@ -0,0 +1 @@
+#include <asm-generic/resource.h>
diff --git a/arch/powerpc/include/asm/rheap.h b/arch/powerpc/include/asm/rheap.h
new file mode 100644
index 0000000..1723817
--- /dev/null
+++ b/arch/powerpc/include/asm/rheap.h
@@ -0,0 +1,89 @@
+/*
+ * include/asm-ppc/rheap.h
+ *
+ * Header file for the implementation of a remote heap.
+ *
+ * Author: Pantelis Antoniou <panto@intracom.gr>
+ *
+ * 2004 (c) INTRACOM S.A. Greece. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+
+#ifndef __ASM_PPC_RHEAP_H__
+#define __ASM_PPC_RHEAP_H__
+
+#include <linux/list.h>
+
+typedef struct _rh_block {
+ struct list_head list;
+ unsigned long start;
+ int size;
+ const char *owner;
+} rh_block_t;
+
+typedef struct _rh_info {
+ unsigned int alignment;
+ int max_blocks;
+ int empty_slots;
+ rh_block_t *block;
+ struct list_head empty_list;
+ struct list_head free_list;
+ struct list_head taken_list;
+ unsigned int flags;
+} rh_info_t;
+
+#define RHIF_STATIC_INFO 0x1
+#define RHIF_STATIC_BLOCK 0x2
+
+typedef struct _rh_stats {
+ unsigned long start;
+ int size;
+ const char *owner;
+} rh_stats_t;
+
+#define RHGS_FREE 0
+#define RHGS_TAKEN 1
+
+/* Create a remote heap dynamically */
+extern rh_info_t *rh_create(unsigned int alignment);
+
+/* Destroy a remote heap, created by rh_create() */
+extern void rh_destroy(rh_info_t * info);
+
+/* Initialize in place a remote info block */
+extern void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks,
+ rh_block_t * block);
+
+/* Attach a free region to manage */
+extern int rh_attach_region(rh_info_t * info, unsigned long start, int size);
+
+/* Detach a free region */
+extern unsigned long rh_detach_region(rh_info_t * info, unsigned long start, int size);
+
+/* Allocate the given size from the remote heap (with alignment) */
+extern unsigned long rh_alloc_align(rh_info_t * info, int size, int alignment,
+ const char *owner);
+
+/* Allocate the given size from the remote heap */
+extern unsigned long rh_alloc(rh_info_t * info, int size, const char *owner);
+
+/* Allocate the given size from the given address */
+extern unsigned long rh_alloc_fixed(rh_info_t * info, unsigned long start, int size,
+ const char *owner);
+
+/* Free the allocated area */
+extern int rh_free(rh_info_t * info, unsigned long start);
+
+/* Get stats for debugging purposes */
+extern int rh_get_stats(rh_info_t * info, int what, int max_stats,
+ rh_stats_t * stats);
+
+/* Simple dump of remote heap info */
+extern void rh_dump(rh_info_t * info);
+
+/* Set owner of taken block */
+extern int rh_set_owner(rh_info_t * info, unsigned long start, const char *owner);
+
+#endif /* __ASM_PPC_RHEAP_H__ */
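
A minimal usage sketch of the remote heap API declared above (illustrative only, not part of the header). The region base, sizes, owner tag and the error checks via IS_ERR()/IS_ERR_VALUE() from linux/err.h are assumptions for illustration:

static void example_rheap_usage(void)
{
	rh_info_t *info = rh_create(8);			/* 8-byte alignment */
	unsigned long offset;

	if (IS_ERR(info))
		return;

	rh_attach_region(info, 0x2000, 0x1000);		/* manage 4 KiB at offset 0x2000 */
	offset = rh_alloc(info, 256, "example");	/* 256 bytes, tagged "example" */
	if (!IS_ERR_VALUE(offset))
		rh_free(info, offset);
	rh_destroy(info);
}
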
diff --git a/arch/powerpc/include/asm/rio.h b/arch/powerpc/include/asm/rio.h
new file mode 100644
index 0000000..0018bf8
--- /dev/null
+++ b/arch/powerpc/include/asm/rio.h
@@ -0,0 +1,18 @@
+/*
+ * RapidIO architecture support
+ *
+ * Copyright 2005 MontaVista Software, Inc.
+ * Matt Porter <mporter@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef ASM_PPC_RIO_H
+#define ASM_PPC_RIO_H
+
+extern void platform_rio_init(void);
+
+#endif /* ASM_PPC_RIO_H */
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
new file mode 100644
index 0000000..8eaa7b2
--- /dev/null
+++ b/arch/powerpc/include/asm/rtas.h
@@ -0,0 +1,247 @@
+#ifndef _POWERPC_RTAS_H
+#define _POWERPC_RTAS_H
+#ifdef __KERNEL__
+
+#include <linux/spinlock.h>
+#include <asm/page.h>
+
+/*
+ * Definitions for talking to the RTAS on CHRP machines.
+ *
+ * Copyright (C) 2001 Peter Bergner
+ * Copyright (C) 2001 PPC 64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define RTAS_UNKNOWN_SERVICE (-1)
+#define RTAS_INSTANTIATE_MAX (1UL<<30) /* Don't instantiate rtas at/above this value */
+
+/* Buffer size for ppc_rtas system call. */
+#define RTAS_RMOBUF_MAX (64 * 1024)
+
+/* RTAS return status codes */
+#define RTAS_NOT_SUSPENDABLE -9004
+#define RTAS_BUSY -2 /* RTAS Busy */
+#define RTAS_EXTENDED_DELAY_MIN 9900
+#define RTAS_EXTENDED_DELAY_MAX 9905
+
+/*
+ * In general to call RTAS use rtas_token("string") to lookup
+ * an RTAS token for the given string (e.g. "event-scan").
+ * To actually perform the call use
+ * ret = rtas_call(token, n_in, n_out, ...)
+ * Where n_in is the number of input parameters and
+ * n_out is the number of output parameters
+ *
+ * If the "string" is invalid on this system, RTAS_UNKNOWN_SERVICE
+ * will be returned as a token. rtas_call() does look for this
+ * token and error out gracefully so rtas_call(rtas_token("str"), ...)
+ * may be safely used for one-shot calls to RTAS.
+ *
+ */
+
+typedef u32 rtas_arg_t;
+
+struct rtas_args {
+ u32 token;
+ u32 nargs;
+ u32 nret;
+ rtas_arg_t args[16];
+ rtas_arg_t *rets; /* Pointer to return values in args[]. */
+};
+
+struct rtas_t {
+ unsigned long entry; /* physical address pointer */
+ unsigned long base; /* physical address pointer */
+ unsigned long size;
+ spinlock_t lock;
+ struct rtas_args args;
+ struct device_node *dev; /* virtual address pointer */
+};
+
+/* RTAS event classes */
+#define RTAS_INTERNAL_ERROR 0x80000000 /* set bit 0 */
+#define RTAS_EPOW_WARNING 0x40000000 /* set bit 1 */
+#define RTAS_POWERMGM_EVENTS 0x20000000 /* set bit 2 */
+#define RTAS_HOTPLUG_EVENTS 0x10000000 /* set bit 3 */
+#define RTAS_EVENT_SCAN_ALL_EVENTS 0xf0000000
+
+/* RTAS event severity */
+#define RTAS_SEVERITY_FATAL 0x5
+#define RTAS_SEVERITY_ERROR 0x4
+#define RTAS_SEVERITY_ERROR_SYNC 0x3
+#define RTAS_SEVERITY_WARNING 0x2
+#define RTAS_SEVERITY_EVENT 0x1
+#define RTAS_SEVERITY_NO_ERROR 0x0
+
+/* RTAS event disposition */
+#define RTAS_DISP_FULLY_RECOVERED 0x0
+#define RTAS_DISP_LIMITED_RECOVERY 0x1
+#define RTAS_DISP_NOT_RECOVERED 0x2
+
+/* RTAS event initiator */
+#define RTAS_INITIATOR_UNKNOWN 0x0
+#define RTAS_INITIATOR_CPU 0x1
+#define RTAS_INITIATOR_PCI 0x2
+#define RTAS_INITIATOR_ISA 0x3
+#define RTAS_INITIATOR_MEMORY 0x4
+#define RTAS_INITIATOR_POWERMGM 0x5
+
+/* RTAS event target */
+#define RTAS_TARGET_UNKNOWN 0x0
+#define RTAS_TARGET_CPU 0x1
+#define RTAS_TARGET_PCI 0x2
+#define RTAS_TARGET_ISA 0x3
+#define RTAS_TARGET_MEMORY 0x4
+#define RTAS_TARGET_POWERMGM 0x5
+
+/* RTAS event type */
+#define RTAS_TYPE_RETRY 0x01
+#define RTAS_TYPE_TCE_ERR 0x02
+#define RTAS_TYPE_INTERN_DEV_FAIL 0x03
+#define RTAS_TYPE_TIMEOUT 0x04
+#define RTAS_TYPE_DATA_PARITY 0x05
+#define RTAS_TYPE_ADDR_PARITY 0x06
+#define RTAS_TYPE_CACHE_PARITY 0x07
+#define RTAS_TYPE_ADDR_INVALID 0x08
+#define RTAS_TYPE_ECC_UNCORR 0x09
+#define RTAS_TYPE_ECC_CORR 0x0a
+#define RTAS_TYPE_EPOW 0x40
+#define RTAS_TYPE_PLATFORM 0xE0
+#define RTAS_TYPE_IO 0xE1
+#define RTAS_TYPE_INFO 0xE2
+#define RTAS_TYPE_DEALLOC 0xE3
+#define RTAS_TYPE_DUMP 0xE4
+/* I don't add PowerMGM events right now; this is a different topic */
+#define RTAS_TYPE_PMGM_POWER_SW_ON 0x60
+#define RTAS_TYPE_PMGM_POWER_SW_OFF 0x61
+#define RTAS_TYPE_PMGM_LID_OPEN 0x62
+#define RTAS_TYPE_PMGM_LID_CLOSE 0x63
+#define RTAS_TYPE_PMGM_SLEEP_BTN 0x64
+#define RTAS_TYPE_PMGM_WAKE_BTN 0x65
+#define RTAS_TYPE_PMGM_BATTERY_WARN 0x66
+#define RTAS_TYPE_PMGM_BATTERY_CRIT 0x67
+#define RTAS_TYPE_PMGM_SWITCH_TO_BAT 0x68
+#define RTAS_TYPE_PMGM_SWITCH_TO_AC 0x69
+#define RTAS_TYPE_PMGM_KBD_OR_MOUSE 0x6a
+#define RTAS_TYPE_PMGM_ENCLOS_OPEN 0x6b
+#define RTAS_TYPE_PMGM_ENCLOS_CLOSED 0x6c
+#define RTAS_TYPE_PMGM_RING_INDICATE 0x6d
+#define RTAS_TYPE_PMGM_LAN_ATTENTION 0x6e
+#define RTAS_TYPE_PMGM_TIME_ALARM 0x6f
+#define RTAS_TYPE_PMGM_CONFIG_CHANGE 0x70
+#define RTAS_TYPE_PMGM_SERVICE_PROC 0x71
+
+struct rtas_error_log {
+ unsigned long version:8; /* Architectural version */
+ unsigned long severity:3; /* Severity level of error */
+ unsigned long disposition:2; /* Degree of recovery */
+ unsigned long extended:1; /* extended log present? */
+ unsigned long /* reserved */ :2; /* Reserved for future use */
+ unsigned long initiator:4; /* Initiator of event */
+ unsigned long target:4; /* Target of failed operation */
+ unsigned long type:8; /* General event or error*/
+ unsigned long extended_log_length:32; /* length in bytes */
+ unsigned char buffer[1];
+};
+
+/*
+ * This can be set by the rtas_flash module so that it can get called
+ * as the absolutely last thing before the kernel terminates.
+ */
+extern void (*rtas_flash_term_hook)(int);
+
+extern struct rtas_t rtas;
+
+extern void enter_rtas(unsigned long);
+extern int rtas_token(const char *service);
+extern int rtas_service_present(const char *service);
+extern int rtas_call(int token, int, int, int *, ...);
+extern void rtas_restart(char *cmd);
+extern void rtas_power_off(void);
+extern void rtas_halt(void);
+extern void rtas_os_term(char *str);
+extern int rtas_get_sensor(int sensor, int index, int *state);
+extern int rtas_get_power_level(int powerdomain, int *level);
+extern int rtas_set_power_level(int powerdomain, int level, int *setlevel);
+extern int rtas_set_indicator(int indicator, int index, int new_value);
+extern int rtas_set_indicator_fast(int indicator, int index, int new_value);
+extern void rtas_progress(char *s, unsigned short hex);
+extern void rtas_initialize(void);
+
+struct rtc_time;
+extern unsigned long rtas_get_boot_time(void);
+extern void rtas_get_rtc_time(struct rtc_time *rtc_time);
+extern int rtas_set_rtc_time(struct rtc_time *rtc_time);
+
+extern unsigned int rtas_busy_delay_time(int status);
+extern unsigned int rtas_busy_delay(int status);
+
+extern int early_init_dt_scan_rtas(unsigned long node,
+ const char *uname, int depth, void *data);
+
+extern void pSeries_log_error(char *buf, unsigned int err_type, int fatal);
+
+/* Error types logged. */
+#define ERR_FLAG_ALREADY_LOGGED 0x0
+#define ERR_FLAG_BOOT 0x1 /* log was pulled from NVRAM on boot */
+#define ERR_TYPE_RTAS_LOG 0x2 /* from rtas event-scan */
+#define ERR_TYPE_KERNEL_PANIC 0x4 /* from panic() */
+
+/* All the types and not flags */
+#define ERR_TYPE_MASK (ERR_TYPE_RTAS_LOG | ERR_TYPE_KERNEL_PANIC)
+
+#define RTAS_DEBUG KERN_DEBUG "RTAS: "
+
+#define RTAS_ERROR_LOG_MAX 2048
+
+/*
+ * Return the firmware-specified size of the error log buffer
+ * for all rtas calls that require an error buffer argument.
+ * This includes 'check-exception' and 'rtas-last-error'.
+ */
+extern int rtas_get_error_log_max(void);
+
+/* Event Scan Parameters */
+#define EVENT_SCAN_ALL_EVENTS 0xf0000000
+#define SURVEILLANCE_TOKEN 9000
+#define LOG_NUMBER 64 /* must be a power of two */
+#define LOG_NUMBER_MASK (LOG_NUMBER-1)
+
+/* Some RTAS ops require a data buffer, and that buffer must be below 4G.
+ * Rather than having a memory allocator, just use this buffer: take the
+ * lock first, make the RTAS call, and copy the data out instead of
+ * holding the buffer for long.
+ */
+
+#define RTAS_DATA_BUF_SIZE 4096
+extern spinlock_t rtas_data_buf_lock;
+extern char rtas_data_buf[RTAS_DATA_BUF_SIZE];
+
+/* RMO buffer reserved for user-space RTAS use */
+extern unsigned long rtas_rmo_buf;
+
+#define GLOBAL_INTERRUPT_QUEUE 9005
+
+/**
+ * rtas_config_addr - Format a busno, devfn and reg for RTAS.
+ * @busno: The bus number.
+ * @devfn: The device and function number as encoded by PCI_DEVFN().
+ * @reg: The register number.
+ *
+ * This function encodes the given busno, devfn and register number as
+ * required for RTAS calls that take a "config_addr" parameter.
+ * See PAPR requirement 7.3.4-1 for more info.
+ */
+static inline u32 rtas_config_addr(int busno, int devfn, int reg)
+{
+ return ((reg & 0xf00) << 20) | ((busno & 0xff) << 16) |
+ (devfn << 8) | (reg & 0xff);
+}
+
+#endif /* __KERNEL__ */
+#endif /* _POWERPC_RTAS_H */
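
A hedged sketch of the rtas_token()/rtas_call() pattern described in the header comment above, using the "get-time-of-day" service (0 inputs, 8 outputs including the status word). The output layout shown is an assumption based on how the RTAS RTC helpers use this service; it is not part of the header itself.

static int example_rtas_read_year(int *year)
{
	int ret[8];
	int token = rtas_token("get-time-of-day");
	int status;

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENODEV;

	status = rtas_call(token, 0, 8, ret);	/* ret[] receives the non-status outputs */
	if (status == 0)
		*year = ret[0];			/* first output word is the year */
	return status;
}
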
diff --git a/arch/powerpc/include/asm/rtc.h b/arch/powerpc/include/asm/rtc.h
new file mode 100644
index 0000000..f580292
--- /dev/null
+++ b/arch/powerpc/include/asm/rtc.h
@@ -0,0 +1,78 @@
+/*
+ * Real-time clock definitions and interfaces
+ *
+ * Author: Tom Rini <trini@mvista.com>
+ *
+ * 2002 (c) MontaVista, Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ * Based on:
+ * include/asm-m68k/rtc.h
+ *
+ * Copyright Richard Zidlicky
+ * implementation details for genrtc/q40rtc driver
+ *
+ * And the old drivers/macintosh/rtc.c which was heavily based on:
+ * Linux/SPARC Real Time Clock Driver
+ * Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu)
+ *
+ * With additional work by Paul Mackerras and Franz Sirl.
+ */
+
+#ifndef __ASM_POWERPC_RTC_H__
+#define __ASM_POWERPC_RTC_H__
+
+#ifdef __KERNEL__
+
+#include <linux/rtc.h>
+
+#include <asm/machdep.h>
+#include <asm/time.h>
+
+#define RTC_PIE 0x40 /* periodic interrupt enable */
+#define RTC_AIE 0x20 /* alarm interrupt enable */
+#define RTC_UIE 0x10 /* update-finished interrupt enable */
+
+/* some dummy definitions */
+#define RTC_BATT_BAD 0x100 /* battery bad */
+#define RTC_SQWE 0x08 /* enable square-wave output */
+#define RTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */
+#define RTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */
+#define RTC_DST_EN 0x01 /* auto switch DST - works for USA only */
+
+static inline unsigned int get_rtc_time(struct rtc_time *time)
+{
+ if (ppc_md.get_rtc_time)
+ ppc_md.get_rtc_time(time);
+ return RTC_24H;
+}
+
+/* Set the current date and time in the real time clock. */
+static inline int set_rtc_time(struct rtc_time *time)
+{
+ if (ppc_md.set_rtc_time)
+ return ppc_md.set_rtc_time(time);
+ return -EINVAL;
+}
+
+static inline unsigned int get_rtc_ss(void)
+{
+ struct rtc_time h;
+
+ get_rtc_time(&h);
+ return h.tm_sec;
+}
+
+static inline int get_rtc_pll(struct rtc_pll_info *pll)
+{
+ return -EINVAL;
+}
+static inline int set_rtc_pll(struct rtc_pll_info *pll)
+{
+ return -EINVAL;
+}
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_POWERPC_RTC_H__ */
diff --git a/arch/powerpc/include/asm/rwsem.h b/arch/powerpc/include/asm/rwsem.h
new file mode 100644
index 0000000..24cd928
--- /dev/null
+++ b/arch/powerpc/include/asm/rwsem.h
@@ -0,0 +1,173 @@
+#ifndef _ASM_POWERPC_RWSEM_H
+#define _ASM_POWERPC_RWSEM_H
+
+#ifndef _LINUX_RWSEM_H
+#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
+#endif
+
+#ifdef __KERNEL__
+
+/*
+ * R/W semaphores for PPC using the stuff in lib/rwsem.c.
+ * Adapted largely from include/asm-i386/rwsem.h
+ * by Paul Mackerras <paulus@samba.org>.
+ */
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <asm/atomic.h>
+#include <asm/system.h>
+
+/*
+ * the semaphore definition
+ */
+struct rw_semaphore {
+ /* XXX this should be able to be an atomic_t -- paulus */
+ signed int count;
+#define RWSEM_UNLOCKED_VALUE 0x00000000
+#define RWSEM_ACTIVE_BIAS 0x00000001
+#define RWSEM_ACTIVE_MASK 0x0000ffff
+#define RWSEM_WAITING_BIAS (-0x00010000)
+#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
+#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+ spinlock_t wait_lock;
+ struct list_head wait_list;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
+#else
+# define __RWSEM_DEP_MAP_INIT(lockname)
+#endif
+
+#define __RWSEM_INITIALIZER(name) \
+ { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
+ LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
+
+#define DECLARE_RWSEM(name) \
+ struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+
+extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
+
+extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
+ struct lock_class_key *key);
+
+#define init_rwsem(sem) \
+ do { \
+ static struct lock_class_key __key; \
+ \
+ __init_rwsem((sem), #sem, &__key); \
+ } while (0)
+
+/*
+ * lock for reading
+ */
+static inline void __down_read(struct rw_semaphore *sem)
+{
+ if (unlikely(atomic_inc_return((atomic_t *)(&sem->count)) <= 0))
+ rwsem_down_read_failed(sem);
+}
+
+static inline int __down_read_trylock(struct rw_semaphore *sem)
+{
+ int tmp;
+
+ while ((tmp = sem->count) >= 0) {
+ if (tmp == cmpxchg(&sem->count, tmp,
+ tmp + RWSEM_ACTIVE_READ_BIAS)) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * lock for writing
+ */
+static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
+{
+ int tmp;
+
+ tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
+ (atomic_t *)(&sem->count));
+ if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
+ rwsem_down_write_failed(sem);
+}
+
+static inline void __down_write(struct rw_semaphore *sem)
+{
+ __down_write_nested(sem, 0);
+}
+
+static inline int __down_write_trylock(struct rw_semaphore *sem)
+{
+ int tmp;
+
+ tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
+ RWSEM_ACTIVE_WRITE_BIAS);
+ return tmp == RWSEM_UNLOCKED_VALUE;
+}
+
+/*
+ * unlock after reading
+ */
+static inline void __up_read(struct rw_semaphore *sem)
+{
+ int tmp;
+
+ tmp = atomic_dec_return((atomic_t *)(&sem->count));
+ if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
+ rwsem_wake(sem);
+}
+
+/*
+ * unlock after writing
+ */
+static inline void __up_write(struct rw_semaphore *sem)
+{
+ if (unlikely(atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
+ (atomic_t *)(&sem->count)) < 0))
+ rwsem_wake(sem);
+}
+
+/*
+ * implement atomic add functionality
+ */
+static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
+{
+ atomic_add(delta, (atomic_t *)(&sem->count));
+}
+
+/*
+ * downgrade write lock to read lock
+ */
+static inline void __downgrade_write(struct rw_semaphore *sem)
+{
+ int tmp;
+
+ tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
+ if (tmp < 0)
+ rwsem_downgrade_wake(sem);
+}
+
+/*
+ * implement exchange and add functionality
+ */
+static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
+{
+ return atomic_add_return(delta, (atomic_t *)(&sem->count));
+}
+
+static inline int rwsem_is_locked(struct rw_semaphore *sem)
+{
+ return (sem->count != 0);
+}
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_RWSEM_H */
diff --git a/arch/powerpc/include/asm/scatterlist.h b/arch/powerpc/include/asm/scatterlist.h
new file mode 100644
index 0000000..fcf7d55
--- /dev/null
+++ b/arch/powerpc/include/asm/scatterlist.h
@@ -0,0 +1,50 @@
+#ifndef _ASM_POWERPC_SCATTERLIST_H
+#define _ASM_POWERPC_SCATTERLIST_H
+/*
+ * Copyright (C) 2001 PPC64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#include <asm/dma.h>
+
+struct scatterlist {
+#ifdef CONFIG_DEBUG_SG
+ unsigned long sg_magic;
+#endif
+ unsigned long page_link;
+ unsigned int offset;
+ unsigned int length;
+
+ /* For TCE support */
+ dma_addr_t dma_address;
+ u32 dma_length;
+};
+
+/*
+ * These macros should be used after a dma_map_sg call has been done
+ * to get bus addresses of each of the SG entries and their lengths.
+ * You should only work with the number of sg entries dma_map_sg
+ * returns, or alternatively stop on the first sg_dma_len(sg) which
+ * is 0.
+ */
+#define sg_dma_address(sg) ((sg)->dma_address)
+#ifdef __powerpc64__
+#define sg_dma_len(sg) ((sg)->dma_length)
+#else
+#define sg_dma_len(sg) ((sg)->length)
+#endif
+
+#ifdef __powerpc64__
+#define ISA_DMA_THRESHOLD (~0UL)
+#endif
+
+#define ARCH_HAS_SG_CHAIN
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_SCATTERLIST_H */
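
A small sketch of the sg_dma_address()/sg_dma_len() usage described in the comment above (illustrative only, not part of the header; for_each_sg() is assumed to come from linux/scatterlist.h, nents is the count returned by dma_map_sg(), and the device-programming step is left as a placeholder):

static void example_walk_mapped_sg(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 len = sg_dma_len(sg);

		if (!len)		/* stop at the first zero-length entry */
			break;
		(void)addr;		/* program addr/len into the device here */
	}
}
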
diff --git a/arch/powerpc/include/asm/seccomp.h b/arch/powerpc/include/asm/seccomp.h
new file mode 100644
index 0000000..853765e
--- /dev/null
+++ b/arch/powerpc/include/asm/seccomp.h
@@ -0,0 +1,20 @@
+#ifndef _ASM_POWERPC_SECCOMP_H
+#define _ASM_POWERPC_SECCOMP_H
+
+#ifdef __KERNEL__
+#include <linux/thread_info.h>
+#endif
+
+#include <linux/unistd.h>
+
+#define __NR_seccomp_read __NR_read
+#define __NR_seccomp_write __NR_write
+#define __NR_seccomp_exit __NR_exit
+#define __NR_seccomp_sigreturn __NR_rt_sigreturn
+
+#define __NR_seccomp_read_32 __NR_read
+#define __NR_seccomp_write_32 __NR_write
+#define __NR_seccomp_exit_32 __NR_exit
+#define __NR_seccomp_sigreturn_32 __NR_sigreturn
+
+#endif /* _ASM_POWERPC_SECCOMP_H */
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
new file mode 100644
index 0000000..916018e
--- /dev/null
+++ b/arch/powerpc/include/asm/sections.h
@@ -0,0 +1,22 @@
+#ifndef _ASM_POWERPC_SECTIONS_H
+#define _ASM_POWERPC_SECTIONS_H
+#ifdef __KERNEL__
+
+#include <asm-generic/sections.h>
+
+#ifdef __powerpc64__
+
+extern char _end[];
+
+static inline int in_kernel_text(unsigned long addr)
+{
+ if (addr >= (unsigned long)_stext && addr < (unsigned long)__init_end)
+ return 1;
+
+ return 0;
+}
+
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_SECTIONS_H */
diff --git a/arch/powerpc/include/asm/sembuf.h b/arch/powerpc/include/asm/sembuf.h
new file mode 100644
index 0000000..99a4193
--- /dev/null
+++ b/arch/powerpc/include/asm/sembuf.h
@@ -0,0 +1,36 @@
+#ifndef _ASM_POWERPC_SEMBUF_H
+#define _ASM_POWERPC_SEMBUF_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/*
+ * The semid64_ds structure for PPC architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct semid64_ds {
+ struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
+#ifndef __powerpc64__
+ unsigned long __unused1;
+#endif
+ __kernel_time_t sem_otime; /* last semop time */
+#ifndef __powerpc64__
+ unsigned long __unused2;
+#endif
+ __kernel_time_t sem_ctime; /* last change time */
+ unsigned long sem_nsems; /* no. of semaphores in array */
+ unsigned long __unused3;
+ unsigned long __unused4;
+};
+
+#endif /* _ASM_POWERPC_SEMBUF_H */
diff --git a/arch/powerpc/include/asm/serial.h b/arch/powerpc/include/asm/serial.h
new file mode 100644
index 0000000..3e8589b
--- /dev/null
+++ b/arch/powerpc/include/asm/serial.h
@@ -0,0 +1,24 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_POWERPC_SERIAL_H
+#define _ASM_POWERPC_SERIAL_H
+
+/*
+ * Serial ports are not listed here, because they are discovered
+ * through the device tree.
+ */
+
+/* Default baud base if not found in device-tree */
+#define BASE_BAUD ( 1843200 / 16 )
+
+#ifdef CONFIG_PPC_UDBG_16550
+extern void find_legacy_serial_ports(void);
+#else
+#define find_legacy_serial_ports() do { } while (0)
+#endif
+
+#endif /* _ASM_POWERPC_SERIAL_H */
diff --git a/arch/powerpc/include/asm/setjmp.h b/arch/powerpc/include/asm/setjmp.h
new file mode 100644
index 0000000..279d03a
--- /dev/null
+++ b/arch/powerpc/include/asm/setjmp.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright © 2008 Michael Neuling IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+#ifndef _ASM_POWERPC_SETJMP_H
+#define _ASM_POWERPC_SETJMP_H
+
+#define JMP_BUF_LEN 23
+
+extern long setjmp(long *);
+extern void longjmp(long *, long);
+
+#endif /* _ASM_POWERPC_SETJMP_H */
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
new file mode 100644
index 0000000..817fac0
--- /dev/null
+++ b/arch/powerpc/include/asm/setup.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_POWERPC_SETUP_H
+#define _ASM_POWERPC_SETUP_H
+
+#define COMMAND_LINE_SIZE 512
+
+#endif /* _ASM_POWERPC_SETUP_H */
diff --git a/arch/powerpc/include/asm/shmbuf.h b/arch/powerpc/include/asm/shmbuf.h
new file mode 100644
index 0000000..8efa396
--- /dev/null
+++ b/arch/powerpc/include/asm/shmbuf.h
@@ -0,0 +1,59 @@
+#ifndef _ASM_POWERPC_SHMBUF_H
+#define _ASM_POWERPC_SHMBUF_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/*
+ * The shmid64_ds structure for PPC architecture.
+ *
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct shmid64_ds {
+ struct ipc64_perm shm_perm; /* operation perms */
+#ifndef __powerpc64__
+ unsigned long __unused1;
+#endif
+ __kernel_time_t shm_atime; /* last attach time */
+#ifndef __powerpc64__
+ unsigned long __unused2;
+#endif
+ __kernel_time_t shm_dtime; /* last detach time */
+#ifndef __powerpc64__
+ unsigned long __unused3;
+#endif
+ __kernel_time_t shm_ctime; /* last change time */
+#ifndef __powerpc64__
+ unsigned long __unused4;
+#endif
+ size_t shm_segsz; /* size of segment (bytes) */
+ __kernel_pid_t shm_cpid; /* pid of creator */
+ __kernel_pid_t shm_lpid; /* pid of last operator */
+ unsigned long shm_nattch; /* no. of current attaches */
+ unsigned long __unused5;
+ unsigned long __unused6;
+};
+
+struct shminfo64 {
+ unsigned long shmmax;
+ unsigned long shmmin;
+ unsigned long shmmni;
+ unsigned long shmseg;
+ unsigned long shmall;
+ unsigned long __unused1;
+ unsigned long __unused2;
+ unsigned long __unused3;
+ unsigned long __unused4;
+};
+
+#endif /* _ASM_POWERPC_SHMBUF_H */
diff --git a/arch/powerpc/include/asm/shmparam.h b/arch/powerpc/include/asm/shmparam.h
new file mode 100644
index 0000000..5cda42a
--- /dev/null
+++ b/arch/powerpc/include/asm/shmparam.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_POWERPC_SHMPARAM_H
+#define _ASM_POWERPC_SHMPARAM_H
+
+#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
+
+#endif /* _ASM_POWERPC_SHMPARAM_H */
diff --git a/arch/powerpc/include/asm/sigcontext.h b/arch/powerpc/include/asm/sigcontext.h
new file mode 100644
index 0000000..9c1f24f
--- /dev/null
+++ b/arch/powerpc/include/asm/sigcontext.h
@@ -0,0 +1,87 @@
+#ifndef _ASM_POWERPC_SIGCONTEXT_H
+#define _ASM_POWERPC_SIGCONTEXT_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/compiler.h>
+#include <asm/ptrace.h>
+#ifdef __powerpc64__
+#include <asm/elf.h>
+#endif
+
+struct sigcontext {
+ unsigned long _unused[4];
+ int signal;
+#ifdef __powerpc64__
+ int _pad0;
+#endif
+ unsigned long handler;
+ unsigned long oldmask;
+ struct pt_regs __user *regs;
+#ifdef __powerpc64__
+ elf_gregset_t gp_regs;
+ elf_fpregset_t fp_regs;
+/*
+ * To maintain compatibility with current implementations the sigcontext is
+ * extended by appending a pointer (v_regs) to a quadword type (elf_vrreg_t)
+ * followed by an unstructured (vmx_reserve) field of 69 doublewords. This
+ * allows the array of vector registers to be quadword aligned independent of
+ * the alignment of the containing sigcontext or ucontext. It is the
+ * responsibility of the code setting the sigcontext to set this pointer to
+ * either NULL (if this processor does not support the VMX feature) or the
+ * address of the first quadword within the allocated (vmx_reserve) area.
+ *
+ * The pointer (v_regs) of vector type (elf_vrreg_t) is type compatible with
+ * an array of 34 quadword entries (elf_vrregset_t). The entries with
+ * indexes 0-31 contain the corresponding vector registers. The entry with
+ * index 32 contains the vscr as the last word (offset 12) within the
+ * quadword. This allows the vscr to be stored as either a quadword (since
+ * it must be copied via a vector register to/from storage) or as a word.
+ * The entry with index 33 contains the vrsave as the first word (offset 0)
+ * within the quadword.
+ *
+ * Part of the VSX data is stored here also by extending vmx_reserve
+ * by an additional 32 double words. Architecturally the layout of
+ * the VSR registers and how they overlap on top of the legacy FPR and
+ * VR registers is shown below:
+ *
+ * VSR doubleword 0 VSR doubleword 1
+ * ----------------------------------------------------------------
+ * VSR[0] | FPR[0] | |
+ * ----------------------------------------------------------------
+ * VSR[1] | FPR[1] | |
+ * ----------------------------------------------------------------
+ * | ... | |
+ * | ... | |
+ * ----------------------------------------------------------------
+ * VSR[30] | FPR[30] | |
+ * ----------------------------------------------------------------
+ * VSR[31] | FPR[31] | |
+ * ----------------------------------------------------------------
+ * VSR[32] | VR[0] |
+ * ----------------------------------------------------------------
+ * VSR[33] | VR[1] |
+ * ----------------------------------------------------------------
+ * | ... |
+ * | ... |
+ * ----------------------------------------------------------------
+ * VSR[62] | VR[30] |
+ * ----------------------------------------------------------------
+ * VSR[63] | VR[31] |
+ * ----------------------------------------------------------------
+ *
+ * FPR/VSR 0-31 doubleword 0 is stored in fp_regs, and VMX/VSR 32-63
+ * is stored at the start of vmx_reserve. vmx_reserve is extended for
+ * backwards compatibility to store VSR 0-31 doubleword 1 after the VMX
+ * registers and vscr/vrsave.
+ */
+ elf_vrreg_t __user *v_regs;
+ long vmx_reserve[ELF_NVRREG+ELF_NVRREG+32+1];
+#endif
+};
+
+#endif /* _ASM_POWERPC_SIGCONTEXT_H */
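The quadword-alignment requirement described above is easiest to see in code. Below is a minimal sketch, not the kernel's actual signal-delivery code, of how setup code could point v_regs at the first 16-byte boundary inside vmx_reserve; user-access accessors and error handling are omitted.

static void example_point_v_regs(struct sigcontext __user *sc)
{
	unsigned long v = (unsigned long)sc->vmx_reserve;

	/* Round up to the next quadword (16-byte) boundary. */
	v = (v + 15UL) & ~15UL;

	/* NULL would be stored instead on CPUs without VMX support. */
	sc->v_regs = (elf_vrreg_t __user *)v;
}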
diff --git a/arch/powerpc/include/asm/siginfo.h b/arch/powerpc/include/asm/siginfo.h
new file mode 100644
index 0000000..12f1bce
--- /dev/null
+++ b/arch/powerpc/include/asm/siginfo.h
@@ -0,0 +1,26 @@
+#ifndef _ASM_POWERPC_SIGINFO_H
+#define _ASM_POWERPC_SIGINFO_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifdef __powerpc64__
+# define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
+# define SI_PAD_SIZE32 ((SI_MAX_SIZE/sizeof(int)) - 3)
+#endif
+
+#include <asm-generic/siginfo.h>
+
+/*
+ * SIGTRAP si_codes
+ */
+#define TRAP_BRANCH (__SI_FAULT|3) /* process taken branch trap */
+#define TRAP_HWBKPT (__SI_FAULT|4) /* hardware breakpoint or watchpoint */
+#undef NSIGTRAP
+#define NSIGTRAP 4
+
+#endif /* _ASM_POWERPC_SIGINFO_H */
diff --git a/arch/powerpc/include/asm/signal.h b/arch/powerpc/include/asm/signal.h
new file mode 100644
index 0000000..a7360cd
--- /dev/null
+++ b/arch/powerpc/include/asm/signal.h
@@ -0,0 +1,150 @@
+#ifndef _ASM_POWERPC_SIGNAL_H
+#define _ASM_POWERPC_SIGNAL_H
+
+#include <linux/types.h>
+
+#define _NSIG 64
+#ifdef __powerpc64__
+#define _NSIG_BPW 64
+#else
+#define _NSIG_BPW 32
+#endif
+#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
+
+typedef unsigned long old_sigset_t; /* at least 32 bits */
+
+typedef struct {
+ unsigned long sig[_NSIG_WORDS];
+} sigset_t;
+
+#define SIGHUP 1
+#define SIGINT 2
+#define SIGQUIT 3
+#define SIGILL 4
+#define SIGTRAP 5
+#define SIGABRT 6
+#define SIGIOT 6
+#define SIGBUS 7
+#define SIGFPE 8
+#define SIGKILL 9
+#define SIGUSR1 10
+#define SIGSEGV 11
+#define SIGUSR2 12
+#define SIGPIPE 13
+#define SIGALRM 14
+#define SIGTERM 15
+#define SIGSTKFLT 16
+#define SIGCHLD 17
+#define SIGCONT 18
+#define SIGSTOP 19
+#define SIGTSTP 20
+#define SIGTTIN 21
+#define SIGTTOU 22
+#define SIGURG 23
+#define SIGXCPU 24
+#define SIGXFSZ 25
+#define SIGVTALRM 26
+#define SIGPROF 27
+#define SIGWINCH 28
+#define SIGIO 29
+#define SIGPOLL SIGIO
+/*
+#define SIGLOST 29
+*/
+#define SIGPWR 30
+#define SIGSYS 31
+#define SIGUNUSED 31
+
+/* These should not be considered constants from userland. */
+#define SIGRTMIN 32
+#define SIGRTMAX _NSIG
+
+/*
+ * SA_FLAGS values:
+ *
+ * SA_ONSTACK is not currently supported, but will allow sigaltstack(2).
+ * SA_RESTART flag to get restarting signals (which were the default long ago)
+ * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
+ * SA_RESETHAND clears the handler when the signal is delivered.
+ * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
+ * SA_NODEFER prevents the current signal from being masked in the handler.
+ *
+ * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
+ * Unix names RESETHAND and NODEFER respectively.
+ */
+#define SA_NOCLDSTOP 0x00000001U
+#define SA_NOCLDWAIT 0x00000002U
+#define SA_SIGINFO 0x00000004U
+#define SA_ONSTACK 0x08000000U
+#define SA_RESTART 0x10000000U
+#define SA_NODEFER 0x40000000U
+#define SA_RESETHAND 0x80000000U
+
+#define SA_NOMASK SA_NODEFER
+#define SA_ONESHOT SA_RESETHAND
+
+#define SA_RESTORER 0x04000000U
+
+/*
+ * sigaltstack controls
+ */
+#define SS_ONSTACK 1
+#define SS_DISABLE 2
+
+#define MINSIGSTKSZ 2048
+#define SIGSTKSZ 8192
+
+#include <asm-generic/signal.h>
+
+struct old_sigaction {
+ __sighandler_t sa_handler;
+ old_sigset_t sa_mask;
+ unsigned long sa_flags;
+ __sigrestore_t sa_restorer;
+};
+
+struct sigaction {
+ __sighandler_t sa_handler;
+ unsigned long sa_flags;
+ __sigrestore_t sa_restorer;
+ sigset_t sa_mask; /* mask last for extensibility */
+};
+
+struct k_sigaction {
+ struct sigaction sa;
+};
+
+typedef struct sigaltstack {
+ void __user *ss_sp;
+ int ss_flags;
+ size_t ss_size;
+} stack_t;
+
+#ifdef __KERNEL__
+struct pt_regs;
+extern void do_signal(struct pt_regs *regs, unsigned long thread_info_flags);
+#define ptrace_signal_deliver(regs, cookie) do { } while (0)
+#endif /* __KERNEL__ */
+
+#ifndef __powerpc64__
+/*
+ * These are parameters to dbg_sigreturn syscall. They enable or
+ * disable certain debugging things that can be done from signal
+ * handlers. The dbg_sigreturn syscall *must* be called from a
+ * SA_SIGINFO signal so the ucontext can be passed to it. It takes an
+ * array of struct sig_dbg_op, which has the debug operations to
+ * perform before returning from the signal.
+ */
+struct sig_dbg_op {
+ int dbg_type;
+ unsigned long dbg_value;
+};
+
+/* Enable or disable single-stepping. The value sets the state. */
+#define SIG_DBG_SINGLE_STEPPING 1
+
+/* Enable or disable branch tracing. The value sets the state. */
+#define SIG_DBG_BRANCH_TRACING 2
+#endif /* ! __powerpc64__ */
+
+#endif /* _ASM_POWERPC_SIGNAL_H */
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
new file mode 100644
index 0000000..4d28e1e
--- /dev/null
+++ b/arch/powerpc/include/asm/smp.h
@@ -0,0 +1,127 @@
+/*
+ * smp.h: PowerPC-specific SMP code.
+ *
+ * Original was a copy of sparc smp.h. Now heavily modified
+ * for PPC.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996-2001 Cort Dougan <cort@fsmlabs.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_POWERPC_SMP_H
+#define _ASM_POWERPC_SMP_H
+#ifdef __KERNEL__
+
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/kernel.h>
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_PPC64
+#include <asm/paca.h>
+#endif
+#include <asm/percpu.h>
+
+extern int boot_cpuid;
+
+extern void cpu_die(void);
+
+#ifdef CONFIG_SMP
+
+extern void smp_send_debugger_break(int cpu);
+extern void smp_message_recv(int);
+
+DECLARE_PER_CPU(unsigned int, pvr);
+
+#ifdef CONFIG_HOTPLUG_CPU
+extern void fixup_irqs(cpumask_t map);
+int generic_cpu_disable(void);
+int generic_cpu_enable(unsigned int cpu);
+void generic_cpu_die(unsigned int cpu);
+void generic_mach_cpu_die(void);
+#endif
+
+#ifdef CONFIG_PPC64
+#define raw_smp_processor_id() (local_paca->paca_index)
+#define hard_smp_processor_id() (get_paca()->hw_cpu_id)
+#else
+/* 32-bit */
+extern int smp_hw_index[];
+
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+#define hard_smp_processor_id() (smp_hw_index[smp_processor_id()])
+#define get_hard_smp_processor_id(cpu) (smp_hw_index[(cpu)])
+#define set_hard_smp_processor_id(cpu, phys)\
+ (smp_hw_index[(cpu)] = (phys))
+#endif
+
+DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
+DECLARE_PER_CPU(cpumask_t, cpu_core_map);
+extern int cpu_to_core_id(int cpu);
+
+/* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
+ *
+ * Make sure this matches openpic_request_IPIs in open_pic.c, or what shows up
+ * in /proc/interrupts will be wrong!!! --Troy */
+#define PPC_MSG_CALL_FUNCTION 0
+#define PPC_MSG_RESCHEDULE 1
+#define PPC_MSG_CALL_FUNC_SINGLE 2
+#define PPC_MSG_DEBUGGER_BREAK 3
+
+void smp_init_iSeries(void);
+void smp_init_pSeries(void);
+void smp_init_cell(void);
+void smp_init_celleb(void);
+void smp_setup_cpu_maps(void);
+void smp_setup_cpu_sibling_map(void);
+
+extern int __cpu_disable(void);
+extern void __cpu_die(unsigned int cpu);
+
+#else
+/* for UP */
+#define hard_smp_processor_id() 0
+#define smp_setup_cpu_maps()
+
+#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_PPC64
+#define get_hard_smp_processor_id(CPU) (paca[(CPU)].hw_cpu_id)
+#define set_hard_smp_processor_id(CPU, VAL) \
+ do { (paca[(CPU)].hw_cpu_id = (VAL)); } while (0)
+
+extern void smp_release_cpus(void);
+
+#else
+/* 32-bit */
+#ifndef CONFIG_SMP
+extern int boot_cpuid_phys;
+#define get_hard_smp_processor_id(cpu) boot_cpuid_phys
+#define set_hard_smp_processor_id(cpu, phys)
+#endif
+#endif
+
+extern int smt_enabled_at_boot;
+
+extern int smp_mpic_probe(void);
+extern void smp_mpic_setup_cpu(int cpu);
+extern void smp_generic_kick_cpu(int nr);
+
+extern void smp_generic_give_timebase(void);
+extern void smp_generic_take_timebase(void);
+
+extern struct smp_ops_t *smp_ops;
+
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_SMP_H */
diff --git a/arch/powerpc/include/asm/smu.h b/arch/powerpc/include/asm/smu.h
new file mode 100644
index 0000000..7ae2753
--- /dev/null
+++ b/arch/powerpc/include/asm/smu.h
@@ -0,0 +1,700 @@
+#ifndef _SMU_H
+#define _SMU_H
+
+/*
+ * Definitions for talking to the SMU chip in newer G5 PowerMacs
+ */
+#ifdef __KERNEL__
+#include <linux/list.h>
+#endif
+#include <linux/types.h>
+
+/*
+ * Known SMU commands
+ *
+ * Most of what is below comes from looking at the Open Firmware driver,
+ * though this is still incomplete and could use better documentation here
+ * or there...
+ */
+
+
+/*
+ * Partition info commands
+ *
+ * These commands are used to retrieve the sdb-partition-XX data from
+ * the SMU. The length is always 2. First byte is the subcommand code
+ * and second byte is the partition ID.
+ *
+ * The reply is 6 bytes:
+ *
+ * - 0..1 : partition address
+ * - 2 : a byte containing the partition ID
+ * - 3 : length (maybe the other bits are the rest of the header?)
+ *
+ * The data must then be obtained with calls to another command:
+ * SMU_CMD_MISC_ee_GET_DATABLOCK_REC (described below).
+ */
+#define SMU_CMD_PARTITION_COMMAND 0x3e
+#define SMU_CMD_PARTITION_LATEST 0x01
+#define SMU_CMD_PARTITION_BASE 0x02
+#define SMU_CMD_PARTITION_UPDATE 0x03
+
+
+/*
+ * Fan control
+ *
+ * This is a "mux" for fan control commands. The command seem to
+ * act differently based on the number of arguments. With 1 byte
+ * of argument, this seem to be queries for fans status, setpoint,
+ * etc..., while with 0xe arguments, we will set the fans speeds.
+ *
+ * Queries (1 byte arg):
+ * ---------------------
+ *
+ * arg=0x01: read RPM fans status
+ * arg=0x02: read RPM fans setpoint
+ * arg=0x11: read PWM fans status
+ * arg=0x12: read PWM fans setpoint
+ *
+ * the "status" queries return the current speed while the "setpoint" ones
+ * return the programmed/target speed. It _seems_ that the result is a bit
+ * mask in the first byte of active/available fans, followed by 6 words (16
+ * bits) containing the requested speed.
+ *
+ * Setpoint (14 bytes arg):
+ * ------------------------
+ *
+ * The first arg byte is 0 for RPM fans and 0x10 for PWM. The second arg byte
+ * is the mask of fans affected by the command. It is followed by 6 words
+ * containing the setpoint value for the selected fans in the mask (or 0 if
+ * the mask value is 0)
+ */
+#define SMU_CMD_FAN_COMMAND 0x4a
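A sketch of how the 14-byte setpoint block described above could be laid out. example_build_fan_setpoint() is not a real kernel helper, and the big-endian word order is an assumption (it matches the host CPU), not something this header states.

static void example_build_fan_setpoint(u8 buf[14], u8 fan_mask,
				       const u16 rpm[6])
{
	int i;

	buf[0] = 0x00;		/* 0x00 = RPM fans, 0x10 = PWM fans */
	buf[1] = fan_mask;	/* which fans the setpoints apply to */
	for (i = 0; i < 6; i++) {
		buf[2 + i * 2] = rpm[i] >> 8;	/* assumed big-endian words */
		buf[3 + i * 2] = rpm[i] & 0xff;
	}
}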
+
+
+/*
+ * Battery access
+ *
+ * Same command number as the PMU; could it be the same syntax?
+ */
+#define SMU_CMD_BATTERY_COMMAND 0x6f
+#define SMU_CMD_GET_BATTERY_INFO 0x00
+
+/*
+ * Real time clock control
+ *
+ * This is a "mux", first data byte contains the "sub" command.
+ * The "RTC" part of the SMU controls the date, time, powerup
+ * timer, but also a PRAM
+ *
+ * Dates are in BCD format on 7 bytes:
+ * [sec] [min] [hour] [weekday] [month day] [month] [year]
+ * with month being 1 based and year minus 100
+ */
+#define SMU_CMD_RTC_COMMAND 0x8e
+#define SMU_CMD_RTC_SET_PWRUP_TIMER 0x00 /* i: 7 bytes date */
+#define SMU_CMD_RTC_GET_PWRUP_TIMER 0x01 /* o: 7 bytes date */
+#define SMU_CMD_RTC_STOP_PWRUP_TIMER 0x02
+#define SMU_CMD_RTC_SET_PRAM_BYTE_ACC 0x20 /* i: 1 byte (address?) */
+#define SMU_CMD_RTC_SET_PRAM_AUTOINC 0x21 /* i: 1 byte (data?) */
+#define SMU_CMD_RTC_SET_PRAM_LO_BYTES 0x22 /* i: 10 bytes */
+#define SMU_CMD_RTC_SET_PRAM_HI_BYTES 0x23 /* i: 10 bytes */
+#define SMU_CMD_RTC_GET_PRAM_BYTE 0x28 /* i: 1 bytes (address?) */
+#define SMU_CMD_RTC_GET_PRAM_LO_BYTES 0x29 /* o: 10 bytes */
+#define SMU_CMD_RTC_GET_PRAM_HI_BYTES 0x2a /* o: 10 bytes */
+#define SMU_CMD_RTC_SET_DATETIME 0x80 /* i: 7 bytes date */
+#define SMU_CMD_RTC_GET_DATETIME 0x81 /* o: 7 bytes date */
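A sketch decoding the 7-byte BCD date format documented above into plain binary values; example_bcd() is a local helper for illustration, not a kernel API.

static inline unsigned int example_bcd(u8 v)
{
	return (v >> 4) * 10 + (v & 0xf);
}

static void example_decode_smu_date(const u8 d[7], unsigned int *sec,
				    unsigned int *min, unsigned int *hour,
				    unsigned int *mday, unsigned int *mon,
				    unsigned int *year)
{
	*sec  = example_bcd(d[0]);
	*min  = example_bcd(d[1]);
	*hour = example_bcd(d[2]);
	/* d[3] is the weekday */
	*mday = example_bcd(d[4]);
	*mon  = example_bcd(d[5]);		/* 1-based */
	*year = example_bcd(d[6]) + 100;	/* stored as year minus 100 */
}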
+
+ /*
+ * i2c commands
+ *
+ * To issue an i2c command, you first send a parameter block to
+ * the SMU. This is a command of type 0x9a with 9 bytes of header
+ * eventually followed by data for a write:
+ *
+ * 0: bus number (from device-tree usually, SMU has lots of busses !)
+ * 1: transfer type/format (see below)
+ * 2: device address. For combined and combined4 type transfers, this
+ * is the "write" version of the address (bit 0x01 cleared)
+ * 3: subaddress length (0..3)
+ * 4: subaddress byte 0 (or only byte for subaddress length 1)
+ * 5: subaddress byte 1
+ * 6: subaddress byte 2
+ * 7: combined address (device address for combined mode data phase)
+ * 8: data length
+ *
+ * The transfer types are the same good old Apple ones it seems,
+ * that is:
+ * - 0x00: Simple transfer
+ * - 0x01: Subaddress transfer (addr write + data tx, no restart)
+ * - 0x02: Combined transfer (addr write + restart + data tx)
+ *
+ * This is then followed by actual data for a write.
+ *
+ * At this point, the OF driver seems to have a limitation on transfer
+ * sizes of 0xd bytes on reads and 0x5 bytes on writes. I do not know
+ * whether this is just an OF limit due to some temporary buffer size
+ * or if this is an SMU-imposed limit. This driver has the same limitation
+ * for now, as I use a 0x10 byte temporary buffer as well
+ *
+ * Once that is completed, a response is expected from the SMU. This is
+ * obtained via a command of type 0x9a with a length of 1 byte containing
+ * 0 as the data byte. OF also fills the rest of the data buffer with 0xff's
+ * though I can't tell yet if this is actually necessary. Once this command
+ * completes, all I can tell is what OF does. OF tests
+ * byte 0 of the reply:
+ * - on read, 0xfe or 0xfc : bus is busy, wait (see below) or nak ?
+ * - on read, 0x00 or 0x01 : reply is in buffer (after the byte 0)
+ * - on write, < 0 -> failure (immediate exit)
+ * - else, OF just exits (without error, weird)
+ *
+ * So on read, there is this wait-for-busy thing when getting a 0xfc or
+ * 0xfe result. OF does a loop of up to 64 retries, waiting 20ms and
+ * doing the above again until either the retries expire or the result
+ * is no longer 0xfe or 0xfc
+ *
+ * The Darwin I2C driver is less subtle though. On any non-success status
+ * from the response command, it waits 5ms and tries again up to 20 times;
+ * it doesn't differentiate between fatal errors and "busy" status.
+ *
+ * This driver provides an asynchronous paramblock based i2c command
+ * interface to be used either directly by low level code or by a higher
+ * level driver interfacing to the linux i2c layer. The current
+ * implementation of this relies on working timers & timer interrupts
+ * though, so be careful of calling context for now. This may be "fixed"
+ * in the future by adding a polling facility.
+ */
+#define SMU_CMD_I2C_COMMAND 0x9a
+ /* transfer types */
+#define SMU_I2C_TRANSFER_SIMPLE 0x00
+#define SMU_I2C_TRANSFER_STDSUB 0x01
+#define SMU_I2C_TRANSFER_COMBINED 0x02
+
+/*
+ * Power supply control
+ *
+ * The "sub" command is an ASCII string in the data, the
+ * data length is that of the string.
+ *
+ * The VSLEW command can be used to get or set the voltage slewing.
+ * - length 5 (only "VSLEW") : it returns "DONE" and 3 bytes of
+ * reply at data offset 6, 7 and 8.
+ * - length 8 ("VSLEWxyz") has 3 additional bytes appended, and is
+ * used to set the voltage slewing point. The SMU replies with "DONE"
+ * I yet have to figure out their exact meaning of those 3 bytes in
+ * both cases. They seem to be:
+ * x = processor mask
+ * y = op. point index
+ * z = processor freq. step index
+ * I haven't yet deciphered the result codes
+ *
+ */
+#define SMU_CMD_POWER_COMMAND 0xaa
+#define SMU_CMD_POWER_RESTART "RESTART"
+#define SMU_CMD_POWER_SHUTDOWN "SHUTDOWN"
+#define SMU_CMD_POWER_VOLTAGE_SLEW "VSLEW"
+
+/*
+ * Read ADC sensors
+ *
+ * This command takes one byte of parameter: the sensor ID (or "reg"
+ * value in the device-tree) and returns a 16-bit value
+ */
+#define SMU_CMD_READ_ADC 0xd8
+
+
+/* Misc commands
+ *
+ * This command seems to be a grab bag of various things
+ *
+ * Parameters:
+ * 1: subcommand
+ */
+#define SMU_CMD_MISC_df_COMMAND 0xdf
+
+/*
+ * Sets "system ready" status
+ *
+ * I do not yet understand exactly how it works or what it does.
+ *
+ * Guessing from OF code, 0x02 activates the display backlight. Apple uses/used
+ * the same codebase for all OF versions. On PowerBooks, this command would
+ * enable the backlight. For the G5s, it only activates the front LED. However,
+ * don't take this for granted.
+ *
+ * Parameters:
+ * 2: status [0x00, 0x01 or 0x02]
+ */
+#define SMU_CMD_MISC_df_SET_DISPLAY_LIT 0x02
+
+/*
+ * Sets mode of power switch.
+ *
+ * What this actually does is not yet known. Maybe it enables some interrupt.
+ *
+ * Parameters:
+ * 2: enable power switch? [0x00 or 0x01]
+ * 3 (optional): enable nmi? [0x00 or 0x01]
+ *
+ * Returns:
+ * If parameter 2 is 0x00 and parameter 3 is not specified, returns whether
+ * NMI is enabled. Otherwise unknown.
+ */
+#define SMU_CMD_MISC_df_NMI_OPTION 0x04
+
+/* Sets LED dimm offset.
+ *
+ * The front LED dims itself during sleep. Its brightness (or, well, the PWM
+ * frequency) depends on current time. Therefore, the SMU needs to know the
+ * timezone.
+ *
+ * Parameters:
+ * 2-8: unknown (BCD coding)
+ */
+#define SMU_CMD_MISC_df_DIMM_OFFSET 0x99
+
+
+/*
+ * Version info commands
+ *
+ * Parameters:
+ * 1 (optional): Specifies version part to retrieve
+ *
+ * Returns:
+ * Version value
+ */
+#define SMU_CMD_VERSION_COMMAND 0xea
+#define SMU_VERSION_RUNNING 0x00
+#define SMU_VERSION_BASE 0x01
+#define SMU_VERSION_UPDATE 0x02
+
+
+/*
+ * Switches
+ *
+ * These are switches whose status seems to be known to the SMU.
+ *
+ * Parameters:
+ * none
+ *
+ * Result:
+ * Switch bits (ORed, see below)
+ */
+#define SMU_CMD_SWITCHES 0xdc
+
+/* Switches bits */
+#define SMU_SWITCH_CASE_CLOSED 0x01
+#define SMU_SWITCH_AC_POWER 0x04
+#define SMU_SWITCH_POWER_SWITCH 0x08
+
+
+/*
+ * Misc commands
+ *
+ * This command seems to be a grab bag of various things
+ *
+ * SMU_CMD_MISC_ee_GET_DATABLOCK_REC is used, among others, to
+ * transfer blocks of data from the SMU. So far, I've deciphered its
+ * usage to retrieve partition data. In order to do that, you have to
+ * break your transfer into "chunks" since that command cannot transfer
+ * more than a chunk at a time. The chunk size used by OF is 0xe bytes,
+ * but it seems that the Darwin driver will let you do 0x1e bytes if
+ * your "PMU" version is >= 0x30. You can get the "PMU" version apparently
+ * either in the last 16 bits of property "smu-version-pmu" or as the 16
+ * bytes at offset 1 of "smu-version-info"
+ *
+ * For each chunk, the command takes 7 bytes of arguments:
+ * byte 0: subcommand code (0x02)
+ * byte 1: 0x04 (always, I don't know what it means, maybe the address
+ * space to use or some other nicety. It's hard coded in OF)
+ * byte 2..5: SMU address of the chunk (big endian 32 bits)
+ * byte 6: size to transfer (up to max chunk size)
+ *
+ * The data is returned directly
+ */
+#define SMU_CMD_MISC_ee_COMMAND 0xee
+#define SMU_CMD_MISC_ee_GET_DATABLOCK_REC 0x02
+
+/* Retrieves currently used watts.
+ *
+ * Parameters:
+ * 1: 0x03 (Meaning unknown)
+ */
+#define SMU_CMD_MISC_ee_GET_WATTS 0x03
+
+#define SMU_CMD_MISC_ee_LEDS_CTRL 0x04 /* i: 00 (00,01) [00] */
+#define SMU_CMD_MISC_ee_GET_DATA 0x05 /* i: 00 , o: ?? */
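A sketch of the 7 argument bytes for one GET_DATABLOCK_REC chunk, following the layout described in the comment above; example_build_datablock_args() is illustrative only.

static void example_build_datablock_args(u8 args[7], u32 smu_addr, u8 len)
{
	args[0] = SMU_CMD_MISC_ee_GET_DATABLOCK_REC;	/* subcommand 0x02 */
	args[1] = 0x04;		/* always 0x04; meaning unknown, hard coded in OF */
	args[2] = smu_addr >> 24;	/* chunk address, big-endian 32 bits */
	args[3] = smu_addr >> 16;
	args[4] = smu_addr >> 8;
	args[5] = smu_addr;
	args[6] = len;		/* up to the max chunk size (0xe or 0x1e) */
}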
+
+
+/*
+ * Power related commands
+ *
+ * Parameters:
+ * 1: subcommand
+ */
+#define SMU_CMD_POWER_EVENTS_COMMAND 0x8f
+
+/* SMU_POWER_EVENTS subcommands */
+enum {
+ SMU_PWR_GET_POWERUP_EVENTS = 0x00,
+ SMU_PWR_SET_POWERUP_EVENTS = 0x01,
+ SMU_PWR_CLR_POWERUP_EVENTS = 0x02,
+ SMU_PWR_GET_WAKEUP_EVENTS = 0x03,
+ SMU_PWR_SET_WAKEUP_EVENTS = 0x04,
+ SMU_PWR_CLR_WAKEUP_EVENTS = 0x05,
+
+ /*
+ * Get last shutdown cause
+ *
+ * Returns:
+ * 1 byte (signed char): Last shutdown cause. Exact meaning unknown.
+ */
+ SMU_PWR_LAST_SHUTDOWN_CAUSE = 0x07,
+
+ /*
+ * Sets or gets server ID. Meaning or use is unknown.
+ *
+ * Parameters:
+ * 2 (optional): Set server ID (1 byte)
+ *
+ * Returns:
+ * 1 byte (server ID?)
+ */
+ SMU_PWR_SERVER_ID = 0x08,
+};
+
+/* Power events wakeup bits */
+enum {
+ SMU_PWR_WAKEUP_KEY = 0x01, /* Wake on key press */
+ SMU_PWR_WAKEUP_AC_INSERT = 0x02, /* Wake on AC adapter plug */
+ SMU_PWR_WAKEUP_AC_CHANGE = 0x04,
+ SMU_PWR_WAKEUP_LID_OPEN = 0x08,
+ SMU_PWR_WAKEUP_RING = 0x10,
+};
+
+
+/*
+ * - Kernel side interface -
+ */
+
+#ifdef __KERNEL__
+
+/*
+ * Asynchronous SMU commands
+ *
+ * Fill in this structure and submit it via smu_queue_cmd();
+ * you get notified via the optional done() callback, or when
+ * status becomes != 1
+ */
+
+struct smu_cmd;
+
+struct smu_cmd
+{
+ /* public */
+ u8 cmd; /* command */
+ int data_len; /* data len */
+ int reply_len; /* reply len */
+ void *data_buf; /* data buffer */
+ void *reply_buf; /* reply buffer */
+ int status; /* command status */
+ void (*done)(struct smu_cmd *cmd, void *misc);
+ void *misc;
+
+ /* private */
+ struct list_head link;
+};
+
+/*
+ * Queues an SMU command; all fields have to be initialized
+ */
+extern int smu_queue_cmd(struct smu_cmd *cmd);
+
+/*
+ * Simple command wrapper. This structure embeds a small buffer
+ * to ease sending simple SMU commands from the stack
+ */
+struct smu_simple_cmd
+{
+ struct smu_cmd cmd;
+ u8 buffer[16];
+};
+
+/*
+ * Queues a simple command. All fields will be initialized by that
+ * function
+ */
+extern int smu_queue_simple(struct smu_simple_cmd *scmd, u8 command,
+ unsigned int data_len,
+ void (*done)(struct smu_cmd *cmd, void *misc),
+ void *misc,
+ ...);
+
+/*
+ * Completion helper. Pass it to smu_queue_simple or as 'done'
+ * member to smu_queue_cmd; it will call complete() on the struct
+ * completion passed in the "misc" argument
+ */
+extern void smu_done_complete(struct smu_cmd *cmd, void *misc);
+
+/*
+ * Synchronous helpers. Will spin-wait for completion of a command
+ */
+extern void smu_spinwait_cmd(struct smu_cmd *cmd);
+
+static inline void smu_spinwait_simple(struct smu_simple_cmd *scmd)
+{
+ smu_spinwait_cmd(&scmd->cmd);
+}
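A usage sketch tying the helpers above together: read one ADC sensor with a simple command and spin-wait for completion. It assumes the variadic arguments to smu_queue_simple() are the command data bytes and that the 16-bit reply lands big-endian in the embedded buffer; only call something like this from a context where spinning is acceptable.

static int example_read_adc(u8 sensor_id, u32 *value)
{
	struct smu_simple_cmd cmd;
	int rc;

	rc = smu_queue_simple(&cmd, SMU_CMD_READ_ADC, 1, NULL, NULL,
			      sensor_id);
	if (rc)
		return rc;
	smu_spinwait_simple(&cmd);
	if (cmd.cmd.status)
		return cmd.cmd.status;

	/* Reply is assumed to be a big-endian 16-bit value. */
	*value = ((u32)cmd.buffer[0] << 8) | cmd.buffer[1];
	return 0;
}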
+
+/*
+ * Poll routine to call if blocked with irqs off
+ */
+extern void smu_poll(void);
+
+
+/*
+ * Init routine, presence check....
+ */
+extern int smu_init(void);
+extern int smu_present(void);
+struct of_device;
+extern struct of_device *smu_get_ofdev(void);
+
+
+/*
+ * Common command wrappers
+ */
+extern void smu_shutdown(void);
+extern void smu_restart(void);
+struct rtc_time;
+extern int smu_get_rtc_time(struct rtc_time *time, int spinwait);
+extern int smu_set_rtc_time(struct rtc_time *time, int spinwait);
+
+/*
+ * SMU command buffer absolute address, exported by pmac_setup;
+ * this is allocated very early during boot.
+ */
+extern unsigned long smu_cmdbuf_abs;
+
+
+/*
+ * Kernel asynchronous i2c interface
+ */
+
+#define SMU_I2C_READ_MAX 0x1d
+#define SMU_I2C_WRITE_MAX 0x15
+
+/* SMU i2c header, exactly matches i2c header on wire */
+struct smu_i2c_param
+{
+ u8 bus; /* SMU bus ID (from device tree) */
+ u8 type; /* i2c transfer type */
+ u8 devaddr; /* device address (includes direction) */
+ u8 sublen; /* subaddress length */
+ u8 subaddr[3]; /* subaddress */
+ u8 caddr; /* combined address, filled by SMU driver */
+ u8 datalen; /* length of transfer */
+ u8 data[SMU_I2C_READ_MAX]; /* data */
+};
+
+struct smu_i2c_cmd
+{
+ /* public */
+ struct smu_i2c_param info;
+ void (*done)(struct smu_i2c_cmd *cmd, void *misc);
+ void *misc;
+ int status; /* 1 = pending, 0 = ok, <0 = fail */
+
+ /* private */
+ struct smu_cmd scmd;
+ int read;
+ int stage;
+ int retries;
+ u8 pdata[32];
+ struct list_head link;
+};
+
+/*
+ * Call this to queue an i2c command to the SMU. You must fill info,
+ * including info.data for a write, done and misc.
+ * For now, no polling interface is provided, so you have to use the
+ * completion callback.
+ */
+extern int smu_queue_i2c(struct smu_i2c_cmd *cmd);
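A sketch of filling the parameter block for a simple i2c write and queueing it; it assumes the usual kernel headers for memset/memcpy and errno values. How the direction bit is carried in devaddr is an assumption of this sketch; see the wire-format comment earlier in this file.

static int example_smu_i2c_write(struct smu_i2c_cmd *cmd, u8 bus, u8 devaddr,
				 const u8 *data, unsigned int len,
				 void (*done)(struct smu_i2c_cmd *, void *),
				 void *misc)
{
	if (len > SMU_I2C_WRITE_MAX)
		return -EINVAL;

	memset(cmd, 0, sizeof(*cmd));
	cmd->info.bus = bus;		/* SMU bus ID from the device tree */
	cmd->info.type = SMU_I2C_TRANSFER_SIMPLE;
	cmd->info.devaddr = devaddr;	/* assumed: direction bit included */
	cmd->info.datalen = len;
	memcpy(cmd->info.data, data, len);
	cmd->done = done;
	cmd->misc = misc;

	return smu_queue_i2c(cmd);
}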
+
+
+#endif /* __KERNEL__ */
+
+
+/*
+ * - SMU "sdb" partitions informations -
+ */
+
+
+/*
+ * Partition header format
+ */
+struct smu_sdbp_header {
+ __u8 id;
+ __u8 len;
+ __u8 version;
+ __u8 flags;
+};
+
+
+ /*
+ * Demangle 16 and 32 bit integers in some SMU partitions
+ * (currently, afaik, this concerns only the FVT partition
+ * (0x12))
+ */
+#define SMU_U16_MIX(x)	le16_to_cpu(x)
+#define SMU_U32_MIX(x) ((((x) & 0xff00ff00u) >> 8)|(((x) & 0x00ff00ffu) << 8))
+
+
+/* This is the definition of the SMU sdb-partition-0x12 table (called
+ * CPU F/V/T operating points in Darwin). The definition for all those
+ * SMU tables should be moved to some separate file
+ */
+#define SMU_SDB_FVT_ID 0x12
+
+struct smu_sdbp_fvt {
+ __u32 sysclk; /* Base SysClk frequency in Hz for
+ * this operating point. Value need to
+ * be unmixed with SMU_U32_MIX()
+ */
+ __u8 pad;
+ __u8 maxtemp; /* Max temp. supported by this
+ * operating point
+ */
+
+ __u16 volts[3]; /* CPU core voltage for the 3
+ * PowerTune modes, a mode with
+ * 0V = not supported. Value need
+ * to be unmixed with SMU_U16_MIX()
+ */
+};
+
+/* This partition contains voltage & current sensor calibration
+ * information
+ */
+#define SMU_SDB_CPUVCP_ID 0x21
+
+struct smu_sdbp_cpuvcp {
+ __u16 volt_scale; /* u4.12 fixed point */
+ __s16 volt_offset; /* s4.12 fixed point */
+ __u16 curr_scale; /* u4.12 fixed point */
+ __s16 curr_offset; /* s4.12 fixed point */
+ __s32 power_quads[3]; /* s4.28 fixed point */
+};
+
+/* This partition contains CPU thermal diode calibration
+ */
+#define SMU_SDB_CPUDIODE_ID 0x18
+
+struct smu_sdbp_cpudiode {
+ __u16 m_value; /* u1.15 fixed point */
+ __s16 b_value; /* s10.6 fixed point */
+
+};
+
+/* This partition contains Slots power calibration
+ */
+#define SMU_SDB_SLOTSPOW_ID 0x78
+
+struct smu_sdbp_slotspow {
+ __u16 pow_scale; /* u4.12 fixed point */
+ __s16 pow_offset; /* s4.12 fixed point */
+};
+
+/* This partition contains machine specific version information about
+ * the sensor/control layout
+ */
+#define SMU_SDB_SENSORTREE_ID 0x25
+
+struct smu_sdbp_sensortree {
+ __u8 model_id;
+ __u8 unknown[3];
+};
+
+/* This partition contains CPU thermal control PID information. So far,
+ * only single-CPU machines have been seen with an SMU, so we assume this
+ * carries only information for those
+ */
+#define SMU_SDB_CPUPIDDATA_ID 0x17
+
+struct smu_sdbp_cpupiddata {
+ __u8 unknown1;
+ __u8 target_temp_delta;
+ __u8 unknown2;
+ __u8 history_len;
+ __s16 power_adj;
+ __u16 max_power;
+ __s32 gp,gr,gd;
+};
+
+
+/* Other partitions without known structures */
+#define SMU_SDB_DEBUG_SWITCHES_ID 0x05
+
+#ifdef __KERNEL__
+/*
+ * This returns the pointer to an SMU "sdb" partition data or NULL
+ * if not found. The data format is described below
+ */
+extern const struct smu_sdbp_header *smu_get_sdb_partition(int id,
+ unsigned int *size);
+
+/* Get "sdb" partition data from an SMU satellite */
+extern struct smu_sdbp_header *smu_sat_get_sdb_partition(unsigned int sat_id,
+ int id, unsigned int *size);
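A usage sketch for the partition accessors above: pull the F/V/T table and unmix one field. That the partition payload immediately follows the returned header is an assumption of this sketch.

static u32 example_fvt_sysclk_hz(void)
{
	const struct smu_sdbp_header *hdr;
	const struct smu_sdbp_fvt *fvt;
	unsigned int size;

	hdr = smu_get_sdb_partition(SMU_SDB_FVT_ID, &size);
	if (hdr == NULL)
		return 0;

	/* Partition payload is assumed to follow the 4-byte header. */
	fvt = (const struct smu_sdbp_fvt *)&hdr[1];
	return SMU_U32_MIX(fvt->sysclk);	/* base SysClk in Hz */
}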
+
+
+#endif /* __KERNEL__ */
+
+
+/*
+ * - Userland interface -
+ */
+
+/*
+ * A given instance of the device can be configured for 2 different
+ * things at the moment:
+ *
+ * - sending SMU commands (default at open() time)
+ * - receiving SMU events (not yet implemented)
+ *
+ * Commands are written with write() of a command block. They can be
+ * "driver" commands (for example to switch to event reception mode)
+ * or real SMU commands. They are made of a header followed by command
+ * data if any.
+ *
+ * For SMU commands (not for driver commands), you can then read() back
+ * a reply. The reader will be blocked or not depending on how the device
+ * file is opened. poll() isn't implemented yet. The reply will consist
+ * of a header as well, followed by the reply data if any. You should
+ * always provide a buffer large enough for the maximum reply data, I
+ * recommand one page.
+ *
+ * It is illegal to send SMU commands through a file descriptor configured
+ * for event reception
+ *
+ */
+struct smu_user_cmd_hdr
+{
+ __u32 cmdtype;
+#define SMU_CMDTYPE_SMU 0 /* SMU command */
+#define SMU_CMDTYPE_WANTS_EVENTS 1 /* switch fd to events mode */
+#define SMU_CMDTYPE_GET_PARTITION 2 /* retrieve an sdb partition */
+
+ __u8 cmd; /* SMU command byte */
+ __u8 pad[3]; /* padding */
+ __u32 data_len; /* Length of data following */
+};
+
+struct smu_user_reply_hdr
+{
+ __u32 status; /* Command status */
+	__u32 reply_len;	/* Length of data following */
+};
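A userland sketch of the write()/read() protocol described above, for a file descriptor opened on the SMU character device (typically /dev/smu); the single-write, single-read framing and the device path are assumptions of this sketch, and error handling is minimal.

#include <string.h>
#include <unistd.h>

static int example_smu_user_cmd(int fd, __u8 cmd,
				const void *data, __u32 data_len,
				void *reply, __u32 reply_max)
{
	struct smu_user_cmd_hdr hdr;
	struct smu_user_reply_hdr rhdr;
	char buf[sizeof(hdr) + 256], rbuf[sizeof(rhdr) + 4096];
	ssize_t n;

	if (data_len > 256)
		return -1;

	memset(&hdr, 0, sizeof(hdr));
	hdr.cmdtype = SMU_CMDTYPE_SMU;
	hdr.cmd = cmd;
	hdr.data_len = data_len;
	memcpy(buf, &hdr, sizeof(hdr));
	memcpy(buf + sizeof(hdr), data, data_len);

	if (write(fd, buf, sizeof(hdr) + data_len) < 0)
		return -1;

	/* Blocking read: reply header followed by the reply data, if any. */
	n = read(fd, rbuf, sizeof(rbuf));
	if (n < (ssize_t)sizeof(rhdr))
		return -1;
	memcpy(&rhdr, rbuf, sizeof(rhdr));
	if (rhdr.status != 0)
		return -1;
	if (rhdr.reply_len > reply_max)
		rhdr.reply_len = reply_max;
	memcpy(reply, rbuf + sizeof(rhdr), rhdr.reply_len);
	return 0;
}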
+
+#endif /* _SMU_H */
diff --git a/arch/powerpc/include/asm/socket.h b/arch/powerpc/include/asm/socket.h
new file mode 100644
index 0000000..f5a4e168
--- /dev/null
+++ b/arch/powerpc/include/asm/socket.h
@@ -0,0 +1,64 @@
+#ifndef _ASM_POWERPC_SOCKET_H
+#define _ASM_POWERPC_SOCKET_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/sockios.h>
+
+/* For setsockopt(2) */
+#define SOL_SOCKET 1
+
+#define SO_DEBUG 1
+#define SO_REUSEADDR 2
+#define SO_TYPE 3
+#define SO_ERROR 4
+#define SO_DONTROUTE 5
+#define SO_BROADCAST 6
+#define SO_SNDBUF 7
+#define SO_RCVBUF 8
+#define SO_SNDBUFFORCE 32
+#define SO_RCVBUFFORCE 33
+#define SO_KEEPALIVE 9
+#define SO_OOBINLINE 10
+#define SO_NO_CHECK 11
+#define SO_PRIORITY 12
+#define SO_LINGER 13
+#define SO_BSDCOMPAT 14
+/* To add :#define SO_REUSEPORT 15 */
+#define SO_RCVLOWAT 16
+#define SO_SNDLOWAT 17
+#define SO_RCVTIMEO 18
+#define SO_SNDTIMEO 19
+#define SO_PASSCRED 20
+#define SO_PEERCRED 21
+
+/* Security levels - as per NRL IPv6 - don't actually do anything */
+#define SO_SECURITY_AUTHENTICATION 22
+#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
+#define SO_SECURITY_ENCRYPTION_NETWORK 24
+
+#define SO_BINDTODEVICE 25
+
+/* Socket filtering */
+#define SO_ATTACH_FILTER 26
+#define SO_DETACH_FILTER 27
+
+#define SO_PEERNAME 28
+#define SO_TIMESTAMP 29
+#define SCM_TIMESTAMP SO_TIMESTAMP
+
+#define SO_ACCEPTCONN 30
+
+#define SO_PEERSEC 31
+#define SO_PASSSEC 34
+#define SO_TIMESTAMPNS 35
+#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
+
+#define SO_MARK 36
+
+#endif /* _ASM_POWERPC_SOCKET_H */
diff --git a/arch/powerpc/include/asm/sockios.h b/arch/powerpc/include/asm/sockios.h
new file mode 100644
index 0000000..55cef76
--- /dev/null
+++ b/arch/powerpc/include/asm/sockios.h
@@ -0,0 +1,20 @@
+#ifndef _ASM_POWERPC_SOCKIOS_H
+#define _ASM_POWERPC_SOCKIOS_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/* Socket-level I/O control calls. */
+#define FIOSETOWN 0x8901
+#define SIOCSPGRP 0x8902
+#define FIOGETOWN 0x8903
+#define SIOCGPGRP 0x8904
+#define SIOCATMARK 0x8905
+#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
+#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
+
+#endif /* _ASM_POWERPC_SOCKIOS_H */
diff --git a/arch/powerpc/include/asm/sparsemem.h b/arch/powerpc/include/asm/sparsemem.h
new file mode 100644
index 0000000..54a47ea
--- /dev/null
+++ b/arch/powerpc/include/asm/sparsemem.h
@@ -0,0 +1,32 @@
+#ifndef _ASM_POWERPC_SPARSEMEM_H
+#define _ASM_POWERPC_SPARSEMEM_H 1
+#ifdef __KERNEL__
+
+#ifdef CONFIG_SPARSEMEM
+/*
+ * SECTION_SIZE_BITS 2^N: how big each section will be
+ * MAX_PHYSADDR_BITS 2^N: how much physical address space we have
+ * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space
+ */
+#define SECTION_SIZE_BITS 24
+
+#define MAX_PHYSADDR_BITS 44
+#define MAX_PHYSMEM_BITS 44
+
+#endif /* CONFIG_SPARSEMEM */
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+extern void create_section_mapping(unsigned long start, unsigned long end);
+extern int remove_section_mapping(unsigned long start, unsigned long end);
+#ifdef CONFIG_NUMA
+extern int hot_add_scn_to_nid(unsigned long scn_addr);
+#else
+static inline int hot_add_scn_to_nid(unsigned long scn_addr)
+{
+ return 0;
+}
+#endif /* CONFIG_NUMA */
+#endif /* CONFIG_MEMORY_HOTPLUG */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_SPARSEMEM_H */
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
new file mode 100644
index 0000000..f56a843
--- /dev/null
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -0,0 +1,295 @@
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+#ifdef __KERNEL__
+
+/*
+ * Simple spin lock operations.
+ *
+ * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
+ * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
+ * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
+ * Rework to support virtual processors
+ *
+ * An int is used, as a full 64-bit word is not necessary.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * (the type definitions are in asm/spinlock_types.h)
+ */
+#include <linux/irqflags.h>
+#ifdef CONFIG_PPC64
+#include <asm/paca.h>
+#include <asm/hvcall.h>
+#include <asm/iseries/hv_call.h>
+#endif
+#include <asm/asm-compat.h>
+#include <asm/synch.h>
+
+#define __raw_spin_is_locked(x) ((x)->slock != 0)
+
+#ifdef CONFIG_PPC64
+/* use 0x800000yy when locked, where yy == CPU number */
+#define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token))
+#else
+#define LOCK_TOKEN 1
+#endif
+
+#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
+#define CLEAR_IO_SYNC (get_paca()->io_sync = 0)
+#define SYNC_IO do { \
+ if (unlikely(get_paca()->io_sync)) { \
+ mb(); \
+ get_paca()->io_sync = 0; \
+ } \
+ } while (0)
+#else
+#define CLEAR_IO_SYNC
+#define SYNC_IO
+#endif
+
+/*
+ * This returns the old value in the lock, so we succeeded
+ * in getting the lock if the return value is 0.
+ */
+static inline unsigned long __spin_trylock(raw_spinlock_t *lock)
+{
+ unsigned long tmp, token;
+
+ token = LOCK_TOKEN;
+ __asm__ __volatile__(
+"1: lwarx %0,0,%2\n\
+ cmpwi 0,%0,0\n\
+ bne- 2f\n\
+ stwcx. %1,0,%2\n\
+ bne- 1b\n\
+ isync\n\
+2:" : "=&r" (tmp)
+ : "r" (token), "r" (&lock->slock)
+ : "cr0", "memory");
+
+ return tmp;
+}
+
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+{
+ CLEAR_IO_SYNC;
+ return __spin_trylock(lock) == 0;
+}
+
+/*
+ * On a system with shared processors (that is, where a physical
+ * processor is multiplexed between several virtual processors),
+ * there is no point spinning on a lock if the holder of the lock
+ * isn't currently scheduled on a physical processor. Instead
+ * we detect this situation and ask the hypervisor to give the
+ * rest of our timeslice to the lock holder.
+ *
+ * So that we can tell which virtual processor is holding a lock,
+ * we put 0x80000000 | smp_processor_id() in the lock when it is
+ * held. Conveniently, we have a word in the paca that holds this
+ * value.
+ */
+
+#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
+/* We only yield to the hypervisor if we are in shared processor mode */
+#define SHARED_PROCESSOR (get_lppaca()->shared_proc)
+extern void __spin_yield(raw_spinlock_t *lock);
+extern void __rw_yield(raw_rwlock_t *lock);
+#else /* SPLPAR || ISERIES */
+#define __spin_yield(x) barrier()
+#define __rw_yield(x) barrier()
+#define SHARED_PROCESSOR 0
+#endif
+
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
+{
+ CLEAR_IO_SYNC;
+ while (1) {
+ if (likely(__spin_trylock(lock) == 0))
+ break;
+ do {
+ HMT_low();
+ if (SHARED_PROCESSOR)
+ __spin_yield(lock);
+ } while (unlikely(lock->slock != 0));
+ HMT_medium();
+ }
+}
+
+static inline
+void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+{
+ unsigned long flags_dis;
+
+ CLEAR_IO_SYNC;
+ while (1) {
+ if (likely(__spin_trylock(lock) == 0))
+ break;
+ local_save_flags(flags_dis);
+ local_irq_restore(flags);
+ do {
+ HMT_low();
+ if (SHARED_PROCESSOR)
+ __spin_yield(lock);
+ } while (unlikely(lock->slock != 0));
+ HMT_medium();
+ local_irq_restore(flags_dis);
+ }
+}
+
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+{
+ SYNC_IO;
+ __asm__ __volatile__("# __raw_spin_unlock\n\t"
+ LWSYNC_ON_SMP: : :"memory");
+ lock->slock = 0;
+}
+
+#ifdef CONFIG_PPC64
+extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
+#else
+#define __raw_spin_unlock_wait(lock) \
+ do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+#endif
+
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! it is quite common to have readers in interrupts
+ * but no interrupt writers. For those circumstances we
+ * can "mix" irq-safe locks - any writer needs to get a
+ * irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ */
+
+#define __raw_read_can_lock(rw) ((rw)->lock >= 0)
+#define __raw_write_can_lock(rw) (!(rw)->lock)
+
+#ifdef CONFIG_PPC64
+#define __DO_SIGN_EXTEND "extsw %0,%0\n"
+#define WRLOCK_TOKEN LOCK_TOKEN /* it's negative */
+#else
+#define __DO_SIGN_EXTEND
+#define WRLOCK_TOKEN (-1)
+#endif
+
+/*
+ * This returns the old value in the lock + 1,
+ * so we got a read lock if the return value is > 0.
+ */
+static inline long __read_trylock(raw_rwlock_t *rw)
+{
+ long tmp;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%1\n"
+ __DO_SIGN_EXTEND
+" addic. %0,%0,1\n\
+ ble- 2f\n"
+ PPC405_ERR77(0,%1)
+" stwcx. %0,0,%1\n\
+ bne- 1b\n\
+ isync\n\
+2:" : "=&r" (tmp)
+ : "r" (&rw->lock)
+ : "cr0", "xer", "memory");
+
+ return tmp;
+}
+
+/*
+ * This returns the old value in the lock,
+ * so we got the write lock if the return value is 0.
+ */
+static inline long __write_trylock(raw_rwlock_t *rw)
+{
+ long tmp, token;
+
+ token = WRLOCK_TOKEN;
+ __asm__ __volatile__(
+"1: lwarx %0,0,%2\n\
+ cmpwi 0,%0,0\n\
+ bne- 2f\n"
+ PPC405_ERR77(0,%1)
+" stwcx. %1,0,%2\n\
+ bne- 1b\n\
+ isync\n\
+2:" : "=&r" (tmp)
+ : "r" (token), "r" (&rw->lock)
+ : "cr0", "memory");
+
+ return tmp;
+}
+
+static inline void __raw_read_lock(raw_rwlock_t *rw)
+{
+ while (1) {
+ if (likely(__read_trylock(rw) > 0))
+ break;
+ do {
+ HMT_low();
+ if (SHARED_PROCESSOR)
+ __rw_yield(rw);
+ } while (unlikely(rw->lock < 0));
+ HMT_medium();
+ }
+}
+
+static inline void __raw_write_lock(raw_rwlock_t *rw)
+{
+ while (1) {
+ if (likely(__write_trylock(rw) == 0))
+ break;
+ do {
+ HMT_low();
+ if (SHARED_PROCESSOR)
+ __rw_yield(rw);
+ } while (unlikely(rw->lock != 0));
+ HMT_medium();
+ }
+}
+
+static inline int __raw_read_trylock(raw_rwlock_t *rw)
+{
+ return __read_trylock(rw) > 0;
+}
+
+static inline int __raw_write_trylock(raw_rwlock_t *rw)
+{
+ return __write_trylock(rw) == 0;
+}
+
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
+{
+ long tmp;
+
+ __asm__ __volatile__(
+ "# read_unlock\n\t"
+ LWSYNC_ON_SMP
+"1: lwarx %0,0,%1\n\
+ addic %0,%0,-1\n"
+ PPC405_ERR77(0,%1)
+" stwcx. %0,0,%1\n\
+ bne- 1b"
+ : "=&r"(tmp)
+ : "r"(&rw->lock)
+ : "cr0", "memory");
+}
+
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
+{
+ __asm__ __volatile__("# write_unlock\n\t"
+ LWSYNC_ON_SMP: : :"memory");
+ rw->lock = 0;
+}
+
+#define _raw_spin_relax(lock) __spin_yield(lock)
+#define _raw_read_relax(lock) __rw_yield(lock)
+#define _raw_write_relax(lock) __rw_yield(lock)
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/asm/spinlock_types.h
new file mode 100644
index 0000000..74236c9
--- /dev/null
+++ b/arch/powerpc/include/asm/spinlock_types.h
@@ -0,0 +1,20 @@
+#ifndef _ASM_POWERPC_SPINLOCK_TYPES_H
+#define _ASM_POWERPC_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+ volatile unsigned int slock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+
+typedef struct {
+ volatile signed int lock;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED { 0 }
+
+#endif
diff --git a/arch/powerpc/include/asm/spu.h b/arch/powerpc/include/asm/spu.h
new file mode 100644
index 0000000..8b2eb04
--- /dev/null
+++ b/arch/powerpc/include/asm/spu.h
@@ -0,0 +1,732 @@
+/*
+ * SPU core / file system interface and HW structures
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Arnd Bergmann <arndb@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _SPU_H
+#define _SPU_H
+#ifdef __KERNEL__
+
+#include <linux/workqueue.h>
+#include <linux/sysdev.h>
+
+#define LS_SIZE (256 * 1024)
+#define LS_ADDR_MASK (LS_SIZE - 1)
+
+#define MFC_PUT_CMD 0x20
+#define MFC_PUTS_CMD 0x28
+#define MFC_PUTR_CMD 0x30
+#define MFC_PUTF_CMD 0x22
+#define MFC_PUTB_CMD 0x21
+#define MFC_PUTFS_CMD 0x2A
+#define MFC_PUTBS_CMD 0x29
+#define MFC_PUTRF_CMD 0x32
+#define MFC_PUTRB_CMD 0x31
+#define MFC_PUTL_CMD 0x24
+#define MFC_PUTRL_CMD 0x34
+#define MFC_PUTLF_CMD 0x26
+#define MFC_PUTLB_CMD 0x25
+#define MFC_PUTRLF_CMD 0x36
+#define MFC_PUTRLB_CMD 0x35
+
+#define MFC_GET_CMD 0x40
+#define MFC_GETS_CMD 0x48
+#define MFC_GETF_CMD 0x42
+#define MFC_GETB_CMD 0x41
+#define MFC_GETFS_CMD 0x4A
+#define MFC_GETBS_CMD 0x49
+#define MFC_GETL_CMD 0x44
+#define MFC_GETLF_CMD 0x46
+#define MFC_GETLB_CMD 0x45
+
+#define MFC_SDCRT_CMD 0x80
+#define MFC_SDCRTST_CMD 0x81
+#define MFC_SDCRZ_CMD 0x89
+#define MFC_SDCRS_CMD 0x8D
+#define MFC_SDCRF_CMD 0x8F
+
+#define MFC_GETLLAR_CMD 0xD0
+#define MFC_PUTLLC_CMD 0xB4
+#define MFC_PUTLLUC_CMD 0xB0
+#define MFC_PUTQLLUC_CMD 0xB8
+#define MFC_SNDSIG_CMD 0xA0
+#define MFC_SNDSIGB_CMD 0xA1
+#define MFC_SNDSIGF_CMD 0xA2
+#define MFC_BARRIER_CMD 0xC0
+#define MFC_EIEIO_CMD 0xC8
+#define MFC_SYNC_CMD 0xCC
+
+#define MFC_MIN_DMA_SIZE_SHIFT 4 /* 16 bytes */
+#define MFC_MAX_DMA_SIZE_SHIFT 14 /* 16384 bytes */
+#define MFC_MIN_DMA_SIZE (1 << MFC_MIN_DMA_SIZE_SHIFT)
+#define MFC_MAX_DMA_SIZE (1 << MFC_MAX_DMA_SIZE_SHIFT)
+#define MFC_MIN_DMA_SIZE_MASK (MFC_MIN_DMA_SIZE - 1)
+#define MFC_MAX_DMA_SIZE_MASK (MFC_MAX_DMA_SIZE - 1)
+#define MFC_MIN_DMA_LIST_SIZE 0x0008 /* 8 bytes */
+#define MFC_MAX_DMA_LIST_SIZE 0x4000 /* 16K bytes */
+
+#define MFC_TAGID_TO_TAGMASK(tag_id) (1 << (tag_id & 0x1F))
+
+/* Events for Channels 0-2 */
+#define MFC_DMA_TAG_STATUS_UPDATE_EVENT 0x00000001
+#define MFC_DMA_TAG_CMD_STALL_NOTIFY_EVENT 0x00000002
+#define MFC_DMA_QUEUE_AVAILABLE_EVENT 0x00000008
+#define MFC_SPU_MAILBOX_WRITTEN_EVENT 0x00000010
+#define MFC_DECREMENTER_EVENT 0x00000020
+#define MFC_PU_INT_MAILBOX_AVAILABLE_EVENT 0x00000040
+#define MFC_PU_MAILBOX_AVAILABLE_EVENT 0x00000080
+#define MFC_SIGNAL_2_EVENT 0x00000100
+#define MFC_SIGNAL_1_EVENT 0x00000200
+#define MFC_LLR_LOST_EVENT 0x00000400
+#define MFC_PRIV_ATTN_EVENT 0x00000800
+#define MFC_MULTI_SRC_EVENT 0x00001000
+
+/* Flag indicating progress during context switch. */
+#define SPU_CONTEXT_SWITCH_PENDING 0UL
+#define SPU_CONTEXT_FAULT_PENDING 1UL
+
+struct spu_context;
+struct spu_runqueue;
+struct spu_lscsa;
+struct device_node;
+
+enum spu_utilization_state {
+ SPU_UTIL_USER,
+ SPU_UTIL_SYSTEM,
+ SPU_UTIL_IOWAIT,
+ SPU_UTIL_IDLE_LOADED,
+ SPU_UTIL_MAX
+};
+
+struct spu {
+ const char *name;
+ unsigned long local_store_phys;
+ u8 *local_store;
+ unsigned long problem_phys;
+ struct spu_problem __iomem *problem;
+ struct spu_priv2 __iomem *priv2;
+ struct list_head cbe_list;
+ struct list_head full_list;
+ enum { SPU_FREE, SPU_USED } alloc_state;
+ int number;
+ unsigned int irqs[3];
+ u32 node;
+ u64 flags;
+ u64 class_0_pending;
+ u64 class_0_dar;
+ u64 class_1_dar;
+ u64 class_1_dsisr;
+ size_t ls_size;
+ unsigned int slb_replace;
+ struct mm_struct *mm;
+ struct spu_context *ctx;
+ struct spu_runqueue *rq;
+ unsigned long long timestamp;
+ pid_t pid;
+ pid_t tgid;
+ spinlock_t register_lock;
+
+ void (* wbox_callback)(struct spu *spu);
+ void (* ibox_callback)(struct spu *spu);
+ void (* stop_callback)(struct spu *spu, int irq);
+ void (* mfc_callback)(struct spu *spu);
+
+ char irq_c0[8];
+ char irq_c1[8];
+ char irq_c2[8];
+
+ u64 spe_id;
+
+ void* pdata; /* platform private data */
+
+ /* of based platforms only */
+ struct device_node *devnode;
+
+ /* native only */
+ struct spu_priv1 __iomem *priv1;
+
+ /* beat only */
+ u64 shadow_int_mask_RW[3];
+
+ struct sys_device sysdev;
+
+ int has_mem_affinity;
+ struct list_head aff_list;
+
+ struct {
+ /* protected by interrupt reentrancy */
+ enum spu_utilization_state util_state;
+ unsigned long long tstamp;
+ unsigned long long times[SPU_UTIL_MAX];
+ unsigned long long vol_ctx_switch;
+ unsigned long long invol_ctx_switch;
+ unsigned long long min_flt;
+ unsigned long long maj_flt;
+ unsigned long long hash_flt;
+ unsigned long long slb_flt;
+ unsigned long long class2_intr;
+ unsigned long long libassist;
+ } stats;
+};
+
+struct cbe_spu_info {
+ struct mutex list_mutex;
+ struct list_head spus;
+ int n_spus;
+ int nr_active;
+ atomic_t busy_spus;
+ atomic_t reserved_spus;
+};
+
+extern struct cbe_spu_info cbe_spu_info[];
+
+void spu_init_channels(struct spu *spu);
+void spu_irq_setaffinity(struct spu *spu, int cpu);
+
+void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
+ void *code, int code_size);
+
+#ifdef CONFIG_KEXEC
+void crash_register_spus(struct list_head *list);
+#else
+static inline void crash_register_spus(struct list_head *list)
+{
+}
+#endif
+
+extern void spu_invalidate_slbs(struct spu *spu);
+extern void spu_associate_mm(struct spu *spu, struct mm_struct *mm);
+int spu_64k_pages_available(void);
+
+/* Calls from the memory management to the SPU */
+struct mm_struct;
+extern void spu_flush_all_slbs(struct mm_struct *mm);
+
+/* This interface allows a profiler (e.g., OProfile) to store a ref
+ * to spu context information that it creates. This caching technique
+ * avoids the need to recreate this information after a save/restore operation.
+ *
+ * Assumes the caller has already incremented the ref count to
+ * profile_info; then spu_context_destroy must call kref_put
+ * on prof_info_kref.
+ */
+void spu_set_profile_private_kref(struct spu_context *ctx,
+ struct kref *prof_info_kref,
+ void ( * prof_info_release) (struct kref *kref));
+
+void *spu_get_profile_private_kref(struct spu_context *ctx);
+
+/* system callbacks from the SPU */
+struct spu_syscall_block {
+ u64 nr_ret;
+ u64 parm[6];
+};
+extern long spu_sys_callback(struct spu_syscall_block *s);
+
+/* syscalls implemented in spufs */
+struct file;
+struct spufs_calls {
+ long (*create_thread)(const char __user *name,
+ unsigned int flags, mode_t mode,
+ struct file *neighbor);
+ long (*spu_run)(struct file *filp, __u32 __user *unpc,
+ __u32 __user *ustatus);
+ int (*coredump_extra_notes_size)(void);
+ int (*coredump_extra_notes_write)(struct file *file, loff_t *foffset);
+ void (*notify_spus_active)(void);
+ struct module *owner;
+};
+
+/* return status from spu_run, same as in libspe */
+#define SPE_EVENT_DMA_ALIGNMENT 0x0008 /*A DMA alignment error */
+#define SPE_EVENT_SPE_ERROR 0x0010 /*An illegal instruction error*/
+#define SPE_EVENT_SPE_DATA_SEGMENT 0x0020 /*A DMA segmentation error */
+#define SPE_EVENT_SPE_DATA_STORAGE 0x0040 /*A DMA storage error */
+#define SPE_EVENT_INVALID_DMA 0x0800 /* Invalid MFC DMA */
+
+/*
+ * Flags for sys_spu_create.
+ */
+#define SPU_CREATE_EVENTS_ENABLED 0x0001
+#define SPU_CREATE_GANG 0x0002
+#define SPU_CREATE_NOSCHED 0x0004
+#define SPU_CREATE_ISOLATE 0x0008
+#define SPU_CREATE_AFFINITY_SPU 0x0010
+#define SPU_CREATE_AFFINITY_MEM 0x0020
+
+#define SPU_CREATE_FLAG_ALL 0x003f /* mask of all valid flags */
+
+
+int register_spu_syscalls(struct spufs_calls *calls);
+void unregister_spu_syscalls(struct spufs_calls *calls);
+
+int spu_add_sysdev_attr(struct sysdev_attribute *attr);
+void spu_remove_sysdev_attr(struct sysdev_attribute *attr);
+
+int spu_add_sysdev_attr_group(struct attribute_group *attrs);
+void spu_remove_sysdev_attr_group(struct attribute_group *attrs);
+
+int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
+ unsigned long dsisr, unsigned *flt);
+
+/*
+ * Notifier blocks:
+ *
+ * oprofile can get notified when a context switch is performed
+ * on an SPE. The notifier function that gets called is passed
+ * a pointer to the SPU structure as well as the object-id that
+ * identifies the binary running on that SPU now.
+ *
+ * For a context save, the object-id that is passed is zero,
+ * identifying that the kernel will run from that moment on.
+ *
+ * For a context restore, the object-id is the value written
+ * to the object-id spufs file from user space, and the notifier
+ * function can assume that spu->ctx is valid.
+ */
+struct notifier_block;
+int spu_switch_event_register(struct notifier_block * n);
+int spu_switch_event_unregister(struct notifier_block * n);
+
+extern void notify_spus_active(void);
+extern void do_notify_spus_active(void);
+
+/*
+ * This defines the Local Store, Problem Area and Privilege Area of an SPU.
+ */
+
+union mfc_tag_size_class_cmd {
+ struct {
+ u16 mfc_size;
+ u16 mfc_tag;
+ u8 pad;
+ u8 mfc_rclassid;
+ u16 mfc_cmd;
+ } u;
+ struct {
+ u32 mfc_size_tag32;
+ u32 mfc_class_cmd32;
+ } by32;
+ u64 all64;
+};
+
+struct mfc_cq_sr {
+ u64 mfc_cq_data0_RW;
+ u64 mfc_cq_data1_RW;
+ u64 mfc_cq_data2_RW;
+ u64 mfc_cq_data3_RW;
+};
+
+struct spu_problem {
+#define MS_SYNC_PENDING 1L
+ u64 spc_mssync_RW; /* 0x0000 */
+ u8 pad_0x0008_0x3000[0x3000 - 0x0008];
+
+ /* DMA Area */
+ u8 pad_0x3000_0x3004[0x4]; /* 0x3000 */
+ u32 mfc_lsa_W; /* 0x3004 */
+ u64 mfc_ea_W; /* 0x3008 */
+ union mfc_tag_size_class_cmd mfc_union_W; /* 0x3010 */
+ u8 pad_0x3018_0x3104[0xec]; /* 0x3018 */
+ u32 dma_qstatus_R; /* 0x3104 */
+ u8 pad_0x3108_0x3204[0xfc]; /* 0x3108 */
+ u32 dma_querytype_RW; /* 0x3204 */
+ u8 pad_0x3208_0x321c[0x14]; /* 0x3208 */
+ u32 dma_querymask_RW; /* 0x321c */
+ u8 pad_0x3220_0x322c[0xc]; /* 0x3220 */
+ u32 dma_tagstatus_R; /* 0x322c */
+#define DMA_TAGSTATUS_INTR_ANY 1u
+#define DMA_TAGSTATUS_INTR_ALL 2u
+ u8 pad_0x3230_0x4000[0x4000 - 0x3230]; /* 0x3230 */
+
+ /* SPU Control Area */
+ u8 pad_0x4000_0x4004[0x4]; /* 0x4000 */
+ u32 pu_mb_R; /* 0x4004 */
+ u8 pad_0x4008_0x400c[0x4]; /* 0x4008 */
+ u32 spu_mb_W; /* 0x400c */
+ u8 pad_0x4010_0x4014[0x4]; /* 0x4010 */
+ u32 mb_stat_R; /* 0x4014 */
+ u8 pad_0x4018_0x401c[0x4]; /* 0x4018 */
+ u32 spu_runcntl_RW; /* 0x401c */
+#define SPU_RUNCNTL_STOP 0L
+#define SPU_RUNCNTL_RUNNABLE 1L
+#define SPU_RUNCNTL_ISOLATE 2L
+ u8 pad_0x4020_0x4024[0x4]; /* 0x4020 */
+ u32 spu_status_R; /* 0x4024 */
+#define SPU_STOP_STATUS_SHIFT 16
+#define SPU_STATUS_STOPPED 0x0
+#define SPU_STATUS_RUNNING 0x1
+#define SPU_STATUS_STOPPED_BY_STOP 0x2
+#define SPU_STATUS_STOPPED_BY_HALT 0x4
+#define SPU_STATUS_WAITING_FOR_CHANNEL 0x8
+#define SPU_STATUS_SINGLE_STEP 0x10
+#define SPU_STATUS_INVALID_INSTR 0x20
+#define SPU_STATUS_INVALID_CH 0x40
+#define SPU_STATUS_ISOLATED_STATE 0x80
+#define SPU_STATUS_ISOLATED_LOAD_STATUS 0x200
+#define SPU_STATUS_ISOLATED_EXIT_STATUS 0x400
+ u8 pad_0x4028_0x402c[0x4]; /* 0x4028 */
+ u32 spu_spe_R; /* 0x402c */
+ u8 pad_0x4030_0x4034[0x4]; /* 0x4030 */
+ u32 spu_npc_RW; /* 0x4034 */
+ u8 pad_0x4038_0x14000[0x14000 - 0x4038]; /* 0x4038 */
+
+ /* Signal Notification Area */
+ u8 pad_0x14000_0x1400c[0xc]; /* 0x14000 */
+ u32 signal_notify1; /* 0x1400c */
+ u8 pad_0x14010_0x1c00c[0x7ffc]; /* 0x14010 */
+ u32 signal_notify2; /* 0x1c00c */
+} __attribute__ ((aligned(0x20000)));
+
+/* SPU Privilege 2 State Area */
+struct spu_priv2 {
+ /* MFC Registers */
+ u8 pad_0x0000_0x1100[0x1100 - 0x0000]; /* 0x0000 */
+
+ /* SLB Management Registers */
+ u8 pad_0x1100_0x1108[0x8]; /* 0x1100 */
+ u64 slb_index_W; /* 0x1108 */
+#define SLB_INDEX_MASK 0x7L
+ u64 slb_esid_RW; /* 0x1110 */
+ u64 slb_vsid_RW; /* 0x1118 */
+#define SLB_VSID_SUPERVISOR_STATE (0x1ull << 11)
+#define SLB_VSID_SUPERVISOR_STATE_MASK (0x1ull << 11)
+#define SLB_VSID_PROBLEM_STATE (0x1ull << 10)
+#define SLB_VSID_PROBLEM_STATE_MASK (0x1ull << 10)
+#define SLB_VSID_EXECUTE_SEGMENT (0x1ull << 9)
+#define SLB_VSID_NO_EXECUTE_SEGMENT (0x1ull << 9)
+#define SLB_VSID_EXECUTE_SEGMENT_MASK (0x1ull << 9)
+#define SLB_VSID_4K_PAGE (0x0 << 8)
+#define SLB_VSID_LARGE_PAGE (0x1ull << 8)
+#define SLB_VSID_PAGE_SIZE_MASK (0x1ull << 8)
+#define SLB_VSID_CLASS_MASK (0x1ull << 7)
+#define SLB_VSID_VIRTUAL_PAGE_SIZE_MASK (0x1ull << 6)
+ u64 slb_invalidate_entry_W; /* 0x1120 */
+ u64 slb_invalidate_all_W; /* 0x1128 */
+ u8 pad_0x1130_0x2000[0x2000 - 0x1130]; /* 0x1130 */
+
+ /* Context Save / Restore Area */
+ struct mfc_cq_sr spuq[16]; /* 0x2000 */
+ struct mfc_cq_sr puq[8]; /* 0x2200 */
+ u8 pad_0x2300_0x3000[0x3000 - 0x2300]; /* 0x2300 */
+
+ /* MFC Control */
+ u64 mfc_control_RW; /* 0x3000 */
+#define MFC_CNTL_RESUME_DMA_QUEUE (0ull << 0)
+#define MFC_CNTL_SUSPEND_DMA_QUEUE (1ull << 0)
+#define MFC_CNTL_SUSPEND_DMA_QUEUE_MASK (1ull << 0)
+#define MFC_CNTL_SUSPEND_MASK (1ull << 4)
+#define MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION (0ull << 8)
+#define MFC_CNTL_SUSPEND_IN_PROGRESS (1ull << 8)
+#define MFC_CNTL_SUSPEND_COMPLETE (3ull << 8)
+#define MFC_CNTL_SUSPEND_DMA_STATUS_MASK (3ull << 8)
+#define MFC_CNTL_DMA_QUEUES_EMPTY (1ull << 14)
+#define MFC_CNTL_DMA_QUEUES_EMPTY_MASK (1ull << 14)
+#define MFC_CNTL_PURGE_DMA_REQUEST (1ull << 15)
+#define MFC_CNTL_PURGE_DMA_IN_PROGRESS (1ull << 24)
+#define MFC_CNTL_PURGE_DMA_COMPLETE (3ull << 24)
+#define MFC_CNTL_PURGE_DMA_STATUS_MASK (3ull << 24)
+#define MFC_CNTL_RESTART_DMA_COMMAND (1ull << 32)
+#define MFC_CNTL_DMA_COMMAND_REISSUE_PENDING (1ull << 32)
+#define MFC_CNTL_DMA_COMMAND_REISSUE_STATUS_MASK (1ull << 32)
+#define MFC_CNTL_MFC_PRIVILEGE_STATE (2ull << 33)
+#define MFC_CNTL_MFC_PROBLEM_STATE (3ull << 33)
+#define MFC_CNTL_MFC_KEY_PROTECTION_STATE_MASK (3ull << 33)
+#define MFC_CNTL_DECREMENTER_HALTED (1ull << 35)
+#define MFC_CNTL_DECREMENTER_RUNNING (1ull << 40)
+#define MFC_CNTL_DECREMENTER_STATUS_MASK (1ull << 40)
+ u8 pad_0x3008_0x4000[0x4000 - 0x3008]; /* 0x3008 */
+
+ /* Interrupt Mailbox */
+ u64 puint_mb_R; /* 0x4000 */
+ u8 pad_0x4008_0x4040[0x4040 - 0x4008]; /* 0x4008 */
+
+ /* SPU Control */
+ u64 spu_privcntl_RW; /* 0x4040 */
+#define SPU_PRIVCNTL_MODE_NORMAL (0x0ull << 0)
+#define SPU_PRIVCNTL_MODE_SINGLE_STEP (0x1ull << 0)
+#define SPU_PRIVCNTL_MODE_MASK (0x1ull << 0)
+#define SPU_PRIVCNTL_NO_ATTENTION_EVENT (0x0ull << 1)
+#define SPU_PRIVCNTL_ATTENTION_EVENT (0x1ull << 1)
+#define SPU_PRIVCNTL_ATTENTION_EVENT_MASK (0x1ull << 1)
+#define SPU_PRIVCNT_LOAD_REQUEST_NORMAL (0x0ull << 2)
+#define SPU_PRIVCNT_LOAD_REQUEST_ENABLE_MASK (0x1ull << 2)
+ u8 pad_0x4048_0x4058[0x10]; /* 0x4048 */
+ u64 spu_lslr_RW; /* 0x4058 */
+ u64 spu_chnlcntptr_RW; /* 0x4060 */
+ u64 spu_chnlcnt_RW; /* 0x4068 */
+ u64 spu_chnldata_RW; /* 0x4070 */
+ u64 spu_cfg_RW; /* 0x4078 */
+ u8 pad_0x4080_0x5000[0x5000 - 0x4080]; /* 0x4080 */
+
+ /* PV2_ImplRegs: Implementation-specific privileged-state 2 regs */
+ u64 spu_pm_trace_tag_status_RW; /* 0x5000 */
+ u64 spu_tag_status_query_RW; /* 0x5008 */
+#define TAG_STATUS_QUERY_CONDITION_BITS (0x3ull << 32)
+#define TAG_STATUS_QUERY_MASK_BITS (0xffffffffull)
+ u64 spu_cmd_buf1_RW; /* 0x5010 */
+#define SPU_COMMAND_BUFFER_1_LSA_BITS (0x7ffffull << 32)
+#define SPU_COMMAND_BUFFER_1_EAH_BITS (0xffffffffull)
+ u64 spu_cmd_buf2_RW; /* 0x5018 */
+#define SPU_COMMAND_BUFFER_2_EAL_BITS ((0xffffffffull) << 32)
+#define SPU_COMMAND_BUFFER_2_TS_BITS (0xffffull << 16)
+#define SPU_COMMAND_BUFFER_2_TAG_BITS (0x3full)
+ u64 spu_atomic_status_RW; /* 0x5020 */
+} __attribute__ ((aligned(0x20000)));
+
+/* SPU Privilege 1 State Area */
+struct spu_priv1 {
+ /* Control and Configuration Area */
+ u64 mfc_sr1_RW; /* 0x000 */
+#define MFC_STATE1_LOCAL_STORAGE_DECODE_MASK 0x01ull
+#define MFC_STATE1_BUS_TLBIE_MASK 0x02ull
+#define MFC_STATE1_REAL_MODE_OFFSET_ENABLE_MASK 0x04ull
+#define MFC_STATE1_PROBLEM_STATE_MASK 0x08ull
+#define MFC_STATE1_RELOCATE_MASK 0x10ull
+#define MFC_STATE1_MASTER_RUN_CONTROL_MASK 0x20ull
+#define MFC_STATE1_TABLE_SEARCH_MASK 0x40ull
+ u64 mfc_lpid_RW; /* 0x008 */
+ u64 spu_idr_RW; /* 0x010 */
+ u64 mfc_vr_RO; /* 0x018 */
+#define MFC_VERSION_BITS (0xffff << 16)
+#define MFC_REVISION_BITS (0xffff)
+#define MFC_GET_VERSION_BITS(vr) (((vr) & MFC_VERSION_BITS) >> 16)
+#define MFC_GET_REVISION_BITS(vr) ((vr) & MFC_REVISION_BITS)
+ u64 spu_vr_RO; /* 0x020 */
+#define SPU_VERSION_BITS (0xffff << 16)
+#define SPU_REVISION_BITS (0xffff)
+#define SPU_GET_VERSION_BITS(vr)	(((vr) & SPU_VERSION_BITS) >> 16)
+#define SPU_GET_REVISION_BITS(vr)	((vr) & SPU_REVISION_BITS)
+ u8 pad_0x28_0x100[0x100 - 0x28]; /* 0x28 */
+
+ /* Interrupt Area */
+ u64 int_mask_RW[3]; /* 0x100 */
+#define CLASS0_ENABLE_DMA_ALIGNMENT_INTR 0x1L
+#define CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR 0x2L
+#define CLASS0_ENABLE_SPU_ERROR_INTR 0x4L
+#define CLASS0_ENABLE_MFC_FIR_INTR 0x8L
+#define CLASS1_ENABLE_SEGMENT_FAULT_INTR 0x1L
+#define CLASS1_ENABLE_STORAGE_FAULT_INTR 0x2L
+#define CLASS1_ENABLE_LS_COMPARE_SUSPEND_ON_GET_INTR 0x4L
+#define CLASS1_ENABLE_LS_COMPARE_SUSPEND_ON_PUT_INTR 0x8L
+#define CLASS2_ENABLE_MAILBOX_INTR 0x1L
+#define CLASS2_ENABLE_SPU_STOP_INTR 0x2L
+#define CLASS2_ENABLE_SPU_HALT_INTR 0x4L
+#define CLASS2_ENABLE_SPU_DMA_TAG_GROUP_COMPLETE_INTR 0x8L
+#define CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR 0x10L
+ u8 pad_0x118_0x140[0x28]; /* 0x118 */
+ u64 int_stat_RW[3]; /* 0x140 */
+#define CLASS0_DMA_ALIGNMENT_INTR 0x1L
+#define CLASS0_INVALID_DMA_COMMAND_INTR 0x2L
+#define CLASS0_SPU_ERROR_INTR 0x4L
+#define CLASS0_INTR_MASK 0x7L
+#define CLASS1_SEGMENT_FAULT_INTR 0x1L
+#define CLASS1_STORAGE_FAULT_INTR 0x2L
+#define CLASS1_LS_COMPARE_SUSPEND_ON_GET_INTR 0x4L
+#define CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR 0x8L
+#define CLASS1_INTR_MASK 0xfL
+#define CLASS2_MAILBOX_INTR 0x1L
+#define CLASS2_SPU_STOP_INTR 0x2L
+#define CLASS2_SPU_HALT_INTR 0x4L
+#define CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR 0x8L
+#define CLASS2_MAILBOX_THRESHOLD_INTR 0x10L
+#define CLASS2_INTR_MASK 0x1fL
+ u8 pad_0x158_0x180[0x28]; /* 0x158 */
+ u64 int_route_RW; /* 0x180 */
+
+ /* Interrupt Routing */
+ u8 pad_0x188_0x200[0x200 - 0x188]; /* 0x188 */
+
+ /* Atomic Unit Control Area */
+ u64 mfc_atomic_flush_RW; /* 0x200 */
+#define mfc_atomic_flush_enable 0x1L
+ u8 pad_0x208_0x280[0x78]; /* 0x208 */
+ u64 resource_allocation_groupID_RW; /* 0x280 */
+ u64 resource_allocation_enable_RW; /* 0x288 */
+ u8 pad_0x290_0x3c8[0x3c8 - 0x290]; /* 0x290 */
+
+ /* SPU_Cache_ImplRegs: Implementation-dependent cache registers */
+
+ u64 smf_sbi_signal_sel; /* 0x3c8 */
+#define smf_sbi_mask_lsb 56
+#define smf_sbi_shift (63 - smf_sbi_mask_lsb)
+#define smf_sbi_mask (0x301LL << smf_sbi_shift)
+#define smf_sbi_bus0_bits (0x001LL << smf_sbi_shift)
+#define smf_sbi_bus2_bits (0x100LL << smf_sbi_shift)
+#define smf_sbi2_bus0_bits (0x201LL << smf_sbi_shift)
+#define smf_sbi2_bus2_bits (0x300LL << smf_sbi_shift)
+ u64 smf_ato_signal_sel; /* 0x3d0 */
+#define smf_ato_mask_lsb 35
+#define smf_ato_shift (63 - smf_ato_mask_lsb)
+#define smf_ato_mask (0x3LL << smf_ato_shift)
+#define smf_ato_bus0_bits (0x2LL << smf_ato_shift)
+#define smf_ato_bus2_bits (0x1LL << smf_ato_shift)
+ u8 pad_0x3d8_0x400[0x400 - 0x3d8]; /* 0x3d8 */
+
+ /* TLB Management Registers */
+ u64 mfc_sdr_RW; /* 0x400 */
+ u8 pad_0x408_0x500[0xf8]; /* 0x408 */
+ u64 tlb_index_hint_RO; /* 0x500 */
+ u64 tlb_index_W; /* 0x508 */
+ u64 tlb_vpn_RW; /* 0x510 */
+ u64 tlb_rpn_RW; /* 0x518 */
+ u8 pad_0x520_0x540[0x20]; /* 0x520 */
+ u64 tlb_invalidate_entry_W; /* 0x540 */
+ u64 tlb_invalidate_all_W; /* 0x548 */
+ u8 pad_0x550_0x580[0x580 - 0x550]; /* 0x550 */
+
+ /* SPU_MMU_ImplRegs: Implementation-dependent MMU registers */
+ u64 smm_hid; /* 0x580 */
+#define PAGE_SIZE_MASK 0xf000000000000000ull
+#define PAGE_SIZE_16MB_64KB 0x2000000000000000ull
+ u8 pad_0x588_0x600[0x600 - 0x588]; /* 0x588 */
+
+ /* MFC Status/Control Area */
+ u64 mfc_accr_RW; /* 0x600 */
+#define MFC_ACCR_EA_ACCESS_GET (1 << 0)
+#define MFC_ACCR_EA_ACCESS_PUT (1 << 1)
+#define MFC_ACCR_LS_ACCESS_GET (1 << 3)
+#define MFC_ACCR_LS_ACCESS_PUT (1 << 4)
+ u8 pad_0x608_0x610[0x8]; /* 0x608 */
+ u64 mfc_dsisr_RW; /* 0x610 */
+#define MFC_DSISR_PTE_NOT_FOUND (1 << 30)
+#define MFC_DSISR_ACCESS_DENIED (1 << 27)
+#define MFC_DSISR_ATOMIC (1 << 26)
+#define MFC_DSISR_ACCESS_PUT (1 << 25)
+#define MFC_DSISR_ADDR_MATCH (1 << 22)
+#define MFC_DSISR_LS (1 << 17)
+#define MFC_DSISR_L (1 << 16)
+#define MFC_DSISR_ADDRESS_OVERFLOW (1 << 0)
+ u8 pad_0x618_0x620[0x8]; /* 0x618 */
+ u64 mfc_dar_RW; /* 0x620 */
+ u8 pad_0x628_0x700[0x700 - 0x628]; /* 0x628 */
+
+ /* Replacement Management Table (RMT) Area */
+ u64 rmt_index_RW; /* 0x700 */
+ u8 pad_0x708_0x710[0x8]; /* 0x708 */
+ u64 rmt_data1_RW; /* 0x710 */
+ u8 pad_0x718_0x800[0x800 - 0x718]; /* 0x718 */
+
+ /* Control/Configuration Registers */
+ u64 mfc_dsir_R; /* 0x800 */
+#define MFC_DSIR_Q (1 << 31)
+#define MFC_DSIR_SPU_QUEUE MFC_DSIR_Q
+ u64 mfc_lsacr_RW; /* 0x808 */
+#define MFC_LSACR_COMPARE_MASK ((~0ull) << 32)
+#define MFC_LSACR_COMPARE_ADDR ((~0ull) >> 32)
+ u64 mfc_lscrr_R; /* 0x810 */
+#define MFC_LSCRR_Q (1 << 31)
+#define MFC_LSCRR_SPU_QUEUE MFC_LSCRR_Q
+#define MFC_LSCRR_QI_SHIFT 32
+#define MFC_LSCRR_QI_MASK ((~0ull) << MFC_LSCRR_QI_SHIFT)
+ u8 pad_0x818_0x820[0x8]; /* 0x818 */
+ u64 mfc_tclass_id_RW; /* 0x820 */
+#define MFC_TCLASS_ID_ENABLE (1L << 0L)
+#define MFC_TCLASS_SLOT2_ENABLE (1L << 5L)
+#define MFC_TCLASS_SLOT1_ENABLE (1L << 6L)
+#define MFC_TCLASS_SLOT0_ENABLE (1L << 7L)
+#define MFC_TCLASS_QUOTA_2_SHIFT 8L
+#define MFC_TCLASS_QUOTA_1_SHIFT 16L
+#define MFC_TCLASS_QUOTA_0_SHIFT 24L
+#define MFC_TCLASS_QUOTA_2_MASK (0x1FL << MFC_TCLASS_QUOTA_2_SHIFT)
+#define MFC_TCLASS_QUOTA_1_MASK (0x1FL << MFC_TCLASS_QUOTA_1_SHIFT)
+#define MFC_TCLASS_QUOTA_0_MASK (0x1FL << MFC_TCLASS_QUOTA_0_SHIFT)
+ u8 pad_0x828_0x900[0x900 - 0x828]; /* 0x828 */
+
+ /* Real Mode Support Registers */
+ u64 mfc_rm_boundary; /* 0x900 */
+ u8 pad_0x908_0x938[0x30]; /* 0x908 */
+ u64 smf_dma_signal_sel; /* 0x938 */
+#define mfc_dma1_mask_lsb 41
+#define mfc_dma1_shift (63 - mfc_dma1_mask_lsb)
+#define mfc_dma1_mask (0x3LL << mfc_dma1_shift)
+#define mfc_dma1_bits (0x1LL << mfc_dma1_shift)
+#define mfc_dma2_mask_lsb 43
+#define mfc_dma2_shift (63 - mfc_dma2_mask_lsb)
+#define mfc_dma2_mask (0x3LL << mfc_dma2_shift)
+#define mfc_dma2_bits (0x1LL << mfc_dma2_shift)
+ u8 pad_0x940_0xa38[0xf8]; /* 0x940 */
+ u64 smm_signal_sel; /* 0xa38 */
+#define smm_sig_mask_lsb 12
+#define smm_sig_shift (63 - smm_sig_mask_lsb)
+#define smm_sig_mask (0x3LL << smm_sig_shift)
+#define smm_sig_bus0_bits (0x2LL << smm_sig_shift)
+#define smm_sig_bus2_bits (0x1LL << smm_sig_shift)
+ u8 pad_0xa40_0xc00[0xc00 - 0xa40]; /* 0xa40 */
+
+ /* DMA Command Error Area */
+ u64 mfc_cer_R; /* 0xc00 */
+#define MFC_CER_Q (1 << 31)
+#define MFC_CER_SPU_QUEUE MFC_CER_Q
+ u8 pad_0xc08_0x1000[0x1000 - 0xc08]; /* 0xc08 */
+
+ /* PV1_ImplRegs: Implementation-dependent privileged-state 1 regs */
+ /* DMA Command Error Area */
+ u64 spu_ecc_cntl_RW; /* 0x1000 */
+#define SPU_ECC_CNTL_E (1ull << 0ull)
+#define SPU_ECC_CNTL_ENABLE SPU_ECC_CNTL_E
+#define SPU_ECC_CNTL_DISABLE (~SPU_ECC_CNTL_E & 1L)
+#define SPU_ECC_CNTL_S (1ull << 1ull)
+#define SPU_ECC_STOP_AFTER_ERROR SPU_ECC_CNTL_S
+#define SPU_ECC_CONTINUE_AFTER_ERROR (~SPU_ECC_CNTL_S & 2L)
+#define SPU_ECC_CNTL_B (1ull << 2ull)
+#define SPU_ECC_BACKGROUND_ENABLE SPU_ECC_CNTL_B
+#define SPU_ECC_BACKGROUND_DISABLE (~SPU_ECC_CNTL_B & 4L)
+#define SPU_ECC_CNTL_I_SHIFT 3ull
+#define SPU_ECC_CNTL_I_MASK (3ull << SPU_ECC_CNTL_I_SHIFT)
+#define SPU_ECC_WRITE_ALWAYS (~SPU_ECC_CNTL_I & 12L)
+#define SPU_ECC_WRITE_CORRECTABLE (1ull << SPU_ECC_CNTL_I_SHIFT)
+#define SPU_ECC_WRITE_UNCORRECTABLE (3ull << SPU_ECC_CNTL_I_SHIFT)
+#define SPU_ECC_CNTL_D (1ull << 5ull)
+#define SPU_ECC_DETECTION_ENABLE SPU_ECC_CNTL_D
+#define SPU_ECC_DETECTION_DISABLE (~SPU_ECC_CNTL_D & 32L)
+ u64 spu_ecc_stat_RW; /* 0x1008 */
+#define SPU_ECC_CORRECTED_ERROR (1ull << 0ul)
+#define SPU_ECC_UNCORRECTED_ERROR (1ull << 1ul)
+#define SPU_ECC_SCRUB_COMPLETE (1ull << 2ul)
+#define SPU_ECC_SCRUB_IN_PROGRESS (1ull << 3ul)
+#define SPU_ECC_INSTRUCTION_ERROR (1ull << 4ul)
+#define SPU_ECC_DATA_ERROR (1ull << 5ul)
+#define SPU_ECC_DMA_ERROR (1ull << 6ul)
+#define SPU_ECC_STATUS_CNT_MASK (256ull << 8)
+ u64 spu_ecc_addr_RW; /* 0x1010 */
+ u64 spu_err_mask_RW; /* 0x1018 */
+#define SPU_ERR_ILLEGAL_INSTR (1ull << 0ul)
+#define SPU_ERR_ILLEGAL_CHANNEL (1ull << 1ul)
+ u8 pad_0x1020_0x1028[0x1028 - 0x1020]; /* 0x1020 */
+
+ /* SPU Debug-Trace Bus (DTB) Selection Registers */
+ u64 spu_trig0_sel; /* 0x1028 */
+ u64 spu_trig1_sel; /* 0x1030 */
+ u64 spu_trig2_sel; /* 0x1038 */
+ u64 spu_trig3_sel; /* 0x1040 */
+ u64 spu_trace_sel; /* 0x1048 */
+#define spu_trace_sel_mask 0x1f1fLL
+#define spu_trace_sel_bus0_bits 0x1000LL
+#define spu_trace_sel_bus2_bits 0x0010LL
+ u64 spu_event0_sel; /* 0x1050 */
+ u64 spu_event1_sel; /* 0x1058 */
+ u64 spu_event2_sel; /* 0x1060 */
+ u64 spu_event3_sel; /* 0x1068 */
+ u64 spu_trace_cntl; /* 0x1070 */
+} __attribute__ ((aligned(0x2000)));
+
+#endif /* __KERNEL__ */
+#endif
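
[Editor's note: the switch-notification hooks near the top of this header are easiest to see in use. Below is a minimal, illustrative sketch -- not part of this patch -- of how a profiling module might register for SPU context-switch events. Only spu_switch_event_register()/spu_switch_event_unregister() and struct spu come from the header above; the callback name and the assumption that the object-id arrives as the notifier "action" argument and the struct spu pointer as the data argument are the editor's.]

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <asm/spu.h>

/*
 * Called on every SPU context switch.  Per the comment above, 'object_id'
 * is zero on a context save and the spufs object-id on a restore; the
 * struct spu pointer is assumed to arrive as the data argument.
 */
static int example_spu_switch_notify(struct notifier_block *nb,
				     unsigned long object_id, void *data)
{
	struct spu *spu = data;

	if (object_id == 0)
		pr_debug("spu %p: context save, kernel takes over\n", spu);
	else
		pr_debug("spu %p: context restore, object-id 0x%lx\n",
			 spu, object_id);
	return NOTIFY_OK;
}

static struct notifier_block example_spu_switch_nb = {
	.notifier_call	= example_spu_switch_notify,
};

/* e.g. from module init/exit:
 *	spu_switch_event_register(&example_spu_switch_nb);
 *	spu_switch_event_unregister(&example_spu_switch_nb);
 */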
diff --git a/arch/powerpc/include/asm/spu_csa.h b/arch/powerpc/include/asm/spu_csa.h
new file mode 100644
index 0000000..a40fd49
--- /dev/null
+++ b/arch/powerpc/include/asm/spu_csa.h
@@ -0,0 +1,266 @@
+/*
+ * spu_csa.h: Definitions for SPU context save area (CSA).
+ *
+ * (C) Copyright IBM 2005
+ *
+ * Author: Mark Nutter <mnutter@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _SPU_CSA_H_
+#define _SPU_CSA_H_
+#ifdef __KERNEL__
+
+/*
+ * Total number of 128-bit registers.
+ */
+#define NR_SPU_GPRS 128
+#define NR_SPU_SPRS 9
+#define NR_SPU_REGS_PAD 7
+#define NR_SPU_SPILL_REGS 144 /* GPRS + SPRS + PAD */
+#define SIZEOF_SPU_SPILL_REGS	(NR_SPU_SPILL_REGS * 16)
+
+#define SPU_SAVE_COMPLETE 0x3FFB
+#define SPU_RESTORE_COMPLETE 0x3FFC
+
+/*
+ * Definitions for various 'stopped' status conditions,
+ * to be recreated during context restore.
+ */
+#define SPU_STOPPED_STATUS_P 1
+#define SPU_STOPPED_STATUS_I 2
+#define SPU_STOPPED_STATUS_H 3
+#define SPU_STOPPED_STATUS_S 4
+#define SPU_STOPPED_STATUS_S_I 5
+#define SPU_STOPPED_STATUS_S_P 6
+#define SPU_STOPPED_STATUS_P_H 7
+#define SPU_STOPPED_STATUS_P_I 8
+#define SPU_STOPPED_STATUS_R 9
+
+/*
+ * Definitions for software decrementer status flag.
+ */
+#define SPU_DECR_STATUS_RUNNING 0x1
+#define SPU_DECR_STATUS_WRAPPED 0x2
+
+#ifndef __ASSEMBLY__
+/**
+ * spu_reg128 - generic 128-bit register definition.
+ */
+struct spu_reg128 {
+ u32 slot[4];
+};
+
+/**
+ * struct spu_lscsa - Local Store Context Save Area.
+ * @gprs: Array of saved registers.
+ * @fpcr: Saved floating point status control register.
+ * @decr: Saved decrementer value.
+ * @decr_status: Indicates software decrementer status flags.
+ * @ppu_mb: Saved PPU mailbox data.
+ * @ppuint_mb: Saved PPU interrupting mailbox data.
+ * @tag_mask: Saved tag group mask.
+ * @event_mask: Saved event mask.
+ * @srr0: Saved SRR0.
+ * @stopped_status: Conditions to be recreated by restore.
+ * @ls: Saved contents of Local Storage Area.
+ *
+ * The LSCSA represents state that is primarily saved and
+ * restored by SPU-side code.
+ */
+struct spu_lscsa {
+ struct spu_reg128 gprs[128];
+ struct spu_reg128 fpcr;
+ struct spu_reg128 decr;
+ struct spu_reg128 decr_status;
+ struct spu_reg128 ppu_mb;
+ struct spu_reg128 ppuint_mb;
+ struct spu_reg128 tag_mask;
+ struct spu_reg128 event_mask;
+ struct spu_reg128 srr0;
+ struct spu_reg128 stopped_status;
+
+ /*
+ * 'ls' must be page-aligned on all configurations.
+ * Since we don't want to rely on having the spu-gcc
+ * installed to build the kernel and this structure
+ * is used in the SPU-side code, make it 64k-page
+ * aligned for now.
+ */
+ unsigned char ls[LS_SIZE] __attribute__((aligned(65536)));
+};
+
+#ifndef __SPU__
+/*
+ * struct spu_problem_collapsed - condensed problem state area, w/o pads.
+ */
+struct spu_problem_collapsed {
+ u64 spc_mssync_RW;
+ u32 mfc_lsa_W;
+ u32 unused_pad0;
+ u64 mfc_ea_W;
+ union mfc_tag_size_class_cmd mfc_union_W;
+ u32 dma_qstatus_R;
+ u32 dma_querytype_RW;
+ u32 dma_querymask_RW;
+ u32 dma_tagstatus_R;
+ u32 pu_mb_R;
+ u32 spu_mb_W;
+ u32 mb_stat_R;
+ u32 spu_runcntl_RW;
+ u32 spu_status_R;
+ u32 spu_spc_R;
+ u32 spu_npc_RW;
+ u32 signal_notify1;
+ u32 signal_notify2;
+ u32 unused_pad1;
+};
+
+/*
+ * struct spu_priv1_collapsed - condensed privileged 1 area, w/o pads.
+ */
+struct spu_priv1_collapsed {
+ u64 mfc_sr1_RW;
+ u64 mfc_lpid_RW;
+ u64 spu_idr_RW;
+ u64 mfc_vr_RO;
+ u64 spu_vr_RO;
+ u64 int_mask_class0_RW;
+ u64 int_mask_class1_RW;
+ u64 int_mask_class2_RW;
+ u64 int_stat_class0_RW;
+ u64 int_stat_class1_RW;
+ u64 int_stat_class2_RW;
+ u64 int_route_RW;
+ u64 mfc_atomic_flush_RW;
+ u64 resource_allocation_groupID_RW;
+ u64 resource_allocation_enable_RW;
+ u64 mfc_fir_R;
+ u64 mfc_fir_status_or_W;
+ u64 mfc_fir_status_and_W;
+ u64 mfc_fir_mask_R;
+ u64 mfc_fir_mask_or_W;
+ u64 mfc_fir_mask_and_W;
+ u64 mfc_fir_chkstp_enable_RW;
+ u64 smf_sbi_signal_sel;
+ u64 smf_ato_signal_sel;
+ u64 tlb_index_hint_RO;
+ u64 tlb_index_W;
+ u64 tlb_vpn_RW;
+ u64 tlb_rpn_RW;
+ u64 tlb_invalidate_entry_W;
+ u64 tlb_invalidate_all_W;
+ u64 smm_hid;
+ u64 mfc_accr_RW;
+ u64 mfc_dsisr_RW;
+ u64 mfc_dar_RW;
+ u64 rmt_index_RW;
+ u64 rmt_data1_RW;
+ u64 mfc_dsir_R;
+ u64 mfc_lsacr_RW;
+ u64 mfc_lscrr_R;
+ u64 mfc_tclass_id_RW;
+ u64 mfc_rm_boundary;
+ u64 smf_dma_signal_sel;
+ u64 smm_signal_sel;
+ u64 mfc_cer_R;
+ u64 pu_ecc_cntl_RW;
+ u64 pu_ecc_stat_RW;
+ u64 spu_ecc_addr_RW;
+ u64 spu_err_mask_RW;
+ u64 spu_trig0_sel;
+ u64 spu_trig1_sel;
+ u64 spu_trig2_sel;
+ u64 spu_trig3_sel;
+ u64 spu_trace_sel;
+ u64 spu_event0_sel;
+ u64 spu_event1_sel;
+ u64 spu_event2_sel;
+ u64 spu_event3_sel;
+ u64 spu_trace_cntl;
+};
+
+/*
+ * struct spu_priv2_collapsed - condensed privileged 2 area, w/o pads.
+ */
+struct spu_priv2_collapsed {
+ u64 slb_index_W;
+ u64 slb_esid_RW;
+ u64 slb_vsid_RW;
+ u64 slb_invalidate_entry_W;
+ u64 slb_invalidate_all_W;
+ struct mfc_cq_sr spuq[16];
+ struct mfc_cq_sr puq[8];
+ u64 mfc_control_RW;
+ u64 puint_mb_R;
+ u64 spu_privcntl_RW;
+ u64 spu_lslr_RW;
+ u64 spu_chnlcntptr_RW;
+ u64 spu_chnlcnt_RW;
+ u64 spu_chnldata_RW;
+ u64 spu_cfg_RW;
+ u64 spu_tag_status_query_RW;
+ u64 spu_cmd_buf1_RW;
+ u64 spu_cmd_buf2_RW;
+ u64 spu_atomic_status_RW;
+};
+
+/**
+ * struct spu_state
+ * @lscsa: Local Store Context Save Area.
+ * @prob: Collapsed Problem State Area, w/o pads.
+ * @priv1: Collapsed Privileged 1 Area, w/o pads.
+ * @priv2: Collapsed Privileged 2 Area, w/o pads.
+ * @spu_chnlcnt_RW: Array of saved channel counts.
+ * @spu_chnldata_RW: Array of saved channel data.
+ * @suspend_time: Time stamp when decrementer disabled.
+ *
+ * Structure representing the whole of the SPU
+ * context save area (CSA). This struct contains
+ * all of the state necessary to suspend and then
+ * later optionally resume execution of an SPU
+ * context.
+ *
+ * The @lscsa region is by far the largest, and is
+ * allocated separately so that it may either be
+ * pinned or mapped to/from application memory, as
+ * appropriate for the OS environment.
+ */
+struct spu_state {
+ struct spu_lscsa *lscsa;
+#ifdef CONFIG_SPU_FS_64K_LS
+ int use_big_pages;
+ /* One struct page per 64k page */
+#define SPU_LSCSA_NUM_BIG_PAGES (sizeof(struct spu_lscsa) / 0x10000)
+ struct page *lscsa_pages[SPU_LSCSA_NUM_BIG_PAGES];
+#endif
+ struct spu_problem_collapsed prob;
+ struct spu_priv1_collapsed priv1;
+ struct spu_priv2_collapsed priv2;
+ u64 spu_chnlcnt_RW[32];
+ u64 spu_chnldata_RW[32];
+ u32 spu_mailbox_data[4];
+ u32 pu_mailbox_data[1];
+ u64 class_0_dar, class_0_pending;
+ u64 class_1_dar, class_1_dsisr;
+ unsigned long suspend_time;
+ spinlock_t register_lock;
+};
+
+#endif /* !__SPU__ */
+#endif /* __KERNEL__ */
+#endif /* !__ASSEMBLY__ */
+#endif /* _SPU_CSA_H_ */
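
[Editor's note: the comment on spu_lscsa::ls above explains why the local-store image is forced to 64k alignment rather than relying on the kernel page size. A small, illustrative compile-time check of that layout assumption -- not part of this patch -- could look like the following; LS_SIZE comes from <asm/spu.h>.]

#include <linux/kernel.h>	/* BUILD_BUG_ON() */
#include <linux/stddef.h>	/* offsetof() */
#include <asm/spu.h>		/* LS_SIZE */
#include <asm/spu_csa.h>

static inline void spu_lscsa_layout_check(void)
{
	/* The local-store image must start on a 64k boundary in the LSCSA. */
	BUILD_BUG_ON(offsetof(struct spu_lscsa, ls) & 0xffff);
	/* The register spill area before it must fit inside that first 64k. */
	BUILD_BUG_ON(offsetof(struct spu_lscsa, ls) > 0x10000);
}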
diff --git a/arch/powerpc/include/asm/spu_info.h b/arch/powerpc/include/asm/spu_info.h
new file mode 100644
index 0000000..3545efb
--- /dev/null
+++ b/arch/powerpc/include/asm/spu_info.h
@@ -0,0 +1,54 @@
+/*
+ * SPU info structures
+ *
+ * (C) Copyright 2006 IBM Corp.
+ *
+ * Author: Dwayne Grant McConnell <decimal@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _SPU_INFO_H
+#define _SPU_INFO_H
+
+#ifdef __KERNEL__
+#include <asm/spu.h>
+#include <linux/types.h>
+#else
+struct mfc_cq_sr {
+ __u64 mfc_cq_data0_RW;
+ __u64 mfc_cq_data1_RW;
+ __u64 mfc_cq_data2_RW;
+ __u64 mfc_cq_data3_RW;
+};
+#endif /* __KERNEL__ */
+
+struct spu_dma_info {
+ __u64 dma_info_type;
+ __u64 dma_info_mask;
+ __u64 dma_info_status;
+ __u64 dma_info_stall_and_notify;
+ __u64 dma_info_atomic_command_status;
+ struct mfc_cq_sr dma_info_command_data[16];
+};
+
+struct spu_proxydma_info {
+ __u64 proxydma_info_type;
+ __u64 proxydma_info_mask;
+ __u64 proxydma_info_status;
+ struct mfc_cq_sr proxydma_info_command_data[8];
+};
+
+#endif
diff --git a/arch/powerpc/include/asm/spu_priv1.h b/arch/powerpc/include/asm/spu_priv1.h
new file mode 100644
index 0000000..25020a3
--- /dev/null
+++ b/arch/powerpc/include/asm/spu_priv1.h
@@ -0,0 +1,236 @@
+/*
+ * Defines an spu hypervisor abstraction layer.
+ *
+ * Copyright 2006 Sony Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#if !defined(_SPU_PRIV1_H)
+#define _SPU_PRIV1_H
+#if defined(__KERNEL__)
+
+#include <linux/types.h>
+
+struct spu;
+struct spu_context;
+
+/* access to priv1 registers */
+
+struct spu_priv1_ops {
+ void (*int_mask_and) (struct spu *spu, int class, u64 mask);
+ void (*int_mask_or) (struct spu *spu, int class, u64 mask);
+ void (*int_mask_set) (struct spu *spu, int class, u64 mask);
+ u64 (*int_mask_get) (struct spu *spu, int class);
+ void (*int_stat_clear) (struct spu *spu, int class, u64 stat);
+ u64 (*int_stat_get) (struct spu *spu, int class);
+ void (*cpu_affinity_set) (struct spu *spu, int cpu);
+ u64 (*mfc_dar_get) (struct spu *spu);
+ u64 (*mfc_dsisr_get) (struct spu *spu);
+ void (*mfc_dsisr_set) (struct spu *spu, u64 dsisr);
+ void (*mfc_sdr_setup) (struct spu *spu);
+ void (*mfc_sr1_set) (struct spu *spu, u64 sr1);
+ u64 (*mfc_sr1_get) (struct spu *spu);
+ void (*mfc_tclass_id_set) (struct spu *spu, u64 tclass_id);
+ u64 (*mfc_tclass_id_get) (struct spu *spu);
+ void (*tlb_invalidate) (struct spu *spu);
+ void (*resource_allocation_groupID_set) (struct spu *spu, u64 id);
+ u64 (*resource_allocation_groupID_get) (struct spu *spu);
+ void (*resource_allocation_enable_set) (struct spu *spu, u64 enable);
+ u64 (*resource_allocation_enable_get) (struct spu *spu);
+};
+
+extern const struct spu_priv1_ops* spu_priv1_ops;
+
+static inline void
+spu_int_mask_and (struct spu *spu, int class, u64 mask)
+{
+ spu_priv1_ops->int_mask_and(spu, class, mask);
+}
+
+static inline void
+spu_int_mask_or (struct spu *spu, int class, u64 mask)
+{
+ spu_priv1_ops->int_mask_or(spu, class, mask);
+}
+
+static inline void
+spu_int_mask_set (struct spu *spu, int class, u64 mask)
+{
+ spu_priv1_ops->int_mask_set(spu, class, mask);
+}
+
+static inline u64
+spu_int_mask_get (struct spu *spu, int class)
+{
+ return spu_priv1_ops->int_mask_get(spu, class);
+}
+
+static inline void
+spu_int_stat_clear (struct spu *spu, int class, u64 stat)
+{
+ spu_priv1_ops->int_stat_clear(spu, class, stat);
+}
+
+static inline u64
+spu_int_stat_get (struct spu *spu, int class)
+{
+ return spu_priv1_ops->int_stat_get (spu, class);
+}
+
+static inline void
+spu_cpu_affinity_set (struct spu *spu, int cpu)
+{
+ spu_priv1_ops->cpu_affinity_set(spu, cpu);
+}
+
+static inline u64
+spu_mfc_dar_get (struct spu *spu)
+{
+ return spu_priv1_ops->mfc_dar_get(spu);
+}
+
+static inline u64
+spu_mfc_dsisr_get (struct spu *spu)
+{
+ return spu_priv1_ops->mfc_dsisr_get(spu);
+}
+
+static inline void
+spu_mfc_dsisr_set (struct spu *spu, u64 dsisr)
+{
+ spu_priv1_ops->mfc_dsisr_set(spu, dsisr);
+}
+
+static inline void
+spu_mfc_sdr_setup (struct spu *spu)
+{
+ spu_priv1_ops->mfc_sdr_setup(spu);
+}
+
+static inline void
+spu_mfc_sr1_set (struct spu *spu, u64 sr1)
+{
+ spu_priv1_ops->mfc_sr1_set(spu, sr1);
+}
+
+static inline u64
+spu_mfc_sr1_get (struct spu *spu)
+{
+ return spu_priv1_ops->mfc_sr1_get(spu);
+}
+
+static inline void
+spu_mfc_tclass_id_set (struct spu *spu, u64 tclass_id)
+{
+ spu_priv1_ops->mfc_tclass_id_set(spu, tclass_id);
+}
+
+static inline u64
+spu_mfc_tclass_id_get (struct spu *spu)
+{
+ return spu_priv1_ops->mfc_tclass_id_get(spu);
+}
+
+static inline void
+spu_tlb_invalidate (struct spu *spu)
+{
+ spu_priv1_ops->tlb_invalidate(spu);
+}
+
+static inline void
+spu_resource_allocation_groupID_set (struct spu *spu, u64 id)
+{
+ spu_priv1_ops->resource_allocation_groupID_set(spu, id);
+}
+
+static inline u64
+spu_resource_allocation_groupID_get (struct spu *spu)
+{
+ return spu_priv1_ops->resource_allocation_groupID_get(spu);
+}
+
+static inline void
+spu_resource_allocation_enable_set (struct spu *spu, u64 enable)
+{
+ spu_priv1_ops->resource_allocation_enable_set(spu, enable);
+}
+
+static inline u64
+spu_resource_allocation_enable_get (struct spu *spu)
+{
+ return spu_priv1_ops->resource_allocation_enable_get(spu);
+}
+
+/* spu management abstraction */
+
+struct spu_management_ops {
+ int (*enumerate_spus)(int (*fn)(void *data));
+ int (*create_spu)(struct spu *spu, void *data);
+ int (*destroy_spu)(struct spu *spu);
+ void (*enable_spu)(struct spu_context *ctx);
+ void (*disable_spu)(struct spu_context *ctx);
+ int (*init_affinity)(void);
+};
+
+extern const struct spu_management_ops* spu_management_ops;
+
+static inline int
+spu_enumerate_spus (int (*fn)(void *data))
+{
+ return spu_management_ops->enumerate_spus(fn);
+}
+
+static inline int
+spu_create_spu (struct spu *spu, void *data)
+{
+ return spu_management_ops->create_spu(spu, data);
+}
+
+static inline int
+spu_destroy_spu (struct spu *spu)
+{
+ return spu_management_ops->destroy_spu(spu);
+}
+
+static inline int
+spu_init_affinity (void)
+{
+ return spu_management_ops->init_affinity();
+}
+
+static inline void
+spu_enable_spu (struct spu_context *ctx)
+{
+ spu_management_ops->enable_spu(ctx);
+}
+
+static inline void
+spu_disable_spu (struct spu_context *ctx)
+{
+ spu_management_ops->disable_spu(ctx);
+}
+
+/*
+ * The following declarations are put here for convenience
+ * and are only intended to be used by the platform setup code.
+ */
+
+extern const struct spu_priv1_ops spu_priv1_mmio_ops;
+extern const struct spu_priv1_ops spu_priv1_beat_ops;
+
+extern const struct spu_management_ops spu_management_of_ops;
+
+#endif /* __KERNEL__ */
+#endif
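
[Editor's note: every privileged-1 register access goes through the spu_priv1_ops indirection above, so the same spufs code can run either on bare metal (MMIO) or under a hypervisor (the Beat ops). A minimal sketch -- not part of this patch, and with a made-up function name and condition -- of how platform setup code is meant to wire up the pointers declared at the end of the header:]

#include <linux/init.h>
#include <asm/spu_priv1.h>

/* Illustrative only; the real assignments live in the platform setup code. */
static void __init example_setup_spu_ops(int running_on_hypervisor)
{
	spu_management_ops = &spu_management_of_ops;
	spu_priv1_ops = running_on_hypervisor ? &spu_priv1_beat_ops
					      : &spu_priv1_mmio_ops;
}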
diff --git a/arch/powerpc/include/asm/sstep.h b/arch/powerpc/include/asm/sstep.h
new file mode 100644
index 0000000..f593b0f
--- /dev/null
+++ b/arch/powerpc/include/asm/sstep.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+struct pt_regs;
+
+/*
+ * We don't allow single-stepping an mtmsrd that would clear
+ * MSR_RI, since that would make the exception unrecoverable.
+ * Since we need to single-step to proceed from a breakpoint,
+ * we don't allow putting a breakpoint on an mtmsrd instruction.
+ * Similarly we don't allow breakpoints on rfid instructions.
+ * These macros tell us if an instruction is a mtmsrd or rfid.
+ * Note that IS_MTMSRD returns true for both an mtmsr (32-bit)
+ * and an mtmsrd (64-bit).
+ */
+#define IS_MTMSRD(instr) (((instr) & 0xfc0007be) == 0x7c000124)
+#define IS_RFID(instr) (((instr) & 0xfc0007fe) == 0x4c000024)
+#define IS_RFI(instr) (((instr) & 0xfc0007fe) == 0x4c000064)
+
+/* Emulate instructions that cause a transfer of control. */
+extern int emulate_step(struct pt_regs *regs, unsigned int instr);
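
[Editor's note: the IS_MTMSRD/IS_RFID masks above are what a debugger-style caller would use before planting a breakpoint, per the comment in the header. An illustrative check -- not part of this patch -- assuming the caller has already fetched the instruction word:]

#include <asm/sstep.h>

/* Refuse to place a breakpoint on instructions we cannot single-step over. */
static int example_breakpoint_allowed(unsigned int instr)
{
	if (IS_MTMSRD(instr) || IS_RFID(instr) || IS_RFI(instr))
		return 0;
	return 1;
}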
diff --git a/arch/powerpc/include/asm/stat.h b/arch/powerpc/include/asm/stat.h
new file mode 100644
index 0000000..e4edc51
--- /dev/null
+++ b/arch/powerpc/include/asm/stat.h
@@ -0,0 +1,81 @@
+#ifndef _ASM_POWERPC_STAT_H
+#define _ASM_POWERPC_STAT_H
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/types.h>
+
+#define STAT_HAVE_NSEC 1
+
+#ifndef __powerpc64__
+struct __old_kernel_stat {
+ unsigned short st_dev;
+ unsigned short st_ino;
+ unsigned short st_mode;
+ unsigned short st_nlink;
+ unsigned short st_uid;
+ unsigned short st_gid;
+ unsigned short st_rdev;
+ unsigned long st_size;
+ unsigned long st_atime;
+ unsigned long st_mtime;
+ unsigned long st_ctime;
+};
+#endif /* !__powerpc64__ */
+
+struct stat {
+ unsigned long st_dev;
+ ino_t st_ino;
+#ifdef __powerpc64__
+ nlink_t st_nlink;
+ mode_t st_mode;
+#else
+ mode_t st_mode;
+ nlink_t st_nlink;
+#endif
+ uid_t st_uid;
+ gid_t st_gid;
+ unsigned long st_rdev;
+ off_t st_size;
+ unsigned long st_blksize;
+ unsigned long st_blocks;
+ unsigned long st_atime;
+ unsigned long st_atime_nsec;
+ unsigned long st_mtime;
+ unsigned long st_mtime_nsec;
+ unsigned long st_ctime;
+ unsigned long st_ctime_nsec;
+ unsigned long __unused4;
+ unsigned long __unused5;
+#ifdef __powerpc64__
+ unsigned long __unused6;
+#endif
+};
+
+/* This matches struct stat64 in glibc2.1. Only used for 32 bit. */
+struct stat64 {
+ unsigned long long st_dev; /* Device. */
+ unsigned long long st_ino; /* File serial number. */
+ unsigned int st_mode; /* File mode. */
+ unsigned int st_nlink; /* Link count. */
+ unsigned int st_uid; /* User ID of the file's owner. */
+ unsigned int st_gid; /* Group ID of the file's group. */
+ unsigned long long st_rdev; /* Device number, if device. */
+ unsigned short __pad2;
+ long long st_size; /* Size of file, in bytes. */
+ int st_blksize; /* Optimal block size for I/O. */
+	long long st_blocks;	/* Number of 512-byte blocks allocated. */
+ int st_atime; /* Time of last access. */
+ unsigned int st_atime_nsec;
+ int st_mtime; /* Time of last modification. */
+ unsigned int st_mtime_nsec;
+ int st_ctime; /* Time of last status change. */
+ unsigned int st_ctime_nsec;
+ unsigned int __unused4;
+ unsigned int __unused5;
+};
+
+#endif /* _ASM_POWERPC_STAT_H */
diff --git a/arch/powerpc/include/asm/statfs.h b/arch/powerpc/include/asm/statfs.h
new file mode 100644
index 0000000..6702402
--- /dev/null
+++ b/arch/powerpc/include/asm/statfs.h
@@ -0,0 +1,60 @@
+#ifndef _ASM_POWERPC_STATFS_H
+#define _ASM_POWERPC_STATFS_H
+
+/* For ppc32 we just use the generic definitions; it's not so simple on ppc64 */
+
+#ifndef __powerpc64__
+#include <asm-generic/statfs.h>
+#else
+
+#ifndef __KERNEL_STRICT_NAMES
+#include <linux/types.h>
+typedef __kernel_fsid_t fsid_t;
+#endif
+
+/*
+ * We're already 64-bit, so duplicate the definition
+ */
+struct statfs {
+ long f_type;
+ long f_bsize;
+ long f_blocks;
+ long f_bfree;
+ long f_bavail;
+ long f_files;
+ long f_ffree;
+ __kernel_fsid_t f_fsid;
+ long f_namelen;
+ long f_frsize;
+ long f_spare[5];
+};
+
+struct statfs64 {
+ long f_type;
+ long f_bsize;
+ long f_blocks;
+ long f_bfree;
+ long f_bavail;
+ long f_files;
+ long f_ffree;
+ __kernel_fsid_t f_fsid;
+ long f_namelen;
+ long f_frsize;
+ long f_spare[5];
+};
+
+struct compat_statfs64 {
+ __u32 f_type;
+ __u32 f_bsize;
+ __u64 f_blocks;
+ __u64 f_bfree;
+ __u64 f_bavail;
+ __u64 f_files;
+ __u64 f_ffree;
+ __kernel_fsid_t f_fsid;
+ __u32 f_namelen;
+ __u32 f_frsize;
+ __u32 f_spare[5];
+};
+#endif /* ! __powerpc64__ */
+#endif
diff --git a/arch/powerpc/include/asm/string.h b/arch/powerpc/include/asm/string.h
new file mode 100644
index 0000000..e40010a
--- /dev/null
+++ b/arch/powerpc/include/asm/string.h
@@ -0,0 +1,32 @@
+#ifndef _ASM_POWERPC_STRING_H
+#define _ASM_POWERPC_STRING_H
+
+#ifdef __KERNEL__
+
+#define __HAVE_ARCH_STRCPY
+#define __HAVE_ARCH_STRNCPY
+#define __HAVE_ARCH_STRLEN
+#define __HAVE_ARCH_STRCMP
+#define __HAVE_ARCH_STRNCMP
+#define __HAVE_ARCH_STRCAT
+#define __HAVE_ARCH_MEMSET
+#define __HAVE_ARCH_MEMCPY
+#define __HAVE_ARCH_MEMMOVE
+#define __HAVE_ARCH_MEMCMP
+#define __HAVE_ARCH_MEMCHR
+
+extern char * strcpy(char *,const char *);
+extern char * strncpy(char *,const char *, __kernel_size_t);
+extern __kernel_size_t strlen(const char *);
+extern int strcmp(const char *,const char *);
+extern int strncmp(const char *, const char *, __kernel_size_t);
+extern char * strcat(char *, const char *);
+extern void * memset(void *,int,__kernel_size_t);
+extern void * memcpy(void *,const void *,__kernel_size_t);
+extern void * memmove(void *,const void *,__kernel_size_t);
+extern int memcmp(const void *,const void *,__kernel_size_t);
+extern void * memchr(const void *,int,__kernel_size_t);
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_POWERPC_STRING_H */
diff --git a/arch/powerpc/include/asm/suspend.h b/arch/powerpc/include/asm/suspend.h
new file mode 100644
index 0000000..cbf2c94
--- /dev/null
+++ b/arch/powerpc/include/asm/suspend.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_POWERPC_SUSPEND_H
+#define __ASM_POWERPC_SUSPEND_H
+
+static inline int arch_prepare_suspend(void) { return 0; }
+
+void save_processor_state(void);
+void restore_processor_state(void);
+
+#endif /* __ASM_POWERPC_SUSPEND_H */
diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
new file mode 100644
index 0000000..45963e8
--- /dev/null
+++ b/arch/powerpc/include/asm/synch.h
@@ -0,0 +1,44 @@
+#ifndef _ASM_POWERPC_SYNCH_H
+#define _ASM_POWERPC_SYNCH_H
+#ifdef __KERNEL__
+
+#include <linux/stringify.h>
+#include <asm/feature-fixups.h>
+
+#ifndef __ASSEMBLY__
+extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
+extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
+ void *fixup_end);
+
+static inline void eieio(void)
+{
+ __asm__ __volatile__ ("eieio" : : : "memory");
+}
+
+static inline void isync(void)
+{
+ __asm__ __volatile__ ("isync" : : : "memory");
+}
+#endif /* __ASSEMBLY__ */
+
+#if defined(__powerpc64__)
+# define LWSYNC lwsync
+#elif defined(CONFIG_E500)
+# define LWSYNC \
+ START_LWSYNC_SECTION(96); \
+ sync; \
+ MAKE_LWSYNC_SECTION_ENTRY(96, __lwsync_fixup);
+#else
+# define LWSYNC sync
+#endif
+
+#ifdef CONFIG_SMP
+#define ISYNC_ON_SMP "\n\tisync\n"
+#define LWSYNC_ON_SMP stringify_in_c(LWSYNC) "\n"
+#else
+#define ISYNC_ON_SMP
+#define LWSYNC_ON_SMP
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_SYNCH_H */
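
[Editor's note: eieio()/isync() above are thin wrappers so C code can emit the ordering instructions directly. A minimal illustrative use -- not part of this patch -- where eieio() keeps two stores to a non-cacheable device mapping in order; the register layout and 'regs' pointer are assumptions, and 'regs' is taken to point at an ioremap'd (guarded, non-cacheable) area:]

#include <asm/synch.h>

struct example_dev_regs {
	volatile unsigned int data;
	volatile unsigned int go;
};

static void example_kick_device(struct example_dev_regs *regs, unsigned int v)
{
	regs->data = v;	/* store the payload first */
	eieio();	/* order it before the doorbell store below */
	regs->go = 1;
}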
diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h
new file mode 100644
index 0000000..efa7f0b
--- /dev/null
+++ b/arch/powerpc/include/asm/syscall.h
@@ -0,0 +1,84 @@
+/*
+ * Access to user system call parameters and results
+ *
+ * Copyright (C) 2008 Red Hat, Inc. All rights reserved.
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU General Public License v.2.
+ *
+ * See asm-generic/syscall.h for descriptions of what we must do here.
+ */
+
+#ifndef _ASM_SYSCALL_H
+#define _ASM_SYSCALL_H 1
+
+#include <linux/sched.h>
+
+static inline long syscall_get_nr(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return TRAP(regs) == 0xc00 ? regs->gpr[0] : -1L;
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ regs->gpr[3] = regs->orig_gpr3;
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return (regs->ccr & 0x1000) ? -regs->gpr[3] : 0;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->gpr[3];
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+ struct pt_regs *regs,
+ int error, long val)
+{
+ if (error) {
+ regs->ccr |= 0x1000L;
+ regs->gpr[3] = -error;
+ } else {
+ regs->ccr &= ~0x1000L;
+ regs->gpr[3] = val;
+ }
+}
+
+static inline void syscall_get_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned int i, unsigned int n,
+ unsigned long *args)
+{
+ BUG_ON(i + n > 6);
+#ifdef CONFIG_PPC64
+ if (test_tsk_thread_flag(task, TIF_32BIT)) {
+ /*
+		 * Zero-extend 32-bit argument values. The high bits are
+		 * garbage that the actual syscall dispatch ignores.
+ */
+ while (n-- > 0)
+ args[n] = (u32) regs->gpr[3 + i + n];
+ return;
+ }
+#endif
+ memcpy(args, &regs->gpr[3 + i], n * sizeof(args[0]));
+}
+
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned int i, unsigned int n,
+ const unsigned long *args)
+{
+ BUG_ON(i + n > 6);
+ memcpy(&regs->gpr[3 + i], args, n * sizeof(args[0]));
+}
+
+#endif /* _ASM_SYSCALL_H */
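
[Editor's note: taken together, the helpers above let generic tracing code inspect a stopped task's system call without knowing the powerpc ABI. A minimal sketch of a consumer -- not part of this patch; the function name and pr_info() reporting are for illustration only:]

#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/syscall.h>

static void example_report_syscall(struct task_struct *task,
				   struct pt_regs *regs)
{
	long nr = syscall_get_nr(task, regs);
	unsigned long args[6];

	if (nr == -1L)
		return;		/* not stopped at a system call */

	syscall_get_arguments(task, regs, 0, 6, args);
	pr_info("task %d: syscall %ld(%lx, %lx, %lx, ...)\n",
		task->pid, nr, args[0], args[1], args[2]);
}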
diff --git a/arch/powerpc/include/asm/syscalls.h b/arch/powerpc/include/asm/syscalls.h
new file mode 100644
index 0000000..eb8eb40
--- /dev/null
+++ b/arch/powerpc/include/asm/syscalls.h
@@ -0,0 +1,52 @@
+#ifndef __ASM_POWERPC_SYSCALLS_H
+#define __ASM_POWERPC_SYSCALLS_H
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <linux/types.h>
+#include <asm/signal.h>
+
+struct new_utsname;
+struct pt_regs;
+struct rtas_args;
+struct sigaction;
+
+asmlinkage unsigned long sys_mmap(unsigned long addr, size_t len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, off_t offset);
+asmlinkage unsigned long sys_mmap2(unsigned long addr, size_t len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff);
+asmlinkage int sys_execve(unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3, unsigned long a4,
+ unsigned long a5, struct pt_regs *regs);
+asmlinkage int sys_clone(unsigned long clone_flags, unsigned long usp,
+ int __user *parent_tidp, void __user *child_threadptr,
+ int __user *child_tidp, int p6, struct pt_regs *regs);
+asmlinkage int sys_fork(unsigned long p1, unsigned long p2,
+ unsigned long p3, unsigned long p4, unsigned long p5,
+ unsigned long p6, struct pt_regs *regs);
+asmlinkage int sys_vfork(unsigned long p1, unsigned long p2,
+ unsigned long p3, unsigned long p4, unsigned long p5,
+ unsigned long p6, struct pt_regs *regs);
+asmlinkage long sys_pipe(int __user *fildes);
+asmlinkage long sys_pipe2(int __user *fildes, int flags);
+asmlinkage long sys_rt_sigaction(int sig,
+ const struct sigaction __user *act,
+ struct sigaction __user *oact, size_t sigsetsize);
+asmlinkage int sys_ipc(uint call, int first, unsigned long second,
+ long third, void __user *ptr, long fifth);
+asmlinkage long ppc64_personality(unsigned long personality);
+asmlinkage int ppc_rtas(struct rtas_args __user *uargs);
+asmlinkage time_t sys64_time(time_t __user * tloc);
+asmlinkage long ppc_newuname(struct new_utsname __user * name);
+
+asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset,
+ size_t sigsetsize);
+asmlinkage long sys_sigaltstack(const stack_t __user *uss,
+ stack_t __user *uoss, unsigned long r5, unsigned long r6,
+ unsigned long r7, unsigned long r8, struct pt_regs *regs);
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_POWERPC_SYSCALLS_H */
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
new file mode 100644
index 0000000..e084272
--- /dev/null
+++ b/arch/powerpc/include/asm/systbl.h
@@ -0,0 +1,324 @@
+/*
+ * List of powerpc syscalls. For the meaning of the _SPU suffix see
+ * arch/powerpc/platforms/cell/spu_callbacks.c
+ */
+
+SYSCALL(restart_syscall)
+SYSCALL(exit)
+PPC_SYS(fork)
+SYSCALL_SPU(read)
+SYSCALL_SPU(write)
+COMPAT_SYS_SPU(open)
+SYSCALL_SPU(close)
+COMPAT_SYS_SPU(waitpid)
+COMPAT_SYS_SPU(creat)
+SYSCALL_SPU(link)
+SYSCALL_SPU(unlink)
+COMPAT_SYS(execve)
+SYSCALL_SPU(chdir)
+COMPAT_SYS_SPU(time)
+SYSCALL_SPU(mknod)
+SYSCALL_SPU(chmod)
+SYSCALL_SPU(lchown)
+SYSCALL(ni_syscall)
+OLDSYS(stat)
+SYSX_SPU(sys_lseek,ppc32_lseek,sys_lseek)
+SYSCALL_SPU(getpid)
+COMPAT_SYS(mount)
+SYSX(sys_ni_syscall,sys_oldumount,sys_oldumount)
+SYSCALL_SPU(setuid)
+SYSCALL_SPU(getuid)
+COMPAT_SYS_SPU(stime)
+COMPAT_SYS(ptrace)
+SYSCALL_SPU(alarm)
+OLDSYS(fstat)
+COMPAT_SYS(pause)
+COMPAT_SYS(utime)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+COMPAT_SYS_SPU(access)
+COMPAT_SYS_SPU(nice)
+SYSCALL(ni_syscall)
+SYSCALL_SPU(sync)
+COMPAT_SYS_SPU(kill)
+SYSCALL_SPU(rename)
+COMPAT_SYS_SPU(mkdir)
+SYSCALL_SPU(rmdir)
+SYSCALL_SPU(dup)
+SYSCALL_SPU(pipe)
+COMPAT_SYS_SPU(times)
+SYSCALL(ni_syscall)
+SYSCALL_SPU(brk)
+SYSCALL_SPU(setgid)
+SYSCALL_SPU(getgid)
+SYSCALL(signal)
+SYSCALL_SPU(geteuid)
+SYSCALL_SPU(getegid)
+SYSCALL(acct)
+SYSCALL(umount)
+SYSCALL(ni_syscall)
+COMPAT_SYS_SPU(ioctl)
+COMPAT_SYS_SPU(fcntl)
+SYSCALL(ni_syscall)
+COMPAT_SYS_SPU(setpgid)
+SYSCALL(ni_syscall)
+SYSX(sys_ni_syscall,sys_olduname, sys_olduname)
+COMPAT_SYS_SPU(umask)
+SYSCALL_SPU(chroot)
+SYSCALL(ustat)
+SYSCALL_SPU(dup2)
+SYSCALL_SPU(getppid)
+SYSCALL_SPU(getpgrp)
+SYSCALL_SPU(setsid)
+SYS32ONLY(sigaction)
+SYSCALL_SPU(sgetmask)
+COMPAT_SYS_SPU(ssetmask)
+SYSCALL_SPU(setreuid)
+SYSCALL_SPU(setregid)
+SYS32ONLY(sigsuspend)
+COMPAT_SYS(sigpending)
+COMPAT_SYS_SPU(sethostname)
+COMPAT_SYS_SPU(setrlimit)
+COMPAT_SYS(old_getrlimit)
+COMPAT_SYS_SPU(getrusage)
+COMPAT_SYS_SPU(gettimeofday)
+COMPAT_SYS_SPU(settimeofday)
+COMPAT_SYS_SPU(getgroups)
+COMPAT_SYS_SPU(setgroups)
+SYSX(sys_ni_syscall,sys_ni_syscall,ppc_select)
+SYSCALL_SPU(symlink)
+OLDSYS(lstat)
+COMPAT_SYS_SPU(readlink)
+SYSCALL(uselib)
+SYSCALL(swapon)
+SYSCALL(reboot)
+SYSX(sys_ni_syscall,old32_readdir,old_readdir)
+SYSCALL_SPU(mmap)
+SYSCALL_SPU(munmap)
+SYSCALL_SPU(truncate)
+SYSCALL_SPU(ftruncate)
+SYSCALL_SPU(fchmod)
+SYSCALL_SPU(fchown)
+COMPAT_SYS_SPU(getpriority)
+COMPAT_SYS_SPU(setpriority)
+SYSCALL(ni_syscall)
+COMPAT_SYS(statfs)
+COMPAT_SYS(fstatfs)
+SYSCALL(ni_syscall)
+COMPAT_SYS_SPU(socketcall)
+COMPAT_SYS_SPU(syslog)
+COMPAT_SYS_SPU(setitimer)
+COMPAT_SYS_SPU(getitimer)
+COMPAT_SYS_SPU(newstat)
+COMPAT_SYS_SPU(newlstat)
+COMPAT_SYS_SPU(newfstat)
+SYSX(sys_ni_syscall,sys_uname,sys_uname)
+SYSCALL(ni_syscall)
+SYSCALL_SPU(vhangup)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+COMPAT_SYS_SPU(wait4)
+SYSCALL(swapoff)
+COMPAT_SYS_SPU(sysinfo)
+COMPAT_SYS(ipc)
+SYSCALL_SPU(fsync)
+SYS32ONLY(sigreturn)
+PPC_SYS(clone)
+COMPAT_SYS_SPU(setdomainname)
+PPC_SYS_SPU(newuname)
+SYSCALL(ni_syscall)
+COMPAT_SYS_SPU(adjtimex)
+SYSCALL_SPU(mprotect)
+SYSX(sys_ni_syscall,compat_sys_sigprocmask,sys_sigprocmask)
+SYSCALL(ni_syscall)
+SYSCALL(init_module)
+SYSCALL(delete_module)
+SYSCALL(ni_syscall)
+SYSCALL(quotactl)
+COMPAT_SYS_SPU(getpgid)
+SYSCALL_SPU(fchdir)
+SYSCALL_SPU(bdflush)
+COMPAT_SYS(sysfs)
+SYSX_SPU(ppc64_personality,ppc64_personality,sys_personality)
+SYSCALL(ni_syscall)
+SYSCALL_SPU(setfsuid)
+SYSCALL_SPU(setfsgid)
+SYSCALL_SPU(llseek)
+COMPAT_SYS_SPU(getdents)
+SYSX_SPU(sys_select,ppc32_select,ppc_select)
+SYSCALL_SPU(flock)
+SYSCALL_SPU(msync)
+COMPAT_SYS_SPU(readv)
+COMPAT_SYS_SPU(writev)
+COMPAT_SYS_SPU(getsid)
+SYSCALL_SPU(fdatasync)
+COMPAT_SYS(sysctl)
+SYSCALL_SPU(mlock)
+SYSCALL_SPU(munlock)
+SYSCALL_SPU(mlockall)
+SYSCALL_SPU(munlockall)
+COMPAT_SYS_SPU(sched_setparam)
+COMPAT_SYS_SPU(sched_getparam)
+COMPAT_SYS_SPU(sched_setscheduler)
+COMPAT_SYS_SPU(sched_getscheduler)
+SYSCALL_SPU(sched_yield)
+COMPAT_SYS_SPU(sched_get_priority_max)
+COMPAT_SYS_SPU(sched_get_priority_min)
+COMPAT_SYS_SPU(sched_rr_get_interval)
+COMPAT_SYS_SPU(nanosleep)
+SYSCALL_SPU(mremap)
+SYSCALL_SPU(setresuid)
+SYSCALL_SPU(getresuid)
+SYSCALL(ni_syscall)
+SYSCALL_SPU(poll)
+COMPAT_SYS(nfsservctl)
+SYSCALL_SPU(setresgid)
+SYSCALL_SPU(getresgid)
+COMPAT_SYS_SPU(prctl)
+COMPAT_SYS(rt_sigreturn)
+COMPAT_SYS(rt_sigaction)
+COMPAT_SYS(rt_sigprocmask)
+COMPAT_SYS(rt_sigpending)
+COMPAT_SYS(rt_sigtimedwait)
+COMPAT_SYS(rt_sigqueueinfo)
+COMPAT_SYS(rt_sigsuspend)
+COMPAT_SYS_SPU(pread64)
+COMPAT_SYS_SPU(pwrite64)
+SYSCALL_SPU(chown)
+SYSCALL_SPU(getcwd)
+SYSCALL_SPU(capget)
+SYSCALL_SPU(capset)
+COMPAT_SYS(sigaltstack)
+SYSX_SPU(sys_sendfile64,compat_sys_sendfile,sys_sendfile)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+PPC_SYS(vfork)
+COMPAT_SYS_SPU(getrlimit)
+COMPAT_SYS_SPU(readahead)
+SYS32ONLY(mmap2)
+SYS32ONLY(truncate64)
+SYS32ONLY(ftruncate64)
+SYSX(sys_ni_syscall,sys_stat64,sys_stat64)
+SYSX(sys_ni_syscall,sys_lstat64,sys_lstat64)
+SYSX(sys_ni_syscall,sys_fstat64,sys_fstat64)
+SYSCALL(pciconfig_read)
+SYSCALL(pciconfig_write)
+SYSCALL(pciconfig_iobase)
+SYSCALL(ni_syscall)
+SYSCALL_SPU(getdents64)
+SYSCALL_SPU(pivot_root)
+SYSX(sys_ni_syscall,compat_sys_fcntl64,sys_fcntl64)
+SYSCALL_SPU(madvise)
+SYSCALL_SPU(mincore)
+SYSCALL_SPU(gettid)
+SYSCALL_SPU(tkill)
+SYSCALL_SPU(setxattr)
+SYSCALL_SPU(lsetxattr)
+SYSCALL_SPU(fsetxattr)
+SYSCALL_SPU(getxattr)
+SYSCALL_SPU(lgetxattr)
+SYSCALL_SPU(fgetxattr)
+SYSCALL_SPU(listxattr)
+SYSCALL_SPU(llistxattr)
+SYSCALL_SPU(flistxattr)
+SYSCALL_SPU(removexattr)
+SYSCALL_SPU(lremovexattr)
+SYSCALL_SPU(fremovexattr)
+COMPAT_SYS_SPU(futex)
+COMPAT_SYS_SPU(sched_setaffinity)
+COMPAT_SYS_SPU(sched_getaffinity)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYS32ONLY(sendfile64)
+COMPAT_SYS_SPU(io_setup)
+SYSCALL_SPU(io_destroy)
+COMPAT_SYS_SPU(io_getevents)
+COMPAT_SYS_SPU(io_submit)
+SYSCALL_SPU(io_cancel)
+SYSCALL(set_tid_address)
+SYSX_SPU(sys_fadvise64,ppc32_fadvise64,sys_fadvise64)
+SYSCALL(exit_group)
+SYSX(sys_lookup_dcookie,ppc32_lookup_dcookie,sys_lookup_dcookie)
+SYSCALL_SPU(epoll_create)
+SYSCALL_SPU(epoll_ctl)
+SYSCALL_SPU(epoll_wait)
+SYSCALL_SPU(remap_file_pages)
+SYSX_SPU(sys_timer_create,compat_sys_timer_create,sys_timer_create)
+COMPAT_SYS_SPU(timer_settime)
+COMPAT_SYS_SPU(timer_gettime)
+SYSCALL_SPU(timer_getoverrun)
+SYSCALL_SPU(timer_delete)
+COMPAT_SYS_SPU(clock_settime)
+COMPAT_SYS_SPU(clock_gettime)
+COMPAT_SYS_SPU(clock_getres)
+COMPAT_SYS_SPU(clock_nanosleep)
+SYSX(ppc64_swapcontext,ppc32_swapcontext,ppc_swapcontext)
+COMPAT_SYS_SPU(tgkill)
+COMPAT_SYS_SPU(utimes)
+COMPAT_SYS_SPU(statfs64)
+COMPAT_SYS_SPU(fstatfs64)
+SYSX(sys_ni_syscall, ppc_fadvise64_64, ppc_fadvise64_64)
+PPC_SYS_SPU(rtas)
+OLDSYS(debug_setcontext)
+SYSCALL(ni_syscall)
+COMPAT_SYS(migrate_pages)
+COMPAT_SYS(mbind)
+COMPAT_SYS(get_mempolicy)
+COMPAT_SYS(set_mempolicy)
+COMPAT_SYS(mq_open)
+SYSCALL(mq_unlink)
+COMPAT_SYS(mq_timedsend)
+COMPAT_SYS(mq_timedreceive)
+COMPAT_SYS(mq_notify)
+COMPAT_SYS(mq_getsetattr)
+COMPAT_SYS(kexec_load)
+COMPAT_SYS(add_key)
+COMPAT_SYS(request_key)
+COMPAT_SYS(keyctl)
+COMPAT_SYS(waitid)
+COMPAT_SYS(ioprio_set)
+COMPAT_SYS(ioprio_get)
+SYSCALL(inotify_init)
+SYSCALL(inotify_add_watch)
+SYSCALL(inotify_rm_watch)
+SYSCALL(spu_run)
+SYSCALL(spu_create)
+COMPAT_SYS(pselect6)
+COMPAT_SYS(ppoll)
+SYSCALL_SPU(unshare)
+SYSCALL_SPU(splice)
+SYSCALL_SPU(tee)
+COMPAT_SYS_SPU(vmsplice)
+COMPAT_SYS_SPU(openat)
+SYSCALL_SPU(mkdirat)
+SYSCALL_SPU(mknodat)
+SYSCALL_SPU(fchownat)
+COMPAT_SYS_SPU(futimesat)
+SYSX_SPU(sys_newfstatat, sys_fstatat64, sys_fstatat64)
+SYSCALL_SPU(unlinkat)
+SYSCALL_SPU(renameat)
+SYSCALL_SPU(linkat)
+SYSCALL_SPU(symlinkat)
+SYSCALL_SPU(readlinkat)
+SYSCALL_SPU(fchmodat)
+SYSCALL_SPU(faccessat)
+COMPAT_SYS_SPU(get_robust_list)
+COMPAT_SYS_SPU(set_robust_list)
+COMPAT_SYS_SPU(move_pages)
+SYSCALL_SPU(getcpu)
+COMPAT_SYS(epoll_pwait)
+COMPAT_SYS_SPU(utimensat)
+COMPAT_SYS_SPU(signalfd)
+SYSCALL_SPU(timerfd_create)
+SYSCALL_SPU(eventfd)
+COMPAT_SYS_SPU(sync_file_range2)
+COMPAT_SYS(fallocate)
+SYSCALL(subpage_prot)
+COMPAT_SYS_SPU(timerfd_settime)
+COMPAT_SYS_SPU(timerfd_gettime)
+COMPAT_SYS_SPU(signalfd4)
+SYSCALL_SPU(eventfd2)
+SYSCALL_SPU(epoll_create1)
+SYSCALL_SPU(dup3)
+SYSCALL_SPU(pipe2)
+SYSCALL(inotify_init1)
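
[Editor's note: systbl.h is an x-macro list -- each consumer defines SYSCALL(), COMPAT_SYS(), PPC_SYS(), SYSX() and their _SPU/OLDSYS/SYS32ONLY variants for its own purpose and then includes the file; the real consumers are the syscall table and the cell spu_callbacks.c list mentioned in the header comment. A much-reduced illustrative sketch, not part of this patch, that maps every entry to a name string:]

/* Map every list entry to a string, ignoring the per-entry distinctions. */
#define SYSCALL(func)		"sys_" #func,
#define COMPAT_SYS(func)	"sys_" #func,
#define PPC_SYS(func)		"ppc_" #func,
#define OLDSYS(func)		"sys_" #func,
#define SYS32ONLY(func)		"sys_" #func,
#define SYSX(f, f3264, f32)	#f,
#define SYSCALL_SPU(func)	SYSCALL(func)
#define COMPAT_SYS_SPU(func)	COMPAT_SYS(func)
#define PPC_SYS_SPU(func)	PPC_SYS(func)
#define SYSX_SPU(f, f3264, f32)	SYSX(f, f3264, f32)

static const char *example_syscall_names[] = {
#include <asm/systbl.h>
};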
diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
new file mode 100644
index 0000000..d6648c143
--- /dev/null
+++ b/arch/powerpc/include/asm/system.h
@@ -0,0 +1,548 @@
+/*
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ */
+#ifndef _ASM_POWERPC_SYSTEM_H
+#define _ASM_POWERPC_SYSTEM_H
+
+#include <linux/kernel.h>
+#include <linux/irqflags.h>
+
+#include <asm/hw_irq.h>
+
+/*
+ * Memory barrier.
+ * The sync instruction guarantees that all memory accesses initiated
+ * by this processor have been performed (with respect to all other
+ * mechanisms that access memory). The eieio instruction is a barrier
+ * providing an ordering (separately) for (a) cacheable stores and (b)
+ * loads and stores to non-cacheable memory (e.g. I/O devices).
+ *
+ * mb() prevents loads and stores being reordered across this point.
+ * rmb() prevents loads being reordered across this point.
+ * wmb() prevents stores being reordered across this point.
+ * read_barrier_depends() prevents data-dependent loads being reordered
+ * across this point (nop on PPC).
+ *
+ * We have to use the sync instructions for mb(), since lwsync doesn't
+ * order loads with respect to previous stores. Lwsync is fine for
+ * rmb(), though. Note that rmb() actually uses a sync on 32-bit
+ * architectures.
+ *
+ * For wmb(), we use sync since wmb is used in drivers to order
+ * stores to system memory with respect to writes to the device.
+ * However, smp_wmb() can be a lighter-weight lwsync or eieio barrier
+ * on SMP since it is only used to order updates to system memory.
+ */
+#define mb() __asm__ __volatile__ ("sync" : : : "memory")
+#define rmb() __asm__ __volatile__ ("sync" : : : "memory")
+#define wmb() __asm__ __volatile__ ("sync" : : : "memory")
+#define read_barrier_depends() do { } while(0)
+
+#define set_mb(var, value) do { var = value; mb(); } while (0)
+
+#ifdef __KERNEL__
+#define AT_VECTOR_SIZE_ARCH 6 /* entries in ARCH_DLINFO */
+#ifdef CONFIG_SMP
+
+#ifdef __SUBARCH_HAS_LWSYNC
+# define SMPWMB lwsync
+#else
+# define SMPWMB eieio
+#endif
+
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() __asm__ __volatile__ (__stringify(SMPWMB) : : :"memory")
+#define smp_read_barrier_depends() read_barrier_depends()
+#else
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_read_barrier_depends() do { } while(0)
+#endif /* CONFIG_SMP */
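
[Editor's note: the barrier menu above is dense, so here is a small illustrative producer/consumer fragment -- not part of this patch, with made-up flag/data variables -- showing where the smp_ variants fit when only ordering against other CPUs matters:]

static int example_data;
static int example_flag;

static void example_publish(int v)
{
	example_data = v;
	smp_wmb();		/* order the data store before the flag store */
	example_flag = 1;
}

/* Runs on another CPU. */
static int example_consume(void)
{
	if (!example_flag)
		return -1;
	smp_rmb();		/* order the flag load before the data load */
	return example_data;
}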
+
+/*
+ * This is a barrier which prevents following instructions from being
+ * started until the value of the argument x is known. For example, if
+ * x is a variable loaded from memory, this prevents following
+ * instructions from being executed until the load has been performed.
+ */
+#define data_barrier(x) \
+ asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");
+
+struct task_struct;
+struct pt_regs;
+
+#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
+
+extern int (*__debugger)(struct pt_regs *regs);
+extern int (*__debugger_ipi)(struct pt_regs *regs);
+extern int (*__debugger_bpt)(struct pt_regs *regs);
+extern int (*__debugger_sstep)(struct pt_regs *regs);
+extern int (*__debugger_iabr_match)(struct pt_regs *regs);
+extern int (*__debugger_dabr_match)(struct pt_regs *regs);
+extern int (*__debugger_fault_handler)(struct pt_regs *regs);
+
+#define DEBUGGER_BOILERPLATE(__NAME) \
+static inline int __NAME(struct pt_regs *regs) \
+{ \
+ if (unlikely(__ ## __NAME)) \
+ return __ ## __NAME(regs); \
+ return 0; \
+}
+
+DEBUGGER_BOILERPLATE(debugger)
+DEBUGGER_BOILERPLATE(debugger_ipi)
+DEBUGGER_BOILERPLATE(debugger_bpt)
+DEBUGGER_BOILERPLATE(debugger_sstep)
+DEBUGGER_BOILERPLATE(debugger_iabr_match)
+DEBUGGER_BOILERPLATE(debugger_dabr_match)
+DEBUGGER_BOILERPLATE(debugger_fault_handler)
+
+#else
+static inline int debugger(struct pt_regs *regs) { return 0; }
+static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
+static inline int debugger_bpt(struct pt_regs *regs) { return 0; }
+static inline int debugger_sstep(struct pt_regs *regs) { return 0; }
+static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; }
+static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
+static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
+#endif
+
+extern int set_dabr(unsigned long dabr);
+extern void do_dabr(struct pt_regs *regs, unsigned long address,
+ unsigned long error_code);
+extern void print_backtrace(unsigned long *);
+extern void show_regs(struct pt_regs * regs);
+extern void flush_instruction_cache(void);
+extern void hard_reset_now(void);
+extern void poweroff_now(void);
+
+#ifdef CONFIG_6xx
+extern long _get_L2CR(void);
+extern long _get_L3CR(void);
+extern void _set_L2CR(unsigned long);
+extern void _set_L3CR(unsigned long);
+#else
+#define _get_L2CR() 0L
+#define _get_L3CR() 0L
+#define _set_L2CR(val) do { } while(0)
+#define _set_L3CR(val) do { } while(0)
+#endif
+
+extern void via_cuda_init(void);
+extern void read_rtc_time(void);
+extern void pmac_find_display(void);
+extern void giveup_fpu(struct task_struct *);
+extern void disable_kernel_fp(void);
+extern void enable_kernel_fp(void);
+extern void flush_fp_to_thread(struct task_struct *);
+extern void enable_kernel_altivec(void);
+extern void giveup_altivec(struct task_struct *);
+extern void load_up_altivec(struct task_struct *);
+extern int emulate_altivec(struct pt_regs *);
+extern void __giveup_vsx(struct task_struct *);
+extern void giveup_vsx(struct task_struct *);
+extern void enable_kernel_spe(void);
+extern void giveup_spe(struct task_struct *);
+extern void load_up_spe(struct task_struct *);
+extern int fix_alignment(struct pt_regs *);
+extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
+extern void cvt_df(double *from, float *to, struct thread_struct *thread);
+
+#ifndef CONFIG_SMP
+extern void discard_lazy_cpu_state(void);
+#else
+static inline void discard_lazy_cpu_state(void)
+{
+}
+#endif
+
+#ifdef CONFIG_ALTIVEC
+extern void flush_altivec_to_thread(struct task_struct *);
+#else
+static inline void flush_altivec_to_thread(struct task_struct *t)
+{
+}
+#endif
+
+#ifdef CONFIG_VSX
+extern void flush_vsx_to_thread(struct task_struct *);
+#else
+static inline void flush_vsx_to_thread(struct task_struct *t)
+{
+}
+#endif
+
+#ifdef CONFIG_SPE
+extern void flush_spe_to_thread(struct task_struct *);
+#else
+static inline void flush_spe_to_thread(struct task_struct *t)
+{
+}
+#endif
+
+extern int call_rtas(const char *, int, int, unsigned long *, ...);
+extern void cacheable_memzero(void *p, unsigned int nb);
+extern void *cacheable_memcpy(void *, const void *, unsigned int);
+extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
+extern void bad_page_fault(struct pt_regs *, unsigned long, int);
+extern int die(const char *, struct pt_regs *, long);
+extern void _exception(int, struct pt_regs *, int, unsigned long);
+extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);
+
+#ifdef CONFIG_BOOKE_WDT
+extern u32 booke_wdt_enabled;
+extern u32 booke_wdt_period;
+#endif /* CONFIG_BOOKE_WDT */
+
+struct device_node;
+extern void note_scsi_host(struct device_node *, void *);
+
+extern struct task_struct *__switch_to(struct task_struct *,
+ struct task_struct *);
+#define switch_to(prev, next, last) ((last) = __switch_to((prev), (next)))
+
+struct thread_struct;
+extern struct task_struct *_switch(struct thread_struct *prev,
+ struct thread_struct *next);
+
+extern unsigned int rtas_data;
+extern int mem_init_done; /* set on boot once kmalloc can be called */
+extern int init_bootmem_done; /* set on !NUMA once bootmem is available */
+extern unsigned long memory_limit;
+extern unsigned long klimit;
+
+extern void *alloc_maybe_bootmem(size_t size, gfp_t mask);
+extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
+
+extern int powersave_nap; /* set if nap mode can be used in idle loop */
+
+/*
+ * Atomic exchange
+ *
+ * Changes the memory location '*ptr' to be val and returns
+ * the previous value stored there.
+ */
+static __always_inline unsigned long
+__xchg_u32(volatile void *p, unsigned long val)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__(
+ LWSYNC_ON_SMP
+"1: lwarx %0,0,%2 \n"
+ PPC405_ERR77(0,%2)
+" stwcx. %3,0,%2 \n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
+ : "r" (p), "r" (val)
+ : "cc", "memory");
+
+ return prev;
+}
+
+/*
+ * Atomic exchange
+ *
+ * Changes the memory location '*ptr' to be val and returns
+ * the previous value stored there.
+ */
+static __always_inline unsigned long
+__xchg_u32_local(volatile void *p, unsigned long val)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%2 \n"
+ PPC405_ERR77(0,%2)
+" stwcx. %3,0,%2 \n\
+ bne- 1b"
+ : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
+ : "r" (p), "r" (val)
+ : "cc", "memory");
+
+ return prev;
+}
+
+#ifdef CONFIG_PPC64
+static __always_inline unsigned long
+__xchg_u64(volatile void *p, unsigned long val)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__(
+ LWSYNC_ON_SMP
+"1: ldarx %0,0,%2 \n"
+ PPC405_ERR77(0,%2)
+" stdcx. %3,0,%2 \n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
+ : "r" (p), "r" (val)
+ : "cc", "memory");
+
+ return prev;
+}
+
+static __always_inline unsigned long
+__xchg_u64_local(volatile void *p, unsigned long val)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__(
+"1: ldarx %0,0,%2 \n"
+ PPC405_ERR77(0,%2)
+" stdcx. %3,0,%2 \n\
+ bne- 1b"
+ : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
+ : "r" (p), "r" (val)
+ : "cc", "memory");
+
+ return prev;
+}
+#endif
+
+/*
+ * This function doesn't exist, so you'll get a linker error
+ * if something tries to do an invalid xchg().
+ */
+extern void __xchg_called_with_bad_pointer(void);
+
+static __always_inline unsigned long
+__xchg(volatile void *ptr, unsigned long x, unsigned int size)
+{
+ switch (size) {
+ case 4:
+ return __xchg_u32(ptr, x);
+#ifdef CONFIG_PPC64
+ case 8:
+ return __xchg_u64(ptr, x);
+#endif
+ }
+ __xchg_called_with_bad_pointer();
+ return x;
+}
+
+static __always_inline unsigned long
+__xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
+{
+ switch (size) {
+ case 4:
+ return __xchg_u32_local(ptr, x);
+#ifdef CONFIG_PPC64
+ case 8:
+ return __xchg_u64_local(ptr, x);
+#endif
+ }
+ __xchg_called_with_bad_pointer();
+ return x;
+}
+#define xchg(ptr,x) \
+ ({ \
+ __typeof__(*(ptr)) _x_ = (x); \
+ (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
+ })
+
+#define xchg_local(ptr,x) \
+ ({ \
+ __typeof__(*(ptr)) _x_ = (x); \
+ (__typeof__(*(ptr))) __xchg_local((ptr), \
+ (unsigned long)_x_, sizeof(*(ptr))); \
+ })
+
+/*
+ * Compare and exchange - if *p == old, set it to new,
+ * and return the old value of *p.
+ */
+#define __HAVE_ARCH_CMPXCHG 1
+
+static __always_inline unsigned long
+__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
+{
+ unsigned int prev;
+
+ __asm__ __volatile__ (
+ LWSYNC_ON_SMP
+"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
+ cmpw 0,%0,%3\n\
+ bne- 2f\n"
+ PPC405_ERR77(0,%2)
+" stwcx. %4,0,%2\n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ "\n\
+2:"
+ : "=&r" (prev), "+m" (*p)
+ : "r" (p), "r" (old), "r" (new)
+ : "cc", "memory");
+
+ return prev;
+}
+
+static __always_inline unsigned long
+__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
+ unsigned long new)
+{
+ unsigned int prev;
+
+ __asm__ __volatile__ (
+"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
+ cmpw 0,%0,%3\n\
+ bne- 2f\n"
+ PPC405_ERR77(0,%2)
+" stwcx. %4,0,%2\n\
+ bne- 1b"
+ "\n\
+2:"
+ : "=&r" (prev), "+m" (*p)
+ : "r" (p), "r" (old), "r" (new)
+ : "cc", "memory");
+
+ return prev;
+}
+
+#ifdef CONFIG_PPC64
+static __always_inline unsigned long
+__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__ (
+ LWSYNC_ON_SMP
+"1: ldarx %0,0,%2 # __cmpxchg_u64\n\
+ cmpd 0,%0,%3\n\
+ bne- 2f\n\
+ stdcx. %4,0,%2\n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ "\n\
+2:"
+ : "=&r" (prev), "+m" (*p)
+ : "r" (p), "r" (old), "r" (new)
+ : "cc", "memory");
+
+ return prev;
+}
+
+static __always_inline unsigned long
+__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
+ unsigned long new)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__ (
+"1: ldarx %0,0,%2 # __cmpxchg_u64\n\
+ cmpd 0,%0,%3\n\
+ bne- 2f\n\
+ stdcx. %4,0,%2\n\
+ bne- 1b"
+ "\n\
+2:"
+ : "=&r" (prev), "+m" (*p)
+ : "r" (p), "r" (old), "r" (new)
+ : "cc", "memory");
+
+ return prev;
+}
+#endif
+
+/* This function doesn't exist, so you'll get a linker error
+ if something tries to do an invalid cmpxchg(). */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+static __always_inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
+ unsigned int size)
+{
+ switch (size) {
+ case 4:
+ return __cmpxchg_u32(ptr, old, new);
+#ifdef CONFIG_PPC64
+ case 8:
+ return __cmpxchg_u64(ptr, old, new);
+#endif
+ }
+ __cmpxchg_called_with_bad_pointer();
+ return old;
+}
+
+static __always_inline unsigned long
+__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
+ unsigned int size)
+{
+ switch (size) {
+ case 4:
+ return __cmpxchg_u32_local(ptr, old, new);
+#ifdef CONFIG_PPC64
+ case 8:
+ return __cmpxchg_u64_local(ptr, old, new);
+#endif
+ }
+ __cmpxchg_called_with_bad_pointer();
+ return old;
+}
+
+#define cmpxchg(ptr, o, n) \
+ ({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
+ (unsigned long)_n_, sizeof(*(ptr))); \
+ })
+
+
+#define cmpxchg_local(ptr, o, n) \
+ ({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
+ (unsigned long)_n_, sizeof(*(ptr))); \
+ })
+
+#ifdef CONFIG_PPC64
+/*
+ * We handle most unaligned accesses in hardware. On the other hand
+ * unaligned DMA can be very expensive on some ppc64 IO chips (it does
+ * powers of 2 writes until it reaches sufficient alignment).
+ *
+ * Based on this we disable the IP header alignment in network drivers.
+ * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining
+ * cacheline alignment of buffers.
+ */
+#define NET_IP_ALIGN 0
+#define NET_SKB_PAD L1_CACHE_BYTES
+
+#define cmpxchg64(ptr, o, n) \
+ ({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ cmpxchg((ptr), (o), (n)); \
+ })
+#define cmpxchg64_local(ptr, o, n) \
+ ({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ cmpxchg_local((ptr), (o), (n)); \
+ })
+#else
+#include <asm-generic/cmpxchg-local.h>
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#endif
+
+#define arch_align_stack(x) (x)
+
+/* Used in very early kernel initialization. */
+extern unsigned long reloc_offset(void);
+extern unsigned long add_reloc_offset(unsigned long);
+extern void reloc_got2(unsigned long);
+
+#define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x)))
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void account_system_vtime(struct task_struct *);
+#endif
+
+extern struct dentry *powerpc_debugfs_root;
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_SYSTEM_H */
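
Illustrative sketch, not part of this patch: the xchg()/cmpxchg() macros above dispatch on operand size and return the previous value, which is what makes the usual compare-and-swap retry loop work. The lock-free counter and its names below are hypothetical.

/* Example only: a lock-free event counter built on xchg()/cmpxchg(). */
static unsigned long event_count;

static inline unsigned long event_count_add(unsigned long delta)
{
	unsigned long old, new;

	do {
		old = event_count;
		new = old + delta;
		/* retry if another CPU updated the counter in between */
	} while (cmpxchg(&event_count, old, new) != old);

	return new;
}

static inline unsigned long event_count_reset(void)
{
	/* xchg() stores 0 atomically and hands back the previous value */
	return xchg(&event_count, 0UL);
}
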
diff --git a/arch/powerpc/include/asm/tce.h b/arch/powerpc/include/asm/tce.h
new file mode 100644
index 0000000..f663634
--- /dev/null
+++ b/arch/powerpc/include/asm/tce.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
+ * Rewrite, cleanup:
+ * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_POWERPC_TCE_H
+#define _ASM_POWERPC_TCE_H
+#ifdef __KERNEL__
+
+#include <asm/iommu.h>
+
+/*
+ * TCEs come in two formats, one for the virtual bus and a different
+ * format for PCI
+ */
+#define TCE_VB 0
+#define TCE_PCI 1
+
+/* TCE page size is 4096 bytes (1 << 12) */
+
+#define TCE_SHIFT 12
+#define TCE_PAGE_SIZE (1 << TCE_SHIFT)
+
+#define TCE_ENTRY_SIZE 8 /* each TCE is 64 bits */
+
+#define TCE_RPN_MASK 0xfffffffffful /* 40-bit RPN (4K pages) */
+#define TCE_RPN_SHIFT 12
+#define TCE_VALID 0x800 /* TCE valid */
+#define TCE_ALLIO 0x400 /* TCE valid for all lpars */
+#define TCE_PCI_WRITE 0x2 /* write from PCI allowed */
+#define TCE_PCI_READ 0x1 /* read from PCI allowed */
+#define TCE_VB_WRITE 0x1 /* write from VB allowed */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_TCE_H */
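
For illustration only (not taken from this patch): a PCI TCE combines the page's real page number with the permission bits defined above. This is a hedged sketch of the bit layout; the helper name is hypothetical.

/* Sketch: encode a 4K-aligned physical address as a read/write PCI TCE. */
static inline u64 make_pci_tce(unsigned long phys_addr)
{
	u64 rpn = ((u64)phys_addr >> TCE_SHIFT) & TCE_RPN_MASK;

	return (rpn << TCE_RPN_SHIFT) | TCE_PCI_READ | TCE_PCI_WRITE;
}
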
diff --git a/arch/powerpc/include/asm/termbits.h b/arch/powerpc/include/asm/termbits.h
new file mode 100644
index 0000000..6698188
--- /dev/null
+++ b/arch/powerpc/include/asm/termbits.h
@@ -0,0 +1,209 @@
+#ifndef _ASM_POWERPC_TERMBITS_H
+#define _ASM_POWERPC_TERMBITS_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+typedef unsigned char cc_t;
+typedef unsigned int speed_t;
+typedef unsigned int tcflag_t;
+
+/*
+ * termios type and macro definitions. Be careful about adding stuff
+ * to this file since it's used in GNU libc and there are strict rules
+ * concerning namespace pollution.
+ */
+
+#define NCCS 19
+struct termios {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_cc[NCCS]; /* control characters */
+ cc_t c_line; /* line discipline (== c_cc[19]) */
+ speed_t c_ispeed; /* input speed */
+ speed_t c_ospeed; /* output speed */
+};
+
+/* For PowerPC the termios and ktermios are the same */
+
+struct ktermios {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_cc[NCCS]; /* control characters */
+ cc_t c_line; /* line discipline (== c_cc[19]) */
+ speed_t c_ispeed; /* input speed */
+ speed_t c_ospeed; /* output speed */
+};
+
+/* c_cc characters */
+#define VINTR 0
+#define VQUIT 1
+#define VERASE 2
+#define VKILL 3
+#define VEOF 4
+#define VMIN 5
+#define VEOL 6
+#define VTIME 7
+#define VEOL2 8
+#define VSWTC 9
+#define VWERASE 10
+#define VREPRINT 11
+#define VSUSP 12
+#define VSTART 13
+#define VSTOP 14
+#define VLNEXT 15
+#define VDISCARD 16
+
+/* c_iflag bits */
+#define IGNBRK 0000001
+#define BRKINT 0000002
+#define IGNPAR 0000004
+#define PARMRK 0000010
+#define INPCK 0000020
+#define ISTRIP 0000040
+#define INLCR 0000100
+#define IGNCR 0000200
+#define ICRNL 0000400
+#define IXON 0001000
+#define IXOFF 0002000
+#define IXANY 0004000
+#define IUCLC 0010000
+#define IMAXBEL 0020000
+#define IUTF8 0040000
+
+/* c_oflag bits */
+#define OPOST 0000001
+#define ONLCR 0000002
+#define OLCUC 0000004
+
+#define OCRNL 0000010
+#define ONOCR 0000020
+#define ONLRET 0000040
+
+#define OFILL 00000100
+#define OFDEL 00000200
+#define NLDLY 00001400
+#define NL0 00000000
+#define NL1 00000400
+#define NL2 00001000
+#define NL3 00001400
+#define TABDLY 00006000
+#define TAB0 00000000
+#define TAB1 00002000
+#define TAB2 00004000
+#define TAB3 00006000
+#define XTABS 00006000 /* required by POSIX to == TAB3 */
+#define CRDLY 00030000
+#define CR0 00000000
+#define CR1 00010000
+#define CR2 00020000
+#define CR3 00030000
+#define FFDLY 00040000
+#define FF0 00000000
+#define FF1 00040000
+#define BSDLY 00100000
+#define BS0 00000000
+#define BS1 00100000
+#define VTDLY 00200000
+#define VT0 00000000
+#define VT1 00200000
+
+/* c_cflag bit meaning */
+#define CBAUD 0000377
+#define B0 0000000 /* hang up */
+#define B50 0000001
+#define B75 0000002
+#define B110 0000003
+#define B134 0000004
+#define B150 0000005
+#define B200 0000006
+#define B300 0000007
+#define B600 0000010
+#define B1200 0000011
+#define B1800 0000012
+#define B2400 0000013
+#define B4800 0000014
+#define B9600 0000015
+#define B19200 0000016
+#define B38400 0000017
+#define EXTA B19200
+#define EXTB B38400
+#define CBAUDEX 0000000
+#define B57600 00020
+#define B115200 00021
+#define B230400 00022
+#define B460800 00023
+#define B500000 00024
+#define B576000 00025
+#define B921600 00026
+#define B1000000 00027
+#define B1152000 00030
+#define B1500000 00031
+#define B2000000 00032
+#define B2500000 00033
+#define B3000000 00034
+#define B3500000 00035
+#define B4000000 00036
+#define BOTHER 00037
+
+#define CIBAUD 077600000
+#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
+
+#define CSIZE 00001400
+#define CS5 00000000
+#define CS6 00000400
+#define CS7 00001000
+#define CS8 00001400
+
+#define CSTOPB 00002000
+#define CREAD 00004000
+#define PARENB 00010000
+#define PARODD 00020000
+#define HUPCL 00040000
+
+#define CLOCAL 00100000
+#define CMSPAR 010000000000 /* mark or space (stick) parity */
+#define CRTSCTS 020000000000 /* flow control */
+
+/* c_lflag bits */
+#define ISIG 0x00000080
+#define ICANON 0x00000100
+#define XCASE 0x00004000
+#define ECHO 0x00000008
+#define ECHOE 0x00000002
+#define ECHOK 0x00000004
+#define ECHONL 0x00000010
+#define NOFLSH 0x80000000
+#define TOSTOP 0x00400000
+#define ECHOCTL 0x00000040
+#define ECHOPRT 0x00000020
+#define ECHOKE 0x00000001
+#define FLUSHO 0x00800000
+#define PENDIN 0x20000000
+#define IEXTEN 0x00000400
+
+/* Values for the ACTION argument to `tcflow'. */
+#define TCOOFF 0
+#define TCOON 1
+#define TCIOFF 2
+#define TCION 3
+
+/* Values for the QUEUE_SELECTOR argument to `tcflush'. */
+#define TCIFLUSH 0
+#define TCOFLUSH 1
+#define TCIOFLUSH 2
+
+/* Values for the OPTIONAL_ACTIONS argument to `tcsetattr'. */
+#define TCSANOW 0
+#define TCSADRAIN 1
+#define TCSAFLUSH 2
+
+#endif /* _ASM_POWERPC_TERMBITS_H */
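
These flag values are consumed from user space through the standard termios interface. A user-space sketch (example only, not part of the patch) of configuring a serial port as raw 115200 8N1 using the bits defined above; the device path is whatever the caller passes in.

#include <fcntl.h>
#include <termios.h>
#include <unistd.h>

/* Example only: open @path as a raw 115200 8N1 serial port, return the fd. */
int open_serial_raw_115200(const char *path)
{
	struct termios tio;
	int fd = open(path, O_RDWR | O_NOCTTY);

	if (fd < 0)
		return -1;
	if (tcgetattr(fd, &tio) < 0)
		goto err;

	cfmakeraw(&tio);			/* clears ICANON, ECHO, OPOST, ... */
	tio.c_cflag &= ~(CSIZE | PARENB | CSTOPB);
	tio.c_cflag |= CS8 | CREAD | CLOCAL;	/* 8 data bits, enable receiver */
	cfsetispeed(&tio, B115200);
	cfsetospeed(&tio, B115200);

	if (tcsetattr(fd, TCSANOW, &tio) < 0)
		goto err;
	return fd;
err:
	close(fd);
	return -1;
}
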
diff --git a/arch/powerpc/include/asm/termios.h b/arch/powerpc/include/asm/termios.h
new file mode 100644
index 0000000..2c14fea
--- /dev/null
+++ b/arch/powerpc/include/asm/termios.h
@@ -0,0 +1,85 @@
+#ifndef _ASM_POWERPC_TERMIOS_H
+#define _ASM_POWERPC_TERMIOS_H
+
+/*
+ * Liberally adapted from alpha/termios.h. In particular, the c_cc[]
+ * fields have been reordered so that termio & termios share the
+ * common subset in the same order (for brain dead programs that don't
+ * know or care about the differences).
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/ioctls.h>
+#include <asm/termbits.h>
+
+struct sgttyb {
+ char sg_ispeed;
+ char sg_ospeed;
+ char sg_erase;
+ char sg_kill;
+ short sg_flags;
+};
+
+struct tchars {
+ char t_intrc;
+ char t_quitc;
+ char t_startc;
+ char t_stopc;
+ char t_eofc;
+ char t_brkc;
+};
+
+struct ltchars {
+ char t_suspc;
+ char t_dsuspc;
+ char t_rprntc;
+ char t_flushc;
+ char t_werasc;
+ char t_lnextc;
+};
+
+struct winsize {
+ unsigned short ws_row;
+ unsigned short ws_col;
+ unsigned short ws_xpixel;
+ unsigned short ws_ypixel;
+};
+
+#define NCC 10
+struct termio {
+ unsigned short c_iflag; /* input mode flags */
+ unsigned short c_oflag; /* output mode flags */
+ unsigned short c_cflag; /* control mode flags */
+ unsigned short c_lflag; /* local mode flags */
+ unsigned char c_line; /* line discipline */
+ unsigned char c_cc[NCC]; /* control characters */
+};
+
+/* c_cc characters */
+#define _VINTR 0
+#define _VQUIT 1
+#define _VERASE 2
+#define _VKILL 3
+#define _VEOF 4
+#define _VMIN 5
+#define _VEOL 6
+#define _VTIME 7
+#define _VEOL2 8
+#define _VSWTC 9
+
+#ifdef __KERNEL__
+/* ^C ^\ del ^U ^D 1 0 0 0 0 ^W ^R ^Z ^Q ^S ^V ^U */
+#define INIT_C_CC "\003\034\177\025\004\001\000\000\000\000\027\022\032\021\023\026\025"
+#endif
+
+#ifdef __KERNEL__
+
+#include <asm-generic/termios.h>
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_POWERPC_TERMIOS_H */
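
struct winsize above is the structure the standard TIOCGWINSZ ioctl fills in. A small user-space sketch (illustrative only):

#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Example only: print the size of the terminal on stdout. */
int print_terminal_size(void)
{
	struct winsize ws;

	if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws) < 0)
		return -1;
	printf("%hu rows x %hu cols\n", ws.ws_row, ws.ws_col);
	return 0;
}
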
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
new file mode 100644
index 0000000..9665a26
--- /dev/null
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -0,0 +1,161 @@
+/* thread_info.h: PowerPC low-level thread information
+ * adapted from the i386 version by Paul Mackerras
+ *
+ * Copyright (C) 2002 David Howells (dhowells@redhat.com)
+ * - Incorporating suggestions made by Linus Torvalds and Dave Miller
+ */
+
+#ifndef _ASM_POWERPC_THREAD_INFO_H
+#define _ASM_POWERPC_THREAD_INFO_H
+
+#ifdef __KERNEL__
+
+/* We have 8k stacks on ppc32 and 16k on ppc64 */
+
+#ifdef CONFIG_PPC64
+#define THREAD_SHIFT 14
+#else
+#define THREAD_SHIFT 13
+#endif
+
+#define THREAD_SIZE (1 << THREAD_SHIFT)
+
+#ifndef __ASSEMBLY__
+#include <linux/cache.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <linux/stringify.h>
+
+/*
+ * low level task data.
+ */
+struct thread_info {
+ struct task_struct *task; /* main task structure */
+ struct exec_domain *exec_domain; /* execution domain */
+ int cpu; /* cpu we're on */
+ int preempt_count; /* 0 => preemptable,
+ <0 => BUG */
+ struct restart_block restart_block;
+ unsigned long local_flags; /* private flags for thread */
+
+ /* low level flags - has atomic operations done on it */
+ unsigned long flags ____cacheline_aligned_in_smp;
+};
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ *
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
+ */
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .exec_domain = &default_exec_domain, \
+ .cpu = 0, \
+ .preempt_count = 1, \
+ .restart_block = { \
+ .fn = do_no_restart_syscall, \
+ }, \
+ .flags = 0, \
+}
+
+#define init_thread_info (init_thread_union.thread_info)
+#define init_stack (init_thread_union.stack)
+
+/* thread information allocation */
+
+#if THREAD_SHIFT >= PAGE_SHIFT
+
+#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
+
+#else /* THREAD_SHIFT < PAGE_SHIFT */
+
+#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
+
+extern struct thread_info *alloc_thread_info(struct task_struct *tsk);
+extern void free_thread_info(struct thread_info *ti);
+
+#endif /* THREAD_SHIFT < PAGE_SHIFT */
+
+/* how to get the thread information struct from C */
+static inline struct thread_info *current_thread_info(void)
+{
+ register unsigned long sp asm("r1");
+
+ /* gcc4, at least, is smart enough to turn this into a single
+ * rlwinm for ppc32 and clrrdi for ppc64 */
+ return (struct thread_info *)(sp & ~(THREAD_SIZE-1));
+}
+
+#endif /* __ASSEMBLY__ */
+
+#define PREEMPT_ACTIVE 0x10000000
+
+/*
+ * thread information flag bit numbers
+ */
+#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
+#define TIF_SIGPENDING 1 /* signal pending */
+#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
+#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
+ TIF_NEED_RESCHED */
+#define TIF_32BIT 4 /* 32 bit binary */
+#define TIF_PERFMON_WORK 5 /* work for pfm_handle_work() */
+#define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
+#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
+#define TIF_SINGLESTEP 8 /* singlestepping active */
+#define TIF_MEMDIE 9
+#define TIF_SECCOMP 10 /* secure computing */
+#define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
+#define TIF_NOERROR 12 /* Force successful syscall return */
+#define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
+#define TIF_FREEZE 14 /* Freezing for suspend */
+#define TIF_RUNLATCH 15 /* Is the runlatch enabled? */
+#define TIF_ABI_PENDING 16 /* 32/64 bit switch needed */
+
+/* as above, but as bit values */
+#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
+#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
+#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
+#define _TIF_32BIT (1<<TIF_32BIT)
+#define _TIF_PERFMON_WORK (1<<TIF_PERFMON_WORK)
+#define _TIF_PERFMON_CTXSW (1<<TIF_PERFMON_CTXSW)
+#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
+#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
+#define _TIF_SECCOMP (1<<TIF_SECCOMP)
+#define _TIF_RESTOREALL (1<<TIF_RESTOREALL)
+#define _TIF_NOERROR (1<<TIF_NOERROR)
+#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
+#define _TIF_FREEZE (1<<TIF_FREEZE)
+#define _TIF_RUNLATCH (1<<TIF_RUNLATCH)
+#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
+#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP)
+
+#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+ _TIF_NOTIFY_RESUME)
+#define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR)
+
+/* Bits in local_flags */
+/* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
+#define TLF_NAPPING 0 /* idle thread enabled NAP mode */
+#define TLF_SLEEPING 1 /* suspend code enabled SLEEP mode */
+#define TLF_RESTORE_SIGMASK 2 /* Restore signal mask in do_signal */
+
+#define _TLF_NAPPING (1 << TLF_NAPPING)
+#define _TLF_SLEEPING (1 << TLF_SLEEPING)
+#define _TLF_RESTORE_SIGMASK (1 << TLF_RESTORE_SIGMASK)
+
+#ifndef __ASSEMBLY__
+#define HAVE_SET_RESTORE_SIGMASK 1
+static inline void set_restore_sigmask(void)
+{
+ struct thread_info *ti = current_thread_info();
+ ti->local_flags |= _TLF_RESTORE_SIGMASK;
+ set_bit(TIF_SIGPENDING, &ti->flags);
+}
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_POWERPC_THREAD_INFO_H */
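
current_thread_info() above relies on thread_info living at the bottom of a THREAD_SIZE-aligned kernel stack, so masking any in-stack address recovers it. A standalone sketch of the same arithmetic; the stack-pointer value is hypothetical.

#include <stdio.h>

#define THREAD_SHIFT	13			/* 8K stacks, as on ppc32 */
#define THREAD_SIZE	(1UL << THREAD_SHIFT)

int main(void)
{
	unsigned long sp = 0xc0000000007f9e40UL;	/* hypothetical r1 value */
	unsigned long ti = sp & ~(THREAD_SIZE - 1);

	printf("stack pointer  %#lx\n", sp);
	printf("thread_info at %#lx\n", ti);		/* 0xc0000000007f8000 */
	return 0;
}
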
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
new file mode 100644
index 0000000..febd581
--- /dev/null
+++ b/arch/powerpc/include/asm/time.h
@@ -0,0 +1,255 @@
+/*
+ * Common time prototypes and such for all ppc machines.
+ *
+ * Written by Cort Dougan (cort@cs.nmt.edu) to merge
+ * Paul Mackerras' version and mine for PReP and Pmac.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __POWERPC_TIME_H
+#define __POWERPC_TIME_H
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#include <linux/percpu.h>
+
+#include <asm/processor.h>
+#ifdef CONFIG_PPC_ISERIES
+#include <asm/paca.h>
+#include <asm/firmware.h>
+#include <asm/iseries/hv_call.h>
+#endif
+
+/* time.c */
+extern unsigned long tb_ticks_per_jiffy;
+extern unsigned long tb_ticks_per_usec;
+extern unsigned long tb_ticks_per_sec;
+extern u64 tb_to_xs;
+extern unsigned tb_to_us;
+
+struct rtc_time;
+extern void to_tm(int tim, struct rtc_time * tm);
+extern void GregorianDay(struct rtc_time *tm);
+extern time_t last_rtc_update;
+
+extern void generic_calibrate_decr(void);
+extern void wakeup_decrementer(void);
+extern void snapshot_timebase(void);
+
+extern void set_dec_cpu6(unsigned int val);
+
+/* Some sane defaults: 125 MHz timebase, 1GHz processor */
+extern unsigned long ppc_proc_freq;
+#define DEFAULT_PROC_FREQ (DEFAULT_TB_FREQ * 8)
+extern unsigned long ppc_tb_freq;
+#define DEFAULT_TB_FREQ 125000000UL
+
+/*
+ * By putting all of this stuff into a single struct we
+ * reduce the number of cache lines touched by do_gettimeofday.
+ * Both by collecting all of the data in one cache line and
+ * by touching only one TOC entry on ppc64.
+ */
+struct gettimeofday_vars {
+ u64 tb_to_xs;
+ u64 stamp_xsec;
+ u64 tb_orig_stamp;
+};
+
+struct gettimeofday_struct {
+ unsigned long tb_ticks_per_sec;
+ struct gettimeofday_vars vars[2];
+ struct gettimeofday_vars * volatile varp;
+ unsigned var_idx;
+ unsigned tb_to_us;
+};
+
+struct div_result {
+ u64 result_high;
+ u64 result_low;
+};
+
+/* Accessor functions for the timebase (RTC on 601) registers. */
+/* If one day CONFIG_POWER is added just define __USE_RTC as 1 */
+#ifdef CONFIG_6xx
+#define __USE_RTC() (!cpu_has_feature(CPU_FTR_USE_TB))
+#else
+#define __USE_RTC() 0
+#endif
+
+#ifdef CONFIG_PPC64
+
+/* For compatibility, get_tbl() is defined as get_tb() on ppc64 */
+#define get_tbl get_tb
+
+#else
+
+static inline unsigned long get_tbl(void)
+{
+#if defined(CONFIG_403GCX)
+ unsigned long tbl;
+ asm volatile("mfspr %0, 0x3dd" : "=r" (tbl));
+ return tbl;
+#else
+ return mftbl();
+#endif
+}
+
+static inline unsigned int get_tbu(void)
+{
+#ifdef CONFIG_403GCX
+ unsigned int tbu;
+ asm volatile("mfspr %0, 0x3dc" : "=r" (tbu));
+ return tbu;
+#else
+ return mftbu();
+#endif
+}
+#endif /* !CONFIG_PPC64 */
+
+static inline unsigned int get_rtcl(void)
+{
+ unsigned int rtcl;
+
+ asm volatile("mfrtcl %0" : "=r" (rtcl));
+ return rtcl;
+}
+
+static inline u64 get_rtc(void)
+{
+ unsigned int hi, lo, hi2;
+
+ do {
+ asm volatile("mfrtcu %0; mfrtcl %1; mfrtcu %2"
+ : "=r" (hi), "=r" (lo), "=r" (hi2));
+ } while (hi2 != hi);
+ return (u64)hi * 1000000000 + lo;
+}
+
+#ifdef CONFIG_PPC64
+static inline u64 get_tb(void)
+{
+ return mftb();
+}
+#else /* CONFIG_PPC64 */
+static inline u64 get_tb(void)
+{
+ unsigned int tbhi, tblo, tbhi2;
+
+ do {
+ tbhi = get_tbu();
+ tblo = get_tbl();
+ tbhi2 = get_tbu();
+ } while (tbhi != tbhi2);
+
+ return ((u64)tbhi << 32) | tblo;
+}
+#endif /* !CONFIG_PPC64 */
+
+static inline u64 get_tb_or_rtc(void)
+{
+ return __USE_RTC() ? get_rtc() : get_tb();
+}
+
+static inline void set_tb(unsigned int upper, unsigned int lower)
+{
+ mtspr(SPRN_TBWL, 0);
+ mtspr(SPRN_TBWU, upper);
+ mtspr(SPRN_TBWL, lower);
+}
+
+/* Accessor functions for the decrementer register.
+ * The 4xx doesn't even have a decrementer. I tried to use the
+ * generic timer interrupt code, which seems OK, with the 4xx PIT
+ * in auto-reload mode. The problem is that the PIT stops counting when it
+ * hits zero. If it wrapped, we could use it just like a decrementer.
+ */
+static inline unsigned int get_dec(void)
+{
+#if defined(CONFIG_40x)
+ return (mfspr(SPRN_PIT));
+#else
+ return (mfspr(SPRN_DEC));
+#endif
+}
+
+/*
+ * Note: Book E and 4xx processors differ from other PowerPC processors
+ * in when the decrementer generates its interrupt: on the 1 to 0
+ * transition for Book E/4xx, but on the 0 to -1 transition for others.
+ */
+static inline void set_dec(int val)
+{
+#if defined(CONFIG_40x)
+ mtspr(SPRN_PIT, val);
+#elif defined(CONFIG_8xx_CPU6)
+ set_dec_cpu6(val - 1);
+#else
+#ifndef CONFIG_BOOKE
+ --val;
+#endif
+#ifdef CONFIG_PPC_ISERIES
+ if (firmware_has_feature(FW_FEATURE_ISERIES) &&
+ get_lppaca()->shared_proc) {
+ get_lppaca()->virtual_decr = val;
+ if (get_dec() > val)
+ HvCall_setVirtualDecr();
+ return;
+ }
+#endif
+ mtspr(SPRN_DEC, val);
+#endif /* not 40x or 8xx_CPU6 */
+}
+
+static inline unsigned long tb_ticks_since(unsigned long tstamp)
+{
+ if (__USE_RTC()) {
+ int delta = get_rtcl() - (unsigned int) tstamp;
+ return delta < 0 ? delta + 1000000000 : delta;
+ }
+ return get_tbl() - tstamp;
+}
+
+#define mulhwu(x,y) \
+({unsigned z; asm ("mulhwu %0,%1,%2" : "=r" (z) : "r" (x), "r" (y)); z;})
+
+#ifdef CONFIG_PPC64
+#define mulhdu(x,y) \
+({unsigned long z; asm ("mulhdu %0,%1,%2" : "=r" (z) : "r" (x), "r" (y)); z;})
+#else
+extern u64 mulhdu(u64, u64);
+#endif
+
+extern void smp_space_timers(unsigned int);
+
+extern unsigned mulhwu_scale_factor(unsigned, unsigned);
+extern void div128_by_32(u64 dividend_high, u64 dividend_low,
+ unsigned divisor, struct div_result *dr);
+
+/* Used to store Processor Utilization register (purr) values */
+
+struct cpu_usage {
+ u64 current_tb; /* Holds the current purr register values */
+};
+
+DECLARE_PER_CPU(struct cpu_usage, cpu_usage_array);
+
+#if defined(CONFIG_VIRT_CPU_ACCOUNTING)
+extern void calculate_steal_time(void);
+extern void snapshot_timebases(void);
+#define account_process_vtime(tsk) account_process_tick(tsk, 0)
+#else
+#define calculate_steal_time() do { } while (0)
+#define snapshot_timebases() do { } while (0)
+#define account_process_vtime(tsk) do { } while (0)
+#endif
+
+extern void secondary_cpu_time_init(void);
+extern void iSeries_time_init_early(void);
+
+#endif /* __KERNEL__ */
+#endif /* __POWERPC_TIME_H */
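
tb_ticks_since() together with tb_ticks_per_usec gives a cheap way to time short intervals. An illustrative helper (not part of this patch; the caller shown in the comment is hypothetical):

/* Sketch: elapsed microseconds since a get_tbl() snapshot. */
static inline unsigned long tb_delta_to_us(unsigned long start_tbl)
{
	/* tb_ticks_per_usec is exported by time.c, see the declarations above */
	return tb_ticks_since(start_tbl) / tb_ticks_per_usec;
}

/* Usage:
 *	unsigned long t0 = get_tbl();
 *	do_something();
 *	printk(KERN_DEBUG "took %lu us\n", tb_delta_to_us(t0));
 */
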
diff --git a/arch/powerpc/include/asm/timex.h b/arch/powerpc/include/asm/timex.h
new file mode 100644
index 0000000..c55e14f
--- /dev/null
+++ b/arch/powerpc/include/asm/timex.h
@@ -0,0 +1,50 @@
+#ifndef _ASM_POWERPC_TIMEX_H
+#define _ASM_POWERPC_TIMEX_H
+
+#ifdef __KERNEL__
+
+/*
+ * PowerPC architecture timex specifications
+ */
+
+#include <asm/cputable.h>
+#include <asm/reg.h>
+
+#define CLOCK_TICK_RATE 1024000 /* Underlying HZ */
+
+typedef unsigned long cycles_t;
+
+static inline cycles_t get_cycles(void)
+{
+#ifdef __powerpc64__
+ return mftb();
+#else
+ cycles_t ret;
+
+ /*
+ * For the "cycle" counter we use the timebase lower half.
+ * Currently only used on SMP.
+ */
+
+ ret = 0;
+
+ __asm__ __volatile__(
+ "97: mftb %0\n"
+ "99:\n"
+ ".section __ftr_fixup,\"a\"\n"
+ ".align 2\n"
+ "98:\n"
+ " .long %1\n"
+ " .long 0\n"
+ " .long 97b-98b\n"
+ " .long 99b-98b\n"
+ " .long 0\n"
+ " .long 0\n"
+ ".previous"
+ : "=r" (ret) : "i" (CPU_FTR_601));
+ return ret;
+#endif
+}
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_TIMEX_H */
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
new file mode 100644
index 0000000..e20ff75
--- /dev/null
+++ b/arch/powerpc/include/asm/tlb.h
@@ -0,0 +1,81 @@
+/*
+ * TLB shootdown specifics for powerpc
+ *
+ * Copyright (C) 2002 Anton Blanchard, IBM Corp.
+ * Copyright (C) 2002 Paul Mackerras, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_POWERPC_TLB_H
+#define _ASM_POWERPC_TLB_H
+#ifdef __KERNEL__
+
+#ifndef __powerpc64__
+#include <asm/pgtable.h>
+#endif
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#ifndef __powerpc64__
+#include <asm/page.h>
+#include <asm/mmu.h>
+#endif
+
+#include <linux/pagemap.h>
+
+struct mmu_gather;
+
+#define tlb_start_vma(tlb, vma) do { } while (0)
+#define tlb_end_vma(tlb, vma) do { } while (0)
+
+#if !defined(CONFIG_PPC_STD_MMU)
+
+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+
+#elif defined(__powerpc64__)
+
+extern void pte_free_finish(void);
+
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+ struct ppc64_tlb_batch *tlbbatch = &__get_cpu_var(ppc64_tlb_batch);
+
+ /* If there's a TLB batch pending, then we must flush it because the
+ * pages are going to be freed and we really don't want to have a CPU
+ * access a freed page because it has a stale TLB entry.
+ */
+ if (tlbbatch->index)
+ __flush_tlb_pending(tlbbatch);
+
+ pte_free_finish();
+}
+
+#else
+
+extern void tlb_flush(struct mmu_gather *tlb);
+
+#endif
+
+/* Get the generic bits... */
+#include <asm-generic/tlb.h>
+
+#if !defined(CONFIG_PPC_STD_MMU) || defined(__powerpc64__)
+
+#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
+
+#else
+extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
+ unsigned long address);
+
+static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
+ unsigned long address)
+{
+ if (pte_val(*ptep) & _PAGE_HASHPTE)
+ flush_hash_entry(tlb->mm, ptep, address);
+}
+
+#endif
+#endif /* __KERNEL__ */
+#endif /* __ASM_POWERPC_TLB_H */
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
new file mode 100644
index 0000000..361cd5c
--- /dev/null
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -0,0 +1,166 @@
+#ifndef _ASM_POWERPC_TLBFLUSH_H
+#define _ASM_POWERPC_TLBFLUSH_H
+
+/*
+ * TLB flushing:
+ *
+ * - flush_tlb_mm(mm) flushes the specified mm context TLB's
+ * - flush_tlb_page(vma, vmaddr) flushes one page
+ * - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
+ * - flush_tlb_range(vma, start, end) flushes a range of pages
+ * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifdef __KERNEL__
+
+#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE)
+/*
+ * TLB flushing for software loaded TLB chips
+ *
+ * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
+ * flush_tlb_kernel_range are best implemented as tlbia vs
+ * specific tlbie's
+ */
+
+#include <linux/mm.h>
+
+extern void _tlbie(unsigned long address, unsigned int pid);
+
+#if defined(CONFIG_40x) || defined(CONFIG_8xx)
+#define _tlbia() asm volatile ("tlbia; sync" : : : "memory")
+#else /* CONFIG_44x || CONFIG_FSL_BOOKE */
+extern void _tlbia(void);
+#endif
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+ _tlbia();
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long vmaddr)
+{
+ _tlbie(vmaddr, vma ? vma->vm_mm->context.id : 0);
+}
+
+static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
+ unsigned long vmaddr)
+{
+ _tlbie(vmaddr, vma ? vma->vm_mm->context.id : 0);
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ _tlbia();
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+ unsigned long end)
+{
+ _tlbia();
+}
+
+#elif defined(CONFIG_PPC32)
+/*
+ * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
+ */
+extern void _tlbie(unsigned long address);
+extern void _tlbia(void);
+
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
+#else
+/*
+ * TLB flushing for 64-bit hash-MMU CPUs
+ */
+
+#include <linux/percpu.h>
+#include <asm/page.h>
+
+#define PPC64_TLB_BATCH_NR 192
+
+struct ppc64_tlb_batch {
+ int active;
+ unsigned long index;
+ struct mm_struct *mm;
+ real_pte_t pte[PPC64_TLB_BATCH_NR];
+ unsigned long vaddr[PPC64_TLB_BATCH_NR];
+ unsigned int psize;
+ int ssize;
+};
+DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
+
+extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
+
+extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned long pte, int huge);
+
+#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+
+static inline void arch_enter_lazy_mmu_mode(void)
+{
+ struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+
+ batch->active = 1;
+}
+
+static inline void arch_leave_lazy_mmu_mode(void)
+{
+ struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+
+ if (batch->index)
+ __flush_tlb_pending(batch);
+ batch->active = 0;
+}
+
+#define arch_flush_lazy_mmu_mode() do {} while (0)
+
+
+extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize,
+ int ssize, int local);
+extern void flush_hash_range(unsigned long number, int local);
+
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long vmaddr)
+{
+}
+
+static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
+ unsigned long vmaddr)
+{
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+ unsigned long end)
+{
+}
+
+/* Private function for use by PCI IO mapping code */
+extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end);
+
+
+#endif
+
+#endif /*__KERNEL__ */
+#endif /* _ASM_POWERPC_TLBFLUSH_H */
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
new file mode 100644
index 0000000..c32da6f
--- /dev/null
+++ b/arch/powerpc/include/asm/topology.h
@@ -0,0 +1,117 @@
+#ifndef _ASM_POWERPC_TOPOLOGY_H
+#define _ASM_POWERPC_TOPOLOGY_H
+#ifdef __KERNEL__
+
+
+struct sys_device;
+struct device_node;
+
+#ifdef CONFIG_NUMA
+
+#include <asm/mmzone.h>
+
+static inline int cpu_to_node(int cpu)
+{
+ return numa_cpu_lookup_table[cpu];
+}
+
+#define parent_node(node) (node)
+
+static inline cpumask_t node_to_cpumask(int node)
+{
+ return numa_cpumask_lookup_table[node];
+}
+
+static inline int node_to_first_cpu(int node)
+{
+ cpumask_t tmp;
+ tmp = node_to_cpumask(node);
+ return first_cpu(tmp);
+}
+
+int of_node_to_nid(struct device_node *device);
+
+struct pci_bus;
+#ifdef CONFIG_PCI
+extern int pcibus_to_node(struct pci_bus *bus);
+#else
+static inline int pcibus_to_node(struct pci_bus *bus)
+{
+ return -1;
+}
+#endif
+
+#define pcibus_to_cpumask(bus) (pcibus_to_node(bus) == -1 ? \
+ CPU_MASK_ALL : \
+ node_to_cpumask(pcibus_to_node(bus)) \
+ )
+
+/* sched_domains SD_NODE_INIT for PPC64 machines */
+#define SD_NODE_INIT (struct sched_domain) { \
+ .span = CPU_MASK_NONE, \
+ .parent = NULL, \
+ .child = NULL, \
+ .groups = NULL, \
+ .min_interval = 8, \
+ .max_interval = 32, \
+ .busy_factor = 32, \
+ .imbalance_pct = 125, \
+ .cache_nice_tries = 1, \
+ .busy_idx = 3, \
+ .idle_idx = 1, \
+ .newidle_idx = 2, \
+ .wake_idx = 1, \
+ .flags = SD_LOAD_BALANCE \
+ | SD_BALANCE_EXEC \
+ | SD_BALANCE_NEWIDLE \
+ | SD_WAKE_IDLE \
+ | SD_SERIALIZE \
+ | SD_WAKE_BALANCE, \
+ .last_balance = jiffies, \
+ .balance_interval = 1, \
+ .nr_balance_failed = 0, \
+}
+
+extern void __init dump_numa_cpu_topology(void);
+
+extern int sysfs_add_device_to_node(struct sys_device *dev, int nid);
+extern void sysfs_remove_device_from_node(struct sys_device *dev, int nid);
+
+#else
+
+static inline int of_node_to_nid(struct device_node *device)
+{
+ return 0;
+}
+
+static inline void dump_numa_cpu_topology(void) {}
+
+static inline int sysfs_add_device_to_node(struct sys_device *dev, int nid)
+{
+ return 0;
+}
+
+static inline void sysfs_remove_device_from_node(struct sys_device *dev,
+ int nid)
+{
+}
+
+#endif /* CONFIG_NUMA */
+
+#include <asm-generic/topology.h>
+
+#ifdef CONFIG_SMP
+#include <asm/cputable.h>
+#define smt_capable() (cpu_has_feature(CPU_FTR_SMT))
+
+#ifdef CONFIG_PPC64
+#include <asm/smp.h>
+
+#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
+#define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu))
+#define topology_core_id(cpu) (cpu_to_core_id(cpu))
+#endif
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_TOPOLOGY_H */
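
The NUMA helpers above map CPUs to nodes through lookup tables; a minimal illustrative use (only meaningful with CONFIG_NUMA, and the function name is hypothetical):

/* Sketch: pick the first CPU that shares a NUMA node with @cpu. */
static inline int first_cpu_on_same_node(int cpu)
{
	return node_to_first_cpu(cpu_to_node(cpu));
}
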
diff --git a/arch/powerpc/include/asm/tsi108.h b/arch/powerpc/include/asm/tsi108.h
new file mode 100644
index 0000000..f8b6079
--- /dev/null
+++ b/arch/powerpc/include/asm/tsi108.h
@@ -0,0 +1,121 @@
+/*
+ * common routine and memory layout for Tundra TSI108(Grendel) host bridge
+ * memory controller.
+ *
+ * Author: Jacob Pan (jacob.pan@freescale.com)
+ * Alex Bounine (alexandreb@tundra.com)
+ *
+ * Copyright 2004-2006 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __PPC_KERNEL_TSI108_H
+#define __PPC_KERNEL_TSI108_H
+
+#include <asm/pci-bridge.h>
+
+/* Size of entire register space */
+#define TSI108_REG_SIZE (0x10000)
+
+/* Sizes of register spaces for individual blocks */
+#define TSI108_HLP_SIZE 0x1000
+#define TSI108_PCI_SIZE 0x1000
+#define TSI108_CLK_SIZE 0x1000
+#define TSI108_PB_SIZE 0x1000
+#define TSI108_SD_SIZE 0x1000
+#define TSI108_DMA_SIZE 0x1000
+#define TSI108_ETH_SIZE 0x1000
+#define TSI108_I2C_SIZE 0x400
+#define TSI108_MPIC_SIZE 0x400
+#define TSI108_UART0_SIZE 0x200
+#define TSI108_GPIO_SIZE 0x200
+#define TSI108_UART1_SIZE 0x200
+
+/* Offsets within Tsi108(A) CSR space for individual blocks */
+#define TSI108_HLP_OFFSET 0x0000
+#define TSI108_PCI_OFFSET 0x1000
+#define TSI108_CLK_OFFSET 0x2000
+#define TSI108_PB_OFFSET 0x3000
+#define TSI108_SD_OFFSET 0x4000
+#define TSI108_DMA_OFFSET 0x5000
+#define TSI108_ETH_OFFSET 0x6000
+#define TSI108_I2C_OFFSET 0x7000
+#define TSI108_MPIC_OFFSET 0x7400
+#define TSI108_UART0_OFFSET 0x7800
+#define TSI108_GPIO_OFFSET 0x7A00
+#define TSI108_UART1_OFFSET 0x7C00
+
+/* Tsi108 registers used by common code components */
+#define TSI108_PCI_CSR (0x004)
+#define TSI108_PCI_IRP_CFG_CTL (0x180)
+#define TSI108_PCI_IRP_STAT (0x184)
+#define TSI108_PCI_IRP_ENABLE (0x188)
+#define TSI108_PCI_IRP_INTAD (0x18C)
+
+#define TSI108_PCI_IRP_STAT_P_INT (0x00400000)
+#define TSI108_PCI_IRP_ENABLE_P_INT (0x00400000)
+
+#define TSI108_CG_PWRUP_STATUS (0x234)
+
+#define TSI108_PB_ISR (0x00C)
+#define TSI108_PB_ERRCS (0x404)
+#define TSI108_PB_AERR (0x408)
+
+#define TSI108_PB_ERRCS_ES (1 << 1)
+#define TSI108_PB_ISR_PBS_RD_ERR (1 << 8)
+
+#define TSI108_PCI_CFG_SIZE (0x01000000)
+
+/*
+ * PHY Configuration Options
+ *
+ * Specify "bcm54xx" in the compatible property of your device tree phy
+ * nodes if your board uses the Broadcom PHYs
+ */
+#define TSI108_PHY_MV88E 0 /* Marvell 88Exxxx PHY */
+#define TSI108_PHY_BCM54XX 1 /* Broadcom BCM54xx PHY */
+
+/* Global variables */
+
+extern u32 tsi108_pci_cfg_base;
+/* Exported functions */
+
+extern int tsi108_bridge_init(struct pci_controller *hose, uint phys_csr_base);
+extern unsigned long tsi108_get_mem_size(void);
+extern unsigned long tsi108_get_cpu_clk(void);
+extern unsigned long tsi108_get_sdc_clk(void);
+extern int tsi108_direct_write_config(struct pci_bus *bus, unsigned int devfn,
+ int offset, int len, u32 val);
+extern int tsi108_direct_read_config(struct pci_bus *bus, unsigned int devfn,
+ int offset, int len, u32 * val);
+extern void tsi108_clear_pci_error(u32 pci_cfg_base);
+
+extern phys_addr_t get_csrbase(void);
+
+typedef struct {
+ u32 regs; /* hw registers base address */
+ u32 phyregs; /* phy registers base address */
+ u16 phy; /* phy address */
+ u16 irq_num; /* irq number */
+ u8 mac_addr[6]; /* phy mac address */
+ u16 phy_type; /* type of phy on board */
+} hw_info;
+
+extern u32 get_vir_csrbase(void);
+extern u32 tsi108_csr_vir_base;
+
+static inline u32 tsi108_read_reg(u32 reg_offset)
+{
+ return in_be32((volatile u32 *)(tsi108_csr_vir_base + reg_offset));
+}
+
+static inline void tsi108_write_reg(u32 reg_offset, u32 val)
+{
+ out_be32((volatile u32 *)(tsi108_csr_vir_base + reg_offset), val);
+}
+
+#endif /* __PPC_KERNEL_TSI108_H */
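
tsi108_read_reg()/tsi108_write_reg() give offset-based access to the CSR window. An illustrative read-modify-write of the PCI interrupt bits defined above; it assumes (this is a guess, not stated in the header) that the TSI108_PCI_IRP_* offsets are relative to the PCI block and therefore added to TSI108_PCI_OFFSET.

/* Sketch: check and enable the PCI block's P_INT interrupt. */
static inline int tsi108_pci_int_pending(void)
{
	u32 stat = tsi108_read_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_STAT);

	return (stat & TSI108_PCI_IRP_STAT_P_INT) != 0;
}

static inline void tsi108_pci_int_enable(void)
{
	u32 en = tsi108_read_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_ENABLE);

	tsi108_write_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_ENABLE,
			 en | TSI108_PCI_IRP_ENABLE_P_INT);
}
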
diff --git a/arch/powerpc/include/asm/tsi108_irq.h b/arch/powerpc/include/asm/tsi108_irq.h
new file mode 100644
index 0000000..6ed9397
--- /dev/null
+++ b/arch/powerpc/include/asm/tsi108_irq.h
@@ -0,0 +1,124 @@
+/*
+ * (C) Copyright 2005 Tundra Semiconductor Corp.
+ * Alex Bounine, <alexandreb at tundra.com>.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+/*
+ * definitions for interrupt controller initialization and external interrupt
+ * demultiplexing on TSI108EMU/SVB boards.
+ */
+
+#ifndef _ASM_POWERPC_TSI108_IRQ_H
+#define _ASM_POWERPC_TSI108_IRQ_H
+
+/*
+ * Tsi108 interrupts
+ */
+#ifndef TSI108_IRQ_REG_BASE
+#define TSI108_IRQ_REG_BASE 0
+#endif
+
+#define TSI108_IRQ(x) (TSI108_IRQ_REG_BASE + (x))
+
+#define TSI108_MAX_VECTORS (36 + 4) /* 36 sources + PCI INT demux */
+#define MAX_TASK_PRIO 0xF
+
+#define TSI108_IRQ_SPURIOUS (TSI108_MAX_VECTORS)
+
+#define DEFAULT_PRIO_LVL 10 /* initial priority level */
+
+/* Interrupt vectors assignment to external and internal
+ * sources of requests. */
+
+/* EXTERNAL INTERRUPT SOURCES */
+
+#define IRQ_TSI108_EXT_INT0 TSI108_IRQ(0) /* External Source at INT[0] */
+#define IRQ_TSI108_EXT_INT1 TSI108_IRQ(1) /* External Source at INT[1] */
+#define IRQ_TSI108_EXT_INT2 TSI108_IRQ(2) /* External Source at INT[2] */
+#define IRQ_TSI108_EXT_INT3 TSI108_IRQ(3) /* External Source at INT[3] */
+
+/* INTERNAL INTERRUPT SOURCES */
+
+#define IRQ_TSI108_RESERVED0 TSI108_IRQ(4) /* Reserved IRQ */
+#define IRQ_TSI108_RESERVED1 TSI108_IRQ(5) /* Reserved IRQ */
+#define IRQ_TSI108_RESERVED2 TSI108_IRQ(6) /* Reserved IRQ */
+#define IRQ_TSI108_RESERVED3 TSI108_IRQ(7) /* Reserved IRQ */
+#define IRQ_TSI108_DMA0 TSI108_IRQ(8) /* DMA0 */
+#define IRQ_TSI108_DMA1 TSI108_IRQ(9) /* DMA1 */
+#define IRQ_TSI108_DMA2 TSI108_IRQ(10) /* DMA2 */
+#define IRQ_TSI108_DMA3 TSI108_IRQ(11) /* DMA3 */
+#define IRQ_TSI108_UART0 TSI108_IRQ(12) /* UART0 */
+#define IRQ_TSI108_UART1 TSI108_IRQ(13) /* UART1 */
+#define IRQ_TSI108_I2C TSI108_IRQ(14) /* I2C */
+#define IRQ_TSI108_GPIO TSI108_IRQ(15) /* GPIO */
+#define IRQ_TSI108_GIGE0 TSI108_IRQ(16) /* GIGE0 */
+#define IRQ_TSI108_GIGE1 TSI108_IRQ(17) /* GIGE1 */
+#define IRQ_TSI108_RESERVED4 TSI108_IRQ(18) /* Reserved IRQ */
+#define IRQ_TSI108_HLP TSI108_IRQ(19) /* HLP */
+#define IRQ_TSI108_SDRAM TSI108_IRQ(20) /* SDC */
+#define IRQ_TSI108_PROC_IF TSI108_IRQ(21) /* Processor IF */
+#define IRQ_TSI108_RESERVED5 TSI108_IRQ(22) /* Reserved IRQ */
+#define IRQ_TSI108_PCI TSI108_IRQ(23) /* PCI/X block */
+
+#define IRQ_TSI108_MBOX0 TSI108_IRQ(24) /* Mailbox 0 register */
+#define IRQ_TSI108_MBOX1 TSI108_IRQ(25) /* Mailbox 1 register */
+#define IRQ_TSI108_MBOX2 TSI108_IRQ(26) /* Mailbox 2 register */
+#define IRQ_TSI108_MBOX3 TSI108_IRQ(27) /* Mailbox 3 register */
+
+#define IRQ_TSI108_DBELL0 TSI108_IRQ(28) /* Doorbell 0 */
+#define IRQ_TSI108_DBELL1 TSI108_IRQ(29) /* Doorbell 1 */
+#define IRQ_TSI108_DBELL2 TSI108_IRQ(30) /* Doorbell 2 */
+#define IRQ_TSI108_DBELL3 TSI108_IRQ(31) /* Doorbell 3 */
+
+#define IRQ_TSI108_TIMER0 TSI108_IRQ(32) /* Global Timer 0 */
+#define IRQ_TSI108_TIMER1 TSI108_IRQ(33) /* Global Timer 1 */
+#define IRQ_TSI108_TIMER2 TSI108_IRQ(34) /* Global Timer 2 */
+#define IRQ_TSI108_TIMER3 TSI108_IRQ(35) /* Global Timer 3 */
+
+/*
+ * PCI bus INTA# - INTD# lines demultiplexor
+ */
+#define IRQ_PCI_INTAD_BASE TSI108_IRQ(36)
+#define IRQ_PCI_INTA (IRQ_PCI_INTAD_BASE + 0)
+#define IRQ_PCI_INTB (IRQ_PCI_INTAD_BASE + 1)
+#define IRQ_PCI_INTC (IRQ_PCI_INTAD_BASE + 2)
+#define IRQ_PCI_INTD (IRQ_PCI_INTAD_BASE + 3)
+#define NUM_PCI_IRQS (4)
+
+/* number of entries in vector dispatch table */
+#define IRQ_TSI108_TAB_SIZE (TSI108_MAX_VECTORS + 1)
+
+/* Mapping of MPIC outputs to processors' interrupt pins */
+
+#define IDIR_INT_OUT0 0x1
+#define IDIR_INT_OUT1 0x2
+#define IDIR_INT_OUT2 0x4
+#define IDIR_INT_OUT3 0x8
+
+/*---------------------------------------------------------------
+ * IRQ line configuration parameters */
+
+/* Interrupt delivery modes */
+typedef enum {
+ TSI108_IRQ_DIRECTED,
+ TSI108_IRQ_DISTRIBUTED,
+} TSI108_IRQ_MODE;
+#endif /* _ASM_POWERPC_TSI108_IRQ_H */
diff --git a/arch/powerpc/include/asm/tsi108_pci.h b/arch/powerpc/include/asm/tsi108_pci.h
new file mode 100644
index 0000000..5653d7c
--- /dev/null
+++ b/arch/powerpc/include/asm/tsi108_pci.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2007 IBM Corp
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef _ASM_POWERPC_TSI108_PCI_H
+#define _ASM_POWERPC_TSI108_PCI_H
+
+#include <asm/tsi108.h>
+
+/* Register definitions */
+#define TSI108_PCI_P2O_BAR0 (TSI108_PCI_OFFSET + 0x10)
+#define TSI108_PCI_P2O_BAR0_UPPER (TSI108_PCI_OFFSET + 0x14)
+#define TSI108_PCI_P2O_BAR2 (TSI108_PCI_OFFSET + 0x18)
+#define TSI108_PCI_P2O_BAR2_UPPER (TSI108_PCI_OFFSET + 0x1c)
+#define TSI108_PCI_P2O_PAGE_SIZES (TSI108_PCI_OFFSET + 0x4c)
+#define TSI108_PCI_PFAB_BAR0 (TSI108_PCI_OFFSET + 0x204)
+#define TSI108_PCI_PFAB_BAR0_UPPER (TSI108_PCI_OFFSET + 0x208)
+#define TSI108_PCI_PFAB_IO (TSI108_PCI_OFFSET + 0x20c)
+#define TSI108_PCI_PFAB_IO_UPPER (TSI108_PCI_OFFSET + 0x210)
+#define TSI108_PCI_PFAB_MEM32 (TSI108_PCI_OFFSET + 0x214)
+#define TSI108_PCI_PFAB_PFM3 (TSI108_PCI_OFFSET + 0x220)
+#define TSI108_PCI_PFAB_PFM4 (TSI108_PCI_OFFSET + 0x230)
+
+extern int tsi108_setup_pci(struct device_node *dev, u32 cfg_phys, int primary);
+extern void tsi108_pci_int_init(struct device_node *node);
+extern void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc);
+extern void tsi108_clear_pci_cfg_error(void);
+
+#endif /* _ASM_POWERPC_TSI108_PCI_H */
diff --git a/arch/powerpc/include/asm/types.h b/arch/powerpc/include/asm/types.h
new file mode 100644
index 0000000..d3374bc
--- /dev/null
+++ b/arch/powerpc/include/asm/types.h
@@ -0,0 +1,75 @@
+#ifndef _ASM_POWERPC_TYPES_H
+#define _ASM_POWERPC_TYPES_H
+
+#ifdef __powerpc64__
+# include <asm-generic/int-l64.h>
+#else
+# include <asm-generic/int-ll64.h>
+#endif
+
+#ifndef __ASSEMBLY__
+
+/*
+ * This file is never included by application software unless
+ * explicitly requested (e.g., via linux/types.h) in which case the
+ * application is Linux specific so (user-) name space pollution is
+ * not a major issue. However, for interoperability, libraries still
+ * need to be careful to avoid name clashes.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifdef __powerpc64__
+typedef unsigned int umode_t;
+#else
+typedef unsigned short umode_t;
+#endif
+
+typedef struct {
+ __u32 u[4];
+} __attribute__((aligned(16))) __vector128;
+
+#endif /* __ASSEMBLY__ */
+
+#ifdef __KERNEL__
+/*
+ * These aren't exported outside the kernel to avoid name space clashes
+ */
+#ifdef __powerpc64__
+#define BITS_PER_LONG 64
+#else
+#define BITS_PER_LONG 32
+#endif
+
+#ifndef __ASSEMBLY__
+
+typedef __vector128 vector128;
+
+/* Physical address used by some IO functions */
+#if defined(CONFIG_PPC64) || defined(CONFIG_PHYS_64BIT)
+typedef u64 phys_addr_t;
+#else
+typedef u32 phys_addr_t;
+#endif
+
+#ifdef __powerpc64__
+typedef u64 dma_addr_t;
+#else
+typedef u32 dma_addr_t;
+#endif
+typedef u64 dma64_addr_t;
+
+typedef struct {
+ unsigned long entry;
+ unsigned long toc;
+ unsigned long env;
+} func_descr_t;
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_POWERPC_TYPES_H */
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
new file mode 100644
index 0000000..bd0fb84
--- /dev/null
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -0,0 +1,496 @@
+#ifndef _ARCH_POWERPC_UACCESS_H
+#define _ARCH_POWERPC_UACCESS_H
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <asm/asm-compat.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+/*
+ * The fs value determines whether argument validity checking should be
+ * performed or not. If get_fs() == USER_DS, checking is performed, with
+ * get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons, these macros are grossly misnamed.
+ *
+ * The fs/ds values are now the highest legal address in the "segment".
+ * This simplifies the checking in the routines below.
+ */
+
+#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
+
+#define KERNEL_DS MAKE_MM_SEG(~0UL)
+#ifdef __powerpc64__
+/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
+#define USER_DS MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
+#else
+#define USER_DS MAKE_MM_SEG(TASK_SIZE - 1)
+#endif
+
+#define get_ds() (KERNEL_DS)
+#define get_fs() (current->thread.fs)
+#define set_fs(val) (current->thread.fs = (val))
+
+#define segment_eq(a, b) ((a).seg == (b).seg)
+
+#ifdef __powerpc64__
+/*
+ * This check is sufficient because there is a large enough
+ * gap between user addresses and the kernel addresses
+ */
+#define __access_ok(addr, size, segment) \
+ (((addr) <= (segment).seg) && ((size) <= (segment).seg))
+
+#else
+
+#define __access_ok(addr, size, segment) \
+ (((addr) <= (segment).seg) && \
+ (((size) == 0) || (((size) - 1) <= ((segment).seg - (addr)))))
+
+#endif
+
+#define access_ok(type, addr, size) \
+ (__chk_user_ptr(addr), \
+ __access_ok((__force unsigned long)(addr), (size), get_fs()))
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry {
+ unsigned long insn;
+ unsigned long fixup;
+};
+
+/*
+ * These are the main single-value transfer routines. They automatically
+ * use the right size if we just have the right pointer type.
+ *
+ * This gets kind of ugly. We want to return _two_ values in "get_user()"
+ * and yet we don't want to do any pointers, because that is too much
+ * of a performance impact. Thus we have a few rather ugly macros here,
+ * and hide all the ugliness from the user.
+ *
+ * The "__xxx" versions of the user access functions are versions that
+ * do not verify the address space, that must have been done previously
+ * with a separate "access_ok()" call (this is used when we do multiple
+ * accesses to the same area of user memory).
+ *
+ * As we use the same address space for kernel and user data on the
+ * PowerPC, we can just do these as direct assignments. (Of course, the
+ * exception handling means that it's no longer "just"...)
+ *
+ * The "user64" versions of the user access functions are versions that
+ * allow access of 64-bit data. The "get_user" functions do not
+ * properly handle 64-bit data because the value gets cast down to a long.
+ * The "put_user" functions already handle 64-bit data properly but we add
+ * "user64" versions for completeness.
+ */
+#define get_user(x, ptr) \
+ __get_user_check((x), (ptr), sizeof(*(ptr)))
+#define put_user(x, ptr) \
+ __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+
+#define __get_user(x, ptr) \
+ __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+#define __put_user(x, ptr) \
+ __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+
+#ifndef __powerpc64__
+#define __get_user64(x, ptr) \
+ __get_user64_nocheck((x), (ptr), sizeof(*(ptr)))
+#define __put_user64(x, ptr) __put_user(x, ptr)
+#endif
+
+#define __get_user_inatomic(x, ptr) \
+ __get_user_nosleep((x), (ptr), sizeof(*(ptr)))
+#define __put_user_inatomic(x, ptr) \
+ __put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+
+#define __get_user_unaligned __get_user
+#define __put_user_unaligned __put_user
+
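
A minimal usage sketch of the checked and unchecked accessors described above; struct demo_req and demo_fetch() are invented for illustration and assume <linux/types.h> and <linux/errno.h>:

	/* Sketch: validate the whole user struct once with access_ok(), then
	 * use the unchecked __get_user() variants for the individual fields.
	 */
	struct demo_req {			/* hypothetical layout */
		u32 cmd;
		u32 arg;
	};

	static long demo_fetch(struct demo_req __user *ureq, u32 *cmd, u32 *arg)
	{
		if (!access_ok(VERIFY_READ, ureq, sizeof(*ureq)))
			return -EFAULT;
		if (__get_user(*cmd, &ureq->cmd) || __get_user(*arg, &ureq->arg))
			return -EFAULT;
		return 0;
	}
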
+extern long __put_user_bad(void);
+
+/*
+ * We don't tell gcc that we are accessing memory, but this is OK
+ * because we do not write to any memory gcc knows about, so there
+ * are no aliasing issues.
+ */
+#define __put_user_asm(x, addr, err, op) \
+ __asm__ __volatile__( \
+ "1: " op " %1,0(%2) # put_user\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: li %0,%3\n" \
+ " b 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ PPC_LONG_ALIGN "\n" \
+ PPC_LONG "1b,3b\n" \
+ ".previous" \
+ : "=r" (err) \
+ : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
+
+#ifdef __powerpc64__
+#define __put_user_asm2(x, ptr, retval) \
+ __put_user_asm(x, ptr, retval, "std")
+#else /* __powerpc64__ */
+#define __put_user_asm2(x, addr, err) \
+ __asm__ __volatile__( \
+ "1: stw %1,0(%2)\n" \
+ "2: stw %1+1,4(%2)\n" \
+ "3:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "4: li %0,%3\n" \
+ " b 3b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ PPC_LONG_ALIGN "\n" \
+ PPC_LONG "1b,4b\n" \
+ PPC_LONG "2b,4b\n" \
+ ".previous" \
+ : "=r" (err) \
+ : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
+#endif /* __powerpc64__ */
+
+#define __put_user_size(x, ptr, size, retval) \
+do { \
+ retval = 0; \
+ switch (size) { \
+ case 1: __put_user_asm(x, ptr, retval, "stb"); break; \
+ case 2: __put_user_asm(x, ptr, retval, "sth"); break; \
+ case 4: __put_user_asm(x, ptr, retval, "stw"); break; \
+ case 8: __put_user_asm2(x, ptr, retval); break; \
+ default: __put_user_bad(); \
+ } \
+} while (0)
+
+#define __put_user_nocheck(x, ptr, size) \
+({ \
+ long __pu_err; \
+ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
+ if (!is_kernel_addr((unsigned long)__pu_addr)) \
+ might_sleep(); \
+ __chk_user_ptr(ptr); \
+ __put_user_size((x), __pu_addr, (size), __pu_err); \
+ __pu_err; \
+})
+
+#define __put_user_check(x, ptr, size) \
+({ \
+ long __pu_err = -EFAULT; \
+ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
+ might_sleep(); \
+ if (access_ok(VERIFY_WRITE, __pu_addr, size)) \
+ __put_user_size((x), __pu_addr, (size), __pu_err); \
+ __pu_err; \
+})
+
+#define __put_user_nosleep(x, ptr, size) \
+({ \
+ long __pu_err; \
+ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
+ __chk_user_ptr(ptr); \
+ __put_user_size((x), __pu_addr, (size), __pu_err); \
+ __pu_err; \
+})
+
+
+extern long __get_user_bad(void);
+
+#define __get_user_asm(x, addr, err, op) \
+ __asm__ __volatile__( \
+ "1: "op" %1,0(%2) # get_user\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: li %0,%3\n" \
+ " li %1,0\n" \
+ " b 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ PPC_LONG_ALIGN "\n" \
+ PPC_LONG "1b,3b\n" \
+ ".previous" \
+ : "=r" (err), "=r" (x) \
+ : "b" (addr), "i" (-EFAULT), "0" (err))
+
+#ifdef __powerpc64__
+#define __get_user_asm2(x, addr, err) \
+ __get_user_asm(x, addr, err, "ld")
+#else /* __powerpc64__ */
+#define __get_user_asm2(x, addr, err) \
+ __asm__ __volatile__( \
+ "1: lwz %1,0(%2)\n" \
+ "2: lwz %1+1,4(%2)\n" \
+ "3:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "4: li %0,%3\n" \
+ " li %1,0\n" \
+ " li %1+1,0\n" \
+ " b 3b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ PPC_LONG_ALIGN "\n" \
+ PPC_LONG "1b,4b\n" \
+ PPC_LONG "2b,4b\n" \
+ ".previous" \
+ : "=r" (err), "=&r" (x) \
+ : "b" (addr), "i" (-EFAULT), "0" (err))
+#endif /* __powerpc64__ */
+
+#define __get_user_size(x, ptr, size, retval) \
+do { \
+ retval = 0; \
+ __chk_user_ptr(ptr); \
+ if (size > sizeof(x)) \
+ (x) = __get_user_bad(); \
+ switch (size) { \
+ case 1: __get_user_asm(x, ptr, retval, "lbz"); break; \
+ case 2: __get_user_asm(x, ptr, retval, "lhz"); break; \
+ case 4: __get_user_asm(x, ptr, retval, "lwz"); break; \
+ case 8: __get_user_asm2(x, ptr, retval); break; \
+ default: (x) = __get_user_bad(); \
+ } \
+} while (0)
+
+#define __get_user_nocheck(x, ptr, size) \
+({ \
+ long __gu_err; \
+ unsigned long __gu_val; \
+ const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+ __chk_user_ptr(ptr); \
+ if (!is_kernel_addr((unsigned long)__gu_addr)) \
+ might_sleep(); \
+ __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
+ (x) = (__typeof__(*(ptr)))__gu_val; \
+ __gu_err; \
+})
+
+#ifndef __powerpc64__
+#define __get_user64_nocheck(x, ptr, size) \
+({ \
+ long __gu_err; \
+ long long __gu_val; \
+ const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+ __chk_user_ptr(ptr); \
+ if (!is_kernel_addr((unsigned long)__gu_addr)) \
+ might_sleep(); \
+ __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
+ (x) = (__typeof__(*(ptr)))__gu_val; \
+ __gu_err; \
+})
+#endif /* __powerpc64__ */
+
+#define __get_user_check(x, ptr, size) \
+({ \
+ long __gu_err = -EFAULT; \
+ unsigned long __gu_val = 0; \
+ const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+ might_sleep(); \
+ if (access_ok(VERIFY_READ, __gu_addr, (size))) \
+ __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
+ (x) = (__typeof__(*(ptr)))__gu_val; \
+ __gu_err; \
+})
+
+#define __get_user_nosleep(x, ptr, size) \
+({ \
+ long __gu_err; \
+ unsigned long __gu_val; \
+ const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+ __chk_user_ptr(ptr); \
+ __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
+ (x) = (__typeof__(*(ptr)))__gu_val; \
+ __gu_err; \
+})
+
+
+/* more complex routines */
+
+extern unsigned long __copy_tofrom_user(void __user *to,
+ const void __user *from, unsigned long size);
+
+#ifndef __powerpc64__
+
+static inline unsigned long copy_from_user(void *to,
+ const void __user *from, unsigned long n)
+{
+ unsigned long over;
+
+ if (access_ok(VERIFY_READ, from, n))
+ return __copy_tofrom_user((__force void __user *)to, from, n);
+ if ((unsigned long)from < TASK_SIZE) {
+ over = (unsigned long)from + n - TASK_SIZE;
+ return __copy_tofrom_user((__force void __user *)to, from,
+ n - over) + over;
+ }
+ return n;
+}
+
+static inline unsigned long copy_to_user(void __user *to,
+ const void *from, unsigned long n)
+{
+ unsigned long over;
+
+ if (access_ok(VERIFY_WRITE, to, n))
+ return __copy_tofrom_user(to, (__force void __user *)from, n);
+ if ((unsigned long)to < TASK_SIZE) {
+ over = (unsigned long)to + n - TASK_SIZE;
+ return __copy_tofrom_user(to, (__force void __user *)from,
+ n - over) + over;
+ }
+ return n;
+}
+
+#else /* __powerpc64__ */
+
+#define __copy_in_user(to, from, size) \
+ __copy_tofrom_user((to), (from), (size))
+
+extern unsigned long copy_from_user(void *to, const void __user *from,
+ unsigned long n);
+extern unsigned long copy_to_user(void __user *to, const void *from,
+ unsigned long n);
+extern unsigned long copy_in_user(void __user *to, const void __user *from,
+ unsigned long n);
+
+#endif /* __powerpc64__ */
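
On both 32-bit and 64-bit, the copy routines return the number of bytes that could not be copied; a hedged sketch of the usual error-handling pattern (demo_roundtrip() and its 64-byte bounce buffer are illustrative only, error codes from <linux/errno.h>):

	static long demo_roundtrip(void __user *ubuf, unsigned long len)
	{
		char kbuf[64];

		if (len > sizeof(kbuf))
			return -EINVAL;
		if (copy_from_user(kbuf, ubuf, len))	/* non-zero => partial copy */
			return -EFAULT;
		/* ... operate on kbuf here ... */
		if (copy_to_user(ubuf, kbuf, len))
			return -EFAULT;
		return 0;
	}
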
+
+static inline unsigned long __copy_from_user_inatomic(void *to,
+ const void __user *from, unsigned long n)
+{
+ if (__builtin_constant_p(n) && (n <= 8)) {
+ unsigned long ret = 1;
+
+ switch (n) {
+ case 1:
+ __get_user_size(*(u8 *)to, from, 1, ret);
+ break;
+ case 2:
+ __get_user_size(*(u16 *)to, from, 2, ret);
+ break;
+ case 4:
+ __get_user_size(*(u32 *)to, from, 4, ret);
+ break;
+ case 8:
+ __get_user_size(*(u64 *)to, from, 8, ret);
+ break;
+ }
+ if (ret == 0)
+ return 0;
+ }
+ return __copy_tofrom_user((__force void __user *)to, from, n);
+}
+
+static inline unsigned long __copy_to_user_inatomic(void __user *to,
+ const void *from, unsigned long n)
+{
+ if (__builtin_constant_p(n) && (n <= 8)) {
+ unsigned long ret = 1;
+
+ switch (n) {
+ case 1:
+ __put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret);
+ break;
+ case 2:
+ __put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret);
+ break;
+ case 4:
+ __put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret);
+ break;
+ case 8:
+ __put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret);
+ break;
+ }
+ if (ret == 0)
+ return 0;
+ }
+ return __copy_tofrom_user(to, (__force const void __user *)from, n);
+}
+
+static inline unsigned long __copy_from_user(void *to,
+ const void __user *from, unsigned long size)
+{
+ might_sleep();
+ return __copy_from_user_inatomic(to, from, size);
+}
+
+static inline unsigned long __copy_to_user(void __user *to,
+ const void *from, unsigned long size)
+{
+ might_sleep();
+ return __copy_to_user_inatomic(to, from, size);
+}
+
+extern unsigned long __clear_user(void __user *addr, unsigned long size);
+
+static inline unsigned long clear_user(void __user *addr, unsigned long size)
+{
+ might_sleep();
+ if (likely(access_ok(VERIFY_WRITE, addr, size)))
+ return __clear_user(addr, size);
+ if ((unsigned long)addr < TASK_SIZE) {
+ unsigned long over = (unsigned long)addr + size - TASK_SIZE;
+ return __clear_user(addr, size - over) + over;
+ }
+ return size;
+}
+
+extern int __strncpy_from_user(char *dst, const char __user *src, long count);
+
+static inline long strncpy_from_user(char *dst, const char __user *src,
+ long count)
+{
+ might_sleep();
+ if (likely(access_ok(VERIFY_READ, src, 1)))
+ return __strncpy_from_user(dst, src, count);
+ return -EFAULT;
+}
+
+/*
+ * Return the size of a string (including the ending 0)
+ *
+ * Return 0 for error
+ */
+extern int __strnlen_user(const char __user *str, long len, unsigned long top);
+
+/*
+ * Returns the length of the string at str (including the null byte),
+ * or 0 if we hit a page we can't access,
+ * or something > len if we didn't find a null byte.
+ *
+ * The `top' parameter to __strnlen_user is to make sure that
+ * we can never overflow from the user area into kernel space.
+ */
+static inline int strnlen_user(const char __user *str, long len)
+{
+ unsigned long top = current->thread.fs.seg;
+
+ if ((unsigned long)str > top)
+ return 0;
+ return __strnlen_user(str, len, top);
+}
+
+#define strlen_user(str) strnlen_user((str), 0x7ffffffe)
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+
+#endif /* _ARCH_POWERPC_UACCESS_H */
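
A hedged sketch of the string helpers declared above; demo_take_name() and its 64-byte buffer are hypothetical, and the error codes assume <linux/errno.h>:

	/* Sketch: bound the user string first, copy it in, then wipe the
	 * user-space buffer with clear_user().
	 */
	static long demo_take_name(char __user *uname, unsigned long ulen)
	{
		char name[64];
		long copied;

		/* strnlen_user() counts the NUL; 0 means the page was inaccessible */
		if (strnlen_user(uname, sizeof(name)) > sizeof(name))
			return -ENAMETOOLONG;
		copied = strncpy_from_user(name, uname, sizeof(name) - 1);
		if (copied < 0)
			return copied;		/* -EFAULT from the copy */
		name[copied] = '\0';
		if (clear_user(uname, ulen))
			return -EFAULT;
		return copied;
	}
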
diff --git a/arch/powerpc/include/asm/ucc.h b/arch/powerpc/include/asm/ucc.h
new file mode 100644
index 0000000..46b09ba
--- /dev/null
+++ b/arch/powerpc/include/asm/ucc.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors: Shlomi Gridish <gridish@freescale.com>
+ * Li Yang <leoli@freescale.com>
+ *
+ * Description:
+ * Internal header file for UCC unit routines.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#ifndef __UCC_H__
+#define __UCC_H__
+
+#include <asm/immap_qe.h>
+#include <asm/qe.h>
+
+#define STATISTICS
+
+#define UCC_MAX_NUM 8
+
+/* Slow or fast type for UCCs.
+*/
+enum ucc_speed_type {
+ UCC_SPEED_TYPE_FAST = UCC_GUEMR_MODE_FAST_RX | UCC_GUEMR_MODE_FAST_TX,
+ UCC_SPEED_TYPE_SLOW = UCC_GUEMR_MODE_SLOW_RX | UCC_GUEMR_MODE_SLOW_TX
+};
+
+/* ucc_set_type
+ * Sets UCC to slow or fast mode.
+ *
+ * ucc_num - (In) number of UCC (0-7).
+ * speed - (In) slow or fast mode for UCC.
+ */
+int ucc_set_type(unsigned int ucc_num, enum ucc_speed_type speed);
+
+int ucc_set_qe_mux_mii_mng(unsigned int ucc_num);
+
+int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock,
+ enum comm_dir mode);
+
+int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask);
+
+/* QE MUX clock routing for UCC
+*/
+static inline int ucc_set_qe_mux_grant(unsigned int ucc_num, int set)
+{
+ return ucc_mux_set_grant_tsa_bkpt(ucc_num, set, QE_CMXUCR_GRANT);
+}
+
+static inline int ucc_set_qe_mux_tsa(unsigned int ucc_num, int set)
+{
+ return ucc_mux_set_grant_tsa_bkpt(ucc_num, set, QE_CMXUCR_TSA);
+}
+
+static inline int ucc_set_qe_mux_bkpt(unsigned int ucc_num, int set)
+{
+ return ucc_mux_set_grant_tsa_bkpt(ucc_num, set, QE_CMXUCR_BKPT);
+}
+
+#endif /* __UCC_H__ */
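
A hedged bring-up sketch using the routines above; the UCC number and clock choices (QE_CLK9/QE_CLK10 and the comm_dir values come from asm/qe.h) are illustrative, not a recommendation for any particular board:

	/* Sketch: configure UCC 3 as a fast UCC, route its Rx/Tx clocks and
	 * let it drive the MII management signals.
	 */
	static int demo_ucc3_setup(void)
	{
		int ret;

		ret = ucc_set_type(3, UCC_SPEED_TYPE_FAST);
		if (ret)
			return ret;
		ret = ucc_set_qe_mux_rxtx(3, QE_CLK9, COMM_DIR_RX);
		if (ret)
			return ret;
		ret = ucc_set_qe_mux_rxtx(3, QE_CLK10, COMM_DIR_TX);
		if (ret)
			return ret;
		return ucc_set_qe_mux_mii_mng(3);
	}
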
diff --git a/arch/powerpc/include/asm/ucc_fast.h b/arch/powerpc/include/asm/ucc_fast.h
new file mode 100644
index 0000000..839aab8
--- /dev/null
+++ b/arch/powerpc/include/asm/ucc_fast.h
@@ -0,0 +1,244 @@
+/*
+ * Internal header file for UCC FAST unit routines.
+ *
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors: Shlomi Gridish <gridish@freescale.com>
+ * Li Yang <leoli@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#ifndef __UCC_FAST_H__
+#define __UCC_FAST_H__
+
+#include <linux/kernel.h>
+
+#include <asm/immap_qe.h>
+#include <asm/qe.h>
+
+#include "ucc.h"
+
+/* Receive BD's status */
+#define R_E 0x80000000 /* buffer empty */
+#define R_W 0x20000000 /* wrap bit */
+#define R_I 0x10000000 /* interrupt on reception */
+#define R_L 0x08000000 /* last */
+#define R_F 0x04000000 /* first */
+
+/* transmit BD's status */
+#define T_R 0x80000000 /* ready bit */
+#define T_W 0x20000000 /* wrap bit */
+#define T_I 0x10000000 /* interrupt on completion */
+#define T_L 0x08000000 /* last */
+
+/* Rx Data buffer must be 4-byte aligned in most cases */
+#define UCC_FAST_RX_ALIGN 4
+#define UCC_FAST_MRBLR_ALIGNMENT 4
+#define UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT 8
+
+/* Sizes */
+#define UCC_FAST_URFS_MIN_VAL 0x88
+#define UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR 8
+
+/* ucc_fast_channel_protocol_mode - UCC FAST mode */
+enum ucc_fast_channel_protocol_mode {
+ UCC_FAST_PROTOCOL_MODE_HDLC = 0x00000000,
+ UCC_FAST_PROTOCOL_MODE_RESERVED01 = 0x00000001,
+ UCC_FAST_PROTOCOL_MODE_RESERVED_QMC = 0x00000002,
+ UCC_FAST_PROTOCOL_MODE_RESERVED02 = 0x00000003,
+ UCC_FAST_PROTOCOL_MODE_RESERVED_UART = 0x00000004,
+ UCC_FAST_PROTOCOL_MODE_RESERVED03 = 0x00000005,
+ UCC_FAST_PROTOCOL_MODE_RESERVED_EX_MAC_1 = 0x00000006,
+ UCC_FAST_PROTOCOL_MODE_RESERVED_EX_MAC_2 = 0x00000007,
+ UCC_FAST_PROTOCOL_MODE_RESERVED_BISYNC = 0x00000008,
+ UCC_FAST_PROTOCOL_MODE_RESERVED04 = 0x00000009,
+ UCC_FAST_PROTOCOL_MODE_ATM = 0x0000000A,
+ UCC_FAST_PROTOCOL_MODE_RESERVED05 = 0x0000000B,
+ UCC_FAST_PROTOCOL_MODE_ETHERNET = 0x0000000C,
+ UCC_FAST_PROTOCOL_MODE_RESERVED06 = 0x0000000D,
+ UCC_FAST_PROTOCOL_MODE_POS = 0x0000000E,
+ UCC_FAST_PROTOCOL_MODE_RESERVED07 = 0x0000000F
+};
+
+/* ucc_fast_transparent_txrx - UCC Fast Transparent TX & RX */
+enum ucc_fast_transparent_txrx {
+ UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL = 0x00000000,
+ UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_TRANSPARENT = 0x18000000
+};
+
+/* UCC fast diagnostic mode */
+enum ucc_fast_diag_mode {
+ UCC_FAST_DIAGNOSTIC_NORMAL = 0x0,
+ UCC_FAST_DIAGNOSTIC_LOCAL_LOOP_BACK = 0x40000000,
+ UCC_FAST_DIAGNOSTIC_AUTO_ECHO = 0x80000000,
+ UCC_FAST_DIAGNOSTIC_LOOP_BACK_AND_ECHO = 0xC0000000
+};
+
+/* UCC fast Sync length (transparent mode only) */
+enum ucc_fast_sync_len {
+ UCC_FAST_SYNC_LEN_NOT_USED = 0x0,
+ UCC_FAST_SYNC_LEN_AUTOMATIC = 0x00004000,
+ UCC_FAST_SYNC_LEN_8_BIT = 0x00008000,
+ UCC_FAST_SYNC_LEN_16_BIT = 0x0000C000
+};
+
+/* UCC fast RTS mode */
+enum ucc_fast_ready_to_send {
+ UCC_FAST_SEND_IDLES_BETWEEN_FRAMES = 0x00000000,
+ UCC_FAST_SEND_FLAGS_BETWEEN_FRAMES = 0x00002000
+};
+
+/* UCC fast receiver decoding mode */
+enum ucc_fast_rx_decoding_method {
+ UCC_FAST_RX_ENCODING_NRZ = 0x00000000,
+ UCC_FAST_RX_ENCODING_NRZI = 0x00000800,
+ UCC_FAST_RX_ENCODING_RESERVED0 = 0x00001000,
+ UCC_FAST_RX_ENCODING_RESERVED1 = 0x00001800
+};
+
+/* UCC fast transmitter encoding mode */
+enum ucc_fast_tx_encoding_method {
+ UCC_FAST_TX_ENCODING_NRZ = 0x00000000,
+ UCC_FAST_TX_ENCODING_NRZI = 0x00000100,
+ UCC_FAST_TX_ENCODING_RESERVED0 = 0x00000200,
+ UCC_FAST_TX_ENCODING_RESERVED1 = 0x00000300
+};
+
+/* UCC fast CRC length */
+enum ucc_fast_transparent_tcrc {
+ UCC_FAST_16_BIT_CRC = 0x00000000,
+ UCC_FAST_CRC_RESERVED0 = 0x00000040,
+ UCC_FAST_32_BIT_CRC = 0x00000080,
+ UCC_FAST_CRC_RESERVED1 = 0x000000C0
+};
+
+/* Fast UCC initialization structure */
+struct ucc_fast_info {
+ int ucc_num;
+ enum qe_clock rx_clock;
+ enum qe_clock tx_clock;
+ u32 regs;
+ int irq;
+ u32 uccm_mask;
+ int bd_mem_part;
+ int brkpt_support;
+ int grant_support;
+ int tsa;
+ int cdp;
+ int cds;
+ int ctsp;
+ int ctss;
+ int tci;
+ int txsy;
+ int rtsm;
+ int revd;
+ int rsyn;
+ u16 max_rx_buf_length;
+ u16 urfs;
+ u16 urfet;
+ u16 urfset;
+ u16 utfs;
+ u16 utfet;
+ u16 utftt;
+ u16 ufpt;
+ enum ucc_fast_channel_protocol_mode mode;
+ enum ucc_fast_transparent_txrx ttx_trx;
+ enum ucc_fast_tx_encoding_method tenc;
+ enum ucc_fast_rx_decoding_method renc;
+ enum ucc_fast_transparent_tcrc tcrc;
+ enum ucc_fast_sync_len synl;
+};
+
+struct ucc_fast_private {
+ struct ucc_fast_info *uf_info;
+ struct ucc_fast __iomem *uf_regs; /* a pointer to the UCC regs. */
+ u32 __iomem *p_ucce; /* a pointer to the event register in memory. */
+ u32 __iomem *p_uccm; /* a pointer to the mask register in memory. */
+#ifdef CONFIG_UGETH_TX_ON_DEMAND
+ u16 __iomem *p_utodr; /* pointer to the transmit on demand register */
+#endif
+ int enabled_tx; /* Whether channel is enabled for Tx (ENT) */
+ int enabled_rx; /* Whether channel is enabled for Rx (ENR) */
+ int stopped_tx; /* Whether channel has been stopped for Tx
+ (STOP_TX, etc.) */
+ int stopped_rx; /* Whether channel has been stopped for Rx */
+ u32 ucc_fast_tx_virtual_fifo_base_offset;/* pointer to base of Tx
+ virtual fifo */
+ u32 ucc_fast_rx_virtual_fifo_base_offset;/* pointer to base of Rx
+ virtual fifo */
+#ifdef STATISTICS
+ u32 tx_frames; /* Transmitted frames counter. */
+ u32 rx_frames; /* Received frames counter (only frames
+ passed to application). */
+ u32 tx_discarded; /* Discarded tx frames counter (frames that
+ were discarded by the driver due to errors).
+ */
+ u32 rx_discarded; /* Discarded rx frames counter (frames that
+ were discarded by the driver due to errors).
+ */
+#endif /* STATISTICS */
+ u16 mrblr; /* maximum receive buffer length */
+};
+
+/* ucc_fast_init
+ * Initializes Fast UCC according to user provided parameters.
+ *
+ * uf_info - (In) pointer to the fast UCC info structure.
+ * uccf_ret - (Out) pointer to the fast UCC structure.
+ */
+int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** uccf_ret);
+
+/* ucc_fast_free
+ * Frees all resources for fast UCC.
+ *
+ * uccf - (In) pointer to the fast UCC structure.
+ */
+void ucc_fast_free(struct ucc_fast_private * uccf);
+
+/* ucc_fast_enable
+ * Enables a fast UCC port.
+ * This routine enables Tx and/or Rx through the General UCC Mode Register.
+ *
+ * uccf - (In) pointer to the fast UCC structure.
+ * mode - (In) TX, RX, or both.
+ */
+void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode);
+
+/* ucc_fast_disable
+ * Disables a fast UCC port.
+ * This routine disables Tx and/or Rx through the General UCC Mode Register.
+ *
+ * uccf - (In) pointer to the fast UCC structure.
+ * mode - (In) TX, RX, or both.
+ */
+void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode);
+
+/* ucc_fast_irq
+ * Handles interrupts on fast UCC.
+ * Called from the general interrupt routine to handle interrupts on fast UCC.
+ *
+ * uccf - (In) pointer to the fast UCC structure.
+ */
+void ucc_fast_irq(struct ucc_fast_private * uccf);
+
+/* ucc_fast_transmit_on_demand
+ * Immediately forces a poll of the transmitter for data to be sent.
+ * Typically, the hardware performs a periodic poll for data that the
+ * transmit routine has set up to be transmitted. In cases where
+ * this polling cycle is not soon enough, this optional routine can
+ * be invoked to force a poll right away, instead. Proper use for
+ * each transmission for which this functionality is desired is to
+ * call the transmit routine and then this routine right after.
+ *
+ * uccf - (In) pointer to the fast UCC structure.
+ */
+void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf);
+
+u32 ucc_fast_get_qe_cr_subblock(int uccf_num);
+
+void ucc_fast_dump_regs(struct ucc_fast_private * uccf);
+
+#endif /* __UCC_FAST_H__ */
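
A hedged lifecycle sketch for the routines above; the demo_* names are invented, uf_info is assumed to have been filled in from platform data, and error handling is abbreviated:

	static struct ucc_fast_private *demo_uccf;

	static int demo_fast_start(struct ucc_fast_info *uf_info)
	{
		int ret = ucc_fast_init(uf_info, &demo_uccf);

		if (ret)
			return ret;
		ucc_fast_enable(demo_uccf, COMM_DIR_RX_AND_TX);
		return 0;
	}

	static void demo_fast_kick_tx(void)
	{
		/* call right after queueing a frame, per the comment above */
		ucc_fast_transmit_on_demand(demo_uccf);
	}

	static void demo_fast_stop(void)
	{
		ucc_fast_disable(demo_uccf, COMM_DIR_RX_AND_TX);
		ucc_fast_free(demo_uccf);
	}
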
diff --git a/arch/powerpc/include/asm/ucc_slow.h b/arch/powerpc/include/asm/ucc_slow.h
new file mode 100644
index 0000000..0980e6a
--- /dev/null
+++ b/arch/powerpc/include/asm/ucc_slow.h
@@ -0,0 +1,290 @@
+/*
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors: Shlomi Gridish <gridish@freescale.com>
+ * Li Yang <leoli@freescale.com>
+ *
+ * Description:
+ * Internal header file for UCC SLOW unit routines.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#ifndef __UCC_SLOW_H__
+#define __UCC_SLOW_H__
+
+#include <linux/kernel.h>
+
+#include <asm/immap_qe.h>
+#include <asm/qe.h>
+
+#include "ucc.h"
+
+/* transmit BD's status */
+#define T_R 0x80000000 /* ready bit */
+#define T_PAD 0x40000000 /* add pads to short frames */
+#define T_W 0x20000000 /* wrap bit */
+#define T_I 0x10000000 /* interrupt on completion */
+#define T_L 0x08000000 /* last */
+
+#define T_A 0x04000000 /* Address - the data transmitted as address
+ chars */
+#define T_TC 0x04000000 /* transmit CRC */
+#define T_CM 0x02000000 /* continuous mode */
+#define T_DEF 0x02000000 /* collision on previous attempt to transmit */
+#define T_P 0x01000000 /* Preamble - send Preamble sequence before
+ data */
+#define T_HB 0x01000000 /* heartbeat */
+#define T_NS 0x00800000 /* No Stop */
+#define T_LC 0x00800000 /* late collision */
+#define T_RL 0x00400000 /* retransmission limit */
+#define T_UN 0x00020000 /* underrun */
+#define T_CT 0x00010000 /* CTS lost */
+#define T_CSL 0x00010000 /* carrier sense lost */
+#define T_RC 0x003c0000 /* retry count */
+
+/* Receive BD's status */
+#define R_E 0x80000000 /* buffer empty */
+#define R_W 0x20000000 /* wrap bit */
+#define R_I 0x10000000 /* interrupt on reception */
+#define R_L 0x08000000 /* last */
+#define R_C 0x08000000 /* the last byte in this buffer is a cntl
+ char */
+#define R_F 0x04000000 /* first */
+#define R_A 0x04000000 /* the first byte in this buffer is address
+ byte */
+#define R_CM 0x02000000 /* continuous mode */
+#define R_ID 0x01000000 /* buffer close on reception of idles */
+#define R_M 0x01000000 /* Frame received because of promiscuous
+ mode */
+#define R_AM 0x00800000 /* Address match */
+#define R_DE 0x00800000 /* Address match */
+#define R_LG 0x00200000 /* Break received */
+#define R_BR 0x00200000 /* Frame length violation */
+#define R_NO 0x00100000 /* Rx Non Octet Aligned Packet */
+#define R_FR 0x00100000 /* Framing Error (no stop bit) character
+ received */
+#define R_PR 0x00080000 /* Parity Error character received */
+#define R_AB 0x00080000 /* Frame Aborted */
+#define R_SH 0x00080000 /* frame is too short */
+#define R_CR 0x00040000 /* CRC Error */
+#define R_OV 0x00020000 /* Overrun */
+#define R_CD 0x00010000 /* CD lost */
+#define R_CL 0x00010000 /* this frame is closed because of a
+ collision */
+
+/* Rx Data buffer must be 4-byte aligned in most cases. */
+#define UCC_SLOW_RX_ALIGN 4
+#define UCC_SLOW_MRBLR_ALIGNMENT 4
+#define UCC_SLOW_PRAM_SIZE 0x100
+#define ALIGNMENT_OF_UCC_SLOW_PRAM 64
+
+/* UCC Slow Channel Protocol Mode */
+enum ucc_slow_channel_protocol_mode {
+ UCC_SLOW_CHANNEL_PROTOCOL_MODE_QMC = 0x00000002,
+ UCC_SLOW_CHANNEL_PROTOCOL_MODE_UART = 0x00000004,
+ UCC_SLOW_CHANNEL_PROTOCOL_MODE_BISYNC = 0x00000008,
+};
+
+/* UCC Slow Transparent Transmit CRC (TCRC) */
+enum ucc_slow_transparent_tcrc {
+ /* 16-bit CCITT CRC (HDLC). (X16 + X12 + X5 + 1) */
+ UCC_SLOW_TRANSPARENT_TCRC_CCITT_CRC16 = 0x00000000,
+ /* CRC16 (BISYNC). (X16 + X15 + X2 + 1) */
+ UCC_SLOW_TRANSPARENT_TCRC_CRC16 = 0x00004000,
+ /* 32-bit CCITT CRC (Ethernet and HDLC) */
+ UCC_SLOW_TRANSPARENT_TCRC_CCITT_CRC32 = 0x00008000,
+};
+
+/* UCC Slow oversampling rate for transmitter (TDCR) */
+enum ucc_slow_tx_oversampling_rate {
+ /* 1x clock mode */
+ UCC_SLOW_OVERSAMPLING_RATE_TX_TDCR_1 = 0x00000000,
+ /* 8x clock mode */
+ UCC_SLOW_OVERSAMPLING_RATE_TX_TDCR_8 = 0x00010000,
+ /* 16x clock mode */
+ UCC_SLOW_OVERSAMPLING_RATE_TX_TDCR_16 = 0x00020000,
+ /* 32x clock mode */
+ UCC_SLOW_OVERSAMPLING_RATE_TX_TDCR_32 = 0x00030000,
+};
+
+/* UCC Slow Oversampling rate for receiver (RDCR)
+*/
+enum ucc_slow_rx_oversampling_rate {
+ /* 1x clock mode */
+ UCC_SLOW_OVERSAMPLING_RATE_RX_RDCR_1 = 0x00000000,
+ /* 8x clock mode */
+ UCC_SLOW_OVERSAMPLING_RATE_RX_RDCR_8 = 0x00004000,
+ /* 16x clock mode */
+ UCC_SLOW_OVERSAMPLING_RATE_RX_RDCR_16 = 0x00008000,
+ /* 32x clock mode */
+ UCC_SLOW_OVERSAMPLING_RATE_RX_RDCR_32 = 0x0000c000,
+};
+
+/* UCC Slow Transmitter encoding method (TENC)
+*/
+enum ucc_slow_tx_encoding_method {
+ UCC_SLOW_TRANSMITTER_ENCODING_METHOD_TENC_NRZ = 0x00000000,
+ UCC_SLOW_TRANSMITTER_ENCODING_METHOD_TENC_NRZI = 0x00000100
+};
+
+/* UCC Slow Receiver decoding method (RENC)
+*/
+enum ucc_slow_rx_decoding_method {
+ UCC_SLOW_RECEIVER_DECODING_METHOD_RENC_NRZ = 0x00000000,
+ UCC_SLOW_RECEIVER_DECODING_METHOD_RENC_NRZI = 0x00000800
+};
+
+/* UCC Slow Diagnostic mode (DIAG)
+*/
+enum ucc_slow_diag_mode {
+ UCC_SLOW_DIAG_MODE_NORMAL = 0x00000000,
+ UCC_SLOW_DIAG_MODE_LOOPBACK = 0x00000040,
+ UCC_SLOW_DIAG_MODE_ECHO = 0x00000080,
+ UCC_SLOW_DIAG_MODE_LOOPBACK_ECHO = 0x000000c0
+};
+
+struct ucc_slow_info {
+ int ucc_num;
+ int protocol; /* QE_CR_PROTOCOL_xxx */
+ enum qe_clock rx_clock;
+ enum qe_clock tx_clock;
+ phys_addr_t regs;
+ int irq;
+ u16 uccm_mask;
+ int data_mem_part;
+ int init_tx;
+ int init_rx;
+ u32 tx_bd_ring_len;
+ u32 rx_bd_ring_len;
+ int rx_interrupts;
+ int brkpt_support;
+ int grant_support;
+ int tsa;
+ int cdp;
+ int cds;
+ int ctsp;
+ int ctss;
+ int rinv;
+ int tinv;
+ int rtsm;
+ int rfw;
+ int tci;
+ int tend;
+ int tfl;
+ int txsy;
+ u16 max_rx_buf_length;
+ enum ucc_slow_transparent_tcrc tcrc;
+ enum ucc_slow_channel_protocol_mode mode;
+ enum ucc_slow_diag_mode diag;
+ enum ucc_slow_tx_oversampling_rate tdcr;
+ enum ucc_slow_rx_oversampling_rate rdcr;
+ enum ucc_slow_tx_encoding_method tenc;
+ enum ucc_slow_rx_decoding_method renc;
+};
+
+struct ucc_slow_private {
+ struct ucc_slow_info *us_info;
+ struct ucc_slow __iomem *us_regs; /* Ptr to memory map of UCC regs */
+ struct ucc_slow_pram *us_pram; /* a pointer to the parameter RAM */
+ u32 us_pram_offset;
+ int enabled_tx; /* Whether channel is enabled for Tx (ENT) */
+ int enabled_rx; /* Whether channel is enabled for Rx (ENR) */
+ int stopped_tx; /* Whether channel has been stopped for Tx
+ (STOP_TX, etc.) */
+ int stopped_rx; /* Whether channel has been stopped for Rx */
+ struct list_head confQ; /* frames passed to chip waiting for tx */
+ u32 first_tx_bd_mask; /* mask is used in Tx routine to save status
+ and length for first BD in a frame */
+ u32 tx_base_offset; /* first BD in Tx BD table offset (In MURAM) */
+ u32 rx_base_offset; /* first BD in Rx BD table offset (In MURAM) */
+ struct qe_bd *confBd; /* next BD for confirm after Tx */
+ struct qe_bd *tx_bd; /* next BD for new Tx request */
+ struct qe_bd *rx_bd; /* next BD to collect after Rx */
+ void *p_rx_frame; /* accumulating receive frame */
+ u16 *p_ucce; /* a pointer to the event register in memory.
+ */
+ u16 *p_uccm; /* a pointer to the mask register in memory */
+ u16 saved_uccm; /* a saved mask for the RX Interrupt bits */
+#ifdef STATISTICS
+ u32 tx_frames; /* Transmitted frames counters */
+ u32 rx_frames; /* Received frames counters (only frames
+ passed to application) */
+ u32 rx_discarded; /* Discarded frames counters (frames that
+ were discarded by the driver due to
+ errors) */
+#endif /* STATISTICS */
+};
+
+/* ucc_slow_init
+ * Initializes Slow UCC according to provided parameters.
+ *
+ * us_info - (In) pointer to the slow UCC info structure.
+ * uccs_ret - (Out) pointer to the slow UCC structure.
+ */
+int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** uccs_ret);
+
+/* ucc_slow_free
+ * Frees all resources for slow UCC.
+ *
+ * uccs - (In) pointer to the slow UCC structure.
+ */
+void ucc_slow_free(struct ucc_slow_private * uccs);
+
+/* ucc_slow_enable
+ * Enables a slow UCC port.
+ * This routine enables Tx and/or Rx through the General UCC Mode Register.
+ *
+ * uccs - (In) pointer to the slow UCC structure.
+ * mode - (In) TX, RX, or both.
+ */
+void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode);
+
+/* ucc_slow_disable
+ * Disables a slow UCC port.
+ * This routine disables Tx and/or Rx through the General UCC Mode Register.
+ *
+ * uccs - (In) pointer to the slow UCC structure.
+ * mode - (In) TX, RX, or both.
+ */
+void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode);
+
+/* ucc_slow_poll_transmitter_now
+ * Immediately forces a poll of the transmitter for data to be sent.
+ * Typically, the hardware performs a periodic poll for data that the
+ * transmit routine has set up to be transmitted. In cases where
+ * this polling cycle is not soon enough, this optional routine can
+ * be invoked to force a poll right away, instead. Proper use for
+ * each transmission for which this functionality is desired is to
+ * call the transmit routine and then this routine right after.
+ *
+ * uccs - (In) pointer to the slow UCC structure.
+ */
+void ucc_slow_poll_transmitter_now(struct ucc_slow_private * uccs);
+
+/* ucc_slow_graceful_stop_tx
+ * Smoothly stops transmission on a specified slow UCC.
+ *
+ * uccs - (In) pointer to the slow UCC structure.
+ */
+void ucc_slow_graceful_stop_tx(struct ucc_slow_private * uccs);
+
+/* ucc_slow_stop_tx
+ * Stops transmission on a specified slow UCC.
+ *
+ * uccs - (In) pointer to the slow UCC structure.
+ */
+void ucc_slow_stop_tx(struct ucc_slow_private * uccs);
+
+/* ucc_slow_restart_tx
+ * Restarts transmitting on a specified slow UCC.
+ *
+ * uccs - (In) pointer to the slow UCC structure.
+ */
+void ucc_slow_restart_tx(struct ucc_slow_private *uccs);
+
+u32 ucc_slow_get_qe_cr_subblock(int uccs_num);
+
+#endif /* __UCC_SLOW_H__ */
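
The same pattern applies to the slow UCC routines; a hedged sketch with hypothetical demo_* wrappers (us_info assumed pre-populated, comm_dir values from asm/qe.h):

	static int demo_slow_start(struct ucc_slow_info *us_info,
				   struct ucc_slow_private **uccs)
	{
		int ret = ucc_slow_init(us_info, uccs);

		if (ret)
			return ret;
		ucc_slow_enable(*uccs, COMM_DIR_RX_AND_TX);
		return 0;
	}

	static void demo_slow_stop(struct ucc_slow_private *uccs)
	{
		ucc_slow_graceful_stop_tx(uccs);	/* drain Tx before disabling */
		ucc_slow_disable(uccs, COMM_DIR_RX_AND_TX);
		ucc_slow_free(uccs);
	}
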
diff --git a/arch/powerpc/include/asm/ucontext.h b/arch/powerpc/include/asm/ucontext.h
new file mode 100644
index 0000000..d9a4ddf
--- /dev/null
+++ b/arch/powerpc/include/asm/ucontext.h
@@ -0,0 +1,40 @@
+#ifndef _ASM_POWERPC_UCONTEXT_H
+#define _ASM_POWERPC_UCONTEXT_H
+
+#ifdef __powerpc64__
+#include <asm/sigcontext.h>
+#else
+#include <asm/elf.h>
+#endif
+#include <asm/signal.h>
+
+#ifndef __powerpc64__
+struct mcontext {
+ elf_gregset_t mc_gregs;
+ elf_fpregset_t mc_fregs;
+ unsigned long mc_pad[2];
+ elf_vrregset_t mc_vregs __attribute__((__aligned__(16)));
+};
+#endif
+
+struct ucontext {
+ unsigned long uc_flags;
+ struct ucontext __user *uc_link;
+ stack_t uc_stack;
+#ifndef __powerpc64__
+ int uc_pad[7];
+ struct mcontext __user *uc_regs;/* points to uc_mcontext field */
+#endif
+ sigset_t uc_sigmask;
+ /* glibc has 1024-bit signal masks, ours are 64-bit */
+#ifdef __powerpc64__
+ sigset_t __unused[15]; /* Allow for uc_sigmask growth */
+ struct sigcontext uc_mcontext; /* last for extensibility */
+#else
+ int uc_maskext[30];
+ int uc_pad2[3];
+ struct mcontext uc_mcontext;
+#endif
+};
+
+#endif /* _ASM_POWERPC_UCONTEXT_H */
diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h
new file mode 100644
index 0000000..6418cee
--- /dev/null
+++ b/arch/powerpc/include/asm/udbg.h
@@ -0,0 +1,55 @@
+/*
+ * (c) 2001, 2006 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_POWERPC_UDBG_H
+#define _ASM_POWERPC_UDBG_H
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+#include <linux/init.h>
+
+extern void (*udbg_putc)(char c);
+extern int (*udbg_getc)(void);
+extern int (*udbg_getc_poll)(void);
+
+extern void udbg_puts(const char *s);
+extern int udbg_write(const char *s, int n);
+extern int udbg_read(char *buf, int buflen);
+
+extern void register_early_udbg_console(void);
+extern void udbg_printf(const char *fmt, ...)
+ __attribute__ ((format (printf, 1, 2)));
+extern void udbg_progress(char *s, unsigned short hex);
+
+extern void udbg_init_uart(void __iomem *comport, unsigned int speed,
+ unsigned int clock);
+extern unsigned int udbg_probe_uart_speed(void __iomem *comport,
+ unsigned int clock);
+
+struct device_node;
+extern void udbg_scc_init(int force_scc);
+extern int udbg_adb_init(int force_btext);
+extern void udbg_adb_init_early(void);
+
+extern void __init udbg_early_init(void);
+extern void __init udbg_init_debug_lpar(void);
+extern void __init udbg_init_pmac_realmode(void);
+extern void __init udbg_init_maple_realmode(void);
+extern void __init udbg_init_pas_realmode(void);
+extern void __init udbg_init_iseries(void);
+extern void __init udbg_init_rtas_panel(void);
+extern void __init udbg_init_rtas_console(void);
+extern void __init udbg_init_debug_beat(void);
+extern void __init udbg_init_btext(void);
+extern void __init udbg_init_44x_as1(void);
+extern void __init udbg_init_40x_realmode(void);
+extern void __init udbg_init_cpm(void);
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_UDBG_H */
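
A hedged sketch of how a platform could hook the early-debug output path declared above; demo_board_putc() and its polled UART are hypothetical:

	/* Sketch: point udbg_putc at a polled output routine, after which
	 * udbg_printf() and the early console produce output before the
	 * real console is registered.
	 */
	static void demo_board_putc(char c)
	{
		/* poll a board-specific UART data register here (omitted) */
	}

	void __init demo_board_udbg_init(void)
	{
		udbg_putc = demo_board_putc;
		register_early_udbg_console();
		udbg_printf("early debug up on %s\n", "demo-board");
	}
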
diff --git a/arch/powerpc/include/asm/uic.h b/arch/powerpc/include/asm/uic.h
new file mode 100644
index 0000000..597edfc
--- /dev/null
+++ b/arch/powerpc/include/asm/uic.h
@@ -0,0 +1,21 @@
+/*
+ * IBM PPC4xx UIC external definitions and structure.
+ *
+ * Maintainer: David Gibson <dwg@au1.ibm.com>
+ * Copyright 2007 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#ifndef _ASM_POWERPC_UIC_H
+#define _ASM_POWERPC_UIC_H
+
+#ifdef __KERNEL__
+
+extern void __init uic_init_tree(void);
+extern unsigned int uic_get_irq(void);
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_UIC_H */
diff --git a/arch/powerpc/include/asm/unaligned.h b/arch/powerpc/include/asm/unaligned.h
new file mode 100644
index 0000000..5f1b1e3
--- /dev/null
+++ b/arch/powerpc/include/asm/unaligned.h
@@ -0,0 +1,16 @@
+#ifndef _ASM_POWERPC_UNALIGNED_H
+#define _ASM_POWERPC_UNALIGNED_H
+
+#ifdef __KERNEL__
+
+/*
+ * The PowerPC can do unaligned accesses itself in big endian mode.
+ */
+#include <linux/unaligned/access_ok.h>
+#include <linux/unaligned/generic.h>
+
+#define get_unaligned __get_unaligned_be
+#define put_unaligned __put_unaligned_be
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_UNALIGNED_H */
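
A hedged example of the accessors above; the packet layout (a 32-bit field at byte offset 2) is invented, and u8/u32 are assumed from <linux/types.h>:

	/* Sketch: read a 32-bit field that may sit at an odd offset inside a
	 * received buffer; get_unaligned() hides the (non-)alignment handling.
	 */
	static inline u32 demo_frame_len(const u8 *pkt)
	{
		return get_unaligned((const u32 *)(pkt + 2));
	}
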
diff --git a/arch/powerpc/include/asm/uninorth.h b/arch/powerpc/include/asm/uninorth.h
new file mode 100644
index 0000000..f737732
--- /dev/null
+++ b/arch/powerpc/include/asm/uninorth.h
@@ -0,0 +1,229 @@
+/*
+ * uninorth.h: definitions for using the "UniNorth" host bridge chip
+ * from Apple. This chip is used on "Core99" machines
+ * This also includes U2 used on more recent MacRISC2/3
+ * machines and U3 (G5)
+ *
+ */
+#ifdef __KERNEL__
+#ifndef __ASM_UNINORTH_H__
+#define __ASM_UNINORTH_H__
+
+/*
+ * Uni-N and U3 config space reg. definitions
+ *
+ * (Little endian)
+ */
+
+/* Address ranges selection. This one should work with Bandit too */
+/* Not U3 */
+#define UNI_N_ADDR_SELECT 0x48
+#define UNI_N_ADDR_COARSE_MASK 0xffff0000 /* 256Mb regions at *0000000 */
+#define UNI_N_ADDR_FINE_MASK 0x0000ffff /* 16Mb regions at f*000000 */
+
+/* AGP registers */
+/* Not U3 */
+#define UNI_N_CFG_GART_BASE 0x8c
+#define UNI_N_CFG_AGP_BASE 0x90
+#define UNI_N_CFG_GART_CTRL 0x94
+#define UNI_N_CFG_INTERNAL_STATUS 0x98
+#define UNI_N_CFG_GART_DUMMY_PAGE 0xa4
+
+/* UNI_N_CFG_GART_CTRL bits definitions */
+#define UNI_N_CFG_GART_INVAL 0x00000001
+#define UNI_N_CFG_GART_ENABLE 0x00000100
+#define UNI_N_CFG_GART_2xRESET 0x00010000
+#define UNI_N_CFG_GART_DISSBADET 0x00020000
+/* The following seem to be used only on U3 <j.glisse@gmail.com> */
+#define U3_N_CFG_GART_SYNCMODE 0x00040000
+#define U3_N_CFG_GART_PERFRD 0x00080000
+#define U3_N_CFG_GART_B2BGNT 0x00200000
+#define U3_N_CFG_GART_FASTDDR 0x00400000
+
+/* My understanding of UniNorth AGP as of UniNorth rev 1.0x;
+ * revision 1.5 (x4 AGP) may need further changes.
+ *
+ * AGP_BASE register contains the base address of the AGP aperture on
+ * the AGP bus. It doesn't seem to be visible to the CPU as of UniNorth 1.x,
+ * even if decoding of this address range is enabled in the address select
+ * register. Apparently, the only supported bases are 256Mb multiples
+ * (high 4 bits of that register).
+ *
+ * The GART_BASE register appears to contain the physical address of the GART
+ * in system memory in the high address bits (page aligned), and the
+ * GART size in the low order bits (number of GART pages)
+ *
+ * The GART format itself is one 32-bit word per physical memory page.
+ * This word contains, in little-endian format (!!!), the physical address
+ * of the page in the high bits, and what appears to be an "enable" bit
+ * in the LSB bit (0) that must be set to 1 when the entry is valid.
+ *
+ * Obviously, the GART is not cache coherent and so any change to it
+ * must be flushed to memory (or maybe just make the GART space
+ * non-cacheable). AGP memory itself doesn't seem to be cache coherent either.
+ *
+ * In order to invalidate the GART (which is probably necessary to inval
+ * the bridge internal TLBs), the following sequence has to be written,
+ * in order, to the GART_CTRL register:
+ *
+ * UNI_N_CFG_GART_ENABLE | UNI_N_CFG_GART_INVAL
+ * UNI_N_CFG_GART_ENABLE
+ * UNI_N_CFG_GART_ENABLE | UNI_N_CFG_GART_2xRESET
+ * UNI_N_CFG_GART_ENABLE
+ *
+ * As far as AGP "features" are concerned, it looks like fast write may
+ * not be supported but this has to be confirmed.
+ *
+ * Turning on AGP seems to require a double invalidate operation, one before
+ * setting the AGP command register, one after.
+ *
+ * Turning off AGP seems to require the following sequence: first wait
+ * for the AGP to be idle by reading the internal status register, then
+ * write in that order to the GART_CTRL register:
+ *
+ * UNI_N_CFG_GART_ENABLE | UNI_N_CFG_GART_INVAL
+ * 0
+ * UNI_N_CFG_GART_2xRESET
+ * 0
+ */
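
The invalidate sequence spelled out in the comment above, as a hedged sketch; uninorth_gart_ctrl_write() is a hypothetical helper that performs the config-space write of UNI_N_CFG_GART_CTRL and is not part of this header:

	/* Hypothetical accessor for the GART_CTRL config register. */
	extern void uninorth_gart_ctrl_write(u32 val);

	static void demo_uninorth_gart_invalidate(void)
	{
		uninorth_gart_ctrl_write(UNI_N_CFG_GART_ENABLE | UNI_N_CFG_GART_INVAL);
		uninorth_gart_ctrl_write(UNI_N_CFG_GART_ENABLE);
		uninorth_gart_ctrl_write(UNI_N_CFG_GART_ENABLE | UNI_N_CFG_GART_2xRESET);
		uninorth_gart_ctrl_write(UNI_N_CFG_GART_ENABLE);
	}
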
+
+/*
+ * Uni-N memory mapped reg. definitions
+ *
+ * Those registers are Big-Endian !!
+ *
+ * Their meanings come from Darwin and/or from experiments I made with
+ * the bootrom; I'm not sure about their exact meanings yet.
+ *
+ */
+
+/* Version of the UniNorth chip */
+#define UNI_N_VERSION 0x0000 /* Known versions: 3,7 and 8 */
+
+#define UNI_N_VERSION_107 0x0003 /* 1.0.7 */
+#define UNI_N_VERSION_10A 0x0007 /* 1.0.10 */
+#define UNI_N_VERSION_150 0x0011 /* 1.5 */
+#define UNI_N_VERSION_200 0x0024 /* 2.0 */
+#define UNI_N_VERSION_PANGEA 0x00C0 /* Integrated U1 + K */
+#define UNI_N_VERSION_INTREPID 0x00D2 /* Integrated U2 + K */
+#define UNI_N_VERSION_300 0x0030 /* 3.0 (U3 on G5) */
+
+/* This register is used to enable/disable various clocks */
+#define UNI_N_CLOCK_CNTL 0x0020
+#define UNI_N_CLOCK_CNTL_PCI 0x00000001 /* PCI2 clock control */
+#define UNI_N_CLOCK_CNTL_GMAC 0x00000002 /* GMAC clock control */
+#define UNI_N_CLOCK_CNTL_FW 0x00000004 /* FireWire clock control */
+#define UNI_N_CLOCK_CNTL_ATA100 0x00000010 /* ATA-100 clock control (U2) */
+
+/* Power Management control */
+#define UNI_N_POWER_MGT 0x0030
+#define UNI_N_POWER_MGT_NORMAL 0x00
+#define UNI_N_POWER_MGT_IDLE2 0x01
+#define UNI_N_POWER_MGT_SLEEP 0x02
+
+/* This register is configured by Darwin depending on the UniN
+ * revision
+ */
+#define UNI_N_ARB_CTRL 0x0040
+#define UNI_N_ARB_CTRL_QACK_DELAY_SHIFT 15
+#define UNI_N_ARB_CTRL_QACK_DELAY_MASK 0x0e1f8000
+#define UNI_N_ARB_CTRL_QACK_DELAY 0x30
+#define UNI_N_ARB_CTRL_QACK_DELAY105 0x00
+
+/* This one _might_ return the CPU number of the CPU reading it;
+ * the bootROM decides whether to boot or to sleep/spinloop depending
+ * on this register being 0 or not
+ */
+#define UNI_N_CPU_NUMBER 0x0050
+
+/* This register appears to be read by the bootROM to decide what
+ * to do on a non-recoverable reset (powerup or wakeup)
+ */
+#define UNI_N_HWINIT_STATE 0x0070
+#define UNI_N_HWINIT_STATE_SLEEPING 0x01
+#define UNI_N_HWINIT_STATE_RUNNING 0x02
+/* This last bit appears to be used by the bootROM to know that the second
+ * CPU has started and will enter its sleep loop with IP=0
+ */
+#define UNI_N_HWINIT_STATE_CPU1_FLAG 0x10000000
+
+/* This register controls AACK delay, which is set when 2004 iBook/PowerBook
+ * is in low speed mode.
+ */
+#define UNI_N_AACK_DELAY 0x0100
+#define UNI_N_AACK_DELAY_ENABLE 0x00000001
+
+/* Clock status for Intrepid */
+#define UNI_N_CLOCK_STOP_STATUS0 0x0150
+#define UNI_N_CLOCK_STOPPED_EXTAGP 0x00200000
+#define UNI_N_CLOCK_STOPPED_AGPDEL 0x00100000
+#define UNI_N_CLOCK_STOPPED_I2S0_45_49 0x00080000
+#define UNI_N_CLOCK_STOPPED_I2S0_18 0x00040000
+#define UNI_N_CLOCK_STOPPED_I2S1_45_49 0x00020000
+#define UNI_N_CLOCK_STOPPED_I2S1_18 0x00010000
+#define UNI_N_CLOCK_STOPPED_TIMER 0x00008000
+#define UNI_N_CLOCK_STOPPED_SCC_RTCLK18 0x00004000
+#define UNI_N_CLOCK_STOPPED_SCC_RTCLK32 0x00002000
+#define UNI_N_CLOCK_STOPPED_SCC_VIA32 0x00001000
+#define UNI_N_CLOCK_STOPPED_SCC_SLOT0 0x00000800
+#define UNI_N_CLOCK_STOPPED_SCC_SLOT1 0x00000400
+#define UNI_N_CLOCK_STOPPED_SCC_SLOT2 0x00000200
+#define UNI_N_CLOCK_STOPPED_PCI_FBCLKO 0x00000100
+#define UNI_N_CLOCK_STOPPED_VEO0 0x00000080
+#define UNI_N_CLOCK_STOPPED_VEO1 0x00000040
+#define UNI_N_CLOCK_STOPPED_USB0 0x00000020
+#define UNI_N_CLOCK_STOPPED_USB1 0x00000010
+#define UNI_N_CLOCK_STOPPED_USB2 0x00000008
+#define UNI_N_CLOCK_STOPPED_32 0x00000004
+#define UNI_N_CLOCK_STOPPED_45 0x00000002
+#define UNI_N_CLOCK_STOPPED_49 0x00000001
+
+#define UNI_N_CLOCK_STOP_STATUS1 0x0160
+#define UNI_N_CLOCK_STOPPED_PLL4REF 0x00080000
+#define UNI_N_CLOCK_STOPPED_CPUDEL 0x00040000
+#define UNI_N_CLOCK_STOPPED_CPU 0x00020000
+#define UNI_N_CLOCK_STOPPED_BUF_REFCKO 0x00010000
+#define UNI_N_CLOCK_STOPPED_PCI2 0x00008000
+#define UNI_N_CLOCK_STOPPED_FW 0x00004000
+#define UNI_N_CLOCK_STOPPED_GB 0x00002000
+#define UNI_N_CLOCK_STOPPED_ATA66 0x00001000
+#define UNI_N_CLOCK_STOPPED_ATA100 0x00000800
+#define UNI_N_CLOCK_STOPPED_MAX 0x00000400
+#define UNI_N_CLOCK_STOPPED_PCI1 0x00000200
+#define UNI_N_CLOCK_STOPPED_KLPCI 0x00000100
+#define UNI_N_CLOCK_STOPPED_USB0PCI 0x00000080
+#define UNI_N_CLOCK_STOPPED_USB1PCI 0x00000040
+#define UNI_N_CLOCK_STOPPED_USB2PCI 0x00000020
+#define UNI_N_CLOCK_STOPPED_7PCI1 0x00000008
+#define UNI_N_CLOCK_STOPPED_AGP 0x00000004
+#define UNI_N_CLOCK_STOPPED_PCI0 0x00000002
+#define UNI_N_CLOCK_STOPPED_18 0x00000001
+
+/* Intrepid register used by OF's do-platform-clockspreading */
+#define UNI_N_CLOCK_SPREADING 0x190
+
+/* Uninorth 1.5 rev. has additional perf. monitor registers at 0xf00-0xf50 */
+
+
+/*
+ * U3 specific registers
+ */
+
+
+/* U3 Toggle */
+#define U3_TOGGLE_REG 0x00e0
+#define U3_PMC_START_STOP 0x0001
+#define U3_MPIC_RESET 0x0002
+#define U3_MPIC_OUTPUT_ENABLE 0x0004
+
+/* U3 API PHY Config 1 */
+#define U3_API_PHY_CONFIG_1 0x23030
+
+/* U3 HyperTransport registers */
+#define U3_HT_CONFIG_BASE 0x70000
+#define U3_HT_LINK_COMMAND 0x100
+#define U3_HT_LINK_CONFIG 0x110
+#define U3_HT_LINK_FREQ 0x120
+
+#endif /* __ASM_UNINORTH_H__ */
+#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
new file mode 100644
index 0000000..e07d0c7
--- /dev/null
+++ b/arch/powerpc/include/asm/unistd.h
@@ -0,0 +1,398 @@
+#ifndef _ASM_POWERPC_UNISTD_H_
+#define _ASM_POWERPC_UNISTD_H_
+
+/*
+ * This file contains the system call numbers.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define __NR_restart_syscall 0
+#define __NR_exit 1
+#define __NR_fork 2
+#define __NR_read 3
+#define __NR_write 4
+#define __NR_open 5
+#define __NR_close 6
+#define __NR_waitpid 7
+#define __NR_creat 8
+#define __NR_link 9
+#define __NR_unlink 10
+#define __NR_execve 11
+#define __NR_chdir 12
+#define __NR_time 13
+#define __NR_mknod 14
+#define __NR_chmod 15
+#define __NR_lchown 16
+#define __NR_break 17
+#define __NR_oldstat 18
+#define __NR_lseek 19
+#define __NR_getpid 20
+#define __NR_mount 21
+#define __NR_umount 22
+#define __NR_setuid 23
+#define __NR_getuid 24
+#define __NR_stime 25
+#define __NR_ptrace 26
+#define __NR_alarm 27
+#define __NR_oldfstat 28
+#define __NR_pause 29
+#define __NR_utime 30
+#define __NR_stty 31
+#define __NR_gtty 32
+#define __NR_access 33
+#define __NR_nice 34
+#define __NR_ftime 35
+#define __NR_sync 36
+#define __NR_kill 37
+#define __NR_rename 38
+#define __NR_mkdir 39
+#define __NR_rmdir 40
+#define __NR_dup 41
+#define __NR_pipe 42
+#define __NR_times 43
+#define __NR_prof 44
+#define __NR_brk 45
+#define __NR_setgid 46
+#define __NR_getgid 47
+#define __NR_signal 48
+#define __NR_geteuid 49
+#define __NR_getegid 50
+#define __NR_acct 51
+#define __NR_umount2 52
+#define __NR_lock 53
+#define __NR_ioctl 54
+#define __NR_fcntl 55
+#define __NR_mpx 56
+#define __NR_setpgid 57
+#define __NR_ulimit 58
+#define __NR_oldolduname 59
+#define __NR_umask 60
+#define __NR_chroot 61
+#define __NR_ustat 62
+#define __NR_dup2 63
+#define __NR_getppid 64
+#define __NR_getpgrp 65
+#define __NR_setsid 66
+#define __NR_sigaction 67
+#define __NR_sgetmask 68
+#define __NR_ssetmask 69
+#define __NR_setreuid 70
+#define __NR_setregid 71
+#define __NR_sigsuspend 72
+#define __NR_sigpending 73
+#define __NR_sethostname 74
+#define __NR_setrlimit 75
+#define __NR_getrlimit 76
+#define __NR_getrusage 77
+#define __NR_gettimeofday 78
+#define __NR_settimeofday 79
+#define __NR_getgroups 80
+#define __NR_setgroups 81
+#define __NR_select 82
+#define __NR_symlink 83
+#define __NR_oldlstat 84
+#define __NR_readlink 85
+#define __NR_uselib 86
+#define __NR_swapon 87
+#define __NR_reboot 88
+#define __NR_readdir 89
+#define __NR_mmap 90
+#define __NR_munmap 91
+#define __NR_truncate 92
+#define __NR_ftruncate 93
+#define __NR_fchmod 94
+#define __NR_fchown 95
+#define __NR_getpriority 96
+#define __NR_setpriority 97
+#define __NR_profil 98
+#define __NR_statfs 99
+#define __NR_fstatfs 100
+#define __NR_ioperm 101
+#define __NR_socketcall 102
+#define __NR_syslog 103
+#define __NR_setitimer 104
+#define __NR_getitimer 105
+#define __NR_stat 106
+#define __NR_lstat 107
+#define __NR_fstat 108
+#define __NR_olduname 109
+#define __NR_iopl 110
+#define __NR_vhangup 111
+#define __NR_idle 112
+#define __NR_vm86 113
+#define __NR_wait4 114
+#define __NR_swapoff 115
+#define __NR_sysinfo 116
+#define __NR_ipc 117
+#define __NR_fsync 118
+#define __NR_sigreturn 119
+#define __NR_clone 120
+#define __NR_setdomainname 121
+#define __NR_uname 122
+#define __NR_modify_ldt 123
+#define __NR_adjtimex 124
+#define __NR_mprotect 125
+#define __NR_sigprocmask 126
+#define __NR_create_module 127
+#define __NR_init_module 128
+#define __NR_delete_module 129
+#define __NR_get_kernel_syms 130
+#define __NR_quotactl 131
+#define __NR_getpgid 132
+#define __NR_fchdir 133
+#define __NR_bdflush 134
+#define __NR_sysfs 135
+#define __NR_personality 136
+#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
+#define __NR_setfsuid 138
+#define __NR_setfsgid 139
+#define __NR__llseek 140
+#define __NR_getdents 141
+#define __NR__newselect 142
+#define __NR_flock 143
+#define __NR_msync 144
+#define __NR_readv 145
+#define __NR_writev 146
+#define __NR_getsid 147
+#define __NR_fdatasync 148
+#define __NR__sysctl 149
+#define __NR_mlock 150
+#define __NR_munlock 151
+#define __NR_mlockall 152
+#define __NR_munlockall 153
+#define __NR_sched_setparam 154
+#define __NR_sched_getparam 155
+#define __NR_sched_setscheduler 156
+#define __NR_sched_getscheduler 157
+#define __NR_sched_yield 158
+#define __NR_sched_get_priority_max 159
+#define __NR_sched_get_priority_min 160
+#define __NR_sched_rr_get_interval 161
+#define __NR_nanosleep 162
+#define __NR_mremap 163
+#define __NR_setresuid 164
+#define __NR_getresuid 165
+#define __NR_query_module 166
+#define __NR_poll 167
+#define __NR_nfsservctl 168
+#define __NR_setresgid 169
+#define __NR_getresgid 170
+#define __NR_prctl 171
+#define __NR_rt_sigreturn 172
+#define __NR_rt_sigaction 173
+#define __NR_rt_sigprocmask 174
+#define __NR_rt_sigpending 175
+#define __NR_rt_sigtimedwait 176
+#define __NR_rt_sigqueueinfo 177
+#define __NR_rt_sigsuspend 178
+#define __NR_pread64 179
+#define __NR_pwrite64 180
+#define __NR_chown 181
+#define __NR_getcwd 182
+#define __NR_capget 183
+#define __NR_capset 184
+#define __NR_sigaltstack 185
+#define __NR_sendfile 186
+#define __NR_getpmsg 187 /* some people actually want streams */
+#define __NR_putpmsg 188 /* some people actually want streams */
+#define __NR_vfork 189
+#define __NR_ugetrlimit 190 /* SuS compliant getrlimit */
+#define __NR_readahead 191
+#ifndef __powerpc64__ /* these are 32-bit only */
+#define __NR_mmap2 192
+#define __NR_truncate64 193
+#define __NR_ftruncate64 194
+#define __NR_stat64 195
+#define __NR_lstat64 196
+#define __NR_fstat64 197
+#endif
+#define __NR_pciconfig_read 198
+#define __NR_pciconfig_write 199
+#define __NR_pciconfig_iobase 200
+#define __NR_multiplexer 201
+#define __NR_getdents64 202
+#define __NR_pivot_root 203
+#ifndef __powerpc64__
+#define __NR_fcntl64 204
+#endif
+#define __NR_madvise 205
+#define __NR_mincore 206
+#define __NR_gettid 207
+#define __NR_tkill 208
+#define __NR_setxattr 209
+#define __NR_lsetxattr 210
+#define __NR_fsetxattr 211
+#define __NR_getxattr 212
+#define __NR_lgetxattr 213
+#define __NR_fgetxattr 214
+#define __NR_listxattr 215
+#define __NR_llistxattr 216
+#define __NR_flistxattr 217
+#define __NR_removexattr 218
+#define __NR_lremovexattr 219
+#define __NR_fremovexattr 220
+#define __NR_futex 221
+#define __NR_sched_setaffinity 222
+#define __NR_sched_getaffinity 223
+/* 224 currently unused */
+#define __NR_tuxcall 225
+#ifndef __powerpc64__
+#define __NR_sendfile64 226
+#endif
+#define __NR_io_setup 227
+#define __NR_io_destroy 228
+#define __NR_io_getevents 229
+#define __NR_io_submit 230
+#define __NR_io_cancel 231
+#define __NR_set_tid_address 232
+#define __NR_fadvise64 233
+#define __NR_exit_group 234
+#define __NR_lookup_dcookie 235
+#define __NR_epoll_create 236
+#define __NR_epoll_ctl 237
+#define __NR_epoll_wait 238
+#define __NR_remap_file_pages 239
+#define __NR_timer_create 240
+#define __NR_timer_settime 241
+#define __NR_timer_gettime 242
+#define __NR_timer_getoverrun 243
+#define __NR_timer_delete 244
+#define __NR_clock_settime 245
+#define __NR_clock_gettime 246
+#define __NR_clock_getres 247
+#define __NR_clock_nanosleep 248
+#define __NR_swapcontext 249
+#define __NR_tgkill 250
+#define __NR_utimes 251
+#define __NR_statfs64 252
+#define __NR_fstatfs64 253
+#ifndef __powerpc64__
+#define __NR_fadvise64_64 254
+#endif
+#define __NR_rtas 255
+#define __NR_sys_debug_setcontext 256
+/* Number 257 is reserved for vserver */
+#define __NR_migrate_pages 258
+#define __NR_mbind 259
+#define __NR_get_mempolicy 260
+#define __NR_set_mempolicy 261
+#define __NR_mq_open 262
+#define __NR_mq_unlink 263
+#define __NR_mq_timedsend 264
+#define __NR_mq_timedreceive 265
+#define __NR_mq_notify 266
+#define __NR_mq_getsetattr 267
+#define __NR_kexec_load 268
+#define __NR_add_key 269
+#define __NR_request_key 270
+#define __NR_keyctl 271
+#define __NR_waitid 272
+#define __NR_ioprio_set 273
+#define __NR_ioprio_get 274
+#define __NR_inotify_init 275
+#define __NR_inotify_add_watch 276
+#define __NR_inotify_rm_watch 277
+#define __NR_spu_run 278
+#define __NR_spu_create 279
+#define __NR_pselect6 280
+#define __NR_ppoll 281
+#define __NR_unshare 282
+#define __NR_splice 283
+#define __NR_tee 284
+#define __NR_vmsplice 285
+#define __NR_openat 286
+#define __NR_mkdirat 287
+#define __NR_mknodat 288
+#define __NR_fchownat 289
+#define __NR_futimesat 290
+#ifdef __powerpc64__
+#define __NR_newfstatat 291
+#else
+#define __NR_fstatat64 291
+#endif
+#define __NR_unlinkat 292
+#define __NR_renameat 293
+#define __NR_linkat 294
+#define __NR_symlinkat 295
+#define __NR_readlinkat 296
+#define __NR_fchmodat 297
+#define __NR_faccessat 298
+#define __NR_get_robust_list 299
+#define __NR_set_robust_list 300
+#define __NR_move_pages 301
+#define __NR_getcpu 302
+#define __NR_epoll_pwait 303
+#define __NR_utimensat 304
+#define __NR_signalfd 305
+#define __NR_timerfd_create 306
+#define __NR_eventfd 307
+#define __NR_sync_file_range2 308
+#define __NR_fallocate 309
+#define __NR_subpage_prot 310
+#define __NR_timerfd_settime 311
+#define __NR_timerfd_gettime 312
+#define __NR_signalfd4 313
+#define __NR_eventfd2 314
+#define __NR_epoll_create1 315
+#define __NR_dup3 316
+#define __NR_pipe2 317
+#define __NR_inotify_init1 318
+
+#ifdef __KERNEL__
+
+#define __NR_syscalls 319
+
+#define __NR__exit __NR_exit
+#define NR_syscalls __NR_syscalls
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+
+#define __ARCH_WANT_IPC_PARSE_VERSION
+#define __ARCH_WANT_OLD_READDIR
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SYS_ALARM
+#define __ARCH_WANT_SYS_GETHOSTNAME
+#define __ARCH_WANT_SYS_PAUSE
+#define __ARCH_WANT_SYS_SGETMASK
+#define __ARCH_WANT_SYS_SIGNAL
+#define __ARCH_WANT_SYS_TIME
+#define __ARCH_WANT_SYS_UTIME
+#define __ARCH_WANT_SYS_WAITPID
+#define __ARCH_WANT_SYS_SOCKETCALL
+#define __ARCH_WANT_SYS_FADVISE64
+#define __ARCH_WANT_SYS_GETPGRP
+#define __ARCH_WANT_SYS_LLSEEK
+#define __ARCH_WANT_SYS_NICE
+#define __ARCH_WANT_SYS_OLD_GETRLIMIT
+#define __ARCH_WANT_SYS_OLDUMOUNT
+#define __ARCH_WANT_SYS_SIGPENDING
+#define __ARCH_WANT_SYS_SIGPROCMASK
+#define __ARCH_WANT_SYS_RT_SIGACTION
+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
+#ifdef CONFIG_PPC32
+#define __ARCH_WANT_OLD_STAT
+#endif
+#ifdef CONFIG_PPC64
+#define __ARCH_WANT_COMPAT_SYS_TIME
+#define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
+#define __ARCH_WANT_SYS_NEWFSTATAT
+#endif
+
+/*
+ * "Conditional" syscalls
+ */
+#define cond_syscall(x) \
+ asmlinkage long x (void) __attribute__((weak,alias("sys_ni_syscall")))
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/include/asm/user.h b/arch/powerpc/include/asm/user.h
new file mode 100644
index 0000000..3fd4545
--- /dev/null
+++ b/arch/powerpc/include/asm/user.h
@@ -0,0 +1,51 @@
+#ifndef _ASM_POWERPC_USER_H
+#define _ASM_POWERPC_USER_H
+
+#include <asm/ptrace.h>
+#include <asm/page.h>
+
+/*
+ * Adapted from <asm-alpha/user.h>
+ *
+ * Core file format: The core file is written in such a way that gdb
+ * can understand it and provide useful information to the user (under
+ * linux we use the `trad-core' bfd, NOT the osf-core). The file contents
+ * are as follows:
+ *
+ * upage: 1 page consisting of a user struct that tells gdb
+ * what is present in the file. Directly after this is a
+ * copy of the task_struct, which is currently not used by gdb,
+ * but it may come in handy at some point. All of the registers
+ * are stored as part of the upage. The upage should always be
+ * only one page long.
+ * data: The data segment follows next. We use current->end_text to
+ * current->brk to pick up all of the user variables, plus any memory
+ * that may have been sbrk'ed. No attempt is made to determine if a
+ * page is demand-zero or if a page is totally unused, we just cover
+ * the entire range. All of the addresses are rounded in such a way
+ * that an integral number of pages is written.
+ * stack: We need the stack information in order to get a meaningful
+ * backtrace. We need to write the data from usp to
+ * current->start_stack, so we round each of these in order to be able
+ * to write an integer number of pages.
+ */
+struct user {
+ struct pt_regs regs; /* entire machine state */
+ size_t u_tsize; /* text size (pages) */
+ size_t u_dsize; /* data size (pages) */
+ size_t u_ssize; /* stack size (pages) */
+ unsigned long start_code; /* text starting address */
+ unsigned long start_data; /* data starting address */
+ unsigned long start_stack; /* stack starting address */
+ long int signal; /* signal causing core dump */
+ unsigned long u_ar0; /* help gdb find registers */
+ unsigned long magic; /* identifies a core file */
+ char u_comm[32]; /* user command name */
+};
+
+#define NBPG PAGE_SIZE
+#define UPAGES 1
+#define HOST_TEXT_START_ADDR (u.start_code)
+#define HOST_DATA_START_ADDR (u.start_data)
+#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
+#endif /* _ASM_POWERPC_USER_H */
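As a minimal sketch of how the layout macros above are meant to be used: they expand against a variable literally named u, so a consumer of the upage would typically look like this (the comment about filling u is illustrative, not taken from this patch):

    /* Sketch only: the HOST_* macros reference a variable named 'u'. */
    struct user u;
    /* ... populate 'u' from the first UPAGES * NBPG bytes of the core ... */
    unsigned long text_start = HOST_TEXT_START_ADDR;  /* u.start_code */
    unsigned long data_start = HOST_DATA_START_ADDR;  /* u.start_data */
    unsigned long stack_end  = HOST_STACK_END_ADDR;   /* u.start_stack + u.u_ssize * NBPG */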
diff --git a/arch/powerpc/include/asm/vdso.h b/arch/powerpc/include/asm/vdso.h
new file mode 100644
index 0000000..26fc449
--- /dev/null
+++ b/arch/powerpc/include/asm/vdso.h
@@ -0,0 +1,78 @@
+#ifndef __PPC64_VDSO_H__
+#define __PPC64_VDSO_H__
+
+#ifdef __KERNEL__
+
+/* Default link addresses for the vDSOs */
+#define VDSO32_LBASE 0x100000
+#define VDSO64_LBASE 0x100000
+
+/* Default map addresses */
+#define VDSO32_MBASE VDSO32_LBASE
+#define VDSO64_MBASE VDSO64_LBASE
+
+#define VDSO_VERSION_STRING LINUX_2.6.15
+
+/* Define if 64 bits VDSO has procedure descriptors */
+#undef VDS64_HAS_DESCRIPTORS
+
+#ifndef __ASSEMBLY__
+
+/* Offsets relative to thread->vdso_base */
+extern unsigned long vdso64_rt_sigtramp;
+extern unsigned long vdso32_sigtramp;
+extern unsigned long vdso32_rt_sigtramp;
+
+#else /* __ASSEMBLY__ */
+
+#ifdef __VDSO64__
+#ifdef VDS64_HAS_DESCRIPTORS
+#define V_FUNCTION_BEGIN(name) \
+ .globl name; \
+ .section ".opd","a"; \
+ .align 3; \
+ name: \
+ .quad .name,.TOC.@tocbase,0; \
+ .previous; \
+ .globl .name; \
+ .type .name,@function; \
+ .name: \
+
+#define V_FUNCTION_END(name) \
+ .size .name,.-.name;
+
+#define V_LOCAL_FUNC(name) (.name)
+
+#else /* VDS64_HAS_DESCRIPTORS */
+
+#define V_FUNCTION_BEGIN(name) \
+ .globl name; \
+ name: \
+
+#define V_FUNCTION_END(name) \
+ .size name,.-name;
+
+#define V_LOCAL_FUNC(name) (name)
+
+#endif /* VDS64_HAS_DESCRIPTORS */
+#endif /* __VDSO64__ */
+
+#ifdef __VDSO32__
+
+#define V_FUNCTION_BEGIN(name) \
+ .globl name; \
+ .type name,@function; \
+ name: \
+
+#define V_FUNCTION_END(name) \
+ .size name,.-name;
+
+#define V_LOCAL_FUNC(name) (name)
+
+#endif /* __VDSO32__ */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* __PPC64_VDSO_H__ */
diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h
new file mode 100644
index 0000000..f013932
--- /dev/null
+++ b/arch/powerpc/include/asm/vdso_datapage.h
@@ -0,0 +1,121 @@
+#ifndef _VDSO_DATAPAGE_H
+#define _VDSO_DATAPAGE_H
+#ifdef __KERNEL__
+
+/*
+ * Copyright (C) 2002 Peter Bergner <bergner@vnet.ibm.com>, IBM
+ * Copyright (C) 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>,
+ * IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+
+/*
+ * Note about this structure:
+ *
+ * This structure was historically called systemcfg and exposed to
+ * userland via /proc/ppc64/systemcfg. Unfortunately, this became an
+ * ABI issue as some proprietary software started relying on being able
+ * to mmap() it, thus we have to keep the base layout at least for a
+ * few kernel versions.
+ *
+ * However, since ppc32 doesn't carry this backwards-compatibility burden,
+ * a simpler version of the data structure is used there with only the
+ * fields actually used by the vDSO.
+ *
+ */
+
+/*
+ * If the major version changes we are incompatible.
+ * Minor version changes are a hint.
+ */
+#define SYSTEMCFG_MAJOR 1
+#define SYSTEMCFG_MINOR 1
+
+#ifndef __ASSEMBLY__
+
+#include <linux/unistd.h>
+
+#define SYSCALL_MAP_SIZE ((__NR_syscalls + 31) / 32)
+
+/*
+ * So here is the ppc64 backward compatible version
+ */
+
+#ifdef CONFIG_PPC64
+
+struct vdso_data {
+ __u8 eye_catcher[16]; /* Eyecatcher: SYSTEMCFG:PPC64 0x00 */
+ struct { /* Systemcfg version numbers */
+ __u32 major; /* Major number 0x10 */
+ __u32 minor; /* Minor number 0x14 */
+ } version;
+
+ /* Note about the platform flags: they now contain only the lpar
+ * bit. The actual platform number is dead and buried
+ */
+ __u32 platform; /* Platform flags 0x18 */
+ __u32 processor; /* Processor type 0x1C */
+ __u64 processorCount; /* # of physical processors 0x20 */
+ __u64 physicalMemorySize; /* Size of real memory(B) 0x28 */
+ __u64 tb_orig_stamp; /* Timebase at boot 0x30 */
+ __u64 tb_ticks_per_sec; /* Timebase ticks / sec 0x38 */
+ __u64 tb_to_xs; /* Inverse of TB to 2^20 0x40 */
+ __u64 stamp_xsec; /* 0x48 */
+ __u64 tb_update_count; /* Timebase atomicity ctr 0x50 */
+ __u32 tz_minuteswest; /* Minutes west of Greenwich 0x58 */
+ __u32 tz_dsttime; /* Type of dst correction 0x5C */
+ __u32 dcache_size; /* L1 d-cache size 0x60 */
+ __u32 dcache_line_size; /* L1 d-cache line size 0x64 */
+ __u32 icache_size; /* L1 i-cache size 0x68 */
+ __u32 icache_line_size; /* L1 i-cache line size 0x6C */
+
+ /* those additional ones don't have to be located anywhere
+ * special as they were not part of the original systemcfg
+ */
+ __u32 dcache_block_size; /* L1 d-cache block size */
+ __u32 icache_block_size; /* L1 i-cache block size */
+ __u32 dcache_log_block_size; /* L1 d-cache log block size */
+ __u32 icache_log_block_size; /* L1 i-cache log block size */
+ __s32 wtom_clock_sec; /* Wall to monotonic clock */
+ __s32 wtom_clock_nsec;
+ __u32 syscall_map_64[SYSCALL_MAP_SIZE]; /* map of syscalls */
+ __u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
+};
+
+#else /* CONFIG_PPC64 */
+
+/*
+ * And here is the simpler 32 bits version
+ */
+struct vdso_data {
+ __u64 tb_orig_stamp; /* Timebase at boot 0x30 */
+ __u64 tb_ticks_per_sec; /* Timebase ticks / sec 0x38 */
+ __u64 tb_to_xs; /* Inverse of TB to 2^20 0x40 */
+ __u64 stamp_xsec; /* 0x48 */
+ __u32 tb_update_count; /* Timebase atomicity ctr 0x50 */
+ __u32 tz_minuteswest; /* Minutes west of Greenwich 0x58 */
+ __u32 tz_dsttime; /* Type of dst correction 0x5C */
+ __s32 wtom_clock_sec; /* Wall to monotonic clock */
+ __s32 wtom_clock_nsec;
+ __u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
+ __u32 dcache_block_size; /* L1 d-cache block size */
+ __u32 icache_block_size; /* L1 i-cache block size */
+ __u32 dcache_log_block_size; /* L1 d-cache log block size */
+ __u32 icache_log_block_size; /* L1 i-cache log block size */
+};
+
+#endif /* CONFIG_PPC64 */
+
+#ifdef __KERNEL__
+extern struct vdso_data *vdso_data;
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+#endif /* _VDSO_DATAPAGE_H */
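A quick worked example of SYSCALL_MAP_SIZE with the __NR_syscalls value set earlier in this patch (319): (319 + 31) / 32 = 10, so each syscall map is ten 32-bit words, enough for 320 syscall numbers. A hedged sketch of how one of those map words could be tested follows; the MSB-first bit numbering is an assumption made purely for illustration, not something this patch defines:

    /* Sketch only: report whether syscall 'nr' is flagged in a map.
     * Bit ordering (bit 0 = most significant bit) is assumed here. */
    static inline int syscall_is_mapped(const __u32 *map, unsigned int nr)
    {
            return (map[nr >> 5] & (0x80000000u >> (nr & 0x1f))) != 0;
    }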
diff --git a/arch/powerpc/include/asm/vga.h b/arch/powerpc/include/asm/vga.h
new file mode 100644
index 0000000..a2eac40
--- /dev/null
+++ b/arch/powerpc/include/asm/vga.h
@@ -0,0 +1,53 @@
+#ifndef _ASM_POWERPC_VGA_H_
+#define _ASM_POWERPC_VGA_H_
+
+#ifdef __KERNEL__
+
+/*
+ * Access to VGA videoram
+ *
+ * (c) 1998 Martin Mares <mj@ucw.cz>
+ */
+
+
+#include <asm/io.h>
+
+
+#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_MDA_CONSOLE)
+
+#define VT_BUF_HAVE_RW
+/*
+ * These are only needed for supporting VGA or MDA text mode, which use little
+ * endian byte ordering.
+ * In other cases, we can optimize by using native byte ordering and
+ * <linux/vt_buffer.h> has already done the right job for us.
+ */
+
+static inline void scr_writew(u16 val, volatile u16 *addr)
+{
+ st_le16(addr, val);
+}
+
+static inline u16 scr_readw(volatile const u16 *addr)
+{
+ return ld_le16(addr);
+}
+
+#define VT_BUF_HAVE_MEMCPYW
+#define scr_memcpyw memcpy
+
+#endif /* CONFIG_VGA_CONSOLE || CONFIG_MDA_CONSOLE */
+
+extern unsigned long vgacon_remap_base;
+
+#ifdef __powerpc64__
+#define VGA_MAP_MEM(x,s) ((unsigned long) ioremap((x), s))
+#else
+#define VGA_MAP_MEM(x,s) (x + vgacon_remap_base)
+#endif
+
+#define vga_readb(x) (*(x))
+#define vga_writeb(x,y) (*(y) = (x))
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_VGA_H_ */
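For context on the helpers above: a VGA/MDA text cell is a 16-bit little-endian value with the character in the low byte and the attribute in the high byte, which is why scr_writew()/scr_readw() go through st_le16()/ld_le16() rather than native stores. A small hedged sketch (the 0x07 light-grey-on-black attribute is the conventional VGA default, assumed here for illustration):

    /* Sketch: store character 'ch' with attribute 0x07 into one text cell. */
    static inline void put_cell(volatile u16 *cell, unsigned char ch)
    {
            scr_writew((0x07 << 8) | ch, cell);  /* stored little-endian via st_le16() */
    }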
diff --git a/arch/powerpc/include/asm/vio.h b/arch/powerpc/include/asm/vio.h
new file mode 100644
index 0000000..0a290a1
--- /dev/null
+++ b/arch/powerpc/include/asm/vio.h
@@ -0,0 +1,118 @@
+/*
+ * IBM PowerPC Virtual I/O Infrastructure Support.
+ *
+ * Copyright (c) 2003 IBM Corp.
+ * Dave Engebretsen engebret@us.ibm.com
+ * Santiago Leon santil@us.ibm.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_POWERPC_VIO_H
+#define _ASM_POWERPC_VIO_H
+#ifdef __KERNEL__
+
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/mod_devicetable.h>
+
+#include <asm/hvcall.h>
+#include <asm/scatterlist.h>
+
+/*
+ * Architecture-specific constants for drivers to
+ * extract attributes of the device using vio_get_attribute()
+ */
+#define VETH_MAC_ADDR "local-mac-address"
+#define VETH_MCAST_FILTER_SIZE "ibm,mac-address-filters"
+
+/* End architecture-specific constants */
+
+#define h_vio_signal(ua, mode) \
+ plpar_hcall_norets(H_VIO_SIGNAL, ua, mode)
+
+#define VIO_IRQ_DISABLE 0UL
+#define VIO_IRQ_ENABLE 1UL
+
+/*
+ * VIO CMO minimum entitlement for all devices and spare entitlement
+ */
+#define VIO_CMO_MIN_ENT 1562624
+
+struct iommu_table;
+
+/**
+ * vio_dev - This structure is used to describe virtual I/O devices.
+ *
+ * @desired: set from return of driver's get_desired_dma() function
+ * @entitled: bytes of IO data that have been reserved for this device.
+ * @allocated: bytes of IO data currently in use by the device.
+ * @allocs_failed: number of DMA failures due to insufficient entitlement.
+ */
+struct vio_dev {
+ const char *name;
+ const char *type;
+ uint32_t unit_address;
+ unsigned int irq;
+ struct {
+ size_t desired;
+ size_t entitled;
+ size_t allocated;
+ atomic_t allocs_failed;
+ } cmo;
+ struct device dev;
+};
+
+struct vio_driver {
+ const struct vio_device_id *id_table;
+ int (*probe)(struct vio_dev *dev, const struct vio_device_id *id);
+ int (*remove)(struct vio_dev *dev);
+ /* A driver must have a get_desired_dma() function to
+ * be loaded in a CMO environment if it uses DMA.
+ */
+ unsigned long (*get_desired_dma)(struct vio_dev *dev);
+ struct device_driver driver;
+};
+
+extern int vio_register_driver(struct vio_driver *drv);
+extern void vio_unregister_driver(struct vio_driver *drv);
+
+extern int vio_cmo_entitlement_update(size_t);
+extern void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired);
+
+extern void __devinit vio_unregister_device(struct vio_dev *dev);
+
+struct device_node;
+
+extern struct vio_dev *vio_register_device_node(
+ struct device_node *node_vdev);
+extern const void *vio_get_attribute(struct vio_dev *vdev, char *which,
+ int *length);
+#ifdef CONFIG_PPC_PSERIES
+extern struct vio_dev *vio_find_node(struct device_node *vnode);
+extern int vio_enable_interrupts(struct vio_dev *dev);
+extern int vio_disable_interrupts(struct vio_dev *dev);
+#else
+static inline int vio_enable_interrupts(struct vio_dev *dev)
+{
+ return 0;
+}
+#endif
+
+static inline struct vio_driver *to_vio_driver(struct device_driver *drv)
+{
+ return container_of(drv, struct vio_driver, driver);
+}
+
+static inline struct vio_dev *to_vio_dev(struct device *dev)
+{
+ return container_of(dev, struct vio_dev, dev);
+}
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_VIO_H */
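A minimal sketch of how a driver could sit on top of the interface declared above; the foo names and the id-table strings are hypothetical, and the CMO callback simply returns the minimum entitlement for illustration:

    /* Hypothetical driver skeleton built only from the declarations above. */
    static int foo_probe(struct vio_dev *vdev, const struct vio_device_id *id)
    {
            int len;
            const void *mac = vio_get_attribute(vdev, VETH_MAC_ADDR, &len);

            return mac ? 0 : -ENODEV;
    }

    static int foo_remove(struct vio_dev *vdev)
    {
            return 0;
    }

    /* Required for loading in a CMO environment when the driver uses DMA. */
    static unsigned long foo_get_desired_dma(struct vio_dev *vdev)
    {
            return VIO_CMO_MIN_ENT;         /* illustrative: ask for the minimum */
    }

    static struct vio_device_id foo_ids[] = {
            { "network", "IBM,foo" },       /* hypothetical type/compat pair */
            { "", "" },
    };

    static struct vio_driver foo_driver = {
            .id_table        = foo_ids,
            .probe           = foo_probe,
            .remove          = foo_remove,
            .get_desired_dma = foo_get_desired_dma,
            .driver          = { .name = "foo" },
    };

The driver would then be registered with vio_register_driver(&foo_driver) and torn down with vio_unregister_driver(&foo_driver).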
diff --git a/arch/powerpc/include/asm/xilinx_intc.h b/arch/powerpc/include/asm/xilinx_intc.h
new file mode 100644
index 0000000..343612f
--- /dev/null
+++ b/arch/powerpc/include/asm/xilinx_intc.h
@@ -0,0 +1,20 @@
+/*
+ * Xilinx intc external definitions
+ *
+ * Copyright 2007 Secret Lab Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#ifndef _ASM_POWERPC_XILINX_INTC_H
+#define _ASM_POWERPC_XILINX_INTC_H
+
+#ifdef __KERNEL__
+
+extern void __init xilinx_intc_init_tree(void);
+extern unsigned int xilinx_intc_get_irq(void);
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_XILINX_INTC_H */
diff --git a/arch/powerpc/include/asm/xmon.h b/arch/powerpc/include/asm/xmon.h
new file mode 100644
index 0000000..5eb8e59
--- /dev/null
+++ b/arch/powerpc/include/asm/xmon.h
@@ -0,0 +1,33 @@
+#ifndef __ASM_POWERPC_XMON_H
+#define __ASM_POWERPC_XMON_H
+
+/*
+ * Copyright (C) 2006 IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifdef __KERNEL__
+
+#include <linux/irqreturn.h>
+
+#ifdef CONFIG_XMON
+extern void xmon_setup(void);
+extern void xmon_register_spus(struct list_head *list);
+struct pt_regs;
+extern int xmon(struct pt_regs *excp);
+extern irqreturn_t xmon_irq(int, void *);
+#else
+static inline void xmon_setup(void) { };
+static inline void xmon_register_spus(struct list_head *list) { };
+#endif
+
+#if defined(CONFIG_XMON) && defined(CONFIG_SMP)
+extern int cpus_are_in_xmon(void);
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_POWERPC_XMON_H */
diff --git a/arch/powerpc/include/asm/xor.h b/arch/powerpc/include/asm/xor.h
new file mode 100644
index 0000000..c82eb12
--- /dev/null
+++ b/arch/powerpc/include/asm/xor.h
@@ -0,0 +1 @@
+#include <asm-generic/xor.h>
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 1a40947..64f5948 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -59,8 +59,6 @@ obj64-$(CONFIG_HIBERNATION) += swsusp_asm64.o
obj-$(CONFIG_MODULES) += module.o module_$(CONFIG_WORD_SIZE).o
obj-$(CONFIG_44x) += cpu_setup_44x.o
-ifeq ($(CONFIG_PPC_MERGE),y)
-
extra-$(CONFIG_PPC_STD_MMU) := head_32.o
extra-$(CONFIG_PPC64) := head_64.o
extra-$(CONFIG_40x) := head_40x.o
@@ -100,12 +98,6 @@ ifneq ($(CONFIG_PPC_INDIRECT_IO),y)
obj-y += iomap.o
endif
-else
-# stuff used from here for ARCH=ppc
-smpobj-$(CONFIG_SMP) += smp.o
-
-endif
-
obj-$(CONFIG_PPC64) += $(obj64-y)
extra-$(CONFIG_PPC_FPU) += fpu.o
@@ -121,9 +113,6 @@ PHONY += systbl_chk
systbl_chk: $(src)/systbl_chk.sh $(obj)/systbl_chk.i
$(call cmd,systbl_chk)
-
-ifeq ($(CONFIG_PPC_MERGE),y)
-
$(obj)/built-in.o: prom_init_check
quiet_cmd_prom_init_check = CALL $<
@@ -133,7 +122,4 @@ PHONY += prom_init_check
prom_init_check: $(src)/prom_init_check.sh $(obj)/prom_init.o
$(call cmd,prom_init_check)
-endif
-
-
clean-files := vmlinux.lds
diff --git a/arch/powerpc/kernel/cpu_setup_44x.S b/arch/powerpc/kernel/cpu_setup_44x.S
index 5465e8d..80cac98 100644
--- a/arch/powerpc/kernel/cpu_setup_44x.S
+++ b/arch/powerpc/kernel/cpu_setup_44x.S
@@ -39,12 +39,6 @@ _GLOBAL(__setup_cpu_440gx)
_GLOBAL(__setup_cpu_440spe)
b __fixup_440A_mcheck
- /* Temporary fixup for arch/ppc until we kill the whole thing */
-#ifndef CONFIG_PPC_MERGE
-_GLOBAL(__fixup_440A_mcheck)
- blr
-#endif
-
/* enable APU between CPU and FPU */
_GLOBAL(__init_fpu_44x)
mfspr r3,SPRN_CCR0
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 81c8324..1cbbf70 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -148,7 +148,7 @@ transfer_to_handler:
/* Check to see if the dbcr0 register is set up to debug. Use the
internal debug mode bit to do this. */
lwz r12,THREAD_DBCR0(r12)
- andis. r12,r12,(DBCR0_IDM | DBSR_DAC1R | DBSR_DAC1W)@h
+ andis. r12,r12,DBCR0_IDM@h
beq+ 3f
/* From user and task is ptraced - load up global dbcr0 */
li r12,-1 /* clear all pending debug events */
@@ -292,7 +292,7 @@ syscall_exit_cont:
/* If the process has its own DBCR0 value, load it up. The internal
debug mode bit tells us that dbcr0 should be loaded. */
lwz r0,THREAD+THREAD_DBCR0(r2)
- andis. r10,r0,(DBCR0_IDM | DBSR_DAC1R | DBSR_DAC1W)@h
+ andis. r10,r0,DBCR0_IDM@h
bnel- load_dbcr0
#endif
#ifdef CONFIG_44x
@@ -343,7 +343,12 @@ syscall_dotrace:
stw r0,_TRAP(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_syscall_trace_enter
- lwz r0,GPR0(r1) /* Restore original registers */
+ /*
+ * Restore argument registers possibly just changed.
+ * We use the return value of do_syscall_trace_enter
+ * for call number to look up in the table (r0).
+ */
+ mr r0,r3
lwz r3,GPR3(r1)
lwz r4,GPR4(r1)
lwz r5,GPR5(r1)
@@ -720,7 +725,7 @@ restore_user:
/* Check whether this process has its own DBCR0 value. The internal
debug mode bit tells us that dbcr0 should be loaded. */
lwz r0,THREAD+THREAD_DBCR0(r2)
- andis. r10,r0,(DBCR0_IDM | DBSR_DAC1R | DBSR_DAC1W)@h
+ andis. r10,r0,DBCR0_IDM@h
bnel- load_dbcr0
#endif
@@ -1055,8 +1060,8 @@ do_user_signal: /* r10 contains MSR_KERNEL here */
SAVE_NVGPRS(r1)
rlwinm r3,r3,0,0,30
stw r3,_TRAP(r1)
-2: li r3,0
- addi r4,r1,STACK_FRAME_OVERHEAD
+2: addi r3,r1,STACK_FRAME_OVERHEAD
+ mr r4,r9
bl do_signal
REST_NVGPRS(r1)
b recheck
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index d736924..2d802e9 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -214,7 +214,12 @@ syscall_dotrace:
bl .save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
bl .do_syscall_trace_enter
- ld r0,GPR0(r1) /* Restore original registers */
+ /*
+ * Restore argument registers possibly just changed.
+ * We use the return value of do_syscall_trace_enter
+ * for the call number to look up in the table (r0).
+ */
+ mr r0,r3
ld r3,GPR3(r1)
ld r4,GPR4(r1)
ld r5,GPR5(r1)
@@ -638,8 +643,7 @@ user_work:
b .ret_from_except_lite
1: bl .save_nvgprs
- li r3,0
- addi r4,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_FRAME_OVERHEAD
bl .do_signal
b .ret_from_except
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 6ac8612..d972dec 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -77,22 +77,12 @@ static int ppc_spurious_interrupts;
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;
-#ifndef CONFIG_PPC_MERGE
-#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
-unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
-#endif
-
#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */
-#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
-extern atomic_t ipi_recv;
-extern atomic_t ipi_sent;
-#endif
-
#ifdef CONFIG_PPC64
EXPORT_SYMBOL(irq_desc);
@@ -216,21 +206,14 @@ int show_interrupts(struct seq_file *p, void *v)
skip:
spin_unlock_irqrestore(&desc->lock, flags);
} else if (i == NR_IRQS) {
-#ifdef CONFIG_PPC32
-#ifdef CONFIG_TAU_INT
+#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
if (tau_initialized){
seq_puts(p, "TAU: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", tau_interrupts(j));
seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
}
-#endif
-#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
- /* should this be per processor send/receive? */
- seq_printf(p, "IPI (recv/sent): %10u/%u\n",
- atomic_read(&ipi_recv), atomic_read(&ipi_sent));
-#endif
-#endif /* CONFIG_PPC32 */
+#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */
seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
}
return 0;
@@ -454,8 +437,6 @@ void do_softirq(void)
* IRQ controller and virtual interrupts
*/
-#ifdef CONFIG_PPC_MERGE
-
static LIST_HEAD(irq_hosts);
static DEFINE_SPINLOCK(irq_big_lock);
static DEFINE_PER_CPU(unsigned int, irq_radix_reader);
@@ -1114,8 +1095,6 @@ static int __init irq_debugfs_init(void)
__initcall(irq_debugfs_init);
#endif /* CONFIG_VIRQ_DEBUG */
-#endif /* CONFIG_PPC_MERGE */
-
#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index 4d96e1d..9ddfaef 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -493,18 +493,18 @@ static int __init serial_dev_init(void)
device_initcall(serial_dev_init);
+#ifdef CONFIG_SERIAL_8250_CONSOLE
/*
* This is called very early, as part of console_init() (typically just after
* time_init()). This function is responsible for trying to find a good
* default console on serial ports. It tries to match the open firmware
- * default output with one of the available serial console drivers, either
- * one of the platform serial ports that have been probed earlier by
- * find_legacy_serial_ports() or some more platform specific ones.
+ * default output with one of the available serial console drivers that have
+ * been probed earlier by find_legacy_serial_ports().
*/
static int __init check_legacy_serial_console(void)
{
struct device_node *prom_stdout = NULL;
- int speed = 0, offset = 0;
+ int i, speed = 0, offset = 0;
const char *name;
const u32 *spd;
@@ -548,31 +548,20 @@ static int __init check_legacy_serial_console(void)
if (spd)
speed = *spd;
- if (0)
- ;
-#ifdef CONFIG_SERIAL_8250_CONSOLE
- else if (strcmp(name, "serial") == 0) {
- int i;
- /* Look for it in probed array */
- for (i = 0; i < legacy_serial_count; i++) {
- if (prom_stdout != legacy_serial_infos[i].np)
- continue;
- offset = i;
- speed = legacy_serial_infos[i].speed;
- break;
- }
- if (i >= legacy_serial_count)
- goto not_found;
+ if (strcmp(name, "serial") != 0)
+ goto not_found;
+
+ /* Look for it in probed array */
+ for (i = 0; i < legacy_serial_count; i++) {
+ if (prom_stdout != legacy_serial_infos[i].np)
+ continue;
+ offset = i;
+ speed = legacy_serial_infos[i].speed;
+ break;
}
-#endif /* CONFIG_SERIAL_8250_CONSOLE */
-#ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE
- else if (strcmp(name, "ch-a") == 0)
- offset = 0;
- else if (strcmp(name, "ch-b") == 0)
- offset = 1;
-#endif /* CONFIG_SERIAL_PMACZILOG_CONSOLE */
- else
+ if (i >= legacy_serial_count)
goto not_found;
+
of_node_put(prom_stdout);
DBG("Found serial console at ttyS%d\n", offset);
@@ -591,3 +580,4 @@ static int __init check_legacy_serial_console(void)
}
console_initcall(check_legacy_serial_console);
+#endif /* CONFIG_SERIAL_8250_CONSOLE */
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 9f856a0..1a09719 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -636,10 +636,6 @@ static ssize_t lparcfg_write(struct file *file, const char __user * buf,
retval = -EIO;
} else if (retval == H_PARAMETER) {
retval = -EINVAL;
- } else {
- printk(KERN_WARNING "%s: received unknown hv return code %ld",
- __func__, retval);
- retval = -EIO;
}
return retval;
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index db2497c..957bded 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -254,7 +254,7 @@ void do_dabr(struct pt_regs *regs, unsigned long address,
return;
/* Clear the DAC and struct entries. One shot trigger */
-#if (defined(CONFIG_44x) || defined(CONFIG_BOOKE))
+#if defined(CONFIG_BOOKE)
mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~(DBSR_DAC1R | DBSR_DAC1W
| DBCR0_IDM));
#endif
@@ -276,17 +276,15 @@ int set_dabr(unsigned long dabr)
{
__get_cpu_var(current_dabr) = dabr;
-#ifdef CONFIG_PPC_MERGE /* XXX for now */
if (ppc_md.set_dabr)
return ppc_md.set_dabr(dabr);
-#endif
/* XXX should we have a CPU_FTR_HAS_DABR ? */
#if defined(CONFIG_PPC64) || defined(CONFIG_6xx)
mtspr(SPRN_DABR, dabr);
#endif
-#if defined(CONFIG_44x) || defined(CONFIG_BOOKE)
+#if defined(CONFIG_BOOKE)
mtspr(SPRN_DAC1, dabr);
#endif
@@ -373,7 +371,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
set_dabr(new->thread.dabr);
-#if defined(CONFIG_44x) || defined(CONFIG_BOOKE)
+#if defined(CONFIG_BOOKE)
/* If new thread DAC (HW breakpoint) is the same then leave it */
if (new->thread.dabr)
set_dabr(new->thread.dabr);
@@ -568,7 +566,7 @@ void flush_thread(void)
current->thread.dabr = 0;
set_dabr(0);
-#if defined(CONFIG_44x) || defined(CONFIG_BOOKE)
+#if defined(CONFIG_BOOKE)
current->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W);
#endif
}
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index c4ab219..b72849a 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -205,8 +205,6 @@ static int __initdata mem_reserve_cnt;
static cell_t __initdata regbuf[1024];
-#define MAX_CPU_THREADS 2
-
/*
* Error results ... some OF calls will return "-1" on error, some
* will return 0, some will return either. To simplify, here are
@@ -1339,10 +1337,6 @@ static void __init prom_hold_cpus(void)
unsigned int reg;
phandle node;
char type[64];
- int cpuid = 0;
- unsigned int interrupt_server[MAX_CPU_THREADS];
- unsigned int cpu_threads, hw_cpu_num;
- int propsize;
struct prom_t *_prom = &RELOC(prom);
unsigned long *spinloop
= (void *) LOW_ADDR(__secondary_hold_spinloop);
@@ -1386,7 +1380,6 @@ static void __init prom_hold_cpus(void)
reg = -1;
prom_getprop(node, "reg", &reg, sizeof(reg));
- prom_debug("\ncpuid = 0x%x\n", cpuid);
prom_debug("cpu hw idx = 0x%x\n", reg);
/* Init the acknowledge var which will be reset by
@@ -1395,28 +1388,9 @@ static void __init prom_hold_cpus(void)
*/
*acknowledge = (unsigned long)-1;
- propsize = prom_getprop(node, "ibm,ppc-interrupt-server#s",
- &interrupt_server,
- sizeof(interrupt_server));
- if (propsize < 0) {
- /* no property. old hardware has no SMT */
- cpu_threads = 1;
- interrupt_server[0] = reg; /* fake it with phys id */
- } else {
- /* We have a threaded processor */
- cpu_threads = propsize / sizeof(u32);
- if (cpu_threads > MAX_CPU_THREADS) {
- prom_printf("SMT: too many threads!\n"
- "SMT: found %x, max is %x\n",
- cpu_threads, MAX_CPU_THREADS);
- cpu_threads = 1; /* ToDo: panic? */
- }
- }
-
- hw_cpu_num = interrupt_server[0];
- if (hw_cpu_num != _prom->cpu) {
+ if (reg != _prom->cpu) {
/* Primary Thread of non-boot cpu */
- prom_printf("%x : starting cpu hw idx %x... ", cpuid, reg);
+ prom_printf("starting cpu hw idx %x... ", reg);
call_prom("start-cpu", 3, 0, node,
secondary_hold, reg);
@@ -1431,17 +1405,10 @@ static void __init prom_hold_cpus(void)
}
#ifdef CONFIG_SMP
else
- prom_printf("%x : boot cpu %x\n", cpuid, reg);
+ prom_printf("boot cpu hw idx %x\n", reg);
#endif /* CONFIG_SMP */
-
- /* Reserve cpu #s for secondary threads. They start later. */
- cpuid += cpu_threads;
}
- if (cpuid > NR_CPUS)
- prom_printf("WARNING: maximum CPUs (" __stringify(NR_CPUS)
- ") exceeded: ignoring extras\n");
-
prom_debug("prom_hold_cpus: end...\n");
}
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index a5d0e78..3635be6 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -22,6 +22,7 @@
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
+#include <linux/tracehook.h>
#include <linux/elf.h>
#include <linux/user.h>
#include <linux/security.h>
@@ -374,7 +375,7 @@ static int vsr_get(struct task_struct *target, const struct user_regset *regset,
flush_vsx_to_thread(target);
for (i = 0; i < 32 ; i++)
- buf[i] = current->thread.fpr[i][TS_VSRLOWOFFSET];
+ buf[i] = target->thread.fpr[i][TS_VSRLOWOFFSET];
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
buf, 0, 32 * sizeof(double));
@@ -393,7 +394,7 @@ static int vsr_set(struct task_struct *target, const struct user_regset *regset,
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
buf, 0, 32 * sizeof(double));
for (i = 0; i < 32 ; i++)
- current->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
+ target->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
return ret;
@@ -717,7 +718,7 @@ void user_disable_single_step(struct task_struct *task)
struct pt_regs *regs = task->thread.regs;
-#if defined(CONFIG_44x) || defined(CONFIG_BOOKE)
+#if defined(CONFIG_BOOKE)
/* If DAC then do not single step, skip */
if (task->thread.dabr)
return;
@@ -744,10 +745,11 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
if (addr > 0)
return -EINVAL;
+ /* The bottom 3 bits in dabr are flags */
if ((data & ~0x7UL) >= TASK_SIZE)
return -EIO;
-#ifdef CONFIG_PPC64
+#ifndef CONFIG_BOOKE
/* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
* It was assumed, on previous implementations, that 3 bits were
@@ -769,7 +771,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
task->thread.dabr = data;
#endif
-#if defined(CONFIG_44x) || defined(CONFIG_BOOKE)
+#if defined(CONFIG_BOOKE)
/* As described above, it was assumed 3 bits were passed with the data
* address, but we will assume only the mode bits will be passed
@@ -973,15 +975,13 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case PTRACE_GETVSRREGS:
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_VSX,
- 0, (32 * sizeof(vector128) +
- sizeof(u32)),
+ 0, 32 * sizeof(double),
(void __user *) data);
case PTRACE_SETVSRREGS:
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_VSX,
- 0, (32 * sizeof(vector128) +
- sizeof(u32)),
+ 0, 32 * sizeof(double),
(const void __user *) data);
#endif
#ifdef CONFIG_SPE
@@ -1013,31 +1013,24 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
return ret;
}
-static void do_syscall_trace(void)
+/*
+ * We must return the syscall number to actually look up in the table.
+ * This can be -1L to skip running any syscall at all.
+ */
+long do_syscall_trace_enter(struct pt_regs *regs)
{
- /* the 0x80 provides a way for the tracing parent to distinguish
- between a syscall stop and SIGTRAP delivery */
- ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
- ? 0x80 : 0));
-
- /*
- * this isn't the same as continuing with a signal, but it will do
- * for normal use. strace only continues with a signal if the
- * stopping signal is not SIGTRAP. -brl
- */
- if (current->exit_code) {
- send_sig(current->exit_code, current, 1);
- current->exit_code = 0;
- }
-}
+ long ret = 0;
-void do_syscall_trace_enter(struct pt_regs *regs)
-{
secure_computing(regs->gpr[0]);
- if (test_thread_flag(TIF_SYSCALL_TRACE)
- && (current->ptrace & PT_PTRACED))
- do_syscall_trace();
+ if (test_thread_flag(TIF_SYSCALL_TRACE) &&
+ tracehook_report_syscall_entry(regs))
+ /*
+ * Tracing decided this syscall should not happen.
+ * We'll return a bogus call number to get an ENOSYS
+ * error, but leave the original number in regs->gpr[0].
+ */
+ ret = -1L;
if (unlikely(current->audit_context)) {
#ifdef CONFIG_PPC64
@@ -1055,16 +1048,19 @@ void do_syscall_trace_enter(struct pt_regs *regs)
regs->gpr[5] & 0xffffffff,
regs->gpr[6] & 0xffffffff);
}
+
+ return ret ?: regs->gpr[0];
}
void do_syscall_trace_leave(struct pt_regs *regs)
{
+ int step;
+
if (unlikely(current->audit_context))
audit_syscall_exit((regs->ccr&0x10000000)?AUDITSC_FAILURE:AUDITSC_SUCCESS,
regs->result);
- if ((test_thread_flag(TIF_SYSCALL_TRACE)
- || test_thread_flag(TIF_SINGLESTEP))
- && (current->ptrace & PT_PTRACED))
- do_syscall_trace();
+ step = test_thread_flag(TIF_SINGLESTEP);
+ if (step || test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall_exit(regs, step);
}
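The new return convention of do_syscall_trace_enter() is what the entry_32.S/entry_64.S hunks earlier in this patch consume with "mr r0,r3": the returned value replaces the call number in r0 before the table lookup, so -1L falls out as ENOSYS while regs->gpr[0] keeps the original number for the tracer to see. A rough C rendering of that assembly path, purely illustrative (syscall_table and the six-argument prototype are simplifications, not the real entry code):

    /* Illustrative only: C analogue of the traced-syscall dispatch. */
    static long dispatch_traced_syscall(struct pt_regs *regs)
    {
            long nr = do_syscall_trace_enter(regs);   /* -1L means "skip it" */

            if ((unsigned long)nr >= NR_syscalls)
                    return -ENOSYS;                   /* bogus number lands here */

            return syscall_table[nr](regs->gpr[3], regs->gpr[4], regs->gpr[5],
                                     regs->gpr[6], regs->gpr[7], regs->gpr[8]);
    }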
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
index 67bf1a1..197d49c 100644
--- a/arch/powerpc/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace32.c
@@ -294,6 +294,8 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
case PTRACE_SETFPREGS:
case PTRACE_GETVRREGS:
case PTRACE_SETVRREGS:
+ case PTRACE_GETVSRREGS:
+ case PTRACE_SETVSRREGS:
case PTRACE_GETREGS64:
case PTRACE_SETREGS64:
case PPC_PTRACE_GETFPREGS:
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 61a3f413..9cc5a52 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -367,7 +367,6 @@ static void __init cpu_init_thread_core_maps(int tpc)
* setup_cpu_maps - initialize the following cpu maps:
* cpu_possible_map
* cpu_present_map
- * cpu_sibling_map
*
* Having the possible map set up early allows us to restrict allocations
* of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
@@ -475,29 +474,6 @@ void __init smp_setup_cpu_maps(void)
*/
cpu_init_thread_core_maps(nthreads);
}
-
-/*
- * Being that cpu_sibling_map is now a per_cpu array, then it cannot
- * be initialized until the per_cpu areas have been created. This
- * function is now called from setup_per_cpu_areas().
- */
-void __init smp_setup_cpu_sibling_map(void)
-{
-#ifdef CONFIG_PPC64
- int i, cpu, base;
-
- for_each_possible_cpu(cpu) {
- DBG("Sibling map for CPU %d:", cpu);
- base = cpu_first_thread_in_core(cpu);
- for (i = 0; i < threads_per_core; i++) {
- cpu_set(base + i, per_cpu(cpu_sibling_map, cpu));
- DBG(" %d", base + i);
- }
- DBG("\n");
- }
-
-#endif /* CONFIG_PPC64 */
-}
#endif /* CONFIG_SMP */
#ifdef CONFIG_PCSPKR_PLATFORM
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 04d8de9..8b25f51 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -611,9 +611,6 @@ void __init setup_per_cpu_areas(void)
paca[i].data_offset = ptr - __per_cpu_start;
memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
}
-
- /* Now that per_cpu is setup, initialize cpu_sibling_map */
- smp_setup_cpu_sibling_map();
}
#endif
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index 7aada78..a54405e 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -9,7 +9,7 @@
* this archive for more details.
*/
-#include <linux/ptrace.h>
+#include <linux/tracehook.h>
#include <linux/signal.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
@@ -112,7 +112,7 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
}
}
-int do_signal(sigset_t *oldset, struct pt_regs *regs)
+static int do_signal_pending(sigset_t *oldset, struct pt_regs *regs)
{
siginfo_t info;
int signr;
@@ -147,7 +147,7 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
*/
if (current->thread.dabr) {
set_dabr(current->thread.dabr);
-#if defined(CONFIG_44x) || defined(CONFIG_BOOKE)
+#if defined(CONFIG_BOOKE)
mtspr(SPRN_DBCR0, current->thread.dbcr0);
#endif
}
@@ -177,11 +177,28 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
* its frame, and we can clear the TLF_RESTORE_SIGMASK flag.
*/
current_thread_info()->local_flags &= ~_TLF_RESTORE_SIGMASK;
+
+ /*
+ * Let tracing know that we've done the handler setup.
+ */
+ tracehook_signal_handler(signr, &info, &ka, regs,
+ test_thread_flag(TIF_SINGLESTEP));
}
return ret;
}
+void do_signal(struct pt_regs *regs, unsigned long thread_info_flags)
+{
+ if (thread_info_flags & _TIF_SIGPENDING)
+ do_signal_pending(NULL, regs);
+
+ if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+ tracehook_notify_resume(regs);
+ }
+}
+
long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
unsigned long r5, unsigned long r6, unsigned long r7,
unsigned long r8, struct pt_regs *regs)
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index f5ae9fa..5337ca7 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -41,6 +41,7 @@
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
+#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/system.h>
#include <asm/mpic.h>
@@ -62,10 +63,12 @@ struct thread_info *secondary_ti;
cpumask_t cpu_possible_map = CPU_MASK_NONE;
cpumask_t cpu_online_map = CPU_MASK_NONE;
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
+DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
+EXPORT_PER_CPU_SYMBOL(cpu_core_map);
/* SMP operations for this machine */
struct smp_ops_t *smp_ops;
@@ -228,6 +231,8 @@ void __devinit smp_prepare_boot_cpu(void)
BUG_ON(smp_processor_id() != boot_cpuid);
cpu_set(boot_cpuid, cpu_online_map);
+ cpu_set(boot_cpuid, per_cpu(cpu_sibling_map, boot_cpuid));
+ cpu_set(boot_cpuid, per_cpu(cpu_core_map, boot_cpuid));
#ifdef CONFIG_PPC64
paca[boot_cpuid].__current = current;
#endif
@@ -375,11 +380,60 @@ int __cpuinit __cpu_up(unsigned int cpu)
return 0;
}
+/* Return the value of the reg property corresponding to the given
+ * logical cpu.
+ */
+int cpu_to_core_id(int cpu)
+{
+ struct device_node *np;
+ const int *reg;
+ int id = -1;
+
+ np = of_get_cpu_node(cpu, NULL);
+ if (!np)
+ goto out;
+
+ reg = of_get_property(np, "reg", NULL);
+ if (!reg)
+ goto out;
+
+ id = *reg;
+out:
+ of_node_put(np);
+ return id;
+}
+
+/* Must be called when no change can occur to cpu_present_map,
+ * i.e. during cpu online or offline.
+ */
+static struct device_node *cpu_to_l2cache(int cpu)
+{
+ struct device_node *np;
+ const phandle *php;
+ phandle ph;
+
+ if (!cpu_present(cpu))
+ return NULL;
+
+ np = of_get_cpu_node(cpu, NULL);
+ if (np == NULL)
+ return NULL;
+
+ php = of_get_property(np, "l2-cache", NULL);
+ if (php == NULL)
+ return NULL;
+ ph = *php;
+ of_node_put(np);
+
+ return of_find_node_by_phandle(ph);
+}
/* Activate a secondary processor. */
int __devinit start_secondary(void *unused)
{
unsigned int cpu = smp_processor_id();
+ struct device_node *l2_cache;
+ int i, base;
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
@@ -400,6 +454,33 @@ int __devinit start_secondary(void *unused)
ipi_call_lock();
cpu_set(cpu, cpu_online_map);
+ /* Update sibling maps */
+ base = cpu_first_thread_in_core(cpu);
+ for (i = 0; i < threads_per_core; i++) {
+ if (cpu_is_offline(base + i))
+ continue;
+ cpu_set(cpu, per_cpu(cpu_sibling_map, base + i));
+ cpu_set(base + i, per_cpu(cpu_sibling_map, cpu));
+
+ /* cpu_core_map should be a superset of
+ * cpu_sibling_map even if we don't have cache
+ * information, so update the former here, too.
+ */
+ cpu_set(cpu, per_cpu(cpu_core_map, base + i));
+ cpu_set(base + i, per_cpu(cpu_core_map, cpu));
+ }
+ l2_cache = cpu_to_l2cache(cpu);
+ for_each_online_cpu(i) {
+ struct device_node *np = cpu_to_l2cache(i);
+ if (!np)
+ continue;
+ if (np == l2_cache) {
+ cpu_set(cpu, per_cpu(cpu_core_map, i));
+ cpu_set(i, per_cpu(cpu_core_map, cpu));
+ }
+ of_node_put(np);
+ }
+ of_node_put(l2_cache);
ipi_call_unlock();
local_irq_enable();
@@ -437,10 +518,42 @@ void __init smp_cpus_done(unsigned int max_cpus)
#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
- if (smp_ops->cpu_disable)
- return smp_ops->cpu_disable();
+ struct device_node *l2_cache;
+ int cpu = smp_processor_id();
+ int base, i;
+ int err;
- return -ENOSYS;
+ if (!smp_ops->cpu_disable)
+ return -ENOSYS;
+
+ err = smp_ops->cpu_disable();
+ if (err)
+ return err;
+
+ /* Update sibling maps */
+ base = cpu_first_thread_in_core(cpu);
+ for (i = 0; i < threads_per_core; i++) {
+ cpu_clear(cpu, per_cpu(cpu_sibling_map, base + i));
+ cpu_clear(base + i, per_cpu(cpu_sibling_map, cpu));
+ cpu_clear(cpu, per_cpu(cpu_core_map, base + i));
+ cpu_clear(base + i, per_cpu(cpu_core_map, cpu));
+ }
+
+ l2_cache = cpu_to_l2cache(cpu);
+ for_each_present_cpu(i) {
+ struct device_node *np = cpu_to_l2cache(i);
+ if (!np)
+ continue;
+ if (np == l2_cache) {
+ cpu_clear(cpu, per_cpu(cpu_core_map, i));
+ cpu_clear(i, per_cpu(cpu_core_map, cpu));
+ }
+ of_node_put(np);
+ }
+ of_node_put(l2_cache);
+
+
+ return 0;
}
void __cpu_die(unsigned int cpu)
diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
index f258964..b0dbb1d 100644
--- a/arch/powerpc/kernel/stacktrace.c
+++ b/arch/powerpc/kernel/stacktrace.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>
-#include <linux/module.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 800e5e9..56d172d 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -22,6 +22,8 @@
static DEFINE_PER_CPU(struct cpu, cpu_devices);
+static DEFINE_PER_CPU(struct kobject *, cache_toplevel);
+
/* SMT stuff */
#ifdef CONFIG_PPC_MULTIPLATFORM
@@ -297,8 +299,289 @@ static struct sysdev_attribute pa6t_attrs[] = {
#endif /* CONFIG_DEBUG_KERNEL */
};
+struct cache_desc {
+ struct kobject kobj;
+ struct cache_desc *next;
+ const char *type; /* Instruction, Data, or Unified */
+ u32 size; /* total cache size in KB */
+ u32 line_size; /* in bytes */
+ u32 nr_sets; /* number of sets */
+ u32 level; /* e.g. 1, 2, 3... */
+ u32 associativity; /* e.g. 8-way... 0 is fully associative */
+};
+
+DEFINE_PER_CPU(struct cache_desc *, cache_desc);
+
+static struct cache_desc *kobj_to_cache_desc(struct kobject *k)
+{
+ return container_of(k, struct cache_desc, kobj);
+}
+
+static void cache_desc_release(struct kobject *k)
+{
+ struct cache_desc *desc = kobj_to_cache_desc(k);
+
+ pr_debug("%s: releasing %s\n", __func__, kobject_name(k));
+
+ if (desc->next)
+ kobject_put(&desc->next->kobj);
+
+ kfree(kobj_to_cache_desc(k));
+}
+
+static ssize_t cache_desc_show(struct kobject *k, struct attribute *attr, char *buf)
+{
+ struct kobj_attribute *kobj_attr;
+
+ kobj_attr = container_of(attr, struct kobj_attribute, attr);
+
+ return kobj_attr->show(k, kobj_attr, buf);
+}
+
+static struct sysfs_ops cache_desc_sysfs_ops = {
+ .show = cache_desc_show,
+};
+
+static struct kobj_type cache_desc_type = {
+ .release = cache_desc_release,
+ .sysfs_ops = &cache_desc_sysfs_ops,
+};
+
+static ssize_t cache_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+ struct cache_desc *cache = kobj_to_cache_desc(k);
+
+ return sprintf(buf, "%uK\n", cache->size);
+}
+
+static struct kobj_attribute cache_size_attr =
+ __ATTR(size, 0444, cache_size_show, NULL);
+
+static ssize_t cache_line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+ struct cache_desc *cache = kobj_to_cache_desc(k);
+
+ return sprintf(buf, "%u\n", cache->line_size);
+}
+
+static struct kobj_attribute cache_line_size_attr =
+ __ATTR(coherency_line_size, 0444, cache_line_size_show, NULL);
+
+static ssize_t cache_nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+ struct cache_desc *cache = kobj_to_cache_desc(k);
+
+ return sprintf(buf, "%u\n", cache->nr_sets);
+}
+
+static struct kobj_attribute cache_nr_sets_attr =
+ __ATTR(number_of_sets, 0444, cache_nr_sets_show, NULL);
+
+static ssize_t cache_type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+ struct cache_desc *cache = kobj_to_cache_desc(k);
+
+ return sprintf(buf, "%s\n", cache->type);
+}
+
+static struct kobj_attribute cache_type_attr =
+ __ATTR(type, 0444, cache_type_show, NULL);
+
+static ssize_t cache_level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+ struct cache_desc *cache = kobj_to_cache_desc(k);
+
+ return sprintf(buf, "%u\n", cache->level);
+}
+
+static struct kobj_attribute cache_level_attr =
+ __ATTR(level, 0444, cache_level_show, NULL);
+
+static ssize_t cache_assoc_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+ struct cache_desc *cache = kobj_to_cache_desc(k);
+
+ return sprintf(buf, "%u\n", cache->associativity);
+}
+
+static struct kobj_attribute cache_assoc_attr =
+ __ATTR(ways_of_associativity, 0444, cache_assoc_show, NULL);
+
+struct cache_desc_info {
+ const char *type;
+ const char *size_prop;
+ const char *line_size_prop;
+ const char *nr_sets_prop;
+};
+
+/* PowerPC Processor binding says the [di]-cache-* must be equal on
+ * unified caches, so just use d-cache properties. */
+static struct cache_desc_info ucache_info = {
+ .type = "Unified",
+ .size_prop = "d-cache-size",
+ .line_size_prop = "d-cache-line-size",
+ .nr_sets_prop = "d-cache-sets",
+};
-static void register_cpu_online(unsigned int cpu)
+static struct cache_desc_info dcache_info = {
+ .type = "Data",
+ .size_prop = "d-cache-size",
+ .line_size_prop = "d-cache-line-size",
+ .nr_sets_prop = "d-cache-sets",
+};
+
+static struct cache_desc_info icache_info = {
+ .type = "Instruction",
+ .size_prop = "i-cache-size",
+ .line_size_prop = "i-cache-line-size",
+ .nr_sets_prop = "i-cache-sets",
+};
+
+static struct cache_desc * __cpuinit create_cache_desc(struct device_node *np, struct kobject *parent, int index, int level, struct cache_desc_info *info)
+{
+ const u32 *cache_line_size;
+ struct cache_desc *new;
+ const u32 *cache_size;
+ const u32 *nr_sets;
+ int rc;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ rc = kobject_init_and_add(&new->kobj, &cache_desc_type, parent,
+ "index%d", index);
+ if (rc)
+ goto err;
+
+ /* type */
+ new->type = info->type;
+ rc = sysfs_create_file(&new->kobj, &cache_type_attr.attr);
+ WARN_ON(rc);
+
+ /* level */
+ new->level = level;
+ rc = sysfs_create_file(&new->kobj, &cache_level_attr.attr);
+ WARN_ON(rc);
+
+ /* size */
+ cache_size = of_get_property(np, info->size_prop, NULL);
+ if (cache_size) {
+ new->size = *cache_size / 1024;
+ rc = sysfs_create_file(&new->kobj,
+ &cache_size_attr.attr);
+ WARN_ON(rc);
+ }
+
+ /* coherency_line_size */
+ cache_line_size = of_get_property(np, info->line_size_prop, NULL);
+ if (cache_line_size) {
+ new->line_size = *cache_line_size;
+ rc = sysfs_create_file(&new->kobj,
+ &cache_line_size_attr.attr);
+ WARN_ON(rc);
+ }
+
+ /* number_of_sets */
+ nr_sets = of_get_property(np, info->nr_sets_prop, NULL);
+ if (nr_sets) {
+ new->nr_sets = *nr_sets;
+ rc = sysfs_create_file(&new->kobj,
+ &cache_nr_sets_attr.attr);
+ WARN_ON(rc);
+ }
+
+ /* ways_of_associativity */
+ if (new->nr_sets == 1) {
+ /* fully associative */
+ new->associativity = 0;
+ goto create_assoc;
+ }
+
+ if (new->nr_sets && new->size && new->line_size) {
+ /* If we have values for all of these we can derive
+ * the associativity. */
+ new->associativity =
+ ((new->size * 1024) / new->nr_sets) / new->line_size;
+create_assoc:
+ rc = sysfs_create_file(&new->kobj,
+ &cache_assoc_attr.attr);
+ WARN_ON(rc);
+ }
+
+ return new;
+err:
+ kfree(new);
+ return NULL;
+}
+
+static bool cache_is_unified(struct device_node *np)
+{
+ return of_get_property(np, "cache-unified", NULL);
+}
+
+static struct cache_desc * __cpuinit create_cache_index_info(struct device_node *np, struct kobject *parent, int index, int level)
+{
+ const phandle *next_cache_phandle;
+ struct device_node *next_cache;
+ struct cache_desc *new, **end;
+
+ pr_debug("%s(node = %s, index = %d)\n", __func__, np->full_name, index);
+
+ if (cache_is_unified(np)) {
+ new = create_cache_desc(np, parent, index, level,
+ &ucache_info);
+ } else {
+ new = create_cache_desc(np, parent, index, level,
+ &dcache_info);
+ if (new) {
+ index++;
+ new->next = create_cache_desc(np, parent, index, level,
+ &icache_info);
+ }
+ }
+ if (!new)
+ return NULL;
+
+ end = &new->next;
+ while (*end)
+ end = &(*end)->next;
+
+ next_cache_phandle = of_get_property(np, "l2-cache", NULL);
+ if (!next_cache_phandle)
+ goto out;
+
+ next_cache = of_find_node_by_phandle(*next_cache_phandle);
+ if (!next_cache)
+ goto out;
+
+ *end = create_cache_index_info(next_cache, parent, ++index, ++level);
+
+ of_node_put(next_cache);
+out:
+ return new;
+}
+
+static void __cpuinit create_cache_info(struct sys_device *sysdev)
+{
+ struct kobject *cache_toplevel;
+ struct device_node *np = NULL;
+ int cpu = sysdev->id;
+
+ cache_toplevel = kobject_create_and_add("cache", &sysdev->kobj);
+ if (!cache_toplevel)
+ return;
+ per_cpu(cache_toplevel, cpu) = cache_toplevel;
+ np = of_get_cpu_node(cpu, NULL);
+ if (np != NULL) {
+ per_cpu(cache_desc, cpu) =
+ create_cache_index_info(np, cache_toplevel, 0, 1);
+ of_node_put(np);
+ }
+ return;
+}
+
+static void __cpuinit register_cpu_online(unsigned int cpu)
{
struct cpu *c = &per_cpu(cpu_devices, cpu);
struct sys_device *s = &c->sysdev;
@@ -346,9 +629,33 @@ static void register_cpu_online(unsigned int cpu)
if (cpu_has_feature(CPU_FTR_DSCR))
sysdev_create_file(s, &attr_dscr);
+
+ create_cache_info(s);
}
#ifdef CONFIG_HOTPLUG_CPU
+static void remove_cache_info(struct sys_device *sysdev)
+{
+ struct kobject *cache_toplevel;
+ struct cache_desc *cache_desc;
+ int cpu = sysdev->id;
+
+ cache_desc = per_cpu(cache_desc, cpu);
+ if (cache_desc != NULL) {
+ sysfs_remove_file(&cache_desc->kobj, &cache_size_attr.attr);
+ sysfs_remove_file(&cache_desc->kobj, &cache_line_size_attr.attr);
+ sysfs_remove_file(&cache_desc->kobj, &cache_type_attr.attr);
+ sysfs_remove_file(&cache_desc->kobj, &cache_level_attr.attr);
+ sysfs_remove_file(&cache_desc->kobj, &cache_nr_sets_attr.attr);
+ sysfs_remove_file(&cache_desc->kobj, &cache_assoc_attr.attr);
+
+ kobject_put(&cache_desc->kobj);
+ }
+ cache_toplevel = per_cpu(cache_toplevel, cpu);
+ if (cache_toplevel != NULL)
+ kobject_put(cache_toplevel);
+}
+
static void unregister_cpu_online(unsigned int cpu)
{
struct cpu *c = &per_cpu(cpu_devices, cpu);
@@ -399,6 +706,8 @@ static void unregister_cpu_online(unsigned int cpu)
if (cpu_has_feature(CPU_FTR_DSCR))
sysdev_remove_file(s, &attr_dscr);
+
+ remove_cache_info(s);
}
#endif /* CONFIG_HOTPLUG_CPU */
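To make the associativity derivation in create_cache_desc() above concrete, a worked example with made-up but plausible device-tree values: d-cache-size = 32768 (so size = 32 KB), d-cache-sets = 64 and a 128-byte line give ((32 * 1024) / 64) / 128 = 4, i.e. the cache is reported as 4-way set-associative. A cache whose nr_sets property is 1 is reported with associativity 0, the convention used here for fully associative.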
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index f177c60..65639a4 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -788,9 +788,7 @@ static int __init vdso_init(void)
return 0;
}
-#ifdef CONFIG_PPC_MERGE
arch_initcall(vdso_init);
-#endif
int in_gate_area_no_task(unsigned long addr)
{
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index ade8aea..22a3c33 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -530,7 +530,7 @@ static dma_addr_t vio_dma_iommu_map_single(struct device *dev, void *vaddr,
}
ret = dma_iommu_ops.map_single(dev, vaddr, size, direction, attrs);
- if (unlikely(dma_mapping_error(ret))) {
+ if (unlikely(dma_mapping_error(dev, ret))) {
vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
atomic_inc(&viodev->cmo.allocs_failed);
}
@@ -1031,8 +1031,8 @@ void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
-static void vio_cmo_bus_init() {}
-static void vio_cmo_sysfs_init() { }
+static void vio_cmo_bus_init(void) {}
+static void vio_cmo_sysfs_init(void) { }
#endif /* CONFIG_PPC_SMLPAR */
EXPORT_SYMBOL(vio_cmo_entitlement_update);
EXPORT_SYMBOL(vio_cmo_set_dev_desired);
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 2a88e8b..d69912c 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -6,12 +6,10 @@ ifeq ($(CONFIG_PPC64),y)
EXTRA_CFLAGS += -mno-minimal-toc
endif
-ifeq ($(CONFIG_PPC_MERGE),y)
obj-y := string.o alloc.o \
checksum_$(CONFIG_WORD_SIZE).o
obj-$(CONFIG_PPC32) += div64.o copy_32.o crtsavres.o
obj-$(CONFIG_HAS_IOMEM) += devres.o
-endif
obj-$(CONFIG_PPC64) += copypage_64.o copyuser_64.o \
memcpy_64.o usercopy_64.o mem_64.o string.o
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 1c00e01..e7392b4 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -12,7 +12,8 @@ obj-y := fault.o mem.o \
mmu_context_$(CONFIG_WORD_SIZE).o
hash-$(CONFIG_PPC_NATIVE) := hash_native_64.o
obj-$(CONFIG_PPC64) += hash_utils_64.o \
- slb_low.o slb.o stab.o mmap.o $(hash-y)
+ slb_low.o slb.o stab.o \
+ gup.o mmap.o $(hash-y)
obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu_32.o
obj-$(CONFIG_PPC_STD_MMU) += hash_low_$(CONFIG_WORD_SIZE).o \
tlb_$(CONFIG_WORD_SIZE).o
diff --git a/arch/powerpc/mm/gup.c b/arch/powerpc/mm/gup.c
new file mode 100644
index 0000000..9fdf4d6
--- /dev/null
+++ b/arch/powerpc/mm/gup.c
@@ -0,0 +1,280 @@
+/*
+ * Lockless get_user_pages_fast for powerpc
+ *
+ * Copyright (C) 2008 Nick Piggin
+ * Copyright (C) 2008 Novell Inc.
+ */
+#undef DEBUG
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/vmstat.h>
+#include <linux/pagemap.h>
+#include <linux/rwsem.h>
+#include <asm/pgtable.h>
+
+/*
+ * The performance critical leaf functions are made noinline otherwise gcc
+ * inlines everything into a single function which results in too much
+ * register pressure.
+ */
+static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
+ unsigned long end, int write, struct page **pages, int *nr)
+{
+ unsigned long mask, result;
+ pte_t *ptep;
+
+ result = _PAGE_PRESENT|_PAGE_USER;
+ if (write)
+ result |= _PAGE_RW;
+ mask = result | _PAGE_SPECIAL;
+
+ ptep = pte_offset_kernel(&pmd, addr);
+ do {
+ pte_t pte = *ptep;
+ struct page *page;
+
+ if ((pte_val(pte) & mask) != result)
+ return 0;
+ VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+ page = pte_page(pte);
+ if (!page_cache_get_speculative(page))
+ return 0;
+ if (unlikely(pte != *ptep)) {
+ put_page(page);
+ return 0;
+ }
+ pages[*nr] = page;
+ (*nr)++;
+
+ } while (ptep++, addr += PAGE_SIZE, addr != end);
+
+ return 1;
+}
+
+#ifdef CONFIG_HUGETLB_PAGE
+static noinline int gup_huge_pte(pte_t *ptep, struct hstate *hstate,
+ unsigned long *addr, unsigned long end,
+ int write, struct page **pages, int *nr)
+{
+ unsigned long mask;
+ unsigned long pte_end;
+ struct page *head, *page;
+ pte_t pte;
+ int refs;
+
+ pte_end = (*addr + huge_page_size(hstate)) & huge_page_mask(hstate);
+ if (pte_end < end)
+ end = pte_end;
+
+ pte = *ptep;
+ mask = _PAGE_PRESENT|_PAGE_USER;
+ if (write)
+ mask |= _PAGE_RW;
+ if ((pte_val(pte) & mask) != mask)
+ return 0;
+ /* hugepages are never "special" */
+ VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+
+ refs = 0;
+ head = pte_page(pte);
+ page = head + ((*addr & ~huge_page_mask(hstate)) >> PAGE_SHIFT);
+ do {
+ VM_BUG_ON(compound_head(page) != head);
+ pages[*nr] = page;
+ (*nr)++;
+ page++;
+ refs++;
+ } while (*addr += PAGE_SIZE, *addr != end);
+
+ if (!page_cache_add_speculative(head, refs)) {
+ *nr -= refs;
+ return 0;
+ }
+ if (unlikely(pte != *ptep)) {
+ /* Could be optimized better */
+ while (*nr) {
+ put_page(page);
+ (*nr)--;
+ }
+ }
+
+ return 1;
+}
+#endif /* CONFIG_HUGETLB_PAGE */
+
+static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
+ int write, struct page **pages, int *nr)
+{
+ unsigned long next;
+ pmd_t *pmdp;
+
+ pmdp = pmd_offset(&pud, addr);
+ do {
+ pmd_t pmd = *pmdp;
+
+ next = pmd_addr_end(addr, end);
+ if (pmd_none(pmd))
+ return 0;
+ if (!gup_pte_range(pmd, addr, next, write, pages, nr))
+ return 0;
+ } while (pmdp++, addr = next, addr != end);
+
+ return 1;
+}
+
+static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
+ int write, struct page **pages, int *nr)
+{
+ unsigned long next;
+ pud_t *pudp;
+
+ pudp = pud_offset(&pgd, addr);
+ do {
+ pud_t pud = *pudp;
+
+ next = pud_addr_end(addr, end);
+ if (pud_none(pud))
+ return 0;
+ if (!gup_pmd_range(pud, addr, next, write, pages, nr))
+ return 0;
+ } while (pudp++, addr = next, addr != end);
+
+ return 1;
+}
+
+int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ struct page **pages)
+{
+ struct mm_struct *mm = current->mm;
+ unsigned long addr, len, end;
+ unsigned long next;
+ pgd_t *pgdp;
+ int psize, nr = 0;
+ unsigned int shift;
+
+ pr_debug("%s(%lx,%x,%s)\n", __func__, start, nr_pages, write ? "write" : "read");
+
+ start &= PAGE_MASK;
+ addr = start;
+ len = (unsigned long) nr_pages << PAGE_SHIFT;
+ end = start + len;
+
+ if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
+ start, len)))
+ goto slow_irqon;
+
+ pr_debug(" aligned: %lx .. %lx\n", start, end);
+
+#ifdef CONFIG_HUGETLB_PAGE
+ /* We bail out on slice boundary crossing when hugetlb is
+ * enabled in order to not have to deal with two different
+ * page table formats
+ */
+ if (addr < SLICE_LOW_TOP) {
+ if (end > SLICE_LOW_TOP)
+ goto slow_irqon;
+
+ if (unlikely(GET_LOW_SLICE_INDEX(addr) !=
+ GET_LOW_SLICE_INDEX(end - 1)))
+ goto slow_irqon;
+ } else {
+ if (unlikely(GET_HIGH_SLICE_INDEX(addr) !=
+ GET_HIGH_SLICE_INDEX(end - 1)))
+ goto slow_irqon;
+ }
+#endif /* CONFIG_HUGETLB_PAGE */
+
+ /*
+ * XXX: batch / limit 'nr' to avoid large irq-off latency; this
+ * needs some instrumenting to determine the common sizes used by
+ * important workloads (eg. DB2), and whether limiting the batch size
+ * will decrease performance.
+ *
+ * It seems like we're in the clear for the moment. Direct-IO is
+ * the main guy that batches up lots of get_user_pages, and even
+ * they are limited to 64-at-a-time which is not so many.
+ */
+ /*
+ * This doesn't prevent pagetable teardown, but does prevent
+ * the pagetables from being freed on powerpc.
+ *
+ * So long as we atomically load page table pointers versus teardown,
+ * we can follow the address down to the page and take a ref on it.
+ */
+ local_irq_disable();
+
+ psize = get_slice_psize(mm, addr);
+ shift = mmu_psize_defs[psize].shift;
+
+#ifdef CONFIG_HUGETLB_PAGE
+ if (unlikely(mmu_huge_psizes[psize])) {
+ pte_t *ptep;
+ unsigned long a = addr;
+ unsigned long sz = ((1UL) << shift);
+ struct hstate *hstate = size_to_hstate(sz);
+
+ BUG_ON(!hstate);
+ /*
+ * XXX: could be optimized to avoid hstate
+ * lookup entirely (just use shift)
+ */
+
+ do {
+ VM_BUG_ON(shift != mmu_psize_defs[get_slice_psize(mm, a)].shift);
+ ptep = huge_pte_offset(mm, a);
+ pr_debug(" %016lx: huge ptep %p\n", a, ptep);
+ if (!ptep || !gup_huge_pte(ptep, hstate, &a, end, write, pages,
+ &nr))
+ goto slow;
+ } while (a != end);
+ } else
+#endif /* CONFIG_HUGETLB_PAGE */
+ {
+ pgdp = pgd_offset(mm, addr);
+ do {
+ pgd_t pgd = *pgdp;
+
+ VM_BUG_ON(shift != mmu_psize_defs[get_slice_psize(mm, addr)].shift);
+ pr_debug(" %016lx: normal pgd %p\n", addr, (void *)pgd);
+ next = pgd_addr_end(addr, end);
+ if (pgd_none(pgd))
+ goto slow;
+ if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+ goto slow;
+ } while (pgdp++, addr = next, addr != end);
+ }
+ local_irq_enable();
+
+ VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
+ return nr;
+
+ {
+ int ret;
+
+slow:
+ local_irq_enable();
+slow_irqon:
+ pr_debug(" slow path ! nr = %d\n", nr);
+
+ /* Try to get the remaining pages with get_user_pages */
+ start += nr << PAGE_SHIFT;
+ pages += nr;
+
+ down_read(&mm->mmap_sem);
+ ret = get_user_pages(current, mm, start,
+ (end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
+ up_read(&mm->mmap_sem);
+
+ /* Have to be a bit careful with return values */
+ if (nr > 0) {
+ if (ret < 0)
+ ret = nr;
+ else
+ ret += nr;
+ }
+
+ return ret;
+ }
+}
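
For context (not part of the patch): a minimal sketch, under assumed names, of how an in-kernel caller such as a direct-I/O style path might use the new fast path. Only get_user_pages_fast() and put_page() are real interfaces here; the helper, its name and its error handling are invented for illustration.

    #include <linux/kernel.h>
    #include <linux/errno.h>
    #include <linux/mm.h>

    /* Illustrative only: pin a user buffer with the fast path and
     * drop the references again on partial failure. */
    static int example_pin_user_buffer(unsigned long uaddr, size_t len,
                                       struct page **pages, int max_pages)
    {
            int nr_pages = DIV_ROUND_UP((uaddr & ~PAGE_MASK) + len, PAGE_SIZE);
            int i, got;

            if (nr_pages > max_pages)
                    return -EINVAL;

            /* write == 1: we intend to write to the pages; the call may
             * internally fall back to the slow get_user_pages() path. */
            got = get_user_pages_fast(uaddr, nr_pages, 1, pages);
            if (got < nr_pages) {
                    for (i = 0; i < got; i++)
                            put_page(pages[i]);
                    return got < 0 ? got : -EFAULT;
            }
            return got;
    }

On a short return the sketch simply drops whatever was pinned and reports a fault, which mirrors how most callers treat a partial result from the slow path as well.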
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index ed0aab0..f1c2d55 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -736,14 +736,21 @@ static int __init hugetlbpage_init(void)
if (!cpu_has_feature(CPU_FTR_16M_PAGE))
return -ENODEV;
+
/* Add supported huge page sizes. Need to change HUGE_MAX_HSTATE
* and adjust PTE_NONCACHE_NUM if the number of supported huge page
* sizes changes.
*/
set_huge_psize(MMU_PAGE_16M);
- set_huge_psize(MMU_PAGE_64K);
set_huge_psize(MMU_PAGE_16G);
+ /* Temporarily disable support for 64K huge pages when 64K SPU local
+ * store support is enabled as the current implementation conflicts.
+ */
+#ifndef CONFIG_SPU_FS_64K_LS
+ set_huge_psize(MMU_PAGE_64K);
+#endif
+
for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
if (mmu_huge_psizes[psize]) {
huge_pgtable_cache(psize) = kmem_cache_create(
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 702691c..1c93c25 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -311,7 +311,7 @@ void __init paging_init(void)
#endif /* CONFIG_HIGHMEM */
printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%lx\n",
- (u64)top_of_ram, total_ram);
+ (unsigned long long)top_of_ram, total_ram);
printk(KERN_DEBUG "Memory hole size: %ldMB\n",
(long int)((top_of_ram - total_ram) >> 20));
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index c53145f..6aa1208 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -236,8 +236,8 @@ void __init MMU_init_hw(void)
Hash_end = (struct hash_pte *) ((unsigned long)Hash + Hash_size);
- printk("Total memory = %ldMB; using %ldkB for hash table (at %p)\n",
- total_memory >> 20, Hash_size >> 10, Hash);
+ printk("Total memory = %lldMB; using %ldkB for hash table (at %p)\n",
+ (unsigned long long)(total_memory >> 20), Hash_size >> 10, Hash);
/*
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index 409fcc7..be7dd42 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -34,7 +34,7 @@
DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
/* This is declared as we are using the more or less generic
- * include/asm-powerpc/tlb.h file -- tgall
+ * arch/powerpc/include/asm/tlb.h file -- tgall
*/
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
diff --git a/arch/powerpc/platforms/52xx/Makefile b/arch/powerpc/platforms/52xx/Makefile
index daf0e15..b8a5206 100644
--- a/arch/powerpc/platforms/52xx/Makefile
+++ b/arch/powerpc/platforms/52xx/Makefile
@@ -1,10 +1,8 @@
#
# Makefile for 52xx based boards
#
-ifeq ($(CONFIG_PPC_MERGE),y)
obj-y += mpc52xx_pic.o mpc52xx_common.o
obj-$(CONFIG_PCI) += mpc52xx_pci.o
-endif
obj-$(CONFIG_PPC_MPC5200_SIMPLE) += mpc5200_simple.o
obj-$(CONFIG_PPC_EFIKA) += efika.o
@@ -15,4 +13,4 @@ ifeq ($(CONFIG_PPC_LITE5200),y)
obj-$(CONFIG_PM) += lite5200_sleep.o lite5200_pm.o
endif
-obj-$(CONFIG_PPC_MPC5200_GPIO) += mpc52xx_gpio.o
\ No newline at end of file
+obj-$(CONFIG_PPC_MPC5200_GPIO) += mpc52xx_gpio.o
diff --git a/arch/powerpc/platforms/83xx/mpc832x_mds.c b/arch/powerpc/platforms/83xx/mpc832x_mds.c
index dd4be4a..ec43477 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_mds.c
@@ -105,6 +105,7 @@ static void __init mpc832x_sys_setup_arch(void)
static struct of_device_id mpc832x_ids[] = {
{ .type = "soc", },
{ .compatible = "soc", },
+ { .compatible = "simple-bus", },
{ .type = "qe", },
{ .compatible = "fsl,qe", },
{},
diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
index f049d69..0300268 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
@@ -115,6 +115,7 @@ static void __init mpc832x_rdb_setup_arch(void)
static struct of_device_id mpc832x_ids[] = {
{ .type = "soc", },
{ .compatible = "soc", },
+ { .compatible = "simple-bus", },
{ .type = "qe", },
{ .compatible = "fsl,qe", },
{},
diff --git a/arch/powerpc/platforms/83xx/mpc834x_itx.c b/arch/powerpc/platforms/83xx/mpc834x_itx.c
index 7301d77..76092d3 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_itx.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_itx.c
@@ -41,6 +41,7 @@
static struct of_device_id __initdata mpc834x_itx_ids[] = {
{ .compatible = "fsl,pq2pro-localbus", },
+ { .compatible = "simple-bus", },
{},
};
diff --git a/arch/powerpc/platforms/83xx/mpc834x_mds.c b/arch/powerpc/platforms/83xx/mpc834x_mds.c
index 30d509a..fc3f2ed 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_mds.c
@@ -111,6 +111,7 @@ static void __init mpc834x_mds_init_IRQ(void)
static struct of_device_id mpc834x_ids[] = {
{ .type = "soc", },
{ .compatible = "soc", },
+ { .compatible = "simple-bus", },
{},
};
diff --git a/arch/powerpc/platforms/83xx/mpc836x_mds.c b/arch/powerpc/platforms/83xx/mpc836x_mds.c
index 75b80e8..9d46e5b 100644
--- a/arch/powerpc/platforms/83xx/mpc836x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc836x_mds.c
@@ -136,6 +136,7 @@ static void __init mpc836x_mds_setup_arch(void)
static struct of_device_id mpc836x_ids[] = {
{ .type = "soc", },
{ .compatible = "soc", },
+ { .compatible = "simple-bus", },
{ .type = "qe", },
{ .compatible = "fsl,qe", },
{},
diff --git a/arch/powerpc/platforms/83xx/sbc834x.c b/arch/powerpc/platforms/83xx/sbc834x.c
index fc21f5c..156c4e2 100644
--- a/arch/powerpc/platforms/83xx/sbc834x.c
+++ b/arch/powerpc/platforms/83xx/sbc834x.c
@@ -83,6 +83,7 @@ static void __init sbc834x_init_IRQ(void)
static struct __initdata of_device_id sbc834x_ids[] = {
{ .type = "soc", },
{ .compatible = "soc", },
+ { .compatible = "simple-bus", },
{},
};
diff --git a/arch/powerpc/platforms/85xx/ksi8560.c b/arch/powerpc/platforms/85xx/ksi8560.c
index 2145ade..8a3b117 100644
--- a/arch/powerpc/platforms/85xx/ksi8560.c
+++ b/arch/powerpc/platforms/85xx/ksi8560.c
@@ -222,6 +222,7 @@ static void ksi8560_show_cpuinfo(struct seq_file *m)
static struct of_device_id __initdata of_bus_ids[] = {
{ .type = "soc", },
+ { .type = "simple-bus", },
{ .name = "cpm", },
{ .name = "localbus", },
{},
diff --git a/arch/powerpc/platforms/85xx/mpc8536_ds.c b/arch/powerpc/platforms/85xx/mpc8536_ds.c
index 6b846aa..1bf5aef 100644
--- a/arch/powerpc/platforms/85xx/mpc8536_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc8536_ds.c
@@ -91,6 +91,7 @@ static void __init mpc8536_ds_setup_arch(void)
static struct of_device_id __initdata mpc8536_ds_ids[] = {
{ .type = "soc", },
{ .compatible = "soc", },
+ { .compatible = "simple-bus", },
{},
};
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
index ba498d6..d17807a 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ads.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
@@ -230,6 +230,7 @@ static struct of_device_id __initdata of_bus_ids[] = {
{ .type = "soc", },
{ .name = "cpm", },
{ .name = "localbus", },
+ { .compatible = "simple-bus", },
{},
};
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
index 00c5358..483b65c 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
@@ -186,6 +186,7 @@ static int __init mpc8544_ds_probe(void)
static struct of_device_id __initdata mpc85xxds_ids[] = {
{ .type = "soc", },
{ .compatible = "soc", },
+ { .compatible = "simple-bus", },
{},
};
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index 43a459f..2494c51 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -260,6 +260,7 @@ machine_arch_initcall(mpc85xx_mds, board_fixups);
static struct of_device_id mpc85xx_ids[] = {
{ .type = "soc", },
{ .compatible = "soc", },
+ { .compatible = "simple-bus", },
{ .type = "qe", },
{ .compatible = "fsl,qe", },
{},
diff --git a/arch/powerpc/platforms/85xx/sbc8560.c b/arch/powerpc/platforms/85xx/sbc8560.c
index 2c580cd..6509ade 100644
--- a/arch/powerpc/platforms/85xx/sbc8560.c
+++ b/arch/powerpc/platforms/85xx/sbc8560.c
@@ -217,6 +217,7 @@ static struct of_device_id __initdata of_bus_ids[] = {
{ .type = "soc", },
{ .name = "cpm", },
{ .name = "localbus", },
+ { .compatible = "simple-bus", },
{},
};
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_smp.c b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
index 835f2dc..014e26c 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_smp.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
@@ -19,7 +19,7 @@
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pci-bridge.h>
-#include <asm-powerpc/mpic.h>
+#include <asm/mpic.h>
#include <asm/mpc86xx.h>
#include <asm/cacheflush.h>
diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig
index 6fc849e..71d7562 100644
--- a/arch/powerpc/platforms/8xx/Kconfig
+++ b/arch/powerpc/platforms/8xx/Kconfig
@@ -105,6 +105,16 @@ config 8xx_COPYBACK
If in doubt, say Y here.
+config 8xx_GPIO
+ bool "GPIO API Support"
+ select GENERIC_GPIO
+ select ARCH_REQUIRE_GPIOLIB
+ help
+ Saying Y here will cause the ports on an MPC8xx processor to be used
+ with the GPIO API. If you say N here, the kernel needs less memory.
+
+ If in doubt, say Y here.
+
config 8xx_CPU6
bool "CPU6 Silicon Errata (860 Pre Rev. C)"
help
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 1d09687..4c900ef 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -254,6 +254,8 @@ config CPM2
select CPM
select PPC_LIB_RHEAP
select PPC_PCI_CHOICE
+ select ARCH_REQUIRE_GPIOLIB
+ select GENERIC_GPIO
help
The CPM2 (Communications Processor Module) is a coprocessor on
embedded CPUs made by Freescale. Selecting this option means that
@@ -281,6 +283,7 @@ config FSL_ULI1575
config CPM
bool
+ select PPC_CLOCK
config OF_RTC
bool
diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile
index 423a023..8079e0b 100644
--- a/arch/powerpc/platforms/Makefile
+++ b/arch/powerpc/platforms/Makefile
@@ -1,13 +1,7 @@
obj-$(CONFIG_FSL_ULI1575) += fsl_uli1575.o
-ifeq ($(CONFIG_PPC_MERGE),y)
obj-$(CONFIG_PPC_PMAC) += powermac/
-else
-ifeq ($(CONFIG_PPC64),y)
-obj-$(CONFIG_PPC_PMAC) += powermac/
-endif
-endif
obj-$(CONFIG_PPC_CHRP) += chrp/
obj-$(CONFIG_40x) += 40x/
obj-$(CONFIG_44x) += 44x/
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c b/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c
index 69288f6..3233fe8 100644
--- a/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c
+++ b/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c
@@ -96,6 +96,12 @@ static int pmi_notifier(struct notifier_block *nb,
struct cpufreq_frequency_table *cbe_freqs;
u8 node;
+ /* Should this really be called for CPUFREQ_ADJUST, CPUFREQ_INCOMPATIBLE
+ * and CPUFREQ_NOTIFY policy events?
+ */
+ if (event == CPUFREQ_START)
+ return 0;
+
cbe_freqs = cpufreq_frequency_get_table(policy->cpu);
node = cbe_cpu_to_node(policy->cpu);
diff --git a/arch/powerpc/platforms/powermac/Makefile b/arch/powerpc/platforms/powermac/Makefile
index 8977417..58ecdd7 100644
--- a/arch/powerpc/platforms/powermac/Makefile
+++ b/arch/powerpc/platforms/powermac/Makefile
@@ -7,7 +7,7 @@ endif
obj-y += pic.o setup.o time.o feature.o pci.o \
sleep.o low_i2c.o cache.o pfunc_core.o \
- pfunc_base.o
+ pfunc_base.o udbg_scc.o udbg_adb.o
obj-$(CONFIG_PMAC_BACKLIGHT) += backlight.o
obj-$(CONFIG_CPU_FREQ_PMAC) += cpufreq_32.o
obj-$(CONFIG_CPU_FREQ_PMAC64) += cpufreq_64.o
@@ -19,4 +19,3 @@ obj-$(CONFIG_NVRAM:m=y) += nvram.o
obj-$(CONFIG_PPC64) += nvram.o
obj-$(CONFIG_PPC32) += bootx_init.o
obj-$(CONFIG_SMP) += smp.o
-obj-$(CONFIG_PPC_MERGE) += udbg_scc.o udbg_adb.o
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 3163544..88ccf3a 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -541,6 +541,78 @@ static int __init pmac_declare_of_platform_devices(void)
}
machine_device_initcall(powermac, pmac_declare_of_platform_devices);
+#ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE
+/*
+ * This is called very early, as part of console_init() (typically just after
+ * time_init()). This function is responsible for trying to find a good
+ * default console on serial ports. It tries to match the Open Firmware
+ * default output with one of the available serial console drivers.
+ */
+static int __init check_pmac_serial_console(void)
+{
+ struct device_node *prom_stdout = NULL;
+ int offset = 0;
+ const char *name;
+#ifdef CONFIG_SERIAL_PMACZILOG_TTYS
+ char *devname = "ttyS";
+#else
+ char *devname = "ttyPZ";
+#endif
+
+ pr_debug(" -> check_pmac_serial_console()\n");
+
+ /* The user has requested a console so this is already set up. */
+ if (strstr(boot_command_line, "console=")) {
+ pr_debug(" console was specified !\n");
+ return -EBUSY;
+ }
+
+ if (!of_chosen) {
+ pr_debug(" of_chosen is NULL !\n");
+ return -ENODEV;
+ }
+
+ /* We are getting a weird phandle from OF ... */
+ /* ... So use the full path instead */
+ name = of_get_property(of_chosen, "linux,stdout-path", NULL);
+ if (name == NULL) {
+ pr_debug(" no linux,stdout-path !\n");
+ return -ENODEV;
+ }
+ prom_stdout = of_find_node_by_path(name);
+ if (!prom_stdout) {
+ pr_debug(" can't find stdout package %s !\n", name);
+ return -ENODEV;
+ }
+ pr_debug("stdout is %s\n", prom_stdout->full_name);
+
+ name = of_get_property(prom_stdout, "name", NULL);
+ if (!name) {
+ pr_debug(" stdout package has no name !\n");
+ goto not_found;
+ }
+
+ if (strcmp(name, "ch-a") == 0)
+ offset = 0;
+ else if (strcmp(name, "ch-b") == 0)
+ offset = 1;
+ else
+ goto not_found;
+ of_node_put(prom_stdout);
+
+ pr_debug("Found serial console at %s%d\n", devname, offset);
+
+ return add_preferred_console(devname, offset, NULL);
+
+ not_found:
+ pr_debug("No preferred console found !\n");
+ of_node_put(prom_stdout);
+ return -ENODEV;
+}
+console_initcall(check_pmac_serial_console);
+
+#endif /* CONFIG_SERIAL_PMACZILOG_CONSOLE */
+
/*
* Called very early, MMU is off, device-tree isn't unflattened
*/
diff --git a/arch/powerpc/platforms/powermac/udbg_scc.c b/arch/powerpc/platforms/powermac/udbg_scc.c
index 47de4d3..572771f 100644
--- a/arch/powerpc/platforms/powermac/udbg_scc.c
+++ b/arch/powerpc/platforms/powermac/udbg_scc.c
@@ -125,13 +125,23 @@ void udbg_scc_init(int force_scc)
out_8(sccc, 0xc0);
/* If SCC was the OF output port, read the BRG value, else
- * Setup for 57600 8N1
+ * Setup for 38400 or 57600 8N1 depending on the machine
*/
if (ch_def != NULL) {
out_8(sccc, 13);
scc_inittab[1] = in_8(sccc);
out_8(sccc, 12);
scc_inittab[3] = in_8(sccc);
+ } else if (machine_is_compatible("RackMac1,1")
+ || machine_is_compatible("RackMac1,2")
+ || machine_is_compatible("MacRISC4")) {
+ /* Xserves and G5s default to 57600 */
+ scc_inittab[1] = 0;
+ scc_inittab[3] = 0;
+ } else {
+ /* Others default to 38400 */
+ scc_inittab[1] = 0;
+ scc_inittab[3] = 1;
}
for (i = 0; i < sizeof(scc_inittab); ++i)
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
index c6b3be0..38fe32a 100644
--- a/arch/powerpc/platforms/pseries/cmm.c
+++ b/arch/powerpc/platforms/pseries/cmm.c
@@ -289,7 +289,9 @@ static int cmm_thread(void *dummy)
}
#define CMM_SHOW(name, format, args...) \
- static ssize_t show_##name(struct sys_device *dev, char *buf) \
+ static ssize_t show_##name(struct sys_device *dev, \
+ struct sysdev_attribute *attr, \
+ char *buf) \
{ \
return sprintf(buf, format, ##args); \
} \
@@ -298,12 +300,14 @@ static int cmm_thread(void *dummy)
CMM_SHOW(loaned_kb, "%lu\n", PAGES2KB(loaned_pages));
CMM_SHOW(loaned_target_kb, "%lu\n", PAGES2KB(loaned_pages_target));
-static ssize_t show_oom_pages(struct sys_device *dev, char *buf)
+static ssize_t show_oom_pages(struct sys_device *dev,
+ struct sysdev_attribute *attr, char *buf)
{
return sprintf(buf, "%lu\n", PAGES2KB(oom_freed_pages));
}
static ssize_t store_oom_pages(struct sys_device *dev,
+ struct sysdev_attribute *attr,
const char *buf, size_t count)
{
unsigned long val = simple_strtoul (buf, NULL, 10);
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 16a0ed2..a90054b 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -25,7 +25,6 @@ obj-$(CONFIG_MV64X60) += $(mv64x60-y) mv64x60_pic.o mv64x60_dev.o \
obj-$(CONFIG_RTC_DRV_CMOS) += rtc_cmos_setup.o
obj-$(CONFIG_AXON_RAM) += axonram.o
-ifeq ($(CONFIG_PPC_MERGE),y)
obj-$(CONFIG_PPC_INDIRECT_PCI) += indirect_pci.o
obj-$(CONFIG_PPC_I8259) += i8259.o
obj-$(CONFIG_IPIC) += ipic.o
@@ -36,7 +35,6 @@ obj-$(CONFIG_OF_RTC) += of_rtc.o
ifeq ($(CONFIG_PCI),y)
obj-$(CONFIG_4xx) += ppc4xx_pci.o
endif
-endif
# Temporary hack until we have migrated to asm-powerpc
ifeq ($(ARCH),powerpc)
diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c
index 661df42..4a04823 100644
--- a/arch/powerpc/sysdev/cpm1.c
+++ b/arch/powerpc/sysdev/cpm1.c
@@ -30,6 +30,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
+#include <linux/spinlock.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/8xx_immap.h>
@@ -42,6 +43,10 @@
#include <asm/fs_pd.h>
+#ifdef CONFIG_8xx_GPIO
+#include <linux/of_gpio.h>
+#endif
+
#define CPM_MAP_SIZE (0x4000)
cpm8xx_t __iomem *cpmp; /* Pointer to comm processor space */
@@ -290,20 +295,24 @@ struct cpm_ioport16 {
__be16 res[3];
};
-struct cpm_ioport32 {
- __be32 dir, par, sor;
+struct cpm_ioport32b {
+ __be32 dir, par, odr, dat;
+};
+
+struct cpm_ioport32e {
+ __be32 dir, par, sor, odr, dat;
};
static void cpm1_set_pin32(int port, int pin, int flags)
{
- struct cpm_ioport32 __iomem *iop;
+ struct cpm_ioport32e __iomem *iop;
pin = 1 << (31 - pin);
if (port == CPM_PORTB)
- iop = (struct cpm_ioport32 __iomem *)
+ iop = (struct cpm_ioport32e __iomem *)
&mpc8xx_immr->im_cpm.cp_pbdir;
else
- iop = (struct cpm_ioport32 __iomem *)
+ iop = (struct cpm_ioport32e __iomem *)
&mpc8xx_immr->im_cpm.cp_pedir;
if (flags & CPM_PIN_OUTPUT)
@@ -498,3 +507,251 @@ int cpm1_clk_setup(enum cpm_clk_target target, int clock, int mode)
return 0;
}
+
+/*
+ * GPIO LIB API implementation
+ */
+#ifdef CONFIG_8xx_GPIO
+
+struct cpm1_gpio16_chip {
+ struct of_mm_gpio_chip mm_gc;
+ spinlock_t lock;
+
+ /* shadowed data register to clear/set bits safely */
+ u16 cpdata;
+};
+
+static inline struct cpm1_gpio16_chip *
+to_cpm1_gpio16_chip(struct of_mm_gpio_chip *mm_gc)
+{
+ return container_of(mm_gc, struct cpm1_gpio16_chip, mm_gc);
+}
+
+static void cpm1_gpio16_save_regs(struct of_mm_gpio_chip *mm_gc)
+{
+ struct cpm1_gpio16_chip *cpm1_gc = to_cpm1_gpio16_chip(mm_gc);
+ struct cpm_ioport16 __iomem *iop = mm_gc->regs;
+
+ cpm1_gc->cpdata = in_be16(&iop->dat);
+}
+
+static int cpm1_gpio16_get(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct cpm_ioport16 __iomem *iop = mm_gc->regs;
+ u16 pin_mask;
+
+ pin_mask = 1 << (15 - gpio);
+
+ return !!(in_be16(&iop->dat) & pin_mask);
+}
+
+static void cpm1_gpio16_set(struct gpio_chip *gc, unsigned int gpio, int value)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct cpm1_gpio16_chip *cpm1_gc = to_cpm1_gpio16_chip(mm_gc);
+ struct cpm_ioport16 __iomem *iop = mm_gc->regs;
+ unsigned long flags;
+ u16 pin_mask = 1 << (15 - gpio);
+
+ spin_lock_irqsave(&cpm1_gc->lock, flags);
+
+ if (value)
+ cpm1_gc->cpdata |= pin_mask;
+ else
+ cpm1_gc->cpdata &= ~pin_mask;
+
+ out_be16(&iop->dat, cpm1_gc->cpdata);
+
+ spin_unlock_irqrestore(&cpm1_gc->lock, flags);
+}
+
+static int cpm1_gpio16_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct cpm_ioport16 __iomem *iop = mm_gc->regs;
+ u16 pin_mask;
+
+ pin_mask = 1 << (15 - gpio);
+
+ setbits16(&iop->dir, pin_mask);
+
+ cpm1_gpio16_set(gc, gpio, val);
+
+ return 0;
+}
+
+static int cpm1_gpio16_dir_in(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct cpm_ioport16 __iomem *iop = mm_gc->regs;
+ u16 pin_mask;
+
+ pin_mask = 1 << (15 - gpio);
+
+ clrbits16(&iop->dir, pin_mask);
+
+ return 0;
+}
+
+int cpm1_gpiochip_add16(struct device_node *np)
+{
+ struct cpm1_gpio16_chip *cpm1_gc;
+ struct of_mm_gpio_chip *mm_gc;
+ struct of_gpio_chip *of_gc;
+ struct gpio_chip *gc;
+
+ cpm1_gc = kzalloc(sizeof(*cpm1_gc), GFP_KERNEL);
+ if (!cpm1_gc)
+ return -ENOMEM;
+
+ spin_lock_init(&cpm1_gc->lock);
+
+ mm_gc = &cpm1_gc->mm_gc;
+ of_gc = &mm_gc->of_gc;
+ gc = &of_gc->gc;
+
+ mm_gc->save_regs = cpm1_gpio16_save_regs;
+ of_gc->gpio_cells = 2;
+ gc->ngpio = 16;
+ gc->direction_input = cpm1_gpio16_dir_in;
+ gc->direction_output = cpm1_gpio16_dir_out;
+ gc->get = cpm1_gpio16_get;
+ gc->set = cpm1_gpio16_set;
+
+ return of_mm_gpiochip_add(np, mm_gc);
+}
+
+struct cpm1_gpio32_chip {
+ struct of_mm_gpio_chip mm_gc;
+ spinlock_t lock;
+
+ /* shadowed data register to clear/set bits safely */
+ u32 cpdata;
+};
+
+static inline struct cpm1_gpio32_chip *
+to_cpm1_gpio32_chip(struct of_mm_gpio_chip *mm_gc)
+{
+ return container_of(mm_gc, struct cpm1_gpio32_chip, mm_gc);
+}
+
+static void cpm1_gpio32_save_regs(struct of_mm_gpio_chip *mm_gc)
+{
+ struct cpm1_gpio32_chip *cpm1_gc = to_cpm1_gpio32_chip(mm_gc);
+ struct cpm_ioport32b __iomem *iop = mm_gc->regs;
+
+ cpm1_gc->cpdata = in_be32(&iop->dat);
+}
+
+static int cpm1_gpio32_get(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct cpm_ioport32b __iomem *iop = mm_gc->regs;
+ u32 pin_mask;
+
+ pin_mask = 1 << (31 - gpio);
+
+ return !!(in_be32(&iop->dat) & pin_mask);
+}
+
+static void cpm1_gpio32_set(struct gpio_chip *gc, unsigned int gpio, int value)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct cpm1_gpio32_chip *cpm1_gc = to_cpm1_gpio32_chip(mm_gc);
+ struct cpm_ioport32b __iomem *iop = mm_gc->regs;
+ unsigned long flags;
+ u32 pin_mask = 1 << (31 - gpio);
+
+ spin_lock_irqsave(&cpm1_gc->lock, flags);
+
+ if (value)
+ cpm1_gc->cpdata |= pin_mask;
+ else
+ cpm1_gc->cpdata &= ~pin_mask;
+
+ out_be32(&iop->dat, cpm1_gc->cpdata);
+
+ spin_unlock_irqrestore(&cpm1_gc->lock, flags);
+}
+
+static int cpm1_gpio32_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct cpm_ioport32b __iomem *iop = mm_gc->regs;
+ u32 pin_mask;
+
+ pin_mask = 1 << (31 - gpio);
+
+ setbits32(&iop->dir, pin_mask);
+
+ cpm1_gpio32_set(gc, gpio, val);
+
+ return 0;
+}
+
+static int cpm1_gpio32_dir_in(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct cpm_ioport32b __iomem *iop = mm_gc->regs;
+ u32 pin_mask;
+
+ pin_mask = 1 << (31 - gpio);
+
+ clrbits32(&iop->dir, pin_mask);
+
+ return 0;
+}
+
+int cpm1_gpiochip_add32(struct device_node *np)
+{
+ struct cpm1_gpio32_chip *cpm1_gc;
+ struct of_mm_gpio_chip *mm_gc;
+ struct of_gpio_chip *of_gc;
+ struct gpio_chip *gc;
+
+ cpm1_gc = kzalloc(sizeof(*cpm1_gc), GFP_KERNEL);
+ if (!cpm1_gc)
+ return -ENOMEM;
+
+ spin_lock_init(&cpm1_gc->lock);
+
+ mm_gc = &cpm1_gc->mm_gc;
+ of_gc = &mm_gc->of_gc;
+ gc = &of_gc->gc;
+
+ mm_gc->save_regs = cpm1_gpio32_save_regs;
+ of_gc->gpio_cells = 2;
+ gc->ngpio = 32;
+ gc->direction_input = cpm1_gpio32_dir_in;
+ gc->direction_output = cpm1_gpio32_dir_out;
+ gc->get = cpm1_gpio32_get;
+ gc->set = cpm1_gpio32_set;
+
+ return of_mm_gpiochip_add(np, mm_gc);
+}
+
+static int cpm_init_par_io(void)
+{
+ struct device_node *np;
+
+ for_each_compatible_node(np, NULL, "fsl,cpm1-pario-bank-a")
+ cpm1_gpiochip_add16(np);
+
+ for_each_compatible_node(np, NULL, "fsl,cpm1-pario-bank-b")
+ cpm1_gpiochip_add32(np);
+
+ for_each_compatible_node(np, NULL, "fsl,cpm1-pario-bank-c")
+ cpm1_gpiochip_add16(np);
+
+ for_each_compatible_node(np, NULL, "fsl,cpm1-pario-bank-d")
+ cpm1_gpiochip_add16(np);
+
+ /* Port E uses CPM2 layout */
+ for_each_compatible_node(np, NULL, "fsl,cpm1-pario-bank-e")
+ cpm2_gpiochip_add32(np);
+ return 0;
+}
+arch_initcall(cpm_init_par_io);
+
+#endif /* CONFIG_8xx_GPIO */
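
For context (not part of the patch): once cpm1_gpiochip_add16()/cpm1_gpiochip_add32() have registered a bank via of_mm_gpiochip_add(), consumers reach the pins through the generic gpiolib calls. A minimal sketch follows; the GPIO number and label are made up, and the comments only indicate which of the handlers above gpiolib would dispatch to.

    #include <linux/gpio.h>

    #define EXAMPLE_GPIO    5       /* made up: board-specific GPIO number */

    /* Illustrative only: drive one CPM1 port pin through gpiolib. */
    static int example_pulse_pin(void)
    {
            int ret;

            ret = gpio_request(EXAMPLE_GPIO, "example-pin");
            if (ret)
                    return ret;

            /* Output, initially low -> cpm1_gpio16_dir_out() on a 16-bit bank */
            ret = gpio_direction_output(EXAMPLE_GPIO, 0);
            if (ret) {
                    gpio_free(EXAMPLE_GPIO);
                    return ret;
            }

            gpio_set_value(EXAMPLE_GPIO, 1);        /* -> cpm1_gpio16_set() */
            gpio_set_value(EXAMPLE_GPIO, 0);
            gpio_free(EXAMPLE_GPIO);
            return 0;
    }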
diff --git a/arch/powerpc/sysdev/cpm2.c b/arch/powerpc/sysdev/cpm2.c
index 5a6c5df..f1c3395 100644
--- a/arch/powerpc/sysdev/cpm2.c
+++ b/arch/powerpc/sysdev/cpm2.c
@@ -115,16 +115,10 @@ EXPORT_SYMBOL(cpm_command);
* Baud rate clocks are zero-based in the driver code (as that maps
* to port numbers). Documentation uses 1-based numbering.
*/
-#define BRG_INT_CLK (get_brgfreq())
-#define BRG_UART_CLK (BRG_INT_CLK/16)
-
-/* This function is used by UARTS, or anything else that uses a 16x
- * oversampled clock.
- */
-void
-cpm_setbrg(uint brg, uint rate)
+void __cpm2_setbrg(uint brg, uint rate, uint clk, int div16, int src)
{
u32 __iomem *bp;
+ u32 val;
/* This is good enough to get SMCs running.....
*/
@@ -135,34 +129,14 @@ cpm_setbrg(uint brg, uint rate)
brg -= 4;
}
bp += brg;
- out_be32(bp, (((BRG_UART_CLK / rate) - 1) << 1) | CPM_BRG_EN);
-
- cpm2_unmap(bp);
-}
-
-/* This function is used to set high speed synchronous baud rate
- * clocks.
- */
-void
-cpm2_fastbrg(uint brg, uint rate, int div16)
-{
- u32 __iomem *bp;
- u32 val;
-
- if (brg < 4) {
- bp = cpm2_map_size(im_brgc1, 16);
- } else {
- bp = cpm2_map_size(im_brgc5, 16);
- brg -= 4;
- }
- bp += brg;
- val = ((BRG_INT_CLK / rate) << 1) | CPM_BRG_EN;
+ val = (((clk / rate) - 1) << 1) | CPM_BRG_EN | src;
if (div16)
val |= CPM_BRG_DIV16;
out_be32(bp, val);
cpm2_unmap(bp);
}
+EXPORT_SYMBOL(__cpm2_setbrg);
int cpm2_clk_setup(enum cpm_clk_target target, int clock, int mode)
{
@@ -377,3 +351,14 @@ void cpm2_set_pin(int port, int pin, int flags)
else
clrbits32(&iop[port].odr, pin);
}
+
+static int cpm_init_par_io(void)
+{
+ struct device_node *np;
+
+ for_each_compatible_node(np, NULL, "fsl,cpm2-pario-bank")
+ cpm2_gpiochip_add32(np);
+ return 0;
+}
+arch_initcall(cpm_init_par_io);
+
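
For context (not part of the patch): the old cpm_setbrg()/cpm2_fastbrg() pair collapses into the single __cpm2_setbrg() above, with the caller now supplying the input clock, the divide-by-16 flag and the clock source. A small standalone sketch of the register value it programs; the clock and baud numbers are made up, and the two mask values are assumptions intended to mirror asm/cpm2.h.

    #include <stdio.h>

    #define CPM_BRG_EN      0x00010000u     /* assumed: matches asm/cpm2.h */
    #define CPM_BRG_DIV16   0x00000001u     /* assumed: matches asm/cpm2.h */

    /* Mirrors the value written to the BRG configuration register by
     * __cpm2_setbrg(brg, rate, clk, div16, src). */
    static unsigned int brg_value(unsigned int clk, unsigned int rate,
                                  int div16, unsigned int src)
    {
            unsigned int val = (((clk / rate) - 1) << 1) | CPM_BRG_EN | src;

            if (div16)
                    val |= CPM_BRG_DIV16;
            return val;
    }

    int main(void)
    {
            unsigned int brgclk = 100000000;        /* made-up BRG input clock */

            /* A 16x-oversampled UART at 115200 baud: divide the clock by 16
             * before computing the divider, as the old cpm_setbrg() did. */
            printf("BRGC = 0x%08x\n", brg_value(brgclk / 16, 115200, 0, 0));
            return 0;
    }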
diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c
index e4b7296..53da8a07 100644
--- a/arch/powerpc/sysdev/cpm_common.c
+++ b/arch/powerpc/sysdev/cpm_common.c
@@ -19,6 +19,8 @@
#include <linux/init.h>
#include <linux/of_device.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
#include <asm/udbg.h>
#include <asm/io.h>
@@ -28,6 +30,10 @@
#include <mm/mmu_decl.h>
+#if defined(CONFIG_CPM2) || defined(CONFIG_8xx_GPIO)
+#include <linux/of_gpio.h>
+#endif
+
#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
static u32 __iomem *cpm_udbg_txdesc =
(u32 __iomem __force *)CONFIG_PPC_EARLY_DEBUG_CPM_ADDR;
@@ -207,3 +213,120 @@ dma_addr_t cpm_muram_dma(void __iomem *addr)
return muram_pbase + ((u8 __iomem *)addr - muram_vbase);
}
EXPORT_SYMBOL(cpm_muram_dma);
+
+#if defined(CONFIG_CPM2) || defined(CONFIG_8xx_GPIO)
+
+struct cpm2_ioports {
+ u32 dir, par, sor, odr, dat;
+ u32 res[3];
+};
+
+struct cpm2_gpio32_chip {
+ struct of_mm_gpio_chip mm_gc;
+ spinlock_t lock;
+
+ /* shadowed data register to clear/set bits safely */
+ u32 cpdata;
+};
+
+static inline struct cpm2_gpio32_chip *
+to_cpm2_gpio32_chip(struct of_mm_gpio_chip *mm_gc)
+{
+ return container_of(mm_gc, struct cpm2_gpio32_chip, mm_gc);
+}
+
+static void cpm2_gpio32_save_regs(struct of_mm_gpio_chip *mm_gc)
+{
+ struct cpm2_gpio32_chip *cpm2_gc = to_cpm2_gpio32_chip(mm_gc);
+ struct cpm2_ioports __iomem *iop = mm_gc->regs;
+
+ cpm2_gc->cpdata = in_be32(&iop->dat);
+}
+
+static int cpm2_gpio32_get(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct cpm2_ioports __iomem *iop = mm_gc->regs;
+ u32 pin_mask;
+
+ pin_mask = 1 << (31 - gpio);
+
+ return !!(in_be32(&iop->dat) & pin_mask);
+}
+
+static void cpm2_gpio32_set(struct gpio_chip *gc, unsigned int gpio, int value)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct cpm2_gpio32_chip *cpm2_gc = to_cpm2_gpio32_chip(mm_gc);
+ struct cpm2_ioports __iomem *iop = mm_gc->regs;
+ unsigned long flags;
+ u32 pin_mask = 1 << (31 - gpio);
+
+ spin_lock_irqsave(&cpm2_gc->lock, flags);
+
+ if (value)
+ cpm2_gc->cpdata |= pin_mask;
+ else
+ cpm2_gc->cpdata &= ~pin_mask;
+
+ out_be32(&iop->dat, cpm2_gc->cpdata);
+
+ spin_unlock_irqrestore(&cpm2_gc->lock, flags);
+}
+
+static int cpm2_gpio32_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct cpm2_ioports __iomem *iop = mm_gc->regs;
+ u32 pin_mask;
+
+ pin_mask = 1 << (31 - gpio);
+
+ setbits32(&iop->dir, pin_mask);
+
+ cpm2_gpio32_set(gc, gpio, val);
+
+ return 0;
+}
+
+static int cpm2_gpio32_dir_in(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct cpm2_ioports __iomem *iop = mm_gc->regs;
+ u32 pin_mask;
+
+ pin_mask = 1 << (31 - gpio);
+
+ clrbits32(&iop->dir, pin_mask);
+
+ return 0;
+}
+
+int cpm2_gpiochip_add32(struct device_node *np)
+{
+ struct cpm2_gpio32_chip *cpm2_gc;
+ struct of_mm_gpio_chip *mm_gc;
+ struct of_gpio_chip *of_gc;
+ struct gpio_chip *gc;
+
+ cpm2_gc = kzalloc(sizeof(*cpm2_gc), GFP_KERNEL);
+ if (!cpm2_gc)
+ return -ENOMEM;
+
+ spin_lock_init(&cpm2_gc->lock);
+
+ mm_gc = &cpm2_gc->mm_gc;
+ of_gc = &mm_gc->of_gc;
+ gc = &of_gc->gc;
+
+ mm_gc->save_regs = cpm2_gpio32_save_regs;
+ of_gc->gpio_cells = 2;
+ gc->ngpio = 32;
+ gc->direction_input = cpm2_gpio32_dir_in;
+ gc->direction_output = cpm2_gpio32_dir_out;
+ gc->get = cpm2_gpio32_get;
+ gc->set = cpm2_gpio32_set;
+
+ return of_mm_gpiochip_add(np, mm_gc);
+}
+#endif /* CONFIG_CPM2 || CONFIG_8xx_GPIO */
diff --git a/arch/powerpc/sysdev/rtc_cmos_setup.c b/arch/powerpc/sysdev/rtc_cmos_setup.c
index c09ddc0..c1879eb 100644
--- a/arch/powerpc/sysdev/rtc_cmos_setup.c
+++ b/arch/powerpc/sysdev/rtc_cmos_setup.c
@@ -21,6 +21,7 @@ static int __init add_rtc(void)
struct device_node *np;
struct platform_device *pd;
struct resource res[2];
+ unsigned int num_res = 1;
int ret;
memset(&res, 0, sizeof(res));
@@ -41,14 +42,24 @@ static int __init add_rtc(void)
if (res[0].start != RTC_PORT(0))
return -EINVAL;
- /* Use a fixed interrupt value of 8 since on PPC if we are using this
- * its off an i8259 which we ensure has interrupt numbers 0..15. */
- res[1].start = 8;
- res[1].end = 8;
- res[1].flags = IORESOURCE_IRQ;
+ np = of_find_compatible_node(NULL, NULL, "chrp,iic");
+ if (!np)
+ np = of_find_compatible_node(NULL, NULL, "pnpPNP,000");
+ if (np) {
+ of_node_put(np);
+ /*
+ * Use a fixed interrupt value of 8 since on PPC if we are
+ * using this its off an i8259 which we ensure has interrupt
+ * numbers 0..15.
+ */
+ res[1].start = 8;
+ res[1].end = 8;
+ res[1].flags = IORESOURCE_IRQ;
+ num_res++;
+ }
pd = platform_device_register_simple("rtc_cmos", -1,
- &res[0], 2);
+ &res[0], num_res);
if (IS_ERR(pd))
return PTR_ERR(pd);