author | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 15:20:36 -0700
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 15:20:36 -0700
commit | 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree | 0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/alpha/kernel
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/alpha/kernel')
76 files changed, 34945 insertions, 0 deletions
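For orientation before the diff body: the controller files below (core_apecs.c, core_cia.c, core_irongate.c) each carry a mk_conf_addr() helper that packs bus, device/function, and register numbers into a single PCI configuration-space address, with bus 0 selecting a Type 0 cycle addressed by device/function alone and any other bus a Type 1 cycle that bridges pass through. A minimal standalone sketch of that encoding — mk_conf_addr_sketch() is an illustrative name, not kernel API:

        #include <stdio.h>

        /* Sketch of the Type 0/Type 1 layout documented in the
           mk_conf_addr() comments in the diff below. */
        static unsigned long
        mk_conf_addr_sketch(unsigned int bus, unsigned int device_fn, int where)
        {
                if (bus == 0)           /* Type 0: the hose's own bus */
                        return ((unsigned long)device_fn << 8) | where;
                /* Type 1: forwarded through PCI-PCI bridges */
                return ((unsigned long)bus << 16) |
                       ((unsigned long)device_fn << 8) | where;
        }

        int main(void)
        {
                unsigned int devfn = (3 << 3) | 1;      /* device 3, function 1 */
                printf("type 0: 0x%lx\n", mk_conf_addr_sketch(0, devfn, 0x10));
                printf("type 1: 0x%lx\n", mk_conf_addr_sketch(2, devfn, 0x10));
                return 0;
        }

The controllers then shift this logical address into their sparse config region — e.g. (pci_addr << 5) + mask + APECS_CONF in the APECS code — with the access size encoded in the low bits via mask = (size - 1) * 8.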
diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile new file mode 100644 index 0000000..ab6fa54 --- /dev/null +++ b/arch/alpha/kernel/Makefile @@ -0,0 +1,104 @@ +# +# Makefile for the linux kernel. +# + +extra-y := head.o vmlinux.lds +EXTRA_AFLAGS := $(CFLAGS) +EXTRA_CFLAGS := -Werror -Wno-sign-compare + +obj-y := entry.o traps.o process.o init_task.o osf_sys.o irq.o \ + irq_alpha.o signal.o setup.o ptrace.o time.o semaphore.o \ + alpha_ksyms.o systbls.o err_common.o io.o + +obj-$(CONFIG_VGA_HOSE) += console.o +obj-$(CONFIG_SMP) += smp.o +obj-$(CONFIG_PCI) += pci.o pci_iommu.o +obj-$(CONFIG_SRM_ENV) += srm_env.o +obj-$(CONFIG_MODULES) += module.o + +ifdef CONFIG_ALPHA_GENERIC + +obj-y += core_apecs.o core_cia.o core_irongate.o core_lca.o \ + core_mcpcia.o core_polaris.o core_t2.o \ + core_tsunami.o + +obj-y += sys_alcor.o sys_cabriolet.o sys_dp264.o sys_eb64p.o sys_eiger.o \ + sys_jensen.o sys_miata.o sys_mikasa.o sys_nautilus.o \ + sys_noritake.o sys_rawhide.o sys_ruffian.o sys_rx164.o \ + sys_sable.o sys_sio.o sys_sx164.o sys_takara.o + +ifndef CONFIG_ALPHA_LEGACY_START_ADDRESS +obj-y += core_marvel.o core_titan.o core_wildfire.o +obj-y += sys_marvel.o sys_titan.o sys_wildfire.o +obj-y += err_ev7.o err_titan.o err_marvel.o +endif + +obj-y += irq_pyxis.o irq_i8259.o irq_srm.o +obj-y += err_ev6.o +obj-y += es1888.o smc37c669.o smc37c93x.o ns87312.o gct.o +obj-y += srmcons.o + +else + +# Misc support +obj-$(CONFIG_ALPHA_SRM) += srmcons.o + +# Core logic support +obj-$(CONFIG_ALPHA_APECS) += core_apecs.o +obj-$(CONFIG_ALPHA_CIA) += core_cia.o +obj-$(CONFIG_ALPHA_IRONGATE) += core_irongate.o +obj-$(CONFIG_ALPHA_LCA) += core_lca.o +obj-$(CONFIG_ALPHA_MARVEL) += core_marvel.o gct.o +obj-$(CONFIG_ALPHA_MCPCIA) += core_mcpcia.o +obj-$(CONFIG_ALPHA_POLARIS) += core_polaris.o +obj-$(CONFIG_ALPHA_T2) += core_t2.o +obj-$(CONFIG_ALPHA_TSUNAMI) += core_tsunami.o +obj-$(CONFIG_ALPHA_TITAN) += core_titan.o +obj-$(CONFIG_ALPHA_WILDFIRE) += core_wildfire.o + +# Board support +obj-$(CONFIG_ALPHA_ALCOR) += sys_alcor.o irq_i8259.o irq_srm.o +obj-$(CONFIG_ALPHA_CABRIOLET) += sys_cabriolet.o irq_i8259.o irq_srm.o \ + ns87312.o +obj-$(CONFIG_ALPHA_EB164) += sys_cabriolet.o irq_i8259.o irq_srm.o \ + ns87312.o +obj-$(CONFIG_ALPHA_EB66P) += sys_cabriolet.o irq_i8259.o irq_srm.o \ + ns87312.o +obj-$(CONFIG_ALPHA_LX164) += sys_cabriolet.o irq_i8259.o irq_srm.o \ + smc37c93x.o +obj-$(CONFIG_ALPHA_PC164) += sys_cabriolet.o irq_i8259.o irq_srm.o \ + smc37c93x.o +obj-$(CONFIG_ALPHA_DP264) += sys_dp264.o irq_i8259.o es1888.o smc37c669.o +obj-$(CONFIG_ALPHA_SHARK) += sys_dp264.o irq_i8259.o es1888.o smc37c669.o +obj-$(CONFIG_ALPHA_TITAN) += sys_titan.o irq_i8259.o smc37c669.o +obj-$(CONFIG_ALPHA_EB64P) += sys_eb64p.o irq_i8259.o +obj-$(CONFIG_ALPHA_EB66) += sys_eb64p.o irq_i8259.o +obj-$(CONFIG_ALPHA_EIGER) += sys_eiger.o irq_i8259.o +obj-$(CONFIG_ALPHA_JENSEN) += sys_jensen.o pci-noop.o irq_i8259.o +obj-$(CONFIG_ALPHA_MARVEL) += sys_marvel.o +obj-$(CONFIG_ALPHA_MIATA) += sys_miata.o irq_pyxis.o irq_i8259.o \ + es1888.o smc37c669.o +obj-$(CONFIG_ALPHA_MIKASA) += sys_mikasa.o irq_i8259.o irq_srm.o +obj-$(CONFIG_ALPHA_NAUTILUS) += sys_nautilus.o irq_i8259.o irq_srm.o +obj-$(CONFIG_ALPHA_NORITAKE) += sys_noritake.o irq_i8259.o +obj-$(CONFIG_ALPHA_RAWHIDE) += sys_rawhide.o irq_i8259.o +obj-$(CONFIG_ALPHA_RUFFIAN) += sys_ruffian.o irq_pyxis.o irq_i8259.o +obj-$(CONFIG_ALPHA_RX164) += sys_rx164.o irq_i8259.o +obj-$(CONFIG_ALPHA_SABLE) += sys_sable.o +obj-$(CONFIG_ALPHA_LYNX) += sys_sable.o 
+obj-$(CONFIG_ALPHA_BOOK1) += sys_sio.o irq_i8259.o irq_srm.o ns87312.o +obj-$(CONFIG_ALPHA_AVANTI) += sys_sio.o irq_i8259.o irq_srm.o ns87312.o +obj-$(CONFIG_ALPHA_NONAME) += sys_sio.o irq_i8259.o irq_srm.o ns87312.o +obj-$(CONFIG_ALPHA_P2K) += sys_sio.o irq_i8259.o irq_srm.o ns87312.o +obj-$(CONFIG_ALPHA_XL) += sys_sio.o irq_i8259.o irq_srm.o ns87312.o +obj-$(CONFIG_ALPHA_SX164) += sys_sx164.o irq_pyxis.o irq_i8259.o \ + irq_srm.o smc37c669.o +obj-$(CONFIG_ALPHA_TAKARA) += sys_takara.o irq_i8259.o ns87312.o +obj-$(CONFIG_ALPHA_WILDFIRE) += sys_wildfire.o irq_i8259.o + +# Error support +obj-$(CONFIG_ALPHA_MARVEL) += err_ev7.o err_marvel.o +obj-$(CONFIG_ALPHA_NAUTILUS) += err_ev6.o +obj-$(CONFIG_ALPHA_TITAN) += err_ev6.o err_titan.o + +endif # GENERIC diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c new file mode 100644 index 0000000..fc5ef90 --- /dev/null +++ b/arch/alpha/kernel/alpha_ksyms.c @@ -0,0 +1,235 @@ +/* + * linux/arch/alpha/kernel/ksyms.c + * + * Export the alpha-specific functions that are needed for loadable + * modules. + */ + +#include <linux/config.h> +#include <linux/module.h> +#include <linux/string.h> +#include <linux/user.h> +#include <linux/elfcore.h> +#include <linux/socket.h> +#include <linux/syscalls.h> +#include <linux/in.h> +#include <linux/in6.h> +#include <linux/pci.h> +#include <linux/tty.h> +#include <linux/mm.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> + +#include <asm/io.h> +#include <asm/console.h> +#include <asm/hwrpb.h> +#include <asm/uaccess.h> +#include <asm/processor.h> +#include <asm/checksum.h> +#include <linux/interrupt.h> +#include <asm/fpu.h> +#include <asm/irq.h> +#include <asm/machvec.h> +#include <asm/pgalloc.h> +#include <asm/semaphore.h> +#include <asm/tlbflush.h> +#include <asm/cacheflush.h> +#include <asm/vga.h> + +#define __KERNEL_SYSCALLS__ +#include <asm/unistd.h> + +extern struct hwrpb_struct *hwrpb; +extern void dump_thread(struct pt_regs *, struct user *); +extern spinlock_t rtc_lock; + +/* these are C runtime functions with special calling conventions: */ +extern void __divl (void); +extern void __reml (void); +extern void __divq (void); +extern void __remq (void); +extern void __divlu (void); +extern void __remlu (void); +extern void __divqu (void); +extern void __remqu (void); + +EXPORT_SYMBOL(alpha_mv); +EXPORT_SYMBOL(enable_irq); +EXPORT_SYMBOL(disable_irq); +EXPORT_SYMBOL(disable_irq_nosync); +EXPORT_SYMBOL(probe_irq_mask); +EXPORT_SYMBOL(screen_info); +EXPORT_SYMBOL(perf_irq); +EXPORT_SYMBOL(callback_getenv); +EXPORT_SYMBOL(callback_setenv); +EXPORT_SYMBOL(callback_save_env); +#ifdef CONFIG_ALPHA_GENERIC +EXPORT_SYMBOL(alpha_using_srm); +#endif /* CONFIG_ALPHA_GENERIC */ + +/* platform dependent support */ +EXPORT_SYMBOL(strcat); +EXPORT_SYMBOL(strcmp); +EXPORT_SYMBOL(strcpy); +EXPORT_SYMBOL(strlen); +EXPORT_SYMBOL(strncmp); +EXPORT_SYMBOL(strncpy); +EXPORT_SYMBOL(strnlen); +EXPORT_SYMBOL(strncat); +EXPORT_SYMBOL(strstr); +EXPORT_SYMBOL(strpbrk); +EXPORT_SYMBOL(strchr); +EXPORT_SYMBOL(strrchr); +EXPORT_SYMBOL(memcmp); +EXPORT_SYMBOL(memmove); +EXPORT_SYMBOL(memscan); +EXPORT_SYMBOL(__memcpy); +EXPORT_SYMBOL(__memset); +EXPORT_SYMBOL(__memsetw); +EXPORT_SYMBOL(__constant_c_memset); +EXPORT_SYMBOL(copy_page); +EXPORT_SYMBOL(clear_page); + +EXPORT_SYMBOL(__direct_map_base); +EXPORT_SYMBOL(__direct_map_size); + +#ifdef CONFIG_PCI +EXPORT_SYMBOL(pci_alloc_consistent); +EXPORT_SYMBOL(pci_free_consistent); +EXPORT_SYMBOL(pci_map_single); +EXPORT_SYMBOL(pci_map_page); 
+EXPORT_SYMBOL(pci_unmap_single); +EXPORT_SYMBOL(pci_unmap_page); +EXPORT_SYMBOL(pci_map_sg); +EXPORT_SYMBOL(pci_unmap_sg); +EXPORT_SYMBOL(pci_dma_supported); +EXPORT_SYMBOL(pci_dac_dma_supported); +EXPORT_SYMBOL(pci_dac_page_to_dma); +EXPORT_SYMBOL(pci_dac_dma_to_page); +EXPORT_SYMBOL(pci_dac_dma_to_offset); +EXPORT_SYMBOL(alpha_gendev_to_pci); +#endif +EXPORT_SYMBOL(dma_set_mask); + +EXPORT_SYMBOL(dump_thread); +EXPORT_SYMBOL(dump_elf_thread); +EXPORT_SYMBOL(dump_elf_task); +EXPORT_SYMBOL(dump_elf_task_fp); +EXPORT_SYMBOL(hwrpb); +EXPORT_SYMBOL(start_thread); +EXPORT_SYMBOL(alpha_read_fp_reg); +EXPORT_SYMBOL(alpha_read_fp_reg_s); +EXPORT_SYMBOL(alpha_write_fp_reg); +EXPORT_SYMBOL(alpha_write_fp_reg_s); + +/* In-kernel system calls. */ +EXPORT_SYMBOL(kernel_thread); +EXPORT_SYMBOL(sys_open); +EXPORT_SYMBOL(sys_dup); +EXPORT_SYMBOL(sys_exit); +EXPORT_SYMBOL(sys_write); +EXPORT_SYMBOL(sys_read); +EXPORT_SYMBOL(sys_lseek); +EXPORT_SYMBOL(execve); +EXPORT_SYMBOL(sys_setsid); +EXPORT_SYMBOL(sys_wait4); + +/* Networking helper routines. */ +EXPORT_SYMBOL(csum_tcpudp_magic); +EXPORT_SYMBOL(ip_compute_csum); +EXPORT_SYMBOL(ip_fast_csum); +EXPORT_SYMBOL(csum_partial_copy_nocheck); +EXPORT_SYMBOL(csum_partial_copy_from_user); +EXPORT_SYMBOL(csum_ipv6_magic); + +#ifdef CONFIG_MATHEMU_MODULE +extern long (*alpha_fp_emul_imprecise)(struct pt_regs *, unsigned long); +extern long (*alpha_fp_emul) (unsigned long pc); +EXPORT_SYMBOL(alpha_fp_emul_imprecise); +EXPORT_SYMBOL(alpha_fp_emul); +#endif + +#ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK +EXPORT_SYMBOL(__min_ipl); +#endif + +/* + * The following are specially called from the uaccess assembly stubs. + */ +EXPORT_SYMBOL(__copy_user); +EXPORT_SYMBOL(__do_clear_user); +EXPORT_SYMBOL(__strncpy_from_user); +EXPORT_SYMBOL(__strnlen_user); + +/* Semaphore helper functions. */ +EXPORT_SYMBOL(__down_failed); +EXPORT_SYMBOL(__down_failed_interruptible); +EXPORT_SYMBOL(__up_wakeup); +EXPORT_SYMBOL(down); +EXPORT_SYMBOL(down_interruptible); +EXPORT_SYMBOL(down_trylock); +EXPORT_SYMBOL(up); + +/* + * SMP-specific symbols. + */ + +#ifdef CONFIG_SMP +EXPORT_SYMBOL(synchronize_irq); +EXPORT_SYMBOL(flush_tlb_mm); +EXPORT_SYMBOL(flush_tlb_range); +EXPORT_SYMBOL(flush_tlb_page); +EXPORT_SYMBOL(smp_imb); +EXPORT_SYMBOL(cpu_data); +EXPORT_SYMBOL(smp_num_cpus); +EXPORT_SYMBOL(smp_call_function); +EXPORT_SYMBOL(smp_call_function_on_cpu); +EXPORT_SYMBOL(_atomic_dec_and_lock); +#ifdef CONFIG_DEBUG_SPINLOCK +EXPORT_SYMBOL(_raw_spin_unlock); +EXPORT_SYMBOL(debug_spin_lock); +EXPORT_SYMBOL(debug_spin_trylock); +#endif +#ifdef CONFIG_DEBUG_RWLOCK +EXPORT_SYMBOL(_raw_write_lock); +EXPORT_SYMBOL(_raw_read_lock); +#endif +EXPORT_SYMBOL(cpu_present_mask); +#endif /* CONFIG_SMP */ + +/* + * NUMA specific symbols + */ +#ifdef CONFIG_DISCONTIGMEM +EXPORT_SYMBOL(node_data); +#endif /* CONFIG_DISCONTIGMEM */ + +EXPORT_SYMBOL(rtc_lock); + +/* + * The following are special because they're not called + * explicitly (the C compiler or assembler generates them in + * response to division operations). Fortunately, their + * interface isn't gonna change any time soon now, so it's OK + * to leave it out of version control. 
+ */ +# undef memcpy +# undef memset +EXPORT_SYMBOL(__divl); +EXPORT_SYMBOL(__divlu); +EXPORT_SYMBOL(__divq); +EXPORT_SYMBOL(__divqu); +EXPORT_SYMBOL(__reml); +EXPORT_SYMBOL(__remlu); +EXPORT_SYMBOL(__remq); +EXPORT_SYMBOL(__remqu); +EXPORT_SYMBOL(memcpy); +EXPORT_SYMBOL(memset); +EXPORT_SYMBOL(memchr); + +EXPORT_SYMBOL(get_wchan); + +#ifdef CONFIG_ALPHA_IRONGATE +EXPORT_SYMBOL(irongate_ioremap); +EXPORT_SYMBOL(irongate_iounmap); +#endif diff --git a/arch/alpha/kernel/asm-offsets.c b/arch/alpha/kernel/asm-offsets.c new file mode 100644 index 0000000..8f2e5c7 --- /dev/null +++ b/arch/alpha/kernel/asm-offsets.c @@ -0,0 +1,43 @@ +/* + * Generate definitions needed by assembly language modules. + * This code generates raw asm output which is post-processed to extract + * and format the required data. + */ + +#include <linux/types.h> +#include <linux/stddef.h> +#include <linux/sched.h> +#include <linux/ptrace.h> +#include <asm/io.h> + +#define DEFINE(sym, val) \ + asm volatile("\n->" #sym " %0 " #val : : "i" (val)) + +#define BLANK() asm volatile("\n->" : : ) + +void foo(void) +{ + DEFINE(TI_TASK, offsetof(struct thread_info, task)); + DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); + DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); + BLANK(); + + DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked)); + DEFINE(TASK_UID, offsetof(struct task_struct, uid)); + DEFINE(TASK_EUID, offsetof(struct task_struct, euid)); + DEFINE(TASK_GID, offsetof(struct task_struct, gid)); + DEFINE(TASK_EGID, offsetof(struct task_struct, egid)); + DEFINE(TASK_REAL_PARENT, offsetof(struct task_struct, real_parent)); + DEFINE(TASK_TGID, offsetof(struct task_struct, tgid)); + BLANK(); + + DEFINE(SIZEOF_PT_REGS, sizeof(struct pt_regs)); + DEFINE(PT_PTRACED, PT_PTRACED); + DEFINE(CLONE_VM, CLONE_VM); + DEFINE(CLONE_UNTRACED, CLONE_UNTRACED); + DEFINE(SIGCHLD, SIGCHLD); + BLANK(); + + DEFINE(HAE_CACHE, offsetof(struct alpha_machine_vector, hae_cache)); + DEFINE(HAE_REG, offsetof(struct alpha_machine_vector, hae_register)); +} diff --git a/arch/alpha/kernel/console.c b/arch/alpha/kernel/console.c new file mode 100644 index 0000000..cb3e739 --- /dev/null +++ b/arch/alpha/kernel/console.c @@ -0,0 +1,66 @@ +/* + * linux/arch/alpha/kernel/console.c + * + * Architecture-specific support for VGA device on + * non-0 I/O hose + */ + +#include <linux/config.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/tty.h> +#include <linux/console.h> +#include <asm/vga.h> +#include <asm/machvec.h> + +#ifdef CONFIG_VGA_HOSE + +/* + * Externally-visible vga hose bases + */ +unsigned long __vga_hose_io_base = 0; /* base for default hose */ +unsigned long __vga_hose_mem_base = 0; /* base for default hose */ + +static struct pci_controller * __init +default_vga_hose_select(struct pci_controller *h1, struct pci_controller *h2) +{ + if (h2->index < h1->index) + return h2; + + return h1; +} + +void __init +set_vga_hose(struct pci_controller *hose) +{ + if (hose) { + __vga_hose_io_base = hose->io_space->start; + __vga_hose_mem_base = hose->mem_space->start; + } +} + +void __init +locate_and_init_vga(void *(*sel_func)(void *, void *)) +{ + struct pci_controller *hose = NULL; + struct pci_dev *dev = NULL; + + if (!sel_func) sel_func = (void *)default_vga_hose_select; + + for(dev=NULL; (dev=pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, dev));) { + if (!hose) hose = dev->sysdata; + else hose = sel_func(hose, dev->sysdata); + } + + /* Did we already initialize the correct one?
*/ + if (conswitchp == &vga_con && + __vga_hose_io_base == hose->io_space->start && + __vga_hose_mem_base == hose->mem_space->start) + return; + + /* Set the VGA hose and init the new console */ + set_vga_hose(hose); + take_over_console(&vga_con, 0, MAX_NR_CONSOLES-1, 1); +} + +#endif diff --git a/arch/alpha/kernel/core_apecs.c b/arch/alpha/kernel/core_apecs.c new file mode 100644 index 0000000..a27ba12 --- /dev/null +++ b/arch/alpha/kernel/core_apecs.c @@ -0,0 +1,418 @@ +/* + * linux/arch/alpha/kernel/core_apecs.c + * + * Rewritten for Apecs from the lca.c from: + * + * Written by David Mosberger (davidm@cs.arizona.edu) with some code + * taken from Dave Rusling's (david.rusling@reo.mts.dec.com) 32-bit + * bios code. + * + * Code common to all APECS core logic chips. + */ + +#define __EXTERN_INLINE inline +#include <asm/io.h> +#include <asm/core_apecs.h> +#undef __EXTERN_INLINE + +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/init.h> + +#include <asm/ptrace.h> +#include <asm/smp.h> + +#include "proto.h" +#include "pci_impl.h" + +/* + * NOTE: Herein lie back-to-back mb instructions. They are magic. + * One plausible explanation is that the i/o controller does not properly + * handle the system transaction. Another involves timing. Ho hum. + */ + +/* + * BIOS32-style PCI interface: + */ + +#define DEBUG_CONFIG 0 + +#if DEBUG_CONFIG +# define DBGC(args) printk args +#else +# define DBGC(args) +#endif + +#define vuip volatile unsigned int * + +/* + * Given a bus, device, and function number, compute resulting + * configuration space address and setup the APECS_HAXR2 register + * accordingly. It is therefore not safe to have concurrent + * invocations to configuration space access routines, but there + * really shouldn't be any need for this. + * + * Type 0: + * + * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 + * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | | | | | | | | | | | | | | | | | | | | | | | |F|F|F|R|R|R|R|R|R|0|0| + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * 31:11 Device select bit. + * 10:8 Function number + * 7:2 Register number + * + * Type 1: + * + * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 + * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1| + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * 31:24 reserved + * 23:16 bus number (8 bits = 128 possible buses) + * 15:11 Device number (5 bits) + * 10:8 function number + * 7:2 register number + * + * Notes: + * The function number selects which function of a multi-function device + * (e.g., SCSI and Ethernet). + * + * The register selects a DWORD (32 bit) register offset. Hence it + * doesn't get shifted by 2 bits as we want to "drop" the bottom two + * bits. 
+ */ + +static int +mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where, + unsigned long *pci_addr, unsigned char *type1) +{ + unsigned long addr; + u8 bus = pbus->number; + + DBGC(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x," + " pci_addr=0x%p, type1=0x%p)\n", + bus, device_fn, where, pci_addr, type1)); + + if (bus == 0) { + int device = device_fn >> 3; + + /* type 0 configuration cycle: */ + + if (device > 20) { + DBGC(("mk_conf_addr: device (%d) > 20, returning -1\n", + device)); + return -1; + } + + *type1 = 0; + addr = (device_fn << 8) | (where); + } else { + /* type 1 configuration cycle: */ + *type1 = 1; + addr = (bus << 16) | (device_fn << 8) | (where); + } + *pci_addr = addr; + DBGC(("mk_conf_addr: returning pci_addr 0x%lx\n", addr)); + return 0; +} + +static unsigned int +conf_read(unsigned long addr, unsigned char type1) +{ + unsigned long flags; + unsigned int stat0, value; + unsigned int haxr2 = 0; + + local_irq_save(flags); /* avoid getting hit by machine check */ + + DBGC(("conf_read(addr=0x%lx, type1=%d)\n", addr, type1)); + + /* Reset status register to avoid losing errors. */ + stat0 = *(vuip)APECS_IOC_DCSR; + *(vuip)APECS_IOC_DCSR = stat0; + mb(); + DBGC(("conf_read: APECS DCSR was 0x%x\n", stat0)); + + /* If Type1 access, must set HAE #2. */ + if (type1) { + haxr2 = *(vuip)APECS_IOC_HAXR2; + mb(); + *(vuip)APECS_IOC_HAXR2 = haxr2 | 1; + DBGC(("conf_read: TYPE1 access\n")); + } + + draina(); + mcheck_expected(0) = 1; + mcheck_taken(0) = 0; + mb(); + + /* Access configuration space. */ + + /* Some SRMs step on these registers during a machine check. */ + asm volatile("ldl %0,%1; mb; mb" : "=r"(value) : "m"(*(vuip)addr) + : "$9", "$10", "$11", "$12", "$13", "$14", "memory"); + + if (mcheck_taken(0)) { + mcheck_taken(0) = 0; + value = 0xffffffffU; + mb(); + } + mcheck_expected(0) = 0; + mb(); + +#if 1 + /* + * david.rusling@reo.mts.dec.com. This code is needed for the + * EB64+ as it does not generate a machine check (why I don't + * know). When we build kernels for one particular platform + * then we can make this conditional on the type. + */ + draina(); + + /* Now look for any errors. */ + stat0 = *(vuip)APECS_IOC_DCSR; + DBGC(("conf_read: APECS DCSR after read 0x%x\n", stat0)); + + /* Is any error bit set? */ + if (stat0 & 0xffe0U) { + /* If not NDEV, print status. */ + if (!(stat0 & 0x0800)) { + printk("apecs.c:conf_read: got stat0=%x\n", stat0); + } + + /* Reset error status. */ + *(vuip)APECS_IOC_DCSR = stat0; + mb(); + wrmces(0x7); /* reset machine check */ + value = 0xffffffff; + } +#endif + + /* If Type1 access, must reset HAE #2 so normal IO space ops work. */ + if (type1) { + *(vuip)APECS_IOC_HAXR2 = haxr2 & ~1; + mb(); + } + local_irq_restore(flags); + + return value; +} + +static void +conf_write(unsigned long addr, unsigned int value, unsigned char type1) +{ + unsigned long flags; + unsigned int stat0; + unsigned int haxr2 = 0; + + local_irq_save(flags); /* avoid getting hit by machine check */ + + /* Reset status register to avoid losing errors. */ + stat0 = *(vuip)APECS_IOC_DCSR; + *(vuip)APECS_IOC_DCSR = stat0; + mb(); + + /* If Type1 access, must set HAE #2. */ + if (type1) { + haxr2 = *(vuip)APECS_IOC_HAXR2; + mb(); + *(vuip)APECS_IOC_HAXR2 = haxr2 | 1; + } + + draina(); + mcheck_expected(0) = 1; + mb(); + + /* Access configuration space. */ + *(vuip)addr = value; + mb(); + mb(); /* magic */ + mcheck_expected(0) = 0; + mb(); + +#if 1 + /* + * david.rusling@reo.mts.dec.com. 
This code is needed for the + * EB64+ as it does not generate a machine check (why I don't + * know). When we build kernels for one particular platform + * then we can make this conditional on the type. + */ + draina(); + + /* Now look for any errors. */ + stat0 = *(vuip)APECS_IOC_DCSR; + + /* Is any error bit set? */ + if (stat0 & 0xffe0U) { + /* If not NDEV, print status. */ + if (!(stat0 & 0x0800)) { + printk("apecs.c:conf_write: got stat0=%x\n", stat0); + } + + /* Reset error status. */ + *(vuip)APECS_IOC_DCSR = stat0; + mb(); + wrmces(0x7); /* reset machine check */ + } +#endif + + /* If Type1 access, must reset HAE #2 so normal IO space ops work. */ + if (type1) { + *(vuip)APECS_IOC_HAXR2 = haxr2 & ~1; + mb(); + } + local_irq_restore(flags); +} + +static int +apecs_read_config(struct pci_bus *bus, unsigned int devfn, int where, + int size, u32 *value) +{ + unsigned long addr, pci_addr; + unsigned char type1; + long mask; + int shift; + + if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1)) + return PCIBIOS_DEVICE_NOT_FOUND; + + mask = (size - 1) * 8; + shift = (where & 3) * 8; + addr = (pci_addr << 5) + mask + APECS_CONF; + *value = conf_read(addr, type1) >> (shift); + return PCIBIOS_SUCCESSFUL; +} + +static int +apecs_write_config(struct pci_bus *bus, unsigned int devfn, int where, + int size, u32 value) +{ + unsigned long addr, pci_addr; + unsigned char type1; + long mask; + + if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1)) + return PCIBIOS_DEVICE_NOT_FOUND; + + mask = (size - 1) * 8; + addr = (pci_addr << 5) + mask + APECS_CONF; + conf_write(addr, value << ((where & 3) * 8), type1); + return PCIBIOS_SUCCESSFUL; +} + +struct pci_ops apecs_pci_ops = +{ + .read = apecs_read_config, + .write = apecs_write_config, +}; + +void +apecs_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end) +{ + wmb(); + *(vip)APECS_IOC_TBIA = 0; + mb(); +} + +void __init +apecs_init_arch(void) +{ + struct pci_controller *hose; + + /* + * Create our single hose. + */ + + pci_isa_hose = hose = alloc_pci_controller(); + hose->io_space = &ioport_resource; + hose->mem_space = &iomem_resource; + hose->index = 0; + + hose->sparse_mem_base = APECS_SPARSE_MEM - IDENT_ADDR; + hose->dense_mem_base = APECS_DENSE_MEM - IDENT_ADDR; + hose->sparse_io_base = APECS_IO - IDENT_ADDR; + hose->dense_io_base = 0; + + /* + * Set up the PCI to main memory translation windows. + * + * Window 1 is direct access 1GB at 1GB + * Window 2 is scatter-gather 8MB at 8MB (for isa) + */ + hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0); + hose->sg_pci = NULL; + __direct_map_base = 0x40000000; + __direct_map_size = 0x40000000; + + *(vuip)APECS_IOC_PB1R = __direct_map_base | 0x00080000; + *(vuip)APECS_IOC_PM1R = (__direct_map_size - 1) & 0xfff00000U; + *(vuip)APECS_IOC_TB1R = 0; + + *(vuip)APECS_IOC_PB2R = hose->sg_isa->dma_base | 0x000c0000; + *(vuip)APECS_IOC_PM2R = (hose->sg_isa->size - 1) & 0xfff00000; + *(vuip)APECS_IOC_TB2R = virt_to_phys(hose->sg_isa->ptes) >> 1; + + apecs_pci_tbi(hose, 0, -1); + + /* + * Finally, clear the HAXR2 register, which gets used + * for PCI Config Space accesses. That is the way + * we want to use it, and we do not want to depend on + * what ARC or SRM might have left behind... 
+ */ + *(vuip)APECS_IOC_HAXR2 = 0; + mb(); +} + +void +apecs_pci_clr_err(void) +{ + unsigned int jd; + + jd = *(vuip)APECS_IOC_DCSR; + if (jd & 0xffe0L) { + *(vuip)APECS_IOC_SEAR; + *(vuip)APECS_IOC_DCSR = jd | 0xffe1L; + mb(); + *(vuip)APECS_IOC_DCSR; + } + *(vuip)APECS_IOC_TBIA = (unsigned int)APECS_IOC_TBIA; + mb(); + *(vuip)APECS_IOC_TBIA; +} + +void +apecs_machine_check(unsigned long vector, unsigned long la_ptr, + struct pt_regs * regs) +{ + struct el_common *mchk_header; + struct el_apecs_procdata *mchk_procdata; + struct el_apecs_sysdata_mcheck *mchk_sysdata; + + mchk_header = (struct el_common *)la_ptr; + + mchk_procdata = (struct el_apecs_procdata *) + (la_ptr + mchk_header->proc_offset + - sizeof(mchk_procdata->paltemp)); + + mchk_sysdata = (struct el_apecs_sysdata_mcheck *) + (la_ptr + mchk_header->sys_offset); + + + /* Clear the error before any reporting. */ + mb(); + mb(); /* magic */ + draina(); + apecs_pci_clr_err(); + wrmces(0x7); /* reset machine check pending flag */ + mb(); + + process_mcheck_info(vector, la_ptr, regs, "APECS", + (mcheck_expected(0) + && (mchk_sysdata->epic_dcsr & 0x0c00UL))); +} diff --git a/arch/alpha/kernel/core_cia.c b/arch/alpha/kernel/core_cia.c new file mode 100644 index 0000000..fd56306 --- /dev/null +++ b/arch/alpha/kernel/core_cia.c @@ -0,0 +1,1212 @@ +/* + * linux/arch/alpha/kernel/core_cia.c + * + * Written by David A Rusling (david.rusling@reo.mts.dec.com). + * December 1995. + * + * Copyright (C) 1995 David A Rusling + * Copyright (C) 1997, 1998 Jay Estabrook + * Copyright (C) 1998, 1999, 2000 Richard Henderson + * + * Code common to all CIA core logic chips. + */ + +#define __EXTERN_INLINE inline +#include <asm/io.h> +#include <asm/core_cia.h> +#undef __EXTERN_INLINE + +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/sched.h> +#include <linux/init.h> +#include <linux/bootmem.h> + +#include <asm/ptrace.h> + +#include "proto.h" +#include "pci_impl.h" + + +/* + * NOTE: Herein lie back-to-back mb instructions. They are magic. + * One plausible explanation is that the i/o controller does not properly + * handle the system transaction. Another involves timing. Ho hum. + */ + +#define DEBUG_CONFIG 0 +#if DEBUG_CONFIG +# define DBGC(args) printk args +#else +# define DBGC(args) +#endif + +#define vip volatile int * + +/* + * Given a bus, device, and function number, compute resulting + * configuration space address. It is therefore not safe to have + * concurrent invocations to configuration space access routines, but + * there really shouldn't be any need for this. + * + * Type 0: + * + * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 + * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | | |D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|0| + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * 31:11 Device select bit. 
+ * 10:8 Function number + * 7:2 Register number + * + * Type 1: + * + * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 + * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1| + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * 31:24 reserved + * 23:16 bus number (8 bits = 128 possible buses) + * 15:11 Device number (5 bits) + * 10:8 function number + * 7:2 register number + * + * Notes: + * The function number selects which function of a multi-function device + * (e.g., SCSI and Ethernet). + * + * The register selects a DWORD (32 bit) register offset. Hence it + * doesn't get shifted by 2 bits as we want to "drop" the bottom two + * bits. + */ + +static int +mk_conf_addr(struct pci_bus *bus_dev, unsigned int device_fn, int where, + unsigned long *pci_addr, unsigned char *type1) +{ + u8 bus = bus_dev->number; + + *type1 = (bus != 0); + *pci_addr = (bus << 16) | (device_fn << 8) | where; + + DBGC(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x," + " returning address 0x%p\n", + bus, device_fn, where, *pci_addr)); + + return 0; +} + +static unsigned int +conf_read(unsigned long addr, unsigned char type1) +{ + unsigned long flags; + int stat0, value; + int cia_cfg = 0; + + DBGC(("conf_read(addr=0x%lx, type1=%d) ", addr, type1)); + local_irq_save(flags); + + /* Reset status register to avoid losing errors. */ + stat0 = *(vip)CIA_IOC_CIA_ERR; + *(vip)CIA_IOC_CIA_ERR = stat0; + mb(); + *(vip)CIA_IOC_CIA_ERR; /* re-read to force write */ + + /* If Type1 access, must set CIA CFG. */ + if (type1) { + cia_cfg = *(vip)CIA_IOC_CFG; + *(vip)CIA_IOC_CFG = (cia_cfg & ~3) | 1; + mb(); + *(vip)CIA_IOC_CFG; + } + + mb(); + draina(); + mcheck_expected(0) = 1; + mcheck_taken(0) = 0; + mb(); + + /* Access configuration space. */ + value = *(vip)addr; + mb(); + mb(); /* magic */ + if (mcheck_taken(0)) { + mcheck_taken(0) = 0; + value = 0xffffffff; + mb(); + } + mcheck_expected(0) = 0; + mb(); + + /* If Type1 access, must reset IOC CFG so normal IO space ops work. */ + if (type1) { + *(vip)CIA_IOC_CFG = cia_cfg; + mb(); + *(vip)CIA_IOC_CFG; + } + + local_irq_restore(flags); + DBGC(("done\n")); + + return value; +} + +static void +conf_write(unsigned long addr, unsigned int value, unsigned char type1) +{ + unsigned long flags; + int stat0, cia_cfg = 0; + + DBGC(("conf_write(addr=0x%lx, type1=%d) ", addr, type1)); + local_irq_save(flags); + + /* Reset status register to avoid losing errors. */ + stat0 = *(vip)CIA_IOC_CIA_ERR; + *(vip)CIA_IOC_CIA_ERR = stat0; + mb(); + *(vip)CIA_IOC_CIA_ERR; /* re-read to force write */ + + /* If Type1 access, must set CIA CFG. */ + if (type1) { + cia_cfg = *(vip)CIA_IOC_CFG; + *(vip)CIA_IOC_CFG = (cia_cfg & ~3) | 1; + mb(); + *(vip)CIA_IOC_CFG; + } + + mb(); + draina(); + mcheck_expected(0) = 1; + mcheck_taken(0) = 0; + mb(); + + /* Access configuration space. */ + *(vip)addr = value; + mb(); + *(vip)addr; /* read back to force the write */ + + mcheck_expected(0) = 0; + mb(); + + /* If Type1 access, must reset IOC CFG so normal IO space ops work.
*/ + if (type1) { + *(vip)CIA_IOC_CFG = cia_cfg; + mb(); + *(vip)CIA_IOC_CFG; + } + + local_irq_restore(flags); + DBGC(("done\n")); +} + +static int +cia_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, + u32 *value) +{ + unsigned long addr, pci_addr; + long mask; + unsigned char type1; + int shift; + + if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1)) + return PCIBIOS_DEVICE_NOT_FOUND; + + mask = (size - 1) * 8; + shift = (where & 3) * 8; + addr = (pci_addr << 5) + mask + CIA_CONF; + *value = conf_read(addr, type1) >> (shift); + return PCIBIOS_SUCCESSFUL; +} + +static int +cia_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, + u32 value) +{ + unsigned long addr, pci_addr; + long mask; + unsigned char type1; + + if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1)) + return PCIBIOS_DEVICE_NOT_FOUND; + + mask = (size - 1) * 8; + addr = (pci_addr << 5) + mask + CIA_CONF; + conf_write(addr, value << ((where & 3) * 8), type1); + return PCIBIOS_SUCCESSFUL; +} + +struct pci_ops cia_pci_ops = +{ + .read = cia_read_config, + .write = cia_write_config, +}; + +/* + * CIA Pass 1 and PYXIS Pass 1 and 2 have a broken scatter-gather tlb. + * It cannot be invalidated. Rather than hard code the pass numbers, + * actually try the tbia to see if it works. + */ + +void +cia_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end) +{ + wmb(); + *(vip)CIA_IOC_PCI_TBIA = 3; /* Flush all locked and unlocked. */ + mb(); + *(vip)CIA_IOC_PCI_TBIA; +} + +/* + * On PYXIS, even if the tbia works, we cannot use it. It effectively locks + * the chip (as well as direct write to the tag registers) if there is a + * SG DMA operation in progress. This is true at least for PYXIS rev. 1, + * so always use the method below. + */ +/* + * This is the method NT and NetBSD use. + * + * Allocate mappings, and put the chip into DMA loopback mode to read a + * garbage page. This works by causing TLB misses, causing old entries to + * be purged to make room for the new entries coming in for the garbage page. + */ + +#define CIA_BROKEN_TBIA_BASE 0x30000000 +#define CIA_BROKEN_TBIA_SIZE 1024 + +/* Always called with interrupts disabled */ +void +cia_pci_tbi_try2(struct pci_controller *hose, + dma_addr_t start, dma_addr_t end) +{ + void __iomem *bus_addr; + int ctrl; + + /* Put the chip into PCI loopback mode. */ + mb(); + ctrl = *(vip)CIA_IOC_CIA_CTRL; + *(vip)CIA_IOC_CIA_CTRL = ctrl | CIA_CTRL_PCI_LOOP_EN; + mb(); + *(vip)CIA_IOC_CIA_CTRL; + mb(); + + /* Read from PCI dense memory space at TBI_ADDR, skipping 32k on + each read. This forces SG TLB misses. NetBSD claims that the + TLB entries are not quite LRU, meaning that we need to read more + times than there are actual tags. The 2117x docs claim strict + round-robin. Oh well, we've come this far... */ + /* Even better - as seen on the PYXIS rev 1 the TLB tags 0-3 can + be filled by the TLB misses *only once* after being invalidated + (by tbia or direct write). Next misses won't update them even + though the lock bits are cleared. Tags 4-7 are "quite LRU" though, + so use them and read at window 3 base exactly 4 times. Reading + more sometimes makes the chip crazy. -ink */ + + bus_addr = cia_ioremap(CIA_BROKEN_TBIA_BASE, 32768 * 4); + + cia_readl(bus_addr + 0x00000); + cia_readl(bus_addr + 0x08000); + cia_readl(bus_addr + 0x10000); + cia_readl(bus_addr + 0x18000); + + cia_iounmap(bus_addr); + + /* Restore normal PCI operation. 
*/ + mb(); + *(vip)CIA_IOC_CIA_CTRL = ctrl; + mb(); + *(vip)CIA_IOC_CIA_CTRL; + mb(); +} + +static inline void +cia_prepare_tbia_workaround(int window) +{ + unsigned long *ppte, pte; + long i; + + /* Use minimal 1K map. */ + ppte = __alloc_bootmem(CIA_BROKEN_TBIA_SIZE, 32768, 0); + pte = (virt_to_phys(ppte) >> (PAGE_SHIFT - 1)) | 1; + + for (i = 0; i < CIA_BROKEN_TBIA_SIZE / sizeof(unsigned long); ++i) + ppte[i] = pte; + + *(vip)CIA_IOC_PCI_Wn_BASE(window) = CIA_BROKEN_TBIA_BASE | 3; + *(vip)CIA_IOC_PCI_Wn_MASK(window) + = (CIA_BROKEN_TBIA_SIZE*1024 - 1) & 0xfff00000; + *(vip)CIA_IOC_PCI_Tn_BASE(window) = virt_to_phys(ppte) >> 2; +} + +static void __init +verify_tb_operation(void) +{ + static int page[PAGE_SIZE/4] + __attribute__((aligned(PAGE_SIZE))) + __initdata = { 0 }; + + struct pci_iommu_arena *arena = pci_isa_hose->sg_isa; + int ctrl, addr0, tag0, pte0, data0; + int temp, use_tbia_try2 = 0; + void __iomem *bus_addr; + + /* pyxis -- tbia is broken */ + if (pci_isa_hose->dense_io_base) + use_tbia_try2 = 1; + + /* Put the chip into PCI loopback mode. */ + mb(); + ctrl = *(vip)CIA_IOC_CIA_CTRL; + *(vip)CIA_IOC_CIA_CTRL = ctrl | CIA_CTRL_PCI_LOOP_EN; + mb(); + *(vip)CIA_IOC_CIA_CTRL; + mb(); + + /* Write a valid entry directly into the TLB registers. */ + + addr0 = arena->dma_base; + tag0 = addr0 | 1; + pte0 = (virt_to_phys(page) >> (PAGE_SHIFT - 1)) | 1; + + *(vip)CIA_IOC_TB_TAGn(0) = tag0; + *(vip)CIA_IOC_TB_TAGn(1) = 0; + *(vip)CIA_IOC_TB_TAGn(2) = 0; + *(vip)CIA_IOC_TB_TAGn(3) = 0; + *(vip)CIA_IOC_TB_TAGn(4) = 0; + *(vip)CIA_IOC_TB_TAGn(5) = 0; + *(vip)CIA_IOC_TB_TAGn(6) = 0; + *(vip)CIA_IOC_TB_TAGn(7) = 0; + *(vip)CIA_IOC_TBn_PAGEm(0,0) = pte0; + *(vip)CIA_IOC_TBn_PAGEm(0,1) = 0; + *(vip)CIA_IOC_TBn_PAGEm(0,2) = 0; + *(vip)CIA_IOC_TBn_PAGEm(0,3) = 0; + mb(); + + /* Get a usable bus address */ + bus_addr = cia_ioremap(addr0, 8*PAGE_SIZE); + + /* First, verify we can read back what we've written. If + this fails, we can't be sure of any of the other testing + we're going to do, so bail. */ + /* ??? Actually, we could do the work with machine checks. + By passing this register update test, we pretty much + guarantee that cia_pci_tbi_try1 works. If this test + fails, cia_pci_tbi_try2 might still work. */ + + temp = *(vip)CIA_IOC_TB_TAGn(0); + if (temp != tag0) { + printk("pci: failed tb register update test " + "(tag0 %#x != %#x)\n", temp, tag0); + goto failed; + } + temp = *(vip)CIA_IOC_TB_TAGn(1); + if (temp != 0) { + printk("pci: failed tb register update test " + "(tag1 %#x != 0)\n", temp); + goto failed; + } + temp = *(vip)CIA_IOC_TBn_PAGEm(0,0); + if (temp != pte0) { + printk("pci: failed tb register update test " + "(pte0 %#x != %#x)\n", temp, pte0); + goto failed; + } + printk("pci: passed tb register update test\n"); + + /* Second, verify we can actually do I/O through this entry. */ + + data0 = 0xdeadbeef; + page[0] = data0; + mcheck_expected(0) = 1; + mcheck_taken(0) = 0; + mb(); + temp = cia_readl(bus_addr); + mb(); + mcheck_expected(0) = 0; + mb(); + if (mcheck_taken(0)) { + printk("pci: failed sg loopback i/o read test (mcheck)\n"); + goto failed; + } + if (temp != data0) { + printk("pci: failed sg loopback i/o read test " + "(%#x != %#x)\n", temp, data0); + goto failed; + } + printk("pci: passed sg loopback i/o read test\n"); + + /* Third, try to invalidate the TLB. */ + + if (! 
use_tbia_try2) { + cia_pci_tbi(arena->hose, 0, -1); + temp = *(vip)CIA_IOC_TB_TAGn(0); + if (temp & 1) { + use_tbia_try2 = 1; + printk("pci: failed tbia test; workaround available\n"); + } else { + printk("pci: passed tbia test\n"); + } + } + + /* Fourth, verify the TLB snoops the EV5's caches when + doing a tlb fill. */ + + data0 = 0x5adda15e; + page[0] = data0; + arena->ptes[4] = pte0; + mcheck_expected(0) = 1; + mcheck_taken(0) = 0; + mb(); + temp = cia_readl(bus_addr + 4*PAGE_SIZE); + mb(); + mcheck_expected(0) = 0; + mb(); + if (mcheck_taken(0)) { + printk("pci: failed pte write cache snoop test (mcheck)\n"); + goto failed; + } + if (temp != data0) { + printk("pci: failed pte write cache snoop test " + "(%#x != %#x)\n", temp, data0); + goto failed; + } + printk("pci: passed pte write cache snoop test\n"); + + /* Fifth, verify that a previously invalid PTE entry gets + filled from the page table. */ + + data0 = 0xabcdef12; + page[0] = data0; + arena->ptes[5] = pte0; + mcheck_expected(0) = 1; + mcheck_taken(0) = 0; + mb(); + temp = cia_readl(bus_addr + 5*PAGE_SIZE); + mb(); + mcheck_expected(0) = 0; + mb(); + if (mcheck_taken(0)) { + printk("pci: failed valid tag invalid pte reload test " + "(mcheck; workaround available)\n"); + /* Work around this bug by aligning new allocations + on 4 page boundaries. */ + arena->align_entry = 4; + } else if (temp != data0) { + printk("pci: failed valid tag invalid pte reload test " + "(%#x != %#x)\n", temp, data0); + goto failed; + } else { + printk("pci: passed valid tag invalid pte reload test\n"); + } + + /* Sixth, verify machine checks are working. Test invalid + pte under the same valid tag as we used above. */ + + mcheck_expected(0) = 1; + mcheck_taken(0) = 0; + mb(); + temp = cia_readl(bus_addr + 6*PAGE_SIZE); + mb(); + mcheck_expected(0) = 0; + mb(); + printk("pci: %s pci machine check test\n", + mcheck_taken(0) ? "passed" : "failed"); + + /* Clean up after the tests. */ + arena->ptes[4] = 0; + arena->ptes[5] = 0; + + if (use_tbia_try2) { + alpha_mv.mv_pci_tbi = cia_pci_tbi_try2; + + /* Tags 0-3 must be disabled if we use this workaround. */ + wmb(); + *(vip)CIA_IOC_TB_TAGn(0) = 2; + *(vip)CIA_IOC_TB_TAGn(1) = 2; + *(vip)CIA_IOC_TB_TAGn(2) = 2; + *(vip)CIA_IOC_TB_TAGn(3) = 2; + + printk("pci: tbia workaround enabled\n"); + } + alpha_mv.mv_pci_tbi(arena->hose, 0, -1); + +exit: + /* unmap the bus addr */ + cia_iounmap(bus_addr); + + /* Restore normal PCI operation. */ + mb(); + *(vip)CIA_IOC_CIA_CTRL = ctrl; + mb(); + *(vip)CIA_IOC_CIA_CTRL; + mb(); + return; + +failed: + printk("pci: disabling sg translation window\n"); + *(vip)CIA_IOC_PCI_W0_BASE = 0; + *(vip)CIA_IOC_PCI_W1_BASE = 0; + pci_isa_hose->sg_isa = NULL; + alpha_mv.mv_pci_tbi = NULL; + goto exit; +} + +#if defined(ALPHA_RESTORE_SRM_SETUP) +/* Save CIA configuration data as the console had it set up. */ +struct +{ + unsigned int hae_mem; + unsigned int hae_io; + unsigned int pci_dac_offset; + unsigned int err_mask; + unsigned int cia_ctrl; + unsigned int cia_cnfg; + struct { + unsigned int w_base; + unsigned int w_mask; + unsigned int t_base; + } window[4]; +} saved_config __attribute((common)); + +void +cia_save_srm_settings(int is_pyxis) +{ + int i; + + /* Save some important registers.
*/ + saved_config.err_mask = *(vip)CIA_IOC_ERR_MASK; + saved_config.cia_ctrl = *(vip)CIA_IOC_CIA_CTRL; + saved_config.hae_mem = *(vip)CIA_IOC_HAE_MEM; + saved_config.hae_io = *(vip)CIA_IOC_HAE_IO; + saved_config.pci_dac_offset = *(vip)CIA_IOC_PCI_W_DAC; + + if (is_pyxis) + saved_config.cia_cnfg = *(vip)CIA_IOC_CIA_CNFG; + else + saved_config.cia_cnfg = 0; + + /* Save DMA windows configuration. */ + for (i = 0; i < 4; i++) { + saved_config.window[i].w_base = *(vip)CIA_IOC_PCI_Wn_BASE(i); + saved_config.window[i].w_mask = *(vip)CIA_IOC_PCI_Wn_MASK(i); + saved_config.window[i].t_base = *(vip)CIA_IOC_PCI_Tn_BASE(i); + } + mb(); +} + +void +cia_restore_srm_settings(void) +{ + int i; + + for (i = 0; i < 4; i++) { + *(vip)CIA_IOC_PCI_Wn_BASE(i) = saved_config.window[i].w_base; + *(vip)CIA_IOC_PCI_Wn_MASK(i) = saved_config.window[i].w_mask; + *(vip)CIA_IOC_PCI_Tn_BASE(i) = saved_config.window[i].t_base; + } + + *(vip)CIA_IOC_HAE_MEM = saved_config.hae_mem; + *(vip)CIA_IOC_HAE_IO = saved_config.hae_io; + *(vip)CIA_IOC_PCI_W_DAC = saved_config.pci_dac_offset; + *(vip)CIA_IOC_ERR_MASK = saved_config.err_mask; + *(vip)CIA_IOC_CIA_CTRL = saved_config.cia_ctrl; + + if (saved_config.cia_cnfg) /* Must be pyxis. */ + *(vip)CIA_IOC_CIA_CNFG = saved_config.cia_cnfg; + + mb(); +} +#else /* ALPHA_RESTORE_SRM_SETUP */ +#define cia_save_srm_settings(p) do {} while (0) +#define cia_restore_srm_settings() do {} while (0) +#endif /* ALPHA_RESTORE_SRM_SETUP */ + + +static void __init +do_init_arch(int is_pyxis) +{ + struct pci_controller *hose; + int temp, cia_rev, tbia_window; + + cia_rev = *(vip)CIA_IOC_CIA_REV & CIA_REV_MASK; + printk("pci: cia revision %d%s\n", + cia_rev, is_pyxis ? " (pyxis)" : ""); + + if (alpha_using_srm) + cia_save_srm_settings(is_pyxis); + + /* Set up error reporting. */ + temp = *(vip)CIA_IOC_ERR_MASK; + temp &= ~(CIA_ERR_CPU_PE | CIA_ERR_MEM_NEM | CIA_ERR_PA_PTE_INV + | CIA_ERR_RCVD_MAS_ABT | CIA_ERR_RCVD_TAR_ABT); + *(vip)CIA_IOC_ERR_MASK = temp; + + /* Clear all currently pending errors. */ + temp = *(vip)CIA_IOC_CIA_ERR; + *(vip)CIA_IOC_CIA_ERR = temp; + + /* Turn on mchecks. */ + temp = *(vip)CIA_IOC_CIA_CTRL; + temp |= CIA_CTRL_FILL_ERR_EN | CIA_CTRL_MCHK_ERR_EN; + *(vip)CIA_IOC_CIA_CTRL = temp; + + /* Clear the CFG register, which gets used for PCI config space + accesses. That is the way we want to use it, and we do not + want to depend on what ARC or SRM might have left behind. */ + *(vip)CIA_IOC_CFG = 0; + + /* Zero the HAEs. */ + *(vip)CIA_IOC_HAE_MEM = 0; + *(vip)CIA_IOC_HAE_IO = 0; + + /* For PYXIS, we always use BWX bus and i/o accesses. To that end, + make sure they're enabled on the controller. At the same time, + enable the monster window. */ + if (is_pyxis) { + temp = *(vip)CIA_IOC_CIA_CNFG; + temp |= CIA_CNFG_IOA_BWEN | CIA_CNFG_PCI_MWEN; + *(vip)CIA_IOC_CIA_CNFG = temp; + } + + /* Synchronize with all previous changes. */ + mb(); + *(vip)CIA_IOC_CIA_REV; + + /* + * Create our single hose. + */ + + pci_isa_hose = hose = alloc_pci_controller(); + hose->io_space = &ioport_resource; + hose->mem_space = &iomem_resource; + hose->index = 0; + + if (! 
is_pyxis) { + struct resource *hae_mem = alloc_resource(); + hose->mem_space = hae_mem; + + hae_mem->start = 0; + hae_mem->end = CIA_MEM_R1_MASK; + hae_mem->name = pci_hae0_name; + hae_mem->flags = IORESOURCE_MEM; + + if (request_resource(&iomem_resource, hae_mem) < 0) + printk(KERN_ERR "Failed to request HAE_MEM\n"); + + hose->sparse_mem_base = CIA_SPARSE_MEM - IDENT_ADDR; + hose->dense_mem_base = CIA_DENSE_MEM - IDENT_ADDR; + hose->sparse_io_base = CIA_IO - IDENT_ADDR; + hose->dense_io_base = 0; + } else { + hose->sparse_mem_base = 0; + hose->dense_mem_base = CIA_BW_MEM - IDENT_ADDR; + hose->sparse_io_base = 0; + hose->dense_io_base = CIA_BW_IO - IDENT_ADDR; + } + + /* + * Set up the PCI to main memory translation windows. + * + * Window 0 is S/G 8MB at 8MB (for isa) + * Window 1 is S/G 1MB at 768MB (for tbia) (unused for CIA rev 1) + * Window 2 is direct access 2GB at 2GB + * Window 3 is DAC access 4GB at 8GB (or S/G for tbia if CIA rev 1) + * + * ??? NetBSD hints that page tables must be aligned to 32K, + * possibly due to a hardware bug. This is over-aligned + * from the 8K alignment one would expect for an 8MB window. + * No description of what revisions affected. + */ + + hose->sg_pci = NULL; + hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 32768); + + __direct_map_base = 0x80000000; + __direct_map_size = 0x80000000; + + *(vip)CIA_IOC_PCI_W0_BASE = hose->sg_isa->dma_base | 3; + *(vip)CIA_IOC_PCI_W0_MASK = (hose->sg_isa->size - 1) & 0xfff00000; + *(vip)CIA_IOC_PCI_T0_BASE = virt_to_phys(hose->sg_isa->ptes) >> 2; + + *(vip)CIA_IOC_PCI_W2_BASE = __direct_map_base | 1; + *(vip)CIA_IOC_PCI_W2_MASK = (__direct_map_size - 1) & 0xfff00000; + *(vip)CIA_IOC_PCI_T2_BASE = 0 >> 2; + + /* On PYXIS we have the monster window, selected by bit 40, so + there is no need for window3 to be enabled. + + On CIA, we don't have true arbitrary addressing -- bits <39:32> + are compared against W_DAC. We can, however, directly map 4GB, + which is better than before. However, due to assumptions made + elsewhere, we should not claim that we support DAC unless that + 4GB covers all of physical memory. + + On CIA rev 1, apparently W1 and W2 can't be used for SG. + At least, there are reports that it doesn't work for Alcor. + In that case, we have no choice but to use W3 for the TBIA + workaround, which means we can't use DAC at all. */ + + tbia_window = 1; + if (is_pyxis) { + *(vip)CIA_IOC_PCI_W3_BASE = 0; + } else if (cia_rev == 1) { + *(vip)CIA_IOC_PCI_W1_BASE = 0; + tbia_window = 3; + } else if (max_low_pfn > (0x100000000UL >> PAGE_SHIFT)) { + *(vip)CIA_IOC_PCI_W3_BASE = 0; + } else { + *(vip)CIA_IOC_PCI_W3_BASE = 0x00000000 | 1 | 8; + *(vip)CIA_IOC_PCI_W3_MASK = 0xfff00000; + *(vip)CIA_IOC_PCI_T3_BASE = 0 >> 2; + + alpha_mv.pci_dac_offset = 0x200000000UL; + *(vip)CIA_IOC_PCI_W_DAC = alpha_mv.pci_dac_offset >> 32; + } + + /* Prepare workaround for apparently broken tbia. */ + cia_prepare_tbia_workaround(tbia_window); +} + +void __init +cia_init_arch(void) +{ + do_init_arch(0); +} + +void __init +pyxis_init_arch(void) +{ + /* On pyxis machines we can precisely calculate the + CPU clock frequency using pyxis real time counter. + It's especially useful for SX164 with broken RTC. + + Both CPU and chipset are driven by the single 16.666M + or 16.667M crystal oscillator. PYXIS_RT_COUNT clock is + 66.66 MHz. 
-ink */ + + unsigned int cc0, cc1; + unsigned long pyxis_cc; + + __asm__ __volatile__ ("rpcc %0" : "=r"(cc0)); + pyxis_cc = *(vulp)PYXIS_RT_COUNT; + do { } while(*(vulp)PYXIS_RT_COUNT - pyxis_cc < 4096); + __asm__ __volatile__ ("rpcc %0" : "=r"(cc1)); + cc1 -= cc0; + hwrpb->cycle_freq = ((cc1 >> 11) * 100000000UL) / 3; + hwrpb_update_checksum(hwrpb); + + do_init_arch(1); +} + +void +cia_kill_arch(int mode) +{ + if (alpha_using_srm) + cia_restore_srm_settings(); +} + +void __init +cia_init_pci(void) +{ + /* Must delay this from init_arch, as we need machine checks. */ + verify_tb_operation(); + common_init_pci(); +} + +static inline void +cia_pci_clr_err(void) +{ + int jd; + + jd = *(vip)CIA_IOC_CIA_ERR; + *(vip)CIA_IOC_CIA_ERR = jd; + mb(); + *(vip)CIA_IOC_CIA_ERR; /* re-read to force write. */ +} + +#ifdef CONFIG_VERBOSE_MCHECK +static void +cia_decode_pci_error(struct el_CIA_sysdata_mcheck *cia, const char *msg) +{ + static const char * const pci_cmd_desc[16] = { + "Interrupt Acknowledge", "Special Cycle", "I/O Read", + "I/O Write", "Reserved 0x4", "Reserved 0x5", "Memory Read", + "Memory Write", "Reserved 0x8", "Reserved 0x9", + "Configuration Read", "Configuration Write", + "Memory Read Multiple", "Dual Address Cycle", + "Memory Read Line", "Memory Write and Invalidate" + }; + + if (cia->cia_err & (CIA_ERR_COR_ERR + | CIA_ERR_UN_COR_ERR + | CIA_ERR_MEM_NEM + | CIA_ERR_PA_PTE_INV)) { + static const char * const window_desc[6] = { + "No window active", "Window 0 hit", "Window 1 hit", + "Window 2 hit", "Window 3 hit", "Monster window hit" + }; + + const char *window; + const char *cmd; + unsigned long addr, tmp; + int lock, dac; + + cmd = pci_cmd_desc[cia->pci_err0 & 0x7]; + lock = (cia->pci_err0 >> 4) & 1; + dac = (cia->pci_err0 >> 5) & 1; + + tmp = (cia->pci_err0 >> 8) & 0x1F; + tmp = ffs(tmp); + window = window_desc[tmp]; + + addr = cia->pci_err1; + if (dac) { + tmp = *(vip)CIA_IOC_PCI_W_DAC & 0xFFUL; + addr |= tmp << 32; + } + + printk(KERN_CRIT "CIA machine check: %s\n", msg); + printk(KERN_CRIT " DMA command: %s\n", cmd); + printk(KERN_CRIT " PCI address: %#010lx\n", addr); + printk(KERN_CRIT " %s, Lock: %d, DAC: %d\n", + window, lock, dac); + } else if (cia->cia_err & (CIA_ERR_PERR + | CIA_ERR_PCI_ADDR_PE + | CIA_ERR_RCVD_MAS_ABT + | CIA_ERR_RCVD_TAR_ABT + | CIA_ERR_IOA_TIMEOUT)) { + static const char * const master_st_desc[16] = { + "Idle", "Drive bus", "Address step cycle", + "Address cycle", "Data cycle", "Last read data cycle", + "Last write data cycle", "Read stop cycle", + "Write stop cycle", "Read turnaround cycle", + "Write turnaround cycle", "Reserved 0xB", + "Reserved 0xC", "Reserved 0xD", "Reserved 0xE", + "Unknown state" + }; + static const char * const target_st_desc[16] = { + "Idle", "Busy", "Read data cycle", "Write data cycle", + "Read stop cycle", "Write stop cycle", + "Read turnaround cycle", "Write turnaround cycle", + "Read wait cycle", "Write wait cycle", + "Reserved 0xA", "Reserved 0xB", "Reserved 0xC", + "Reserved 0xD", "Reserved 0xE", "Unknown state" + }; + + const char *cmd; + const char *master, *target; + unsigned long addr, tmp; + int dac; + + master = master_st_desc[(cia->pci_err0 >> 16) & 0xF]; + target = target_st_desc[(cia->pci_err0 >> 20) & 0xF]; + cmd = pci_cmd_desc[(cia->pci_err0 >> 24) & 0xF]; + dac = (cia->pci_err0 >> 28) & 1; + + addr = cia->pci_err2; + if (dac) { + tmp = *(volatile int *)CIA_IOC_PCI_W_DAC & 0xFFUL; + addr |= tmp << 32; + } + + printk(KERN_CRIT "CIA machine check: %s\n", msg); + printk(KERN_CRIT " PCI command: %s\n", cmd); + 
printk(KERN_CRIT " Master state: %s, Target state: %s\n", + master, target); + printk(KERN_CRIT " PCI address: %#010lx, DAC: %d\n", + addr, dac); + } else { + printk(KERN_CRIT "CIA machine check: %s\n", msg); + printk(KERN_CRIT " Unknown PCI error\n"); + printk(KERN_CRIT " PCI_ERR0 = %#08lx", cia->pci_err0); + printk(KERN_CRIT " PCI_ERR1 = %#08lx", cia->pci_err1); + printk(KERN_CRIT " PCI_ERR2 = %#08lx", cia->pci_err2); + } +} + +static void +cia_decode_mem_error(struct el_CIA_sysdata_mcheck *cia, const char *msg) +{ + unsigned long mem_port_addr; + unsigned long mem_port_mask; + const char *mem_port_cmd; + const char *seq_state; + const char *set_select; + unsigned long tmp; + + /* If this is a DMA command, also decode the PCI bits. */ + if ((cia->mem_err1 >> 20) & 1) + cia_decode_pci_error(cia, msg); + else + printk(KERN_CRIT "CIA machine check: %s\n", msg); + + mem_port_addr = cia->mem_err0 & 0xfffffff0; + mem_port_addr |= (cia->mem_err1 & 0x83UL) << 32; + + mem_port_mask = (cia->mem_err1 >> 12) & 0xF; + + tmp = (cia->mem_err1 >> 8) & 0xF; + tmp |= ((cia->mem_err1 >> 20) & 1) << 4; + if ((tmp & 0x1E) == 0x06) + mem_port_cmd = "WRITE BLOCK or WRITE BLOCK LOCK"; + else if ((tmp & 0x1C) == 0x08) + mem_port_cmd = "READ MISS or READ MISS MODIFY"; + else if (tmp == 0x1C) + mem_port_cmd = "BC VICTIM"; + else if ((tmp & 0x1E) == 0x0E) + mem_port_cmd = "READ MISS MODIFY"; + else if ((tmp & 0x1C) == 0x18) + mem_port_cmd = "DMA READ or DMA READ MODIFY"; + else if ((tmp & 0x1E) == 0x12) + mem_port_cmd = "DMA WRITE"; + else + mem_port_cmd = "Unknown"; + + tmp = (cia->mem_err1 >> 16) & 0xF; + switch (tmp) { + case 0x0: + seq_state = "Idle"; + break; + case 0x1: + seq_state = "DMA READ or DMA WRITE"; + break; + case 0x2: case 0x3: + seq_state = "READ MISS (or READ MISS MODIFY) with victim"; + break; + case 0x4: case 0x5: case 0x6: + seq_state = "READ MISS (or READ MISS MODIFY) with no victim"; + break; + case 0x8: case 0x9: case 0xB: + seq_state = "Refresh"; + break; + case 0xC: + seq_state = "Idle, waiting for DMA pending read"; + break; + case 0xE: case 0xF: + seq_state = "Idle, ras precharge"; + break; + default: + seq_state = "Unknown"; + break; + } + + tmp = (cia->mem_err1 >> 24) & 0x1F; + switch (tmp) { + case 0x00: set_select = "Set 0 selected"; break; + case 0x01: set_select = "Set 1 selected"; break; + case 0x02: set_select = "Set 2 selected"; break; + case 0x03: set_select = "Set 3 selected"; break; + case 0x04: set_select = "Set 4 selected"; break; + case 0x05: set_select = "Set 5 selected"; break; + case 0x06: set_select = "Set 6 selected"; break; + case 0x07: set_select = "Set 7 selected"; break; + case 0x08: set_select = "Set 8 selected"; break; + case 0x09: set_select = "Set 9 selected"; break; + case 0x0A: set_select = "Set A selected"; break; + case 0x0B: set_select = "Set B selected"; break; + case 0x0C: set_select = "Set C selected"; break; + case 0x0D: set_select = "Set D selected"; break; + case 0x0E: set_select = "Set E selected"; break; + case 0x0F: set_select = "Set F selected"; break; + case 0x10: set_select = "No set selected"; break; + case 0x1F: set_select = "Refresh cycle"; break; + default: set_select = "Unknown"; break; + } + + printk(KERN_CRIT " Memory port command: %s\n", mem_port_cmd); + printk(KERN_CRIT " Memory port address: %#010lx, mask: %#lx\n", + mem_port_addr, mem_port_mask); + printk(KERN_CRIT " Memory sequencer state: %s\n", seq_state); + printk(KERN_CRIT " Memory set: %s\n", set_select); +} + +static void +cia_decode_ecc_error(struct el_CIA_sysdata_mcheck 
*cia, const char *msg) +{ + long syn; + long i; + const char *fmt; + + cia_decode_mem_error(cia, msg); + + syn = cia->cia_syn & 0xff; + if (syn == (syn & -syn)) { + fmt = KERN_CRIT " ECC syndrome %#x -- check bit %d\n"; + i = ffs(syn) - 1; + } else { + static unsigned char const data_bit[64] = { + 0xCE, 0xCB, 0xD3, 0xD5, + 0xD6, 0xD9, 0xDA, 0xDC, + 0x23, 0x25, 0x26, 0x29, + 0x2A, 0x2C, 0x31, 0x34, + 0x0E, 0x0B, 0x13, 0x15, + 0x16, 0x19, 0x1A, 0x1C, + 0xE3, 0xE5, 0xE6, 0xE9, + 0xEA, 0xEC, 0xF1, 0xF4, + 0x4F, 0x4A, 0x52, 0x54, + 0x57, 0x58, 0x5B, 0x5D, + 0xA2, 0xA4, 0xA7, 0xA8, + 0xAB, 0xAD, 0xB0, 0xB5, + 0x8F, 0x8A, 0x92, 0x94, + 0x97, 0x98, 0x9B, 0x9D, + 0x62, 0x64, 0x67, 0x68, + 0x6B, 0x6D, 0x70, 0x75 + }; + + for (i = 0; i < 64; ++i) + if (data_bit[i] == syn) + break; + + if (i < 64) + fmt = KERN_CRIT " ECC syndrome %#x -- data bit %d\n"; + else + fmt = KERN_CRIT " ECC syndrome %#x -- unknown bit\n"; + } + + printk (fmt, syn, i); +} + +static void +cia_decode_parity_error(struct el_CIA_sysdata_mcheck *cia) +{ + static const char * const cmd_desc[16] = { + "NOP", "LOCK", "FETCH", "FETCH_M", "MEMORY BARRIER", + "SET DIRTY", "WRITE BLOCK", "WRITE BLOCK LOCK", + "READ MISS0", "READ MISS1", "READ MISS MOD0", + "READ MISS MOD1", "BCACHE VICTIM", "Spare", + "READ MISS MOD STC0", "READ MISS MOD STC1" + }; + + unsigned long addr; + unsigned long mask; + const char *cmd; + int par; + + addr = cia->cpu_err0 & 0xfffffff0; + addr |= (cia->cpu_err1 & 0x83UL) << 32; + cmd = cmd_desc[(cia->cpu_err1 >> 8) & 0xF]; + mask = (cia->cpu_err1 >> 12) & 0xF; + par = (cia->cpu_err1 >> 21) & 1; + + printk(KERN_CRIT "CIA machine check: System bus parity error\n"); + printk(KERN_CRIT " Command: %s, Parity bit: %d\n", cmd, par); + printk(KERN_CRIT " Address: %#010lx, Mask: %#lx\n", addr, mask); +} +#endif /* CONFIG_VERBOSE_MCHECK */ + + +static int +cia_decode_mchk(unsigned long la_ptr) +{ + struct el_common *com; + struct el_CIA_sysdata_mcheck *cia; + + com = (void *)la_ptr; + cia = (void *)(la_ptr + com->sys_offset); + + if ((cia->cia_err & CIA_ERR_VALID) == 0) + return 0; + +#ifdef CONFIG_VERBOSE_MCHECK + if (!alpha_verbose_mcheck) + return 1; + + switch (ffs(cia->cia_err & 0xfff) - 1) { + case 0: /* CIA_ERR_COR_ERR */ + cia_decode_ecc_error(cia, "Corrected ECC error"); + break; + case 1: /* CIA_ERR_UN_COR_ERR */ + cia_decode_ecc_error(cia, "Uncorrected ECC error"); + break; + case 2: /* CIA_ERR_CPU_PE */ + cia_decode_parity_error(cia); + break; + case 3: /* CIA_ERR_MEM_NEM */ + cia_decode_mem_error(cia, "Access to nonexistent memory"); + break; + case 4: /* CIA_ERR_PCI_SERR */ + cia_decode_pci_error(cia, "PCI bus system error"); + break; + case 5: /* CIA_ERR_PERR */ + cia_decode_pci_error(cia, "PCI data parity error"); + break; + case 6: /* CIA_ERR_PCI_ADDR_PE */ + cia_decode_pci_error(cia, "PCI address parity error"); + break; + case 7: /* CIA_ERR_RCVD_MAS_ABT */ + cia_decode_pci_error(cia, "PCI master abort"); + break; + case 8: /* CIA_ERR_RCVD_TAR_ABT */ + cia_decode_pci_error(cia, "PCI target abort"); + break; + case 9: /* CIA_ERR_PA_PTE_INV */ + cia_decode_pci_error(cia, "PCI invalid PTE"); + break; + case 10: /* CIA_ERR_FROM_WRT_ERR */ + cia_decode_mem_error(cia, "Write to flash ROM attempted"); + break; + case 11: /* CIA_ERR_IOA_TIMEOUT */ + cia_decode_pci_error(cia, "I/O timeout"); + break; + } + + if (cia->cia_err & CIA_ERR_LOST_CORR_ERR) + printk(KERN_CRIT "CIA lost machine check: " + "Correctable ECC error\n"); + if (cia->cia_err & CIA_ERR_LOST_UN_CORR_ERR) + printk(KERN_CRIT "CIA lost machine check: " 
+ "Uncorrectable ECC error\n"); + if (cia->cia_err & CIA_ERR_LOST_CPU_PE) + printk(KERN_CRIT "CIA lost machine check: " + "System bus parity error\n"); + if (cia->cia_err & CIA_ERR_LOST_MEM_NEM) + printk(KERN_CRIT "CIA lost machine check: " + "Access to nonexistent memory\n"); + if (cia->cia_err & CIA_ERR_LOST_PERR) + printk(KERN_CRIT "CIA lost machine check: " + "PCI data parity error\n"); + if (cia->cia_err & CIA_ERR_LOST_PCI_ADDR_PE) + printk(KERN_CRIT "CIA lost machine check: " + "PCI address parity error\n"); + if (cia->cia_err & CIA_ERR_LOST_RCVD_MAS_ABT) + printk(KERN_CRIT "CIA lost machine check: " + "PCI master abort\n"); + if (cia->cia_err & CIA_ERR_LOST_RCVD_TAR_ABT) + printk(KERN_CRIT "CIA lost machine check: " + "PCI target abort\n"); + if (cia->cia_err & CIA_ERR_LOST_PA_PTE_INV) + printk(KERN_CRIT "CIA lost machine check: " + "PCI invalid PTE\n"); + if (cia->cia_err & CIA_ERR_LOST_FROM_WRT_ERR) + printk(KERN_CRIT "CIA lost machine check: " + "Write to flash ROM attempted\n"); + if (cia->cia_err & CIA_ERR_LOST_IOA_TIMEOUT) + printk(KERN_CRIT "CIA lost machine check: " + "I/O timeout\n"); +#endif /* CONFIG_VERBOSE_MCHECK */ + + return 1; +} + +void +cia_machine_check(unsigned long vector, unsigned long la_ptr, + struct pt_regs * regs) +{ + int expected; + + /* Clear the error before any reporting. */ + mb(); + mb(); /* magic */ + draina(); + cia_pci_clr_err(); + wrmces(rdmces()); /* reset machine check pending flag. */ + mb(); + + expected = mcheck_expected(0); + if (!expected && vector == 0x660) + expected = cia_decode_mchk(la_ptr); + process_mcheck_info(vector, la_ptr, regs, "CIA", expected); +} diff --git a/arch/alpha/kernel/core_irongate.c b/arch/alpha/kernel/core_irongate.c new file mode 100644 index 0000000..138d497 --- /dev/null +++ b/arch/alpha/kernel/core_irongate.c @@ -0,0 +1,416 @@ +/* + * linux/arch/alpha/kernel/core_irongate.c + * + * Based on code written by David A. Rusling (david.rusling@reo.mts.dec.com). + * + * Copyright (C) 1999 Alpha Processor, Inc., + * (David Daniel, Stig Telfer, Soohoon Lee) + * + * Code common to all IRONGATE core logic chips. + */ + +#define __EXTERN_INLINE inline +#include <asm/io.h> +#include <asm/core_irongate.h> +#undef __EXTERN_INLINE + +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/sched.h> +#include <linux/init.h> +#include <linux/initrd.h> +#include <linux/bootmem.h> + +#include <asm/ptrace.h> +#include <asm/pci.h> +#include <asm/cacheflush.h> +#include <asm/tlbflush.h> + +#include "proto.h" +#include "pci_impl.h" + +/* + * BIOS32-style PCI interface: + */ + +#define DEBUG_CONFIG 0 + +#if DEBUG_CONFIG +# define DBG_CFG(args) printk args +#else +# define DBG_CFG(args) +#endif + +igcsr32 *IronECC; + +/* + * Given a bus, device, and function number, compute resulting + * configuration space address accordingly. It is therefore not safe + * to have concurrent invocations to configuration space access + * routines, but there really shouldn't be any need for this. + * + * addr[31:24] reserved + * addr[23:16] bus number (8 bits = 128 possible buses) + * addr[15:11] Device number (5 bits) + * addr[10: 8] function number + * addr[ 7: 2] register number + * + * For IRONGATE: + * if (bus = addr[23:16]) == 0 + * then + * type 0 config cycle: + * addr_on_pci[31:11] = id selection for device = addr[15:11] + * addr_on_pci[10: 2] = addr[10: 2] ??? 
+ * addr_on_pci[ 1: 0] = 00 + * else + * type 1 config cycle (pass on with no decoding): + * addr_on_pci[31:24] = 0 + * addr_on_pci[23: 2] = addr[23: 2] + * addr_on_pci[ 1: 0] = 01 + * fi + * + * Notes: + * The function number selects which function of a multi-function device + * (e.g., SCSI and Ethernet). + * + * The register selects a DWORD (32 bit) register offset. Hence it + * doesn't get shifted by 2 bits as we want to "drop" the bottom two + * bits. + */ + +static int +mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where, + unsigned long *pci_addr, unsigned char *type1) +{ + unsigned long addr; + u8 bus = pbus->number; + + DBG_CFG(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x, " + "pci_addr=0x%p, type1=0x%p)\n", + bus, device_fn, where, pci_addr, type1)); + + *type1 = (bus != 0); + + addr = (bus << 16) | (device_fn << 8) | where; + addr |= IRONGATE_CONF; + + *pci_addr = addr; + DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr)); + return 0; +} + +static int +irongate_read_config(struct pci_bus *bus, unsigned int devfn, int where, + int size, u32 *value) +{ + unsigned long addr; + unsigned char type1; + + if (mk_conf_addr(bus, devfn, where, &addr, &type1)) + return PCIBIOS_DEVICE_NOT_FOUND; + + switch (size) { + case 1: + *value = __kernel_ldbu(*(vucp)addr); + break; + case 2: + *value = __kernel_ldwu(*(vusp)addr); + break; + case 4: + *value = *(vuip)addr; + break; + } + + return PCIBIOS_SUCCESSFUL; +} + +static int +irongate_write_config(struct pci_bus *bus, unsigned int devfn, int where, + int size, u32 value) +{ + unsigned long addr; + unsigned char type1; + + if (mk_conf_addr(bus, devfn, where, &addr, &type1)) + return PCIBIOS_DEVICE_NOT_FOUND; + + switch (size) { + case 1: + __kernel_stb(value, *(vucp)addr); + mb(); + __kernel_ldbu(*(vucp)addr); + break; + case 2: + __kernel_stw(value, *(vusp)addr); + mb(); + __kernel_ldwu(*(vusp)addr); + break; + case 4: + *(vuip)addr = value; + mb(); + *(vuip)addr; + break; + } + + return PCIBIOS_SUCCESSFUL; +} + +struct pci_ops irongate_pci_ops = +{ + .read = irongate_read_config, + .write = irongate_write_config, +}; + +int +irongate_pci_clr_err(void) +{ + unsigned int nmi_ctl=0; + unsigned int IRONGATE_jd; + +again: + IRONGATE_jd = IRONGATE0->stat_cmd; + printk("Iron stat_cmd %x\n", IRONGATE_jd); + IRONGATE0->stat_cmd = IRONGATE_jd; /* write again clears error bits */ + mb(); + IRONGATE_jd = IRONGATE0->stat_cmd; /* re-read to force write */ + + IRONGATE_jd = *IronECC; + printk("Iron ECC %x\n", IRONGATE_jd); + *IronECC = IRONGATE_jd; /* write again clears error bits */ + mb(); + IRONGATE_jd = *IronECC; /* re-read to force write */ + + /* Clear ALI NMI */ + nmi_ctl = inb(0x61); + nmi_ctl |= 0x0c; + outb(nmi_ctl, 0x61); + nmi_ctl &= ~0x0c; + outb(nmi_ctl, 0x61); + + IRONGATE_jd = *IronECC; + if (IRONGATE_jd & 0x300) goto again; + + return 0; +} + +#define IRONGATE_3GB 0xc0000000UL + +/* On Albacore (aka UP1500) with 4Gb of RAM we have to reserve some + memory for PCI. At this point we just reserve memory above 3Gb. Most + of this memory will be freed after PCI setup is done. 
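+   (A sketch of what the code below does: pci_mem is memtop rounded up
+   to the next 16MB boundary and clamped to 3Gb; RAM from pci_mem up to
+   memtop is reserved via the bootmem allocator, after first moving any
+   initrd that sits in that range down below pci_mem.)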
*/ +static void __init +albacore_init_arch(void) +{ + unsigned long memtop = max_low_pfn << PAGE_SHIFT; + unsigned long pci_mem = (memtop + 0x1000000UL) & ~0xffffffUL; + struct percpu_struct *cpu; + int pal_rev, pal_var; + + cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset); + pal_rev = cpu->pal_revision & 0xffff; + pal_var = (cpu->pal_revision >> 16) & 0xff; + + /* Consoles earlier than A5.6-18 (OSF PALcode v1.62-2) set up + the CPU incorrectly (leave speculative stores enabled), + which causes memory corruption under certain conditions. + Issue a warning for such consoles. */ + if (alpha_using_srm && + (pal_rev < 0x13e || (pal_rev == 0x13e && pal_var < 2))) + printk(KERN_WARNING "WARNING! Upgrade to SRM A5.6-19 " + "or later\n"); + + if (pci_mem > IRONGATE_3GB) + pci_mem = IRONGATE_3GB; + IRONGATE0->pci_mem = pci_mem; + alpha_mv.min_mem_address = pci_mem; + if (memtop > pci_mem) { +#ifdef CONFIG_BLK_DEV_INITRD + extern unsigned long initrd_start, initrd_end; + extern void *move_initrd(unsigned long); + + /* Move the initrd out of the way. */ + if (initrd_end && __pa(initrd_end) > pci_mem) { + unsigned long size; + + size = initrd_end - initrd_start; + free_bootmem_node(NODE_DATA(0), __pa(initrd_start), + PAGE_ALIGN(size)); + if (!move_initrd(pci_mem)) + printk("irongate_init_arch: initrd too big " + "(%ldK)\ndisabling initrd\n", + size / 1024); + } +#endif + reserve_bootmem_node(NODE_DATA(0), pci_mem, memtop - pci_mem); + printk("irongate_init_arch: temporarily reserving " + "region %08lx-%08lx for PCI\n", pci_mem, memtop - 1); + } +} + +static void __init +irongate_setup_agp(void) +{ + /* Disable the GART window. AGPGART doesn't work due to yet + unresolved memory coherency issues... */ + IRONGATE0->agpva = IRONGATE0->agpva & ~0xf; + alpha_agpgart_size = 0; +} + +void __init +irongate_init_arch(void) +{ + struct pci_controller *hose; + int amd761 = (IRONGATE0->dev_vendor >> 16) > 0x7006; /* Albacore? */ + + IronECC = amd761 ? &IRONGATE0->bacsr54_eccms761 : &IRONGATE0->dramms; + + irongate_pci_clr_err(); + + if (amd761) + albacore_init_arch(); + + irongate_setup_agp(); + + /* + * Create our single hose. + */ + + pci_isa_hose = hose = alloc_pci_controller(); + hose->io_space = &ioport_resource; + hose->mem_space = &iomem_resource; + hose->index = 0; + + /* This is for userland consumption. For some reason, the 40-bit + PIO bias that we use in the kernel through KSEG didn't work for + the page table based user mappings. So make sure we get the + 43-bit PIO bias. 
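+	   For example, with the 43-bit bias a dense MEM address is handed
+	   to userland as (IRONGATE_MEM & 0xffffffffff) | (1UL << 43) --
+	   the 0x80000000000 in the assignments below is exactly 1 << 43.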
*/ + hose->sparse_mem_base = 0; + hose->sparse_io_base = 0; + hose->dense_mem_base + = (IRONGATE_MEM & 0xffffffffffUL) | 0x80000000000UL; + hose->dense_io_base + = (IRONGATE_IO & 0xffffffffffUL) | 0x80000000000UL; + + hose->sg_isa = hose->sg_pci = NULL; + __direct_map_base = 0; + __direct_map_size = 0xffffffff; +} + +/* + * IO map and AGP support + */ +#include <linux/vmalloc.h> +#include <linux/agp_backend.h> +#include <linux/agpgart.h> +#include <asm/pgalloc.h> + +#define GET_PAGE_DIR_OFF(addr) (addr >> 22) +#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr)) + +#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12) +#define GET_GATT(addr) (gatt_pages[GET_PAGE_DIR_IDX(addr)]) + +void __iomem * +irongate_ioremap(unsigned long addr, unsigned long size) +{ + struct vm_struct *area; + unsigned long vaddr; + unsigned long baddr, last; + u32 *mmio_regs, *gatt_pages, *cur_gatt, pte; + unsigned long gart_bus_addr; + + if (!alpha_agpgart_size) + return (void __iomem *)(addr + IRONGATE_MEM); + + gart_bus_addr = (unsigned long)IRONGATE0->bar0 & + PCI_BASE_ADDRESS_MEM_MASK; + + /* + * Check for within the AGP aperture... + */ + do { + /* + * Check the AGP area + */ + if (addr >= gart_bus_addr && addr + size - 1 < + gart_bus_addr + alpha_agpgart_size) + break; + + /* + * Not found - assume legacy ioremap + */ + return (void __iomem *)(addr + IRONGATE_MEM); + } while(0); + + mmio_regs = (u32 *)(((unsigned long)IRONGATE0->bar1 & + PCI_BASE_ADDRESS_MEM_MASK) + IRONGATE_MEM); + + gatt_pages = (u32 *)(phys_to_virt(mmio_regs[1])); /* FIXME */ + + /* + * Adjust the limits (mappings must be page aligned) + */ + if (addr & ~PAGE_MASK) { + printk("AGP ioremap failed... addr not page aligned (0x%lx)\n", + addr); + return (void __iomem *)(addr + IRONGATE_MEM); + } + last = addr + size - 1; + size = PAGE_ALIGN(last) - addr; + +#if 0 + printk("irongate_ioremap(0x%lx, 0x%lx)\n", addr, size); + printk("irongate_ioremap: gart_bus_addr 0x%lx\n", gart_bus_addr); + printk("irongate_ioremap: gart_aper_size 0x%lx\n", gart_aper_size); + printk("irongate_ioremap: mmio_regs %p\n", mmio_regs); + printk("irongate_ioremap: gatt_pages %p\n", gatt_pages); + + for(baddr = addr; baddr <= last; baddr += PAGE_SIZE) + { + cur_gatt = phys_to_virt(GET_GATT(baddr) & ~1); + pte = cur_gatt[GET_GATT_OFF(baddr)] & ~1; + printk("irongate_ioremap: cur_gatt %p pte 0x%x\n", + cur_gatt, pte); + } +#endif + + /* + * Map it + */ + area = get_vm_area(size, VM_IOREMAP); + if (!area) return NULL; + + for(baddr = addr, vaddr = (unsigned long)area->addr; + baddr <= last; + baddr += PAGE_SIZE, vaddr += PAGE_SIZE) + { + cur_gatt = phys_to_virt(GET_GATT(baddr) & ~1); + pte = cur_gatt[GET_GATT_OFF(baddr)] & ~1; + + if (__alpha_remap_area_pages(vaddr, + pte, PAGE_SIZE, 0)) { + printk("AGP ioremap: FAILED to map...\n"); + vfree(area->addr); + return NULL; + } + } + + flush_tlb_all(); + + vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK); +#if 0 + printk("irongate_ioremap(0x%lx, 0x%lx) returning 0x%lx\n", + addr, size, vaddr); +#endif + return (void __iomem *)vaddr; +} + +void +irongate_iounmap(volatile void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr; + if (((long)addr >> 41) == -2) + return; /* kseg map, nothing to do */ + if (addr) + return vfree((void *)(PAGE_MASK & addr)); +} diff --git a/arch/alpha/kernel/core_lca.c b/arch/alpha/kernel/core_lca.c new file mode 100644 index 0000000..6a5a914 --- /dev/null +++ b/arch/alpha/kernel/core_lca.c @@ -0,0 +1,515 @@ +/* + * linux/arch/alpha/kernel/core_lca.c + * + * Written by David 
Mosberger (davidm@cs.arizona.edu) with some code + * taken from Dave Rusling's (david.rusling@reo.mts.dec.com) 32-bit + * bios code. + * + * Code common to all LCA core logic chips. + */ + +#define __EXTERN_INLINE inline +#include <asm/io.h> +#include <asm/core_lca.h> +#undef __EXTERN_INLINE + +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/tty.h> + +#include <asm/ptrace.h> +#include <asm/smp.h> + +#include "proto.h" +#include "pci_impl.h" + + +/* + * BIOS32-style PCI interface: + */ + +/* + * Machine check reasons. Defined according to PALcode sources + * (osf.h and platform.h). + */ +#define MCHK_K_TPERR 0x0080 +#define MCHK_K_TCPERR 0x0082 +#define MCHK_K_HERR 0x0084 +#define MCHK_K_ECC_C 0x0086 +#define MCHK_K_ECC_NC 0x0088 +#define MCHK_K_UNKNOWN 0x008A +#define MCHK_K_CACKSOFT 0x008C +#define MCHK_K_BUGCHECK 0x008E +#define MCHK_K_OS_BUGCHECK 0x0090 +#define MCHK_K_DCPERR 0x0092 +#define MCHK_K_ICPERR 0x0094 + + +/* + * Platform-specific machine-check reasons: + */ +#define MCHK_K_SIO_SERR 0x204 /* all platforms so far */ +#define MCHK_K_SIO_IOCHK 0x206 /* all platforms so far */ +#define MCHK_K_DCSR 0x208 /* all but Noname */ + + +/* + * Given a bus, device, and function number, compute resulting + * configuration space address and setup the LCA_IOC_CONF register + * accordingly. It is therefore not safe to have concurrent + * invocations to configuration space access routines, but there + * really shouldn't be any need for this. + * + * Type 0: + * + * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 + * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | | | | | | | | | | | | | | | | | | | | | | | |F|F|F|R|R|R|R|R|R|0|0| + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * 31:11 Device select bit. + * 10:8 Function number + * 7:2 Register number + * + * Type 1: + * + * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 + * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1| + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * 31:24 reserved + * 23:16 bus number (8 bits = 128 possible buses) + * 15:11 Device number (5 bits) + * 10:8 function number + * 7:2 register number + * + * Notes: + * The function number selects which function of a multi-function device + * (e.g., SCSI and Ethernet). + * + * The register selects a DWORD (32 bit) register offset. Hence it + * doesn't get shifted by 2 bits as we want to "drop" the bottom two + * bits. + */ + +static int +mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where, + unsigned long *pci_addr) +{ + unsigned long addr; + u8 bus = pbus->number; + + if (bus == 0) { + int device = device_fn >> 3; + int func = device_fn & 0x7; + + /* Type 0 configuration cycle. */ + + if (device > 12) { + return -1; + } + + *(vulp)LCA_IOC_CONF = 0; + addr = (1 << (11 + device)) | (func << 8) | where; + } else { + /* Type 1 configuration cycle. */ + *(vulp)LCA_IOC_CONF = 1; + addr = (bus << 16) | (device_fn << 8) | where; + } + *pci_addr = addr; + return 0; +} + +static unsigned int +conf_read(unsigned long addr) +{ + unsigned long flags, code, stat0; + unsigned int value; + + local_irq_save(flags); + + /* Reset status register to avoid loosing errors. 
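+	   (STAT0 is evidently write-to-clear: reading the register and
+	   writing the value straight back clears any latched error bits.
+	   conf_write below uses the same idiom.)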
*/ + stat0 = *(vulp)LCA_IOC_STAT0; + *(vulp)LCA_IOC_STAT0 = stat0; + mb(); + + /* Access configuration space. */ + value = *(vuip)addr; + draina(); + + stat0 = *(vulp)LCA_IOC_STAT0; + if (stat0 & LCA_IOC_STAT0_ERR) { + code = ((stat0 >> LCA_IOC_STAT0_CODE_SHIFT) + & LCA_IOC_STAT0_CODE_MASK); + if (code != 1) { + printk("lca.c:conf_read: got stat0=%lx\n", stat0); + } + + /* Reset error status. */ + *(vulp)LCA_IOC_STAT0 = stat0; + mb(); + + /* Reset machine check. */ + wrmces(0x7); + + value = 0xffffffff; + } + local_irq_restore(flags); + return value; +} + +static void +conf_write(unsigned long addr, unsigned int value) +{ + unsigned long flags, code, stat0; + + local_irq_save(flags); /* avoid getting hit by machine check */ + + /* Reset status register to avoid loosing errors. */ + stat0 = *(vulp)LCA_IOC_STAT0; + *(vulp)LCA_IOC_STAT0 = stat0; + mb(); + + /* Access configuration space. */ + *(vuip)addr = value; + draina(); + + stat0 = *(vulp)LCA_IOC_STAT0; + if (stat0 & LCA_IOC_STAT0_ERR) { + code = ((stat0 >> LCA_IOC_STAT0_CODE_SHIFT) + & LCA_IOC_STAT0_CODE_MASK); + if (code != 1) { + printk("lca.c:conf_write: got stat0=%lx\n", stat0); + } + + /* Reset error status. */ + *(vulp)LCA_IOC_STAT0 = stat0; + mb(); + + /* Reset machine check. */ + wrmces(0x7); + } + local_irq_restore(flags); +} + +static int +lca_read_config(struct pci_bus *bus, unsigned int devfn, int where, + int size, u32 *value) +{ + unsigned long addr, pci_addr; + long mask; + int shift; + + if (mk_conf_addr(bus, devfn, where, &pci_addr)) + return PCIBIOS_DEVICE_NOT_FOUND; + + shift = (where & 3) * 8; + mask = (size - 1) * 8; + addr = (pci_addr << 5) + mask + LCA_CONF; + *value = conf_read(addr) >> (shift); + return PCIBIOS_SUCCESSFUL; +} + +static int +lca_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, + u32 value) +{ + unsigned long addr, pci_addr; + long mask; + + if (mk_conf_addr(bus, devfn, where, &pci_addr)) + return PCIBIOS_DEVICE_NOT_FOUND; + + mask = (size - 1) * 8; + addr = (pci_addr << 5) + mask + LCA_CONF; + conf_write(addr, value << ((where & 3) * 8)); + return PCIBIOS_SUCCESSFUL; +} + +struct pci_ops lca_pci_ops = +{ + .read = lca_read_config, + .write = lca_write_config, +}; + +void +lca_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end) +{ + wmb(); + *(vulp)LCA_IOC_TBIA = 0; + mb(); +} + +void __init +lca_init_arch(void) +{ + struct pci_controller *hose; + + /* + * Create our single hose. + */ + + pci_isa_hose = hose = alloc_pci_controller(); + hose->io_space = &ioport_resource; + hose->mem_space = &iomem_resource; + hose->index = 0; + + hose->sparse_mem_base = LCA_SPARSE_MEM - IDENT_ADDR; + hose->dense_mem_base = LCA_DENSE_MEM - IDENT_ADDR; + hose->sparse_io_base = LCA_IO - IDENT_ADDR; + hose->dense_io_base = 0; + + /* + * Set up the PCI to main memory translation windows. + * + * Mimic the SRM settings for the direct-map window. + * Window 0 is scatter-gather 8MB at 8MB (for isa). + * Window 1 is direct access 1GB at 1GB. + * + * Note that we do not try to save any of the DMA window CSRs + * before setting them, since we cannot read those CSRs on LCA. 
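+	 *
+	 * (Going by the values written below, a window base register takes
+	 * the PCI base address OR'd with control bits in <33:32>:
+	 * 3 = enabled scatter-gather, 2 = enabled direct-mapped.)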
+ */ + hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0); + hose->sg_pci = NULL; + __direct_map_base = 0x40000000; + __direct_map_size = 0x40000000; + + *(vulp)LCA_IOC_W_BASE0 = hose->sg_isa->dma_base | (3UL << 32); + *(vulp)LCA_IOC_W_MASK0 = (hose->sg_isa->size - 1) & 0xfff00000; + *(vulp)LCA_IOC_T_BASE0 = virt_to_phys(hose->sg_isa->ptes); + + *(vulp)LCA_IOC_W_BASE1 = __direct_map_base | (2UL << 32); + *(vulp)LCA_IOC_W_MASK1 = (__direct_map_size - 1) & 0xfff00000; + *(vulp)LCA_IOC_T_BASE1 = 0; + + *(vulp)LCA_IOC_TB_ENA = 0x80; + + lca_pci_tbi(hose, 0, -1); + + /* + * Disable PCI parity for now. The NCR53c810 chip has + * troubles meeting the PCI spec which results in + * data parity errors. + */ + *(vulp)LCA_IOC_PAR_DIS = 1UL<<5; + + /* + * Finally, set up for restoring the correct HAE if using SRM. + * Again, since we cannot read many of the CSRs on the LCA, + * one of which happens to be the HAE, we save the value that + * the SRM will expect... + */ + if (alpha_using_srm) + srm_hae = 0x80000000UL; +} + +/* + * Constants used during machine-check handling. I suppose these + * could be moved into lca.h but I don't see much reason why anybody + * else would want to use them. + */ + +#define ESR_EAV (1UL<< 0) /* error address valid */ +#define ESR_CEE (1UL<< 1) /* correctable error */ +#define ESR_UEE (1UL<< 2) /* uncorrectable error */ +#define ESR_WRE (1UL<< 3) /* write-error */ +#define ESR_SOR (1UL<< 4) /* error source */ +#define ESR_CTE (1UL<< 7) /* cache-tag error */ +#define ESR_MSE (1UL<< 9) /* multiple soft errors */ +#define ESR_MHE (1UL<<10) /* multiple hard errors */ +#define ESR_NXM (1UL<<12) /* non-existent memory */ + +#define IOC_ERR ( 1<<4) /* ioc logs an error */ +#define IOC_CMD_SHIFT 0 +#define IOC_CMD (0xf<<IOC_CMD_SHIFT) +#define IOC_CODE_SHIFT 8 +#define IOC_CODE (0xf<<IOC_CODE_SHIFT) +#define IOC_LOST ( 1<<5) +#define IOC_P_NBR ((__u32) ~((1<<13) - 1)) + +static void +mem_error(unsigned long esr, unsigned long ear) +{ + printk(" %s %s error to %s occurred at address %x\n", + ((esr & ESR_CEE) ? "Correctable" : + (esr & ESR_UEE) ? "Uncorrectable" : "A"), + (esr & ESR_WRE) ? "write" : "read", + (esr & ESR_SOR) ? "memory" : "b-cache", + (unsigned) (ear & 0x1ffffff8)); + if (esr & ESR_CTE) { + printk(" A b-cache tag parity error was detected.\n"); + } + if (esr & ESR_MSE) { + printk(" Several other correctable errors occurred.\n"); + } + if (esr & ESR_MHE) { + printk(" Several other uncorrectable errors occurred.\n"); + } + if (esr & ESR_NXM) { + printk(" Attempted to access non-existent memory.\n"); + } +} + +static void +ioc_error(__u32 stat0, __u32 stat1) +{ + static const char * const pci_cmd[] = { + "Interrupt Acknowledge", "Special", "I/O Read", "I/O Write", + "Rsvd 1", "Rsvd 2", "Memory Read", "Memory Write", "Rsvd3", + "Rsvd4", "Configuration Read", "Configuration Write", + "Memory Read Multiple", "Dual Address", "Memory Read Line", + "Memory Write and Invalidate" + }; + static const char * const err_name[] = { + "exceeded retry limit", "no device", "bad data parity", + "target abort", "bad address parity", "page table read error", + "invalid page", "data error" + }; + unsigned code = (stat0 & IOC_CODE) >> IOC_CODE_SHIFT; + unsigned cmd = (stat0 & IOC_CMD) >> IOC_CMD_SHIFT; + + printk(" %s initiated PCI %s cycle to address %x" + " failed due to %s.\n", + code > 3 ? 
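+		/* codes 0-3 are reported as CPU-initiated,
+		   4-7 as PCI-initiated */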
"PCI" : "CPU", pci_cmd[cmd], stat1, err_name[code]); + + if (code == 5 || code == 6) { + printk(" (Error occurred at PCI memory address %x.)\n", + (stat0 & ~IOC_P_NBR)); + } + if (stat0 & IOC_LOST) { + printk(" Other PCI errors occurred simultaneously.\n"); + } +} + +void +lca_machine_check(unsigned long vector, unsigned long la_ptr, + struct pt_regs *regs) +{ + const char * reason; + union el_lca el; + + el.c = (struct el_common *) la_ptr; + + wrmces(rdmces()); /* reset machine check pending flag */ + + printk(KERN_CRIT "LCA machine check: vector=%#lx pc=%#lx code=%#x\n", + vector, regs->pc, (unsigned int) el.c->code); + + /* + * The first quadword after the common header always seems to + * be the machine check reason---don't know why this isn't + * part of the common header instead. In the case of a long + * logout frame, the upper 32 bits is the machine check + * revision level, which we ignore for now. + */ + switch ((unsigned int) el.c->code) { + case MCHK_K_TPERR: reason = "tag parity error"; break; + case MCHK_K_TCPERR: reason = "tag control parity error"; break; + case MCHK_K_HERR: reason = "access to non-existent memory"; break; + case MCHK_K_ECC_C: reason = "correctable ECC error"; break; + case MCHK_K_ECC_NC: reason = "non-correctable ECC error"; break; + case MCHK_K_CACKSOFT: reason = "MCHK_K_CACKSOFT"; break; + case MCHK_K_BUGCHECK: reason = "illegal exception in PAL mode"; break; + case MCHK_K_OS_BUGCHECK: reason = "callsys in kernel mode"; break; + case MCHK_K_DCPERR: reason = "d-cache parity error"; break; + case MCHK_K_ICPERR: reason = "i-cache parity error"; break; + case MCHK_K_SIO_SERR: reason = "SIO SERR occurred on PCI bus"; break; + case MCHK_K_SIO_IOCHK: reason = "SIO IOCHK occurred on ISA bus"; break; + case MCHK_K_DCSR: reason = "MCHK_K_DCSR"; break; + case MCHK_K_UNKNOWN: + default: reason = "unknown"; break; + } + + switch (el.c->size) { + case sizeof(struct el_lca_mcheck_short): + printk(KERN_CRIT + " Reason: %s (short frame%s, dc_stat=%#lx):\n", + reason, el.c->retry ? ", retryable" : "", + el.s->dc_stat); + if (el.s->esr & ESR_EAV) { + mem_error(el.s->esr, el.s->ear); + } + if (el.s->ioc_stat0 & IOC_ERR) { + ioc_error(el.s->ioc_stat0, el.s->ioc_stat1); + } + break; + + case sizeof(struct el_lca_mcheck_long): + printk(KERN_CRIT " Reason: %s (long frame%s):\n", + reason, el.c->retry ? ", retryable" : ""); + printk(KERN_CRIT + " reason: %#lx exc_addr: %#lx dc_stat: %#lx\n", + el.l->pt[0], el.l->exc_addr, el.l->dc_stat); + printk(KERN_CRIT " car: %#lx\n", el.l->car); + if (el.l->esr & ESR_EAV) { + mem_error(el.l->esr, el.l->ear); + } + if (el.l->ioc_stat0 & IOC_ERR) { + ioc_error(el.l->ioc_stat0, el.l->ioc_stat1); + } + break; + + default: + printk(KERN_CRIT " Unknown errorlog size %d\n", el.c->size); + } + + /* Dump the logout area to give all info. */ +#ifdef CONFIG_VERBOSE_MCHECK + if (alpha_verbose_mcheck > 1) { + unsigned long * ptr = (unsigned long *) la_ptr; + long i; + for (i = 0; i < el.c->size / sizeof(long); i += 2) { + printk(KERN_CRIT " +%8lx %016lx %016lx\n", + i*sizeof(long), ptr[i], ptr[i+1]); + } + } +#endif /* CONFIG_VERBOSE_MCHECK */ +} + +/* + * The following routines are needed to support the SPEED changing + * necessary to successfully manage the thermal problem on the AlphaBook1. 
+ */ + +void +lca_clock_print(void) +{ + long pmr_reg; + + pmr_reg = LCA_READ_PMR; + + printk("Status of clock control:\n"); + printk("\tPrimary clock divisor\t0x%lx\n", LCA_GET_PRIMARY(pmr_reg)); + printk("\tOverride clock divisor\t0x%lx\n", LCA_GET_OVERRIDE(pmr_reg)); + printk("\tInterrupt override is %s\n", + (pmr_reg & LCA_PMR_INTO) ? "on" : "off"); + printk("\tDMA override is %s\n", + (pmr_reg & LCA_PMR_DMAO) ? "on" : "off"); + +} + +int +lca_get_clock(void) +{ + long pmr_reg; + + pmr_reg = LCA_READ_PMR; + return(LCA_GET_PRIMARY(pmr_reg)); + +} + +void +lca_clock_fiddle(int divisor) +{ + long pmr_reg; + + pmr_reg = LCA_READ_PMR; + LCA_SET_PRIMARY_CLOCK(pmr_reg, divisor); + /* lca_norm_clock = divisor; */ + LCA_WRITE_PMR(pmr_reg); + mb(); +} diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c new file mode 100644 index 0000000..44866cb --- /dev/null +++ b/arch/alpha/kernel/core_marvel.c @@ -0,0 +1,1154 @@ +/* + * linux/arch/alpha/kernel/core_marvel.c + * + * Code common to all Marvel based systems. + */ + +#define __EXTERN_INLINE inline +#include <asm/io.h> +#include <asm/core_marvel.h> +#undef __EXTERN_INLINE + +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/sched.h> +#include <linux/init.h> +#include <linux/vmalloc.h> +#include <linux/mc146818rtc.h> +#include <linux/rtc.h> +#include <linux/module.h> +#include <linux/bootmem.h> + +#include <asm/ptrace.h> +#include <asm/smp.h> +#include <asm/gct.h> +#include <asm/pgalloc.h> +#include <asm/tlbflush.h> +#include <asm/rtc.h> + +#include "proto.h" +#include "pci_impl.h" + + +/* + * Debug helpers + */ +#define DEBUG_CONFIG 0 + +#if DEBUG_CONFIG +# define DBG_CFG(args) printk args +#else +# define DBG_CFG(args) +#endif + + +/* + * Private data + */ +static struct io7 *io7_head = NULL; + + +/* + * Helper functions + */ +static unsigned long __attribute__ ((unused)) +read_ev7_csr(int pe, unsigned long offset) +{ + ev7_csr *ev7csr = EV7_CSR_KERN(pe, offset); + unsigned long q; + + mb(); + q = ev7csr->csr; + mb(); + + return q; +} + +static void __attribute__ ((unused)) +write_ev7_csr(int pe, unsigned long offset, unsigned long q) +{ + ev7_csr *ev7csr = EV7_CSR_KERN(pe, offset); + + mb(); + ev7csr->csr = q; + mb(); +} + +static char * __init +mk_resource_name(int pe, int port, char *str) +{ + char tmp[80]; + char *name; + + sprintf(tmp, "PCI %s PE %d PORT %d", str, pe, port); + name = alloc_bootmem(strlen(tmp) + 1); + strcpy(name, tmp); + + return name; +} + +inline struct io7 * +marvel_next_io7(struct io7 *prev) +{ + return (prev ? prev->next : io7_head); +} + +struct io7 * +marvel_find_io7(int pe) +{ + struct io7 *io7; + + for (io7 = io7_head; io7 && io7->pe != pe; io7 = io7->next) + continue; + + return io7; +} + +static struct io7 * __init +alloc_io7(unsigned int pe) +{ + struct io7 *io7; + struct io7 *insp; + int h; + + if (marvel_find_io7(pe)) { + printk(KERN_WARNING "IO7 at PE %d already allocated!\n", pe); + return NULL; + } + + io7 = alloc_bootmem(sizeof(*io7)); + io7->pe = pe; + spin_lock_init(&io7->irq_lock); + + for (h = 0; h < 4; h++) { + io7->ports[h].io7 = io7; + io7->ports[h].port = h; + io7->ports[h].enabled = 0; /* default to disabled */ + } + + /* + * Insert in pe sorted order. 
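+	 * (Plain sorted singly-linked-list insertion: new head if the list
+	 * is empty or our pe is the smallest, otherwise walk until the next
+	 * node's pe is larger and splice in; a duplicate pe is rejected.)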
+ */ + if (NULL == io7_head) /* empty list */ + io7_head = io7; + else if (io7_head->pe > io7->pe) { /* insert at head */ + io7->next = io7_head; + io7_head = io7; + } else { /* insert at position */ + for (insp = io7_head; insp; insp = insp->next) { + if (insp->pe == io7->pe) { + printk(KERN_ERR "Too many IO7s at PE %d\n", + io7->pe); + return NULL; + } + + if (NULL == insp->next || + insp->next->pe > io7->pe) { /* insert here */ + io7->next = insp->next; + insp->next = io7; + break; + } + } + + if (NULL == insp) { /* couldn't insert ?!? */ + printk(KERN_WARNING "Failed to insert IO7 at PE %d " + " - adding at head of list\n", io7->pe); + io7->next = io7_head; + io7_head = io7; + } + } + + return io7; +} + +void +io7_clear_errors(struct io7 *io7) +{ + io7_port7_csrs *p7csrs; + io7_ioport_csrs *csrs; + int port; + + + /* + * First the IO ports. + */ + for (port = 0; port < 4; port++) { + csrs = IO7_CSRS_KERN(io7->pe, port); + + csrs->POx_ERR_SUM.csr = -1UL; + csrs->POx_TLB_ERR.csr = -1UL; + csrs->POx_SPL_COMPLT.csr = -1UL; + csrs->POx_TRANS_SUM.csr = -1UL; + } + + /* + * Then the common ones. + */ + p7csrs = IO7_PORT7_CSRS_KERN(io7->pe); + + p7csrs->PO7_ERROR_SUM.csr = -1UL; + p7csrs->PO7_UNCRR_SYM.csr = -1UL; + p7csrs->PO7_CRRCT_SYM.csr = -1UL; +} + + +/* + * IO7 PCI, PCI/X, AGP configuration. + */ +static void __init +io7_init_hose(struct io7 *io7, int port) +{ + static int hose_index = 0; + + struct pci_controller *hose = alloc_pci_controller(); + struct io7_port *io7_port = &io7->ports[port]; + io7_ioport_csrs *csrs = IO7_CSRS_KERN(io7->pe, port); + int i; + + hose->index = hose_index++; /* arbitrary */ + + /* + * We don't have an isa or legacy hose, but glibc expects to be + * able to use the bus == 0 / dev == 0 form of the iobase syscall + * to determine information about the i/o system. Since XFree86 + * relies on glibc's determination to tell whether or not to use + * sparse access, we need to point the pci_isa_hose at a real hose + * so at least that determination is correct. + */ + if (hose->index == 0) + pci_isa_hose = hose; + + io7_port->csrs = csrs; + io7_port->hose = hose; + hose->sysdata = io7_port; + + hose->io_space = alloc_resource(); + hose->mem_space = alloc_resource(); + + /* + * Base addresses for userland consumption. Since these are going + * to be mapped, they are pure physical addresses. + */ + hose->sparse_mem_base = hose->sparse_io_base = 0; + hose->dense_mem_base = IO7_MEM_PHYS(io7->pe, port); + hose->dense_io_base = IO7_IO_PHYS(io7->pe, port); + + /* + * Base addresses and resource ranges for kernel consumption. + */ + hose->config_space_base = (unsigned long)IO7_CONF_KERN(io7->pe, port); + + hose->io_space->start = (unsigned long)IO7_IO_KERN(io7->pe, port); + hose->io_space->end = hose->io_space->start + IO7_IO_SPACE - 1; + hose->io_space->name = mk_resource_name(io7->pe, port, "IO"); + hose->io_space->flags = IORESOURCE_IO; + + hose->mem_space->start = (unsigned long)IO7_MEM_KERN(io7->pe, port); + hose->mem_space->end = hose->mem_space->start + IO7_MEM_SPACE - 1; + hose->mem_space->name = mk_resource_name(io7->pe, port, "MEM"); + hose->mem_space->flags = IORESOURCE_MEM; + + if (request_resource(&ioport_resource, hose->io_space) < 0) + printk(KERN_ERR "Failed to request IO on hose %d\n", + hose->index); + if (request_resource(&iomem_resource, hose->mem_space) < 0) + printk(KERN_ERR "Failed to request MEM on hose %d\n", + hose->index); + + /* + * Save the existing DMA window settings for later restoration. 
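+	 * (Just a snapshot of WBASE/WMASK/TBASE for all four windows,
+	 * presumably so the console's setup can be put back later.)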
+ */ + for (i = 0; i < 4; i++) { + io7_port->saved_wbase[i] = csrs->POx_WBASE[i].csr; + io7_port->saved_wmask[i] = csrs->POx_WMASK[i].csr; + io7_port->saved_tbase[i] = csrs->POx_TBASE[i].csr; + } + + /* + * Set up the PCI to main memory translation windows. + * + * Window 0 is scatter-gather 8MB at 8MB + * Window 1 is direct access 1GB at 2GB + * Window 2 is scatter-gather (up-to) 1GB at 3GB + * Window 3 is disabled + */ + + /* + * TBIA before modifying windows. + */ + marvel_pci_tbi(hose, 0, -1); + + /* + * Set up window 0 for scatter-gather 8MB at 8MB. + */ + hose->sg_isa = iommu_arena_new_node(marvel_cpuid_to_nid(io7->pe), + hose, 0x00800000, 0x00800000, 0); + hose->sg_isa->align_entry = 8; /* cache line boundary */ + csrs->POx_WBASE[0].csr = + hose->sg_isa->dma_base | wbase_m_ena | wbase_m_sg; + csrs->POx_WMASK[0].csr = (hose->sg_isa->size - 1) & wbase_m_addr; + csrs->POx_TBASE[0].csr = virt_to_phys(hose->sg_isa->ptes); + + /* + * Set up window 1 for direct-mapped 1GB at 2GB. + */ + csrs->POx_WBASE[1].csr = __direct_map_base | wbase_m_ena; + csrs->POx_WMASK[1].csr = (__direct_map_size - 1) & wbase_m_addr; + csrs->POx_TBASE[1].csr = 0; + + /* + * Set up window 2 for scatter-gather (up-to) 1GB at 3GB. + */ + hose->sg_pci = iommu_arena_new_node(marvel_cpuid_to_nid(io7->pe), + hose, 0xc0000000, 0x40000000, 0); + hose->sg_pci->align_entry = 8; /* cache line boundary */ + csrs->POx_WBASE[2].csr = + hose->sg_pci->dma_base | wbase_m_ena | wbase_m_sg; + csrs->POx_WMASK[2].csr = (hose->sg_pci->size - 1) & wbase_m_addr; + csrs->POx_TBASE[2].csr = virt_to_phys(hose->sg_pci->ptes); + + /* + * Disable window 3. + */ + csrs->POx_WBASE[3].csr = 0; + + /* + * Make sure that the AGP Monster Window is disabled. + */ + csrs->POx_CTRL.csr &= ~(1UL << 61); + +#if 1 + printk("FIXME: disabling master aborts\n"); + csrs->POx_MSK_HEI.csr &= ~(3UL << 14); +#endif + /* + * TBIA after modifying windows. + */ + marvel_pci_tbi(hose, 0, -1); +} + +static void __init +marvel_init_io7(struct io7 *io7) +{ + int i; + + printk("Initializing IO7 at PID %d\n", io7->pe); + + /* + * Get the Port 7 CSR pointer. + */ + io7->csrs = IO7_PORT7_CSRS_KERN(io7->pe); + + /* + * Init this IO7's hoses. + */ + for (i = 0; i < IO7_NUM_PORTS; i++) { + io7_ioport_csrs *csrs = IO7_CSRS_KERN(io7->pe, i); + if (csrs->POx_CACHE_CTL.csr == 8) { + io7->ports[i].enabled = 1; + io7_init_hose(io7, i); + } + } +} + +void +marvel_io7_present(gct6_node *node) +{ + int pe; + + if (node->type != GCT_TYPE_HOSE || + node->subtype != GCT_SUBTYPE_IO_PORT_MODULE) + return; + + pe = (node->id >> 8) & 0xff; + printk("Found an IO7 at PID %d\n", pe); + + alloc_io7(pe); +} + +static void __init +marvel_init_vga_hose(void) +{ +#ifdef CONFIG_VGA_HOSE + u64 *pu64 = (u64 *)((u64)hwrpb + hwrpb->ctbt_offset); + + if (pu64[7] == 3) { /* TERM_TYPE == graphics */ + struct pci_controller *hose = NULL; + int h = (pu64[30] >> 24) & 0xff; /* TERM_OUT_LOC, hose # */ + struct io7 *io7; + int pid, port; + + /* FIXME - encoding is going to have to change for Marvel + * since hose will be able to overflow a byte... 
+ * need to fix this decode when the console + * changes its encoding + */ + printk("console graphics is on hose %d (console)\n", h); + + /* + * The console's hose numbering is: + * + * hose<n:2>: PID + * hose<1:0>: PORT + * + * We need to find the hose at that pid and port + */ + pid = h >> 2; + port = h & 3; + if ((io7 = marvel_find_io7(pid))) + hose = io7->ports[port].hose; + + if (hose) { + printk("Console graphics on hose %d\n", hose->index); + pci_vga_hose = hose; + } + } +#endif /* CONFIG_VGA_HOSE */ +} + +gct6_search_struct gct_wanted_node_list[] = { + { GCT_TYPE_HOSE, GCT_SUBTYPE_IO_PORT_MODULE, marvel_io7_present }, + { 0, 0, NULL } +}; + +/* + * In case the GCT is not complete, let the user specify PIDs with IO7s + * at boot time. Syntax is 'io7=a,b,c,...,n' where a-n are the PIDs (decimal) + * where IO7s are connected + */ +static int __init +marvel_specify_io7(char *str) +{ + unsigned long pid; + struct io7 *io7; + char *pchar; + + do { + pid = simple_strtoul(str, &pchar, 0); + if (pchar != str) { + printk("User-specified IO7 at PID %lu\n", pid); + io7 = alloc_io7(pid); + if (io7) marvel_init_io7(io7); + } + + if (pchar == str) pchar++; + str = pchar; + } while(*str); + + return 0; +} +__setup("io7=", marvel_specify_io7); + +void __init +marvel_init_arch(void) +{ + struct io7 *io7; + + /* With multiple PCI busses, we play with I/O as physical addrs. */ + ioport_resource.end = ~0UL; + + /* PCI DMA Direct Mapping is 1GB at 2GB. */ + __direct_map_base = 0x80000000; + __direct_map_size = 0x40000000; + + /* Parse the config tree. */ + gct6_find_nodes(GCT_NODE_PTR(0), gct_wanted_node_list); + + /* Init the io7s. */ + for (io7 = NULL; NULL != (io7 = marvel_next_io7(io7)); ) + marvel_init_io7(io7); + + /* Check for graphic console location (if any). */ + marvel_init_vga_hose(); +} + +void +marvel_kill_arch(int mode) +{ +} + + +/* + * PCI Configuration Space access functions + * + * Configuration space addresses have the following format: + * + * |2 2 2 2|1 1 1 1|1 1 1 1|1 1 + * |3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|R|R| + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * n:24 reserved for hose base + * 23:16 bus number (8 bits = 128 possible buses) + * 15:11 Device number (5 bits) + * 10:8 function number + * 7:2 register number + * + * Notes: + * IO7 determines whether to use a type 0 or type 1 config cycle + * based on the bus number. Therefore the bus number must be set + * to 0 for the root bus on any hose. + * + * The function number selects which function of a multi-function device + * (e.g., SCSI and Ethernet). + * + */ + +static inline unsigned long +build_conf_addr(struct pci_controller *hose, u8 bus, + unsigned int devfn, int where) +{ + return (hose->config_space_base | (bus << 16) | (devfn << 8) | where); +} + +static unsigned long +mk_conf_addr(struct pci_bus *pbus, unsigned int devfn, int where) +{ + struct pci_controller *hose = pbus->sysdata; + struct io7_port *io7_port; + unsigned long addr = 0; + u8 bus = pbus->number; + + if (!hose) + return addr; + + /* Check for enabled. */ + io7_port = hose->sysdata; + if (!io7_port->enabled) + return addr; + + if (!pbus->parent) { /* No parent means peer PCI bus. */ + /* Don't support idsel > 20 on primary bus. 
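+		   (That is, PCI_DEVFN(21, 0) and up -- device numbers
+		   21-31, whose IDSELs are apparently not wired here.)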
*/ + if (devfn >= PCI_DEVFN(21, 0)) + return addr; + bus = 0; + } + + addr = build_conf_addr(hose, bus, devfn, where); + + DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr)); + return addr; +} + +static int +marvel_read_config(struct pci_bus *bus, unsigned int devfn, int where, + int size, u32 *value) +{ + unsigned long addr; + + if (0 == (addr = mk_conf_addr(bus, devfn, where))) + return PCIBIOS_DEVICE_NOT_FOUND; + + switch(size) { + case 1: + *value = __kernel_ldbu(*(vucp)addr); + break; + case 2: + *value = __kernel_ldwu(*(vusp)addr); + break; + case 4: + *value = *(vuip)addr; + break; + default: + return PCIBIOS_FUNC_NOT_SUPPORTED; + } + + return PCIBIOS_SUCCESSFUL; +} + +static int +marvel_write_config(struct pci_bus *bus, unsigned int devfn, int where, + int size, u32 value) +{ + unsigned long addr; + + if (0 == (addr = mk_conf_addr(bus, devfn, where))) + return PCIBIOS_DEVICE_NOT_FOUND; + + switch (size) { + case 1: + __kernel_stb(value, *(vucp)addr); + mb(); + __kernel_ldbu(*(vucp)addr); + break; + case 2: + __kernel_stw(value, *(vusp)addr); + mb(); + __kernel_ldwu(*(vusp)addr); + break; + case 4: + *(vuip)addr = value; + mb(); + *(vuip)addr; + break; + default: + return PCIBIOS_FUNC_NOT_SUPPORTED; + } + + return PCIBIOS_SUCCESSFUL; +} + +struct pci_ops marvel_pci_ops = +{ + .read = marvel_read_config, + .write = marvel_write_config, +}; + + +/* + * Other PCI helper functions. + */ +void +marvel_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end) +{ + io7_ioport_csrs *csrs = ((struct io7_port *)hose->sysdata)->csrs; + + wmb(); + csrs->POx_SG_TBIA.csr = 0; + mb(); + csrs->POx_SG_TBIA.csr; +} + + + +/* + * RTC Support + */ +struct marvel_rtc_access_info { + unsigned long function; + unsigned long index; + unsigned long data; +}; + +static void +__marvel_access_rtc(void *info) +{ + struct marvel_rtc_access_info *rtc_access = info; + + register unsigned long __r0 __asm__("$0"); + register unsigned long __r16 __asm__("$16") = rtc_access->function; + register unsigned long __r17 __asm__("$17") = rtc_access->index; + register unsigned long __r18 __asm__("$18") = rtc_access->data; + + __asm__ __volatile__( + "call_pal %4 # cserve rtc" + : "=r"(__r16), "=r"(__r17), "=r"(__r18), "=r"(__r0) + : "i"(PAL_cserve), "0"(__r16), "1"(__r17), "2"(__r18) + : "$1", "$22", "$23", "$24", "$25"); + + rtc_access->data = __r0; +} + +static u8 +__marvel_rtc_io(u8 b, unsigned long addr, int write) +{ + static u8 index = 0; + + struct marvel_rtc_access_info rtc_access; + u8 ret = 0; + + switch(addr) { + case 0x70: /* RTC_PORT(0) */ + if (write) index = b; + ret = index; + break; + + case 0x71: /* RTC_PORT(1) */ + rtc_access.index = index; + rtc_access.data = BCD_TO_BIN(b); + rtc_access.function = 0x48 + !write; /* GET/PUT_TOY */ + +#ifdef CONFIG_SMP + if (smp_processor_id() != boot_cpuid) + smp_call_function_on_cpu(__marvel_access_rtc, + &rtc_access, 1, 1, + cpumask_of_cpu(boot_cpuid)); + else + __marvel_access_rtc(&rtc_access); +#else + __marvel_access_rtc(&rtc_access); +#endif + ret = BIN_TO_BCD(rtc_access.data); + break; + + default: + printk(KERN_WARNING "Illegal RTC port %lx\n", addr); + break; + } + + return ret; +} + + +/* + * IO map support. + */ + +#define __marvel_is_mem_vga(a) (((a) >= 0xa0000) && ((a) <= 0xc0000)) + +void __iomem * +marvel_ioremap(unsigned long addr, unsigned long size) +{ + struct pci_controller *hose; + unsigned long baddr, last; + struct vm_struct *area; + unsigned long vaddr; + unsigned long *ptes; + unsigned long pfn; + + /* + * Adjust the addr. 
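+	 * (Legacy VGA addresses, 0xa0000-0xc0000, are first relocated into
+	 * the VGA hose's memory space so the hose search below finds them.)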
+ */ +#ifdef CONFIG_VGA_HOSE + if (pci_vga_hose && __marvel_is_mem_vga(addr)) { + addr += pci_vga_hose->mem_space->start; + } +#endif + + /* + * Find the hose. + */ + for (hose = hose_head; hose; hose = hose->next) { + if ((addr >> 32) == (hose->mem_space->start >> 32)) + break; + } + if (!hose) + return NULL; + + /* + * We have the hose - calculate the bus limits. + */ + baddr = addr - hose->mem_space->start; + last = baddr + size - 1; + + /* + * Is it direct-mapped? + */ + if ((baddr >= __direct_map_base) && + ((baddr + size - 1) < __direct_map_base + __direct_map_size)) { + addr = IDENT_ADDR | (baddr - __direct_map_base); + return (void __iomem *) addr; + } + + /* + * Check the scatter-gather arena. + */ + if (hose->sg_pci && + baddr >= (unsigned long)hose->sg_pci->dma_base && + last < (unsigned long)hose->sg_pci->dma_base + hose->sg_pci->size) { + + /* + * Adjust the limits (mappings must be page aligned) + */ + baddr -= hose->sg_pci->dma_base; + last -= hose->sg_pci->dma_base; + baddr &= PAGE_MASK; + size = PAGE_ALIGN(last) - baddr; + + /* + * Map it. + */ + area = get_vm_area(size, VM_IOREMAP); + if (!area) + return NULL; + + ptes = hose->sg_pci->ptes; + for (vaddr = (unsigned long)area->addr; + baddr <= last; + baddr += PAGE_SIZE, vaddr += PAGE_SIZE) { + pfn = ptes[baddr >> PAGE_SHIFT]; + if (!(pfn & 1)) { + printk("ioremap failed... pte not valid...\n"); + vfree(area->addr); + return NULL; + } + pfn >>= 1; /* make it a true pfn */ + + if (__alpha_remap_area_pages(vaddr, + pfn << PAGE_SHIFT, + PAGE_SIZE, 0)) { + printk("FAILED to map...\n"); + vfree(area->addr); + return NULL; + } + } + + flush_tlb_all(); + + vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK); + + return (void __iomem *) vaddr; + } + + return NULL; +} + +void +marvel_iounmap(volatile void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr; + if (addr >= VMALLOC_START) + vfree((void *)(PAGE_MASK & addr)); +} + +int +marvel_is_mmio(const volatile void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr; + + if (addr >= VMALLOC_START) + return 1; + else + return (addr & 0xFF000000UL) == 0; +} + +#define __marvel_is_port_vga(a) \ + (((a) >= 0x3b0) && ((a) < 0x3e0) && ((a) != 0x3b3) && ((a) != 0x3d3)) +#define __marvel_is_port_kbd(a) (((a) == 0x60) || ((a) == 0x64)) +#define __marvel_is_port_rtc(a) (((a) == 0x70) || ((a) == 0x71)) + +void __iomem *marvel_ioportmap (unsigned long addr) +{ + if (__marvel_is_port_rtc (addr) || __marvel_is_port_kbd(addr)) + ; +#ifdef CONFIG_VGA_HOSE + else if (__marvel_is_port_vga (addr) && pci_vga_hose) + addr += pci_vga_hose->io_space->start; +#endif + else + return NULL; + return (void __iomem *)addr; +} + +unsigned int +marvel_ioread8(void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr; + if (__marvel_is_port_kbd(addr)) + return 0; + else if (__marvel_is_port_rtc(addr)) + return __marvel_rtc_io(0, addr, 0); + else + return __kernel_ldbu(*(vucp)addr); +} + +void +marvel_iowrite8(u8 b, void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr; + if (__marvel_is_port_kbd(addr)) + return; + else if (__marvel_is_port_rtc(addr)) + __marvel_rtc_io(b, addr, 1); + else + __kernel_stb(b, *(vucp)addr); +} + +#ifndef CONFIG_ALPHA_GENERIC +EXPORT_SYMBOL(marvel_ioremap); +EXPORT_SYMBOL(marvel_iounmap); +EXPORT_SYMBOL(marvel_is_mmio); +EXPORT_SYMBOL(marvel_ioportmap); +EXPORT_SYMBOL(marvel_ioread8); +EXPORT_SYMBOL(marvel_iowrite8); +#endif + +/* + * NUMA Support + */ +/********** + * FIXME - for now each cpu is a node by itself + * -- no real 
support for striped mode + ********** + */ +int +marvel_pa_to_nid(unsigned long pa) +{ + int cpuid; + + if ((pa >> 43) & 1) /* I/O */ + cpuid = (~(pa >> 35) & 0xff); + else /* mem */ + cpuid = ((pa >> 34) & 0x3) | ((pa >> (37 - 2)) & (0x1f << 2)); + + return marvel_cpuid_to_nid(cpuid); +} + +int +marvel_cpuid_to_nid(int cpuid) +{ + return cpuid; +} + +unsigned long +marvel_node_mem_start(int nid) +{ + unsigned long pa; + + pa = (nid & 0x3) | ((nid & (0x1f << 2)) << 1); + pa <<= 34; + + return pa; +} + +unsigned long +marvel_node_mem_size(int nid) +{ + return 16UL * 1024 * 1024 * 1024; /* 16GB */ +} + + +/* + * AGP GART Support. + */ +#include <linux/agp_backend.h> +#include <asm/agp_backend.h> +#include <linux/slab.h> +#include <linux/delay.h> + +struct marvel_agp_aperture { + struct pci_iommu_arena *arena; + long pg_start; + long pg_count; +}; + +static int +marvel_agp_setup(alpha_agp_info *agp) +{ + struct marvel_agp_aperture *aper; + + if (!alpha_agpgart_size) + return -ENOMEM; + + aper = kmalloc(sizeof(*aper), GFP_KERNEL); + if (aper == NULL) return -ENOMEM; + + aper->arena = agp->hose->sg_pci; + aper->pg_count = alpha_agpgart_size / PAGE_SIZE; + aper->pg_start = iommu_reserve(aper->arena, aper->pg_count, + aper->pg_count - 1); + + if (aper->pg_start < 0) { + printk(KERN_ERR "Failed to reserve AGP memory\n"); + kfree(aper); + return -ENOMEM; + } + + agp->aperture.bus_base = + aper->arena->dma_base + aper->pg_start * PAGE_SIZE; + agp->aperture.size = aper->pg_count * PAGE_SIZE; + agp->aperture.sysdata = aper; + + return 0; +} + +static void +marvel_agp_cleanup(alpha_agp_info *agp) +{ + struct marvel_agp_aperture *aper = agp->aperture.sysdata; + int status; + + status = iommu_release(aper->arena, aper->pg_start, aper->pg_count); + if (status == -EBUSY) { + printk(KERN_WARNING + "Attempted to release bound AGP memory - unbinding\n"); + iommu_unbind(aper->arena, aper->pg_start, aper->pg_count); + status = iommu_release(aper->arena, aper->pg_start, + aper->pg_count); + } + if (status < 0) + printk(KERN_ERR "Failed to release AGP memory\n"); + + kfree(aper); + kfree(agp); +} + +static int +marvel_agp_configure(alpha_agp_info *agp) +{ + io7_ioport_csrs *csrs = ((struct io7_port *)agp->hose->sysdata)->csrs; + struct io7 *io7 = ((struct io7_port *)agp->hose->sysdata)->io7; + unsigned int new_rate = 0; + unsigned long agp_pll; + + /* + * Check the requested mode against the PLL setting. + * The agpgart_be code has not programmed the card yet, + * so we can still tweak mode here. + */ + agp_pll = io7->csrs->POx_RST[IO7_AGP_PORT].csr; + switch(IO7_PLL_RNGB(agp_pll)) { + case 0x4: /* 2x only */ + /* + * The PLL is only programmed for 2x, so adjust the + * rate to 2x, if necessary. + */ + if (agp->mode.bits.rate != 2) + new_rate = 2; + break; + + case 0x6: /* 1x / 4x */ + /* + * The PLL is programmed for 1x or 4x. Don't go faster + * than requested, so if the requested rate is 2x, use 1x. + */ + if (agp->mode.bits.rate == 2) + new_rate = 1; + break; + + default: /* ??????? */ + /* + * Don't know what this PLL setting is, take the requested + * rate, but warn the user. + */ + printk("%s: unknown PLL setting RNGB=%lx (PLL6_CTL=%016lx)\n", + __FUNCTION__, IO7_PLL_RNGB(agp_pll), agp_pll); + break; + } + + /* + * Set the new rate, if necessary. 
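+	 * (new_rate is non-zero only if the requested rate clashed with
+	 * the PLL range decoded above; warn and override in that case.)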
+ */ + if (new_rate) { + printk("Requested AGP Rate %dX not compatible " + "with PLL setting - using %dX\n", + agp->mode.bits.rate, + new_rate); + + agp->mode.bits.rate = new_rate; + } + + printk("Enabling AGP on hose %d: %dX%s RQ %d\n", + agp->hose->index, agp->mode.bits.rate, + agp->mode.bits.sba ? " - SBA" : "", agp->mode.bits.rq); + + csrs->AGP_CMD.csr = agp->mode.lw; + + return 0; +} + +static int +marvel_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem) +{ + struct marvel_agp_aperture *aper = agp->aperture.sysdata; + return iommu_bind(aper->arena, aper->pg_start + pg_start, + mem->page_count, mem->memory); +} + +static int +marvel_agp_unbind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem) +{ + struct marvel_agp_aperture *aper = agp->aperture.sysdata; + return iommu_unbind(aper->arena, aper->pg_start + pg_start, + mem->page_count); +} + +static unsigned long +marvel_agp_translate(alpha_agp_info *agp, dma_addr_t addr) +{ + struct marvel_agp_aperture *aper = agp->aperture.sysdata; + unsigned long baddr = addr - aper->arena->dma_base; + unsigned long pte; + + if (addr < agp->aperture.bus_base || + addr >= agp->aperture.bus_base + agp->aperture.size) { + printk("%s: addr out of range\n", __FUNCTION__); + return -EINVAL; + } + + pte = aper->arena->ptes[baddr >> PAGE_SHIFT]; + if (!(pte & 1)) { + printk("%s: pte not valid\n", __FUNCTION__); + return -EINVAL; + } + return (pte >> 1) << PAGE_SHIFT; +} + +struct alpha_agp_ops marvel_agp_ops = +{ + .setup = marvel_agp_setup, + .cleanup = marvel_agp_cleanup, + .configure = marvel_agp_configure, + .bind = marvel_agp_bind_memory, + .unbind = marvel_agp_unbind_memory, + .translate = marvel_agp_translate +}; + +alpha_agp_info * +marvel_agp_info(void) +{ + struct pci_controller *hose; + io7_ioport_csrs *csrs; + alpha_agp_info *agp; + struct io7 *io7; + + /* + * Find the first IO7 with an AGP card. + * + * FIXME -- there should be a better way (we want to be able to + * specify and what if the agp card is not video???) + */ + hose = NULL; + for (io7 = NULL; (io7 = marvel_next_io7(io7)) != NULL; ) { + struct pci_controller *h; + vuip addr; + + if (!io7->ports[IO7_AGP_PORT].enabled) + continue; + + h = io7->ports[IO7_AGP_PORT].hose; + addr = (vuip)build_conf_addr(h, 0, PCI_DEVFN(5, 0), 0); + + if (*addr != 0xffffffffu) { + hose = h; + break; + } + } + + if (!hose || !hose->sg_pci) + return NULL; + + printk("MARVEL - using hose %d as AGP\n", hose->index); + + /* + * Get the csrs from the hose. + */ + csrs = ((struct io7_port *)hose->sysdata)->csrs; + + /* + * Allocate the info structure. + */ + agp = kmalloc(sizeof(*agp), GFP_KERNEL); + + /* + * Fill it in. + */ + agp->hose = hose; + agp->private = NULL; + agp->ops = &marvel_agp_ops; + + /* + * Aperture - not configured until ops.setup(). + */ + agp->aperture.bus_base = 0; + agp->aperture.size = 0; + agp->aperture.sysdata = NULL; + + /* + * Capabilities. + * + * NOTE: IO7 reports through AGP_STAT that it can support a read queue + * depth of 17 (rq = 0x10). It actually only supports a depth of + * 16 (rq = 0xf). + */ + agp->capability.lw = csrs->AGP_STAT.csr; + agp->capability.bits.rq = 0xf; + + /* + * Mode. 
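+	 * (Start from whatever AGP_CMD holds now; marvel_agp_configure()
+	 * rewrites it once agpgart settles on a mode.)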
+ */ + agp->mode.lw = csrs->AGP_CMD.csr; + + return agp; +} diff --git a/arch/alpha/kernel/core_mcpcia.c b/arch/alpha/kernel/core_mcpcia.c new file mode 100644 index 0000000..28849c8 --- /dev/null +++ b/arch/alpha/kernel/core_mcpcia.c @@ -0,0 +1,618 @@ +/* + * linux/arch/alpha/kernel/core_mcpcia.c + * + * Based on code written by David A Rusling (david.rusling@reo.mts.dec.com). + * + * Code common to all MCbus-PCI Adaptor core logic chipsets + */ + +#define __EXTERN_INLINE inline +#include <asm/io.h> +#include <asm/core_mcpcia.h> +#undef __EXTERN_INLINE + +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/sched.h> +#include <linux/init.h> +#include <linux/delay.h> + +#include <asm/ptrace.h> + +#include "proto.h" +#include "pci_impl.h" + +/* + * NOTE: Herein lie back-to-back mb instructions. They are magic. + * One plausible explanation is that the i/o controller does not properly + * handle the system transaction. Another involves timing. Ho hum. + */ + +/* + * BIOS32-style PCI interface: + */ + +#define DEBUG_CFG 0 + +#if DEBUG_CFG +# define DBG_CFG(args) printk args +#else +# define DBG_CFG(args) +#endif + +#define MCPCIA_MAX_HOSES 4 + +/* + * Given a bus, device, and function number, compute resulting + * configuration space address and setup the MCPCIA_HAXR2 register + * accordingly. It is therefore not safe to have concurrent + * invocations to configuration space access routines, but there + * really shouldn't be any need for this. + * + * Type 0: + * + * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 + * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | | |D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|0| + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * 31:11 Device select bit. + * 10:8 Function number + * 7:2 Register number + * + * Type 1: + * + * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 + * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1| + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * 31:24 reserved + * 23:16 bus number (8 bits = 128 possible buses) + * 15:11 Device number (5 bits) + * 10:8 function number + * 7:2 register number + * + * Notes: + * The function number selects which function of a multi-function device + * (e.g., SCSI and Ethernet). + * + * The register selects a DWORD (32 bit) register offset. Hence it + * doesn't get shifted by 2 bits as we want to "drop" the bottom two + * bits. + */ + +static unsigned int +conf_read(unsigned long addr, unsigned char type1, + struct pci_controller *hose) +{ + unsigned long flags; + unsigned long mid = MCPCIA_HOSE2MID(hose->index); + unsigned int stat0, value, temp, cpu; + + cpu = smp_processor_id(); + + local_irq_save(flags); + + DBG_CFG(("conf_read(addr=0x%lx, type1=%d, hose=%d)\n", + addr, type1, mid)); + + /* Reset status register to avoid losing errors. */ + stat0 = *(vuip)MCPCIA_CAP_ERR(mid); + *(vuip)MCPCIA_CAP_ERR(mid) = stat0; + mb(); + temp = *(vuip)MCPCIA_CAP_ERR(mid); + DBG_CFG(("conf_read: MCPCIA_CAP_ERR(%d) was 0x%x\n", mid, stat0)); + + mb(); + draina(); + mcheck_expected(cpu) = 1; + mcheck_taken(cpu) = 0; + mcheck_extra(cpu) = mid; + mb(); + + /* Access configuration space. 
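+	   (mcheck_expected was raised above, so a read from an absent
+	   device machine-checks quietly and mcheck_taken tells us to
+	   return all-ones instead of data.)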
*/ + value = *((vuip)addr); + mb(); + mb(); /* magic */ + + if (mcheck_taken(cpu)) { + mcheck_taken(cpu) = 0; + value = 0xffffffffU; + mb(); + } + mcheck_expected(cpu) = 0; + mb(); + + DBG_CFG(("conf_read(): finished\n")); + + local_irq_restore(flags); + return value; +} + +static void +conf_write(unsigned long addr, unsigned int value, unsigned char type1, + struct pci_controller *hose) +{ + unsigned long flags; + unsigned long mid = MCPCIA_HOSE2MID(hose->index); + unsigned int stat0, temp, cpu; + + cpu = smp_processor_id(); + + local_irq_save(flags); /* avoid getting hit by machine check */ + + /* Reset status register to avoid losing errors. */ + stat0 = *(vuip)MCPCIA_CAP_ERR(mid); + *(vuip)MCPCIA_CAP_ERR(mid) = stat0; mb(); + temp = *(vuip)MCPCIA_CAP_ERR(mid); + DBG_CFG(("conf_write: MCPCIA CAP_ERR(%d) was 0x%x\n", mid, stat0)); + + draina(); + mcheck_expected(cpu) = 1; + mcheck_extra(cpu) = mid; + mb(); + + /* Access configuration space. */ + *((vuip)addr) = value; + mb(); + mb(); /* magic */ + temp = *(vuip)MCPCIA_CAP_ERR(mid); /* read to force the write */ + mcheck_expected(cpu) = 0; + mb(); + + DBG_CFG(("conf_write(): finished\n")); + local_irq_restore(flags); +} + +static int +mk_conf_addr(struct pci_bus *pbus, unsigned int devfn, int where, + struct pci_controller *hose, unsigned long *pci_addr, + unsigned char *type1) +{ + u8 bus = pbus->number; + unsigned long addr; + + DBG_CFG(("mk_conf_addr(bus=%d,devfn=0x%x,hose=%d,where=0x%x," + " pci_addr=0x%p, type1=0x%p)\n", + bus, devfn, hose->index, where, pci_addr, type1)); + + /* Type 1 configuration cycle for *ALL* busses. */ + *type1 = 1; + + if (!pbus->parent) /* No parent means peer PCI bus. */ + bus = 0; + addr = (bus << 16) | (devfn << 8) | (where); + addr <<= 5; /* swizzle for SPARSE */ + addr |= hose->config_space_base; + + *pci_addr = addr; + DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr)); + return 0; +} + +static int +mcpcia_read_config(struct pci_bus *bus, unsigned int devfn, int where, + int size, u32 *value) +{ + struct pci_controller *hose = bus->sysdata; + unsigned long addr, w; + unsigned char type1; + + if (mk_conf_addr(bus, devfn, where, hose, &addr, &type1)) + return PCIBIOS_DEVICE_NOT_FOUND; + + addr |= (size - 1) * 8; + w = conf_read(addr, type1, hose); + switch (size) { + case 1: + *value = __kernel_extbl(w, where & 3); + break; + case 2: + *value = __kernel_extwl(w, where & 3); + break; + case 4: + *value = w; + break; + } + return PCIBIOS_SUCCESSFUL; +} + +static int +mcpcia_write_config(struct pci_bus *bus, unsigned int devfn, int where, + int size, u32 value) +{ + struct pci_controller *hose = bus->sysdata; + unsigned long addr; + unsigned char type1; + + if (mk_conf_addr(bus, devfn, where, hose, &addr, &type1)) + return PCIBIOS_DEVICE_NOT_FOUND; + + addr |= (size - 1) * 8; + value = __kernel_insql(value, where & 3); + conf_write(addr, value, type1, hose); + return PCIBIOS_SUCCESSFUL; +} + +struct pci_ops mcpcia_pci_ops = +{ + .read = mcpcia_read_config, + .write = mcpcia_write_config, +}; + +void +mcpcia_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end) +{ + wmb(); + *(vuip)MCPCIA_SG_TBIA(MCPCIA_HOSE2MID(hose->index)) = 0; + mb(); +} + +static int __init +mcpcia_probe_hose(int h) +{ + int cpu = smp_processor_id(); + int mid = MCPCIA_HOSE2MID(h); + unsigned int pci_rev; + + /* Gotta be REAL careful. If hose is absent, we get an mcheck. 
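+	   Same expected-mcheck dance as conf_read() above, but with
+	   mcheck_expected set to 2 to mark a probe: if reading the bridge
+	   revision faults, the probe just reports that the hose is absent.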
*/ + + mb(); + mb(); + draina(); + wrmces(7); + + mcheck_expected(cpu) = 2; /* indicates probing */ + mcheck_taken(cpu) = 0; + mcheck_extra(cpu) = mid; + mb(); + + /* Access the bus revision word. */ + pci_rev = *(vuip)MCPCIA_REV(mid); + + mb(); + mb(); /* magic */ + if (mcheck_taken(cpu)) { + mcheck_taken(cpu) = 0; + pci_rev = 0xffffffff; + mb(); + } + mcheck_expected(cpu) = 0; + mb(); + + return (pci_rev >> 16) == PCI_CLASS_BRIDGE_HOST; +} + +static void __init +mcpcia_new_hose(int h) +{ + struct pci_controller *hose; + struct resource *io, *mem, *hae_mem; + int mid = MCPCIA_HOSE2MID(h); + + hose = alloc_pci_controller(); + if (h == 0) + pci_isa_hose = hose; + io = alloc_resource(); + mem = alloc_resource(); + hae_mem = alloc_resource(); + + hose->io_space = io; + hose->mem_space = hae_mem; + hose->sparse_mem_base = MCPCIA_SPARSE(mid) - IDENT_ADDR; + hose->dense_mem_base = MCPCIA_DENSE(mid) - IDENT_ADDR; + hose->sparse_io_base = MCPCIA_IO(mid) - IDENT_ADDR; + hose->dense_io_base = 0; + hose->config_space_base = MCPCIA_CONF(mid); + hose->index = h; + + io->start = MCPCIA_IO(mid) - MCPCIA_IO_BIAS; + io->end = io->start + 0xffff; + io->name = pci_io_names[h]; + io->flags = IORESOURCE_IO; + + mem->start = MCPCIA_DENSE(mid) - MCPCIA_MEM_BIAS; + mem->end = mem->start + 0xffffffff; + mem->name = pci_mem_names[h]; + mem->flags = IORESOURCE_MEM; + + hae_mem->start = mem->start; + hae_mem->end = mem->start + MCPCIA_MEM_MASK; + hae_mem->name = pci_hae0_name; + hae_mem->flags = IORESOURCE_MEM; + + if (request_resource(&ioport_resource, io) < 0) + printk(KERN_ERR "Failed to request IO on hose %d\n", h); + if (request_resource(&iomem_resource, mem) < 0) + printk(KERN_ERR "Failed to request MEM on hose %d\n", h); + if (request_resource(mem, hae_mem) < 0) + printk(KERN_ERR "Failed to request HAE_MEM on hose %d\n", h); +} + +static void +mcpcia_pci_clr_err(int mid) +{ + *(vuip)MCPCIA_CAP_ERR(mid); + *(vuip)MCPCIA_CAP_ERR(mid) = 0xffffffff; /* Clear them all. */ + mb(); + *(vuip)MCPCIA_CAP_ERR(mid); /* Re-read for force write. */ +} + +static void __init +mcpcia_startup_hose(struct pci_controller *hose) +{ + int mid = MCPCIA_HOSE2MID(hose->index); + unsigned int tmp; + + mcpcia_pci_clr_err(mid); + + /* + * Set up error reporting. + */ + tmp = *(vuip)MCPCIA_CAP_ERR(mid); + tmp |= 0x0006; /* master/target abort */ + *(vuip)MCPCIA_CAP_ERR(mid) = tmp; + mb(); + tmp = *(vuip)MCPCIA_CAP_ERR(mid); + + /* + * Set up the PCI->physical memory translation windows. 
+ * + * Window 0 is scatter-gather 8MB at 8MB (for isa) + * Window 1 is scatter-gather (up to) 1GB at 1GB (for pci) + * Window 2 is direct access 2GB at 2GB + */ + hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0); + hose->sg_pci = iommu_arena_new(hose, 0x40000000, + size_for_memory(0x40000000), 0); + + __direct_map_base = 0x80000000; + __direct_map_size = 0x80000000; + + *(vuip)MCPCIA_W0_BASE(mid) = hose->sg_isa->dma_base | 3; + *(vuip)MCPCIA_W0_MASK(mid) = (hose->sg_isa->size - 1) & 0xfff00000; + *(vuip)MCPCIA_T0_BASE(mid) = virt_to_phys(hose->sg_isa->ptes) >> 8; + + *(vuip)MCPCIA_W1_BASE(mid) = hose->sg_pci->dma_base | 3; + *(vuip)MCPCIA_W1_MASK(mid) = (hose->sg_pci->size - 1) & 0xfff00000; + *(vuip)MCPCIA_T1_BASE(mid) = virt_to_phys(hose->sg_pci->ptes) >> 8; + + *(vuip)MCPCIA_W2_BASE(mid) = __direct_map_base | 1; + *(vuip)MCPCIA_W2_MASK(mid) = (__direct_map_size - 1) & 0xfff00000; + *(vuip)MCPCIA_T2_BASE(mid) = 0; + + *(vuip)MCPCIA_W3_BASE(mid) = 0x0; + + mcpcia_pci_tbi(hose, 0, -1); + + *(vuip)MCPCIA_HBASE(mid) = 0x0; + mb(); + + *(vuip)MCPCIA_HAE_MEM(mid) = 0U; + mb(); + *(vuip)MCPCIA_HAE_MEM(mid); /* read it back. */ + *(vuip)MCPCIA_HAE_IO(mid) = 0; + mb(); + *(vuip)MCPCIA_HAE_IO(mid); /* read it back. */ +} + +void __init +mcpcia_init_arch(void) +{ + /* With multiple PCI busses, we play with I/O as physical addrs. */ + ioport_resource.end = ~0UL; + + /* Allocate hose 0. That's the one that all the ISA junk hangs + off of, from which we'll be registering stuff here in a bit. + Other hose detection is done in mcpcia_init_hoses, which is + called from init_IRQ. */ + + mcpcia_new_hose(0); +} + +/* This is called from init_IRQ, since we cannot take interrupts + before then. Which means we cannot do this in init_arch. */ + +void __init +mcpcia_init_hoses(void) +{ + struct pci_controller *hose; + int hose_count; + int h; + + /* First, find how many hoses we have. */ + hose_count = 0; + for (h = 0; h < MCPCIA_MAX_HOSES; ++h) { + if (mcpcia_probe_hose(h)) { + if (h != 0) + mcpcia_new_hose(h); + hose_count++; + } + } + + printk("mcpcia_init_hoses: found %d hoses\n", hose_count); + + /* Now do init for each hose. */ + for (hose = hose_head; hose; hose = hose->next) + mcpcia_startup_hose(hose); +} + +static void +mcpcia_print_uncorrectable(struct el_MCPCIA_uncorrected_frame_mcheck *logout) +{ + struct el_common_EV5_uncorrectable_mcheck *frame; + int i; + + frame = &logout->procdata; + + /* Print PAL fields */ + for (i = 0; i < 24; i += 2) { + printk(" paltmp[%d-%d] = %16lx %16lx\n", + i, i+1, frame->paltemp[i], frame->paltemp[i+1]); + } + for (i = 0; i < 8; i += 2) { + printk(" shadow[%d-%d] = %16lx %16lx\n", + i, i+1, frame->shadow[i], + frame->shadow[i+1]); + } + printk(" Addr of excepting instruction = %16lx\n", + frame->exc_addr); + printk(" Summary of arithmetic traps = %16lx\n", + frame->exc_sum); + printk(" Exception mask = %16lx\n", + frame->exc_mask); + printk(" Base address for PALcode = %16lx\n", + frame->pal_base); + printk(" Interrupt Status Reg = %16lx\n", + frame->isr); + printk(" CURRENT SETUP OF EV5 IBOX = %16lx\n", + frame->icsr); + printk(" I-CACHE Reg %s parity error = %16lx\n", + (frame->ic_perr_stat & 0x800L) ? 
+ "Data" : "Tag", + frame->ic_perr_stat); + printk(" D-CACHE error Reg = %16lx\n", + frame->dc_perr_stat); + if (frame->dc_perr_stat & 0x2) { + switch (frame->dc_perr_stat & 0x03c) { + case 8: + printk(" Data error in bank 1\n"); + break; + case 4: + printk(" Data error in bank 0\n"); + break; + case 20: + printk(" Tag error in bank 1\n"); + break; + case 10: + printk(" Tag error in bank 0\n"); + break; + } + } + printk(" Effective VA = %16lx\n", + frame->va); + printk(" Reason for D-stream = %16lx\n", + frame->mm_stat); + printk(" EV5 SCache address = %16lx\n", + frame->sc_addr); + printk(" EV5 SCache TAG/Data parity = %16lx\n", + frame->sc_stat); + printk(" EV5 BC_TAG_ADDR = %16lx\n", + frame->bc_tag_addr); + printk(" EV5 EI_ADDR: Phys addr of Xfer = %16lx\n", + frame->ei_addr); + printk(" Fill Syndrome = %16lx\n", + frame->fill_syndrome); + printk(" EI_STAT reg = %16lx\n", + frame->ei_stat); + printk(" LD_LOCK = %16lx\n", + frame->ld_lock); +} + +static void +mcpcia_print_system_area(unsigned long la_ptr) +{ + struct el_common *frame; + struct pci_controller *hose; + + struct IOD_subpacket { + unsigned long base; + unsigned int whoami; + unsigned int rsvd1; + unsigned int pci_rev; + unsigned int cap_ctrl; + unsigned int hae_mem; + unsigned int hae_io; + unsigned int int_ctl; + unsigned int int_reg; + unsigned int int_mask0; + unsigned int int_mask1; + unsigned int mc_err0; + unsigned int mc_err1; + unsigned int cap_err; + unsigned int rsvd2; + unsigned int pci_err1; + unsigned int mdpa_stat; + unsigned int mdpa_syn; + unsigned int mdpb_stat; + unsigned int mdpb_syn; + unsigned int rsvd3; + unsigned int rsvd4; + unsigned int rsvd5; + } *iodpp; + + frame = (struct el_common *)la_ptr; + iodpp = (struct IOD_subpacket *) (la_ptr + frame->sys_offset); + + for (hose = hose_head; hose; hose = hose->next, iodpp++) { + + printk("IOD %d Register Subpacket - Bridge Base Address %16lx\n", + hose->index, iodpp->base); + printk(" WHOAMI = %8x\n", iodpp->whoami); + printk(" PCI_REV = %8x\n", iodpp->pci_rev); + printk(" CAP_CTRL = %8x\n", iodpp->cap_ctrl); + printk(" HAE_MEM = %8x\n", iodpp->hae_mem); + printk(" HAE_IO = %8x\n", iodpp->hae_io); + printk(" INT_CTL = %8x\n", iodpp->int_ctl); + printk(" INT_REG = %8x\n", iodpp->int_reg); + printk(" INT_MASK0 = %8x\n", iodpp->int_mask0); + printk(" INT_MASK1 = %8x\n", iodpp->int_mask1); + printk(" MC_ERR0 = %8x\n", iodpp->mc_err0); + printk(" MC_ERR1 = %8x\n", iodpp->mc_err1); + printk(" CAP_ERR = %8x\n", iodpp->cap_err); + printk(" PCI_ERR1 = %8x\n", iodpp->pci_err1); + printk(" MDPA_STAT = %8x\n", iodpp->mdpa_stat); + printk(" MDPA_SYN = %8x\n", iodpp->mdpa_syn); + printk(" MDPB_STAT = %8x\n", iodpp->mdpb_stat); + printk(" MDPB_SYN = %8x\n", iodpp->mdpb_syn); + } +} + +void +mcpcia_machine_check(unsigned long vector, unsigned long la_ptr, + struct pt_regs * regs) +{ + struct el_common *mchk_header; + struct el_MCPCIA_uncorrected_frame_mcheck *mchk_logout; + unsigned int cpu = smp_processor_id(); + int expected; + + mchk_header = (struct el_common *)la_ptr; + mchk_logout = (struct el_MCPCIA_uncorrected_frame_mcheck *)la_ptr; + expected = mcheck_expected(cpu); + + mb(); + mb(); /* magic */ + draina(); + + switch (expected) { + case 0: + { + /* FIXME: how do we figure out which hose the + error was on? 
*/ + struct pci_controller *hose; + for (hose = hose_head; hose; hose = hose->next) + mcpcia_pci_clr_err(MCPCIA_HOSE2MID(hose->index)); + break; + } + case 1: + mcpcia_pci_clr_err(mcheck_extra(cpu)); + break; + default: + /* Otherwise, we're being called from mcpcia_probe_hose + and there's no hose to clear an error from. */ + break; + } + + wrmces(0x7); + mb(); + + process_mcheck_info(vector, la_ptr, regs, "MCPCIA", expected != 0); + if (!expected && vector != 0x620 && vector != 0x630) { + mcpcia_print_uncorrectable(mchk_logout); + mcpcia_print_system_area(la_ptr); + } +} diff --git a/arch/alpha/kernel/core_polaris.c b/arch/alpha/kernel/core_polaris.c new file mode 100644 index 0000000..277674a --- /dev/null +++ b/arch/alpha/kernel/core_polaris.c @@ -0,0 +1,203 @@ +/* + * linux/arch/alpha/kernel/core_polaris.c + * + * POLARIS chip-specific code + */ + +#define __EXTERN_INLINE inline +#include <asm/io.h> +#include <asm/core_polaris.h> +#undef __EXTERN_INLINE + +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/sched.h> +#include <linux/init.h> + +#include <asm/ptrace.h> + +#include "proto.h" +#include "pci_impl.h" + +/* + * BIOS32-style PCI interface: + */ + +#define DEBUG_CONFIG 0 + +#if DEBUG_CONFIG +# define DBG_CFG(args) printk args +#else +# define DBG_CFG(args) +#endif + + +/* + * Given a bus, device, and function number, compute the resulting + * configuration space address. This is fairly straightforward + * on POLARIS, since the chip itself generates Type 0 or Type 1 + * cycles automatically depending on the bus number (bus 0 is + * hardwired to Type 0, all others are Type 1; peer bridges + * are not supported). + * + * All types: + * + * 3 3 3 3|3 3 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 + * 9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * |1|1|1|1|1|0|0|1|1|1|1|1|1|1|1|0|B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|x|x| + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * 23:16 bus number (8 bits = 256 possible buses) + * 15:11 Device number (5 bits) + * 10:8 function number + * 7:2 register number + * + * Notes: + * The function number selects which function of a multi-function device + * (e.g., SCSI and Ethernet). + * + * The register selects a DWORD (32 bit) register offset. Hence it + * doesn't get shifted by 2 bits as we want to "drop" the bottom two + * bits. + */ + +static int +mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where, + unsigned long *pci_addr, u8 *type1) +{ + u8 bus = pbus->number; + + *type1 = (bus == 0) ?
0 : 1; + *pci_addr = (bus << 16) | (device_fn << 8) | (where) | + POLARIS_DENSE_CONFIG_BASE; + + DBG_CFG(("mk_conf_addr(bus=%d, device_fn=0x%x, where=0x%x," + " returning address 0x%lx)\n", + bus, device_fn, where, *pci_addr)); + + return 0; +} + +static int +polaris_read_config(struct pci_bus *bus, unsigned int devfn, int where, + int size, u32 *value) +{ + unsigned long addr; + unsigned char type1; + + if (mk_conf_addr(bus, devfn, where, &addr, &type1)) + return PCIBIOS_DEVICE_NOT_FOUND; + + switch (size) { + case 1: + *value = __kernel_ldbu(*(vucp)addr); + break; + case 2: + *value = __kernel_ldwu(*(vusp)addr); + break; + case 4: + *value = *(vuip)addr; + break; + } + + return PCIBIOS_SUCCESSFUL; +} + + +static int +polaris_write_config(struct pci_bus *bus, unsigned int devfn, int where, + int size, u32 value) +{ + unsigned long addr; + unsigned char type1; + + if (mk_conf_addr(bus, devfn, where, &addr, &type1)) + return PCIBIOS_DEVICE_NOT_FOUND; + + switch (size) { + case 1: + __kernel_stb(value, *(vucp)addr); + mb(); + __kernel_ldbu(*(vucp)addr); + break; + case 2: + __kernel_stw(value, *(vusp)addr); + mb(); + __kernel_ldwu(*(vusp)addr); + break; + case 4: + *(vuip)addr = value; + mb(); + *(vuip)addr; + break; + } + + return PCIBIOS_SUCCESSFUL; +} + +struct pci_ops polaris_pci_ops = +{ + .read = polaris_read_config, + .write = polaris_write_config, +}; + +void __init +polaris_init_arch(void) +{ + struct pci_controller *hose; + + /* May need to initialize error reporting (see PCICTL0/1), but + * for now assume that the firmware has done the right thing + * already. + */ +#if 0 + printk("polaris_init_arch(): trusting firmware for setup\n"); +#endif + + /* + * Create our single hose. + */ + + pci_isa_hose = hose = alloc_pci_controller(); + hose->io_space = &ioport_resource; + hose->mem_space = &iomem_resource; + hose->index = 0; + + hose->sparse_mem_base = 0; + hose->dense_mem_base = POLARIS_DENSE_MEM_BASE - IDENT_ADDR; + hose->sparse_io_base = 0; + hose->dense_io_base = POLARIS_DENSE_IO_BASE - IDENT_ADDR; + + hose->sg_isa = hose->sg_pci = NULL; + + /* The I/O window is fixed at 2G @ 2G. */ + __direct_map_base = 0x80000000; + __direct_map_size = 0x80000000; +} + +static inline void +polaris_pci_clr_err(void) +{ + *(vusp)POLARIS_W_STATUS; + /* Write 1's to settable bits to clear errors */ + *(vusp)POLARIS_W_STATUS = 0x7800; + mb(); + *(vusp)POLARIS_W_STATUS; +} + +void +polaris_machine_check(unsigned long vector, unsigned long la_ptr, + struct pt_regs * regs) +{ + /* Clear the error before any reporting. */ + mb(); + mb(); + draina(); + polaris_pci_clr_err(); + wrmces(0x7); + mb(); + + process_mcheck_info(vector, la_ptr, regs, "POLARIS", + mcheck_expected(0)); +} diff --git a/arch/alpha/kernel/core_t2.c b/arch/alpha/kernel/core_t2.c new file mode 100644 index 0000000..ecce09e --- /dev/null +++ b/arch/alpha/kernel/core_t2.c @@ -0,0 +1,622 @@ +/* + * linux/arch/alpha/kernel/core_t2.c + * + * Written by Jay A Estabrook (jestabro@amt.tay1.dec.com). + * December 1996. + * + * based on CIA code by David A Rusling (david.rusling@reo.mts.dec.com) + * + * Code common to all T2 core logic chips. + */ + +#define __EXTERN_INLINE +#include <asm/io.h> +#include <asm/core_t2.h> +#undef __EXTERN_INLINE + +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/sched.h> +#include <linux/init.h> + +#include <asm/ptrace.h> +#include <asm/delay.h> + +#include "proto.h" +#include "pci_impl.h" + +/* For dumping initial DMA window settings.
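One detail worth calling out in the POLARIS write path above: every store is followed by mb() and a read-back of the same register, the classic way to flush a posted MMIO write. A generic sketch of that idiom (plain C, not from the patch; __sync_synchronize() stands in for the kernel's mb()):

#include <stdint.h>

static inline void mmio_write32_flushed(volatile uint32_t *reg, uint32_t val)
{
	*reg = val;		/* the write may be posted (buffered) */
	__sync_synchronize();	/* order it, like mb() */
	(void)*reg;		/* read back: forces the write to the device */
}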
*/ +#define DEBUG_PRINT_INITIAL_SETTINGS 0 + +/* For dumping final DMA window settings. */ +#define DEBUG_PRINT_FINAL_SETTINGS 0 + +/* + * By default, we direct-map starting at 2GB, in order to allow the + * maximum size direct-map window (2GB) to match the maximum amount of + * memory (2GB) that can be present on SABLEs. But that limits the + * floppy to DMA only via the scatter/gather window set up for 8MB + * ISA DMA, since the maximum ISA DMA address is 2GB-1. + * + * For now, this seems a reasonable trade-off: even though most SABLEs + * have less than 1GB of memory, floppy usage/performance will not + * really be affected by forcing it to go via scatter/gather... + */ +#define T2_DIRECTMAP_2G 1 + +#if T2_DIRECTMAP_2G +# define T2_DIRECTMAP_START 0x80000000UL +# define T2_DIRECTMAP_LENGTH 0x80000000UL +#else +# define T2_DIRECTMAP_START 0x40000000UL +# define T2_DIRECTMAP_LENGTH 0x40000000UL +#endif + +/* The ISA scatter/gather window settings. */ +#define T2_ISA_SG_START 0x00800000UL +#define T2_ISA_SG_LENGTH 0x00800000UL + +/* + * NOTE: Herein lie back-to-back mb instructions. They are magic. + * One plausible explanation is that the i/o controller does not properly + * handle the system transaction. Another involves timing. Ho hum. + */ + +/* + * BIOS32-style PCI interface: + */ + +#define DEBUG_CONFIG 0 + +#if DEBUG_CONFIG +# define DBG(args) printk args +#else +# define DBG(args) +#endif + +static volatile unsigned int t2_mcheck_any_expected; +static volatile unsigned int t2_mcheck_last_taken; + +/* Place to save the DMA Window registers as set up by SRM + for restoration during shutdown. */ +static struct +{ + struct { + unsigned long wbase; + unsigned long wmask; + unsigned long tbase; + } window[2]; + unsigned long hae_1; + unsigned long hae_2; + unsigned long hae_3; + unsigned long hae_4; + unsigned long hbase; +} t2_saved_config __attribute__((common)); + +/* + * Given a bus, device, and function number, compute the resulting + * configuration space address and set up the T2_HAXR2 register + * accordingly. It is therefore not safe to have concurrent + * invocations of configuration space access routines, but there + * really shouldn't be any need for this. + * + * Type 0: + * + * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 + * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | | |D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|0| + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * 31:11 Device select bits. + * 10:8 Function number + * 7:2 Register number + * + * Type 1: + * + * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 + * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1| + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * 31:24 reserved + * 23:16 bus number (8 bits = 256 possible buses) + * 15:11 Device number (5 bits) + * 10:8 function number + * 7:2 register number + * + * Notes: + * The function number selects which function of a multi-function device + * (e.g., SCSI and Ethernet). + * + * The register selects a DWORD (32 bit) register offset. Hence it + * doesn't get shifted by 2 bits as we want to "drop" the bottom two + * bits.
+ */ + +static int +mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where, + unsigned long *pci_addr, unsigned char *type1) +{ + unsigned long addr; + u8 bus = pbus->number; + + DBG(("mk_conf_addr(bus=%d, dfn=0x%x, where=0x%x," + " addr=0x%p, type1=0x%p)\n", + bus, device_fn, where, pci_addr, type1)); + + if (bus == 0) { + int device = device_fn >> 3; + + /* Type 0 configuration cycle. */ + + if (device > 8) { + DBG(("mk_conf_addr: device (%d) > 8, returning -1\n", + device)); + return -1; + } + + *type1 = 0; + addr = (0x0800L << device) | ((device_fn & 7) << 8) | (where); + } else { + /* Type 1 configuration cycle. */ + *type1 = 1; + addr = (bus << 16) | (device_fn << 8) | (where); + } + *pci_addr = addr; + DBG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr)); + return 0; +} + +/* + * NOTE: both conf_read() and conf_write() may set HAE_3 when needing + * to do type1 access. This is protected by the use of spinlock IRQ + * primitives in the wrapper functions pci_{read,write}_config_*() + * defined in drivers/pci/pci.c. + */ +static unsigned int +conf_read(unsigned long addr, unsigned char type1) +{ + unsigned int value, cpu, taken; + unsigned long t2_cfg = 0; + + cpu = smp_processor_id(); + + DBG(("conf_read(addr=0x%lx, type1=%d)\n", addr, type1)); + + /* If Type1 access, must set T2 CFG. */ + if (type1) { + t2_cfg = *(vulp)T2_HAE_3 & ~0xc0000000UL; + *(vulp)T2_HAE_3 = 0x40000000UL | t2_cfg; + mb(); + } + mb(); + draina(); + + mcheck_expected(cpu) = 1; + mcheck_taken(cpu) = 0; + t2_mcheck_any_expected |= (1 << cpu); + mb(); + + /* Access configuration space. */ + value = *(vuip)addr; + mb(); + mb(); /* magic */ + + /* Wait for possible mcheck. Also, this lets other CPUs clear + their mchecks as well, as they can reliably tell when + another CPU is in the midst of handling a real mcheck via + the "taken" function. */ + udelay(100); + + if ((taken = mcheck_taken(cpu))) { + mcheck_taken(cpu) = 0; + t2_mcheck_last_taken |= (1 << cpu); + value = 0xffffffffU; + mb(); + } + mcheck_expected(cpu) = 0; + t2_mcheck_any_expected = 0; + mb(); + + /* If Type1 access, must reset T2 CFG so normal IO space ops work. */ + if (type1) { + *(vulp)T2_HAE_3 = t2_cfg; + mb(); + } + + return value; +} + +static void +conf_write(unsigned long addr, unsigned int value, unsigned char type1) +{ + unsigned int cpu, taken; + unsigned long t2_cfg = 0; + + cpu = smp_processor_id(); + + /* If Type1 access, must set T2 CFG. */ + if (type1) { + t2_cfg = *(vulp)T2_HAE_3 & ~0xc0000000UL; + *(vulp)T2_HAE_3 = t2_cfg | 0x40000000UL; + mb(); + } + mb(); + draina(); + + mcheck_expected(cpu) = 1; + mcheck_taken(cpu) = 0; + t2_mcheck_any_expected |= (1 << cpu); + mb(); + + /* Access configuration space. */ + *(vuip)addr = value; + mb(); + mb(); /* magic */ + + /* Wait for possible mcheck. Also, this lets other CPUs clear + their mchecks as well, as they can reliably tell when + this CPU is in the midst of handling a real mcheck via + the "taken" function. */ + udelay(100); + + if ((taken = mcheck_taken(cpu))) { + mcheck_taken(cpu) = 0; + t2_mcheck_last_taken |= (1 << cpu); + mb(); + } + mcheck_expected(cpu) = 0; + t2_mcheck_any_expected = 0; + mb(); + + /* If Type1 access, must reset T2 CFG so normal IO space ops work.
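As a stripped-down view of what conf_read() and conf_write() above do with T2_HAE_3, here is the save/modify/restore pattern in isolation (a sketch, not from the patch; read_hae3()/write_hae3() are hypothetical accessors, and the caller is assumed to hold the config-space spinlock mentioned in the NOTE):

#include <stdint.h>

extern uint64_t read_hae3(void);	/* hypothetical accessors */
extern void write_hae3(uint64_t v);

static uint32_t with_type1_window(uint32_t (*access)(void))
{
	uint64_t saved = read_hae3() & ~0xc0000000ULL;
	uint32_t value;

	write_hae3(saved | 0x40000000ULL);	/* aim HAE_3 at config space */
	value = access();			/* the guarded access */
	write_hae3(saved);			/* restore normal PIO mapping */
	return value;
}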
*/ + if (type1) { + *(vulp)T2_HAE_3 = t2_cfg; + mb(); + } +} + +static int +t2_read_config(struct pci_bus *bus, unsigned int devfn, int where, + int size, u32 *value) +{ + unsigned long addr, pci_addr; + unsigned char type1; + int shift; + long mask; + + if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1)) + return PCIBIOS_DEVICE_NOT_FOUND; + + mask = (size - 1) * 8; + shift = (where & 3) * 8; + addr = (pci_addr << 5) + mask + T2_CONF; + *value = conf_read(addr, type1) >> (shift); + return PCIBIOS_SUCCESSFUL; +} + +static int +t2_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, + u32 value) +{ + unsigned long addr, pci_addr; + unsigned char type1; + long mask; + + if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1)) + return PCIBIOS_DEVICE_NOT_FOUND; + + mask = (size - 1) * 8; + addr = (pci_addr << 5) + mask + T2_CONF; + conf_write(addr, value << ((where & 3) * 8), type1); + return PCIBIOS_SUCCESSFUL; +} + +struct pci_ops t2_pci_ops = +{ + .read = t2_read_config, + .write = t2_write_config, +}; + +static void __init +t2_direct_map_window1(unsigned long base, unsigned long length) +{ + unsigned long temp; + + __direct_map_base = base; + __direct_map_size = length; + + temp = (base & 0xfff00000UL) | ((base + length - 1) >> 20); + *(vulp)T2_WBASE1 = temp | 0x80000UL; /* OR in ENABLE bit */ + temp = (length - 1) & 0xfff00000UL; + *(vulp)T2_WMASK1 = temp; + *(vulp)T2_TBASE1 = 0; + +#if DEBUG_PRINT_FINAL_SETTINGS + printk("%s: setting WBASE1=0x%lx WMASK1=0x%lx TBASE1=0x%lx\n", + __FUNCTION__, + *(vulp)T2_WBASE1, + *(vulp)T2_WMASK1, + *(vulp)T2_TBASE1); +#endif +} + +static void __init +t2_sg_map_window2(struct pci_controller *hose, + unsigned long base, + unsigned long length) +{ + unsigned long temp; + + /* Note we can only do 1 SG window, as the other is for direct, so + do an ISA SG area, especially for the floppy. */ + hose->sg_isa = iommu_arena_new(hose, base, length, 0); + hose->sg_pci = NULL; + + temp = (base & 0xfff00000UL) | ((base + length - 1) >> 20); + *(vulp)T2_WBASE2 = temp | 0xc0000UL; /* OR in ENABLE/SG bits */ + temp = (length - 1) & 0xfff00000UL; + *(vulp)T2_WMASK2 = temp; + *(vulp)T2_TBASE2 = virt_to_phys(hose->sg_isa->ptes) >> 1; + mb(); + + t2_pci_tbi(hose, 0, -1); /* flush TLB all */ + +#if DEBUG_PRINT_FINAL_SETTINGS + printk("%s: setting WBASE2=0x%lx WMASK2=0x%lx TBASE2=0x%lx\n", + __FUNCTION__, + *(vulp)T2_WBASE2, + *(vulp)T2_WMASK2, + *(vulp)T2_TBASE2); +#endif +} + +static void __init +t2_save_configuration(void) +{ +#if DEBUG_PRINT_INITIAL_SETTINGS + printk("%s: HAE_1 was 0x%lx\n", __FUNCTION__, srm_hae); /* HW is 0 */ + printk("%s: HAE_2 was 0x%lx\n", __FUNCTION__, *(vulp)T2_HAE_2); + printk("%s: HAE_3 was 0x%lx\n", __FUNCTION__, *(vulp)T2_HAE_3); + printk("%s: HAE_4 was 0x%lx\n", __FUNCTION__, *(vulp)T2_HAE_4); + printk("%s: HBASE was 0x%lx\n", __FUNCTION__, *(vulp)T2_HBASE); + + printk("%s: WBASE1=0x%lx WMASK1=0x%lx TBASE1=0x%lx\n", __FUNCTION__, + *(vulp)T2_WBASE1, *(vulp)T2_WMASK1, *(vulp)T2_TBASE1); + printk("%s: WBASE2=0x%lx WMASK2=0x%lx TBASE2=0x%lx\n", __FUNCTION__, + *(vulp)T2_WBASE2, *(vulp)T2_WMASK2, *(vulp)T2_TBASE2); +#endif + + /* + * Save the DMA Window registers. 
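The WBASE encoding used by t2_direct_map_window1() above is unusual: T2 packs the window base into the high bits and bits <31:20> of the window's last byte into the low bits of the same register. A quick standalone check of the arithmetic for the default 2GB-at-2GB window (plain C, illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned long base = 0x80000000UL, length = 0x80000000UL;
	unsigned long wbase = (base & 0xfff00000UL)	  /* window base */
			    | ((base + length - 1) >> 20) /* end<31:20> */
			    | 0x80000UL;		  /* ENABLE bit */
	unsigned long wmask = (length - 1) & 0xfff00000UL;

	/* prints WBASE1=0x80080fff WMASK1=0x7ff00000 */
	printf("WBASE1=0x%lx WMASK1=0x%lx\n", wbase, wmask);
	return 0;
}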
+ */ + t2_saved_config.window[0].wbase = *(vulp)T2_WBASE1; + t2_saved_config.window[0].wmask = *(vulp)T2_WMASK1; + t2_saved_config.window[0].tbase = *(vulp)T2_TBASE1; + t2_saved_config.window[1].wbase = *(vulp)T2_WBASE2; + t2_saved_config.window[1].wmask = *(vulp)T2_WMASK2; + t2_saved_config.window[1].tbase = *(vulp)T2_TBASE2; + + t2_saved_config.hae_1 = srm_hae; /* HW is already set to 0 */ + t2_saved_config.hae_2 = *(vulp)T2_HAE_2; + t2_saved_config.hae_3 = *(vulp)T2_HAE_3; + t2_saved_config.hae_4 = *(vulp)T2_HAE_4; + t2_saved_config.hbase = *(vulp)T2_HBASE; +} + +void __init +t2_init_arch(void) +{ + struct pci_controller *hose; + unsigned long temp; + unsigned int i; + + for (i = 0; i < NR_CPUS; i++) { + mcheck_expected(i) = 0; + mcheck_taken(i) = 0; + } + t2_mcheck_any_expected = 0; + t2_mcheck_last_taken = 0; + + /* Enable scatter/gather TLB use. */ + temp = *(vulp)T2_IOCSR; + if (!(temp & (0x1UL << 26))) { + printk("t2_init_arch: enabling SG TLB, IOCSR was 0x%lx\n", + temp); + *(vulp)T2_IOCSR = temp | (0x1UL << 26); + mb(); + *(vulp)T2_IOCSR; /* read it back to make sure */ + } + + t2_save_configuration(); + + /* + * Create our single hose. + */ + pci_isa_hose = hose = alloc_pci_controller(); + hose->io_space = &ioport_resource; + hose->mem_space = &iomem_resource; + hose->index = 0; + + hose->sparse_mem_base = T2_SPARSE_MEM - IDENT_ADDR; + hose->dense_mem_base = T2_DENSE_MEM - IDENT_ADDR; + hose->sparse_io_base = T2_IO - IDENT_ADDR; + hose->dense_io_base = 0; + + /* + * Set up the PCI->physical memory translation windows. + * + * Window 1 is direct mapped. + * Window 2 is scatter/gather (for ISA). + */ + + t2_direct_map_window1(T2_DIRECTMAP_START, T2_DIRECTMAP_LENGTH); + + /* Always make an ISA DMA window. */ + t2_sg_map_window2(hose, T2_ISA_SG_START, T2_ISA_SG_LENGTH); + + *(vulp)T2_HBASE = 0x0; /* Disable HOLES. */ + + /* Zero HAE. */ + *(vulp)T2_HAE_1 = 0; mb(); /* Sparse MEM HAE */ + *(vulp)T2_HAE_2 = 0; mb(); /* Sparse I/O HAE */ + *(vulp)T2_HAE_3 = 0; mb(); /* Config Space HAE */ + + /* + * We also now zero out HAE_4, the dense memory HAE, so that + * we need not account for its "offset" when accessing dense + * memory resources which we allocated in our normal way. This + * HAE would need to stay untouched were we to keep the SRM + * resource settings. + * + * Thus we can now run standard X servers on SABLE/LYNX. :-) + */ + *(vulp)T2_HAE_4 = 0; mb(); +} + +void +t2_kill_arch(int mode) +{ + /* + * Restore the DMA Window registers. + */ + *(vulp)T2_WBASE1 = t2_saved_config.window[0].wbase; + *(vulp)T2_WMASK1 = t2_saved_config.window[0].wmask; + *(vulp)T2_TBASE1 = t2_saved_config.window[0].tbase; + *(vulp)T2_WBASE2 = t2_saved_config.window[1].wbase; + *(vulp)T2_WMASK2 = t2_saved_config.window[1].wmask; + *(vulp)T2_TBASE2 = t2_saved_config.window[1].tbase; + mb(); + + *(vulp)T2_HAE_1 = srm_hae; + *(vulp)T2_HAE_2 = t2_saved_config.hae_2; + *(vulp)T2_HAE_3 = t2_saved_config.hae_3; + *(vulp)T2_HAE_4 = t2_saved_config.hae_4; + *(vulp)T2_HBASE = t2_saved_config.hbase; + mb(); + *(vulp)T2_HBASE; /* READ it back to ensure WRITE occurred. 
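Since the machine-check handling below keys off the expected/taken flags that conf_read()/conf_write() set, here is that handshake reduced to its skeleton (illustrative only; the flag pointers stand in for the per-CPU mcheck_expected()/mcheck_taken() macros, and the barriers are elided):

static unsigned int probe_word(volatile unsigned int *addr,
			       volatile int *expected, volatile int *taken)
{
	unsigned int value;

	*expected = 1;		/* tell the mcheck handler to swallow one */
	*taken = 0;
	value = *addr;		/* may machine-check if nothing answers */
	if (*taken)		/* the handler fired: no device there */
		value = 0xffffffffU;
	*expected = 0;
	return value;
}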
*/ +} + +void +t2_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end) +{ + unsigned long t2_iocsr; + + t2_iocsr = *(vulp)T2_IOCSR; + + /* set the TLB Clear bit */ + *(vulp)T2_IOCSR = t2_iocsr | (0x1UL << 28); + mb(); + *(vulp)T2_IOCSR; /* read it back to make sure */ + + /* clear the TLB Clear bit */ + *(vulp)T2_IOCSR = t2_iocsr & ~(0x1UL << 28); + mb(); + *(vulp)T2_IOCSR; /* read it back to make sure */ +} + +#define SIC_SEIC (1UL << 33) /* System Event Clear */ + +static void +t2_clear_errors(int cpu) +{ + struct sable_cpu_csr *cpu_regs; + + cpu_regs = (struct sable_cpu_csr *)T2_CPUn_BASE(cpu); + + cpu_regs->sic &= ~SIC_SEIC; + + /* Clear CPU errors. */ + cpu_regs->bcce |= cpu_regs->bcce; + cpu_regs->cbe |= cpu_regs->cbe; + cpu_regs->bcue |= cpu_regs->bcue; + cpu_regs->dter |= cpu_regs->dter; + + *(vulp)T2_CERR1 |= *(vulp)T2_CERR1; + *(vulp)T2_PERR1 |= *(vulp)T2_PERR1; + + mb(); + mb(); /* magic */ +} + +/* + * SABLE seems to have a "broadcast" style machine check, in that all + * CPUs receive it. And, the issuing CPU, in the case of PCI Config + * space read/write faults, will also receive a second mcheck, upon + * lowering IPL during completion processing in pci_read_config_byte() + * et al. + * + * Hence all the taken/expected/any_expected/last_taken stuff... + */ +void +t2_machine_check(unsigned long vector, unsigned long la_ptr, + struct pt_regs * regs) +{ + int cpu = smp_processor_id(); +#ifdef CONFIG_VERBOSE_MCHECK + struct el_common *mchk_header = (struct el_common *)la_ptr; +#endif + + /* Clear the error before any reporting. */ + mb(); + mb(); /* magic */ + draina(); + t2_clear_errors(cpu); + + /* This should not actually be done until the logout frame is + examined, but, since we don't do that, go on and do this... */ + wrmces(0x7); + mb(); + + /* Now, do testing for the anomalous conditions. */ + if (!mcheck_expected(cpu) && t2_mcheck_any_expected) { + /* + * FUNKY: Received mcheck on a CPU and not + * expecting it, but another CPU is expecting one. + * + * Just dismiss it for now on this CPU... + */ +#ifdef CONFIG_VERBOSE_MCHECK + if (alpha_verbose_mcheck > 1) { + printk("t2_machine_check(cpu%d): any_expected 0x%x -" + " (assumed) spurious -" + " code 0x%x\n", cpu, t2_mcheck_any_expected, + (unsigned int)mchk_header->code); + } +#endif + return; + } + + if (!mcheck_expected(cpu) && !t2_mcheck_any_expected) { + if (t2_mcheck_last_taken & (1 << cpu)) { +#ifdef CONFIG_VERBOSE_MCHECK + if (alpha_verbose_mcheck > 1) { + printk("t2_machine_check(cpu%d): last_taken 0x%x - " + "unexpected mcheck - code 0x%x\n", + cpu, t2_mcheck_last_taken, + (unsigned int)mchk_header->code); + } +#endif + t2_mcheck_last_taken = 0; + mb(); + return; + } else { + t2_mcheck_last_taken = 0; + mb(); + } + } + +#ifdef CONFIG_VERBOSE_MCHECK + if (alpha_verbose_mcheck > 1) { + printk("%s t2_mcheck(cpu%d): last_taken 0x%x - " + "any_expected 0x%x - code 0x%x\n", + (mcheck_expected(cpu) ? "EX" : "UN"), cpu, + t2_mcheck_last_taken, t2_mcheck_any_expected, + (unsigned int)mchk_header->code); + } +#endif + + process_mcheck_info(vector, la_ptr, regs, "T2", mcheck_expected(cpu)); +} diff --git a/arch/alpha/kernel/core_titan.c b/arch/alpha/kernel/core_titan.c new file mode 100644 index 0000000..3662fef --- /dev/null +++ b/arch/alpha/kernel/core_titan.c @@ -0,0 +1,806 @@ +/* + * linux/arch/alpha/kernel/core_titan.c + * + * Code common to all TITAN core logic chips. 
+ */ + +#define __EXTERN_INLINE inline +#include <asm/io.h> +#include <asm/core_titan.h> +#undef __EXTERN_INLINE + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/sched.h> +#include <linux/init.h> +#include <linux/vmalloc.h> +#include <linux/bootmem.h> + +#include <asm/ptrace.h> +#include <asm/smp.h> +#include <asm/pgalloc.h> +#include <asm/tlbflush.h> + +#include "proto.h" +#include "pci_impl.h" + +/* Save Titan configuration data as the console had it set up. */ + +struct +{ + unsigned long wsba[4]; + unsigned long wsm[4]; + unsigned long tba[4]; +} saved_config[4] __attribute__((common)); + +/* + * BIOS32-style PCI interface: + */ + +#define DEBUG_CONFIG 0 + +#if DEBUG_CONFIG +# define DBG_CFG(args) printk args +#else +# define DBG_CFG(args) +#endif + + +/* + * Routines to access TIG registers. + */ +static inline volatile unsigned long * +mk_tig_addr(int offset) +{ + return (volatile unsigned long *)(TITAN_TIG_SPACE + (offset << 6)); +} + +static inline u8 +titan_read_tig(int offset, u8 value) +{ + volatile unsigned long *tig_addr = mk_tig_addr(offset); + return (u8)(*tig_addr & 0xff); +} + +static inline void +titan_write_tig(int offset, u8 value) +{ + volatile unsigned long *tig_addr = mk_tig_addr(offset); + *tig_addr = (unsigned long)value; +} + + +/* + * Given a bus, device, and function number, compute the resulting + * configuration space address accordingly. It is therefore not safe + * to have concurrent invocations of configuration space access + * routines, but there really shouldn't be any need for this. + * + * Note that all config space accesses use Type 1 address format. + * + * Note also that type 1 is determined by non-zero bus number. + * + * Type 1: + * + * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 + * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1| + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * 31:24 reserved + * 23:16 bus number (8 bits = 256 possible buses) + * 15:11 Device number (5 bits) + * 10:8 function number + * 7:2 register number + * + * Notes: + * The function number selects which function of a multi-function device + * (e.g., SCSI and Ethernet). + * + * The register selects a DWORD (32 bit) register offset. Hence it + * doesn't get shifted by 2 bits as we want to "drop" the bottom two + * bits. + */ + +static int +mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where, + unsigned long *pci_addr, unsigned char *type1) +{ + struct pci_controller *hose = pbus->sysdata; + unsigned long addr; + u8 bus = pbus->number; + + DBG_CFG(("mk_conf_addr(bus=%d, device_fn=0x%x, where=0x%x, " + "pci_addr=0x%p, type1=0x%p)\n", + bus, device_fn, where, pci_addr, type1)); + + if (!pbus->parent) /* No parent means peer PCI bus.
*/ + bus = 0; + *type1 = (bus != 0); + + addr = (bus << 16) | (device_fn << 8) | where; + addr |= hose->config_space_base; + + *pci_addr = addr; + DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr)); + return 0; +} + +static int +titan_read_config(struct pci_bus *bus, unsigned int devfn, int where, + int size, u32 *value) +{ + unsigned long addr; + unsigned char type1; + + if (mk_conf_addr(bus, devfn, where, &addr, &type1)) + return PCIBIOS_DEVICE_NOT_FOUND; + + switch (size) { + case 1: + *value = __kernel_ldbu(*(vucp)addr); + break; + case 2: + *value = __kernel_ldwu(*(vusp)addr); + break; + case 4: + *value = *(vuip)addr; + break; + } + + return PCIBIOS_SUCCESSFUL; +} + +static int +titan_write_config(struct pci_bus *bus, unsigned int devfn, int where, + int size, u32 value) +{ + unsigned long addr; + unsigned char type1; + + if (mk_conf_addr(bus, devfn, where, &addr, &type1)) + return PCIBIOS_DEVICE_NOT_FOUND; + + switch (size) { + case 1: + __kernel_stb(value, *(vucp)addr); + mb(); + __kernel_ldbu(*(vucp)addr); + break; + case 2: + __kernel_stw(value, *(vusp)addr); + mb(); + __kernel_ldwu(*(vusp)addr); + break; + case 4: + *(vuip)addr = value; + mb(); + *(vuip)addr; + break; + } + + return PCIBIOS_SUCCESSFUL; +} + +struct pci_ops titan_pci_ops = +{ + .read = titan_read_config, + .write = titan_write_config, +}; + + +void +titan_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end) +{ + titan_pachip *pachip = + (hose->index & 1) ? TITAN_pachip1 : TITAN_pachip0; + titan_pachip_port *port; + volatile unsigned long *csr; + unsigned long value; + + /* Get the right hose. */ + port = &pachip->g_port; + if (hose->index & 2) + port = &pachip->a_port; + + /* We can invalidate up to 8 tlb entries in a go. The flush + matches against <31:16> in the pci address. + Note that gtlbi* and atlbi* are in the same place in the g_port + and a_port, respectively, so the g_port offset can be used + even if hose is an a_port */ + csr = &port->port_specific.g.gtlbia.csr; + if (((start ^ end) & 0xffff0000) == 0) + csr = &port->port_specific.g.gtlbiv.csr; + + /* For TBIA, it doesn't matter what value we write. For TBI, + it's the shifted tag bits. */ + value = (start & 0xffff0000) >> 12; + + wmb(); + *csr = value; + mb(); + *csr; +} + +static int +titan_query_agp(titan_pachip_port *port) +{ + union TPAchipPCTL pctl; + + /* set up APCTL */ + pctl.pctl_q_whole = port->pctl.csr; + + return pctl.pctl_r_bits.apctl_v_agp_present; + +} + +static void __init +titan_init_one_pachip_port(titan_pachip_port *port, int index) +{ + struct pci_controller *hose; + + hose = alloc_pci_controller(); + if (index == 0) + pci_isa_hose = hose; + hose->io_space = alloc_resource(); + hose->mem_space = alloc_resource(); + + /* + * This is for userland consumption. The 40-bit PIO bias that we + * use in the kernel through KSEG doesn't work in the page table + * based user mappings. (43-bit KSEG sign extends the physical + * address from bit 40 to hit the I/O bit - mapped addresses don't). + * So make sure we get the 43-bit PIO bias. 
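Concretely, the masks in the assignments just below keep the low 40 address bits and set bit 43 (0x80000000000 = 1UL << 43), which is the PIO bias that does survive page-table-based mappings. A one-line sketch of that arithmetic (illustrative only; mask values copied from the code below):

#include <stdint.h>

/* Keep the 40-bit offset, set bit 43 -- the 43-bit PIO bias. */
static uint64_t pio_bias_43(uint64_t raw)
{
	return (raw & 0xffffffffffULL) | (1ULL << 43);
}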
+ */ + hose->sparse_mem_base = 0; + hose->sparse_io_base = 0; + hose->dense_mem_base + = (TITAN_MEM(index) & 0xffffffffffUL) | 0x80000000000UL; + hose->dense_io_base + = (TITAN_IO(index) & 0xffffffffffUL) | 0x80000000000UL; + + hose->config_space_base = TITAN_CONF(index); + hose->index = index; + + hose->io_space->start = TITAN_IO(index) - TITAN_IO_BIAS; + hose->io_space->end = hose->io_space->start + TITAN_IO_SPACE - 1; + hose->io_space->name = pci_io_names[index]; + hose->io_space->flags = IORESOURCE_IO; + + hose->mem_space->start = TITAN_MEM(index) - TITAN_MEM_BIAS; + hose->mem_space->end = hose->mem_space->start + 0xffffffff; + hose->mem_space->name = pci_mem_names[index]; + hose->mem_space->flags = IORESOURCE_MEM; + + if (request_resource(&ioport_resource, hose->io_space) < 0) + printk(KERN_ERR "Failed to request IO on hose %d\n", index); + if (request_resource(&iomem_resource, hose->mem_space) < 0) + printk(KERN_ERR "Failed to request MEM on hose %d\n", index); + + /* + * Save the existing PCI window translations. SRM will + * need them when we go to reboot. + */ + saved_config[index].wsba[0] = port->wsba[0].csr; + saved_config[index].wsm[0] = port->wsm[0].csr; + saved_config[index].tba[0] = port->tba[0].csr; + + saved_config[index].wsba[1] = port->wsba[1].csr; + saved_config[index].wsm[1] = port->wsm[1].csr; + saved_config[index].tba[1] = port->tba[1].csr; + + saved_config[index].wsba[2] = port->wsba[2].csr; + saved_config[index].wsm[2] = port->wsm[2].csr; + saved_config[index].tba[2] = port->tba[2].csr; + + saved_config[index].wsba[3] = port->wsba[3].csr; + saved_config[index].wsm[3] = port->wsm[3].csr; + saved_config[index].tba[3] = port->tba[3].csr; + + /* + * Set up the PCI to main memory translation windows. + * + * Note: Window 3 on Titan is Scatter-Gather ONLY. + * + * Window 0 is scatter-gather 8MB at 8MB (for isa) + * Window 1 is direct access 1GB at 2GB + * Window 2 is scatter-gather 1GB at 3GB + */ + hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0); + hose->sg_isa->align_entry = 8; /* 64KB for ISA */ + + hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x40000000, 0); + hose->sg_pci->align_entry = 4; /* Titan caches 4 PTEs at a time */ + + port->wsba[0].csr = hose->sg_isa->dma_base | 3; + port->wsm[0].csr = (hose->sg_isa->size - 1) & 0xfff00000; + port->tba[0].csr = virt_to_phys(hose->sg_isa->ptes); + + port->wsba[1].csr = __direct_map_base | 1; + port->wsm[1].csr = (__direct_map_size - 1) & 0xfff00000; + port->tba[1].csr = 0; + + port->wsba[2].csr = hose->sg_pci->dma_base | 3; + port->wsm[2].csr = (hose->sg_pci->size - 1) & 0xfff00000; + port->tba[2].csr = virt_to_phys(hose->sg_pci->ptes); + + port->wsba[3].csr = 0; + + /* Enable the Monster Window to make DAC pci64 possible. */ + port->pctl.csr |= pctl_m_mwin; + + /* + * If it's an AGP port, initialize agplastwr. + */ + if (titan_query_agp(port)) + port->port_specific.a.agplastwr.csr = __direct_map_base; + + titan_pci_tbi(hose, 0, -1); +} + +static void __init +titan_init_pachips(titan_pachip *pachip0, titan_pachip *pachip1) +{ + int pchip1_present = TITAN_cchip->csc.csr & 1L<<14; + + /* Init the ports in hose order... 
*/ + titan_init_one_pachip_port(&pachip0->g_port, 0); /* hose 0 */ + if (pchip1_present) + titan_init_one_pachip_port(&pachip1->g_port, 1);/* hose 1 */ + titan_init_one_pachip_port(&pachip0->a_port, 2); /* hose 2 */ + if (pchip1_present) + titan_init_one_pachip_port(&pachip1->a_port, 3);/* hose 3 */ +} + +static void __init +titan_init_vga_hose(void) +{ +#ifdef CONFIG_VGA_HOSE + u64 *pu64 = (u64 *)((u64)hwrpb + hwrpb->ctbt_offset); + + if (pu64[7] == 3) { /* TERM_TYPE == graphics */ + struct pci_controller *hose; + int h = (pu64[30] >> 24) & 0xff; /* console hose # */ + + /* + * Our hose numbering matches the console's, so just find + * the right one... + */ + for (hose = hose_head; hose; hose = hose->next) { + if (hose->index == h) break; + } + + if (hose) { + printk("Console graphics on hose %d\n", hose->index); + pci_vga_hose = hose; + } + } +#endif /* CONFIG_VGA_HOSE */ +} + +void __init +titan_init_arch(void) +{ +#if 0 + printk("%s: titan_init_arch()\n", __FUNCTION__); + printk("%s: CChip registers:\n", __FUNCTION__); + printk("%s: CSR_CSC 0x%lx\n", __FUNCTION__, TITAN_cchip->csc.csr); + printk("%s: CSR_MTR 0x%lx\n", __FUNCTION__, TITAN_cchip->mtr.csr); + printk("%s: CSR_MISC 0x%lx\n", __FUNCTION__, TITAN_cchip->misc.csr); + printk("%s: CSR_DIM0 0x%lx\n", __FUNCTION__, TITAN_cchip->dim0.csr); + printk("%s: CSR_DIM1 0x%lx\n", __FUNCTION__, TITAN_cchip->dim1.csr); + printk("%s: CSR_DIR0 0x%lx\n", __FUNCTION__, TITAN_cchip->dir0.csr); + printk("%s: CSR_DIR1 0x%lx\n", __FUNCTION__, TITAN_cchip->dir1.csr); + printk("%s: CSR_DRIR 0x%lx\n", __FUNCTION__, TITAN_cchip->drir.csr); + + printk("%s: DChip registers:\n", __FUNCTION__); + printk("%s: CSR_DSC 0x%lx\n", __FUNCTION__, TITAN_dchip->dsc.csr); + printk("%s: CSR_STR 0x%lx\n", __FUNCTION__, TITAN_dchip->str.csr); + printk("%s: CSR_DREV 0x%lx\n", __FUNCTION__, TITAN_dchip->drev.csr); +#endif + + boot_cpuid = __hard_smp_processor_id(); + + /* With multiple PCI busses, we play with I/O as physical addrs. */ + ioport_resource.end = ~0UL; + + /* PCI DMA Direct Mapping is 1GB at 2GB. */ + __direct_map_base = 0x80000000; + __direct_map_size = 0x40000000; + + /* Init the PA chip(s). */ + titan_init_pachips(TITAN_pachip0, TITAN_pachip1); + + /* Check for graphic console location (if any). */ + titan_init_vga_hose(); +} + +static void +titan_kill_one_pachip_port(titan_pachip_port *port, int index) +{ + port->wsba[0].csr = saved_config[index].wsba[0]; + port->wsm[0].csr = saved_config[index].wsm[0]; + port->tba[0].csr = saved_config[index].tba[0]; + + port->wsba[1].csr = saved_config[index].wsba[1]; + port->wsm[1].csr = saved_config[index].wsm[1]; + port->tba[1].csr = saved_config[index].tba[1]; + + port->wsba[2].csr = saved_config[index].wsba[2]; + port->wsm[2].csr = saved_config[index].wsm[2]; + port->tba[2].csr = saved_config[index].tba[2]; + + port->wsba[3].csr = saved_config[index].wsba[3]; + port->wsm[3].csr = saved_config[index].wsm[3]; + port->tba[3].csr = saved_config[index].tba[3]; +} + +static void +titan_kill_pachips(titan_pachip *pachip0, titan_pachip *pachip1) +{ + int pchip1_present = TITAN_cchip->csc.csr & 1L<<14; + + if (pchip1_present) { + titan_kill_one_pachip_port(&pachip1->g_port, 1); + titan_kill_one_pachip_port(&pachip1->a_port, 3); + } + titan_kill_one_pachip_port(&pachip0->g_port, 0); + titan_kill_one_pachip_port(&pachip0->a_port, 2); +} + +void +titan_kill_arch(int mode) +{ + titan_kill_pachips(TITAN_pachip0, TITAN_pachip1); +} + + +/* + * IO map support. 
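For orientation, this is roughly how a driver ends up in the remap path implemented below (a hedged usage sketch, not code from the patch; on a TITAN kernel the generic ioremap()/iounmap() calls reach titan_ioremap()/titan_iounmap() via the exports further down):

#include <asm/io.h>
#include <asm/page.h>

static u32 peek_first_reg(unsigned long bus_addr)
{
	void __iomem *regs = ioremap(bus_addr, PAGE_SIZE);
	u32 val = 0;

	if (regs) {
		val = readl(regs);	/* direct-mapped or SG-backed PIO */
		iounmap(regs);
	}
	return val;
}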
+ */ + +void __iomem * +titan_ioremap(unsigned long addr, unsigned long size) +{ + int h = (addr & TITAN_HOSE_MASK) >> TITAN_HOSE_SHIFT; + unsigned long baddr = addr & ~TITAN_HOSE_MASK; + unsigned long last = baddr + size - 1; + struct pci_controller *hose; + struct vm_struct *area; + unsigned long vaddr; + unsigned long *ptes; + unsigned long pfn; + + /* + * Adjust the addr. + */ +#ifdef CONFIG_VGA_HOSE + if (pci_vga_hose && __titan_is_mem_vga(addr)) { + h = pci_vga_hose->index; + addr += pci_vga_hose->mem_space->start; + } +#endif + + /* + * Find the hose. + */ + for (hose = hose_head; hose; hose = hose->next) + if (hose->index == h) + break; + if (!hose) + return NULL; + + /* + * Is it direct-mapped? + */ + if ((baddr >= __direct_map_base) && + ((baddr + size - 1) < __direct_map_base + __direct_map_size)) { + vaddr = addr - __direct_map_base + TITAN_MEM_BIAS; + return (void __iomem *) vaddr; + } + + /* + * Check the scatter-gather arena. + */ + if (hose->sg_pci && + baddr >= (unsigned long)hose->sg_pci->dma_base && + last < (unsigned long)hose->sg_pci->dma_base + hose->sg_pci->size){ + + /* + * Adjust the limits (mappings must be page aligned) + */ + baddr -= hose->sg_pci->dma_base; + last -= hose->sg_pci->dma_base; + baddr &= PAGE_MASK; + size = PAGE_ALIGN(last) - baddr; + + /* + * Map it + */ + area = get_vm_area(size, VM_IOREMAP); + if (!area) + return NULL; + + ptes = hose->sg_pci->ptes; + for (vaddr = (unsigned long)area->addr; + baddr <= last; + baddr += PAGE_SIZE, vaddr += PAGE_SIZE) { + pfn = ptes[baddr >> PAGE_SHIFT]; + if (!(pfn & 1)) { + printk("ioremap failed... pte not valid...\n"); + vfree(area->addr); + return NULL; + } + pfn >>= 1; /* make it a true pfn */ + + if (__alpha_remap_area_pages(vaddr, + pfn << PAGE_SHIFT, + PAGE_SIZE, 0)) { + printk("FAILED to map...\n"); + vfree(area->addr); + return NULL; + } + } + + flush_tlb_all(); + + vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK); + return (void __iomem *) vaddr; + } + + return NULL; +} + +void +titan_iounmap(volatile void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr; + if (addr >= VMALLOC_START) + vfree((void *)(PAGE_MASK & addr)); +} + +int +titan_is_mmio(const volatile void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr; + + if (addr >= VMALLOC_START) + return 1; + else + return (addr & 0x100000000UL) == 0; +} + +#ifndef CONFIG_ALPHA_GENERIC +EXPORT_SYMBOL(titan_ioremap); +EXPORT_SYMBOL(titan_iounmap); +EXPORT_SYMBOL(titan_is_mmio); +#endif + +/* + * AGP GART Support. 
+ */ +#include <linux/agp_backend.h> +#include <asm/agp_backend.h> +#include <linux/slab.h> +#include <linux/delay.h> + +struct titan_agp_aperture { + struct pci_iommu_arena *arena; + long pg_start; + long pg_count; +}; + +static int +titan_agp_setup(alpha_agp_info *agp) +{ + struct titan_agp_aperture *aper; + + if (!alpha_agpgart_size) + return -ENOMEM; + + aper = kmalloc(sizeof(struct titan_agp_aperture), GFP_KERNEL); + if (aper == NULL) + return -ENOMEM; + + aper->arena = agp->hose->sg_pci; + aper->pg_count = alpha_agpgart_size / PAGE_SIZE; + aper->pg_start = iommu_reserve(aper->arena, aper->pg_count, + aper->pg_count - 1); + if (aper->pg_start < 0) { + printk(KERN_ERR "Failed to reserve AGP memory\n"); + kfree(aper); + return -ENOMEM; + } + + agp->aperture.bus_base = + aper->arena->dma_base + aper->pg_start * PAGE_SIZE; + agp->aperture.size = aper->pg_count * PAGE_SIZE; + agp->aperture.sysdata = aper; + + return 0; +} + +static void +titan_agp_cleanup(alpha_agp_info *agp) +{ + struct titan_agp_aperture *aper = agp->aperture.sysdata; + int status; + + status = iommu_release(aper->arena, aper->pg_start, aper->pg_count); + if (status == -EBUSY) { + printk(KERN_WARNING + "Attempted to release bound AGP memory - unbinding\n"); + iommu_unbind(aper->arena, aper->pg_start, aper->pg_count); + status = iommu_release(aper->arena, aper->pg_start, + aper->pg_count); + } + if (status < 0) + printk(KERN_ERR "Failed to release AGP memory\n"); + + kfree(aper); + kfree(agp); +} + +static int +titan_agp_configure(alpha_agp_info *agp) +{ + union TPAchipPCTL pctl; + titan_pachip_port *port = agp->private; + pctl.pctl_q_whole = port->pctl.csr; + + /* Side-Band Addressing? */ + pctl.pctl_r_bits.apctl_v_agp_sba_en = agp->mode.bits.sba; + + /* AGP Rate? */ + pctl.pctl_r_bits.apctl_v_agp_rate = 0; /* 1x */ + if (agp->mode.bits.rate & 2) + pctl.pctl_r_bits.apctl_v_agp_rate = 1; /* 2x */ +#if 0 + if (agp->mode.bits.rate & 4) + pctl.pctl_r_bits.apctl_v_agp_rate = 2; /* 4x */ +#endif + + /* RQ Depth? */ + pctl.pctl_r_bits.apctl_v_agp_hp_rd = 2; + pctl.pctl_r_bits.apctl_v_agp_lp_rd = 7; + + /* + * AGP Enable. + */ + pctl.pctl_r_bits.apctl_v_agp_en = agp->mode.bits.enable; + + /* Tell the user. */ + printk("Enabling AGP: %dX%s\n", + 1 << pctl.pctl_r_bits.apctl_v_agp_rate, + pctl.pctl_r_bits.apctl_v_agp_sba_en ? " - SBA" : ""); + + /* Write it. */ + port->pctl.csr = pctl.pctl_q_whole; + + /* And wait at least 5000 66MHz cycles (per Titan spec). 
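 * (5000 cycles at 66 MHz is 5000 / 66,000,000 s, i.e. ~75.8 us, so the
 * udelay(100) just below covers the spec'd interval with roughly a
 * 30% margin.)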
*/ + udelay(100); + + return 0; +} + +static int +titan_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem) +{ + struct titan_agp_aperture *aper = agp->aperture.sysdata; + return iommu_bind(aper->arena, aper->pg_start + pg_start, + mem->page_count, mem->memory); +} + +static int +titan_agp_unbind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem) +{ + struct titan_agp_aperture *aper = agp->aperture.sysdata; + return iommu_unbind(aper->arena, aper->pg_start + pg_start, + mem->page_count); +} + +static unsigned long +titan_agp_translate(alpha_agp_info *agp, dma_addr_t addr) +{ + struct titan_agp_aperture *aper = agp->aperture.sysdata; + unsigned long baddr = addr - aper->arena->dma_base; + unsigned long pte; + + if (addr < agp->aperture.bus_base || + addr >= agp->aperture.bus_base + agp->aperture.size) { + printk("%s: addr out of range\n", __FUNCTION__); + return -EINVAL; + } + + pte = aper->arena->ptes[baddr >> PAGE_SHIFT]; + if (!(pte & 1)) { + printk("%s: pte not valid\n", __FUNCTION__); + return -EINVAL; + } + + return (pte >> 1) << PAGE_SHIFT; +} + +struct alpha_agp_ops titan_agp_ops = +{ + .setup = titan_agp_setup, + .cleanup = titan_agp_cleanup, + .configure = titan_agp_configure, + .bind = titan_agp_bind_memory, + .unbind = titan_agp_unbind_memory, + .translate = titan_agp_translate +}; + +alpha_agp_info * +titan_agp_info(void) +{ + alpha_agp_info *agp; + struct pci_controller *hose; + titan_pachip_port *port; + int hosenum = -1; + union TPAchipPCTL pctl; + + /* + * Find the AGP port. + */ + port = &TITAN_pachip0->a_port; + if (titan_query_agp(port)) + hosenum = 2; + if (hosenum < 0 && + titan_query_agp(port = &TITAN_pachip1->a_port)) + hosenum = 3; + + /* + * Find the hose the port is on. + */ + for (hose = hose_head; hose; hose = hose->next) + if (hose->index == hosenum) + break; + + if (!hose || !hose->sg_pci) + return NULL; + + /* + * Allocate the info structure. + */ + agp = kmalloc(sizeof(*agp), GFP_KERNEL); + if (!agp) + return NULL; + + /* + * Fill it in. + */ + agp->hose = hose; + agp->private = port; + agp->ops = &titan_agp_ops; + + /* + * Aperture - not configured until ops.setup(). + * + * FIXME - should we go ahead and allocate it here? + */ + agp->aperture.bus_base = 0; + agp->aperture.size = 0; + agp->aperture.sysdata = NULL; + + /* + * Capabilities. + */ + agp->capability.lw = 0; + agp->capability.bits.rate = 3; /* 2x, 1x */ + agp->capability.bits.sba = 1; + agp->capability.bits.rq = 7; /* 8 - 1 */ + + /* + * Mode. + */ + pctl.pctl_q_whole = port->pctl.csr; + agp->mode.lw = 0; + agp->mode.bits.rate = 1 << pctl.pctl_r_bits.apctl_v_agp_rate; + agp->mode.bits.sba = pctl.pctl_r_bits.apctl_v_agp_sba_en; + agp->mode.bits.rq = 7; /* RQ Depth? */ + agp->mode.bits.enable = pctl.pctl_r_bits.apctl_v_agp_en; + + return agp; +} diff --git a/arch/alpha/kernel/core_tsunami.c b/arch/alpha/kernel/core_tsunami.c new file mode 100644 index 0000000..8aa305b --- /dev/null +++ b/arch/alpha/kernel/core_tsunami.c @@ -0,0 +1,459 @@ +/* + * linux/arch/alpha/kernel/core_tsunami.c + * + * Based on code written by David A. Rusling (david.rusling@reo.mts.dec.com). + * + * Code common to all TSUNAMI core logic chips.
+ */ + +#define __EXTERN_INLINE inline +#include <asm/io.h> +#include <asm/core_tsunami.h> +#undef __EXTERN_INLINE + +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/sched.h> +#include <linux/init.h> +#include <linux/bootmem.h> + +#include <asm/ptrace.h> +#include <asm/smp.h> + +#include "proto.h" +#include "pci_impl.h" + +/* Save Tsunami configuration data as the console had it set up. */ + +struct +{ + unsigned long wsba[4]; + unsigned long wsm[4]; + unsigned long tba[4]; +} saved_config[2] __attribute__((common)); + +/* + * NOTE: Herein lie back-to-back mb instructions. They are magic. + * One plausible explanation is that the I/O controller does not properly + * handle the system transaction. Another involves timing. Ho hum. + */ + +/* + * BIOS32-style PCI interface: + */ + +#define DEBUG_CONFIG 0 + +#if DEBUG_CONFIG +# define DBG_CFG(args) printk args +#else +# define DBG_CFG(args) +#endif + + +/* + * Given a bus, device, and function number, compute the resulting + * configuration space address accordingly. It is therefore not safe + * to have concurrent invocations of configuration space access + * routines, but there really shouldn't be any need for this. + * + * Note that all config space accesses use Type 1 address format. + * + * Note also that type 1 is determined by non-zero bus number. + * + * Type 1: + * + * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 + * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1| + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * 31:24 reserved + * 23:16 bus number (8 bits = 256 possible buses) + * 15:11 Device number (5 bits) + * 10:8 function number + * 7:2 register number + * + * Notes: + * The function number selects which function of a multi-function device + * (e.g., SCSI and Ethernet). + * + * The register selects a DWORD (32 bit) register offset. Hence it + * doesn't get shifted by 2 bits as we want to "drop" the bottom two + * bits. + */ + +static int +mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where, + unsigned long *pci_addr, unsigned char *type1) +{ + struct pci_controller *hose = pbus->sysdata; + unsigned long addr; + u8 bus = pbus->number; + + DBG_CFG(("mk_conf_addr(bus=%d, device_fn=0x%x, where=0x%x, " + "pci_addr=0x%p, type1=0x%p)\n", + bus, device_fn, where, pci_addr, type1)); + + if (!pbus->parent) /* No parent means peer PCI bus.
*/ + bus = 0; + *type1 = (bus != 0); + + addr = (bus << 16) | (device_fn << 8) | where; + addr |= hose->config_space_base; + + *pci_addr = addr; + DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr)); + return 0; +} + +static int +tsunami_read_config(struct pci_bus *bus, unsigned int devfn, int where, + int size, u32 *value) +{ + unsigned long addr; + unsigned char type1; + + if (mk_conf_addr(bus, devfn, where, &addr, &type1)) + return PCIBIOS_DEVICE_NOT_FOUND; + + switch (size) { + case 1: + *value = __kernel_ldbu(*(vucp)addr); + break; + case 2: + *value = __kernel_ldwu(*(vusp)addr); + break; + case 4: + *value = *(vuip)addr; + break; + } + + return PCIBIOS_SUCCESSFUL; +} + +static int +tsunami_write_config(struct pci_bus *bus, unsigned int devfn, int where, + int size, u32 value) +{ + unsigned long addr; + unsigned char type1; + + if (mk_conf_addr(bus, devfn, where, &addr, &type1)) + return PCIBIOS_DEVICE_NOT_FOUND; + + switch (size) { + case 1: + __kernel_stb(value, *(vucp)addr); + mb(); + __kernel_ldbu(*(vucp)addr); + break; + case 2: + __kernel_stw(value, *(vusp)addr); + mb(); + __kernel_ldwu(*(vusp)addr); + break; + case 4: + *(vuip)addr = value; + mb(); + *(vuip)addr; + break; + } + + return PCIBIOS_SUCCESSFUL; +} + +struct pci_ops tsunami_pci_ops = +{ + .read = tsunami_read_config, + .write = tsunami_write_config, +}; + +void +tsunami_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end) +{ + tsunami_pchip *pchip = hose->index ? TSUNAMI_pchip1 : TSUNAMI_pchip0; + volatile unsigned long *csr; + unsigned long value; + + /* We can invalidate up to 8 tlb entries in a go. The flush + matches against <31:16> in the pci address. */ + csr = &pchip->tlbia.csr; + if (((start ^ end) & 0xffff0000) == 0) + csr = &pchip->tlbiv.csr; + + /* For TBIA, it doesn't matter what value we write. For TBI, + it's the shifted tag bits. */ + value = (start & 0xffff0000) >> 12; + + *csr = value; + mb(); + *csr; +} + +#ifdef NXM_MACHINE_CHECKS_ON_TSUNAMI +static long __init +tsunami_probe_read(volatile unsigned long *vaddr) +{ + long dont_care, probe_result; + int cpu = smp_processor_id(); + int s = swpipl(IPL_MCHECK - 1); + + mcheck_taken(cpu) = 0; + mcheck_expected(cpu) = 1; + mb(); + dont_care = *vaddr; + draina(); + mcheck_expected(cpu) = 0; + probe_result = !mcheck_taken(cpu); + mcheck_taken(cpu) = 0; + setipl(s); + + printk("dont_care == 0x%lx\n", dont_care); + + return probe_result; +} + +static long __init +tsunami_probe_write(volatile unsigned long *vaddr) +{ + long true_contents, probe_result = 1; + + TSUNAMI_cchip->misc.csr |= (1L << 28); /* clear NXM... */ + true_contents = *vaddr; + *vaddr = 0; + draina(); + if (TSUNAMI_cchip->misc.csr & (1L << 28)) { + int source = (TSUNAMI_cchip->misc.csr >> 29) & 7; + TSUNAMI_cchip->misc.csr |= (1L << 28); /* ...and unlock NXS. */ + probe_result = 0; + printk("tsunami_probe_write: unit %d at 0x%016lx\n", source, + (unsigned long)vaddr); + } + if (probe_result) + *vaddr = true_contents; + return probe_result; +} +#else +#define tsunami_probe_read(ADDR) 1 +#endif /* NXM_MACHINE_CHECKS_ON_TSUNAMI */ + +#define FN __FUNCTION__ + +static void __init +tsunami_init_one_pchip(tsunami_pchip *pchip, int index) +{ + struct pci_controller *hose; + + if (tsunami_probe_read(&pchip->pctl.csr) == 0) + return; + + hose = alloc_pci_controller(); + if (index == 0) + pci_isa_hose = hose; + hose->io_space = alloc_resource(); + hose->mem_space = alloc_resource(); + + /* This is for userland consumption. 
For some reason, the 40-bit + PIO bias that we use in the kernel through KSEG didn't work for + the page table based user mappings. So make sure we get the + 43-bit PIO bias. */ + hose->sparse_mem_base = 0; + hose->sparse_io_base = 0; + hose->dense_mem_base + = (TSUNAMI_MEM(index) & 0xffffffffffL) | 0x80000000000L; + hose->dense_io_base + = (TSUNAMI_IO(index) & 0xffffffffffL) | 0x80000000000L; + + hose->config_space_base = TSUNAMI_CONF(index); + hose->index = index; + + hose->io_space->start = TSUNAMI_IO(index) - TSUNAMI_IO_BIAS; + hose->io_space->end = hose->io_space->start + TSUNAMI_IO_SPACE - 1; + hose->io_space->name = pci_io_names[index]; + hose->io_space->flags = IORESOURCE_IO; + + hose->mem_space->start = TSUNAMI_MEM(index) - TSUNAMI_MEM_BIAS; + hose->mem_space->end = hose->mem_space->start + 0xffffffff; + hose->mem_space->name = pci_mem_names[index]; + hose->mem_space->flags = IORESOURCE_MEM; + + if (request_resource(&ioport_resource, hose->io_space) < 0) + printk(KERN_ERR "Failed to request IO on hose %d\n", index); + if (request_resource(&iomem_resource, hose->mem_space) < 0) + printk(KERN_ERR "Failed to request MEM on hose %d\n", index); + + /* + * Save the existing PCI window translations. SRM will + * need them when we go to reboot. + */ + + saved_config[index].wsba[0] = pchip->wsba[0].csr; + saved_config[index].wsm[0] = pchip->wsm[0].csr; + saved_config[index].tba[0] = pchip->tba[0].csr; + + saved_config[index].wsba[1] = pchip->wsba[1].csr; + saved_config[index].wsm[1] = pchip->wsm[1].csr; + saved_config[index].tba[1] = pchip->tba[1].csr; + + saved_config[index].wsba[2] = pchip->wsba[2].csr; + saved_config[index].wsm[2] = pchip->wsm[2].csr; + saved_config[index].tba[2] = pchip->tba[2].csr; + + saved_config[index].wsba[3] = pchip->wsba[3].csr; + saved_config[index].wsm[3] = pchip->wsm[3].csr; + saved_config[index].tba[3] = pchip->tba[3].csr; + + /* + * Set up the PCI to main memory translation windows. + * + * Note: Window 3 is scatter-gather only + * + * Window 0 is scatter-gather 8MB at 8MB (for isa) + * Window 1 is scatter-gather (up to) 1GB at 1GB + * Window 2 is direct access 2GB at 2GB + * + * NOTE: we need the align_entry settings for Acer devices on ES40, + * specifically floppy and IDE when memory is larger than 2GB. + */ + hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0); + /* Initially set for 4 PTEs, but will be overridden to 64K for ISA. */ + hose->sg_isa->align_entry = 4; + + hose->sg_pci = iommu_arena_new(hose, 0x40000000, + size_for_memory(0x40000000), 0); + hose->sg_pci->align_entry = 4; /* Tsunami caches 4 PTEs at a time */ + + __direct_map_base = 0x80000000; + __direct_map_size = 0x80000000; + + pchip->wsba[0].csr = hose->sg_isa->dma_base | 3; + pchip->wsm[0].csr = (hose->sg_isa->size - 1) & 0xfff00000; + pchip->tba[0].csr = virt_to_phys(hose->sg_isa->ptes); + + pchip->wsba[1].csr = hose->sg_pci->dma_base | 3; + pchip->wsm[1].csr = (hose->sg_pci->size - 1) & 0xfff00000; + pchip->tba[1].csr = virt_to_phys(hose->sg_pci->ptes); + + pchip->wsba[2].csr = 0x80000000 | 1; + pchip->wsm[2].csr = (0x80000000 - 1) & 0xfff00000; + pchip->tba[2].csr = 0; + + pchip->wsba[3].csr = 0; + + /* Enable the Monster Window to make DAC pci64 possible. */ + pchip->pctl.csr |= pctl_m_mwin; + + tsunami_pci_tbi(hose, 0, -1); +} + +void __init +tsunami_init_arch(void) +{ +#ifdef NXM_MACHINE_CHECKS_ON_TSUNAMI + unsigned long tmp; + + /* Ho hum.. init_arch is called before init_IRQ, but we need to be + able to handle machine checks. So install the handler now. 
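Stepping back to the window setup above: each pchip window is programmed with the same three CSRs. A minimal sketch of the direct-mapped case (set_direct_window is hypothetical; the code above writes the registers inline):

        // program pchip window w as a direct (non-scatter-gather) map
        static void set_direct_window(tsunami_pchip *p, int w,
                                      unsigned long pci_base,
                                      unsigned long size, unsigned long phys)
        {
                p->wsba[w].csr = pci_base | 1;            // bit 0: enable
                p->wsm[w].csr = (size - 1) & 0xfff00000;  // 1MB granularity
                p->tba[w].csr = phys;                     // translated base
        }

Scatter-gather windows differ only in using pci_base | 3 (bit 1 selects SG) and pointing tba at the PTE arena, as windows 0 and 1 do above.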
 */
+ wrent(entInt, 0);
+
+ /* NXMs just don't matter to Tsunami--unless they make it
+ choke completely. */
+ tmp = (unsigned long)(TSUNAMI_cchip - 1);
+ printk("%s: probing bogus address: 0x%016lx\n", FN, tmp);
+ printk("\tprobe %s\n",
+ tsunami_probe_write((unsigned long *)tmp)
+ ? "succeeded" : "failed");
+#endif /* NXM_MACHINE_CHECKS_ON_TSUNAMI */
+
+#if 0
+ printk("%s: CChip registers:\n", FN);
+ printk("%s: CSR_CSC 0x%lx\n", FN, TSUNAMI_cchip->csc.csr);
+ printk("%s: CSR_MTR 0x%lx\n", FN, TSUNAMI_cchip->mtr.csr);
+ printk("%s: CSR_MISC 0x%lx\n", FN, TSUNAMI_cchip->misc.csr);
+ printk("%s: CSR_DIM0 0x%lx\n", FN, TSUNAMI_cchip->dim0.csr);
+ printk("%s: CSR_DIM1 0x%lx\n", FN, TSUNAMI_cchip->dim1.csr);
+ printk("%s: CSR_DIR0 0x%lx\n", FN, TSUNAMI_cchip->dir0.csr);
+ printk("%s: CSR_DIR1 0x%lx\n", FN, TSUNAMI_cchip->dir1.csr);
+ printk("%s: CSR_DRIR 0x%lx\n", FN, TSUNAMI_cchip->drir.csr);
+
+ printk("%s: DChip registers:\n", FN);
+ printk("%s: CSR_DSC 0x%lx\n", FN, TSUNAMI_dchip->dsc.csr);
+ printk("%s: CSR_STR 0x%lx\n", FN, TSUNAMI_dchip->str.csr);
+ printk("%s: CSR_DREV 0x%lx\n", FN, TSUNAMI_dchip->drev.csr);
+#endif
+ /* With multiple PCI buses, we play with I/O as physical addrs. */
+ ioport_resource.end = ~0UL;
+
+ /* Find how many hoses we have, and initialize them. TSUNAMI
+ and TYPHOON can have 2, but might only have 1 (DS10). */
+
+ tsunami_init_one_pchip(TSUNAMI_pchip0, 0);
+ if (TSUNAMI_cchip->csc.csr & 1L<<14)
+ tsunami_init_one_pchip(TSUNAMI_pchip1, 1);
+}
+
+static void
+tsunami_kill_one_pchip(tsunami_pchip *pchip, int index)
+{
+ pchip->wsba[0].csr = saved_config[index].wsba[0];
+ pchip->wsm[0].csr = saved_config[index].wsm[0];
+ pchip->tba[0].csr = saved_config[index].tba[0];
+
+ pchip->wsba[1].csr = saved_config[index].wsba[1];
+ pchip->wsm[1].csr = saved_config[index].wsm[1];
+ pchip->tba[1].csr = saved_config[index].tba[1];
+
+ pchip->wsba[2].csr = saved_config[index].wsba[2];
+ pchip->wsm[2].csr = saved_config[index].wsm[2];
+ pchip->tba[2].csr = saved_config[index].tba[2];
+
+ pchip->wsba[3].csr = saved_config[index].wsba[3];
+ pchip->wsm[3].csr = saved_config[index].wsm[3];
+ pchip->tba[3].csr = saved_config[index].tba[3];
+}
+
+void
+tsunami_kill_arch(int mode)
+{
+ tsunami_kill_one_pchip(TSUNAMI_pchip0, 0);
+ if (TSUNAMI_cchip->csc.csr & 1L<<14)
+ tsunami_kill_one_pchip(TSUNAMI_pchip1, 1);
+}
+
+static inline void
+tsunami_pci_clr_err_1(tsunami_pchip *pchip)
+{
+ pchip->perror.csr;
+ pchip->perror.csr = 0x040;
+ mb();
+ pchip->perror.csr;
+}
+
+static inline void
+tsunami_pci_clr_err(void)
+{
+ tsunami_pci_clr_err_1(TSUNAMI_pchip0);
+
+ /* TSUNAMI and TYPHOON can have 2, but might only have 1 (DS10) */
+ if (TSUNAMI_cchip->csc.csr & 1L<<14)
+ tsunami_pci_clr_err_1(TSUNAMI_pchip1);
+}
+
+void
+tsunami_machine_check(unsigned long vector, unsigned long la_ptr,
+ struct pt_regs * regs)
+{
+ /* Clear error before any reporting. */
+ mb();
+ mb(); /* magic */
+ draina();
+ tsunami_pci_clr_err();
+ wrmces(0x7);
+ mb();
+
+ process_mcheck_info(vector, la_ptr, regs, "TSUNAMI",
+ mcheck_expected(smp_processor_id()));
+}
diff --git a/arch/alpha/kernel/core_wildfire.c b/arch/alpha/kernel/core_wildfire.c
new file mode 100644
index 0000000..2b767a1
--- /dev/null
+++ b/arch/alpha/kernel/core_wildfire.c
@@ -0,0 +1,658 @@
+/*
+ * linux/arch/alpha/kernel/core_wildfire.c
+ *
+ * Wildfire support.
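For orientation: a Wildfire box is built from QBBs, each carrying PCAs with two PCI hoses apiece, and this file identifies a hose by packing both numbers into pci_controller->index. A hedged sketch of the encoding (wf_index is a hypothetical helper; the code does it inline):

        // hose index packs the QBB number above the hose-within-QBB number
        static inline int wf_index(int qbbno, int hoseno)
        {
                return (qbbno << 3) + hoseno;   // up to 8 hoses per QBB
        }
        // inverse, as used by wildfire_pci_tbi later in this file:
        //   qbbno = index >> 3;  hoseno = index & 7;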
+ * + * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE + */ + +#define __EXTERN_INLINE inline +#include <asm/io.h> +#include <asm/core_wildfire.h> +#undef __EXTERN_INLINE + +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/sched.h> +#include <linux/init.h> + +#include <asm/ptrace.h> +#include <asm/smp.h> + +#include "proto.h" +#include "pci_impl.h" + +#define DEBUG_CONFIG 0 +#define DEBUG_DUMP_REGS 0 +#define DEBUG_DUMP_CONFIG 1 + +#if DEBUG_CONFIG +# define DBG_CFG(args) printk args +#else +# define DBG_CFG(args) +#endif + +#if DEBUG_DUMP_REGS +static void wildfire_dump_pci_regs(int qbbno, int hoseno); +static void wildfire_dump_pca_regs(int qbbno, int pcano); +static void wildfire_dump_qsa_regs(int qbbno); +static void wildfire_dump_qsd_regs(int qbbno); +static void wildfire_dump_iop_regs(int qbbno); +static void wildfire_dump_gp_regs(int qbbno); +#endif +#if DEBUG_DUMP_CONFIG +static void wildfire_dump_hardware_config(void); +#endif + +unsigned char wildfire_hard_qbb_map[WILDFIRE_MAX_QBB]; +unsigned char wildfire_soft_qbb_map[WILDFIRE_MAX_QBB]; +#define QBB_MAP_EMPTY 0xff + +unsigned long wildfire_hard_qbb_mask; +unsigned long wildfire_soft_qbb_mask; +unsigned long wildfire_gp_mask; +unsigned long wildfire_hs_mask; +unsigned long wildfire_iop_mask; +unsigned long wildfire_ior_mask; +unsigned long wildfire_pca_mask; +unsigned long wildfire_cpu_mask; +unsigned long wildfire_mem_mask; + +void __init +wildfire_init_hose(int qbbno, int hoseno) +{ + struct pci_controller *hose; + wildfire_pci *pci; + + hose = alloc_pci_controller(); + hose->io_space = alloc_resource(); + hose->mem_space = alloc_resource(); + + /* This is for userland consumption. */ + hose->sparse_mem_base = 0; + hose->sparse_io_base = 0; + hose->dense_mem_base = WILDFIRE_MEM(qbbno, hoseno); + hose->dense_io_base = WILDFIRE_IO(qbbno, hoseno); + + hose->config_space_base = WILDFIRE_CONF(qbbno, hoseno); + hose->index = (qbbno << 3) + hoseno; + + hose->io_space->start = WILDFIRE_IO(qbbno, hoseno) - WILDFIRE_IO_BIAS; + hose->io_space->end = hose->io_space->start + WILDFIRE_IO_SPACE - 1; + hose->io_space->name = pci_io_names[hoseno]; + hose->io_space->flags = IORESOURCE_IO; + + hose->mem_space->start = WILDFIRE_MEM(qbbno, hoseno)-WILDFIRE_MEM_BIAS; + hose->mem_space->end = hose->mem_space->start + 0xffffffff; + hose->mem_space->name = pci_mem_names[hoseno]; + hose->mem_space->flags = IORESOURCE_MEM; + + if (request_resource(&ioport_resource, hose->io_space) < 0) + printk(KERN_ERR "Failed to request IO on qbb %d hose %d\n", + qbbno, hoseno); + if (request_resource(&iomem_resource, hose->mem_space) < 0) + printk(KERN_ERR "Failed to request MEM on qbb %d hose %d\n", + qbbno, hoseno); + +#if DEBUG_DUMP_REGS + wildfire_dump_pci_regs(qbbno, hoseno); +#endif + + /* + * Set up the PCI to main memory translation windows. + * + * Note: Window 3 is scatter-gather only + * + * Window 0 is scatter-gather 8MB at 8MB (for isa) + * Window 1 is direct access 1GB at 1GB + * Window 2 is direct access 1GB at 2GB + * Window 3 is scatter-gather 128MB at 3GB + * ??? We ought to scale window 3 memory. 
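The resulting PCI-side layout can be sanity-checked with a short classifier; dma_window_of is illustrative only, with bounds taken from the iommu_arena_new calls and wbase values below:

        // which DMA window does a PCI bus address fall into?
        static int dma_window_of(unsigned long pci_addr)
        {
                if (pci_addr >= 0x00800000 && pci_addr < 0x01000000)
                        return 0;       // scatter-gather, 8MB at 8MB (ISA)
                if (pci_addr >= 0x40000000 && pci_addr < 0x80000000)
                        return 1;       // direct, 1GB at 1GB -> phys 0
                if (pci_addr >= 0x80000000 && pci_addr < 0xc0000000)
                        return 2;       // direct, 1GB at 2GB -> phys 1GB
                if (pci_addr >= 0xc0000000 && pci_addr < 0xc8000000)
                        return 3;       // scatter-gather, 128MB at 3GB
                return -1;              // not mapped for DMA
        }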
+ * + */ + hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0); + hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x08000000, 0); + + pci = WILDFIRE_pci(qbbno, hoseno); + + pci->pci_window[0].wbase.csr = hose->sg_isa->dma_base | 3; + pci->pci_window[0].wmask.csr = (hose->sg_isa->size - 1) & 0xfff00000; + pci->pci_window[0].tbase.csr = virt_to_phys(hose->sg_isa->ptes); + + pci->pci_window[1].wbase.csr = 0x40000000 | 1; + pci->pci_window[1].wmask.csr = (0x40000000 -1) & 0xfff00000; + pci->pci_window[1].tbase.csr = 0; + + pci->pci_window[2].wbase.csr = 0x80000000 | 1; + pci->pci_window[2].wmask.csr = (0x40000000 -1) & 0xfff00000; + pci->pci_window[2].tbase.csr = 0x40000000; + + pci->pci_window[3].wbase.csr = hose->sg_pci->dma_base | 3; + pci->pci_window[3].wmask.csr = (hose->sg_pci->size - 1) & 0xfff00000; + pci->pci_window[3].tbase.csr = virt_to_phys(hose->sg_pci->ptes); + + wildfire_pci_tbi(hose, 0, 0); /* Flush TLB at the end. */ +} + +void __init +wildfire_init_pca(int qbbno, int pcano) +{ + + /* Test for PCA existence first. */ + if (!WILDFIRE_PCA_EXISTS(qbbno, pcano)) + return; + +#if DEBUG_DUMP_REGS + wildfire_dump_pca_regs(qbbno, pcano); +#endif + + /* Do both hoses of the PCA. */ + wildfire_init_hose(qbbno, (pcano << 1) + 0); + wildfire_init_hose(qbbno, (pcano << 1) + 1); +} + +void __init +wildfire_init_qbb(int qbbno) +{ + int pcano; + + /* Test for QBB existence first. */ + if (!WILDFIRE_QBB_EXISTS(qbbno)) + return; + +#if DEBUG_DUMP_REGS + wildfire_dump_qsa_regs(qbbno); + wildfire_dump_qsd_regs(qbbno); + wildfire_dump_iop_regs(qbbno); + wildfire_dump_gp_regs(qbbno); +#endif + + /* Init all PCAs here. */ + for (pcano = 0; pcano < WILDFIRE_PCA_PER_QBB; pcano++) { + wildfire_init_pca(qbbno, pcano); + } +} + +void __init +wildfire_hardware_probe(void) +{ + unsigned long temp; + unsigned int hard_qbb, soft_qbb; + wildfire_fast_qsd *fast = WILDFIRE_fast_qsd(); + wildfire_qsd *qsd; + wildfire_qsa *qsa; + wildfire_iop *iop; + wildfire_gp *gp; + wildfire_ne *ne; + wildfire_fe *fe; + int i; + + temp = fast->qsd_whami.csr; +#if 0 + printk(KERN_ERR "fast QSD_WHAMI at base %p is 0x%lx\n", fast, temp); +#endif + + hard_qbb = (temp >> 8) & 7; + soft_qbb = (temp >> 4) & 7; + + /* Init the HW configuration variables. */ + wildfire_hard_qbb_mask = (1 << hard_qbb); + wildfire_soft_qbb_mask = (1 << soft_qbb); + + wildfire_gp_mask = 0; + wildfire_hs_mask = 0; + wildfire_iop_mask = 0; + wildfire_ior_mask = 0; + wildfire_pca_mask = 0; + + wildfire_cpu_mask = 0; + wildfire_mem_mask = 0; + + memset(wildfire_hard_qbb_map, QBB_MAP_EMPTY, WILDFIRE_MAX_QBB); + memset(wildfire_soft_qbb_map, QBB_MAP_EMPTY, WILDFIRE_MAX_QBB); + + /* First, determine which QBBs are present. */ + qsa = WILDFIRE_qsa(soft_qbb); + + temp = qsa->qsa_qbb_id.csr; +#if 0 + printk(KERN_ERR "QSA_QBB_ID at base %p is 0x%lx\n", qsa, temp); +#endif + + if (temp & 0x40) /* Is there an HS? */ + wildfire_hs_mask = 1; + + if (temp & 0x20) { /* Is there a GP? */ + gp = WILDFIRE_gp(soft_qbb); + temp = 0; + for (i = 0; i < 4; i++) { + temp |= gp->gpa_qbb_map[i].csr << (i * 8); +#if 0 + printk(KERN_ERR "GPA_QBB_MAP[%d] at base %p is 0x%lx\n", + i, gp, temp); +#endif + } + + for (hard_qbb = 0; hard_qbb < WILDFIRE_MAX_QBB; hard_qbb++) { + if (temp & 8) { /* Is there a QBB? */ + soft_qbb = temp & 7; + wildfire_hard_qbb_mask |= (1 << hard_qbb); + wildfire_soft_qbb_mask |= (1 << soft_qbb); + } + temp >>= 4; + } + wildfire_gp_mask = wildfire_soft_qbb_mask; + } + + /* Next determine each QBBs resources. 
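The 32-bit map assembled from GPA_QBB_MAP above carries one 4-bit descriptor per hard QBB: bit 3 flags presence, bits 2:0 give the soft QBB number. A hedged decoding sketch (qbb_map_entry is hypothetical):

        // decode the descriptor for one hard QBB out of the assembled map
        static int qbb_map_entry(unsigned long map, int hard_qbb,
                                 int *soft_qbb)
        {
                unsigned long nib = (map >> (hard_qbb * 4)) & 0xf;
                if (!(nib & 8))
                        return 0;       // no QBB behind this hard id
                *soft_qbb = nib & 7;
                return 1;
        }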
*/ + for (soft_qbb = 0; soft_qbb < WILDFIRE_MAX_QBB; soft_qbb++) { + if (WILDFIRE_QBB_EXISTS(soft_qbb)) { + qsd = WILDFIRE_qsd(soft_qbb); + temp = qsd->qsd_whami.csr; +#if 0 + printk(KERN_ERR "QSD_WHAMI at base %p is 0x%lx\n", qsd, temp); +#endif + hard_qbb = (temp >> 8) & 7; + wildfire_hard_qbb_map[hard_qbb] = soft_qbb; + wildfire_soft_qbb_map[soft_qbb] = hard_qbb; + + qsa = WILDFIRE_qsa(soft_qbb); + temp = qsa->qsa_qbb_pop[0].csr; +#if 0 + printk(KERN_ERR "QSA_QBB_POP_0 at base %p is 0x%lx\n", qsa, temp); +#endif + wildfire_cpu_mask |= ((temp >> 0) & 0xf) << (soft_qbb << 2); + wildfire_mem_mask |= ((temp >> 4) & 0xf) << (soft_qbb << 2); + + temp = qsa->qsa_qbb_pop[1].csr; +#if 0 + printk(KERN_ERR "QSA_QBB_POP_1 at base %p is 0x%lx\n", qsa, temp); +#endif + wildfire_iop_mask |= (1 << soft_qbb); + wildfire_ior_mask |= ((temp >> 4) & 0xf) << (soft_qbb << 2); + + temp = qsa->qsa_qbb_id.csr; +#if 0 + printk(KERN_ERR "QSA_QBB_ID at %p is 0x%lx\n", qsa, temp); +#endif + if (temp & 0x20) + wildfire_gp_mask |= (1 << soft_qbb); + + /* Probe for PCA existence here. */ + for (i = 0; i < WILDFIRE_PCA_PER_QBB; i++) { + iop = WILDFIRE_iop(soft_qbb); + ne = WILDFIRE_ne(soft_qbb, i); + fe = WILDFIRE_fe(soft_qbb, i); + + if ((iop->iop_hose[i].init.csr & 1) == 1 && + ((ne->ne_what_am_i.csr & 0xf00000300UL) == 0x100000300UL) && + ((fe->fe_what_am_i.csr & 0xf00000300UL) == 0x100000200UL)) + { + wildfire_pca_mask |= 1 << ((soft_qbb << 2) + i); + } + } + + } + } +#if DEBUG_DUMP_CONFIG + wildfire_dump_hardware_config(); +#endif +} + +void __init +wildfire_init_arch(void) +{ + int qbbno; + + /* With multiple PCI buses, we play with I/O as physical addrs. */ + ioport_resource.end = ~0UL; + + + /* Probe the hardware for info about configuration. */ + wildfire_hardware_probe(); + + /* Now init all the found QBBs. */ + for (qbbno = 0; qbbno < WILDFIRE_MAX_QBB; qbbno++) { + wildfire_init_qbb(qbbno); + } + + /* Normal direct PCI DMA mapping. */ + __direct_map_base = 0x40000000UL; + __direct_map_size = 0x80000000UL; +} + +void +wildfire_machine_check(unsigned long vector, unsigned long la_ptr, + struct pt_regs * regs) +{ + mb(); + mb(); /* magic */ + draina(); + /* FIXME: clear pci errors */ + wrmces(0x7); + mb(); + + process_mcheck_info(vector, la_ptr, regs, "WILDFIRE", + mcheck_expected(smp_processor_id())); +} + +void +wildfire_kill_arch(int mode) +{ +} + +void +wildfire_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end) +{ + int qbbno = hose->index >> 3; + int hoseno = hose->index & 7; + wildfire_pci *pci = WILDFIRE_pci(qbbno, hoseno); + + mb(); + pci->pci_flush_tlb.csr; /* reading does the trick */ +} + +static int +mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where, + unsigned long *pci_addr, unsigned char *type1) +{ + struct pci_controller *hose = pbus->sysdata; + unsigned long addr; + u8 bus = pbus->number; + + DBG_CFG(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x, " + "pci_addr=0x%p, type1=0x%p)\n", + bus, device_fn, where, pci_addr, type1)); + + if (!pbus->parent) /* No parent means peer PCI bus. 
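A reminder on the device_fn parameter: it packs slot and function exactly as PCI_DEVFN() in <linux/pci.h> does, i.e. (slot << 3) | func. So reading the vendor/device ID dword of slot 4, function 0 through the ops defined below would be, schematically (values hypothetical):

        u32 id;   // low 16 bits: vendor, high 16 bits: device
        wildfire_read_config(bus, PCI_DEVFN(4, 0), 0x00, 4, &id);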
*/ + bus = 0; + *type1 = (bus != 0); + + addr = (bus << 16) | (device_fn << 8) | where; + addr |= hose->config_space_base; + + *pci_addr = addr; + DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr)); + return 0; +} + +static int +wildfire_read_config(struct pci_bus *bus, unsigned int devfn, int where, + int size, u32 *value) +{ + unsigned long addr; + unsigned char type1; + + if (mk_conf_addr(bus, devfn, where, &addr, &type1)) + return PCIBIOS_DEVICE_NOT_FOUND; + + switch (size) { + case 1: + *value = __kernel_ldbu(*(vucp)addr); + break; + case 2: + *value = __kernel_ldwu(*(vusp)addr); + break; + case 4: + *value = *(vuip)addr; + break; + } + + return PCIBIOS_SUCCESSFUL; +} + +static int +wildfire_write_config(struct pci_bus *bus, unsigned int devfn, int where, + int size, u32 value) +{ + unsigned long addr; + unsigned char type1; + + if (mk_conf_addr(bus, devfn, where, &addr, &type1)) + return PCIBIOS_DEVICE_NOT_FOUND; + + switch (size) { + case 1: + __kernel_stb(value, *(vucp)addr); + mb(); + __kernel_ldbu(*(vucp)addr); + break; + case 2: + __kernel_stw(value, *(vusp)addr); + mb(); + __kernel_ldwu(*(vusp)addr); + break; + case 4: + *(vuip)addr = value; + mb(); + *(vuip)addr; + break; + } + + return PCIBIOS_SUCCESSFUL; +} + +struct pci_ops wildfire_pci_ops = +{ + .read = wildfire_read_config, + .write = wildfire_write_config, +}; + + +/* + * NUMA Support + */ +int wildfire_pa_to_nid(unsigned long pa) +{ + return pa >> 36; +} + +int wildfire_cpuid_to_nid(int cpuid) +{ + /* assume 4 CPUs per node */ + return cpuid >> 2; +} + +unsigned long wildfire_node_mem_start(int nid) +{ + /* 64GB per node */ + return (unsigned long)nid * (64UL * 1024 * 1024 * 1024); +} + +unsigned long wildfire_node_mem_size(int nid) +{ + /* 64GB per node */ + return 64UL * 1024 * 1024 * 1024; +} + +#if DEBUG_DUMP_REGS + +static void __init +wildfire_dump_pci_regs(int qbbno, int hoseno) +{ + wildfire_pci *pci = WILDFIRE_pci(qbbno, hoseno); + int i; + + printk(KERN_ERR "PCI registers for QBB %d hose %d (%p)\n", + qbbno, hoseno, pci); + + printk(KERN_ERR " PCI_IO_ADDR_EXT: 0x%16lx\n", + pci->pci_io_addr_ext.csr); + printk(KERN_ERR " PCI_CTRL: 0x%16lx\n", pci->pci_ctrl.csr); + printk(KERN_ERR " PCI_ERR_SUM: 0x%16lx\n", pci->pci_err_sum.csr); + printk(KERN_ERR " PCI_ERR_ADDR: 0x%16lx\n", pci->pci_err_addr.csr); + printk(KERN_ERR " PCI_STALL_CNT: 0x%16lx\n", pci->pci_stall_cnt.csr); + printk(KERN_ERR " PCI_PEND_INT: 0x%16lx\n", pci->pci_pend_int.csr); + printk(KERN_ERR " PCI_SENT_INT: 0x%16lx\n", pci->pci_sent_int.csr); + + printk(KERN_ERR " DMA window registers for QBB %d hose %d (%p)\n", + qbbno, hoseno, pci); + for (i = 0; i < 4; i++) { + printk(KERN_ERR " window %d: 0x%16lx 0x%16lx 0x%16lx\n", i, + pci->pci_window[i].wbase.csr, + pci->pci_window[i].wmask.csr, + pci->pci_window[i].tbase.csr); + } + printk(KERN_ERR "\n"); +} + +static void __init +wildfire_dump_pca_regs(int qbbno, int pcano) +{ + wildfire_pca *pca = WILDFIRE_pca(qbbno, pcano); + int i; + + printk(KERN_ERR "PCA registers for QBB %d PCA %d (%p)\n", + qbbno, pcano, pca); + + printk(KERN_ERR " PCA_WHAT_AM_I: 0x%16lx\n", pca->pca_what_am_i.csr); + printk(KERN_ERR " PCA_ERR_SUM: 0x%16lx\n", pca->pca_err_sum.csr); + printk(KERN_ERR " PCA_PEND_INT: 0x%16lx\n", pca->pca_pend_int.csr); + printk(KERN_ERR " PCA_SENT_INT: 0x%16lx\n", pca->pca_sent_int.csr); + printk(KERN_ERR " PCA_STDIO_EL: 0x%16lx\n", + pca->pca_stdio_edge_level.csr); + + printk(KERN_ERR " PCA target registers for QBB %d PCA %d (%p)\n", + qbbno, pcano, pca); + for (i = 0; i < 4; i++) { + 
printk(KERN_ERR " target %d: 0x%16lx 0x%16lx\n", i, + pca->pca_int[i].target.csr, + pca->pca_int[i].enable.csr); + } + + printk(KERN_ERR "\n"); +} + +static void __init +wildfire_dump_qsa_regs(int qbbno) +{ + wildfire_qsa *qsa = WILDFIRE_qsa(qbbno); + int i; + + printk(KERN_ERR "QSA registers for QBB %d (%p)\n", qbbno, qsa); + + printk(KERN_ERR " QSA_QBB_ID: 0x%16lx\n", qsa->qsa_qbb_id.csr); + printk(KERN_ERR " QSA_PORT_ENA: 0x%16lx\n", qsa->qsa_port_ena.csr); + printk(KERN_ERR " QSA_REF_INT: 0x%16lx\n", qsa->qsa_ref_int.csr); + + for (i = 0; i < 5; i++) + printk(KERN_ERR " QSA_CONFIG_%d: 0x%16lx\n", + i, qsa->qsa_config[i].csr); + + for (i = 0; i < 2; i++) + printk(KERN_ERR " QSA_QBB_POP_%d: 0x%16lx\n", + i, qsa->qsa_qbb_pop[0].csr); + + printk(KERN_ERR "\n"); +} + +static void __init +wildfire_dump_qsd_regs(int qbbno) +{ + wildfire_qsd *qsd = WILDFIRE_qsd(qbbno); + + printk(KERN_ERR "QSD registers for QBB %d (%p)\n", qbbno, qsd); + + printk(KERN_ERR " QSD_WHAMI: 0x%16lx\n", qsd->qsd_whami.csr); + printk(KERN_ERR " QSD_REV: 0x%16lx\n", qsd->qsd_rev.csr); + printk(KERN_ERR " QSD_PORT_PRESENT: 0x%16lx\n", + qsd->qsd_port_present.csr); + printk(KERN_ERR " QSD_PORT_ACTUVE: 0x%16lx\n", + qsd->qsd_port_active.csr); + printk(KERN_ERR " QSD_FAULT_ENA: 0x%16lx\n", + qsd->qsd_fault_ena.csr); + printk(KERN_ERR " QSD_CPU_INT_ENA: 0x%16lx\n", + qsd->qsd_cpu_int_ena.csr); + printk(KERN_ERR " QSD_MEM_CONFIG: 0x%16lx\n", + qsd->qsd_mem_config.csr); + printk(KERN_ERR " QSD_ERR_SUM: 0x%16lx\n", + qsd->qsd_err_sum.csr); + + printk(KERN_ERR "\n"); +} + +static void __init +wildfire_dump_iop_regs(int qbbno) +{ + wildfire_iop *iop = WILDFIRE_iop(qbbno); + int i; + + printk(KERN_ERR "IOP registers for QBB %d (%p)\n", qbbno, iop); + + printk(KERN_ERR " IOA_CONFIG: 0x%16lx\n", iop->ioa_config.csr); + printk(KERN_ERR " IOD_CONFIG: 0x%16lx\n", iop->iod_config.csr); + printk(KERN_ERR " IOP_SWITCH_CREDITS: 0x%16lx\n", + iop->iop_switch_credits.csr); + printk(KERN_ERR " IOP_HOSE_CREDITS: 0x%16lx\n", + iop->iop_hose_credits.csr); + + for (i = 0; i < 4; i++) + printk(KERN_ERR " IOP_HOSE_%d_INIT: 0x%16lx\n", + i, iop->iop_hose[i].init.csr); + for (i = 0; i < 4; i++) + printk(KERN_ERR " IOP_DEV_INT_TARGET_%d: 0x%16lx\n", + i, iop->iop_dev_int[i].target.csr); + + printk(KERN_ERR "\n"); +} + +static void __init +wildfire_dump_gp_regs(int qbbno) +{ + wildfire_gp *gp = WILDFIRE_gp(qbbno); + int i; + + printk(KERN_ERR "GP registers for QBB %d (%p)\n", qbbno, gp); + for (i = 0; i < 4; i++) + printk(KERN_ERR " GPA_QBB_MAP_%d: 0x%16lx\n", + i, gp->gpa_qbb_map[i].csr); + + printk(KERN_ERR " GPA_MEM_POP_MAP: 0x%16lx\n", + gp->gpa_mem_pop_map.csr); + printk(KERN_ERR " GPA_SCRATCH: 0x%16lx\n", gp->gpa_scratch.csr); + printk(KERN_ERR " GPA_DIAG: 0x%16lx\n", gp->gpa_diag.csr); + printk(KERN_ERR " GPA_CONFIG_0: 0x%16lx\n", gp->gpa_config_0.csr); + printk(KERN_ERR " GPA_INIT_ID: 0x%16lx\n", gp->gpa_init_id.csr); + printk(KERN_ERR " GPA_CONFIG_2: 0x%16lx\n", gp->gpa_config_2.csr); + + printk(KERN_ERR "\n"); +} +#endif /* DUMP_REGS */ + +#if DEBUG_DUMP_CONFIG +static void __init +wildfire_dump_hardware_config(void) +{ + int i; + + printk(KERN_ERR "Probed Hardware Configuration\n"); + + printk(KERN_ERR " hard_qbb_mask: 0x%16lx\n", wildfire_hard_qbb_mask); + printk(KERN_ERR " soft_qbb_mask: 0x%16lx\n", wildfire_soft_qbb_mask); + + printk(KERN_ERR " gp_mask: 0x%16lx\n", wildfire_gp_mask); + printk(KERN_ERR " hs_mask: 0x%16lx\n", wildfire_hs_mask); + printk(KERN_ERR " iop_mask: 0x%16lx\n", wildfire_iop_mask); + printk(KERN_ERR " ior_mask: 
0x%16lx\n", wildfire_ior_mask); + printk(KERN_ERR " pca_mask: 0x%16lx\n", wildfire_pca_mask); + + printk(KERN_ERR " cpu_mask: 0x%16lx\n", wildfire_cpu_mask); + printk(KERN_ERR " mem_mask: 0x%16lx\n", wildfire_mem_mask); + + printk(" hard_qbb_map: "); + for (i = 0; i < WILDFIRE_MAX_QBB; i++) + if (wildfire_hard_qbb_map[i] == QBB_MAP_EMPTY) + printk("--- "); + else + printk("%3d ", wildfire_hard_qbb_map[i]); + printk("\n"); + + printk(" soft_qbb_map: "); + for (i = 0; i < WILDFIRE_MAX_QBB; i++) + if (wildfire_soft_qbb_map[i] == QBB_MAP_EMPTY) + printk("--- "); + else + printk("%3d ", wildfire_soft_qbb_map[i]); + printk("\n"); +} +#endif /* DUMP_CONFIG */ diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S new file mode 100644 index 0000000..f0927ee --- /dev/null +++ b/arch/alpha/kernel/entry.S @@ -0,0 +1,957 @@ +/* + * arch/alpha/kernel/entry.S + * + * Kernel entry-points. + */ + +#include <linux/config.h> +#include <asm/asm_offsets.h> +#include <asm/thread_info.h> +#include <asm/pal.h> +#include <asm/errno.h> +#include <asm/unistd.h> + + .text + .set noat + +/* Stack offsets. */ +#define SP_OFF 184 +#define SWITCH_STACK_SIZE 320 + +/* + * This defines the normal kernel pt-regs layout. + * + * regs 9-15 preserved by C code + * regs 16-18 saved by PAL-code + * regs 29-30 saved and set up by PAL-code + * JRP - Save regs 16-18 in a special area of the stack, so that + * the palcode-provided values are available to the signal handler. + */ + +#define SAVE_ALL \ + subq $sp, SP_OFF, $sp; \ + stq $0, 0($sp); \ + stq $1, 8($sp); \ + stq $2, 16($sp); \ + stq $3, 24($sp); \ + stq $4, 32($sp); \ + stq $28, 144($sp); \ + lda $2, alpha_mv; \ + stq $5, 40($sp); \ + stq $6, 48($sp); \ + stq $7, 56($sp); \ + stq $8, 64($sp); \ + stq $19, 72($sp); \ + stq $20, 80($sp); \ + stq $21, 88($sp); \ + ldq $2, HAE_CACHE($2); \ + stq $22, 96($sp); \ + stq $23, 104($sp); \ + stq $24, 112($sp); \ + stq $25, 120($sp); \ + stq $26, 128($sp); \ + stq $27, 136($sp); \ + stq $2, 152($sp); \ + stq $16, 160($sp); \ + stq $17, 168($sp); \ + stq $18, 176($sp) + +#define RESTORE_ALL \ + lda $19, alpha_mv; \ + ldq $0, 0($sp); \ + ldq $1, 8($sp); \ + ldq $2, 16($sp); \ + ldq $3, 24($sp); \ + ldq $21, 152($sp); \ + ldq $20, HAE_CACHE($19); \ + ldq $4, 32($sp); \ + ldq $5, 40($sp); \ + ldq $6, 48($sp); \ + ldq $7, 56($sp); \ + subq $20, $21, $20; \ + ldq $8, 64($sp); \ + beq $20, 99f; \ + ldq $20, HAE_REG($19); \ + stq $21, HAE_CACHE($19); \ + stq $21, 0($20); \ + ldq $0, 0($sp); \ + ldq $1, 8($sp); \ +99:; \ + ldq $19, 72($sp); \ + ldq $20, 80($sp); \ + ldq $21, 88($sp); \ + ldq $22, 96($sp); \ + ldq $23, 104($sp); \ + ldq $24, 112($sp); \ + ldq $25, 120($sp); \ + ldq $26, 128($sp); \ + ldq $27, 136($sp); \ + ldq $28, 144($sp); \ + addq $sp, SP_OFF, $sp + +/* + * Non-syscall kernel entry points. + */ + + .align 4 + .globl entInt + .ent entInt +entInt: + SAVE_ALL + lda $8, 0x3fff + lda $26, ret_from_sys_call + bic $sp, $8, $8 + mov $sp, $19 + jsr $31, do_entInt +.end entInt + + .align 4 + .globl entArith + .ent entArith +entArith: + SAVE_ALL + lda $8, 0x3fff + lda $26, ret_from_sys_call + bic $sp, $8, $8 + mov $sp, $18 + jsr $31, do_entArith +.end entArith + + .align 4 + .globl entMM + .ent entMM +entMM: + SAVE_ALL +/* save $9 - $15 so the inline exception code can manipulate them. 
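For orientation, the SP_OFF(184)-byte frame that SAVE_ALL builds above lines up, slot for slot, with struct pt_regs in <asm/ptrace.h>; a C view of the layout (byte offsets from $sp):

        struct frame {
                unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8; //   0..64
                unsigned long r19, r20, r21, r22, r23, r24;       //  72..112
                unsigned long r25, r26, r27, r28;                 // 120..144
                unsigned long hae;                                // 152
                unsigned long trap_a0, trap_a1, trap_a2;          // 160..176
                // ps, pc, gp, r16-r18 sit above at 184.., pushed by PALcode
        };

This is why the handlers below refer to fixed offsets such as 72($sp) for a3 ($19) and 80($sp) for a4 ($20).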
*/ + subq $sp, 56, $sp + stq $9, 0($sp) + stq $10, 8($sp) + stq $11, 16($sp) + stq $12, 24($sp) + stq $13, 32($sp) + stq $14, 40($sp) + stq $15, 48($sp) + addq $sp, 56, $19 +/* handle the fault */ + lda $8, 0x3fff + bic $sp, $8, $8 + jsr $26, do_page_fault +/* reload the registers after the exception code played. */ + ldq $9, 0($sp) + ldq $10, 8($sp) + ldq $11, 16($sp) + ldq $12, 24($sp) + ldq $13, 32($sp) + ldq $14, 40($sp) + ldq $15, 48($sp) + addq $sp, 56, $sp +/* finish up the syscall as normal. */ + br ret_from_sys_call +.end entMM + + .align 4 + .globl entIF + .ent entIF +entIF: + SAVE_ALL + lda $8, 0x3fff + lda $26, ret_from_sys_call + bic $sp, $8, $8 + mov $sp, $17 + jsr $31, do_entIF +.end entIF + + .align 4 + .globl entUna + .ent entUna +entUna: + lda $sp, -256($sp) + stq $0, 0($sp) + ldq $0, 256($sp) /* get PS */ + stq $1, 8($sp) + stq $2, 16($sp) + stq $3, 24($sp) + and $0, 8, $0 /* user mode? */ + stq $4, 32($sp) + bne $0, entUnaUser /* yup -> do user-level unaligned fault */ + stq $5, 40($sp) + stq $6, 48($sp) + stq $7, 56($sp) + stq $8, 64($sp) + stq $9, 72($sp) + stq $10, 80($sp) + stq $11, 88($sp) + stq $12, 96($sp) + stq $13, 104($sp) + stq $14, 112($sp) + stq $15, 120($sp) + /* 16-18 PAL-saved */ + stq $19, 152($sp) + stq $20, 160($sp) + stq $21, 168($sp) + stq $22, 176($sp) + stq $23, 184($sp) + stq $24, 192($sp) + stq $25, 200($sp) + stq $26, 208($sp) + stq $27, 216($sp) + stq $28, 224($sp) + stq $gp, 232($sp) + lda $8, 0x3fff + stq $31, 248($sp) + bic $sp, $8, $8 + jsr $26, do_entUna + ldq $0, 0($sp) + ldq $1, 8($sp) + ldq $2, 16($sp) + ldq $3, 24($sp) + ldq $4, 32($sp) + ldq $5, 40($sp) + ldq $6, 48($sp) + ldq $7, 56($sp) + ldq $8, 64($sp) + ldq $9, 72($sp) + ldq $10, 80($sp) + ldq $11, 88($sp) + ldq $12, 96($sp) + ldq $13, 104($sp) + ldq $14, 112($sp) + ldq $15, 120($sp) + /* 16-18 PAL-saved */ + ldq $19, 152($sp) + ldq $20, 160($sp) + ldq $21, 168($sp) + ldq $22, 176($sp) + ldq $23, 184($sp) + ldq $24, 192($sp) + ldq $25, 200($sp) + ldq $26, 208($sp) + ldq $27, 216($sp) + ldq $28, 224($sp) + ldq $gp, 232($sp) + lda $sp, 256($sp) + call_pal PAL_rti +.end entUna + + .align 4 + .ent entUnaUser +entUnaUser: + ldq $0, 0($sp) /* restore original $0 */ + lda $sp, 256($sp) /* pop entUna's stack frame */ + SAVE_ALL /* setup normal kernel stack */ + lda $sp, -56($sp) + stq $9, 0($sp) + stq $10, 8($sp) + stq $11, 16($sp) + stq $12, 24($sp) + stq $13, 32($sp) + stq $14, 40($sp) + stq $15, 48($sp) + lda $8, 0x3fff + addq $sp, 56, $19 + bic $sp, $8, $8 + jsr $26, do_entUnaUser + ldq $9, 0($sp) + ldq $10, 8($sp) + ldq $11, 16($sp) + ldq $12, 24($sp) + ldq $13, 32($sp) + ldq $14, 40($sp) + ldq $15, 48($sp) + lda $sp, 56($sp) + br ret_from_sys_call +.end entUnaUser + + .align 4 + .globl entDbg + .ent entDbg +entDbg: + SAVE_ALL + lda $8, 0x3fff + lda $26, ret_from_sys_call + bic $sp, $8, $8 + mov $sp, $16 + jsr $31, do_entDbg +.end entDbg + +/* + * The system call entry point is special. Most importantly, it looks + * like a function call to userspace as far as clobbered registers. We + * do preserve the argument registers (for syscall restarts) and $26 + * (for leaf syscall functions). + * + * So much for theory. We don't take advantage of this yet. + * + * Note that a0-a2 are not saved by PALcode as with the other entry points. 
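In C terms, the dispatch performed below is roughly the following (a sketch only; syscall_fn is a hypothetical typedef and the real code keeps everything in registers):

        long nr = regs->r0;                        // v0: syscall number
        syscall_fn fn = alpha_ni_syscall;          // default: -ENOSYS
        if (nr < NR_SYSCALLS)
                fn = sys_call_table[nr];           // s8addq: 8-byte entries
        long ret = fn(a0, a1, a2, a3, a4, a5);

When the low bit of TI_FLAGS is set (blbs $3, strace) the traced variant further down is taken instead.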
+ */ + + .align 4 + .globl entSys + .globl ret_from_sys_call + .ent entSys +entSys: + SAVE_ALL + lda $8, 0x3fff + bic $sp, $8, $8 + lda $4, NR_SYSCALLS($31) + stq $16, SP_OFF+24($sp) + lda $5, sys_call_table + lda $27, sys_ni_syscall + cmpult $0, $4, $4 + ldl $3, TI_FLAGS($8) + stq $17, SP_OFF+32($sp) + s8addq $0, $5, $5 + stq $18, SP_OFF+40($sp) + blbs $3, strace + beq $4, 1f + ldq $27, 0($5) +1: jsr $26, ($27), alpha_ni_syscall + ldgp $gp, 0($26) + blt $0, $syscall_error /* the call failed */ + stq $0, 0($sp) + stq $31, 72($sp) /* a3=0 => no error */ + + .align 4 +ret_from_sys_call: + cmovne $26, 0, $19 /* $19 = 0 => non-restartable */ + ldq $0, SP_OFF($sp) + and $0, 8, $0 + beq $0, restore_all +ret_from_reschedule: + /* Make sure need_resched and sigpending don't change between + sampling and the rti. */ + lda $16, 7 + call_pal PAL_swpipl + ldl $5, TI_FLAGS($8) + and $5, _TIF_WORK_MASK, $2 + bne $5, work_pending +restore_all: + RESTORE_ALL + call_pal PAL_rti + + .align 3 +$syscall_error: + /* + * Some system calls (e.g., ptrace) can return arbitrary + * values which might normally be mistaken as error numbers. + * Those functions must zero $0 (v0) directly in the stack + * frame to indicate that a negative return value wasn't an + * error number.. + */ + ldq $19, 0($sp) /* old syscall nr (zero if success) */ + beq $19, $ret_success + + ldq $20, 72($sp) /* .. and this a3 */ + subq $31, $0, $0 /* with error in v0 */ + addq $31, 1, $1 /* set a3 for errno return */ + stq $0, 0($sp) + mov $31, $26 /* tell "ret_from_sys_call" we can restart */ + stq $1, 72($sp) /* a3 for return */ + br ret_from_sys_call + +$ret_success: + stq $0, 0($sp) + stq $31, 72($sp) /* a3=0 => no error */ + br ret_from_sys_call +.end entSys + +/* + * Do all cleanup when returning from all interrupts and system calls. + * + * Arguments: + * $5: TI_FLAGS. + * $8: current. + * $19: The old syscall number, or zero if this is not a return + * from a syscall that errored and is possibly restartable. + * $20: Error indication. + */ + + .align 4 + .ent work_pending +work_pending: + and $5, _TIF_NEED_RESCHED, $2 + beq $2, $work_notifysig + +$work_resched: + subq $sp, 16, $sp + stq $19, 0($sp) /* save syscall nr */ + stq $20, 8($sp) /* and error indication (a3) */ + jsr $26, schedule + ldq $19, 0($sp) + ldq $20, 8($sp) + addq $sp, 16, $sp + /* Make sure need_resched and sigpending don't change between + sampling and the rti. */ + lda $16, 7 + call_pal PAL_swpipl + ldl $5, TI_FLAGS($8) + and $5, _TIF_WORK_MASK, $2 + beq $2, restore_all + and $5, _TIF_NEED_RESCHED, $2 + bne $2, $work_resched + +$work_notifysig: + mov $sp, $17 + br $1, do_switch_stack + mov $5, $21 + mov $sp, $18 + mov $31, $16 + jsr $26, do_notify_resume + bsr $1, undo_switch_stack + br restore_all +.end work_pending + +/* + * PTRACE syscall handler + */ + + .align 4 + .ent strace +strace: + /* set up signal stack, call syscall_trace */ + bsr $1, do_switch_stack + jsr $26, syscall_trace + bsr $1, undo_switch_stack + + /* get the system call number and the arguments back.. */ + ldq $0, 0($sp) + ldq $16, SP_OFF+24($sp) + ldq $17, SP_OFF+32($sp) + ldq $18, SP_OFF+40($sp) + ldq $19, 72($sp) + ldq $20, 80($sp) + ldq $21, 88($sp) + + /* get the system call pointer.. */ + lda $1, NR_SYSCALLS($31) + lda $2, sys_call_table + lda $27, alpha_ni_syscall + cmpult $0, $1, $1 + s8addq $0, $2, $2 + beq $1, 1f + ldq $27, 0($2) +1: jsr $26, ($27), sys_gettimeofday + ldgp $gp, 0($26) + + /* check return.. 
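The return convention implemented here, written out in C (a sketch; is_error stands in for the "negative value is an errno" test, and a zeroed r0 slot is how calls like ptrace opt out of it):

        if (is_error(v0) && regs->r0 != 0) {
                regs->r0 = -v0;        // positive errno in v0
                regs->r19 = 1;         // a3 = 1: error
        } else {
                regs->r0 = v0;         // result
                regs->r19 = 0;         // a3 = 0: success
        }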
*/ + blt $0, $strace_error /* the call failed */ + stq $31, 72($sp) /* a3=0 => no error */ +$strace_success: + stq $0, 0($sp) /* save return value */ + + bsr $1, do_switch_stack + jsr $26, syscall_trace + bsr $1, undo_switch_stack + br $31, ret_from_sys_call + + .align 3 +$strace_error: + ldq $19, 0($sp) /* old syscall nr (zero if success) */ + beq $19, $strace_success + ldq $20, 72($sp) /* .. and this a3 */ + + subq $31, $0, $0 /* with error in v0 */ + addq $31, 1, $1 /* set a3 for errno return */ + stq $0, 0($sp) + stq $1, 72($sp) /* a3 for return */ + + bsr $1, do_switch_stack + mov $19, $9 /* save old syscall number */ + mov $20, $10 /* save old a3 */ + jsr $26, syscall_trace + mov $9, $19 + mov $10, $20 + bsr $1, undo_switch_stack + + mov $31, $26 /* tell "ret_from_sys_call" we can restart */ + br ret_from_sys_call +.end strace + +/* + * Save and restore the switch stack -- aka the balance of the user context. + */ + + .align 4 + .ent do_switch_stack +do_switch_stack: + lda $sp, -SWITCH_STACK_SIZE($sp) + stq $9, 0($sp) + stq $10, 8($sp) + stq $11, 16($sp) + stq $12, 24($sp) + stq $13, 32($sp) + stq $14, 40($sp) + stq $15, 48($sp) + stq $26, 56($sp) + stt $f0, 64($sp) + stt $f1, 72($sp) + stt $f2, 80($sp) + stt $f3, 88($sp) + stt $f4, 96($sp) + stt $f5, 104($sp) + stt $f6, 112($sp) + stt $f7, 120($sp) + stt $f8, 128($sp) + stt $f9, 136($sp) + stt $f10, 144($sp) + stt $f11, 152($sp) + stt $f12, 160($sp) + stt $f13, 168($sp) + stt $f14, 176($sp) + stt $f15, 184($sp) + stt $f16, 192($sp) + stt $f17, 200($sp) + stt $f18, 208($sp) + stt $f19, 216($sp) + stt $f20, 224($sp) + stt $f21, 232($sp) + stt $f22, 240($sp) + stt $f23, 248($sp) + stt $f24, 256($sp) + stt $f25, 264($sp) + stt $f26, 272($sp) + stt $f27, 280($sp) + mf_fpcr $f0 # get fpcr + stt $f28, 288($sp) + stt $f29, 296($sp) + stt $f30, 304($sp) + stt $f0, 312($sp) # save fpcr in slot of $f31 + ldt $f0, 64($sp) # dont let "do_switch_stack" change fp state. + ret $31, ($1), 1 +.end do_switch_stack + + .align 4 + .ent undo_switch_stack +undo_switch_stack: + ldq $9, 0($sp) + ldq $10, 8($sp) + ldq $11, 16($sp) + ldq $12, 24($sp) + ldq $13, 32($sp) + ldq $14, 40($sp) + ldq $15, 48($sp) + ldq $26, 56($sp) + ldt $f30, 312($sp) # get saved fpcr + ldt $f0, 64($sp) + ldt $f1, 72($sp) + ldt $f2, 80($sp) + ldt $f3, 88($sp) + mt_fpcr $f30 # install saved fpcr + ldt $f4, 96($sp) + ldt $f5, 104($sp) + ldt $f6, 112($sp) + ldt $f7, 120($sp) + ldt $f8, 128($sp) + ldt $f9, 136($sp) + ldt $f10, 144($sp) + ldt $f11, 152($sp) + ldt $f12, 160($sp) + ldt $f13, 168($sp) + ldt $f14, 176($sp) + ldt $f15, 184($sp) + ldt $f16, 192($sp) + ldt $f17, 200($sp) + ldt $f18, 208($sp) + ldt $f19, 216($sp) + ldt $f20, 224($sp) + ldt $f21, 232($sp) + ldt $f22, 240($sp) + ldt $f23, 248($sp) + ldt $f24, 256($sp) + ldt $f25, 264($sp) + ldt $f26, 272($sp) + ldt $f27, 280($sp) + ldt $f28, 288($sp) + ldt $f29, 296($sp) + ldt $f30, 304($sp) + lda $sp, SWITCH_STACK_SIZE($sp) + ret $31, ($1), 1 +.end undo_switch_stack + +/* + * The meat of the context switch code. + */ + + .align 4 + .globl alpha_switch_to + .ent alpha_switch_to +alpha_switch_to: + .prologue 0 + bsr $1, do_switch_stack + call_pal PAL_swpctx + lda $8, 0x3fff + bsr $1, undo_switch_stack + bic $sp, $8, $8 + mov $17, $0 + ret +.end alpha_switch_to + +/* + * New processes begin life here. 
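One more layout note before the fork paths: the 320-byte frame kept by do_switch_stack/undo_switch_stack above is the callee-saved half of the user context; as a C view (byte offsets in the comments):

        struct sw_frame {
                unsigned long r9, r10, r11, r12, r13, r14, r15, r26; //  0..56
                unsigned long fp[31];   // $f0..$f30                    64..304
                unsigned long fpcr;     // saved in the $f31 slot       312
        };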
+ */ + + .globl ret_from_fork + .align 4 + .ent ret_from_fork +ret_from_fork: + lda $26, ret_from_sys_call + mov $17, $16 + jmp $31, schedule_tail +.end ret_from_fork + +/* + * kernel_thread(fn, arg, clone_flags) + */ + .align 4 + .globl kernel_thread + .ent kernel_thread +kernel_thread: + /* We can be called from a module. */ + ldgp $gp, 0($27) + .prologue 1 + subq $sp, SP_OFF+6*8, $sp + br $1, 2f /* load start address */ + + /* We've now "returned" from a fake system call. */ + unop + blt $0, 1f /* error? */ + ldi $1, 0x3fff + beq $20, 1f /* parent or child? */ + + bic $sp, $1, $8 /* in child. */ + jsr $26, ($27) + ldgp $gp, 0($26) + mov $0, $16 + mov $31, $26 + jmp $31, sys_exit + +1: ret /* in parent. */ + + .align 4 +2: /* Fake a system call stack frame, as we can't do system calls + from kernel space. Note that we store FN and ARG as they + need to be set up in the child for the call. Also store $8 + and $26 for use in the parent. */ + stq $31, SP_OFF($sp) /* ps */ + stq $1, SP_OFF+8($sp) /* pc */ + stq $gp, SP_OFF+16($sp) /* gp */ + stq $16, 136($sp) /* $27; FN for child */ + stq $17, SP_OFF+24($sp) /* $16; ARG for child */ + stq $8, 64($sp) /* $8 */ + stq $26, 128($sp) /* $26 */ + /* Avoid the HAE being gratuitously wrong, to avoid restoring it. */ + ldq $2, alpha_mv+HAE_CACHE + stq $2, 152($sp) /* HAE */ + + /* Shuffle FLAGS to the front; add CLONE_VM. */ + ldi $1, CLONE_VM|CLONE_UNTRACED + or $18, $1, $16 + bsr $26, sys_clone + + /* We don't actually care for a3 success widgetry in the kernel. + Not for positive errno values. */ + stq $0, 0($sp) /* $0 */ + br restore_all +.end kernel_thread + +/* + * execve(path, argv, envp) + */ + .align 4 + .globl execve + .ent execve +execve: + /* We can be called from a module. */ + ldgp $gp, 0($27) + lda $sp, -(32+SIZEOF_PT_REGS+8)($sp) + .frame $sp, 32+SIZEOF_PT_REGS+8, $26, 0 + stq $26, 0($sp) + stq $16, 8($sp) + stq $17, 16($sp) + stq $18, 24($sp) + .prologue 1 + + lda $16, 32($sp) + lda $17, 0 + lda $18, SIZEOF_PT_REGS + bsr $26, memset !samegp + + /* Avoid the HAE being gratuitously wrong, which would cause us + to do the whole turn off interrupts thing and restore it. */ + ldq $2, alpha_mv+HAE_CACHE + stq $2, 152+32($sp) + + ldq $16, 8($sp) + ldq $17, 16($sp) + ldq $18, 24($sp) + lda $19, 32($sp) + bsr $26, do_execve !samegp + + ldq $26, 0($sp) + bne $0, 1f /* error! */ + + /* Move the temporary pt_regs struct from its current location + to the top of the kernel stack frame. See copy_thread for + details for a normal process. */ + lda $16, 0x4000 - SIZEOF_PT_REGS($8) + lda $17, 32($sp) + lda $18, SIZEOF_PT_REGS + bsr $26, memmove !samegp + + /* Take that over as our new stack frame and visit userland! */ + lda $sp, 0x4000 - SIZEOF_PT_REGS($8) + br $31, ret_from_sys_call + +1: lda $sp, 32+SIZEOF_PT_REGS+8($sp) + ret +.end execve + + +/* + * Special system calls. Most of these are special in that they either + * have to play switch_stack games or in some way use the pt_regs struct. + */ + .align 4 + .globl sys_fork + .ent sys_fork +sys_fork: + .prologue 0 + mov $sp, $21 + bsr $1, do_switch_stack + bis $31, SIGCHLD, $16 + mov $31, $17 + mov $31, $18 + mov $31, $19 + mov $31, $20 + jsr $26, alpha_clone + bsr $1, undo_switch_stack + ret +.end sys_fork + + .align 4 + .globl sys_clone + .ent sys_clone +sys_clone: + .prologue 0 + mov $sp, $21 + bsr $1, do_switch_stack + /* $16, $17, $18, $19, $20 come from the user. 
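As with sys_fork above, the heavy lifting is in alpha_clone. For comparison, the kernel_thread trampoline earlier in this file boils down to this pseudo-C (sketch only; clone_call and pid_is_child are stand-ins, and the real code must fake a syscall frame because sys_clone returns through ret_from_sys_call):

        long pid = clone_call(flags | CLONE_VM | CLONE_UNTRACED);
        if (pid_is_child)              // a4 ($20) flags the child side
                sys_exit(fn(arg));     // run fn, exit with its result
        return pid;                    // parent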
*/ + jsr $26, alpha_clone + bsr $1, undo_switch_stack + ret +.end sys_clone + + .align 4 + .globl sys_vfork + .ent sys_vfork +sys_vfork: + .prologue 0 + mov $sp, $16 + bsr $1, do_switch_stack + jsr $26, alpha_vfork + bsr $1, undo_switch_stack + ret +.end sys_vfork + + .align 4 + .globl sys_sigreturn + .ent sys_sigreturn +sys_sigreturn: + .prologue 0 + mov $sp, $17 + lda $18, -SWITCH_STACK_SIZE($sp) + lda $sp, -SWITCH_STACK_SIZE($sp) + jsr $26, do_sigreturn + br $1, undo_switch_stack + br ret_from_sys_call +.end sys_sigreturn + + .align 4 + .globl sys_rt_sigreturn + .ent sys_rt_sigreturn +sys_rt_sigreturn: + .prologue 0 + mov $sp, $17 + lda $18, -SWITCH_STACK_SIZE($sp) + lda $sp, -SWITCH_STACK_SIZE($sp) + jsr $26, do_rt_sigreturn + br $1, undo_switch_stack + br ret_from_sys_call +.end sys_rt_sigreturn + + .align 4 + .globl sys_sigsuspend + .ent sys_sigsuspend +sys_sigsuspend: + .prologue 0 + mov $sp, $17 + br $1, do_switch_stack + mov $sp, $18 + subq $sp, 16, $sp + stq $26, 0($sp) + jsr $26, do_sigsuspend + ldq $26, 0($sp) + lda $sp, SWITCH_STACK_SIZE+16($sp) + ret +.end sys_sigsuspend + + .align 4 + .globl sys_rt_sigsuspend + .ent sys_rt_sigsuspend +sys_rt_sigsuspend: + .prologue 0 + mov $sp, $18 + br $1, do_switch_stack + mov $sp, $19 + subq $sp, 16, $sp + stq $26, 0($sp) + jsr $26, do_rt_sigsuspend + ldq $26, 0($sp) + lda $sp, SWITCH_STACK_SIZE+16($sp) + ret +.end sys_rt_sigsuspend + + .align 4 + .globl sys_sethae + .ent sys_sethae +sys_sethae: + .prologue 0 + stq $16, 152($sp) + ret +.end sys_sethae + + .align 4 + .globl osf_getpriority + .ent osf_getpriority +osf_getpriority: + lda $sp, -16($sp) + stq $26, 0($sp) + .prologue 0 + + jsr $26, sys_getpriority + + ldq $26, 0($sp) + blt $0, 1f + + /* Return value is the unbiased priority, i.e. 20 - prio. + This does result in negative return values, so signal + no error by writing into the R0 slot. */ + lda $1, 20 + stq $31, 16($sp) + subl $1, $0, $0 + unop + +1: lda $sp, 16($sp) + ret +.end osf_getpriority + + .align 4 + .globl sys_getxuid + .ent sys_getxuid +sys_getxuid: + .prologue 0 + ldq $2, TI_TASK($8) + ldl $0, TASK_UID($2) + ldl $1, TASK_EUID($2) + stq $1, 80($sp) + ret +.end sys_getxuid + + .align 4 + .globl sys_getxgid + .ent sys_getxgid +sys_getxgid: + .prologue 0 + ldq $2, TI_TASK($8) + ldl $0, TASK_GID($2) + ldl $1, TASK_EGID($2) + stq $1, 80($sp) + ret +.end sys_getxgid + + .align 4 + .globl sys_getxpid + .ent sys_getxpid +sys_getxpid: + .prologue 0 + ldq $2, TI_TASK($8) + + /* See linux/kernel/timer.c sys_getppid for discussion + about this loop. */ + ldq $3, TASK_REAL_PARENT($2) +1: ldl $1, TASK_TGID($3) +#ifdef CONFIG_SMP + mov $3, $4 + mb + ldq $3, TASK_REAL_PARENT($2) + cmpeq $3, $4, $4 + beq $4, 1b +#endif + stq $1, 80($sp) + ldl $0, TASK_TGID($2) + ret +.end sys_getxpid + + .align 4 + .globl sys_pipe + .ent sys_pipe +sys_pipe: + lda $sp, -16($sp) + stq $26, 0($sp) + .prologue 0 + + lda $16, 8($sp) + jsr $26, do_pipe + + ldq $26, 0($sp) + bne $0, 1f + + /* The return values are in $0 and $20. 
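That is, alpha's pipe(2) returns both descriptors in registers rather than through user memory; in C terms (sketch):

        int fd[2];
        if (do_pipe(fd) == 0) {         // fd[] lives at 8($sp) here
                regs->r0 = fd[0];       // v0, frame slot 0
                regs->r20 = fd[1];      // a4, frame slot 80
        }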
*/ + ldl $1, 12($sp) + ldl $0, 8($sp) + + stq $1, 80+16($sp) +1: lda $sp, 16($sp) + ret +.end sys_pipe + + .align 4 + .globl sys_ptrace + .ent sys_ptrace +sys_ptrace: + .prologue 0 + mov $sp, $20 + jmp $31, do_sys_ptrace +.end sys_ptrace + + .align 4 + .globl sys_execve + .ent sys_execve +sys_execve: + .prologue 0 + mov $sp, $19 + jmp $31, do_sys_execve +.end sys_execve + + .align 4 + .globl osf_sigprocmask + .ent osf_sigprocmask +osf_sigprocmask: + .prologue 0 + mov $sp, $18 + jmp $31, do_osf_sigprocmask +.end osf_sigprocmask + + .align 4 + .globl alpha_ni_syscall + .ent alpha_ni_syscall +alpha_ni_syscall: + .prologue 0 + /* Special because it also implements overflow handling via + syscall number 0. And if you recall, zero is a special + trigger for "not an error". Store large non-zero there. */ + lda $0, -ENOSYS + unop + stq $0, 0($sp) + ret +.end alpha_ni_syscall diff --git a/arch/alpha/kernel/err_common.c b/arch/alpha/kernel/err_common.c new file mode 100644 index 0000000..687580b --- /dev/null +++ b/arch/alpha/kernel/err_common.c @@ -0,0 +1,321 @@ +/* + * linux/arch/alpha/kernel/err_common.c + * + * Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation) + * + * Error handling code supporting Alpha systems + */ + +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/sched.h> + +#include <asm/io.h> +#include <asm/hwrpb.h> +#include <asm/smp.h> +#include <asm/err_common.h> + +#include "err_impl.h" +#include "proto.h" + +/* + * err_print_prefix -- error handling print routines should prefix + * all prints with this + */ +char *err_print_prefix = KERN_NOTICE; + + +/* + * Generic + */ +void +mchk_dump_mem(void *data, size_t length, char **annotation) +{ + unsigned long *ldata = data; + size_t i; + + for (i = 0; (i * sizeof(*ldata)) < length; i++) { + if (annotation && !annotation[i]) + annotation = NULL; + printk("%s %08x: %016lx %s\n", + err_print_prefix, + (unsigned)(i * sizeof(*ldata)), ldata[i], + annotation ? annotation[i] : ""); + } +} + +void +mchk_dump_logout_frame(struct el_common *mchk_header) +{ + printk("%s -- Frame Header --\n" + " Frame Size: %d (0x%x) bytes\n" + " Flags: %s%s\n" + " MCHK Code: 0x%x\n" + " Frame Rev: %d\n" + " Proc Offset: 0x%08x\n" + " Sys Offset: 0x%08x\n" + " -- Processor Region --\n", + err_print_prefix, + mchk_header->size, mchk_header->size, + mchk_header->retry ? "RETRY " : "", + mchk_header->err2 ? 
"SECOND_ERR " : "", + mchk_header->code, + mchk_header->frame_rev, + mchk_header->proc_offset, + mchk_header->sys_offset); + + mchk_dump_mem((void *) + ((unsigned long)mchk_header + mchk_header->proc_offset), + mchk_header->sys_offset - mchk_header->proc_offset, + NULL); + + printk("%s -- System Region --\n", err_print_prefix); + mchk_dump_mem((void *) + ((unsigned long)mchk_header + mchk_header->sys_offset), + mchk_header->size - mchk_header->sys_offset, + NULL); + printk("%s -- End of Frame --\n", err_print_prefix); +} + + +/* + * Console Data Log + */ +/* Data */ +static struct el_subpacket_handler *subpacket_handler_list = NULL; +static struct el_subpacket_annotation *subpacket_annotation_list = NULL; + +static struct el_subpacket * +el_process_header_subpacket(struct el_subpacket *header) +{ + union el_timestamp timestamp; + char *name = "UNKNOWN EVENT"; + int packet_count = 0; + int length = 0; + + if (header->class != EL_CLASS__HEADER) { + printk("%s** Unexpected header CLASS %d TYPE %d, aborting\n", + err_print_prefix, + header->class, header->type); + return NULL; + } + + switch(header->type) { + case EL_TYPE__HEADER__SYSTEM_ERROR_FRAME: + name = "SYSTEM ERROR"; + length = header->by_type.sys_err.frame_length; + packet_count = + header->by_type.sys_err.frame_packet_count; + timestamp.as_int = 0; + break; + case EL_TYPE__HEADER__SYSTEM_EVENT_FRAME: + name = "SYSTEM EVENT"; + length = header->by_type.sys_event.frame_length; + packet_count = + header->by_type.sys_event.frame_packet_count; + timestamp = header->by_type.sys_event.timestamp; + break; + case EL_TYPE__HEADER__HALT_FRAME: + name = "ERROR HALT"; + length = header->by_type.err_halt.frame_length; + packet_count = + header->by_type.err_halt.frame_packet_count; + timestamp = header->by_type.err_halt.timestamp; + break; + case EL_TYPE__HEADER__LOGOUT_FRAME: + name = "LOGOUT FRAME"; + length = header->by_type.logout_header.frame_length; + packet_count = 1; + timestamp.as_int = 0; + break; + default: /* Unknown */ + printk("%s** Unknown header - CLASS %d TYPE %d, aborting\n", + err_print_prefix, + header->class, header->type); + return NULL; + } + + printk("%s*** %s:\n" + " CLASS %d, TYPE %d\n", + err_print_prefix, + name, + header->class, header->type); + el_print_timestamp(×tamp); + + /* + * Process the subpackets + */ + el_process_subpackets(header, packet_count); + + /* return the next header */ + header = (struct el_subpacket *) + ((unsigned long)header + header->length + length); + return header; +} + +static struct el_subpacket * +el_process_subpacket_reg(struct el_subpacket *header) +{ + struct el_subpacket *next = NULL; + struct el_subpacket_handler *h = subpacket_handler_list; + + for (; h && h->class != header->class; h = h->next); + if (h) next = h->handler(header); + + return next; +} + +void +el_print_timestamp(union el_timestamp *timestamp) +{ + if (timestamp->as_int) + printk("%s TIMESTAMP: %d/%d/%02d %d:%02d:%0d\n", + err_print_prefix, + timestamp->b.month, timestamp->b.day, + timestamp->b.year, timestamp->b.hour, + timestamp->b.minute, timestamp->b.second); +} + +void +el_process_subpackets(struct el_subpacket *header, int packet_count) +{ + struct el_subpacket *subpacket; + int i; + + subpacket = (struct el_subpacket *) + ((unsigned long)header + header->length); + + for (i = 0; subpacket && i < packet_count; i++) { + printk("%sPROCESSING SUBPACKET %d\n", err_print_prefix, i); + subpacket = el_process_subpacket(subpacket); + } +} + +struct el_subpacket * +el_process_subpacket(struct el_subpacket *header) +{ + 
struct el_subpacket *next = NULL; + + switch(header->class) { + case EL_CLASS__TERMINATION: + /* Termination packet, there are no more */ + break; + case EL_CLASS__HEADER: + next = el_process_header_subpacket(header); + break; + default: + if (NULL == (next = el_process_subpacket_reg(header))) { + printk("%s** Unexpected header CLASS %d TYPE %d" + " -- aborting.\n", + err_print_prefix, + header->class, header->type); + } + break; + } + + return next; +} + +void +el_annotate_subpacket(struct el_subpacket *header) +{ + struct el_subpacket_annotation *a; + char **annotation = NULL; + + for (a = subpacket_annotation_list; a; a = a->next) { + if (a->class == header->class && + a->type == header->type && + a->revision == header->revision) { + /* + * We found the annotation + */ + annotation = a->annotation; + printk("%s %s\n", err_print_prefix, a->description); + break; + } + } + + mchk_dump_mem(header, header->length, annotation); +} + +static void __init +cdl_process_console_data_log(int cpu, struct percpu_struct *pcpu) +{ + struct el_subpacket *header = (struct el_subpacket *) + (IDENT_ADDR | pcpu->console_data_log_pa); + int err; + + printk("%s******* CONSOLE DATA LOG FOR CPU %d. *******\n" + "*** Error(s) were logged on a previous boot\n", + err_print_prefix, cpu); + + for (err = 0; header && (header->class != EL_CLASS__TERMINATION); err++) + header = el_process_subpacket(header); + + /* let the console know it's ok to clear the error(s) at restart */ + pcpu->console_data_log_pa = 0; + + printk("%s*** %d total error(s) logged\n" + "**** END OF CONSOLE DATA LOG FOR CPU %d ****\n", + err_print_prefix, err, cpu); +} + +void __init +cdl_check_console_data_log(void) +{ + struct percpu_struct *pcpu; + unsigned long cpu; + + for (cpu = 0; cpu < hwrpb->nr_processors; cpu++) { + pcpu = (struct percpu_struct *) + ((unsigned long)hwrpb + hwrpb->processor_offset + + cpu * hwrpb->processor_size); + if (pcpu->console_data_log_pa) + cdl_process_console_data_log(cpu, pcpu); + } + +} + +int __init +cdl_register_subpacket_annotation(struct el_subpacket_annotation *new) +{ + struct el_subpacket_annotation *a = subpacket_annotation_list; + + if (a == NULL) subpacket_annotation_list = new; + else { + for (; a->next != NULL; a = a->next) { + if ((a->class == new->class && a->type == new->type) || + a == new) { + printk("Attempted to re-register " + "subpacket annotation\n"); + return -EINVAL; + } + } + a->next = new; + } + new->next = NULL; + + return 0; +} + +int __init +cdl_register_subpacket_handler(struct el_subpacket_handler *new) +{ + struct el_subpacket_handler *h = subpacket_handler_list; + + if (h == NULL) subpacket_handler_list = new; + else { + for (; h->next != NULL; h = h->next) { + if (h->class == new->class || h == new) { + printk("Attempted to re-register " + "subpacket handler\n"); + return -EINVAL; + } + } + h->next = new; + } + new->next = NULL; + + return 0; +} + diff --git a/arch/alpha/kernel/err_ev6.c b/arch/alpha/kernel/err_ev6.c new file mode 100644 index 0000000..64f59f2 --- /dev/null +++ b/arch/alpha/kernel/err_ev6.c @@ -0,0 +1,274 @@ +/* + * linux/arch/alpha/kernel/err_ev6.c + * + * Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation) + * + * Error handling code supporting Alpha systems + */ + +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/sched.h> + +#include <asm/io.h> +#include <asm/hwrpb.h> +#include <asm/smp.h> +#include <asm/err_common.h> +#include <asm/err_ev6.h> + +#include "err_impl.h" +#include "proto.h" + +static int +ev6_parse_ibox(u64 i_stat, 
int print) +{ + int status = MCHK_DISPOSITION_REPORT; + +#define EV6__I_STAT__PAR (1UL << 29) +#define EV6__I_STAT__ERRMASK (EV6__I_STAT__PAR) + + if (!(i_stat & EV6__I_STAT__ERRMASK)) + return MCHK_DISPOSITION_UNKNOWN_ERROR; + + if (!print) + return status; + + if (i_stat & EV6__I_STAT__PAR) + printk("%s Icache parity error\n", err_print_prefix); + + return status; +} + +static int +ev6_parse_mbox(u64 mm_stat, u64 d_stat, u64 c_stat, int print) +{ + int status = MCHK_DISPOSITION_REPORT; + +#define EV6__MM_STAT__DC_TAG_PERR (1UL << 10) +#define EV6__MM_STAT__ERRMASK (EV6__MM_STAT__DC_TAG_PERR) +#define EV6__D_STAT__TPERR_P0 (1UL << 0) +#define EV6__D_STAT__TPERR_P1 (1UL << 1) +#define EV6__D_STAT__ECC_ERR_ST (1UL << 2) +#define EV6__D_STAT__ECC_ERR_LD (1UL << 3) +#define EV6__D_STAT__SEO (1UL << 4) +#define EV6__D_STAT__ERRMASK (EV6__D_STAT__TPERR_P0 | \ + EV6__D_STAT__TPERR_P1 | \ + EV6__D_STAT__ECC_ERR_ST | \ + EV6__D_STAT__ECC_ERR_LD | \ + EV6__D_STAT__SEO) + + if (!(d_stat & EV6__D_STAT__ERRMASK) && + !(mm_stat & EV6__MM_STAT__ERRMASK)) + return MCHK_DISPOSITION_UNKNOWN_ERROR; + + if (!print) + return status; + + if (mm_stat & EV6__MM_STAT__DC_TAG_PERR) + printk("%s Dcache tag parity error on probe\n", + err_print_prefix); + if (d_stat & EV6__D_STAT__TPERR_P0) + printk("%s Dcache tag parity error - pipe 0\n", + err_print_prefix); + if (d_stat & EV6__D_STAT__TPERR_P1) + printk("%s Dcache tag parity error - pipe 1\n", + err_print_prefix); + if (d_stat & EV6__D_STAT__ECC_ERR_ST) + printk("%s ECC error occurred on a store\n", + err_print_prefix); + if (d_stat & EV6__D_STAT__ECC_ERR_LD) + printk("%s ECC error occurred on a %s load\n", + err_print_prefix, + c_stat ? "" : "speculative "); + if (d_stat & EV6__D_STAT__SEO) + printk("%s Dcache second error\n", err_print_prefix); + + return status; +} + +static int +ev6_parse_cbox(u64 c_addr, u64 c1_syn, u64 c2_syn, + u64 c_stat, u64 c_sts, int print) +{ + char *sourcename[] = { "UNKNOWN", "UNKNOWN", "UNKNOWN", + "MEMORY", "BCACHE", "DCACHE", + "BCACHE PROBE", "BCACHE PROBE" }; + char *streamname[] = { "D", "I" }; + char *bitsname[] = { "SINGLE", "DOUBLE" }; + int status = MCHK_DISPOSITION_REPORT; + int source = -1, stream = -1, bits = -1; + +#define EV6__C_STAT__BC_PERR (0x01) +#define EV6__C_STAT__DC_PERR (0x02) +#define EV6__C_STAT__DSTREAM_MEM_ERR (0x03) +#define EV6__C_STAT__DSTREAM_BC_ERR (0x04) +#define EV6__C_STAT__DSTREAM_DC_ERR (0x05) +#define EV6__C_STAT__PROBE_BC_ERR0 (0x06) /* both 6 and 7 indicate... */ +#define EV6__C_STAT__PROBE_BC_ERR1 (0x07) /* ...probe bc error. 
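An aside on the __S and __M suffix pairs defined around here: EXTRACT, presumably from err_impl.h, combines the shift and the mask, roughly

        // presumed shape of EXTRACT, matching the __S/__M naming convention
        #define EXTRACT(v, f) (((v) >> f##__S) & f##__M)

which is how source, stream and bits are pulled out of c_stat further down.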
*/ +#define EV6__C_STAT__ISTREAM_MEM_ERR (0x0B) +#define EV6__C_STAT__ISTREAM_BC_ERR (0x0C) +#define EV6__C_STAT__DSTREAM_MEM_DBL (0x13) +#define EV6__C_STAT__DSTREAM_BC_DBL (0x14) +#define EV6__C_STAT__ISTREAM_MEM_DBL (0x1B) +#define EV6__C_STAT__ISTREAM_BC_DBL (0x1C) +#define EV6__C_STAT__SOURCE_MEMORY (0x03) +#define EV6__C_STAT__SOURCE_BCACHE (0x04) +#define EV6__C_STAT__SOURCE__S (0) +#define EV6__C_STAT__SOURCE__M (0x07) +#define EV6__C_STAT__ISTREAM__S (3) +#define EV6__C_STAT__ISTREAM__M (0x01) +#define EV6__C_STAT__DOUBLE__S (4) +#define EV6__C_STAT__DOUBLE__M (0x01) +#define EV6__C_STAT__ERRMASK (0x1F) +#define EV6__C_STS__SHARED (1 << 0) +#define EV6__C_STS__DIRTY (1 << 1) +#define EV6__C_STS__VALID (1 << 2) +#define EV6__C_STS__PARITY (1 << 3) + + if (!(c_stat & EV6__C_STAT__ERRMASK)) + return MCHK_DISPOSITION_UNKNOWN_ERROR; + + if (!print) + return status; + + source = EXTRACT(c_stat, EV6__C_STAT__SOURCE); + stream = EXTRACT(c_stat, EV6__C_STAT__ISTREAM); + bits = EXTRACT(c_stat, EV6__C_STAT__DOUBLE); + + if (c_stat & EV6__C_STAT__BC_PERR) { + printk("%s Bcache tag parity error\n", err_print_prefix); + source = -1; + } + + if (c_stat & EV6__C_STAT__DC_PERR) { + printk("%s Dcache tag parity error\n", err_print_prefix); + source = -1; + } + + if (c_stat == EV6__C_STAT__PROBE_BC_ERR0 || + c_stat == EV6__C_STAT__PROBE_BC_ERR1) { + printk("%s Bcache single-bit error on a probe hit\n", + err_print_prefix); + source = -1; + } + + if (source != -1) + printk("%s %s-STREAM %s-BIT ECC error from %s\n", + err_print_prefix, + streamname[stream], bitsname[bits], sourcename[source]); + + printk("%s Address: 0x%016lx\n" + " Syndrome[upper.lower]: %02lx.%02lx\n", + err_print_prefix, + c_addr, + c2_syn, c1_syn); + + if (source == EV6__C_STAT__SOURCE_MEMORY || + source == EV6__C_STAT__SOURCE_BCACHE) + printk("%s Block status: %s%s%s%s\n", + err_print_prefix, + (c_sts & EV6__C_STS__SHARED) ? "SHARED " : "", + (c_sts & EV6__C_STS__DIRTY) ? "DIRTY " : "", + (c_sts & EV6__C_STS__VALID) ? "VALID " : "", + (c_sts & EV6__C_STS__PARITY) ? "PARITY " : ""); + + return status; +} + +void +ev6_register_error_handlers(void) +{ + /* None right now. 
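Worth noting for ev6_process_logout_frame below: dispositions accumulate by OR-ing, so the caller keeps everything the three parsers recognized (sketch; argument names abbreviated):

        int status = MCHK_DISPOSITION_UNKNOWN_ERROR;
        status |= ev6_parse_ibox(i_stat, print);
        status |= ev6_parse_mbox(mm_stat, d_stat, c_stat, print);
        status |= ev6_parse_cbox(c_addr, c1_syn, c2_syn, c_stat, c_sts, print);
        // each parser reports once any of its status bits are set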
*/
+}
+
+int
+ev6_process_logout_frame(struct el_common *mchk_header, int print)
+{
+	struct el_common_EV6_mcheck *ev6mchk =
+		(struct el_common_EV6_mcheck *)mchk_header;
+	int status = MCHK_DISPOSITION_UNKNOWN_ERROR;
+
+	status |= ev6_parse_ibox(ev6mchk->I_STAT, print);
+	status |= ev6_parse_mbox(ev6mchk->MM_STAT, ev6mchk->DC_STAT,
+				 ev6mchk->C_STAT, print);
+	status |= ev6_parse_cbox(ev6mchk->C_ADDR, ev6mchk->DC1_SYNDROME,
+				 ev6mchk->DC0_SYNDROME, ev6mchk->C_STAT,
+				 ev6mchk->C_STS, print);
+
+	if (!print)
+		return status;
+
+	if (status != MCHK_DISPOSITION_DISMISS) {
+		char *saved_err_prefix = err_print_prefix;
+
+		/*
+		 * Dump some additional information from the frame
+		 */
+		printk("%s EXC_ADDR: 0x%016lx IER_CM: 0x%016lx"
+		       " ISUM: 0x%016lx\n"
+		       " PAL_BASE: 0x%016lx I_CTL: 0x%016lx"
+		       " PCTX: 0x%016lx\n",
+		       err_print_prefix,
+		       ev6mchk->EXC_ADDR, ev6mchk->IER_CM, ev6mchk->ISUM,
+		       ev6mchk->PAL_BASE, ev6mchk->I_CTL, ev6mchk->PCTX);
+
+		if (status == MCHK_DISPOSITION_UNKNOWN_ERROR) {
+			printk("%s UNKNOWN error, frame follows:\n",
+			       err_print_prefix);
+		} else {
+			/* had decode -- downgrade print level for frame */
+			err_print_prefix = KERN_NOTICE;
+		}
+
+		mchk_dump_logout_frame(mchk_header);
+
+		err_print_prefix = saved_err_prefix;
+	}
+
+	return status;
+}
+
+void
+ev6_machine_check(u64 vector, u64 la_ptr, struct pt_regs *regs)
+{
+	struct el_common *mchk_header = (struct el_common *)la_ptr;
+
+	/*
+	 * Sync the processor
+	 */
+	mb();
+	draina();
+
+	/*
+	 * Parse the logout frame without printing first. If the only
+	 * error(s) found have a disposition of "dismiss", then just
+	 * dismiss them and don't print any message
+	 */
+	if (ev6_process_logout_frame(mchk_header, 0) !=
+	    MCHK_DISPOSITION_DISMISS) {
+		char *saved_err_prefix = err_print_prefix;
+		err_print_prefix = KERN_CRIT;
+
+		/*
+		 * Either a nondismissible error was detected or no
+		 * recognized error was detected in the logout frame
+		 * -- report the error in either case
+		 */
+		printk("%s*CPU %s Error (Vector 0x%x) reported on CPU %d:\n",
+		       err_print_prefix,
+		       (vector == SCB_Q_PROCERR)?"Correctable":"Uncorrectable",
+		       (unsigned int)vector, (int)smp_processor_id());
+
+		ev6_process_logout_frame(mchk_header, 1);
+		dik_show_regs(regs, NULL);
+
+		err_print_prefix = saved_err_prefix;
+	}
+
+	/*
+	 * Release the logout frame
+	 */
+	wrmces(0x7);
+	mb();
+}
+
diff --git a/arch/alpha/kernel/err_ev7.c b/arch/alpha/kernel/err_ev7.c
new file mode 100644
index 0000000..bf52ba6
--- /dev/null
+++ b/arch/alpha/kernel/err_ev7.c
@@ -0,0 +1,289 @@
+/*
+ * linux/arch/alpha/kernel/err_ev7.c
+ *
+ * Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation)
+ *
+ * Error handling code supporting Alpha systems
+ */
+
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+
+#include <asm/io.h>
+#include <asm/hwrpb.h>
+#include <asm/smp.h>
+#include <asm/err_common.h>
+#include <asm/err_ev7.h>
+
+#include "err_impl.h"
+#include "proto.h"
+
+struct ev7_lf_subpackets *
+ev7_collect_logout_frame_subpackets(struct el_subpacket *el_ptr,
+				    struct ev7_lf_subpackets *lf_subpackets)
+{
+	struct el_subpacket *subpacket;
+	int i;
+
+	/*
+	 * A Marvel machine check frame is always packaged in an
+	 * el_subpacket of class HEADER, type LOGOUT_FRAME.
+	 */
+	if (el_ptr->class != EL_CLASS__HEADER ||
+	    el_ptr->type != EL_TYPE__HEADER__LOGOUT_FRAME)
+		return NULL;
+
+	/*
+	 * It is a logout frame header.  Look at the one subpacket.
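+	 *
+	 * Subpackets are packed end to end, so stepping to the next one
+	 * is always the same pointer arithmetic (with "cur" standing in
+	 * for any subpacket pointer), used throughout below:
+	 *
+	 *	cur = (struct el_subpacket *)
+	 *		((unsigned long)cur + cur->length);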
+	 */
+	el_ptr = (struct el_subpacket *)
+		((unsigned long)el_ptr + el_ptr->length);
+
+	/*
+	 * It has to be class PAL, type LOGOUT_FRAME.
+	 */
+	if (el_ptr->class != EL_CLASS__PAL ||
+	    el_ptr->type != EL_TYPE__PAL__LOGOUT_FRAME)
+		return NULL;
+
+	lf_subpackets->logout = (struct ev7_pal_logout_subpacket *)
+		el_ptr->by_type.raw.data_start;
+
+	/*
+	 * Process the subpackets.
+	 */
+	subpacket = (struct el_subpacket *)
+		((unsigned long)el_ptr + el_ptr->length);
+	for (i = 0;
+	     subpacket && i < lf_subpackets->logout->subpacket_count;
+	     subpacket = (struct el_subpacket *)
+		((unsigned long)subpacket + subpacket->length), i++) {
+		/*
+		 * All subpackets should be class PAL.
+		 */
+		if (subpacket->class != EL_CLASS__PAL) {
+			printk("%s**UNEXPECTED SUBPACKET CLASS %d "
+			       "IN LOGOUT FRAME (packet %d)\n",
+			       err_print_prefix, subpacket->class, i);
+			return NULL;
+		}
+
+		/*
+		 * Remember the subpacket.
+		 */
+		switch(subpacket->type) {
+		case EL_TYPE__PAL__EV7_PROCESSOR:
+			lf_subpackets->ev7 =
+				(struct ev7_pal_processor_subpacket *)
+				subpacket->by_type.raw.data_start;
+			break;
+
+		case EL_TYPE__PAL__EV7_RBOX:
+			lf_subpackets->rbox = (struct ev7_pal_rbox_subpacket *)
+				subpacket->by_type.raw.data_start;
+			break;
+
+		case EL_TYPE__PAL__EV7_ZBOX:
+			lf_subpackets->zbox = (struct ev7_pal_zbox_subpacket *)
+				subpacket->by_type.raw.data_start;
+			break;
+
+		case EL_TYPE__PAL__EV7_IO:
+			lf_subpackets->io = (struct ev7_pal_io_subpacket *)
+				subpacket->by_type.raw.data_start;
+			break;
+
+		case EL_TYPE__PAL__ENV__AMBIENT_TEMPERATURE:
+		case EL_TYPE__PAL__ENV__AIRMOVER_FAN:
+		case EL_TYPE__PAL__ENV__VOLTAGE:
+		case EL_TYPE__PAL__ENV__INTRUSION:
+		case EL_TYPE__PAL__ENV__POWER_SUPPLY:
+		case EL_TYPE__PAL__ENV__LAN:
+		case EL_TYPE__PAL__ENV__HOT_PLUG:
+			lf_subpackets->env[ev7_lf_env_index(subpacket->type)] =
+				(struct ev7_pal_environmental_subpacket *)
+				subpacket->by_type.raw.data_start;
+			break;
+
+		default:
+			/*
+			 * Don't know what kind of frame this is.
+			 */
+			return NULL;
+		}
+	}
+
+	return lf_subpackets;
+}
+
+void
+ev7_machine_check(u64 vector, u64 la_ptr, struct pt_regs *regs)
+{
+	struct el_subpacket *el_ptr = (struct el_subpacket *)la_ptr;
+	char *saved_err_prefix = err_print_prefix;
+
+	/*
+	 * Sync the processor
+	 */
+	mb();
+	draina();
+
+	err_print_prefix = KERN_CRIT;
+	printk("%s*CPU %s Error (Vector 0x%x) reported on CPU %d\n",
+	       err_print_prefix,
+	       (vector == SCB_Q_PROCERR) ?
"Correctable" : "Uncorrectable", + (unsigned int)vector, (int)smp_processor_id()); + el_process_subpacket(el_ptr); + err_print_prefix = saved_err_prefix; + + /* + * Release the logout frame + */ + wrmces(0x7); + mb(); +} + +static char *el_ev7_processor_subpacket_annotation[] = { + "Subpacket Header", "I_STAT", "DC_STAT", + "C_ADDR", "C_SYNDROME_1", "C_SYNDROME_0", + "C_STAT", "C_STS", "MM_STAT", + "EXC_ADDR", "IER_CM", "ISUM", + "PAL_BASE", "I_CTL", "PROCESS_CONTEXT", + "CBOX_CTL", "CBOX_STP_CTL", "CBOX_ACC_CTL", + "CBOX_LCL_SET", "CBOX_GLB_SET", "BBOX_CTL", + "BBOX_ERR_STS", "BBOX_ERR_IDX", "CBOX_DDP_ERR_STS", + "BBOX_DAT_RMP", NULL +}; + +static char *el_ev7_zbox_subpacket_annotation[] = { + "Subpacket Header", + "ZBOX(0): DRAM_ERR_STATUS_2 / DRAM_ERR_STATUS_1", + "ZBOX(0): DRAM_ERROR_CTL / DRAM_ERR_STATUS_3", + "ZBOX(0): DIFT_TIMEOUT / DRAM_ERR_ADR", + "ZBOX(0): FRC_ERR_ADR / DRAM_MAPPER_CTL", + "ZBOX(0): reserved / DIFT_ERR_STATUS", + "ZBOX(1): DRAM_ERR_STATUS_2 / DRAM_ERR_STATUS_1", + "ZBOX(1): DRAM_ERROR_CTL / DRAM_ERR_STATUS_3", + "ZBOX(1): DIFT_TIMEOUT / DRAM_ERR_ADR", + "ZBOX(1): FRC_ERR_ADR / DRAM_MAPPER_CTL", + "ZBOX(1): reserved / DIFT_ERR_STATUS", + "CBOX_CTL", "CBOX_STP_CTL", + "ZBOX(0)_ERROR_PA", "ZBOX(1)_ERROR_PA", + "ZBOX(0)_ORED_SYNDROME","ZBOX(1)_ORED_SYNDROME", + NULL +}; + +static char *el_ev7_rbox_subpacket_annotation[] = { + "Subpacket Header", "RBOX_CFG", "RBOX_N_CFG", + "RBOX_S_CFG", "RBOX_E_CFG", "RBOX_W_CFG", + "RBOX_N_ERR", "RBOX_S_ERR", "RBOX_E_ERR", + "RBOX_W_ERR", "RBOX_IO_CFG", "RBOX_IO_ERR", + "RBOX_L_ERR", "RBOX_WHOAMI", "RBOX_IMASL", + "RBOX_INTQ", "RBOX_INT", NULL +}; + +static char *el_ev7_io_subpacket_annotation[] = { + "Subpacket Header", "IO_ASIC_REV", "IO_SYS_REV", + "IO7_UPH", "HPI_CTL", "CRD_CTL", + "HEI_CTL", "PO7_ERROR_SUM","PO7_UNCRR_SYM", + "PO7_CRRCT_SYM", "PO7_UGBGE_SYM","PO7_ERR_PKT0", + "PO7_ERR_PKT1", "reserved", "reserved", + "PO0_ERR_SUM", "PO0_TLB_ERR", "PO0_SPL_COMPLT", + "PO0_TRANS_SUM", "PO0_FIRST_ERR","PO0_MULT_ERR", + "DM CSR PH", "DM CSR PH", "DM CSR PH", + "DM CSR PH", "reserved", + "PO1_ERR_SUM", "PO1_TLB_ERR", "PO1_SPL_COMPLT", + "PO1_TRANS_SUM", "PO1_FIRST_ERR","PO1_MULT_ERR", + "DM CSR PH", "DM CSR PH", "DM CSR PH", + "DM CSR PH", "reserved", + "PO2_ERR_SUM", "PO2_TLB_ERR", "PO2_SPL_COMPLT", + "PO2_TRANS_SUM", "PO2_FIRST_ERR","PO2_MULT_ERR", + "DM CSR PH", "DM CSR PH", "DM CSR PH", + "DM CSR PH", "reserved", + "PO3_ERR_SUM", "PO3_TLB_ERR", "PO3_SPL_COMPLT", + "PO3_TRANS_SUM", "PO3_FIRST_ERR","PO3_MULT_ERR", + "DM CSR PH", "DM CSR PH", "DM CSR PH", + "DM CSR PH", "reserved", + NULL +}; + +static struct el_subpacket_annotation el_ev7_pal_annotations[] = { + SUBPACKET_ANNOTATION(EL_CLASS__PAL, + EL_TYPE__PAL__EV7_PROCESSOR, + 1, + "EV7 Processor Subpacket", + el_ev7_processor_subpacket_annotation), + SUBPACKET_ANNOTATION(EL_CLASS__PAL, + EL_TYPE__PAL__EV7_ZBOX, + 1, + "EV7 ZBOX Subpacket", + el_ev7_zbox_subpacket_annotation), + SUBPACKET_ANNOTATION(EL_CLASS__PAL, + EL_TYPE__PAL__EV7_RBOX, + 1, + "EV7 RBOX Subpacket", + el_ev7_rbox_subpacket_annotation), + SUBPACKET_ANNOTATION(EL_CLASS__PAL, + EL_TYPE__PAL__EV7_IO, + 1, + "EV7 IO Subpacket", + el_ev7_io_subpacket_annotation) +}; + +static struct el_subpacket * +ev7_process_pal_subpacket(struct el_subpacket *header) +{ + struct ev7_pal_subpacket *packet; + + if (header->class != EL_CLASS__PAL) { + printk("%s ** Unexpected header CLASS %d TYPE %d, aborting\n", + err_print_prefix, + header->class, header->type); + return NULL; + } + + packet = (struct ev7_pal_subpacket 
*)header->by_type.raw.data_start; + + switch(header->type) { + case EL_TYPE__PAL__LOGOUT_FRAME: + printk("%s*** MCHK occurred on LPID %ld (RBOX %lx)\n", + err_print_prefix, + packet->by_type.logout.whami, + packet->by_type.logout.rbox_whami); + el_print_timestamp(&packet->by_type.logout.timestamp); + printk("%s EXC_ADDR: %016lx\n" + " HALT_CODE: %lx\n", + err_print_prefix, + packet->by_type.logout.exc_addr, + packet->by_type.logout.halt_code); + el_process_subpackets(header, + packet->by_type.logout.subpacket_count); + break; + default: + printk("%s ** PAL TYPE %d SUBPACKET\n", + err_print_prefix, + header->type); + el_annotate_subpacket(header); + break; + } + + return (struct el_subpacket *)((unsigned long)header + header->length); +} + +struct el_subpacket_handler ev7_pal_subpacket_handler = + SUBPACKET_HANDLER_INIT(EL_CLASS__PAL, ev7_process_pal_subpacket); + +void +ev7_register_error_handlers(void) +{ + int i; + + for(i = 0; + i<sizeof(el_ev7_pal_annotations)/sizeof(el_ev7_pal_annotations[1]); + i++) { + cdl_register_subpacket_annotation(&el_ev7_pal_annotations[i]); + } + cdl_register_subpacket_handler(&ev7_pal_subpacket_handler); +} + diff --git a/arch/alpha/kernel/err_impl.h b/arch/alpha/kernel/err_impl.h new file mode 100644 index 0000000..64e9b73 --- /dev/null +++ b/arch/alpha/kernel/err_impl.h @@ -0,0 +1,85 @@ +/* + * linux/arch/alpha/kernel/err_impl.h + * + * Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation) + * + * Contains declarations and macros to support Alpha error handling + * implementations. + */ + +union el_timestamp; +struct el_subpacket; +struct ev7_lf_subpackets; + +struct el_subpacket_annotation { + struct el_subpacket_annotation *next; + u16 class; + u16 type; + u16 revision; + char *description; + char **annotation; +}; +#define SUBPACKET_ANNOTATION(c, t, r, d, a) {NULL, (c), (t), (r), (d), (a)} + +struct el_subpacket_handler { + struct el_subpacket_handler *next; + u16 class; + struct el_subpacket *(*handler)(struct el_subpacket *); +}; +#define SUBPACKET_HANDLER_INIT(c, h) {NULL, (c), (h)} + +/* + * Manipulate a field from a register given it's name. 
defines + * for the LSB (__S - shift count) and bitmask (__M) are required + * + * EXTRACT(u, f) - extracts the field and places it at bit position 0 + * GEN_MASK(f) - creates an in-position mask for the field + */ +#define EXTRACT(u, f) (((u) >> f##__S) & f##__M) +#define GEN_MASK(f) ((u64)f##__M << f##__S) + +/* + * err_common.c + */ +extern char *err_print_prefix; + +extern void mchk_dump_mem(void *, size_t, char **); +extern void mchk_dump_logout_frame(struct el_common *); +extern void el_print_timestamp(union el_timestamp *); +extern void el_process_subpackets(struct el_subpacket *, int); +extern struct el_subpacket *el_process_subpacket(struct el_subpacket *); +extern void el_annotate_subpacket(struct el_subpacket *); +extern void cdl_check_console_data_log(void); +extern int cdl_register_subpacket_annotation(struct el_subpacket_annotation *); +extern int cdl_register_subpacket_handler(struct el_subpacket_handler *); + +/* + * err_ev7.c + */ +extern struct ev7_lf_subpackets * +ev7_collect_logout_frame_subpackets(struct el_subpacket *, + struct ev7_lf_subpackets *); +extern void ev7_register_error_handlers(void); +extern void ev7_machine_check(u64, u64, struct pt_regs *); + +/* + * err_ev6.c + */ +extern void ev6_register_error_handlers(void); +extern int ev6_process_logout_frame(struct el_common *, int); +extern void ev6_machine_check(u64, u64, struct pt_regs *); + +/* + * err_marvel.c + */ +extern void marvel_machine_check(u64, u64, struct pt_regs *); +extern void marvel_register_error_handlers(void); + +/* + * err_titan.c + */ +extern int titan_process_logout_frame(struct el_common *, int); +extern void titan_machine_check(u64, u64, struct pt_regs *); +extern void titan_register_error_handlers(void); +extern int privateer_process_logout_frame(struct el_common *, int); +extern void privateer_machine_check(u64, u64, struct pt_regs *); diff --git a/arch/alpha/kernel/err_marvel.c b/arch/alpha/kernel/err_marvel.c new file mode 100644 index 0000000..70b38b1 --- /dev/null +++ b/arch/alpha/kernel/err_marvel.c @@ -0,0 +1,1159 @@ +/* + * linux/arch/alpha/kernel/err_marvel.c + * + * Copyright (C) 2001 Jeff Wiedemeier (Compaq Computer Corporation) + * + */ + +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/sched.h> + +#include <asm/io.h> +#include <asm/console.h> +#include <asm/core_marvel.h> +#include <asm/hwrpb.h> +#include <asm/smp.h> +#include <asm/err_common.h> +#include <asm/err_ev7.h> + +#include "err_impl.h" +#include "proto.h" + +static void +marvel_print_680_frame(struct ev7_lf_subpackets *lf_subpackets) +{ +#ifdef CONFIG_VERBOSE_MCHECK + struct ev7_pal_environmental_subpacket *env; + struct { int type; char *name; } ev_packets[] = { + { EL_TYPE__PAL__ENV__AMBIENT_TEMPERATURE, + "Ambient Temperature" }, + { EL_TYPE__PAL__ENV__AIRMOVER_FAN, + "AirMover / Fan" }, + { EL_TYPE__PAL__ENV__VOLTAGE, + "Voltage" }, + { EL_TYPE__PAL__ENV__INTRUSION, + "Intrusion" }, + { EL_TYPE__PAL__ENV__POWER_SUPPLY, + "Power Supply" }, + { EL_TYPE__PAL__ENV__LAN, + "LAN" }, + { EL_TYPE__PAL__ENV__HOT_PLUG, + "Hot Plug" }, + { 0, NULL } + }; + int i; + + for (i = 0; ev_packets[i].type != 0; i++) { + env = lf_subpackets->env[ev7_lf_env_index(ev_packets[i].type)]; + if (!env) + continue; + + printk("%s**%s event (cabinet %d, drawer %d)\n", + err_print_prefix, + ev_packets[i].name, + env->cabinet, + env->drawer); + printk("%s Module Type: 0x%x - Unit ID 0x%x - " + "Condition 0x%x\n", + err_print_prefix, + env->module_type, + env->unit_id, + env->condition); + } +#endif /* CONFIG_VERBOSE_MCHECK 
*/ +} + +static int +marvel_process_680_frame(struct ev7_lf_subpackets *lf_subpackets, int print) +{ + int status = MCHK_DISPOSITION_UNKNOWN_ERROR; + int i; + + for (i = ev7_lf_env_index(EL_TYPE__PAL__ENV__AMBIENT_TEMPERATURE); + i <= ev7_lf_env_index(EL_TYPE__PAL__ENV__HOT_PLUG); + i++) { + if (lf_subpackets->env[i]) + status = MCHK_DISPOSITION_REPORT; + } + + if (print) + marvel_print_680_frame(lf_subpackets); + + return status; +} + +#ifdef CONFIG_VERBOSE_MCHECK + +static void +marvel_print_err_cyc(u64 err_cyc) +{ + static char *packet_desc[] = { + "No Error", + "UNKNOWN", + "1 cycle (1 or 2 flit packet)", + "2 cycles (3 flit packet)", + "9 cycles (18 flit packet)", + "10 cycles (19 flit packet)", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN" + }; + +#define IO7__ERR_CYC__ODD_FLT (1UL << 0) +#define IO7__ERR_CYC__EVN_FLT (1UL << 1) +#define IO7__ERR_CYC__PACKET__S (6) +#define IO7__ERR_CYC__PACKET__M (0x7) +#define IO7__ERR_CYC__LOC (1UL << 5) +#define IO7__ERR_CYC__CYCLE__S (2) +#define IO7__ERR_CYC__CYCLE__M (0x7) + + printk("%s Packet In Error: %s\n" + "%s Error in %s, cycle %ld%s%s\n", + err_print_prefix, + packet_desc[EXTRACT(err_cyc, IO7__ERR_CYC__PACKET)], + err_print_prefix, + (err_cyc & IO7__ERR_CYC__LOC) ? "DATA" : "HEADER", + EXTRACT(err_cyc, IO7__ERR_CYC__CYCLE), + (err_cyc & IO7__ERR_CYC__ODD_FLT) ? " [ODD Flit]": "", + (err_cyc & IO7__ERR_CYC__EVN_FLT) ? " [Even Flit]": ""); +} + +static void +marvel_print_po7_crrct_sym(u64 crrct_sym) +{ +#define IO7__PO7_CRRCT_SYM__SYN__S (0) +#define IO7__PO7_CRRCT_SYM__SYN__M (0x7f) +#define IO7__PO7_CRRCT_SYM__ERR_CYC__S (7) /* ERR_CYC + ODD_FLT + EVN_FLT */ +#define IO7__PO7_CRRCT_SYM__ERR_CYC__M (0x1ff) + + + printk("%s Correctable Error Symptoms:\n" + "%s Syndrome: 0x%lx\n", + err_print_prefix, + err_print_prefix, EXTRACT(crrct_sym, IO7__PO7_CRRCT_SYM__SYN)); + marvel_print_err_cyc(EXTRACT(crrct_sym, IO7__PO7_CRRCT_SYM__ERR_CYC)); +} + +static void +marvel_print_po7_uncrr_sym(u64 uncrr_sym, u64 valid_mask) +{ + static char *clk_names[] = { "_h[0]", "_h[1]", "_n[0]", "_n[1]" }; + static char *clk_decode[] = { + "No Error", + "One extra rising edge", + "Two extra rising edges", + "Lost one clock" + }; + static char *port_names[] = { "Port 0", "Port 1", + "Port 2", "Port 3", + "Unknown Port", "Unknown Port", + "Unknown Port", "Port 7" }; + int scratch, i; + +#define IO7__PO7_UNCRR_SYM__SYN__S (0) +#define IO7__PO7_UNCRR_SYM__SYN__M (0x7f) +#define IO7__PO7_UNCRR_SYM__ERR_CYC__S (7) /* ERR_CYC + ODD_FLT... */ +#define IO7__PO7_UNCRR_SYM__ERR_CYC__M (0x1ff) /* ... 
+ EVN_FLT */ +#define IO7__PO7_UNCRR_SYM__CLK__S (16) +#define IO7__PO7_UNCRR_SYM__CLK__M (0xff) +#define IO7__PO7_UNCRR_SYM__CDT_OVF_TO__REQ (1UL << 24) +#define IO7__PO7_UNCRR_SYM__CDT_OVF_TO__RIO (1UL << 25) +#define IO7__PO7_UNCRR_SYM__CDT_OVF_TO__WIO (1UL << 26) +#define IO7__PO7_UNCRR_SYM__CDT_OVF_TO__BLK (1UL << 27) +#define IO7__PO7_UNCRR_SYM__CDT_OVF_TO__NBK (1UL << 28) +#define IO7__PO7_UNCRR_SYM__OVF__READIO (1UL << 29) +#define IO7__PO7_UNCRR_SYM__OVF__WRITEIO (1UL << 30) +#define IO7__PO7_UNCRR_SYM__OVF__FWD (1UL << 31) +#define IO7__PO7_UNCRR_SYM__VICTIM_SP__S (32) +#define IO7__PO7_UNCRR_SYM__VICTIM_SP__M (0xff) +#define IO7__PO7_UNCRR_SYM__DETECT_SP__S (40) +#define IO7__PO7_UNCRR_SYM__DETECT_SP__M (0xff) +#define IO7__PO7_UNCRR_SYM__STRV_VTR__S (48) +#define IO7__PO7_UNCRR_SYM__STRV_VTR__M (0x3ff) + +#define IO7__STRV_VTR__LSI__INTX__S (0) +#define IO7__STRV_VTR__LSI__INTX__M (0x3) +#define IO7__STRV_VTR__LSI__SLOT__S (2) +#define IO7__STRV_VTR__LSI__SLOT__M (0x7) +#define IO7__STRV_VTR__LSI__BUS__S (5) +#define IO7__STRV_VTR__LSI__BUS__M (0x3) +#define IO7__STRV_VTR__MSI__INTNUM__S (0) +#define IO7__STRV_VTR__MSI__INTNUM__M (0x1ff) +#define IO7__STRV_VTR__IS_MSI (1UL << 9) + + printk("%s Uncorrectable Error Symptoms:\n", err_print_prefix); + uncrr_sym &= valid_mask; + + if (EXTRACT(valid_mask, IO7__PO7_UNCRR_SYM__SYN)) + printk("%s Syndrome: 0x%lx\n", + err_print_prefix, + EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__SYN)); + + if (EXTRACT(valid_mask, IO7__PO7_UNCRR_SYM__ERR_CYC)) + marvel_print_err_cyc(EXTRACT(uncrr_sym, + IO7__PO7_UNCRR_SYM__ERR_CYC)); + + scratch = EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__CLK); + for (i = 0; i < 4; i++, scratch >>= 2) { + if (scratch & 0x3) + printk("%s Clock %s: %s\n", + err_print_prefix, + clk_names[i], clk_decode[scratch & 0x3]); + } + + if (uncrr_sym & IO7__PO7_UNCRR_SYM__CDT_OVF_TO__REQ) + printk("%s REQ Credit Timeout or Overflow\n", + err_print_prefix); + if (uncrr_sym & IO7__PO7_UNCRR_SYM__CDT_OVF_TO__RIO) + printk("%s RIO Credit Timeout or Overflow\n", + err_print_prefix); + if (uncrr_sym & IO7__PO7_UNCRR_SYM__CDT_OVF_TO__WIO) + printk("%s WIO Credit Timeout or Overflow\n", + err_print_prefix); + if (uncrr_sym & IO7__PO7_UNCRR_SYM__CDT_OVF_TO__BLK) + printk("%s BLK Credit Timeout or Overflow\n", + err_print_prefix); + if (uncrr_sym & IO7__PO7_UNCRR_SYM__CDT_OVF_TO__NBK) + printk("%s NBK Credit Timeout or Overflow\n", + err_print_prefix); + + if (uncrr_sym & IO7__PO7_UNCRR_SYM__OVF__READIO) + printk("%s Read I/O Buffer Overflow\n", + err_print_prefix); + if (uncrr_sym & IO7__PO7_UNCRR_SYM__OVF__WRITEIO) + printk("%s Write I/O Buffer Overflow\n", + err_print_prefix); + if (uncrr_sym & IO7__PO7_UNCRR_SYM__OVF__FWD) + printk("%s FWD Buffer Overflow\n", + err_print_prefix); + + if ((scratch = EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__VICTIM_SP))) { + int lost = scratch & (1UL << 4); + scratch &= ~lost; + for (i = 0; i < 8; i++, scratch >>= 1) { + if (!(scratch & 1)) + continue; + printk("%s Error Response sent to %s", + err_print_prefix, port_names[i]); + } + if (lost) + printk("%s Lost Error sent somewhere else\n", + err_print_prefix); + } + + if ((scratch = EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__DETECT_SP))) { + for (i = 0; i < 8; i++, scratch >>= 1) { + if (!(scratch & 1)) + continue; + printk("%s Error Reported by %s", + err_print_prefix, port_names[i]); + } + } + + if (EXTRACT(valid_mask, IO7__PO7_UNCRR_SYM__STRV_VTR)) { + char starvation_message[80]; + + scratch = EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__STRV_VTR); + if (scratch & 
IO7__STRV_VTR__IS_MSI) + sprintf(starvation_message, + "MSI Interrupt 0x%x", + EXTRACT(scratch, IO7__STRV_VTR__MSI__INTNUM)); + else + sprintf(starvation_message, + "LSI INT%c for Bus:Slot (%d:%d)\n", + 'A' + EXTRACT(scratch, + IO7__STRV_VTR__LSI__INTX), + EXTRACT(scratch, IO7__STRV_VTR__LSI__BUS), + EXTRACT(scratch, IO7__STRV_VTR__LSI__SLOT)); + + printk("%s Starvation Int Trigger By: %s\n", + err_print_prefix, starvation_message); + } +} + +static void +marvel_print_po7_ugbge_sym(u64 ugbge_sym) +{ + char opcode_str[10]; + +#define IO7__PO7_UGBGE_SYM__UPH_PKT_OFF__S (6) +#define IO7__PO7_UGBGE_SYM__UPH_PKT_OFF__M (0xfffffffful) +#define IO7__PO7_UGBGE_SYM__UPH_OPCODE__S (40) +#define IO7__PO7_UGBGE_SYM__UPH_OPCODE__M (0xff) +#define IO7__PO7_UGBGE_SYM__UPH_SRC_PORT__S (48) +#define IO7__PO7_UGBGE_SYM__UPH_SRC_PORT__M (0xf) +#define IO7__PO7_UGBGE_SYM__UPH_DEST_PID__S (52) +#define IO7__PO7_UGBGE_SYM__UPH_DEST_PID__M (0x7ff) +#define IO7__PO7_UGBGE_SYM__VALID (1UL << 63) + + if (!(ugbge_sym & IO7__PO7_UGBGE_SYM__VALID)) + return; + + switch(EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_OPCODE)) { + case 0x51: + sprintf(opcode_str, "Wr32"); + break; + case 0x50: + sprintf(opcode_str, "WrQW"); + break; + case 0x54: + sprintf(opcode_str, "WrIPR"); + break; + case 0xD8: + sprintf(opcode_str, "Victim"); + break; + case 0xC5: + sprintf(opcode_str, "BlkIO"); + break; + default: + sprintf(opcode_str, "0x%lx\n", + EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_OPCODE)); + break; + } + + printk("%s Up Hose Garbage Symptom:\n" + "%s Source Port: %ld - Dest PID: %ld - OpCode: %s\n", + err_print_prefix, + err_print_prefix, + EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_SRC_PORT), + EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_DEST_PID), + opcode_str); + + if (0xC5 != EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_OPCODE)) + printk("%s Packet Offset 0x%08lx\n", + err_print_prefix, + EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_PKT_OFF)); +} + +static void +marvel_print_po7_err_sum(struct ev7_pal_io_subpacket *io) +{ + u64 uncrr_sym_valid = 0; + +#define IO7__PO7_ERRSUM__CR_SBE (1UL << 32) +#define IO7__PO7_ERRSUM__CR_SBE2 (1UL << 33) +#define IO7__PO7_ERRSUM__CR_PIO_WBYTE (1UL << 34) +#define IO7__PO7_ERRSUM__CR_CSR_NXM (1UL << 35) +#define IO7__PO7_ERRSUM__CR_RPID_ACV (1UL << 36) +#define IO7__PO7_ERRSUM__CR_RSP_NXM (1UL << 37) +#define IO7__PO7_ERRSUM__CR_ERR_RESP (1UL << 38) +#define IO7__PO7_ERRSUM__CR_CLK_DERR (1UL << 39) +#define IO7__PO7_ERRSUM__CR_DAT_DBE (1UL << 40) +#define IO7__PO7_ERRSUM__CR_DAT_GRBG (1UL << 41) +#define IO7__PO7_ERRSUM__MAF_TO (1UL << 42) +#define IO7__PO7_ERRSUM__UGBGE (1UL << 43) +#define IO7__PO7_ERRSUM__UN_MAF_LOST (1UL << 44) +#define IO7__PO7_ERRSUM__UN_PKT_OVF (1UL << 45) +#define IO7__PO7_ERRSUM__UN_CDT_OVF (1UL << 46) +#define IO7__PO7_ERRSUM__UN_DEALLOC (1UL << 47) +#define IO7__PO7_ERRSUM__BH_CDT_TO (1UL << 51) +#define IO7__PO7_ERRSUM__BH_CLK_HDR (1UL << 52) +#define IO7__PO7_ERRSUM__BH_DBE_HDR (1UL << 53) +#define IO7__PO7_ERRSUM__BH_GBG_HDR (1UL << 54) +#define IO7__PO7_ERRSUM__BH_BAD_CMD (1UL << 55) +#define IO7__PO7_ERRSUM__HLT_INT (1UL << 56) +#define IO7__PO7_ERRSUM__HP_INT (1UL << 57) +#define IO7__PO7_ERRSUM__CRD_INT (1UL << 58) +#define IO7__PO7_ERRSUM__STV_INT (1UL << 59) +#define IO7__PO7_ERRSUM__HRD_INT (1UL << 60) +#define IO7__PO7_ERRSUM__BH_SUM (1UL << 61) +#define IO7__PO7_ERRSUM__ERR_LST (1UL << 62) +#define IO7__PO7_ERRSUM__ERR_VALID (1UL << 63) + +#define IO7__PO7_ERRSUM__ERR_MASK (IO7__PO7_ERRSUM__ERR_VALID | \ + IO7__PO7_ERRSUM__CR_SBE) + + /* + * Single bit 
errors aren't covered by ERR_VALID. + */ + if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_SBE) { + printk("%s %sSingle Bit Error(s) detected/corrected\n", + err_print_prefix, + (io->po7_error_sum & IO7__PO7_ERRSUM__CR_SBE2) + ? "Multiple " : ""); + marvel_print_po7_crrct_sym(io->po7_crrct_sym); + } + + /* + * Neither are the interrupt status bits + */ + if (io->po7_error_sum & IO7__PO7_ERRSUM__HLT_INT) + printk("%s Halt Interrupt posted", err_print_prefix); + if (io->po7_error_sum & IO7__PO7_ERRSUM__HP_INT) { + printk("%s Hot Plug Event Interrupt posted", + err_print_prefix); + uncrr_sym_valid |= GEN_MASK(IO7__PO7_UNCRR_SYM__DETECT_SP); + } + if (io->po7_error_sum & IO7__PO7_ERRSUM__CRD_INT) + printk("%s Correctable Error Interrupt posted", + err_print_prefix); + if (io->po7_error_sum & IO7__PO7_ERRSUM__STV_INT) { + printk("%s Starvation Interrupt posted", err_print_prefix); + uncrr_sym_valid |= GEN_MASK(IO7__PO7_UNCRR_SYM__STRV_VTR); + } + if (io->po7_error_sum & IO7__PO7_ERRSUM__HRD_INT) { + printk("%s Hard Error Interrupt posted", err_print_prefix); + uncrr_sym_valid |= GEN_MASK(IO7__PO7_UNCRR_SYM__DETECT_SP); + } + + /* + * Everything else is valid only with ERR_VALID, so skip to the end + * (uncrr_sym check) unless ERR_VALID is set. + */ + if (!(io->po7_error_sum & IO7__PO7_ERRSUM__ERR_VALID)) + goto check_uncrr_sym; + + /* + * Since ERR_VALID is set, VICTIM_SP in uncrr_sym is valid. + * For bits [29:0] to also be valid, the following bits must + * not be set: + * CR_PIO_WBYTE CR_CSR_NXM CR_RSP_NXM + * CR_ERR_RESP MAF_TO + */ + uncrr_sym_valid |= GEN_MASK(IO7__PO7_UNCRR_SYM__VICTIM_SP); + if (!(io->po7_error_sum & (IO7__PO7_ERRSUM__CR_PIO_WBYTE | + IO7__PO7_ERRSUM__CR_CSR_NXM | + IO7__PO7_ERRSUM__CR_RSP_NXM | + IO7__PO7_ERRSUM__CR_ERR_RESP | + IO7__PO7_ERRSUM__MAF_TO))) + uncrr_sym_valid |= 0x3ffffffful; + + if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_PIO_WBYTE) + printk("%s Write byte into IO7 CSR\n", err_print_prefix); + if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_CSR_NXM) + printk("%s PIO to non-existent CSR\n", err_print_prefix); + if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_RPID_ACV) + printk("%s Bus Requester PID (Access Violation)\n", + err_print_prefix); + if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_RSP_NXM) + printk("%s Received NXM response from EV7\n", + err_print_prefix); + if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_ERR_RESP) + printk("%s Received ERROR RESPONSE\n", err_print_prefix); + if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_CLK_DERR) + printk("%s Clock error on data flit\n", err_print_prefix); + if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_DAT_DBE) + printk("%s Double Bit Error Data Error Detected\n", + err_print_prefix); + if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_DAT_GRBG) + printk("%s Garbage Encoding Detected on the data\n", + err_print_prefix); + if (io->po7_error_sum & IO7__PO7_ERRSUM__UGBGE) { + printk("%s Garbage Encoding sent up hose\n", + err_print_prefix); + marvel_print_po7_ugbge_sym(io->po7_ugbge_sym); + } + if (io->po7_error_sum & IO7__PO7_ERRSUM__UN_MAF_LOST) + printk("%s Orphan response (unexpected response)\n", + err_print_prefix); + if (io->po7_error_sum & IO7__PO7_ERRSUM__UN_PKT_OVF) + printk("%s Down hose packet overflow\n", err_print_prefix); + if (io->po7_error_sum & IO7__PO7_ERRSUM__UN_CDT_OVF) + printk("%s Down hose credit overflow\n", err_print_prefix); + if (io->po7_error_sum & IO7__PO7_ERRSUM__UN_DEALLOC) + printk("%s Unexpected or bad dealloc field\n", + err_print_prefix); + + /* + * The black hole events. 
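+	 *
+	 * (i.e. conditions where the IO7 had to swallow traffic outright:
+	 * response and credit timeouts, plus clock, ECC, and garbage
+	 * failures on the header, as enumerated below.)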
+ */ + if (io->po7_error_sum & IO7__PO7_ERRSUM__MAF_TO) + printk("%s BLACK HOLE: Timeout for all responses\n", + err_print_prefix); + if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_CDT_TO) + printk("%s BLACK HOLE: Credit Timeout\n", err_print_prefix); + if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_CLK_HDR) + printk("%s BLACK HOLE: Clock check on header\n", + err_print_prefix); + if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_DBE_HDR) + printk("%s BLACK HOLE: Uncorrectable Error on header\n", + err_print_prefix); + if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_GBG_HDR) + printk("%s BLACK HOLE: Garbage on header\n", + err_print_prefix); + if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_BAD_CMD) + printk("%s BLACK HOLE: Bad EV7 command\n", + err_print_prefix); + + if (io->po7_error_sum & IO7__PO7_ERRSUM__ERR_LST) + printk("%s Lost Error\n", err_print_prefix); + + printk("%s Failing Packet:\n" + "%s Cycle 1: %016lx\n" + "%s Cycle 2: %016lx\n", + err_print_prefix, + err_print_prefix, io->po7_err_pkt0, + err_print_prefix, io->po7_err_pkt1); + /* + * If there are any valid bits in UNCRR sym for this err, + * print UNCRR_SYM as well. + */ +check_uncrr_sym: + if (uncrr_sym_valid) + marvel_print_po7_uncrr_sym(io->po7_uncrr_sym, uncrr_sym_valid); +} + +static void +marvel_print_pox_tlb_err(u64 tlb_err) +{ + static char *tlb_errors[] = { + "No Error", + "North Port Signaled Error fetching TLB entry", + "PTE invalid or UCC or GBG error on this entry", + "Address did not hit any DMA window" + }; + +#define IO7__POX_TLBERR__ERR_VALID (1UL << 63) +#define IO7__POX_TLBERR__ERRCODE__S (0) +#define IO7__POX_TLBERR__ERRCODE__M (0x3) +#define IO7__POX_TLBERR__ERR_TLB_PTR__S (3) +#define IO7__POX_TLBERR__ERR_TLB_PTR__M (0x7) +#define IO7__POX_TLBERR__FADDR__S (6) +#define IO7__POX_TLBERR__FADDR__M (0x3fffffffffful) + + if (!(tlb_err & IO7__POX_TLBERR__ERR_VALID)) + return; + + printk("%s TLB Error on index 0x%lx:\n" + "%s - %s\n" + "%s - Addr: 0x%016lx\n", + err_print_prefix, + EXTRACT(tlb_err, IO7__POX_TLBERR__ERR_TLB_PTR), + err_print_prefix, + tlb_errors[EXTRACT(tlb_err, IO7__POX_TLBERR__ERRCODE)], + err_print_prefix, + EXTRACT(tlb_err, IO7__POX_TLBERR__FADDR) << 6); +} + +static void +marvel_print_pox_spl_cmplt(u64 spl_cmplt) +{ + char message[80]; + +#define IO7__POX_SPLCMPLT__MESSAGE__S (0) +#define IO7__POX_SPLCMPLT__MESSAGE__M (0x0fffffffful) +#define IO7__POX_SPLCMPLT__SOURCE_BUS__S (40) +#define IO7__POX_SPLCMPLT__SOURCE_BUS__M (0xfful) +#define IO7__POX_SPLCMPLT__SOURCE_DEV__S (35) +#define IO7__POX_SPLCMPLT__SOURCE_DEV__M (0x1ful) +#define IO7__POX_SPLCMPLT__SOURCE_FUNC__S (32) +#define IO7__POX_SPLCMPLT__SOURCE_FUNC__M (0x07ul) + +#define IO7__POX_SPLCMPLT__MSG_CLASS__S (28) +#define IO7__POX_SPLCMPLT__MSG_CLASS__M (0xf) +#define IO7__POX_SPLCMPLT__MSG_INDEX__S (20) +#define IO7__POX_SPLCMPLT__MSG_INDEX__M (0xff) +#define IO7__POX_SPLCMPLT__MSG_CLASSINDEX__S (20) +#define IO7__POX_SPLCMPLT__MSG_CLASSINDEX__M (0xfff) +#define IO7__POX_SPLCMPLT__REM_LOWER_ADDR__S (12) +#define IO7__POX_SPLCMPLT__REM_LOWER_ADDR__M (0x7f) +#define IO7__POX_SPLCMPLT__REM_BYTE_COUNT__S (0) +#define IO7__POX_SPLCMPLT__REM_BYTE_COUNT__M (0xfff) + + printk("%s Split Completion Error:\n" + "%s Source (Bus:Dev:Func): %ld:%ld:%ld\n", + err_print_prefix, + err_print_prefix, + EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__SOURCE_BUS), + EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__SOURCE_DEV), + EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__SOURCE_FUNC)); + + switch(EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__MSG_CLASSINDEX)) { + case 0x000: + sprintf(message, "Normal 
completion"); + break; + case 0x100: + sprintf(message, "Bridge - Master Abort"); + break; + case 0x101: + sprintf(message, "Bridge - Target Abort"); + break; + case 0x102: + sprintf(message, "Bridge - Uncorrectable Write Data Error"); + break; + case 0x200: + sprintf(message, "Byte Count Out of Range"); + break; + case 0x201: + sprintf(message, "Uncorrectable Split Write Data Error"); + break; + default: + sprintf(message, "%08lx\n", + EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__MESSAGE)); + break; + } + printk("%s Message: %s\n", err_print_prefix, message); +} + +static void +marvel_print_pox_trans_sum(u64 trans_sum) +{ + char *pcix_cmd[] = { "Interrupt Acknowledge", + "Special Cycle", + "I/O Read", + "I/O Write", + "Reserved", + "Reserved / Device ID Message", + "Memory Read", + "Memory Write", + "Reserved / Alias to Memory Read Block", + "Reserved / Alias to Memory Write Block", + "Configuration Read", + "Configuration Write", + "Memory Read Multiple / Split Completion", + "Dual Address Cycle", + "Memory Read Line / Memory Read Block", + "Memory Write and Invalidate / Memory Write Block" + }; + +#define IO7__POX_TRANSUM__PCI_ADDR__S (0) +#define IO7__POX_TRANSUM__PCI_ADDR__M (0x3fffffffffffful) +#define IO7__POX_TRANSUM__DAC (1UL << 50) +#define IO7__POX_TRANSUM__PCIX_MASTER_SLOT__S (52) +#define IO7__POX_TRANSUM__PCIX_MASTER_SLOT__M (0xf) +#define IO7__POX_TRANSUM__PCIX_CMD__S (56) +#define IO7__POX_TRANSUM__PCIX_CMD__M (0xf) +#define IO7__POX_TRANSUM__ERR_VALID (1UL << 63) + + if (!(trans_sum & IO7__POX_TRANSUM__ERR_VALID)) + return; + + printk("%s Transaction Summary:\n" + "%s Command: 0x%lx - %s\n" + "%s Address: 0x%016lx%s\n" + "%s PCI-X Master Slot: 0x%lx\n", + err_print_prefix, + err_print_prefix, + EXTRACT(trans_sum, IO7__POX_TRANSUM__PCIX_CMD), + pcix_cmd[EXTRACT(trans_sum, IO7__POX_TRANSUM__PCIX_CMD)], + err_print_prefix, + EXTRACT(trans_sum, IO7__POX_TRANSUM__PCI_ADDR), + (trans_sum & IO7__POX_TRANSUM__DAC) ? 
" (DAC)" : "", + err_print_prefix, + EXTRACT(trans_sum, IO7__POX_TRANSUM__PCIX_MASTER_SLOT)); +} + +static void +marvel_print_pox_err(u64 err_sum, struct ev7_pal_io_one_port *port) +{ +#define IO7__POX_ERRSUM__AGP_REQQ_OVFL (1UL << 4) +#define IO7__POX_ERRSUM__AGP_SYNC_ERR (1UL << 5) +#define IO7__POX_ERRSUM__MRETRY_TO (1UL << 6) +#define IO7__POX_ERRSUM__PCIX_UX_SPL (1UL << 7) +#define IO7__POX_ERRSUM__PCIX_SPLIT_TO (1UL << 8) +#define IO7__POX_ERRSUM__PCIX_DISCARD_SPL (1UL << 9) +#define IO7__POX_ERRSUM__DMA_RD_TO (1UL << 10) +#define IO7__POX_ERRSUM__CSR_NXM_RD (1UL << 11) +#define IO7__POX_ERRSUM__CSR_NXM_WR (1UL << 12) +#define IO7__POX_ERRSUM__DMA_TO (1UL << 13) +#define IO7__POX_ERRSUM__ALL_MABORTS (1UL << 14) +#define IO7__POX_ERRSUM__MABORT (1UL << 15) +#define IO7__POX_ERRSUM__MABORT_MASK (IO7__POX_ERRSUM__ALL_MABORTS|\ + IO7__POX_ERRSUM__MABORT) +#define IO7__POX_ERRSUM__PT_TABORT (1UL << 16) +#define IO7__POX_ERRSUM__PM_TABORT (1UL << 17) +#define IO7__POX_ERRSUM__TABORT_MASK (IO7__POX_ERRSUM__PT_TABORT | \ + IO7__POX_ERRSUM__PM_TABORT) +#define IO7__POX_ERRSUM__SERR (1UL << 18) +#define IO7__POX_ERRSUM__ADDRERR_STB (1UL << 19) +#define IO7__POX_ERRSUM__DETECTED_SERR (1UL << 20) +#define IO7__POX_ERRSUM__PERR (1UL << 21) +#define IO7__POX_ERRSUM__DATAERR_STB_NIOW (1UL << 22) +#define IO7__POX_ERRSUM__DETECTED_PERR (1UL << 23) +#define IO7__POX_ERRSUM__PM_PERR (1UL << 24) +#define IO7__POX_ERRSUM__PT_SCERROR (1UL << 26) +#define IO7__POX_ERRSUM__HUNG_BUS (1UL << 28) +#define IO7__POX_ERRSUM__UPE_ERROR__S (51) +#define IO7__POX_ERRSUM__UPE_ERROR__M (0xffUL) +#define IO7__POX_ERRSUM__UPE_ERROR GEN_MASK(IO7__POX_ERRSUM__UPE_ERROR) +#define IO7__POX_ERRSUM__TLB_ERR (1UL << 59) +#define IO7__POX_ERRSUM__ERR_VALID (1UL << 63) + +#define IO7__POX_ERRSUM__TRANS_SUM__MASK (IO7__POX_ERRSUM__MRETRY_TO | \ + IO7__POX_ERRSUM__PCIX_UX_SPL | \ + IO7__POX_ERRSUM__PCIX_SPLIT_TO | \ + IO7__POX_ERRSUM__DMA_TO | \ + IO7__POX_ERRSUM__MABORT_MASK | \ + IO7__POX_ERRSUM__TABORT_MASK | \ + IO7__POX_ERRSUM__SERR | \ + IO7__POX_ERRSUM__ADDRERR_STB | \ + IO7__POX_ERRSUM__PERR | \ + IO7__POX_ERRSUM__DATAERR_STB_NIOW |\ + IO7__POX_ERRSUM__DETECTED_PERR | \ + IO7__POX_ERRSUM__PM_PERR | \ + IO7__POX_ERRSUM__PT_SCERROR | \ + IO7__POX_ERRSUM__UPE_ERROR) + + if (!(err_sum & IO7__POX_ERRSUM__ERR_VALID)) + return; + + /* + * First the transaction summary errors + */ + if (err_sum & IO7__POX_ERRSUM__MRETRY_TO) + printk("%s IO7 Master Retry Timeout expired\n", + err_print_prefix); + if (err_sum & IO7__POX_ERRSUM__PCIX_UX_SPL) + printk("%s Unexpected Split Completion\n", + err_print_prefix); + if (err_sum & IO7__POX_ERRSUM__PCIX_SPLIT_TO) + printk("%s IO7 Split Completion Timeout expired\n", + err_print_prefix); + if (err_sum & IO7__POX_ERRSUM__DMA_TO) + printk("%s Hung bus during DMA transaction\n", + err_print_prefix); + if (err_sum & IO7__POX_ERRSUM__MABORT_MASK) + printk("%s Master Abort\n", err_print_prefix); + if (err_sum & IO7__POX_ERRSUM__PT_TABORT) + printk("%s IO7 Asserted Target Abort\n", err_print_prefix); + if (err_sum & IO7__POX_ERRSUM__PM_TABORT) + printk("%s IO7 Received Target Abort\n", err_print_prefix); + if (err_sum & IO7__POX_ERRSUM__ADDRERR_STB) { + printk("%s Address or PCI-X Attribute Parity Error\n", + err_print_prefix); + if (err_sum & IO7__POX_ERRSUM__SERR) + printk("%s IO7 Asserted SERR\n", err_print_prefix); + } + if (err_sum & IO7__POX_ERRSUM__PERR) { + if (err_sum & IO7__POX_ERRSUM__DATAERR_STB_NIOW) + printk("%s IO7 Detected Data Parity Error\n", + err_print_prefix); + else + printk("%s 
Split Completion Response with " + "Parity Error\n", err_print_prefix); + } + if (err_sum & IO7__POX_ERRSUM__DETECTED_PERR) + printk("%s PERR detected\n", err_print_prefix); + if (err_sum & IO7__POX_ERRSUM__PM_PERR) + printk("%s PERR while IO7 is master\n", err_print_prefix); + if (err_sum & IO7__POX_ERRSUM__PT_SCERROR) { + printk("%s IO7 Received Split Completion Error message\n", + err_print_prefix); + marvel_print_pox_spl_cmplt(port->pox_spl_cmplt); + } + if (err_sum & IO7__POX_ERRSUM__UPE_ERROR) { + unsigned int upe_error = EXTRACT(err_sum, + IO7__POX_ERRSUM__UPE_ERROR); + int i; + static char *upe_errors[] = { + "Parity Error on MSI write data", + "MSI read (MSI window is write only", + "TLB - Invalid WR transaction", + "TLB - Invalid RD transaction", + "DMA - WR error (see north port)", + "DMA - RD error (see north port)", + "PPR - WR error (see north port)", + "PPR - RD error (see north port)" + }; + + printk("%s UPE Error:\n", err_print_prefix); + for (i = 0; i < 8; i++) { + if (upe_error & (1 << i)) + printk("%s %s\n", err_print_prefix, + upe_errors[i]); + } + } + + /* + * POx_TRANS_SUM, if appropriate. + */ + if (err_sum & IO7__POX_ERRSUM__TRANS_SUM__MASK) + marvel_print_pox_trans_sum(port->pox_trans_sum); + + /* + * Then TLB_ERR. + */ + if (err_sum & IO7__POX_ERRSUM__TLB_ERR) { + printk("%s TLB ERROR\n", err_print_prefix); + marvel_print_pox_tlb_err(port->pox_tlb_err); + } + + /* + * And the single bit status errors. + */ + if (err_sum & IO7__POX_ERRSUM__AGP_REQQ_OVFL) + printk("%s AGP Request Queue Overflow\n", err_print_prefix); + if (err_sum & IO7__POX_ERRSUM__AGP_SYNC_ERR) + printk("%s AGP Sync Error\n", err_print_prefix); + if (err_sum & IO7__POX_ERRSUM__PCIX_DISCARD_SPL) + printk("%s Discarded split completion\n", err_print_prefix); + if (err_sum & IO7__POX_ERRSUM__DMA_RD_TO) + printk("%s DMA Read Timeout\n", err_print_prefix); + if (err_sum & IO7__POX_ERRSUM__CSR_NXM_RD) + printk("%s CSR NXM READ\n", err_print_prefix); + if (err_sum & IO7__POX_ERRSUM__CSR_NXM_WR) + printk("%s CSR NXM WRITE\n", err_print_prefix); + if (err_sum & IO7__POX_ERRSUM__DETECTED_SERR) + printk("%s SERR detected\n", err_print_prefix); + if (err_sum & IO7__POX_ERRSUM__HUNG_BUS) + printk("%s HUNG BUS detected\n", err_print_prefix); +} + +#endif /* CONFIG_VERBOSE_MCHECK */ + +static struct ev7_pal_io_subpacket * +marvel_find_io7_with_error(struct ev7_lf_subpackets *lf_subpackets) +{ + struct ev7_pal_io_subpacket *io = lf_subpackets->io; + struct io7 *io7; + int i; + + /* + * Caller must provide the packet to fill + */ + if (!io) + return NULL; + + /* + * Fill the subpacket with the console's standard fill pattern + */ + memset(io, 0x55, sizeof(*io)); + + for (io7 = NULL; NULL != (io7 = marvel_next_io7(io7)); ) { + unsigned long err_sum = 0; + + err_sum |= io7->csrs->PO7_ERROR_SUM.csr; + for (i = 0; i < IO7_NUM_PORTS; i++) { + if (!io7->ports[i].enabled) + continue; + err_sum |= io7->ports[i].csrs->POx_ERR_SUM.csr; + } + + /* + * Is there at least one error? + */ + if (err_sum & (1UL << 63)) + break; + } + + /* + * Did we find an IO7 with an error? + */ + if (!io7) + return NULL; + + /* + * We have an IO7 with an error. + * + * Fill in the IO subpacket. 
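+	 *
+	 * Note the ack protocol used below: the error registers are
+	 * write-1-to-clear, POx_ERR_SUM must be written back last, and
+	 * the dummy read after the mb() forces the posted write out:
+	 *
+	 *	csrs->POx_ERR_SUM.csr = io->ports[i].pox_err_sum;
+	 *	mb();
+	 *	csrs->POx_ERR_SUM.csr;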
+	 */
+	io->io_asic_rev = io7->csrs->IO_ASIC_REV.csr;
+	io->io_sys_rev = io7->csrs->IO_SYS_REV.csr;
+	io->io7_uph = io7->csrs->IO7_UPH.csr;
+	io->hpi_ctl = io7->csrs->HPI_CTL.csr;
+	io->crd_ctl = io7->csrs->CRD_CTL.csr;
+	io->hei_ctl = io7->csrs->HEI_CTL.csr;
+	io->po7_error_sum = io7->csrs->PO7_ERROR_SUM.csr;
+	io->po7_uncrr_sym = io7->csrs->PO7_UNCRR_SYM.csr;
+	io->po7_crrct_sym = io7->csrs->PO7_CRRCT_SYM.csr;
+	io->po7_ugbge_sym = io7->csrs->PO7_UGBGE_SYM.csr;
+	io->po7_err_pkt0 = io7->csrs->PO7_ERR_PKT[0].csr;
+	io->po7_err_pkt1 = io7->csrs->PO7_ERR_PKT[1].csr;
+
+	for (i = 0; i < IO7_NUM_PORTS; i++) {
+		io7_ioport_csrs *csrs = io7->ports[i].csrs;
+
+		if (!io7->ports[i].enabled)
+			continue;
+
+		io->ports[i].pox_err_sum = csrs->POx_ERR_SUM.csr;
+		io->ports[i].pox_tlb_err = csrs->POx_TLB_ERR.csr;
+		io->ports[i].pox_spl_cmplt = csrs->POx_SPL_COMPLT.csr;
+		io->ports[i].pox_trans_sum = csrs->POx_TRANS_SUM.csr;
+		io->ports[i].pox_first_err = csrs->POx_FIRST_ERR.csr;
+		io->ports[i].pox_mult_err = csrs->POx_MULT_ERR.csr;
+		io->ports[i].pox_dm_source = csrs->POx_DM_SOURCE.csr;
+		io->ports[i].pox_dm_dest = csrs->POx_DM_DEST.csr;
+		io->ports[i].pox_dm_size = csrs->POx_DM_SIZE.csr;
+		io->ports[i].pox_dm_ctrl = csrs->POx_DM_CTRL.csr;
+
+		/*
+		 * Ack this port's errors, if any.  POx_ERR_SUM must be last.
+		 *
+		 * Most of the error registers get cleared and unlocked when
+		 * the associated bits in POx_ERR_SUM are cleared (by writing
+		 * 1).  POx_TLB_ERR is an exception and must be explicitly
+		 * cleared.
+		 */
+		csrs->POx_TLB_ERR.csr = io->ports[i].pox_tlb_err;
+		csrs->POx_ERR_SUM.csr = io->ports[i].pox_err_sum;
+		mb();
+		csrs->POx_ERR_SUM.csr;
+	}
+
+	/*
+	 * Ack any port 7 error(s).
+	 */
+	io7->csrs->PO7_ERROR_SUM.csr = io->po7_error_sum;
+	mb();
+	io7->csrs->PO7_ERROR_SUM.csr;
+
+	/*
+	 * Correct the io7_pid.
+	 */
+	lf_subpackets->io_pid = io7->pe;
+
+	return io;
+}
+
+static int
+marvel_process_io_error(struct ev7_lf_subpackets *lf_subpackets, int print)
+{
+	int status = MCHK_DISPOSITION_UNKNOWN_ERROR;
+
+#ifdef CONFIG_VERBOSE_MCHECK
+	struct ev7_pal_io_subpacket *io = lf_subpackets->io;
+	int i;
+#endif /* CONFIG_VERBOSE_MCHECK */
+
+#define MARVEL_IO_ERR_VALID(x)	((x) & (1UL << 63))
+
+	if (!lf_subpackets->logout || !lf_subpackets->io)
+		return status;
+
+	/*
+	 * The PALcode only builds an IO subpacket if there is a
+	 * locally connected IO7.  In the cases of
+	 *	1) a uniprocessor kernel
+	 *	2) an mp kernel before the local secondary has called in
+	 * error interrupts are all directed to the primary processor.
+	 * In that case, we may not have an IO subpacket at all and, even
+	 * if we do, it may not be the right one.
+	 *
+	 * If the RBOX indicates an I/O error interrupt, make sure we have
+	 * the correct IO7 information.  If we don't have an IO subpacket
+	 * or it's the wrong one, try to find the right one.
+	 *
+	 * RBOX I/O error interrupts are indicated by RBOX_INT<29> and
+	 * RBOX_INT<10>.
+	 */
+	if ((lf_subpackets->io->po7_error_sum & (1UL << 32)) ||
+	    ((lf_subpackets->io->po7_error_sum |
+	      lf_subpackets->io->ports[0].pox_err_sum |
+	      lf_subpackets->io->ports[1].pox_err_sum |
+	      lf_subpackets->io->ports[2].pox_err_sum |
+	      lf_subpackets->io->ports[3].pox_err_sum) & (1UL << 63))) {
+		/*
+		 * Either we have no IO subpacket or no error is
+		 * indicated in the one we do have.  Try to find the
+		 * one with the error.
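+		 *
+		 * (marvel_find_io7_with_error() walks every IO7 via
+		 * marvel_next_io7() and claims the first one whose
+		 * PO7_ERROR_SUM or enabled-port POx_ERR_SUM has the
+		 * error-valid bit, bit <63>, set.)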
+ */ + if (!marvel_find_io7_with_error(lf_subpackets)) + return status; + } + + /* + * We have an IO7 indicating an error - we're going to report it + */ + status = MCHK_DISPOSITION_REPORT; + +#ifdef CONFIG_VERBOSE_MCHECK + + if (!print) + return status; + + printk("%s*Error occurred on IO7 at PID %u\n", + err_print_prefix, lf_subpackets->io_pid); + + /* + * Check port 7 first + */ + if (lf_subpackets->io->po7_error_sum & IO7__PO7_ERRSUM__ERR_MASK) { + marvel_print_po7_err_sum(io); + +#if 0 + printk("%s PORT 7 ERROR:\n" + "%s PO7_ERROR_SUM: %016lx\n" + "%s PO7_UNCRR_SYM: %016lx\n" + "%s PO7_CRRCT_SYM: %016lx\n" + "%s PO7_UGBGE_SYM: %016lx\n" + "%s PO7_ERR_PKT0: %016lx\n" + "%s PO7_ERR_PKT1: %016lx\n", + err_print_prefix, + err_print_prefix, io->po7_error_sum, + err_print_prefix, io->po7_uncrr_sym, + err_print_prefix, io->po7_crrct_sym, + err_print_prefix, io->po7_ugbge_sym, + err_print_prefix, io->po7_err_pkt0, + err_print_prefix, io->po7_err_pkt1); +#endif + } + + /* + * Then loop through the ports + */ + for (i = 0; i < IO7_NUM_PORTS; i++) { + if (!MARVEL_IO_ERR_VALID(io->ports[i].pox_err_sum)) + continue; + + printk("%s PID %u PORT %d POx_ERR_SUM: %016lx\n", + err_print_prefix, + lf_subpackets->io_pid, i, io->ports[i].pox_err_sum); + marvel_print_pox_err(io->ports[i].pox_err_sum, &io->ports[i]); + + printk("%s [ POx_FIRST_ERR: %016lx ]\n", + err_print_prefix, io->ports[i].pox_first_err); + marvel_print_pox_err(io->ports[i].pox_first_err, + &io->ports[i]); + + } + + +#endif /* CONFIG_VERBOSE_MCHECK */ + + return status; +} + +static int +marvel_process_logout_frame(struct ev7_lf_subpackets *lf_subpackets, int print) +{ + int status = MCHK_DISPOSITION_UNKNOWN_ERROR; + + /* + * I/O error? + */ +#define EV7__RBOX_INT__IO_ERROR__MASK 0x20000400ul + if (lf_subpackets->logout && + (lf_subpackets->logout->rbox_int & 0x20000400ul)) + status = marvel_process_io_error(lf_subpackets, print); + + /* + * Probing behind PCI-X bridges can cause machine checks on + * Marvel when the probe is handled by the bridge as a split + * completion transaction. The symptom is an ERROR_RESPONSE + * to a CONFIG address. Since these errors will happen in + * normal operation, dismiss them. 
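+	 *
+	 * The single mask compare below folds these conditions together;
+	 * bit <42> of the mask (0x40000000000) is the I/O select and the
+	 * 0xff byte covers the config-space address bits.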
+	 *
+	 * Dismiss if:
+	 *	C_STAT		= 0x14		(Error Response)
+	 *	C_STS<3>	= 0		(C_ADDR valid)
+	 *	C_ADDR<42>	= 1		(I/O)
+	 *	C_ADDR<31:22>	= 111110xxb	(PCI Config space)
+	 */
+	if (lf_subpackets->ev7 &&
+	    (lf_subpackets->ev7->c_stat == 0x14) &&
+	    !(lf_subpackets->ev7->c_sts & 0x8) &&
+	    ((lf_subpackets->ev7->c_addr & 0x400ff000000ul)
+	     == 0x400fe000000ul))
+		status = MCHK_DISPOSITION_DISMISS;
+
+	return status;
+}
+
+void
+marvel_machine_check(u64 vector, u64 la_ptr, struct pt_regs *regs)
+{
+	struct el_subpacket *el_ptr = (struct el_subpacket *)la_ptr;
+	int (*process_frame)(struct ev7_lf_subpackets *, int) = NULL;
+	struct ev7_lf_subpackets subpacket_collection = { NULL, };
+	struct ev7_pal_io_subpacket scratch_io_packet = { 0, };
+	struct ev7_lf_subpackets *lf_subpackets = NULL;
+	int disposition = MCHK_DISPOSITION_UNKNOWN_ERROR;
+	char *saved_err_prefix = err_print_prefix;
+	char *error_type = NULL;
+
+	/*
+	 * Sync the processor
+	 */
+	mb();
+	draina();
+
+	switch(vector) {
+	case SCB_Q_SYSEVENT:
+		process_frame = marvel_process_680_frame;
+		error_type = "System Event";
+		break;
+
+	case SCB_Q_SYSMCHK:
+		process_frame = marvel_process_logout_frame;
+		error_type = "System Uncorrectable Error";
+		break;
+
+	case SCB_Q_SYSERR:
+		process_frame = marvel_process_logout_frame;
+		error_type = "System Correctable Error";
+		break;
+
+	default:
+		/* Don't know it - pass it up. */
+		ev7_machine_check(vector, la_ptr, regs);
+		return;
+	}
+
+	/*
+	 * A system event or error has occurred, handle it here.
+	 *
+	 * Any errors in the logout frame have already been cleared by the
+	 * PALcode, so just parse it.
+	 */
+	err_print_prefix = KERN_CRIT;
+
+	/*
+	 * Parse the logout frame without printing first. If the only error(s)
+	 * found are classified as "dismissible", then just dismiss them and
+	 * don't print any message
+	 */
+	lf_subpackets =
+		ev7_collect_logout_frame_subpackets(el_ptr,
+						    &subpacket_collection);
+	if (process_frame && lf_subpackets && lf_subpackets->logout) {
+		/*
+		 * We might not have the correct (or any) I/O subpacket.
+		 * [ See marvel_process_io_error() for explanation. ]
+		 * If we don't have one, point the io subpacket in
+		 * lf_subpackets at scratch_io_packet so that
+		 * marvel_find_io7_with_error() will have someplace to
+		 * store the info.
+		 */
+		if (!lf_subpackets->io)
+			lf_subpackets->io = &scratch_io_packet;
+
+		/*
+		 * Default io_pid to the processor reporting the error
+		 * [this will get changed in marvel_find_io7_with_error()
+		 *  if a different one is needed]
+		 */
+		lf_subpackets->io_pid = lf_subpackets->logout->whami;
+
+		/*
+		 * Evaluate the frames.
+		 */
+		disposition = process_frame(lf_subpackets, 0);
+	}
+	switch(disposition) {
+	case MCHK_DISPOSITION_DISMISS:
+		/* Nothing to do. */
+		break;
+
+	case MCHK_DISPOSITION_REPORT:
+		/* Recognized error, report it. */
+		printk("%s*%s (Vector 0x%x) reported on CPU %d\n",
+		       err_print_prefix, error_type,
+		       (unsigned int)vector, (int)smp_processor_id());
+		el_print_timestamp(&lf_subpackets->logout->timestamp);
+		process_frame(lf_subpackets, 1);
+		break;

+	default:
+		/* Unknown - dump the annotated subpackets. */
+		printk("%s*%s (Vector 0x%x) reported on CPU %d\n",
+		       err_print_prefix, error_type,
+		       (unsigned int)vector, (int)smp_processor_id());
+		el_process_subpacket(el_ptr);
+		break;
+
+	}
+
+	err_print_prefix = saved_err_prefix;
+
+	/* Release the logout frame.
*/ + wrmces(0x7); + mb(); +} + +void +marvel_register_error_handlers(void) +{ + ev7_register_error_handlers(); +} diff --git a/arch/alpha/kernel/err_titan.c b/arch/alpha/kernel/err_titan.c new file mode 100644 index 0000000..7e6720d --- /dev/null +++ b/arch/alpha/kernel/err_titan.c @@ -0,0 +1,756 @@ +/* + * linux/arch/alpha/kernel/err_titan.c + * + * Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation) + * + * Error handling code supporting TITAN systems + */ + +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/sched.h> + +#include <asm/io.h> +#include <asm/core_titan.h> +#include <asm/hwrpb.h> +#include <asm/smp.h> +#include <asm/err_common.h> +#include <asm/err_ev6.h> + +#include "err_impl.h" +#include "proto.h" + + +static int +titan_parse_c_misc(u64 c_misc, int print) +{ +#ifdef CONFIG_VERBOSE_MCHECK + char *src; + int nxs = 0; +#endif + int status = MCHK_DISPOSITION_REPORT; + +#define TITAN__CCHIP_MISC__NXM (1UL << 28) +#define TITAN__CCHIP_MISC__NXS__S (29) +#define TITAN__CCHIP_MISC__NXS__M (0x7) + + if (!(c_misc & TITAN__CCHIP_MISC__NXM)) + return MCHK_DISPOSITION_UNKNOWN_ERROR; + +#ifdef CONFIG_VERBOSE_MCHECK + if (!print) + return status; + + nxs = EXTRACT(c_misc, TITAN__CCHIP_MISC__NXS); + switch(nxs) { + case 0: /* CPU 0 */ + case 1: /* CPU 1 */ + case 2: /* CPU 2 */ + case 3: /* CPU 3 */ + src = "CPU"; + /* num is already the CPU number */ + break; + case 4: /* Pchip 0 */ + case 5: /* Pchip 1 */ + src = "Pchip"; + nxs -= 4; + break; + default:/* reserved */ + src = "Unknown, NXS ="; + /* leave num untouched */ + break; + } + + printk("%s Non-existent memory access from: %s %d\n", + err_print_prefix, src, nxs); +#endif /* CONFIG_VERBOSE_MCHECK */ + + return status; +} + +static int +titan_parse_p_serror(int which, u64 serror, int print) +{ + int status = MCHK_DISPOSITION_REPORT; + +#ifdef CONFIG_VERBOSE_MCHECK + char *serror_src[] = {"GPCI", "APCI", "AGP HP", "AGP LP"}; + char *serror_cmd[] = {"DMA Read", "DMA RMW", "SGTE Read", "Reserved"}; +#endif /* CONFIG_VERBOSE_MCHECK */ + +#define TITAN__PCHIP_SERROR__LOST_UECC (1UL << 0) +#define TITAN__PCHIP_SERROR__UECC (1UL << 1) +#define TITAN__PCHIP_SERROR__CRE (1UL << 2) +#define TITAN__PCHIP_SERROR__NXIO (1UL << 3) +#define TITAN__PCHIP_SERROR__LOST_CRE (1UL << 4) +#define TITAN__PCHIP_SERROR__ECCMASK (TITAN__PCHIP_SERROR__UECC | \ + TITAN__PCHIP_SERROR__CRE) +#define TITAN__PCHIP_SERROR__ERRMASK (TITAN__PCHIP_SERROR__LOST_UECC | \ + TITAN__PCHIP_SERROR__UECC | \ + TITAN__PCHIP_SERROR__CRE | \ + TITAN__PCHIP_SERROR__NXIO | \ + TITAN__PCHIP_SERROR__LOST_CRE) +#define TITAN__PCHIP_SERROR__SRC__S (52) +#define TITAN__PCHIP_SERROR__SRC__M (0x3) +#define TITAN__PCHIP_SERROR__CMD__S (54) +#define TITAN__PCHIP_SERROR__CMD__M (0x3) +#define TITAN__PCHIP_SERROR__SYN__S (56) +#define TITAN__PCHIP_SERROR__SYN__M (0xff) +#define TITAN__PCHIP_SERROR__ADDR__S (15) +#define TITAN__PCHIP_SERROR__ADDR__M (0xffffffffUL) + + if (!(serror & TITAN__PCHIP_SERROR__ERRMASK)) + return MCHK_DISPOSITION_UNKNOWN_ERROR; + +#ifdef CONFIG_VERBOSE_MCHECK + if (!print) + return status; + + printk("%s PChip %d SERROR: %016lx\n", + err_print_prefix, which, serror); + if (serror & TITAN__PCHIP_SERROR__ECCMASK) { + printk("%s %sorrectable ECC Error:\n" + " Source: %-6s Command: %-8s Syndrome: 0x%08x\n" + " Address: 0x%lx\n", + err_print_prefix, + (serror & TITAN__PCHIP_SERROR__UECC) ? 
"Unc" : "C", + serror_src[EXTRACT(serror, TITAN__PCHIP_SERROR__SRC)], + serror_cmd[EXTRACT(serror, TITAN__PCHIP_SERROR__CMD)], + (unsigned)EXTRACT(serror, TITAN__PCHIP_SERROR__SYN), + EXTRACT(serror, TITAN__PCHIP_SERROR__ADDR)); + } + if (serror & TITAN__PCHIP_SERROR__NXIO) + printk("%s Non Existent I/O Error\n", err_print_prefix); + if (serror & TITAN__PCHIP_SERROR__LOST_UECC) + printk("%s Lost Uncorrectable ECC Error\n", + err_print_prefix); + if (serror & TITAN__PCHIP_SERROR__LOST_CRE) + printk("%s Lost Correctable ECC Error\n", err_print_prefix); +#endif /* CONFIG_VERBOSE_MCHECK */ + + return status; +} + +static int +titan_parse_p_perror(int which, int port, u64 perror, int print) +{ + int cmd; + unsigned long addr; + int status = MCHK_DISPOSITION_REPORT; + +#ifdef CONFIG_VERBOSE_MCHECK + char *perror_cmd[] = { "Interrupt Acknowledge", "Special Cycle", + "I/O Read", "I/O Write", + "Reserved", "Reserved", + "Memory Read", "Memory Write", + "Reserved", "Reserved", + "Configuration Read", "Configuration Write", + "Memory Read Multiple", "Dual Address Cycle", + "Memory Read Line","Memory Write and Invalidate" + }; +#endif /* CONFIG_VERBOSE_MCHECK */ + +#define TITAN__PCHIP_PERROR__LOST (1UL << 0) +#define TITAN__PCHIP_PERROR__SERR (1UL << 1) +#define TITAN__PCHIP_PERROR__PERR (1UL << 2) +#define TITAN__PCHIP_PERROR__DCRTO (1UL << 3) +#define TITAN__PCHIP_PERROR__SGE (1UL << 4) +#define TITAN__PCHIP_PERROR__APE (1UL << 5) +#define TITAN__PCHIP_PERROR__TA (1UL << 6) +#define TITAN__PCHIP_PERROR__DPE (1UL << 7) +#define TITAN__PCHIP_PERROR__NDS (1UL << 8) +#define TITAN__PCHIP_PERROR__IPTPR (1UL << 9) +#define TITAN__PCHIP_PERROR__IPTPW (1UL << 10) +#define TITAN__PCHIP_PERROR__ERRMASK (TITAN__PCHIP_PERROR__LOST | \ + TITAN__PCHIP_PERROR__SERR | \ + TITAN__PCHIP_PERROR__PERR | \ + TITAN__PCHIP_PERROR__DCRTO | \ + TITAN__PCHIP_PERROR__SGE | \ + TITAN__PCHIP_PERROR__APE | \ + TITAN__PCHIP_PERROR__TA | \ + TITAN__PCHIP_PERROR__DPE | \ + TITAN__PCHIP_PERROR__NDS | \ + TITAN__PCHIP_PERROR__IPTPR | \ + TITAN__PCHIP_PERROR__IPTPW) +#define TITAN__PCHIP_PERROR__DAC (1UL << 47) +#define TITAN__PCHIP_PERROR__MWIN (1UL << 48) +#define TITAN__PCHIP_PERROR__CMD__S (52) +#define TITAN__PCHIP_PERROR__CMD__M (0x0f) +#define TITAN__PCHIP_PERROR__ADDR__S (14) +#define TITAN__PCHIP_PERROR__ADDR__M (0x1fffffffful) + + if (!(perror & TITAN__PCHIP_PERROR__ERRMASK)) + return MCHK_DISPOSITION_UNKNOWN_ERROR; + + cmd = EXTRACT(perror, TITAN__PCHIP_PERROR__CMD); + addr = EXTRACT(perror, TITAN__PCHIP_PERROR__ADDR) << 2; + + /* + * Initializing the BIOS on a video card on a bus without + * a south bridge (subtractive decode agent) can result in + * master aborts as the BIOS probes the capabilities of the + * card. XFree86 does such initialization. If the error + * is a master abort (No DevSel as PCI Master) and the command + * is an I/O read or write below the address where we start + * assigning PCI I/O spaces (SRM uses 0x1000), then mark the + * error as dismissable so starting XFree86 doesn't result + * in a series of uncorrectable errors being reported. Also + * dismiss master aborts to VGA frame buffer space + * (0xA0000 - 0xC0000) and legacy BIOS space (0xC0000 - 0x100000) + * for the same reason. + * + * Also mark the error dismissible if it looks like the right + * error but only the Lost bit is set. 
Since the BIOS initialization + * can cause multiple master aborts and the error interrupt can + * be handled on a different CPU than the BIOS code is run on, + * it is possible for a second master abort to occur between the + * time the PALcode reads PERROR and the time it writes PERROR + * to acknowledge the error. If this timing happens, a second + * error will be signalled after the first, and if no additional + * errors occur, will look like a Lost error with no additional + * errors on the same transaction as the previous error. + */ + if (((perror & TITAN__PCHIP_PERROR__NDS) || + ((perror & TITAN__PCHIP_PERROR__ERRMASK) == + TITAN__PCHIP_PERROR__LOST)) && + ((((cmd & 0xE) == 2) && (addr < 0x1000)) || + (((cmd & 0xE) == 6) && (addr >= 0xA0000) && (addr < 0x100000)))) { + status = MCHK_DISPOSITION_DISMISS; + } + +#ifdef CONFIG_VERBOSE_MCHECK + if (!print) + return status; + + printk("%s PChip %d %cPERROR: %016lx\n", + err_print_prefix, which, + port ? 'A' : 'G', perror); + if (perror & TITAN__PCHIP_PERROR__IPTPW) + printk("%s Invalid Peer-to-Peer Write\n", err_print_prefix); + if (perror & TITAN__PCHIP_PERROR__IPTPR) + printk("%s Invalid Peer-to-Peer Read\n", err_print_prefix); + if (perror & TITAN__PCHIP_PERROR__NDS) + printk("%s No DEVSEL as PCI Master [Master Abort]\n", + err_print_prefix); + if (perror & TITAN__PCHIP_PERROR__DPE) + printk("%s Data Parity Error\n", err_print_prefix); + if (perror & TITAN__PCHIP_PERROR__TA) + printk("%s Target Abort\n", err_print_prefix); + if (perror & TITAN__PCHIP_PERROR__APE) + printk("%s Address Parity Error\n", err_print_prefix); + if (perror & TITAN__PCHIP_PERROR__SGE) + printk("%s Scatter-Gather Error, Invalid PTE\n", + err_print_prefix); + if (perror & TITAN__PCHIP_PERROR__DCRTO) + printk("%s Delayed-Completion Retry Timeout\n", + err_print_prefix); + if (perror & TITAN__PCHIP_PERROR__PERR) + printk("%s PERR Asserted\n", err_print_prefix); + if (perror & TITAN__PCHIP_PERROR__SERR) + printk("%s SERR Asserted\n", err_print_prefix); + if (perror & TITAN__PCHIP_PERROR__LOST) + printk("%s Lost Error\n", err_print_prefix); + printk("%s Command: 0x%x - %s\n" + " Address: 0x%lx\n", + err_print_prefix, + cmd, perror_cmd[cmd], + addr); + if (perror & TITAN__PCHIP_PERROR__DAC) + printk("%s Dual Address Cycle\n", err_print_prefix); + if (perror & TITAN__PCHIP_PERROR__MWIN) + printk("%s Hit in Monster Window\n", err_print_prefix); +#endif /* CONFIG_VERBOSE_MCHECK */ + + return status; +} + +static int +titan_parse_p_agperror(int which, u64 agperror, int print) +{ + int status = MCHK_DISPOSITION_REPORT; +#ifdef CONFIG_VERBOSE_MCHECK + int cmd, len; + unsigned long addr; + + char *agperror_cmd[] = { "Read (low-priority)", "Read (high-priority)", + "Write (low-priority)", + "Write (high-priority)", + "Reserved", "Reserved", + "Flush", "Fence" + }; +#endif /* CONFIG_VERBOSE_MCHECK */ + +#define TITAN__PCHIP_AGPERROR__LOST (1UL << 0) +#define TITAN__PCHIP_AGPERROR__LPQFULL (1UL << 1) +#define TITAN__PCHIP_AGPERROR__HPQFULL (1UL << 2) +#define TITAN__PCHIP_AGPERROR__RESCMD (1UL << 3) +#define TITAN__PCHIP_AGPERROR__IPTE (1UL << 4) +#define TITAN__PCHIP_AGPERROR__PTP (1UL << 5) +#define TITAN__PCHIP_AGPERROR__NOWINDOW (1UL << 6) +#define TITAN__PCHIP_AGPERROR__ERRMASK (TITAN__PCHIP_AGPERROR__LOST | \ + TITAN__PCHIP_AGPERROR__LPQFULL | \ + TITAN__PCHIP_AGPERROR__HPQFULL | \ + TITAN__PCHIP_AGPERROR__RESCMD | \ + TITAN__PCHIP_AGPERROR__IPTE | \ + TITAN__PCHIP_AGPERROR__PTP | \ + TITAN__PCHIP_AGPERROR__NOWINDOW) +#define TITAN__PCHIP_AGPERROR__DAC (1UL << 48) 
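+/*
+ * The ADDR field defined below is apparently kept in quadword units --
+ * the verbose decode shifts EXTRACT(agperror, TITAN__PCHIP_AGPERROR__ADDR)
+ * left by 3 to recover a byte address.
+ */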
+#define TITAN__PCHIP_AGPERROR__MWIN (1UL << 49) +#define TITAN__PCHIP_AGPERROR__FENCE (1UL << 59) +#define TITAN__PCHIP_AGPERROR__CMD__S (50) +#define TITAN__PCHIP_AGPERROR__CMD__M (0x07) +#define TITAN__PCHIP_AGPERROR__ADDR__S (15) +#define TITAN__PCHIP_AGPERROR__ADDR__M (0xffffffffUL) +#define TITAN__PCHIP_AGPERROR__LEN__S (53) +#define TITAN__PCHIP_AGPERROR__LEN__M (0x3f) + + if (!(agperror & TITAN__PCHIP_AGPERROR__ERRMASK)) + return MCHK_DISPOSITION_UNKNOWN_ERROR; + +#ifdef CONFIG_VERBOSE_MCHECK + if (!print) + return status; + + cmd = EXTRACT(agperror, TITAN__PCHIP_AGPERROR__CMD); + addr = EXTRACT(agperror, TITAN__PCHIP_AGPERROR__ADDR) << 3; + len = EXTRACT(agperror, TITAN__PCHIP_AGPERROR__LEN); + + printk("%s PChip %d AGPERROR: %016lx\n", err_print_prefix, + which, agperror); + if (agperror & TITAN__PCHIP_AGPERROR__NOWINDOW) + printk("%s No Window\n", err_print_prefix); + if (agperror & TITAN__PCHIP_AGPERROR__PTP) + printk("%s Peer-to-Peer set\n", err_print_prefix); + if (agperror & TITAN__PCHIP_AGPERROR__IPTE) + printk("%s Invalid PTE\n", err_print_prefix); + if (agperror & TITAN__PCHIP_AGPERROR__RESCMD) + printk("%s Reserved Command\n", err_print_prefix); + if (agperror & TITAN__PCHIP_AGPERROR__HPQFULL) + printk("%s HP Transaction Received while Queue Full\n", + err_print_prefix); + if (agperror & TITAN__PCHIP_AGPERROR__LPQFULL) + printk("%s LP Transaction Received while Queue Full\n", + err_print_prefix); + if (agperror & TITAN__PCHIP_AGPERROR__LOST) + printk("%s Lost Error\n", err_print_prefix); + printk("%s Command: 0x%x - %s, %d Quadwords%s\n" + " Address: 0x%lx\n", + err_print_prefix, cmd, agperror_cmd[cmd], len, + (agperror & TITAN__PCHIP_AGPERROR__FENCE) ? ", FENCE" : "", + addr); + if (agperror & TITAN__PCHIP_AGPERROR__DAC) + printk("%s Dual Address Cycle\n", err_print_prefix); + if (agperror & TITAN__PCHIP_AGPERROR__MWIN) + printk("%s Hit in Monster Window\n", err_print_prefix); +#endif /* CONFIG_VERBOSE_MCHECK */ + + return status; +} + +static int +titan_parse_p_chip(int which, u64 serror, u64 gperror, + u64 aperror, u64 agperror, int print) +{ + int status = MCHK_DISPOSITION_UNKNOWN_ERROR; + status |= titan_parse_p_serror(which, serror, print); + status |= titan_parse_p_perror(which, 0, gperror, print); + status |= titan_parse_p_perror(which, 1, aperror, print); + status |= titan_parse_p_agperror(which, agperror, print); + return status; +} + +int +titan_process_logout_frame(struct el_common *mchk_header, int print) +{ + struct el_TITAN_sysdata_mcheck *tmchk = + (struct el_TITAN_sysdata_mcheck *) + ((unsigned long)mchk_header + mchk_header->sys_offset); + int status = MCHK_DISPOSITION_UNKNOWN_ERROR; + + status |= titan_parse_c_misc(tmchk->c_misc, print); + status |= titan_parse_p_chip(0, tmchk->p0_serror, tmchk->p0_gperror, + tmchk->p0_aperror, tmchk->p0_agperror, + print); + status |= titan_parse_p_chip(1, tmchk->p1_serror, tmchk->p1_gperror, + tmchk->p1_aperror, tmchk->p1_agperror, + print); + + return status; +} + +void +titan_machine_check(u64 vector, u64 la_ptr, struct pt_regs *regs) +{ + struct el_common *mchk_header = (struct el_common *)la_ptr; + struct el_TITAN_sysdata_mcheck *tmchk = + (struct el_TITAN_sysdata_mcheck *) + ((unsigned long)mchk_header + mchk_header->sys_offset); + u64 irqmask; + + /* + * Mask of Titan interrupt sources which are reported as machine checks + * + * 63 - CChip Error + * 62 - PChip 0 H_Error + * 61 - PChip 1 H_Error + * 60 - PChip 0 C_Error + * 59 - PChip 1 C_Error + */ +#define TITAN_MCHECK_INTERRUPT_MASK 0xF800000000000000UL + + 
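	/*
	 * Sanity check on the constant: five contiguous sources from
	 * bit 59 up are 0x1fUL << 59 == 0xF800000000000000UL, so the
	 * mask covers exactly the CChip error and the four PChip
	 * H_Error/C_Error bits listed above, and nothing below bit 59.
	 */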
/* + * Sync the processor + */ + mb(); + draina(); + + /* + * Only handle system errors here + */ + if ((vector != SCB_Q_SYSMCHK) && (vector != SCB_Q_SYSERR)) { + ev6_machine_check(vector, la_ptr, regs); + return; + } + + /* + * It's a system error, handle it here + * + * The PALcode has already cleared the error, so just parse it + */ + + /* + * Parse the logout frame without printing first. If the only error(s) + * found are classified as "dismissable", then just dismiss them and + * don't print any message + */ + if (titan_process_logout_frame(mchk_header, 0) != + MCHK_DISPOSITION_DISMISS) { + char *saved_err_prefix = err_print_prefix; + err_print_prefix = KERN_CRIT; + + /* + * Either a nondismissable error was detected or no + * recognized error was detected in the logout frame + * -- report the error in either case + */ + printk("%s" + "*System %s Error (Vector 0x%x) reported on CPU %d:\n", + err_print_prefix, + (vector == SCB_Q_SYSERR)?"Correctable":"Uncorrectable", + (unsigned int)vector, (int)smp_processor_id()); + +#ifdef CONFIG_VERBOSE_MCHECK + titan_process_logout_frame(mchk_header, alpha_verbose_mcheck); + if (alpha_verbose_mcheck) + dik_show_regs(regs, NULL); +#endif /* CONFIG_VERBOSE_MCHECK */ + + err_print_prefix = saved_err_prefix; + + /* + * Convert any pending interrupts which report as system + * machine checks to interrupts + */ + irqmask = tmchk->c_dirx & TITAN_MCHECK_INTERRUPT_MASK; + titan_dispatch_irqs(irqmask, regs); + } + + + /* + * Release the logout frame + */ + wrmces(0x7); + mb(); +} + +/* + * Subpacket Annotations + */ +static char *el_titan_pchip0_extended_annotation[] = { + "Subpacket Header", "P0_SCTL", "P0_SERREN", + "P0_APCTL", "P0_APERREN", "P0_AGPERREN", + "P0_ASPRST", "P0_AWSBA0", "P0_AWSBA1", + "P0_AWSBA2", "P0_AWSBA3", "P0_AWSM0", + "P0_AWSM1", "P0_AWSM2", "P0_AWSM3", + "P0_ATBA0", "P0_ATBA1", "P0_ATBA2", + "P0_ATBA3", "P0_GPCTL", "P0_GPERREN", + "P0_GSPRST", "P0_GWSBA0", "P0_GWSBA1", + "P0_GWSBA2", "P0_GWSBA3", "P0_GWSM0", + "P0_GWSM1", "P0_GWSM2", "P0_GWSM3", + "P0_GTBA0", "P0_GTBA1", "P0_GTBA2", + "P0_GTBA3", NULL +}; +static char *el_titan_pchip1_extended_annotation[] = { + "Subpacket Header", "P1_SCTL", "P1_SERREN", + "P1_APCTL", "P1_APERREN", "P1_AGPERREN", + "P1_ASPRST", "P1_AWSBA0", "P1_AWSBA1", + "P1_AWSBA2", "P1_AWSBA3", "P1_AWSM0", + "P1_AWSM1", "P1_AWSM2", "P1_AWSM3", + "P1_ATBA0", "P1_ATBA1", "P1_ATBA2", + "P1_ATBA3", "P1_GPCTL", "P1_GPERREN", + "P1_GSPRST", "P1_GWSBA0", "P1_GWSBA1", + "P1_GWSBA2", "P1_GWSBA3", "P1_GWSM0", + "P1_GWSM1", "P1_GWSM2", "P1_GWSM3", + "P1_GTBA0", "P1_GTBA1", "P1_GTBA2", + "P1_GTBA3", NULL +}; +static char *el_titan_memory_extended_annotation[] = { + "Subpacket Header", "AAR0", "AAR1", + "AAR2", "AAR3", "P0_SCTL", + "P0_GPCTL", "P0_APCTL", "P1_SCTL", + "P1_GPCTL", "P1_SCTL", NULL +}; + +static struct el_subpacket_annotation el_titan_annotations[] = { + SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY, + EL_TYPE__REGATTA__TITAN_PCHIP0_EXTENDED, + 1, + "Titan PChip 0 Extended Frame", + el_titan_pchip0_extended_annotation), + SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY, + EL_TYPE__REGATTA__TITAN_PCHIP1_EXTENDED, + 1, + "Titan PChip 1 Extended Frame", + el_titan_pchip1_extended_annotation), + SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY, + EL_TYPE__REGATTA__TITAN_MEMORY_EXTENDED, + 1, + "Titan Memory Extended Frame", + el_titan_memory_extended_annotation), + SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY, + EL_TYPE__TERMINATION__TERMINATION, + 1, + "Termination Subpacket", + NULL) +}; + +static struct el_subpacket 
* +el_process_regatta_subpacket(struct el_subpacket *header) +{ + int status; + + if (header->class != EL_CLASS__REGATTA_FAMILY) { + printk("%s ** Unexpected header CLASS %d TYPE %d, aborting\n", + err_print_prefix, + header->class, header->type); + return NULL; + } + + switch(header->type) { + case EL_TYPE__REGATTA__PROCESSOR_ERROR_FRAME: + case EL_TYPE__REGATTA__SYSTEM_ERROR_FRAME: + case EL_TYPE__REGATTA__ENVIRONMENTAL_FRAME: + case EL_TYPE__REGATTA__PROCESSOR_DBL_ERROR_HALT: + case EL_TYPE__REGATTA__SYSTEM_DBL_ERROR_HALT: + printk("%s ** Occurred on CPU %d:\n", + err_print_prefix, + (int)header->by_type.regatta_frame.cpuid); + status = privateer_process_logout_frame((struct el_common *) + header->by_type.regatta_frame.data_start, 1); + break; + default: + printk("%s ** REGATTA TYPE %d SUBPACKET\n", + err_print_prefix, header->type); + el_annotate_subpacket(header); + break; + } + + + return (struct el_subpacket *)((unsigned long)header + header->length); +} + +static struct el_subpacket_handler titan_subpacket_handler = + SUBPACKET_HANDLER_INIT(EL_CLASS__REGATTA_FAMILY, + el_process_regatta_subpacket); + +void +titan_register_error_handlers(void) +{ + size_t i; + + for (i = 0; i < ARRAY_SIZE (el_titan_annotations); i++) + cdl_register_subpacket_annotation(&el_titan_annotations[i]); + + cdl_register_subpacket_handler(&titan_subpacket_handler); + + ev6_register_error_handlers(); +} + + +/* + * Privateer + */ + +static int +privateer_process_680_frame(struct el_common *mchk_header, int print) +{ + int status = MCHK_DISPOSITION_UNKNOWN_ERROR; +#ifdef CONFIG_VERBOSE_MCHECK + struct el_PRIVATEER_envdata_mcheck *emchk = + (struct el_PRIVATEER_envdata_mcheck *) + ((unsigned long)mchk_header + mchk_header->sys_offset); + + /* TODO - catagorize errors, for now, no error */ + + if (!print) + return status; + + /* TODO - decode instead of just dumping... */ + printk("%s Summary Flags: %016lx\n" + " CChip DIRx: %016lx\n" + " System Management IR: %016lx\n" + " CPU IR: %016lx\n" + " Power Supply IR: %016lx\n" + " LM78 Fault Status: %016lx\n" + " System Doors: %016lx\n" + " Temperature Warning: %016lx\n" + " Fan Control: %016lx\n" + " Fatal Power Down Code: %016lx\n", + err_print_prefix, + emchk->summary, + emchk->c_dirx, + emchk->smir, + emchk->cpuir, + emchk->psir, + emchk->fault, + emchk->sys_doors, + emchk->temp_warn, + emchk->fan_ctrl, + emchk->code); +#endif /* CONFIG_VERBOSE_MCHECK */ + + return status; +} + +int +privateer_process_logout_frame(struct el_common *mchk_header, int print) +{ + struct el_common_EV6_mcheck *ev6mchk = + (struct el_common_EV6_mcheck *)mchk_header; + int status = MCHK_DISPOSITION_UNKNOWN_ERROR; + + /* + * Machine check codes + */ +#define PRIVATEER_MCHK__CORR_ECC 0x86 /* 630 */ +#define PRIVATEER_MCHK__DC_TAG_PERR 0x9E /* 630 */ +#define PRIVATEER_MCHK__PAL_BUGCHECK 0x8E /* 670 */ +#define PRIVATEER_MCHK__OS_BUGCHECK 0x90 /* 670 */ +#define PRIVATEER_MCHK__PROC_HRD_ERR 0x98 /* 670 */ +#define PRIVATEER_MCHK__ISTREAM_CMOV_PRX 0xA0 /* 670 */ +#define PRIVATEER_MCHK__ISTREAM_CMOV_FLT 0xA2 /* 670 */ +#define PRIVATEER_MCHK__SYS_HRD_ERR 0x202 /* 660 */ +#define PRIVATEER_MCHK__SYS_CORR_ERR 0x204 /* 620 */ +#define PRIVATEER_MCHK__SYS_ENVIRON 0x206 /* 680 */ + + switch(ev6mchk->MCHK_Code) { + /* + * Vector 630 - Processor, Correctable + */ + case PRIVATEER_MCHK__CORR_ECC: + case PRIVATEER_MCHK__DC_TAG_PERR: + /* + * Fall through to vector 670 for processing... 
+ */ + /* + * Vector 670 - Processor, Uncorrectable + */ + case PRIVATEER_MCHK__PAL_BUGCHECK: + case PRIVATEER_MCHK__OS_BUGCHECK: + case PRIVATEER_MCHK__PROC_HRD_ERR: + case PRIVATEER_MCHK__ISTREAM_CMOV_PRX: + case PRIVATEER_MCHK__ISTREAM_CMOV_FLT: + status |= ev6_process_logout_frame(mchk_header, print); + break; + + /* + * Vector 620 - System, Correctable + */ + case PRIVATEER_MCHK__SYS_CORR_ERR: + /* + * Fall through to vector 660 for processing... + */ + /* + * Vector 660 - System, Uncorrectable + */ + case PRIVATEER_MCHK__SYS_HRD_ERR: + status |= titan_process_logout_frame(mchk_header, print); + break; + + /* + * Vector 680 - System, Environmental + */ + case PRIVATEER_MCHK__SYS_ENVIRON: /* System, Environmental */ + status |= privateer_process_680_frame(mchk_header, print); + break; + + /* + * Unknown + */ + default: + status |= MCHK_DISPOSITION_REPORT; + if (print) { + printk("%s** Unknown Error, frame follows\n", + err_print_prefix); + mchk_dump_logout_frame(mchk_header); + } + + } + + return status; +} + +void +privateer_machine_check(u64 vector, u64 la_ptr, struct pt_regs *regs) +{ + struct el_common *mchk_header = (struct el_common *)la_ptr; + struct el_TITAN_sysdata_mcheck *tmchk = + (struct el_TITAN_sysdata_mcheck *) + (la_ptr + mchk_header->sys_offset); + u64 irqmask; + char *saved_err_prefix = err_print_prefix; + +#define PRIVATEER_680_INTERRUPT_MASK (0xE00UL) +#define PRIVATEER_HOTPLUG_INTERRUPT_MASK (0xE00UL) + + /* + * Sync the processor. + */ + mb(); + draina(); + + /* + * Only handle system events here. + */ + if (vector != SCB_Q_SYSEVENT) + return titan_machine_check(vector, la_ptr, regs); + + /* + * Report the event - System Events should be reported even if no + * error is indicated since the event could indicate the return + * to normal status. + */ + err_print_prefix = KERN_CRIT; + printk("%s*System Event (Vector 0x%x) reported on CPU %d:\n", + err_print_prefix, + (unsigned int)vector, (int)smp_processor_id()); + privateer_process_680_frame(mchk_header, 1); + err_print_prefix = saved_err_prefix; + + /* + * Convert any pending interrupts which report as 680 machine + * checks to interrupts. + */ + irqmask = tmchk->c_dirx & PRIVATEER_680_INTERRUPT_MASK; + + /* + * Dispatch the interrupt(s). + */ + titan_dispatch_irqs(irqmask, regs); + + /* + * Release the logout frame. 
+ */ + wrmces(0x7); + mb(); +} diff --git a/arch/alpha/kernel/es1888.c b/arch/alpha/kernel/es1888.c new file mode 100644 index 0000000..d584c85 --- /dev/null +++ b/arch/alpha/kernel/es1888.c @@ -0,0 +1,49 @@ +/* + * linux/arch/alpha/kernel/es1888.c + * + * Init the built-in ES1888 sound chip (SB16 compatible) + */ + +#include <linux/init.h> +#include <asm/io.h> +#include "proto.h" + +void __init +es1888_init(void) +{ + /* Sequence of IO reads to init the audio controller */ + inb(0x0229); + inb(0x0229); + inb(0x0229); + inb(0x022b); + inb(0x0229); + inb(0x022b); + inb(0x0229); + inb(0x0229); + inb(0x022b); + inb(0x0229); + inb(0x0220); /* This sets the base address to 0x220 */ + + /* Sequence to set DMA channels */ + outb(0x01, 0x0226); /* reset */ + inb(0x0226); /* pause */ + outb(0x00, 0x0226); /* release reset */ + while (!(inb(0x022e) & 0x80)) /* wait for bit 7 to assert*/ + continue; + inb(0x022a); /* pause */ + outb(0xc6, 0x022c); /* enable extended mode */ + inb(0x022a); /* pause, also forces the write */ + while (inb(0x022c) & 0x80) /* wait for bit 7 to deassert */ + continue; + outb(0xb1, 0x022c); /* setup for write to Interrupt CR */ + while (inb(0x022c) & 0x80) /* wait for bit 7 to deassert */ + continue; + outb(0x14, 0x022c); /* set IRQ 5 */ + while (inb(0x022c) & 0x80) /* wait for bit 7 to deassert */ + continue; + outb(0xb2, 0x022c); /* setup for write to DMA CR */ + while (inb(0x022c) & 0x80) /* wait for bit 7 to deassert */ + continue; + outb(0x18, 0x022c); /* set DMA channel 1 */ + inb(0x022c); /* force the write */ +} diff --git a/arch/alpha/kernel/gct.c b/arch/alpha/kernel/gct.c new file mode 100644 index 0000000..8827687 --- /dev/null +++ b/arch/alpha/kernel/gct.c @@ -0,0 +1,48 @@ +/* + * linux/arch/alpha/kernel/gct.c + */ + +#include <linux/config.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> + +#include <asm/hwrpb.h> +#include <asm/gct.h> + +int +gct6_find_nodes(gct6_node *node, gct6_search_struct *search) +{ + gct6_search_struct *wanted; + int status = 0; + + /* First check the magic number. */ + if (node->magic != GCT_NODE_MAGIC) { + printk(KERN_ERR "GCT Node MAGIC incorrect - GCT invalid\n"); + return -EINVAL; + } + + /* Check against the search struct. */ + for (wanted = search; + wanted && (wanted->type | wanted->subtype); + wanted++) { + if (node->type != wanted->type) + continue; + if (node->subtype != wanted->subtype) + continue; + + /* Found it -- call out. */ + if (wanted->callout) + wanted->callout(node); + } + + /* Now walk the tree, siblings first. */ + if (node->next) + status |= gct6_find_nodes(GCT_NODE_PTR(node->next), search); + + /* Then the children. */ + if (node->child) + status |= gct6_find_nodes(GCT_NODE_PTR(node->child), search); + + return status; +} diff --git a/arch/alpha/kernel/head.S b/arch/alpha/kernel/head.S new file mode 100644 index 0000000..4ca2e40 --- /dev/null +++ b/arch/alpha/kernel/head.S @@ -0,0 +1,99 @@ +/* + * alpha/boot/head.S + * + * initial boot stuff.. At this point, the bootloader has already + * switched into OSF/1 PAL-code, and loaded us at the correct address + * (START_ADDR). So there isn't much left for us to do: just set up + * the kernel global pointer and jump to the kernel entry-point. 
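 *
 * Concretely, the code below loads the GP, points $8 at
 * init_thread_union (the boot task), sets the stack pointer $30 to
 * the top of that 16KB (0x4000) block less a struct pt_regs, and
 * jsr's to start_kernel -- halting if start_kernel ever returns.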
+ */ + +#include <linux/config.h> +#include <asm/system.h> +#include <asm/asm_offsets.h> + +.globl swapper_pg_dir +.globl _stext +swapper_pg_dir=SWAPPER_PGD + + .set noreorder + .globl __start + .ent __start +_stext: +__start: + .prologue 0 + br $27,1f +1: ldgp $29,0($27) + /* We need to get current_task_info loaded up... */ + lda $8,init_thread_union + /* ... and find our stack ... */ + lda $30,0x4000 - SIZEOF_PT_REGS($8) + /* ... and then we can start the kernel. */ + jsr $26,start_kernel + call_pal PAL_halt + .end __start + +#ifdef CONFIG_SMP + .align 3 + .globl __smp_callin + .ent __smp_callin + /* On entry here from SRM console, the HWPCB of the per-cpu + slot for this processor has been loaded. We've arranged + for the UNIQUE value for this process to contain the PCBB + of the target idle task. */ +__smp_callin: + .prologue 1 + ldgp $29,0($27) # First order of business, load the GP. + + call_pal PAL_rduniq # Grab the target PCBB. + mov $0,$16 # Install it. + call_pal PAL_swpctx + + lda $8,0x3fff # Find "current". + bic $30,$8,$8 + + jsr $26,smp_callin + call_pal PAL_halt + .end __smp_callin +#endif /* CONFIG_SMP */ + + # + # The following two functions are needed for supporting SRM PALcode + # on the PC164 (at least), since that PALcode manages the interrupt + # masking, and we cannot duplicate the effort without causing problems + # + + .align 3 + .globl cserve_ena + .ent cserve_ena +cserve_ena: + .prologue 0 + bis $16,$16,$17 + lda $16,52($31) + call_pal PAL_cserve + ret ($26) + .end cserve_ena + + .align 3 + .globl cserve_dis + .ent cserve_dis +cserve_dis: + .prologue 0 + bis $16,$16,$17 + lda $16,53($31) + call_pal PAL_cserve + ret ($26) + .end cserve_dis + + # + # It is handy, on occasion, to make halt actually just loop. + # Putting it here means we dont have to recompile the whole + # kernel. + # + + .align 3 + .globl halt + .ent halt +halt: + .prologue 0 + call_pal PAL_halt + .end halt diff --git a/arch/alpha/kernel/init_task.c b/arch/alpha/kernel/init_task.c new file mode 100644 index 0000000..835d09a --- /dev/null +++ b/arch/alpha/kernel/init_task.c @@ -0,0 +1,23 @@ +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/init.h> +#include <linux/init_task.h> +#include <linux/fs.h> +#include <linux/mqueue.h> +#include <asm/uaccess.h> + + +static struct fs_struct init_fs = INIT_FS; +static struct files_struct init_files = INIT_FILES; +static struct signal_struct init_signals = INIT_SIGNALS(init_signals); +static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); +struct mm_struct init_mm = INIT_MM(init_mm); +struct task_struct init_task = INIT_TASK(init_task); + +EXPORT_SYMBOL(init_mm); +EXPORT_SYMBOL(init_task); + +union thread_union init_thread_union + __attribute__((section(".data.init_thread"))) + = { INIT_THREAD_INFO(init_task) }; diff --git a/arch/alpha/kernel/io.c b/arch/alpha/kernel/io.c new file mode 100644 index 0000000..19c5875 --- /dev/null +++ b/arch/alpha/kernel/io.c @@ -0,0 +1,630 @@ +/* + * Alpha IO and memory functions. + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/string.h> +#include <linux/module.h> +#include <asm/io.h> + +/* Out-of-line versions of the i/o routines that redirect into the + platform-specific version. Note that "platform-specific" may mean + "generic", which bumps through the machine vector. 
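 *
 * Roughly how the indirection works (a sketch of the asm/io.h
 * machinery, from memory rather than verbatim):
 *
 *     #define _IO_CONCAT(a,b)  a ## _ ## b
 *     #define IO_CONCAT(a,b)   _IO_CONCAT(a,b)
 *
 * so with __IO_PREFIX set to, say, "generic" on a CONFIG_ALPHA_GENERIC
 * kernel, IO_CONCAT(__IO_PREFIX,ioread8)(addr) expands to
 * generic_ioread8(addr), which in turn dispatches through the
 * machine vector to the core-logic-specific routine.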
*/ + +unsigned int +ioread8(void __iomem *addr) +{ + unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr); + mb(); + return ret; +} + +unsigned int ioread16(void __iomem *addr) +{ + unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr); + mb(); + return ret; +} + +unsigned int ioread32(void __iomem *addr) +{ + unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr); + mb(); + return ret; +} + +void iowrite8(u8 b, void __iomem *addr) +{ + IO_CONCAT(__IO_PREFIX,iowrite8)(b, addr); + mb(); +} + +void iowrite16(u16 b, void __iomem *addr) +{ + IO_CONCAT(__IO_PREFIX,iowrite16)(b, addr); + mb(); +} + +void iowrite32(u32 b, void __iomem *addr) +{ + IO_CONCAT(__IO_PREFIX,iowrite32)(b, addr); + mb(); +} + +EXPORT_SYMBOL(ioread8); +EXPORT_SYMBOL(ioread16); +EXPORT_SYMBOL(ioread32); +EXPORT_SYMBOL(iowrite8); +EXPORT_SYMBOL(iowrite16); +EXPORT_SYMBOL(iowrite32); + +u8 inb(unsigned long port) +{ + return ioread8(ioport_map(port, 1)); +} + +u16 inw(unsigned long port) +{ + return ioread16(ioport_map(port, 2)); +} + +u32 inl(unsigned long port) +{ + return ioread32(ioport_map(port, 4)); +} + +void outb(u8 b, unsigned long port) +{ + iowrite8(b, ioport_map(port, 1)); +} + +void outw(u16 b, unsigned long port) +{ + iowrite16(b, ioport_map(port, 2)); +} + +void outl(u32 b, unsigned long port) +{ + iowrite32(b, ioport_map(port, 4)); +} + +EXPORT_SYMBOL(inb); +EXPORT_SYMBOL(inw); +EXPORT_SYMBOL(inl); +EXPORT_SYMBOL(outb); +EXPORT_SYMBOL(outw); +EXPORT_SYMBOL(outl); + +u8 __raw_readb(const volatile void __iomem *addr) +{ + return IO_CONCAT(__IO_PREFIX,readb)(addr); +} + +u16 __raw_readw(const volatile void __iomem *addr) +{ + return IO_CONCAT(__IO_PREFIX,readw)(addr); +} + +u32 __raw_readl(const volatile void __iomem *addr) +{ + return IO_CONCAT(__IO_PREFIX,readl)(addr); +} + +u64 __raw_readq(const volatile void __iomem *addr) +{ + return IO_CONCAT(__IO_PREFIX,readq)(addr); +} + +void __raw_writeb(u8 b, volatile void __iomem *addr) +{ + IO_CONCAT(__IO_PREFIX,writeb)(b, addr); +} + +void __raw_writew(u16 b, volatile void __iomem *addr) +{ + IO_CONCAT(__IO_PREFIX,writew)(b, addr); +} + +void __raw_writel(u32 b, volatile void __iomem *addr) +{ + IO_CONCAT(__IO_PREFIX,writel)(b, addr); +} + +void __raw_writeq(u64 b, volatile void __iomem *addr) +{ + IO_CONCAT(__IO_PREFIX,writeq)(b, addr); +} + +EXPORT_SYMBOL(__raw_readb); +EXPORT_SYMBOL(__raw_readw); +EXPORT_SYMBOL(__raw_readl); +EXPORT_SYMBOL(__raw_readq); +EXPORT_SYMBOL(__raw_writeb); +EXPORT_SYMBOL(__raw_writew); +EXPORT_SYMBOL(__raw_writel); +EXPORT_SYMBOL(__raw_writeq); + +u8 readb(const volatile void __iomem *addr) +{ + u8 ret = __raw_readb(addr); + mb(); + return ret; +} + +u16 readw(const volatile void __iomem *addr) +{ + u16 ret = __raw_readw(addr); + mb(); + return ret; +} + +u32 readl(const volatile void __iomem *addr) +{ + u32 ret = __raw_readl(addr); + mb(); + return ret; +} + +u64 readq(const volatile void __iomem *addr) +{ + u64 ret = __raw_readq(addr); + mb(); + return ret; +} + +void writeb(u8 b, volatile void __iomem *addr) +{ + __raw_writeb(b, addr); + mb(); +} + +void writew(u16 b, volatile void __iomem *addr) +{ + __raw_writew(b, addr); + mb(); +} + +void writel(u32 b, volatile void __iomem *addr) +{ + __raw_writel(b, addr); + mb(); +} + +void writeq(u64 b, volatile void __iomem *addr) +{ + __raw_writeq(b, addr); + mb(); +} + +EXPORT_SYMBOL(readb); +EXPORT_SYMBOL(readw); +EXPORT_SYMBOL(readl); +EXPORT_SYMBOL(readq); +EXPORT_SYMBOL(writeb); +EXPORT_SYMBOL(writew); +EXPORT_SYMBOL(writel); +EXPORT_SYMBOL(writeq); + + +/* + * Read COUNT 
8-bit bytes from port PORT into memory starting at SRC. + */ +void ioread8_rep(void __iomem *port, void *dst, unsigned long count) +{ + while ((unsigned long)dst & 0x3) { + if (!count) + return; + count--; + *(unsigned char *)dst = ioread8(port); + dst += 1; + } + + while (count >= 4) { + unsigned int w; + count -= 4; + w = ioread8(port); + w |= ioread8(port) << 8; + w |= ioread8(port) << 16; + w |= ioread8(port) << 24; + *(unsigned int *)dst = w; + dst += 4; + } + + while (count) { + --count; + *(unsigned char *)dst = ioread8(port); + dst += 1; + } +} + +void insb(unsigned long port, void *dst, unsigned long count) +{ + ioread8_rep(ioport_map(port, 1), dst, count); +} + +EXPORT_SYMBOL(ioread8_rep); +EXPORT_SYMBOL(insb); + +/* + * Read COUNT 16-bit words from port PORT into memory starting at + * SRC. SRC must be at least short aligned. This is used by the + * IDE driver to read disk sectors. Performance is important, but + * the interfaces seems to be slow: just using the inlined version + * of the inw() breaks things. + */ +void ioread16_rep(void __iomem *port, void *dst, unsigned long count) +{ + if (unlikely((unsigned long)dst & 0x3)) { + if (!count) + return; + BUG_ON((unsigned long)dst & 0x1); + count--; + *(unsigned short *)dst = ioread16(port); + dst += 2; + } + + while (count >= 2) { + unsigned int w; + count -= 2; + w = ioread16(port); + w |= ioread16(port) << 16; + *(unsigned int *)dst = w; + dst += 4; + } + + if (count) { + *(unsigned short*)dst = ioread16(port); + } +} + +void insw(unsigned long port, void *dst, unsigned long count) +{ + ioread16_rep(ioport_map(port, 2), dst, count); +} + +EXPORT_SYMBOL(ioread16_rep); +EXPORT_SYMBOL(insw); + + +/* + * Read COUNT 32-bit words from port PORT into memory starting at + * SRC. Now works with any alignment in SRC. Performance is important, + * but the interfaces seems to be slow: just using the inlined version + * of the inl() breaks things. + */ +void ioread32_rep(void __iomem *port, void *dst, unsigned long count) +{ + if (unlikely((unsigned long)dst & 0x3)) { + while (count--) { + struct S { int x __attribute__((packed)); }; + ((struct S *)dst)->x = ioread32(port); + dst += 4; + } + } else { + /* Buffer 32-bit aligned. */ + while (count--) { + *(unsigned int *)dst = ioread32(port); + dst += 4; + } + } +} + +void insl(unsigned long port, void *dst, unsigned long count) +{ + ioread32_rep(ioport_map(port, 4), dst, count); +} + +EXPORT_SYMBOL(ioread32_rep); +EXPORT_SYMBOL(insl); + + +/* + * Like insb but in the opposite direction. + * Don't worry as much about doing aligned memory transfers: + * doing byte reads the "slow" way isn't nearly as slow as + * doing byte writes the slow way (no r-m-w cycle). + */ +void iowrite8_rep(void __iomem *port, const void *xsrc, unsigned long count) +{ + const unsigned char *src = xsrc; + while (count--) + iowrite8(*src++, port); +} + +void outsb(unsigned long port, const void *src, unsigned long count) +{ + iowrite8_rep(ioport_map(port, 1), src, count); +} + +EXPORT_SYMBOL(iowrite8_rep); +EXPORT_SYMBOL(outsb); + + +/* + * Like insw but in the opposite direction. This is used by the IDE + * driver to write disk sectors. Performance is important, but the + * interfaces seems to be slow: just using the inlined version of the + * outw() breaks things. 
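 *
 * Note that the word-assembly trick used in these _rep helpers
 * assumes little-endian memory order, which holds on Alpha: after
 *
 *     w = 0x11; w |= 0x22 << 8; w |= 0x33 << 16; w |= 0x44 << 24;
 *     *(unsigned int *)dst = w;
 *
 * dst[0..3] = 11 22 33 44, i.e. the bytes land in memory in the
 * order they came off the port.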
+ */ +void iowrite16_rep(void __iomem *port, const void *src, unsigned long count) +{ + if (unlikely((unsigned long)src & 0x3)) { + if (!count) + return; + BUG_ON((unsigned long)src & 0x1); + iowrite16(*(unsigned short *)src, port); + src += 2; + --count; + } + + while (count >= 2) { + unsigned int w; + count -= 2; + w = *(unsigned int *)src; + src += 4; + iowrite16(w >> 0, port); + iowrite16(w >> 16, port); + } + + if (count) { + iowrite16(*(unsigned short *)src, port); + } +} + +void outsw(unsigned long port, const void *src, unsigned long count) +{ + iowrite16_rep(ioport_map(port, 2), src, count); +} + +EXPORT_SYMBOL(iowrite16_rep); +EXPORT_SYMBOL(outsw); + + +/* + * Like insl but in the opposite direction. This is used by the IDE + * driver to write disk sectors. Works with any alignment in SRC. + * Performance is important, but the interfaces seems to be slow: + * just using the inlined version of the outl() breaks things. + */ +void iowrite32_rep(void __iomem *port, const void *src, unsigned long count) +{ + if (unlikely((unsigned long)src & 0x3)) { + while (count--) { + struct S { int x __attribute__((packed)); }; + iowrite32(((struct S *)src)->x, port); + src += 4; + } + } else { + /* Buffer 32-bit aligned. */ + while (count--) { + iowrite32(*(unsigned int *)src, port); + src += 4; + } + } +} + +void outsl(unsigned long port, const void *src, unsigned long count) +{ + iowrite32_rep(ioport_map(port, 4), src, count); +} + +EXPORT_SYMBOL(iowrite32_rep); +EXPORT_SYMBOL(outsl); + + +/* + * Copy data from IO memory space to "real" memory space. + * This needs to be optimized. + */ +void memcpy_fromio(void *to, const volatile void __iomem *from, long count) +{ + /* Optimize co-aligned transfers. Everything else gets handled + a byte at a time. */ + + if (count >= 8 && ((u64)to & 7) == ((u64)from & 7)) { + count -= 8; + do { + *(u64 *)to = __raw_readq(from); + count -= 8; + to += 8; + from += 8; + } while (count >= 0); + count += 8; + } + + if (count >= 4 && ((u64)to & 3) == ((u64)from & 3)) { + count -= 4; + do { + *(u32 *)to = __raw_readl(from); + count -= 4; + to += 4; + from += 4; + } while (count >= 0); + count += 4; + } + + if (count >= 2 && ((u64)to & 1) == ((u64)from & 1)) { + count -= 2; + do { + *(u16 *)to = __raw_readw(from); + count -= 2; + to += 2; + from += 2; + } while (count >= 0); + count += 2; + } + + while (count > 0) { + *(u8 *) to = __raw_readb(from); + count--; + to++; + from++; + } + mb(); +} + +EXPORT_SYMBOL(memcpy_fromio); + + +/* + * Copy data from "real" memory space to IO memory space. + * This needs to be optimized. + */ +void memcpy_toio(volatile void __iomem *to, const void *from, long count) +{ + /* Optimize co-aligned transfers. Everything else gets handled + a byte at a time. */ + /* FIXME -- align FROM. 
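 *
 * "Co-aligned" means (to & 7) == (from & 7): both pointers sit at
 * the same offset within a quadword, so one loop can advance them in
 * lockstep.  A pair that is never co-aligned at any width (say, one
 * pointer even and the other odd) drops through to the final
 * byte-at-a-time loop.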
*/ + + if (count >= 8 && ((u64)to & 7) == ((u64)from & 7)) { + count -= 8; + do { + __raw_writeq(*(const u64 *)from, to); + count -= 8; + to += 8; + from += 8; + } while (count >= 0); + count += 8; + } + + if (count >= 4 && ((u64)to & 3) == ((u64)from & 3)) { + count -= 4; + do { + __raw_writel(*(const u32 *)from, to); + count -= 4; + to += 4; + from += 4; + } while (count >= 0); + count += 4; + } + + if (count >= 2 && ((u64)to & 1) == ((u64)from & 1)) { + count -= 2; + do { + __raw_writew(*(const u16 *)from, to); + count -= 2; + to += 2; + from += 2; + } while (count >= 0); + count += 2; + } + + while (count > 0) { + __raw_writeb(*(const u8 *) from, to); + count--; + to++; + from++; + } + mb(); +} + +EXPORT_SYMBOL(memcpy_toio); + + +/* + * "memset" on IO memory space. + */ +void _memset_c_io(volatile void __iomem *to, unsigned long c, long count) +{ + /* Handle any initial odd byte */ + if (count > 0 && ((u64)to & 1)) { + __raw_writeb(c, to); + to++; + count--; + } + + /* Handle any initial odd halfword */ + if (count >= 2 && ((u64)to & 2)) { + __raw_writew(c, to); + to += 2; + count -= 2; + } + + /* Handle any initial odd word */ + if (count >= 4 && ((u64)to & 4)) { + __raw_writel(c, to); + to += 4; + count -= 4; + } + + /* Handle all full-sized quadwords: we're aligned + (or have a small count) */ + count -= 8; + if (count >= 0) { + do { + __raw_writeq(c, to); + to += 8; + count -= 8; + } while (count >= 0); + } + count += 8; + + /* The tail is word-aligned if we still have count >= 4 */ + if (count >= 4) { + __raw_writel(c, to); + to += 4; + count -= 4; + } + + /* The tail is half-word aligned if we have count >= 2 */ + if (count >= 2) { + __raw_writew(c, to); + to += 2; + count -= 2; + } + + /* And finally, one last byte.. */ + if (count) { + __raw_writeb(c, to); + } + mb(); +} + +EXPORT_SYMBOL(_memset_c_io); + +/* A version of memcpy used by the vga console routines to move data around + arbitrarily between screen and main memory. */ + +void +scr_memcpyw(u16 *d, const u16 *s, unsigned int count) +{ + const u16 __iomem *ios = (const u16 __iomem *) s; + u16 __iomem *iod = (u16 __iomem *) d; + int s_isio = __is_ioaddr(s); + int d_isio = __is_ioaddr(d); + + if (s_isio) { + if (d_isio) { + /* FIXME: Should handle unaligned ops and + operation widening. */ + + count /= 2; + while (count--) { + u16 tmp = __raw_readw(ios++); + __raw_writew(tmp, iod++); + } + } + else + memcpy_fromio(d, ios, count); + } else { + if (d_isio) + memcpy_toio(iod, s, count); + else + memcpy(d, s, count); + } +} + +EXPORT_SYMBOL(scr_memcpyw); + +void __iomem *ioport_map(unsigned long port, unsigned int size) +{ + return IO_CONCAT(__IO_PREFIX,ioportmap) (port); +} + +void ioport_unmap(void __iomem *addr) +{ +} + +EXPORT_SYMBOL(ioport_map); +EXPORT_SYMBOL(ioport_unmap); diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c new file mode 100644 index 0000000..b6114f5 --- /dev/null +++ b/arch/alpha/kernel/irq.c @@ -0,0 +1,774 @@ +/* + * linux/arch/alpha/kernel/irq.c + * + * Copyright (C) 1995 Linus Torvalds + * + * This file contains the code used by various IRQ handling routines: + * asking for different IRQ's should be done through these routines + * instead of just grabbing them. Thus setups with different IRQ numbers + * shouldn't result in any weird surprises, and installing new handlers + * should be easier. 
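 *
 * A hypothetical driver of this era would use the interface like so
 * (names invented for illustration):
 *
 *     static irqreturn_t mydev_intr(int irq, void *dev_id,
 *                                   struct pt_regs *regs)
 *     {
 *             ... service the hardware ...
 *             return IRQ_HANDLED;      (IRQ_NONE if it wasn't ours)
 *     }
 *
 *     if (request_irq(dev->irq, mydev_intr, SA_SHIRQ, "mydev", dev))
 *             goto fail;
 *     ...
 *     free_irq(dev->irq, dev);    (same dev_id selects our action)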
+ */ + +#include <linux/config.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/kernel_stat.h> +#include <linux/signal.h> +#include <linux/sched.h> +#include <linux/ptrace.h> +#include <linux/interrupt.h> +#include <linux/slab.h> +#include <linux/random.h> +#include <linux/init.h> +#include <linux/irq.h> +#include <linux/proc_fs.h> +#include <linux/seq_file.h> +#include <linux/profile.h> +#include <linux/bitops.h> + +#include <asm/system.h> +#include <asm/io.h> +#include <asm/uaccess.h> + +/* + * Controller mappings for all interrupt sources: + */ +irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = { + [0 ... NR_IRQS-1] = { + .handler = &no_irq_type, + .lock = SPIN_LOCK_UNLOCKED + } +}; + +static void register_irq_proc(unsigned int irq); + +volatile unsigned long irq_err_count; + +/* + * Special irq handlers. + */ + +irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs) +{ + return IRQ_NONE; +} + +/* + * Generic no controller code + */ + +static void no_irq_enable_disable(unsigned int irq) { } +static unsigned int no_irq_startup(unsigned int irq) { return 0; } + +static void +no_irq_ack(unsigned int irq) +{ + irq_err_count++; + printk(KERN_CRIT "Unexpected IRQ trap at vector %u\n", irq); +} + +struct hw_interrupt_type no_irq_type = { + .typename = "none", + .startup = no_irq_startup, + .shutdown = no_irq_enable_disable, + .enable = no_irq_enable_disable, + .disable = no_irq_enable_disable, + .ack = no_irq_ack, + .end = no_irq_enable_disable, +}; + +int +handle_IRQ_event(unsigned int irq, struct pt_regs *regs, + struct irqaction *action) +{ + int status = 1; /* Force the "do bottom halves" bit */ + int ret; + + do { + if (!(action->flags & SA_INTERRUPT)) + local_irq_enable(); + else + local_irq_disable(); + + ret = action->handler(irq, action->dev_id, regs); + if (ret == IRQ_HANDLED) + status |= action->flags; + action = action->next; + } while (action); + if (status & SA_SAMPLE_RANDOM) + add_interrupt_randomness(irq); + local_irq_disable(); + + return status; +} + +/* + * Generic enable/disable code: this just calls + * down into the PIC-specific version for the actual + * hardware disable after having gotten the irq + * controller lock. + */ +void inline +disable_irq_nosync(unsigned int irq) +{ + irq_desc_t *desc = irq_desc + irq; + unsigned long flags; + + spin_lock_irqsave(&desc->lock, flags); + if (!desc->depth++) { + desc->status |= IRQ_DISABLED; + desc->handler->disable(irq); + } + spin_unlock_irqrestore(&desc->lock, flags); +} + +/* + * Synchronous version of the above, making sure the IRQ is + * no longer running on any other IRQ.. 
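 *
 * The depth counter makes disable/enable nest properly:
 *
 *     disable_irq(i);    depth 0 -> 1, line masked
 *     disable_irq(i);    depth 1 -> 2, already masked
 *     enable_irq(i);     depth 2 -> 1, still masked
 *     enable_irq(i);     depth 1 -> 0, line unmasked
 *
 * and a further enable_irq() triggers the "unbalanced" warning.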
+ */ +void +disable_irq(unsigned int irq) +{ + disable_irq_nosync(irq); + synchronize_irq(irq); +} + +void +enable_irq(unsigned int irq) +{ + irq_desc_t *desc = irq_desc + irq; + unsigned long flags; + + spin_lock_irqsave(&desc->lock, flags); + switch (desc->depth) { + case 1: { + unsigned int status = desc->status & ~IRQ_DISABLED; + desc->status = status; + if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) { + desc->status = status | IRQ_REPLAY; + hw_resend_irq(desc->handler,irq); + } + desc->handler->enable(irq); + /* fall-through */ + } + default: + desc->depth--; + break; + case 0: + printk(KERN_ERR "enable_irq() unbalanced from %p\n", + __builtin_return_address(0)); + } + spin_unlock_irqrestore(&desc->lock, flags); +} + +int +setup_irq(unsigned int irq, struct irqaction * new) +{ + int shared = 0; + struct irqaction *old, **p; + unsigned long flags; + irq_desc_t *desc = irq_desc + irq; + + if (desc->handler == &no_irq_type) + return -ENOSYS; + + /* + * Some drivers like serial.c use request_irq() heavily, + * so we have to be careful not to interfere with a + * running system. + */ + if (new->flags & SA_SAMPLE_RANDOM) { + /* + * This function might sleep, we want to call it first, + * outside of the atomic block. + * Yes, this might clear the entropy pool if the wrong + * driver is attempted to be loaded, without actually + * installing a new handler, but is this really a problem, + * only the sysadmin is able to do this. + */ + rand_initialize_irq(irq); + } + + /* + * The following block of code has to be executed atomically + */ + spin_lock_irqsave(&desc->lock,flags); + p = &desc->action; + if ((old = *p) != NULL) { + /* Can't share interrupts unless both agree to */ + if (!(old->flags & new->flags & SA_SHIRQ)) { + spin_unlock_irqrestore(&desc->lock,flags); + return -EBUSY; + } + + /* add new interrupt at end of irq queue */ + do { + p = &old->next; + old = *p; + } while (old); + shared = 1; + } + + *p = new; + + if (!shared) { + desc->depth = 0; + desc->status &= + ~(IRQ_DISABLED|IRQ_AUTODETECT|IRQ_WAITING|IRQ_INPROGRESS); + desc->handler->startup(irq); + } + spin_unlock_irqrestore(&desc->lock,flags); + + return 0; +} + +static struct proc_dir_entry * root_irq_dir; +static struct proc_dir_entry * irq_dir[NR_IRQS]; + +#ifdef CONFIG_SMP +static struct proc_dir_entry * smp_affinity_entry[NR_IRQS]; +static char irq_user_affinity[NR_IRQS]; +static cpumask_t irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL }; + +static void +select_smp_affinity(int irq) +{ + static int last_cpu; + int cpu = last_cpu + 1; + + if (! irq_desc[irq].handler->set_affinity || irq_user_affinity[irq]) + return; + + while (!cpu_possible(cpu)) + cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0); + last_cpu = cpu; + + irq_affinity[irq] = cpumask_of_cpu(cpu); + irq_desc[irq].handler->set_affinity(irq, cpumask_of_cpu(cpu)); +} + +static int +irq_affinity_read_proc (char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + int len = cpumask_scnprintf(page, count, irq_affinity[(long)data]); + if (count - len < 2) + return -EINVAL; + len += sprintf(page + len, "\n"); + return len; +} + +static int +irq_affinity_write_proc(struct file *file, const char __user *buffer, + unsigned long count, void *data) +{ + int irq = (long) data, full_count = count, err; + cpumask_t new_value; + + if (!irq_desc[irq].handler->set_affinity) + return -EIO; + + err = cpumask_parse(buffer, count, new_value); + + /* The special value 0 means release control of the + affinity to kernel. 
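 *
 * Worked example, four CPUs online:
 *
 *     echo 3 > /proc/irq/10/smp_affinity   targets IRQ 10 at CPUs
 *                                          0-1 and marks the irq
 *                                          user-controlled;
 *     echo 0 > /proc/irq/10/smp_affinity   empties the mask after
 *                                          the cpus_and() below, so
 *                                          user control is released
 *                                          and select_smp_affinity()
 *                                          resumes its round-robin
 *                                          placement.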
*/ + cpus_and(new_value, new_value, cpu_online_map); + if (cpus_empty(new_value)) { + irq_user_affinity[irq] = 0; + select_smp_affinity(irq); + } + /* Do not allow disabling IRQs completely - it's a too easy + way to make the system unusable accidentally :-) At least + one online CPU still has to be targeted. */ + else { + irq_affinity[irq] = new_value; + irq_user_affinity[irq] = 1; + irq_desc[irq].handler->set_affinity(irq, new_value); + } + + return full_count; +} + +#endif /* CONFIG_SMP */ + +#define MAX_NAMELEN 10 + +static void +register_irq_proc (unsigned int irq) +{ + char name [MAX_NAMELEN]; + + if (!root_irq_dir || (irq_desc[irq].handler == &no_irq_type) || + irq_dir[irq]) + return; + + memset(name, 0, MAX_NAMELEN); + sprintf(name, "%d", irq); + + /* create /proc/irq/1234 */ + irq_dir[irq] = proc_mkdir(name, root_irq_dir); + +#ifdef CONFIG_SMP + if (irq_desc[irq].handler->set_affinity) { + struct proc_dir_entry *entry; + /* create /proc/irq/1234/smp_affinity */ + entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]); + + if (entry) { + entry->nlink = 1; + entry->data = (void *)(long)irq; + entry->read_proc = irq_affinity_read_proc; + entry->write_proc = irq_affinity_write_proc; + } + + smp_affinity_entry[irq] = entry; + } +#endif +} + +void +init_irq_proc (void) +{ + int i; + + /* create /proc/irq */ + root_irq_dir = proc_mkdir("irq", NULL); + +#ifdef CONFIG_SMP + /* create /proc/irq/prof_cpu_mask */ + create_prof_cpu_mask(root_irq_dir); +#endif + + /* + * Create entries for all existing IRQs. + */ + for (i = 0; i < ACTUAL_NR_IRQS; i++) { + if (irq_desc[i].handler == &no_irq_type) + continue; + register_irq_proc(i); + } +} + +int +request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *), + unsigned long irqflags, const char * devname, void *dev_id) +{ + int retval; + struct irqaction * action; + + if (irq >= ACTUAL_NR_IRQS) + return -EINVAL; + if (!handler) + return -EINVAL; + +#if 1 + /* + * Sanity-check: shared interrupts should REALLY pass in + * a real dev-ID, otherwise we'll have trouble later trying + * to figure out which interrupt is which (messes up the + * interrupt freeing logic etc). + */ + if ((irqflags & SA_SHIRQ) && !dev_id) { + printk(KERN_ERR + "Bad boy: %s (at %p) called us without a dev_id!\n", + devname, __builtin_return_address(0)); + } +#endif + + action = (struct irqaction *) + kmalloc(sizeof(struct irqaction), GFP_KERNEL); + if (!action) + return -ENOMEM; + + action->handler = handler; + action->flags = irqflags; + cpus_clear(action->mask); + action->name = devname; + action->next = NULL; + action->dev_id = dev_id; + +#ifdef CONFIG_SMP + select_smp_affinity(irq); +#endif + + retval = setup_irq(irq, action); + if (retval) + kfree(action); + return retval; +} + +EXPORT_SYMBOL(request_irq); + +void +free_irq(unsigned int irq, void *dev_id) +{ + irq_desc_t *desc; + struct irqaction **p; + unsigned long flags; + + if (irq >= ACTUAL_NR_IRQS) { + printk(KERN_CRIT "Trying to free IRQ%d\n", irq); + return; + } + + desc = irq_desc + irq; + spin_lock_irqsave(&desc->lock,flags); + p = &desc->action; + for (;;) { + struct irqaction * action = *p; + if (action) { + struct irqaction **pp = p; + p = &action->next; + if (action->dev_id != dev_id) + continue; + + /* Found - now remove it from the list of entries. 
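 *
 * (The **pp double indirection means pp points at whichever field
 * currently holds this action -- desc->action for the head, or the
 * predecessor's ->next otherwise -- so the single store that follows
 * unlinks head and interior entries through one code path.)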
*/ + *pp = action->next; + if (!desc->action) { + desc->status |= IRQ_DISABLED; + desc->handler->shutdown(irq); + } + spin_unlock_irqrestore(&desc->lock,flags); + +#ifdef CONFIG_SMP + /* Wait to make sure it's not being used on + another CPU. */ + while (desc->status & IRQ_INPROGRESS) + barrier(); +#endif + kfree(action); + return; + } + printk(KERN_ERR "Trying to free free IRQ%d\n",irq); + spin_unlock_irqrestore(&desc->lock,flags); + return; + } +} + +EXPORT_SYMBOL(free_irq); + +int +show_interrupts(struct seq_file *p, void *v) +{ +#ifdef CONFIG_SMP + int j; +#endif + int i = *(loff_t *) v; + struct irqaction * action; + unsigned long flags; + +#ifdef CONFIG_SMP + if (i == 0) { + seq_puts(p, " "); + for (i = 0; i < NR_CPUS; i++) + if (cpu_online(i)) + seq_printf(p, "CPU%d ", i); + seq_putc(p, '\n'); + } +#endif + + if (i < ACTUAL_NR_IRQS) { + spin_lock_irqsave(&irq_desc[i].lock, flags); + action = irq_desc[i].action; + if (!action) + goto unlock; + seq_printf(p, "%3d: ",i); +#ifndef CONFIG_SMP + seq_printf(p, "%10u ", kstat_irqs(i)); +#else + for (j = 0; j < NR_CPUS; j++) + if (cpu_online(j)) + seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); +#endif + seq_printf(p, " %14s", irq_desc[i].handler->typename); + seq_printf(p, " %c%s", + (action->flags & SA_INTERRUPT)?'+':' ', + action->name); + + for (action=action->next; action; action = action->next) { + seq_printf(p, ", %c%s", + (action->flags & SA_INTERRUPT)?'+':' ', + action->name); + } + + seq_putc(p, '\n'); +unlock: + spin_unlock_irqrestore(&irq_desc[i].lock, flags); + } else if (i == ACTUAL_NR_IRQS) { +#ifdef CONFIG_SMP + seq_puts(p, "IPI: "); + for (i = 0; i < NR_CPUS; i++) + if (cpu_online(i)) + seq_printf(p, "%10lu ", cpu_data[i].ipi_count); + seq_putc(p, '\n'); +#endif + seq_printf(p, "ERR: %10lu\n", irq_err_count); + } + return 0; +} + + +/* + * handle_irq handles all normal device IRQ's (the special + * SMP cross-CPU interrupts have their own specific + * handlers). + */ + +#define MAX_ILLEGAL_IRQS 16 + +void +handle_irq(int irq, struct pt_regs * regs) +{ + /* + * We ack quickly, we don't want the irq controller + * thinking we're snobs just because some other CPU has + * disabled global interrupts (we have already done the + * INT_ACK cycles, it's too late to try to pretend to the + * controller that we aren't taking the interrupt). + * + * 0 return value means that this irq is already being + * handled by some other CPU. (or is disabled) + */ + int cpu = smp_processor_id(); + irq_desc_t *desc = irq_desc + irq; + struct irqaction * action; + unsigned int status; + static unsigned int illegal_count=0; + + if ((unsigned) irq > ACTUAL_NR_IRQS && illegal_count < MAX_ILLEGAL_IRQS ) { + irq_err_count++; + illegal_count++; + printk(KERN_CRIT "device_interrupt: invalid interrupt %d\n", + irq); + return; + } + + irq_enter(); + kstat_cpu(cpu).irqs[irq]++; + spin_lock_irq(&desc->lock); /* mask also the higher prio events */ + desc->handler->ack(irq); + /* + * REPLAY is when Linux resends an IRQ that was dropped earlier. + * WAITING is used by probe to mark irqs that are being tested. + */ + status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING); + status |= IRQ_PENDING; /* we _want_ to handle it */ + + /* + * If the IRQ is disabled for whatever reason, we cannot + * use the action we have. 
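 *
 * The flag dance serializes handlers across CPUs: if CPU A is in the
 * handler when CPU B takes the same line, B merely sets IRQ_PENDING
 * and leaves; A notices PENDING once it retakes the lock and loops
 * back, so the second instance is neither lost nor run concurrently
 * with the first.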
+ */ + action = NULL; + if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) { + action = desc->action; + status &= ~IRQ_PENDING; /* we commit to handling */ + status |= IRQ_INPROGRESS; /* we are handling it */ + } + desc->status = status; + + /* + * If there is no IRQ handler or it was disabled, exit early. + * Since we set PENDING, if another processor is handling + * a different instance of this same irq, the other processor + * will take care of it. + */ + if (!action) + goto out; + + /* + * Edge triggered interrupts need to remember pending events. + * This applies to any hw interrupts that allow a second + * instance of the same irq to arrive while we are in handle_irq + * or in the handler. But the code here only handles the _second_ + * instance of the irq, not the third or fourth. So it is mostly + * useful for irq hardware that does not mask cleanly in an + * SMP environment. + */ + for (;;) { + spin_unlock(&desc->lock); + handle_IRQ_event(irq, regs, action); + spin_lock(&desc->lock); + + if (!(desc->status & IRQ_PENDING) + || (desc->status & IRQ_LEVEL)) + break; + desc->status &= ~IRQ_PENDING; + } + desc->status &= ~IRQ_INPROGRESS; +out: + /* + * The ->end() handler has to deal with interrupts which got + * disabled while the handler was running. + */ + desc->handler->end(irq); + spin_unlock(&desc->lock); + + irq_exit(); +} + +/* + * IRQ autodetection code.. + * + * This depends on the fact that any interrupt that + * comes in on to an unassigned handler will get stuck + * with "IRQ_WAITING" cleared and the interrupt + * disabled. + */ +unsigned long +probe_irq_on(void) +{ + int i; + irq_desc_t *desc; + unsigned long delay; + unsigned long val; + + /* Something may have generated an irq long ago and we want to + flush such a longstanding irq before considering it as spurious. */ + for (i = NR_IRQS-1; i >= 0; i--) { + desc = irq_desc + i; + + spin_lock_irq(&desc->lock); + if (!irq_desc[i].action) + irq_desc[i].handler->startup(i); + spin_unlock_irq(&desc->lock); + } + + /* Wait for longstanding interrupts to trigger. */ + for (delay = jiffies + HZ/50; time_after(delay, jiffies); ) + /* about 20ms delay */ barrier(); + + /* enable any unassigned irqs (we must startup again here because + if a longstanding irq happened in the previous stage, it may have + masked itself) first, enable any unassigned irqs. */ + for (i = NR_IRQS-1; i >= 0; i--) { + desc = irq_desc + i; + + spin_lock_irq(&desc->lock); + if (!desc->action) { + desc->status |= IRQ_AUTODETECT | IRQ_WAITING; + if (desc->handler->startup(i)) + desc->status |= IRQ_PENDING; + } + spin_unlock_irq(&desc->lock); + } + + /* + * Wait for spurious interrupts to trigger + */ + for (delay = jiffies + HZ/10; time_after(delay, jiffies); ) + /* about 100ms delay */ barrier(); + + /* + * Now filter out any obviously spurious interrupts + */ + val = 0; + for (i=0; i<NR_IRQS; i++) { + irq_desc_t *desc = irq_desc + i; + unsigned int status; + + spin_lock_irq(&desc->lock); + status = desc->status; + + if (status & IRQ_AUTODETECT) { + /* It triggered already - consider it spurious. */ + if (!(status & IRQ_WAITING)) { + desc->status = status & ~IRQ_AUTODETECT; + desc->handler->shutdown(i); + } else + if (i < 32) + val |= 1 << i; + } + spin_unlock_irq(&desc->lock); + } + + return val; +} + +EXPORT_SYMBOL(probe_irq_on); + +/* + * Return a mask of triggered interrupts (this + * can handle only legacy ISA interrupts). 
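 *
 * The trio is used in the classic autoprobe pattern (sketch):
 *
 *     mask = probe_irq_on();          arm all unassigned lines
 *     ... make the device raise its interrupt ...
 *     irq = probe_irq_off(mask);      >0 found, 0 none, <0 several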
+ */ +unsigned int +probe_irq_mask(unsigned long val) +{ + int i; + unsigned int mask; + + mask = 0; + for (i = 0; i < NR_IRQS; i++) { + irq_desc_t *desc = irq_desc + i; + unsigned int status; + + spin_lock_irq(&desc->lock); + status = desc->status; + + if (status & IRQ_AUTODETECT) { + /* We only react to ISA interrupts */ + if (!(status & IRQ_WAITING)) { + if (i < 16) + mask |= 1 << i; + } + + desc->status = status & ~IRQ_AUTODETECT; + desc->handler->shutdown(i); + } + spin_unlock_irq(&desc->lock); + } + + return mask & val; +} + +/* + * Get the result of the IRQ probe.. A negative result means that + * we have several candidates (but we return the lowest-numbered + * one). + */ + +int +probe_irq_off(unsigned long val) +{ + int i, irq_found, nr_irqs; + + nr_irqs = 0; + irq_found = 0; + for (i=0; i<NR_IRQS; i++) { + irq_desc_t *desc = irq_desc + i; + unsigned int status; + + spin_lock_irq(&desc->lock); + status = desc->status; + + if (status & IRQ_AUTODETECT) { + if (!(status & IRQ_WAITING)) { + if (!nr_irqs) + irq_found = i; + nr_irqs++; + } + desc->status = status & ~IRQ_AUTODETECT; + desc->handler->shutdown(i); + } + spin_unlock_irq(&desc->lock); + } + + if (nr_irqs > 1) + irq_found = -irq_found; + return irq_found; +} + +EXPORT_SYMBOL(probe_irq_off); + +#ifdef CONFIG_SMP +void synchronize_irq(unsigned int irq) +{ + /* is there anything to synchronize with? */ + if (!irq_desc[irq].action) + return; + + while (irq_desc[irq].status & IRQ_INPROGRESS) + barrier(); +} +#endif diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c new file mode 100644 index 0000000..e6ded33 --- /dev/null +++ b/arch/alpha/kernel/irq_alpha.c @@ -0,0 +1,252 @@ +/* + * Alpha specific irq code. + */ + +#include <linux/config.h> +#include <linux/init.h> +#include <linux/sched.h> +#include <linux/irq.h> +#include <linux/kernel_stat.h> + +#include <asm/machvec.h> +#include <asm/dma.h> + +#include "proto.h" +#include "irq_impl.h" + +/* Hack minimum IPL during interrupt processing for broken hardware. */ +#ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK +int __min_ipl; +#endif + +/* + * Performance counter hook. A module can override this to + * do something useful. + */ +static void +dummy_perf(unsigned long vector, struct pt_regs *regs) +{ + irq_err_count++; + printk(KERN_CRIT "Performance counter interrupt!\n"); +} + +void (*perf_irq)(unsigned long, struct pt_regs *) = dummy_perf; + +/* + * The main interrupt entry point. + */ + +asmlinkage void +do_entInt(unsigned long type, unsigned long vector, + unsigned long la_ptr, struct pt_regs *regs) +{ + switch (type) { + case 0: +#ifdef CONFIG_SMP + handle_ipi(regs); + return; +#else + irq_err_count++; + printk(KERN_CRIT "Interprocessor interrupt? " + "You must be kidding!\n"); +#endif + break; + case 1: +#ifdef CONFIG_SMP + { + long cpu; + smp_percpu_timer_interrupt(regs); + cpu = smp_processor_id(); + if (cpu != boot_cpuid) { + kstat_cpu(cpu).irqs[RTC_IRQ]++; + } else { + handle_irq(RTC_IRQ, regs); + } + } +#else + handle_irq(RTC_IRQ, regs); +#endif + return; + case 2: + alpha_mv.machine_check(vector, la_ptr, regs); + return; + case 3: + alpha_mv.device_interrupt(vector, regs); + return; + case 4: + perf_irq(la_ptr, regs); + return; + default: + printk(KERN_CRIT "Hardware intr %ld %lx? 
Huh?\n", + type, vector); + } + printk(KERN_CRIT "PC = %016lx PS=%04lx\n", regs->pc, regs->ps); +} + +void __init +common_init_isa_dma(void) +{ + outb(0, DMA1_RESET_REG); + outb(0, DMA2_RESET_REG); + outb(0, DMA1_CLR_MASK_REG); + outb(0, DMA2_CLR_MASK_REG); +} + +void __init +init_IRQ(void) +{ + /* Just in case the platform init_irq() causes interrupts/mchecks + (as is the case with RAWHIDE, at least). */ + wrent(entInt, 0); + + alpha_mv.init_irq(); +} + +/* + * machine error checks + */ +#define MCHK_K_TPERR 0x0080 +#define MCHK_K_TCPERR 0x0082 +#define MCHK_K_HERR 0x0084 +#define MCHK_K_ECC_C 0x0086 +#define MCHK_K_ECC_NC 0x0088 +#define MCHK_K_OS_BUGCHECK 0x008A +#define MCHK_K_PAL_BUGCHECK 0x0090 + +#ifndef CONFIG_SMP +struct mcheck_info __mcheck_info; +#endif + +void +process_mcheck_info(unsigned long vector, unsigned long la_ptr, + struct pt_regs *regs, const char *machine, + int expected) +{ + struct el_common *mchk_header; + const char *reason; + + /* + * See if the machine check is due to a badaddr() and if so, + * ignore it. + */ + +#ifdef CONFIG_VERBOSE_MCHECK + if (alpha_verbose_mcheck > 1) { + printk(KERN_CRIT "%s machine check %s\n", machine, + expected ? "expected." : "NOT expected!!!"); + } +#endif + + if (expected) { + int cpu = smp_processor_id(); + mcheck_expected(cpu) = 0; + mcheck_taken(cpu) = 1; + return; + } + + mchk_header = (struct el_common *)la_ptr; + + printk(KERN_CRIT "%s machine check: vector=0x%lx pc=0x%lx code=0x%x\n", + machine, vector, regs->pc, mchk_header->code); + + switch (mchk_header->code) { + /* Machine check reasons. Defined according to PALcode sources. */ + case 0x80: reason = "tag parity error"; break; + case 0x82: reason = "tag control parity error"; break; + case 0x84: reason = "generic hard error"; break; + case 0x86: reason = "correctable ECC error"; break; + case 0x88: reason = "uncorrectable ECC error"; break; + case 0x8A: reason = "OS-specific PAL bugcheck"; break; + case 0x90: reason = "callsys in kernel mode"; break; + case 0x96: reason = "i-cache read retryable error"; break; + case 0x98: reason = "processor detected hard error"; break; + + /* System specific (these are for Alcor, at least): */ + case 0x202: reason = "system detected hard error"; break; + case 0x203: reason = "system detected uncorrectable ECC error"; break; + case 0x204: reason = "SIO SERR occurred on PCI bus"; break; + case 0x205: reason = "parity error detected by core logic"; break; + case 0x206: reason = "SIO IOCHK occurred on ISA bus"; break; + case 0x207: reason = "non-existent memory error"; break; + case 0x208: reason = "MCHK_K_DCSR"; break; + case 0x209: reason = "PCI SERR detected"; break; + case 0x20b: reason = "PCI data parity error detected"; break; + case 0x20d: reason = "PCI address parity error detected"; break; + case 0x20f: reason = "PCI master abort error"; break; + case 0x211: reason = "PCI target abort error"; break; + case 0x213: reason = "scatter/gather PTE invalid error"; break; + case 0x215: reason = "flash ROM write error"; break; + case 0x217: reason = "IOA timeout detected"; break; + case 0x219: reason = "IOCHK#, EISA add-in board parity or other catastrophic error"; break; + case 0x21b: reason = "EISA fail-safe timer timeout"; break; + case 0x21d: reason = "EISA bus time-out"; break; + case 0x21f: reason = "EISA software generated NMI"; break; + case 0x221: reason = "unexpected ev5 IRQ[3] interrupt"; break; + default: reason = "unknown"; break; + } + + printk(KERN_CRIT "machine check type: %s%s\n", + reason, mchk_header->retry ? 
" (retryable)" : ""); + + dik_show_regs(regs, NULL); + +#ifdef CONFIG_VERBOSE_MCHECK + if (alpha_verbose_mcheck > 1) { + /* Dump the logout area to give all info. */ + unsigned long *ptr = (unsigned long *)la_ptr; + long i; + for (i = 0; i < mchk_header->size / sizeof(long); i += 2) { + printk(KERN_CRIT " +%8lx %016lx %016lx\n", + i*sizeof(long), ptr[i], ptr[i+1]); + } + } +#endif /* CONFIG_VERBOSE_MCHECK */ +} + +/* + * The special RTC interrupt type. The interrupt itself was + * processed by PALcode, and comes in via entInt vector 1. + */ + +static void rtc_enable_disable(unsigned int irq) { } +static unsigned int rtc_startup(unsigned int irq) { return 0; } + +struct irqaction timer_irqaction = { + .handler = timer_interrupt, + .flags = SA_INTERRUPT, + .name = "timer", +}; + +static struct hw_interrupt_type rtc_irq_type = { + .typename = "RTC", + .startup = rtc_startup, + .shutdown = rtc_enable_disable, + .enable = rtc_enable_disable, + .disable = rtc_enable_disable, + .ack = rtc_enable_disable, + .end = rtc_enable_disable, +}; + +void __init +init_rtc_irq(void) +{ + irq_desc[RTC_IRQ].status = IRQ_DISABLED; + irq_desc[RTC_IRQ].handler = &rtc_irq_type; + setup_irq(RTC_IRQ, &timer_irqaction); +} + +/* Dummy irqactions. */ +struct irqaction isa_cascade_irqaction = { + .handler = no_action, + .name = "isa-cascade" +}; + +struct irqaction timer_cascade_irqaction = { + .handler = no_action, + .name = "timer-cascade" +}; + +struct irqaction halt_switch_irqaction = { + .handler = no_action, + .name = "halt-switch" +}; diff --git a/arch/alpha/kernel/irq_i8259.c b/arch/alpha/kernel/irq_i8259.c new file mode 100644 index 0000000..b188683 --- /dev/null +++ b/arch/alpha/kernel/irq_i8259.c @@ -0,0 +1,183 @@ +/* + * linux/arch/alpha/kernel/irq_i8259.c + * + * This is the 'legacy' 8259A Programmable Interrupt Controller, + * present in the majority of PC/AT boxes. + * + * Started hacking from linux-2.3.30pre6/arch/i386/kernel/i8259.c. + */ + +#include <linux/config.h> +#include <linux/init.h> +#include <linux/cache.h> +#include <linux/sched.h> +#include <linux/irq.h> +#include <linux/interrupt.h> + +#include <asm/io.h> + +#include "proto.h" +#include "irq_impl.h" + + +/* Note mask bit is true for DISABLED irqs. */ +static unsigned int cached_irq_mask = 0xffff; +static DEFINE_SPINLOCK(i8259_irq_lock); + +static inline void +i8259_update_irq_hw(unsigned int irq, unsigned long mask) +{ + int port = 0x21; + if (irq & 8) mask >>= 8; + if (irq & 8) port = 0xA1; + outb(mask, port); +} + +inline void +i8259a_enable_irq(unsigned int irq) +{ + spin_lock(&i8259_irq_lock); + i8259_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq)); + spin_unlock(&i8259_irq_lock); +} + +static inline void +__i8259a_disable_irq(unsigned int irq) +{ + i8259_update_irq_hw(irq, cached_irq_mask |= 1 << irq); +} + +void +i8259a_disable_irq(unsigned int irq) +{ + spin_lock(&i8259_irq_lock); + __i8259a_disable_irq(irq); + spin_unlock(&i8259_irq_lock); +} + +void +i8259a_mask_and_ack_irq(unsigned int irq) +{ + spin_lock(&i8259_irq_lock); + __i8259a_disable_irq(irq); + + /* Ack the interrupt making it the lowest priority. 
*/
+	if (irq >= 8) {
+		outb(0xE0 | (irq - 8), 0xa0);	/* ack the slave */
+		irq = 2;
+	}
+	outb(0xE0 | irq, 0x20);		/* ack the master */
+	spin_unlock(&i8259_irq_lock);
+}
+
+unsigned int
+i8259a_startup_irq(unsigned int irq)
+{
+	i8259a_enable_irq(irq);
+	return 0; /* never anything pending */
+}
+
+void
+i8259a_end_irq(unsigned int irq)
+{
+	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+		i8259a_enable_irq(irq);
+}
+
+struct hw_interrupt_type i8259a_irq_type = {
+	.typename	= "XT-PIC",
+	.startup	= i8259a_startup_irq,
+	.shutdown	= i8259a_disable_irq,
+	.enable		= i8259a_enable_irq,
+	.disable	= i8259a_disable_irq,
+	.ack		= i8259a_mask_and_ack_irq,
+	.end		= i8259a_end_irq,
+};
+
+void __init
+init_i8259a_irqs(void)
+{
+	static struct irqaction cascade = {
+		.handler	= no_action,
+		.name		= "cascade",
+	};
+
+	long i;
+
+	outb(0xff, 0x21);	/* mask all of 8259A-1 */
+	outb(0xff, 0xA1);	/* mask all of 8259A-2 */
+
+	for (i = 0; i < 16; i++) {
+		irq_desc[i].status = IRQ_DISABLED;
+		irq_desc[i].handler = &i8259a_irq_type;
+	}
+
+	setup_irq(2, &cascade);
+}
+
+
+#if defined(CONFIG_ALPHA_GENERIC)
+# define IACK_SC	alpha_mv.iack_sc
+#elif defined(CONFIG_ALPHA_APECS)
+# define IACK_SC	APECS_IACK_SC
+#elif defined(CONFIG_ALPHA_LCA)
+# define IACK_SC	LCA_IACK_SC
+#elif defined(CONFIG_ALPHA_CIA)
+# define IACK_SC	CIA_IACK_SC
+#elif defined(CONFIG_ALPHA_PYXIS)
+# define IACK_SC	PYXIS_IACK_SC
+#elif defined(CONFIG_ALPHA_TITAN)
+# define IACK_SC	TITAN_IACK_SC
+#elif defined(CONFIG_ALPHA_TSUNAMI)
+# define IACK_SC	TSUNAMI_IACK_SC
+#elif defined(CONFIG_ALPHA_IRONGATE)
+# define IACK_SC	IRONGATE_IACK_SC
+#endif
+/* Note that CONFIG_ALPHA_POLARIS is intentionally left out here, since
+   sys_rx164 wants to use isa_no_iack_sc_device_interrupt for some reason. */
+
+#if defined(IACK_SC)
+void
+isa_device_interrupt(unsigned long vector, struct pt_regs *regs)
+{
+	/*
+	 * Generate a PCI interrupt acknowledge cycle. The PIC will
+	 * respond with the interrupt vector of the highest priority
+	 * interrupt that is pending. The PALcode sets up the
+	 * interrupt vectors such that irq level L generates vector L.
+	 */
+	int j = *(vuip) IACK_SC;
+	j &= 0xff;
+	handle_irq(j, regs);
+}
+#endif
+
+#if defined(CONFIG_ALPHA_GENERIC) || !defined(IACK_SC)
+void
+isa_no_iack_sc_device_interrupt(unsigned long vector, struct pt_regs *regs)
+{
+	unsigned long pic;
+
+	/*
+	 * It seems to me that the probability of two or more *device*
+	 * interrupts occurring at almost exactly the same time is
+	 * pretty low. So why pay the price of checking for
+	 * additional interrupts here if the common case can be
+	 * handled so much more easily?
+	 */
+	/*
+	 * The first read gives you *all* interrupting lines.
+	 * Therefore, read the mask register and AND out those lines
+	 * not enabled. Note that some documentation has 0x21 and 0xA1
+	 * write only. This is not true.
+	 */
+	pic = inb(0x20) | (inb(0xA0) << 8);	/* read isr */
+	pic &= 0xFFFB;				/* mask out cascade & hibits */
+
+	while (pic) {
+		int j = ffz(~pic);
+		pic &= pic - 1;
+		handle_irq(j, regs);
+	}
+}
+#endif
diff --git a/arch/alpha/kernel/irq_impl.h b/arch/alpha/kernel/irq_impl.h
new file mode 100644
index 0000000..f201d8f
--- /dev/null
+++ b/arch/alpha/kernel/irq_impl.h
@@ -0,0 +1,42 @@
+/*
+ *	linux/arch/alpha/kernel/irq_impl.h
+ *
+ *	Copyright (C) 1995 Linus Torvalds
+ *	Copyright (C) 1998, 2000 Richard Henderson
+ *
+ * This file contains declarations and inline functions for interfacing
+ * with the IRQ handling routines in irq.c.
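+ *
+ * A board's init_irq hook typically strings these helpers together;
+ * a minimal sketch (the board name is hypothetical, the calls are the
+ * ones declared below):
+ *
+ *	static void __init
+ *	foo_init_irq(void)
+ *	{
+ *		init_i8259a_irqs();
+ *		common_init_isa_dma();
+ *	}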
+ */ + +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/profile.h> + + +#define RTC_IRQ 8 + +extern void isa_device_interrupt(unsigned long, struct pt_regs *); +extern void isa_no_iack_sc_device_interrupt(unsigned long, struct pt_regs *); +extern void srm_device_interrupt(unsigned long, struct pt_regs *); +extern void pyxis_device_interrupt(unsigned long, struct pt_regs *); + +extern struct irqaction timer_irqaction; +extern struct irqaction isa_cascade_irqaction; +extern struct irqaction timer_cascade_irqaction; +extern struct irqaction halt_switch_irqaction; + +extern void init_srm_irqs(long, unsigned long); +extern void init_pyxis_irqs(unsigned long); +extern void init_rtc_irq(void); + +extern void common_init_isa_dma(void); + +extern void i8259a_enable_irq(unsigned int); +extern void i8259a_disable_irq(unsigned int); +extern void i8259a_mask_and_ack_irq(unsigned int); +extern unsigned int i8259a_startup_irq(unsigned int); +extern void i8259a_end_irq(unsigned int); +extern struct hw_interrupt_type i8259a_irq_type; +extern void init_i8259a_irqs(void); + +extern void handle_irq(int irq, struct pt_regs * regs); diff --git a/arch/alpha/kernel/irq_pyxis.c b/arch/alpha/kernel/irq_pyxis.c new file mode 100644 index 0000000..146a20b --- /dev/null +++ b/arch/alpha/kernel/irq_pyxis.c @@ -0,0 +1,127 @@ +/* + * linux/arch/alpha/kernel/irq_pyxis.c + * + * Based on code written by David A Rusling (david.rusling@reo.mts.dec.com). + * + * IRQ Code common to all PYXIS core logic chips. + */ + +#include <linux/init.h> +#include <linux/sched.h> +#include <linux/irq.h> + +#include <asm/io.h> +#include <asm/core_cia.h> + +#include "proto.h" +#include "irq_impl.h" + + +/* Note mask bit is true for ENABLED irqs. */ +static unsigned long cached_irq_mask; + +static inline void +pyxis_update_irq_hw(unsigned long mask) +{ + *(vulp)PYXIS_INT_MASK = mask; + mb(); + *(vulp)PYXIS_INT_MASK; +} + +static inline void +pyxis_enable_irq(unsigned int irq) +{ + pyxis_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16)); +} + +static void +pyxis_disable_irq(unsigned int irq) +{ + pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16))); +} + +static unsigned int +pyxis_startup_irq(unsigned int irq) +{ + pyxis_enable_irq(irq); + return 0; +} + +static void +pyxis_end_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + pyxis_enable_irq(irq); +} + +static void +pyxis_mask_and_ack_irq(unsigned int irq) +{ + unsigned long bit = 1UL << (irq - 16); + unsigned long mask = cached_irq_mask &= ~bit; + + /* Disable the interrupt. */ + *(vulp)PYXIS_INT_MASK = mask; + wmb(); + /* Ack PYXIS PCI interrupt. */ + *(vulp)PYXIS_INT_REQ = bit; + mb(); + /* Re-read to force both writes. */ + *(vulp)PYXIS_INT_MASK; +} + +static struct hw_interrupt_type pyxis_irq_type = { + .typename = "PYXIS", + .startup = pyxis_startup_irq, + .shutdown = pyxis_disable_irq, + .enable = pyxis_enable_irq, + .disable = pyxis_disable_irq, + .ack = pyxis_mask_and_ack_irq, + .end = pyxis_end_irq, +}; + +void +pyxis_device_interrupt(unsigned long vector, struct pt_regs *regs) +{ + unsigned long pld; + unsigned int i; + + /* Read the interrupt summary register of PYXIS */ + pld = *(vulp)PYXIS_INT_REQ; + pld &= cached_irq_mask; + + /* + * Now for every possible bit set, work through them and call + * the appropriate interrupt handler. 
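+ *
+ * Worked example: if pld = 0x18080 after masking, ffz(~pld) yields
+ * 7 first (the ISA cascade, routed to isa_device_interrupt), then
+ * 15 and 16, each dispatched as handle_irq(16 + i); pld &= pld - 1
+ * strips the lowest set bit on every pass.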
+ */ + while (pld) { + i = ffz(~pld); + pld &= pld - 1; /* clear least bit set */ + if (i == 7) + isa_device_interrupt(vector, regs); + else + handle_irq(16+i, regs); + } +} + +void __init +init_pyxis_irqs(unsigned long ignore_mask) +{ + long i; + + *(vulp)PYXIS_INT_MASK = 0; /* disable all */ + *(vulp)PYXIS_INT_REQ = -1; /* flush all */ + mb(); + + /* Send -INTA pulses to clear any pending interrupts ...*/ + *(vuip) CIA_IACK_SC; + + for (i = 16; i < 48; ++i) { + if ((ignore_mask >> i) & 1) + continue; + irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; + irq_desc[i].handler = &pyxis_irq_type; + } + + setup_irq(16+7, &isa_cascade_irqaction); +} diff --git a/arch/alpha/kernel/irq_srm.c b/arch/alpha/kernel/irq_srm.c new file mode 100644 index 0000000..0a87e46 --- /dev/null +++ b/arch/alpha/kernel/irq_srm.c @@ -0,0 +1,79 @@ +/* + * Handle interrupts from the SRM, assuming no additional weirdness. + */ + +#include <linux/init.h> +#include <linux/sched.h> +#include <linux/irq.h> + +#include "proto.h" +#include "irq_impl.h" + + +/* + * Is the palcode SMP safe? In other words: can we call cserve_ena/dis + * at the same time in multiple CPUs? To be safe I added a spinlock + * but it can be removed trivially if the palcode is robust against smp. + */ +DEFINE_SPINLOCK(srm_irq_lock); + +static inline void +srm_enable_irq(unsigned int irq) +{ + spin_lock(&srm_irq_lock); + cserve_ena(irq - 16); + spin_unlock(&srm_irq_lock); +} + +static void +srm_disable_irq(unsigned int irq) +{ + spin_lock(&srm_irq_lock); + cserve_dis(irq - 16); + spin_unlock(&srm_irq_lock); +} + +static unsigned int +srm_startup_irq(unsigned int irq) +{ + srm_enable_irq(irq); + return 0; +} + +static void +srm_end_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + srm_enable_irq(irq); +} + +/* Handle interrupts from the SRM, assuming no additional weirdness. */ +static struct hw_interrupt_type srm_irq_type = { + .typename = "SRM", + .startup = srm_startup_irq, + .shutdown = srm_disable_irq, + .enable = srm_enable_irq, + .disable = srm_disable_irq, + .ack = srm_disable_irq, + .end = srm_end_irq, +}; + +void __init +init_srm_irqs(long max, unsigned long ignore_mask) +{ + long i; + + for (i = 16; i < max; ++i) { + if (i < 64 && ((ignore_mask >> i) & 1)) + continue; + irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; + irq_desc[i].handler = &srm_irq_type; + } +} + +void +srm_device_interrupt(unsigned long vector, struct pt_regs * regs) +{ + int irq = (vector - 0x800) >> 4; + handle_irq(irq, regs); +} diff --git a/arch/alpha/kernel/machvec_impl.h b/arch/alpha/kernel/machvec_impl.h new file mode 100644 index 0000000..4959b7a --- /dev/null +++ b/arch/alpha/kernel/machvec_impl.h @@ -0,0 +1,150 @@ +/* + * linux/arch/alpha/kernel/machvec.h + * + * Copyright (C) 1997, 1998 Richard Henderson + * + * This file has goodies to help simplify instantiation of machine vectors. + */ + +#include <linux/config.h> +#include <asm/pgalloc.h> + +/* Whee. These systems don't have an HAE: + IRONGATE, MARVEL, POLARIS, TSUNAMI, TITAN, WILDFIRE + Fix things up for the GENERIC kernel by defining the HAE address + to be that of the cache. Now we can read and write it as we like. 
;-) */ +#define IRONGATE_HAE_ADDRESS (&alpha_mv.hae_cache) +#define MARVEL_HAE_ADDRESS (&alpha_mv.hae_cache) +#define POLARIS_HAE_ADDRESS (&alpha_mv.hae_cache) +#define TSUNAMI_HAE_ADDRESS (&alpha_mv.hae_cache) +#define TITAN_HAE_ADDRESS (&alpha_mv.hae_cache) +#define WILDFIRE_HAE_ADDRESS (&alpha_mv.hae_cache) + +#ifdef CIA_ONE_HAE_WINDOW +#define CIA_HAE_ADDRESS (&alpha_mv.hae_cache) +#endif +#ifdef MCPCIA_ONE_HAE_WINDOW +#define MCPCIA_HAE_ADDRESS (&alpha_mv.hae_cache) +#endif + +/* Only a few systems don't define IACK_SC, handling all interrupts through + the SRM console. But splitting out that one case from IO() below + seems like such a pain. Define this to get things to compile. */ +#define JENSEN_IACK_SC 1 +#define T2_IACK_SC 1 +#define WILDFIRE_IACK_SC 1 /* FIXME */ + +/* + * Some helpful macros for filling in the blanks. + */ + +#define CAT1(x,y) x##y +#define CAT(x,y) CAT1(x,y) + +#define DO_DEFAULT_RTC rtc_port: 0x70 + +#define DO_EV4_MMU \ + .max_asn = EV4_MAX_ASN, \ + .mv_switch_mm = ev4_switch_mm, \ + .mv_activate_mm = ev4_activate_mm, \ + .mv_flush_tlb_current = ev4_flush_tlb_current, \ + .mv_flush_tlb_current_page = ev4_flush_tlb_current_page + +#define DO_EV5_MMU \ + .max_asn = EV5_MAX_ASN, \ + .mv_switch_mm = ev5_switch_mm, \ + .mv_activate_mm = ev5_activate_mm, \ + .mv_flush_tlb_current = ev5_flush_tlb_current, \ + .mv_flush_tlb_current_page = ev5_flush_tlb_current_page + +#define DO_EV6_MMU \ + .max_asn = EV6_MAX_ASN, \ + .mv_switch_mm = ev5_switch_mm, \ + .mv_activate_mm = ev5_activate_mm, \ + .mv_flush_tlb_current = ev5_flush_tlb_current, \ + .mv_flush_tlb_current_page = ev5_flush_tlb_current_page + +#define DO_EV7_MMU \ + .max_asn = EV6_MAX_ASN, \ + .mv_switch_mm = ev5_switch_mm, \ + .mv_activate_mm = ev5_activate_mm, \ + .mv_flush_tlb_current = ev5_flush_tlb_current, \ + .mv_flush_tlb_current_page = ev5_flush_tlb_current_page + +#define IO_LITE(UP,low) \ + .hae_register = (unsigned long *) CAT(UP,_HAE_ADDRESS), \ + .iack_sc = CAT(UP,_IACK_SC), \ + .mv_ioread8 = CAT(low,_ioread8), \ + .mv_ioread16 = CAT(low,_ioread16), \ + .mv_ioread32 = CAT(low,_ioread32), \ + .mv_iowrite8 = CAT(low,_iowrite8), \ + .mv_iowrite16 = CAT(low,_iowrite16), \ + .mv_iowrite32 = CAT(low,_iowrite32), \ + .mv_readb = CAT(low,_readb), \ + .mv_readw = CAT(low,_readw), \ + .mv_readl = CAT(low,_readl), \ + .mv_readq = CAT(low,_readq), \ + .mv_writeb = CAT(low,_writeb), \ + .mv_writew = CAT(low,_writew), \ + .mv_writel = CAT(low,_writel), \ + .mv_writeq = CAT(low,_writeq), \ + .mv_ioportmap = CAT(low,_ioportmap), \ + .mv_ioremap = CAT(low,_ioremap), \ + .mv_iounmap = CAT(low,_iounmap), \ + .mv_is_ioaddr = CAT(low,_is_ioaddr), \ + .mv_is_mmio = CAT(low,_is_mmio) \ + +#define IO(UP,low) \ + IO_LITE(UP,low), \ + .pci_ops = &CAT(low,_pci_ops), \ + .mv_pci_tbi = CAT(low,_pci_tbi) + +#define DO_APECS_IO IO(APECS,apecs) +#define DO_CIA_IO IO(CIA,cia) +#define DO_IRONGATE_IO IO(IRONGATE,irongate) +#define DO_LCA_IO IO(LCA,lca) +#define DO_MARVEL_IO IO(MARVEL,marvel) +#define DO_MCPCIA_IO IO(MCPCIA,mcpcia) +#define DO_POLARIS_IO IO(POLARIS,polaris) +#define DO_T2_IO IO(T2,t2) +#define DO_TSUNAMI_IO IO(TSUNAMI,tsunami) +#define DO_TITAN_IO IO(TITAN,titan) +#define DO_WILDFIRE_IO IO(WILDFIRE,wildfire) + +#define DO_PYXIS_IO IO_LITE(CIA,cia_bwx), \ + .pci_ops = &cia_pci_ops, \ + .mv_pci_tbi = cia_pci_tbi + +/* + * In a GENERIC kernel, we have lots of these vectors floating about, + * all but one of which we want to go away. In a non-GENERIC kernel, + * we want only one, ever. 
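+ *
+ * (Sketch of what a system file does with the helpers defined just
+ * below, using a hypothetical "foo" platform:
+ *
+ *	struct alpha_machine_vector foo_mv __initmv = { ... };
+ *	ALIAS_MV(foo)
+ *
+ * GENERIC kernels get foo_mv in .init.data and copy the chosen one
+ * into alpha_mv; non-GENERIC kernels alias the symbol directly.)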
+ * + * Accomplish this in the GENERIC kernel by putting all of the vectors + * in the .init.data section where they'll go away. We'll copy the + * one we want to the real alpha_mv vector in setup_arch. + * + * Accomplish this in a non-GENERIC kernel by ifdef'ing out all but + * one of the vectors, which will not reside in .init.data. We then + * alias this one vector to alpha_mv, so no copy is needed. + * + * Upshot: set __initdata to nothing for non-GENERIC kernels. + */ + +#ifdef CONFIG_ALPHA_GENERIC +#define __initmv __initdata +#define ALIAS_MV(x) +#else +#define __initmv + +/* GCC actually has a syntax for defining aliases, but is under some + delusion that you shouldn't be able to declare it extern somewhere + else beforehand. Fine. We'll do it ourselves. */ +#if 0 +#define ALIAS_MV(system) \ + struct alpha_machine_vector alpha_mv __attribute__((alias(#system "_mv"))); +#else +#define ALIAS_MV(system) \ + asm(".global alpha_mv\nalpha_mv = " #system "_mv"); +#endif +#endif /* GENERIC */ diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c new file mode 100644 index 0000000..fc271e3 --- /dev/null +++ b/arch/alpha/kernel/module.c @@ -0,0 +1,311 @@ +/* Kernel module help for Alpha. + Copyright (C) 2002 Richard Henderson. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +#include <linux/moduleloader.h> +#include <linux/elf.h> +#include <linux/vmalloc.h> +#include <linux/fs.h> +#include <linux/string.h> +#include <linux/kernel.h> +#include <linux/slab.h> + +#if 0 +#define DEBUGP printk +#else +#define DEBUGP(fmt...) +#endif + +void * +module_alloc(unsigned long size) +{ + if (size == 0) + return NULL; + return vmalloc(size); +} + +void +module_free(struct module *mod, void *module_region) +{ + vfree(module_region); +} + +/* Allocate the GOT at the end of the core sections. */ + +struct got_entry { + struct got_entry *next; + Elf64_Addr r_offset; + int got_offset; +}; + +static inline void +process_reloc_for_got(Elf64_Rela *rela, + struct got_entry *chains, Elf64_Xword *poffset) +{ + unsigned long r_sym = ELF64_R_SYM (rela->r_info); + unsigned long r_type = ELF64_R_TYPE (rela->r_info); + Elf64_Addr r_offset = rela->r_offset; + struct got_entry *g; + + if (r_type != R_ALPHA_LITERAL) + return; + + for (g = chains + r_sym; g ; g = g->next) + if (g->r_offset == r_offset) { + if (g->got_offset == 0) { + g->got_offset = *poffset; + *poffset += 8; + } + goto found_entry; + } + + g = kmalloc (sizeof (*g), GFP_KERNEL); + g->next = chains[r_sym].next; + g->r_offset = r_offset; + g->got_offset = *poffset; + *poffset += 8; + chains[r_sym].next = g; + + found_entry: + /* Trick: most of the ELF64_R_TYPE field is unused. There are + 42 valid relocation types, and a 32-bit field. Co-opt the + bits above 256 to store the got offset for this reloc. 
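+
+	   Worked example: a LITERAL reloc whose GOT slot lands at
+	   offset 0x18 gets r_info |= 0x18 << 8 here, and
+	   apply_relocate_add() below undoes it with
+	   r_got_offset = r_type >> 8 followed by r_type &= 0xff.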
*/ + rela->r_info |= g->got_offset << 8; +} + +int +module_frob_arch_sections(Elf64_Ehdr *hdr, Elf64_Shdr *sechdrs, + char *secstrings, struct module *me) +{ + struct got_entry *chains; + Elf64_Rela *rela; + Elf64_Shdr *esechdrs, *symtab, *s, *got; + unsigned long nsyms, nrela, i; + + esechdrs = sechdrs + hdr->e_shnum; + symtab = got = NULL; + + /* Find out how large the symbol table is. Allocate one got_entry + head per symbol. Normally this will be enough, but not always. + We'll chain different offsets for the symbol down each head. */ + for (s = sechdrs; s < esechdrs; ++s) + if (s->sh_type == SHT_SYMTAB) + symtab = s; + else if (!strcmp(".got", secstrings + s->sh_name)) { + got = s; + me->arch.gotsecindex = s - sechdrs; + } + + if (!symtab) { + printk(KERN_ERR "module %s: no symbol table\n", me->name); + return -ENOEXEC; + } + if (!got) { + printk(KERN_ERR "module %s: no got section\n", me->name); + return -ENOEXEC; + } + + nsyms = symtab->sh_size / sizeof(Elf64_Sym); + chains = kmalloc(nsyms * sizeof(struct got_entry), GFP_KERNEL); + memset(chains, 0, nsyms * sizeof(struct got_entry)); + + got->sh_size = 0; + got->sh_addralign = 8; + got->sh_type = SHT_NOBITS; + + /* Examine all LITERAL relocations to find out what GOT entries + are required. This sizes the GOT section as well. */ + for (s = sechdrs; s < esechdrs; ++s) + if (s->sh_type == SHT_RELA) { + nrela = s->sh_size / sizeof(Elf64_Rela); + rela = (void *)hdr + s->sh_offset; + for (i = 0; i < nrela; ++i) + process_reloc_for_got(rela+i, chains, + &got->sh_size); + } + + /* Free the memory we allocated. */ + for (i = 0; i < nsyms; ++i) { + struct got_entry *g, *n; + for (g = chains[i].next; g ; g = n) { + n = g->next; + kfree(g); + } + } + kfree(chains); + + return 0; +} + +int +apply_relocate(Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex, + unsigned int relsec, struct module *me) +{ + printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name); + return -ENOEXEC; +} + +int +apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab, + unsigned int symindex, unsigned int relsec, + struct module *me) +{ + Elf64_Rela *rela = (void *)sechdrs[relsec].sh_addr; + unsigned long i, n = sechdrs[relsec].sh_size / sizeof(*rela); + Elf64_Sym *symtab, *sym; + void *base, *location; + unsigned long got, gp; + + DEBUGP("Applying relocate section %u to %u\n", relsec, + sechdrs[relsec].sh_info); + + base = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr; + symtab = (Elf64_Sym *)sechdrs[symindex].sh_addr; + + /* The small sections were sorted to the end of the segment. + The following should definitely cover them. */ + gp = (u64)me->module_core + me->core_size - 0x8000; + got = sechdrs[me->arch.gotsecindex].sh_addr; + + for (i = 0; i < n; i++) { + unsigned long r_sym = ELF64_R_SYM (rela[i].r_info); + unsigned long r_type = ELF64_R_TYPE (rela[i].r_info); + unsigned long r_got_offset = r_type >> 8; + unsigned long value, hi, lo; + r_type &= 0xff; + + /* This is where to make the change. */ + location = base + rela[i].r_offset; + + /* This is the symbol it is referring to. Note that all + unresolved symbols have been resolved. */ + sym = symtab + r_sym; + value = sym->st_value + rela[i].r_addend; + + switch (r_type) { + case R_ALPHA_NONE: + break; + case R_ALPHA_REFQUAD: + /* BUG() can produce misaligned relocations. 
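+		   For instance, a quad at an address that is only
+		   4-byte aligned would take an unaligned-access trap
+		   under a single *(u64 *)location store, so the value
+		   is written as two u32 halves, low then high, as done
+		   just below.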
*/ + ((u32 *)location)[0] = value; + ((u32 *)location)[1] = value >> 32; + break; + case R_ALPHA_GPREL32: + value -= gp; + if ((int)value != value) + goto reloc_overflow; + *(u32 *)location = value; + break; + case R_ALPHA_LITERAL: + hi = got + r_got_offset; + lo = hi - gp; + if ((short)lo != lo) + goto reloc_overflow; + *(u16 *)location = lo; + *(u64 *)hi = value; + break; + case R_ALPHA_LITUSE: + break; + case R_ALPHA_GPDISP: + value = gp - (u64)location; + lo = (short)value; + hi = (int)(value - lo); + if (hi + lo != value) + goto reloc_overflow; + *(u16 *)location = hi >> 16; + *(u16 *)(location + rela[i].r_addend) = lo; + break; + case R_ALPHA_BRSGP: + /* BRSGP is only allowed to bind to local symbols. + If the section is undef, this means that the + value was resolved from somewhere else. */ + if (sym->st_shndx == SHN_UNDEF) + goto reloc_overflow; + if ((sym->st_other & STO_ALPHA_STD_GPLOAD) == + STO_ALPHA_STD_GPLOAD) + /* Omit the prologue. */ + value += 8; + /* FALLTHRU */ + case R_ALPHA_BRADDR: + value -= (u64)location + 4; + if (value & 3) + goto reloc_overflow; + value = (long)value >> 2; + if (value + (1<<21) >= 1<<22) + goto reloc_overflow; + value &= 0x1fffff; + value |= *(u32 *)location & ~0x1fffff; + *(u32 *)location = value; + break; + case R_ALPHA_HINT: + break; + case R_ALPHA_SREL32: + value -= (u64)location; + if ((int)value != value) + goto reloc_overflow; + *(u32 *)location = value; + break; + case R_ALPHA_SREL64: + value -= (u64)location; + *(u64 *)location = value; + break; + case R_ALPHA_GPRELHIGH: + value = (long)(value - gp + 0x8000) >> 16; + if ((short) value != value) + goto reloc_overflow; + *(u16 *)location = value; + break; + case R_ALPHA_GPRELLOW: + value -= gp; + *(u16 *)location = value; + break; + case R_ALPHA_GPREL16: + value -= gp; + if ((short) value != value) + goto reloc_overflow; + *(u16 *)location = value; + break; + default: + printk(KERN_ERR "module %s: Unknown relocation: %lu\n", + me->name, r_type); + return -ENOEXEC; + reloc_overflow: + if (ELF64_ST_TYPE (sym->st_info) == STT_SECTION) + printk(KERN_ERR + "module %s: Relocation overflow vs section %d\n", + me->name, sym->st_shndx); + else + printk(KERN_ERR + "module %s: Relocation overflow vs %s\n", + me->name, strtab + sym->st_name); + return -ENOEXEC; + } + } + + return 0; +} + +int +module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, + struct module *me) +{ + return 0; +} + +void +module_arch_cleanup(struct module *mod) +{ +} diff --git a/arch/alpha/kernel/ns87312.c b/arch/alpha/kernel/ns87312.c new file mode 100644 index 0000000..342b56d --- /dev/null +++ b/arch/alpha/kernel/ns87312.c @@ -0,0 +1,38 @@ +/* + * linux/arch/alpha/kernel/ns87312.c + */ + +#include <linux/init.h> +#include <asm/io.h> +#include "proto.h" + + +/* + * The SRM console *disables* the IDE interface, this code ensures it's + * enabled. + * + * This code bangs on a control register of the 87312 Super I/O chip + * that implements parallel port/serial ports/IDE/FDI. Depending on + * the motherboard, the Super I/O chip can be configured through a + * pair of registers that are located either at I/O ports 0x26e/0x26f + * or 0x398/0x399. Unfortunately, autodetecting which base address is + * in use works only once (right after a reset). The Super I/O chip + * has the additional quirk that configuration register data must be + * written twice (I believe this is a safety feature to prevent + * accidental modification---fun, isn't it?). 
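+ *
+ * The resulting access pattern, sketched (reg/val stand for any
+ * config register index and datum; see ns87312_enable_ide() below
+ * for the real thing):
+ *
+ *	outb(reg, base);	select the config register
+ *	val = inb(base + 1);	read its current value
+ *	outb(reg, base);	re-select before writing
+ *	outb(val, base + 1);	write the data...
+ *	outb(val, base + 1);	...twice, so it actually sticks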
+ */ + +void __init +ns87312_enable_ide(long ide_base) +{ + int data; + unsigned long flags; + + local_irq_save(flags); + outb(0, ide_base); /* set the index register for reg #0 */ + data = inb(ide_base+1); /* read the current contents */ + outb(0, ide_base); /* set the index register for reg #0 */ + outb(data | 0x40, ide_base+1); /* turn on IDE */ + outb(data | 0x40, ide_base+1); /* turn on IDE, really! */ + local_irq_restore(flags); +} diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c new file mode 100644 index 0000000..b5d0fd2 --- /dev/null +++ b/arch/alpha/kernel/osf_sys.c @@ -0,0 +1,1345 @@ +/* + * linux/arch/alpha/kernel/osf_sys.c + * + * Copyright (C) 1995 Linus Torvalds + */ + +/* + * This file handles some of the stranger OSF/1 system call interfaces. + * Some of the system calls expect a non-C calling standard, others have + * special parameter blocks.. + */ + +#include <linux/errno.h> +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/smp.h> +#include <linux/smp_lock.h> +#include <linux/stddef.h> +#include <linux/syscalls.h> +#include <linux/unistd.h> +#include <linux/ptrace.h> +#include <linux/slab.h> +#include <linux/user.h> +#include <linux/a.out.h> +#include <linux/utsname.h> +#include <linux/time.h> +#include <linux/timex.h> +#include <linux/major.h> +#include <linux/stat.h> +#include <linux/mman.h> +#include <linux/shm.h> +#include <linux/poll.h> +#include <linux/file.h> +#include <linux/types.h> +#include <linux/ipc.h> +#include <linux/namei.h> +#include <linux/uio.h> +#include <linux/vfs.h> + +#include <asm/fpu.h> +#include <asm/io.h> +#include <asm/uaccess.h> +#include <asm/system.h> +#include <asm/sysinfo.h> +#include <asm/hwrpb.h> +#include <asm/processor.h> + +extern int do_pipe(int *); + +/* + * Brk needs to return an error. Still support Linux's brk(0) query idiom, + * which OSF programs just shouldn't be doing. We're still not quite + * identical to OSF as we don't return 0 on success, but doing otherwise + * would require changes to libc. Hopefully this is good enough. + */ +asmlinkage unsigned long +osf_brk(unsigned long brk) +{ + unsigned long retval = sys_brk(brk); + if (brk && brk != retval) + retval = -ENOMEM; + return retval; +} + +/* + * This is pure guess-work.. + */ +asmlinkage int +osf_set_program_attributes(unsigned long text_start, unsigned long text_len, + unsigned long bss_start, unsigned long bss_len) +{ + struct mm_struct *mm; + + lock_kernel(); + mm = current->mm; + mm->end_code = bss_start + bss_len; + mm->brk = bss_start + bss_len; +#if 0 + printk("set_program_attributes(%lx %lx %lx %lx)\n", + text_start, text_len, bss_start, bss_len); +#endif + unlock_kernel(); + return 0; +} + +/* + * OSF/1 directory handling functions... + * + * The "getdents()" interface is much more sane: the "basep" stuff is + * braindamage (it can't really handle filesystems where the directory + * offset differences aren't the same as "d_reclen"). 
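+ *
+ * Record layout in the user buffer, by way of example: for the name
+ * "alpha", d_ino (4 bytes) + d_reclen (2) + d_namlen (2) gives a
+ * NAME_OFFSET of 8, so reclen = ROUND_UP(8 + 5 + 1) = 16 and the
+ * next osf_dirent starts 16 bytes further along.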
+ */
+#define NAME_OFFSET	offsetof (struct osf_dirent, d_name)
+#define ROUND_UP(x)	(((x)+3) & ~3)
+
+struct osf_dirent {
+	unsigned int	d_ino;
+	unsigned short	d_reclen;
+	unsigned short	d_namlen;
+	char		d_name[1];
+};
+
+struct osf_dirent_callback {
+	struct osf_dirent __user *dirent;
+	long __user *basep;
+	unsigned int count;
+	int error;
+};
+
+static int
+osf_filldir(void *__buf, const char *name, int namlen, loff_t offset,
+	    ino_t ino, unsigned int d_type)
+{
+	struct osf_dirent __user *dirent;
+	struct osf_dirent_callback *buf = (struct osf_dirent_callback *) __buf;
+	unsigned int reclen = ROUND_UP(NAME_OFFSET + namlen + 1);
+
+	buf->error = -EINVAL;	/* only used if we fail */
+	if (reclen > buf->count)
+		return -EINVAL;
+	if (buf->basep) {
+		if (put_user(offset, buf->basep))
+			return -EFAULT;
+		buf->basep = NULL;
+	}
+	dirent = buf->dirent;
+	put_user(ino, &dirent->d_ino);
+	put_user(namlen, &dirent->d_namlen);
+	put_user(reclen, &dirent->d_reclen);
+	if (copy_to_user(dirent->d_name, name, namlen) ||
+	    put_user(0, dirent->d_name + namlen))
+		return -EFAULT;
+	dirent = (void __user *)dirent + reclen;
+	buf->dirent = dirent;
+	buf->count -= reclen;
+	return 0;
+}
+
+asmlinkage int
+osf_getdirentries(unsigned int fd, struct osf_dirent __user *dirent,
+		  unsigned int count, long __user *basep)
+{
+	int error;
+	struct file *file;
+	struct osf_dirent_callback buf;
+
+	error = -EBADF;
+	file = fget(fd);
+	if (!file)
+		goto out;
+
+	buf.dirent = dirent;
+	buf.basep = basep;
+	buf.count = count;
+	buf.error = 0;
+
+	error = vfs_readdir(file, osf_filldir, &buf);
+	if (error < 0)
+		goto out_putf;
+
+	error = buf.error;
+	if (count != buf.count)
+		error = count - buf.count;
+
+ out_putf:
+	fput(file);
+ out:
+	return error;
+}
+
+#undef ROUND_UP
+#undef NAME_OFFSET
+
+asmlinkage unsigned long
+osf_mmap(unsigned long addr, unsigned long len, unsigned long prot,
+	 unsigned long flags, unsigned long fd, unsigned long off)
+{
+	struct file *file = NULL;
+	unsigned long ret = -EBADF;
+
+#if 0
+	if (flags & (_MAP_HASSEMAPHORE | _MAP_INHERIT | _MAP_UNALIGNED))
+		printk("%s: unimplemented OSF mmap flags %04lx\n",
+			current->comm, flags);
+#endif
+	if (!(flags & MAP_ANONYMOUS)) {
+		file = fget(fd);
+		if (!file)
+			goto out;
+	}
+	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+	down_write(&current->mm->mmap_sem);
+	ret = do_mmap(file, addr, len, prot, flags, off);
+	up_write(&current->mm->mmap_sem);
+	if (file)
+		fput(file);
+ out:
+	return ret;
+}
+
+
+/*
+ * The OSF/1 statfs structure is much larger, but this should
+ * match the beginning, at least.
+ */
+struct osf_statfs {
+	short f_type;
+	short f_flags;
+	int f_fsize;
+	int f_bsize;
+	int f_blocks;
+	int f_bfree;
+	int f_bavail;
+	int f_files;
+	int f_ffree;
+	__kernel_fsid_t f_fsid;
+};
+
+static int
+linux_to_osf_statfs(struct kstatfs *linux_stat, struct osf_statfs __user *osf_stat,
+		    unsigned long bufsiz)
+{
+	struct osf_statfs tmp_stat;
+
+	tmp_stat.f_type = linux_stat->f_type;
+	tmp_stat.f_flags = 0;	/* mount flags */
+	tmp_stat.f_fsize = linux_stat->f_frsize;
+	tmp_stat.f_bsize = linux_stat->f_bsize;
+	tmp_stat.f_blocks = linux_stat->f_blocks;
+	tmp_stat.f_bfree = linux_stat->f_bfree;
+	tmp_stat.f_bavail = linux_stat->f_bavail;
+	tmp_stat.f_files = linux_stat->f_files;
+	tmp_stat.f_ffree = linux_stat->f_ffree;
+	tmp_stat.f_fsid = linux_stat->f_fsid;
+	if (bufsiz > sizeof(tmp_stat))
+		bufsiz = sizeof(tmp_stat);
+	return copy_to_user(osf_stat, &tmp_stat, bufsiz) ?
-EFAULT : 0; +} + +static int +do_osf_statfs(struct dentry * dentry, struct osf_statfs __user *buffer, + unsigned long bufsiz) +{ + struct kstatfs linux_stat; + int error = vfs_statfs(dentry->d_inode->i_sb, &linux_stat); + if (!error) + error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz); + return error; +} + +asmlinkage int +osf_statfs(char __user *path, struct osf_statfs __user *buffer, unsigned long bufsiz) +{ + struct nameidata nd; + int retval; + + retval = user_path_walk(path, &nd); + if (!retval) { + retval = do_osf_statfs(nd.dentry, buffer, bufsiz); + path_release(&nd); + } + return retval; +} + +asmlinkage int +osf_fstatfs(unsigned long fd, struct osf_statfs __user *buffer, unsigned long bufsiz) +{ + struct file *file; + int retval; + + retval = -EBADF; + file = fget(fd); + if (file) { + retval = do_osf_statfs(file->f_dentry, buffer, bufsiz); + fput(file); + } + return retval; +} + +/* + * Uhh.. OSF/1 mount parameters aren't exactly obvious.. + * + * Although to be frank, neither are the native Linux/i386 ones.. + */ +struct ufs_args { + char __user *devname; + int flags; + uid_t exroot; +}; + +struct cdfs_args { + char __user *devname; + int flags; + uid_t exroot; + + /* This has lots more here, which Linux handles with the option block + but I'm too lazy to do the translation into ASCII. */ +}; + +struct procfs_args { + char __user *devname; + int flags; + uid_t exroot; +}; + +/* + * We can't actually handle ufs yet, so we translate UFS mounts to + * ext2fs mounts. I wouldn't mind a UFS filesystem, but the UFS + * layout is so braindead it's a major headache doing it. + * + * Just how long ago was it written? OTOH our UFS driver may be still + * unhappy with OSF UFS. [CHECKME] + */ +static int +osf_ufs_mount(char *dirname, struct ufs_args __user *args, int flags) +{ + int retval; + struct cdfs_args tmp; + char *devname; + + retval = -EFAULT; + if (copy_from_user(&tmp, args, sizeof(tmp))) + goto out; + devname = getname(tmp.devname); + retval = PTR_ERR(devname); + if (IS_ERR(devname)) + goto out; + retval = do_mount(devname, dirname, "ext2", flags, NULL); + putname(devname); + out: + return retval; +} + +static int +osf_cdfs_mount(char *dirname, struct cdfs_args __user *args, int flags) +{ + int retval; + struct cdfs_args tmp; + char *devname; + + retval = -EFAULT; + if (copy_from_user(&tmp, args, sizeof(tmp))) + goto out; + devname = getname(tmp.devname); + retval = PTR_ERR(devname); + if (IS_ERR(devname)) + goto out; + retval = do_mount(devname, dirname, "iso9660", flags, NULL); + putname(devname); + out: + return retval; +} + +static int +osf_procfs_mount(char *dirname, struct procfs_args __user *args, int flags) +{ + struct procfs_args tmp; + + if (copy_from_user(&tmp, args, sizeof(tmp))) + return -EFAULT; + + return do_mount("", dirname, "proc", flags, NULL); +} + +asmlinkage int +osf_mount(unsigned long typenr, char __user *path, int flag, void __user *data) +{ + int retval = -EINVAL; + char *name; + + lock_kernel(); + + name = getname(path); + retval = PTR_ERR(name); + if (IS_ERR(name)) + goto out; + switch (typenr) { + case 1: + retval = osf_ufs_mount(name, data, flag); + break; + case 6: + retval = osf_cdfs_mount(name, data, flag); + break; + case 9: + retval = osf_procfs_mount(name, data, flag); + break; + default: + printk("osf_mount(%ld, %x)\n", typenr, flag); + } + putname(name); + out: + unlock_kernel(); + return retval; +} + +asmlinkage int +osf_utsname(char __user *name) +{ + int error; + + down_read(&uts_sem); + error = -EFAULT; + if (copy_to_user(name + 0, 
system_utsname.sysname, 32)) + goto out; + if (copy_to_user(name + 32, system_utsname.nodename, 32)) + goto out; + if (copy_to_user(name + 64, system_utsname.release, 32)) + goto out; + if (copy_to_user(name + 96, system_utsname.version, 32)) + goto out; + if (copy_to_user(name + 128, system_utsname.machine, 32)) + goto out; + + error = 0; + out: + up_read(&uts_sem); + return error; +} + +asmlinkage unsigned long +sys_getpagesize(void) +{ + return PAGE_SIZE; +} + +asmlinkage unsigned long +sys_getdtablesize(void) +{ + return NR_OPEN; +} + +/* + * For compatibility with OSF/1 only. Use utsname(2) instead. + */ +asmlinkage int +osf_getdomainname(char __user *name, int namelen) +{ + unsigned len; + int i; + + if (!access_ok(VERIFY_WRITE, name, namelen)) + return -EFAULT; + + len = namelen; + if (namelen > 32) + len = 32; + + down_read(&uts_sem); + for (i = 0; i < len; ++i) { + __put_user(system_utsname.domainname[i], name + i); + if (system_utsname.domainname[i] == '\0') + break; + } + up_read(&uts_sem); + + return 0; +} + +asmlinkage long +osf_shmat(int shmid, void __user *shmaddr, int shmflg) +{ + unsigned long raddr; + long err; + + err = do_shmat(shmid, shmaddr, shmflg, &raddr); + + /* + * This works because all user-level addresses are + * non-negative longs! + */ + return err ? err : (long)raddr; +} + + +/* + * The following stuff should move into a header file should it ever + * be labeled "officially supported." Right now, there is just enough + * support to avoid applications (such as tar) printing error + * messages. The attributes are not really implemented. + */ + +/* + * Values for Property list entry flag + */ +#define PLE_PROPAGATE_ON_COPY 0x1 /* cp(1) will copy entry + by default */ +#define PLE_FLAG_MASK 0x1 /* Valid flag values */ +#define PLE_FLAG_ALL -1 /* All flag value */ + +struct proplistname_args { + unsigned int pl_mask; + unsigned int pl_numnames; + char **pl_names; +}; + +union pl_args { + struct setargs { + char __user *path; + long follow; + long nbytes; + char __user *buf; + } set; + struct fsetargs { + long fd; + long nbytes; + char __user *buf; + } fset; + struct getargs { + char __user *path; + long follow; + struct proplistname_args __user *name_args; + long nbytes; + char __user *buf; + int __user *min_buf_size; + } get; + struct fgetargs { + long fd; + struct proplistname_args __user *name_args; + long nbytes; + char __user *buf; + int __user *min_buf_size; + } fget; + struct delargs { + char __user *path; + long follow; + struct proplistname_args __user *name_args; + } del; + struct fdelargs { + long fd; + struct proplistname_args __user *name_args; + } fdel; +}; + +enum pl_code { + PL_SET = 1, PL_FSET = 2, + PL_GET = 3, PL_FGET = 4, + PL_DEL = 5, PL_FDEL = 6 +}; + +asmlinkage long +osf_proplist_syscall(enum pl_code code, union pl_args __user *args) +{ + long error; + int __user *min_buf_size_ptr; + + lock_kernel(); + switch (code) { + case PL_SET: + if (get_user(error, &args->set.nbytes)) + error = -EFAULT; + break; + case PL_FSET: + if (get_user(error, &args->fset.nbytes)) + error = -EFAULT; + break; + case PL_GET: + error = get_user(min_buf_size_ptr, &args->get.min_buf_size); + if (error) + break; + error = put_user(0, min_buf_size_ptr); + break; + case PL_FGET: + error = get_user(min_buf_size_ptr, &args->fget.min_buf_size); + if (error) + break; + error = put_user(0, min_buf_size_ptr); + break; + case PL_DEL: + case PL_FDEL: + error = 0; + break; + default: + error = -EOPNOTSUPP; + break; + }; + unlock_kernel(); + return error; +} + +asmlinkage int 
+osf_sigstack(struct sigstack __user *uss, struct sigstack __user *uoss) +{ + unsigned long usp = rdusp(); + unsigned long oss_sp = current->sas_ss_sp + current->sas_ss_size; + unsigned long oss_os = on_sig_stack(usp); + int error; + + if (uss) { + void __user *ss_sp; + + error = -EFAULT; + if (get_user(ss_sp, &uss->ss_sp)) + goto out; + + /* If the current stack was set with sigaltstack, don't + swap stacks while we are on it. */ + error = -EPERM; + if (current->sas_ss_sp && on_sig_stack(usp)) + goto out; + + /* Since we don't know the extent of the stack, and we don't + track onstack-ness, but rather calculate it, we must + presume a size. Ho hum this interface is lossy. */ + current->sas_ss_sp = (unsigned long)ss_sp - SIGSTKSZ; + current->sas_ss_size = SIGSTKSZ; + } + + if (uoss) { + error = -EFAULT; + if (! access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)) + || __put_user(oss_sp, &uoss->ss_sp) + || __put_user(oss_os, &uoss->ss_onstack)) + goto out; + } + + error = 0; + out: + return error; +} + +asmlinkage long +osf_sysinfo(int command, char __user *buf, long count) +{ + static char * sysinfo_table[] = { + system_utsname.sysname, + system_utsname.nodename, + system_utsname.release, + system_utsname.version, + system_utsname.machine, + "alpha", /* instruction set architecture */ + "dummy", /* hardware serial number */ + "dummy", /* hardware manufacturer */ + "dummy", /* secure RPC domain */ + }; + unsigned long offset; + char *res; + long len, err = -EINVAL; + + offset = command-1; + if (offset >= sizeof(sysinfo_table)/sizeof(char *)) { + /* Digital UNIX has a few unpublished interfaces here */ + printk("sysinfo(%d)", command); + goto out; + } + + down_read(&uts_sem); + res = sysinfo_table[offset]; + len = strlen(res)+1; + if (len > count) + len = count; + if (copy_to_user(buf, res, len)) + err = -EFAULT; + else + err = 0; + up_read(&uts_sem); + out: + return err; +} + +asmlinkage unsigned long +osf_getsysinfo(unsigned long op, void __user *buffer, unsigned long nbytes, + int __user *start, void __user *arg) +{ + unsigned long w; + struct percpu_struct *cpu; + + switch (op) { + case GSI_IEEE_FP_CONTROL: + /* Return current software fp control & status bits. */ + /* Note that DU doesn't verify available space here. */ + + w = current_thread_info()->ieee_state & IEEE_SW_MASK; + w = swcr_update_status(w, rdfpcr()); + if (put_user(w, (unsigned long __user *) buffer)) + return -EFAULT; + return 0; + + case GSI_IEEE_STATE_AT_SIGNAL: + /* + * Not sure anybody will ever use this weird stuff. These + * ops can be used (under OSF/1) to set the fpcr that should + * be used when a signal handler starts executing. 
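+		 *
+		 * For the ops that are implemented, the OSF/1-style
+		 * user call looks roughly like this (a sketch, not
+		 * verified against any particular libc):
+		 *
+		 *	unsigned long w;
+		 *	getsysinfo(GSI_IEEE_FP_CONTROL, &w, sizeof(w), 0, 0);
+		 *
+		 * which arrives here with buffer pointing at w.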
+		 */
+		break;
+
+	case GSI_UACPROC:
+		if (nbytes < sizeof(unsigned int))
+			return -EINVAL;
+		w = (current_thread_info()->flags >> UAC_SHIFT) & UAC_BITMASK;
+		if (put_user(w, (unsigned int __user *)buffer))
+			return -EFAULT;
+		return 1;
+
+	case GSI_PROC_TYPE:
+		if (nbytes < sizeof(unsigned long))
+			return -EINVAL;
+		cpu = (struct percpu_struct*)
+			((char*)hwrpb + hwrpb->processor_offset);
+		w = cpu->type;
+		if (put_user(w, (unsigned long __user*)buffer))
+			return -EFAULT;
+		return 1;
+
+	case GSI_GET_HWRPB:
+		if (nbytes < sizeof(*hwrpb))
+			return -EINVAL;
+		if (copy_to_user(buffer, hwrpb, nbytes) != 0)
+			return -EFAULT;
+		return 1;
+
+	default:
+		break;
+	}
+
+	return -EOPNOTSUPP;
+}
+
+asmlinkage unsigned long
+osf_setsysinfo(unsigned long op, void __user *buffer, unsigned long nbytes,
+	       int __user *start, void __user *arg)
+{
+	switch (op) {
+	case SSI_IEEE_FP_CONTROL: {
+		unsigned long swcr, fpcr;
+		unsigned int *state;
+
+		/*
+		 * Alpha Architecture Handbook 4.7.7.3:
+		 * To be fully IEEE compliant, we must track the current IEEE
+		 * exception state in software, because spurious bits can be
+		 * set in the trap shadow of a software-complete insn.
+		 */
+
+		if (get_user(swcr, (unsigned long __user *)buffer))
+			return -EFAULT;
+		state = &current_thread_info()->ieee_state;
+
+		/* Update software trap enable bits. */
+		*state = (*state & ~IEEE_SW_MASK) | (swcr & IEEE_SW_MASK);
+
+		/* Update the real fpcr. */
+		fpcr = rdfpcr() & FPCR_DYN_MASK;
+		fpcr |= ieee_swcr_to_fpcr(swcr);
+		wrfpcr(fpcr);
+
+		return 0;
+	}
+
+	case SSI_IEEE_RAISE_EXCEPTION: {
+		unsigned long exc, swcr, fpcr, fex;
+		unsigned int *state;
+
+		if (get_user(exc, (unsigned long __user *)buffer))
+			return -EFAULT;
+		state = &current_thread_info()->ieee_state;
+		exc &= IEEE_STATUS_MASK;
+
+		/* Update software trap enable bits. */
+		swcr = (*state & IEEE_SW_MASK) | exc;
+		*state |= exc;
+
+		/* Update the real fpcr. */
+		fpcr = rdfpcr();
+		fpcr |= ieee_swcr_to_fpcr(swcr);
+		wrfpcr(fpcr);
+
+		/* If any exceptions set by this call, and are unmasked,
+		   send a signal. Old exceptions are not signaled. */
+		fex = (exc >> IEEE_STATUS_TO_EXCSUM_SHIFT) & swcr;
+		if (fex) {
+			siginfo_t info;
+			int si_code = 0;
+
+			if (fex & IEEE_TRAP_ENABLE_DNO) si_code = FPE_FLTUND;
+			if (fex & IEEE_TRAP_ENABLE_INE) si_code = FPE_FLTRES;
+			if (fex & IEEE_TRAP_ENABLE_UNF) si_code = FPE_FLTUND;
+			if (fex & IEEE_TRAP_ENABLE_OVF) si_code = FPE_FLTOVF;
+			if (fex & IEEE_TRAP_ENABLE_DZE) si_code = FPE_FLTDIV;
+			if (fex & IEEE_TRAP_ENABLE_INV) si_code = FPE_FLTINV;
+
+			info.si_signo = SIGFPE;
+			info.si_errno = 0;
+			info.si_code = si_code;
+			info.si_addr = NULL;	/* FIXME */
+			send_sig_info(SIGFPE, &info, current);
+		}
+		return 0;
+	}
+
+	case SSI_IEEE_STATE_AT_SIGNAL:
+	case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
+		/*
+		 * Not sure anybody will ever use this weird stuff. These
+		 * ops can be used (under OSF/1) to set the fpcr that should
+		 * be used when a signal handler starts executing.
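+		 *
+		 * The supported siblings above are reached the same
+		 * way from user space, e.g. (sketch):
+		 *
+		 *	unsigned long swcr = IEEE_TRAP_ENABLE_INV;
+		 *	setsysinfo(SSI_IEEE_FP_CONTROL, &swcr, sizeof(swcr), 0, 0);
+		 *
+		 * after which the INV enable is mirrored into both
+		 * ieee_state and the hardware fpcr, as coded above.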
+		 */
+		break;
+
+	case SSI_NVPAIRS: {
+		unsigned long v, w, i;
+		unsigned int old, new;
+
+		for (i = 0; i < nbytes; ++i) {
+
+			if (get_user(v, 2*i + (unsigned int __user *)buffer))
+				return -EFAULT;
+			if (get_user(w, 2*i + 1 + (unsigned int __user *)buffer))
+				return -EFAULT;
+			switch (v) {
+			case SSIN_UACPROC:
+			again:
+				old = current_thread_info()->flags;
+				new = old & ~(UAC_BITMASK << UAC_SHIFT);
+				new = new | (w & UAC_BITMASK) << UAC_SHIFT;
+				if (cmpxchg(&current_thread_info()->flags,
+					    old, new) != old)
+					goto again;
+				break;
+
+			default:
+				return -EOPNOTSUPP;
+			}
+		}
+		return 0;
+	}
+
+	default:
+		break;
+	}
+
+	return -EOPNOTSUPP;
+}
+
+/* Translations due to the fact that OSF's time_t is an int. Which
+   affects all sorts of things, like timeval and itimerval. */
+
+extern struct timezone sys_tz;
+extern int do_adjtimex(struct timex *);
+
+struct timeval32
+{
+	int tv_sec, tv_usec;
+};
+
+struct itimerval32
+{
+	struct timeval32 it_interval;
+	struct timeval32 it_value;
+};
+
+static inline long
+get_tv32(struct timeval *o, struct timeval32 __user *i)
+{
+	return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
+		(__get_user(o->tv_sec, &i->tv_sec) |
+		 __get_user(o->tv_usec, &i->tv_usec)));
+}
+
+static inline long
+put_tv32(struct timeval32 __user *o, struct timeval *i)
+{
+	return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
+		(__put_user(i->tv_sec, &o->tv_sec) |
+		 __put_user(i->tv_usec, &o->tv_usec)));
+}
+
+static inline long
+get_it32(struct itimerval *o, struct itimerval32 __user *i)
+{
+	return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
+		(__get_user(o->it_interval.tv_sec, &i->it_interval.tv_sec) |
+		 __get_user(o->it_interval.tv_usec, &i->it_interval.tv_usec) |
+		 __get_user(o->it_value.tv_sec, &i->it_value.tv_sec) |
+		 __get_user(o->it_value.tv_usec, &i->it_value.tv_usec)));
+}
+
+static inline long
+put_it32(struct itimerval32 __user *o, struct itimerval *i)
+{
+	return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
+		(__put_user(i->it_interval.tv_sec, &o->it_interval.tv_sec) |
+		 __put_user(i->it_interval.tv_usec, &o->it_interval.tv_usec) |
+		 __put_user(i->it_value.tv_sec, &o->it_value.tv_sec) |
+		 __put_user(i->it_value.tv_usec, &o->it_value.tv_usec)));
+}
+
+static inline void
+jiffies_to_timeval32(unsigned long jiffies, struct timeval32 *value)
+{
+	value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
+	value->tv_sec = jiffies / HZ;
+}
+
+asmlinkage int
+osf_gettimeofday(struct timeval32 __user *tv, struct timezone __user *tz)
+{
+	if (tv) {
+		struct timeval ktv;
+		do_gettimeofday(&ktv);
+		if (put_tv32(tv, &ktv))
+			return -EFAULT;
+	}
+	if (tz) {
+		if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
+			return -EFAULT;
+	}
+	return 0;
+}
+
+asmlinkage int
+osf_settimeofday(struct timeval32 __user *tv, struct timezone __user *tz)
+{
+	struct timespec kts;
+	struct timezone ktz;
+
+	if (tv) {
+		if (get_tv32((struct timeval *)&kts, tv))
+			return -EFAULT;
+	}
+	if (tz) {
+		if (copy_from_user(&ktz, tz, sizeof(*tz)))
+			return -EFAULT;
+	}
+
+	kts.tv_nsec *= 1000;
+
+	return do_sys_settimeofday(tv ? &kts : NULL, tz ?
&ktz : NULL); +} + +asmlinkage int +osf_getitimer(int which, struct itimerval32 __user *it) +{ + struct itimerval kit; + int error; + + error = do_getitimer(which, &kit); + if (!error && put_it32(it, &kit)) + error = -EFAULT; + + return error; +} + +asmlinkage int +osf_setitimer(int which, struct itimerval32 __user *in, struct itimerval32 __user *out) +{ + struct itimerval kin, kout; + int error; + + if (in) { + if (get_it32(&kin, in)) + return -EFAULT; + } else + memset(&kin, 0, sizeof(kin)); + + error = do_setitimer(which, &kin, out ? &kout : NULL); + if (error || !out) + return error; + + if (put_it32(out, &kout)) + return -EFAULT; + + return 0; + +} + +asmlinkage int +osf_utimes(char __user *filename, struct timeval32 __user *tvs) +{ + struct timeval ktvs[2]; + + if (tvs) { + if (get_tv32(&ktvs[0], &tvs[0]) || + get_tv32(&ktvs[1], &tvs[1])) + return -EFAULT; + } + + return do_utimes(filename, tvs ? ktvs : NULL); +} + +#define MAX_SELECT_SECONDS \ + ((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1) + +asmlinkage int +osf_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, + struct timeval32 __user *tvp) +{ + fd_set_bits fds; + char *bits; + size_t size; + long timeout; + int ret = -EINVAL; + + timeout = MAX_SCHEDULE_TIMEOUT; + if (tvp) { + time_t sec, usec; + + if (!access_ok(VERIFY_READ, tvp, sizeof(*tvp)) + || __get_user(sec, &tvp->tv_sec) + || __get_user(usec, &tvp->tv_usec)) { + ret = -EFAULT; + goto out_nofds; + } + + if (sec < 0 || usec < 0) + goto out_nofds; + + if ((unsigned long) sec < MAX_SELECT_SECONDS) { + timeout = (usec + 1000000/HZ - 1) / (1000000/HZ); + timeout += sec * (unsigned long) HZ; + } + } + + if (n < 0 || n > current->files->max_fdset) + goto out_nofds; + + /* + * We need 6 bitmaps (in/out/ex for both incoming and outgoing), + * since we used fdset we need to allocate memory in units of + * long-words. + */ + ret = -ENOMEM; + size = FDS_BYTES(n); + bits = kmalloc(6 * size, GFP_KERNEL); + if (!bits) + goto out_nofds; + fds.in = (unsigned long *) bits; + fds.out = (unsigned long *) (bits + size); + fds.ex = (unsigned long *) (bits + 2*size); + fds.res_in = (unsigned long *) (bits + 3*size); + fds.res_out = (unsigned long *) (bits + 4*size); + fds.res_ex = (unsigned long *) (bits + 5*size); + + if ((ret = get_fd_set(n, inp->fds_bits, fds.in)) || + (ret = get_fd_set(n, outp->fds_bits, fds.out)) || + (ret = get_fd_set(n, exp->fds_bits, fds.ex))) + goto out; + zero_fd_set(n, fds.res_in); + zero_fd_set(n, fds.res_out); + zero_fd_set(n, fds.res_ex); + + ret = do_select(n, &fds, &timeout); + + /* OSF does not copy back the remaining time. 
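+
+	   Example of the timeout arithmetic above, assuming HZ == 1024
+	   (so 1000000/HZ == 976): tvp = {1, 500000} gives
+	   timeout = (500000 + 975)/976 + 1 * 1024 = 513 + 1024 = 1537
+	   ticks, and the user's timeval is left untouched either way.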
*/ + + if (ret < 0) + goto out; + if (!ret) { + ret = -ERESTARTNOHAND; + if (signal_pending(current)) + goto out; + ret = 0; + } + + if (set_fd_set(n, inp->fds_bits, fds.res_in) || + set_fd_set(n, outp->fds_bits, fds.res_out) || + set_fd_set(n, exp->fds_bits, fds.res_ex)) + ret = -EFAULT; + + out: + kfree(bits); + out_nofds: + return ret; +} + +struct rusage32 { + struct timeval32 ru_utime; /* user time used */ + struct timeval32 ru_stime; /* system time used */ + long ru_maxrss; /* maximum resident set size */ + long ru_ixrss; /* integral shared memory size */ + long ru_idrss; /* integral unshared data size */ + long ru_isrss; /* integral unshared stack size */ + long ru_minflt; /* page reclaims */ + long ru_majflt; /* page faults */ + long ru_nswap; /* swaps */ + long ru_inblock; /* block input operations */ + long ru_oublock; /* block output operations */ + long ru_msgsnd; /* messages sent */ + long ru_msgrcv; /* messages received */ + long ru_nsignals; /* signals received */ + long ru_nvcsw; /* voluntary context switches */ + long ru_nivcsw; /* involuntary " */ +}; + +asmlinkage int +osf_getrusage(int who, struct rusage32 __user *ru) +{ + struct rusage32 r; + + if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN) + return -EINVAL; + + memset(&r, 0, sizeof(r)); + switch (who) { + case RUSAGE_SELF: + jiffies_to_timeval32(current->utime, &r.ru_utime); + jiffies_to_timeval32(current->stime, &r.ru_stime); + r.ru_minflt = current->min_flt; + r.ru_majflt = current->maj_flt; + break; + case RUSAGE_CHILDREN: + jiffies_to_timeval32(current->signal->cutime, &r.ru_utime); + jiffies_to_timeval32(current->signal->cstime, &r.ru_stime); + r.ru_minflt = current->signal->cmin_flt; + r.ru_majflt = current->signal->cmaj_flt; + break; + } + + return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0; +} + +asmlinkage long +osf_wait4(pid_t pid, int __user *ustatus, int options, + struct rusage32 __user *ur) +{ + struct rusage r; + long ret, err; + mm_segment_t old_fs; + + if (!ur) + return sys_wait4(pid, ustatus, options, NULL); + + old_fs = get_fs(); + + set_fs (KERNEL_DS); + ret = sys_wait4(pid, ustatus, options, (struct rusage __user *) &r); + set_fs (old_fs); + + if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur))) + return -EFAULT; + + err = 0; + err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec); + err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec); + err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec); + err |= __put_user(r.ru_stime.tv_usec, &ur->ru_stime.tv_usec); + err |= __put_user(r.ru_maxrss, &ur->ru_maxrss); + err |= __put_user(r.ru_ixrss, &ur->ru_ixrss); + err |= __put_user(r.ru_idrss, &ur->ru_idrss); + err |= __put_user(r.ru_isrss, &ur->ru_isrss); + err |= __put_user(r.ru_minflt, &ur->ru_minflt); + err |= __put_user(r.ru_majflt, &ur->ru_majflt); + err |= __put_user(r.ru_nswap, &ur->ru_nswap); + err |= __put_user(r.ru_inblock, &ur->ru_inblock); + err |= __put_user(r.ru_oublock, &ur->ru_oublock); + err |= __put_user(r.ru_msgsnd, &ur->ru_msgsnd); + err |= __put_user(r.ru_msgrcv, &ur->ru_msgrcv); + err |= __put_user(r.ru_nsignals, &ur->ru_nsignals); + err |= __put_user(r.ru_nvcsw, &ur->ru_nvcsw); + err |= __put_user(r.ru_nivcsw, &ur->ru_nivcsw); + + return err ? err : ret; +} + +/* + * I don't know what the parameters are: the first one + * seems to be a timeval pointer, and I suspect the second + * one is the time remaining.. Ho humm.. No documentation. 
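+ *
+ * Example of the conversion below, assuming HZ == 1024: a request
+ * of {0, 20000} becomes ticks = (20000 + 975)/976 = 21, i.e. the
+ * sleep is rounded up to whole clock ticks before schedule_timeout()
+ * runs.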
+ */
+asmlinkage int
+osf_usleep_thread(struct timeval32 __user *sleep, struct timeval32 __user *remain)
+{
+	struct timeval tmp;
+	unsigned long ticks;
+
+	if (get_tv32(&tmp, sleep))
+		goto fault;
+
+	ticks = tmp.tv_usec;
+	ticks = (ticks + (1000000 / HZ) - 1) / (1000000 / HZ);
+	ticks += tmp.tv_sec * HZ;
+
+	current->state = TASK_INTERRUPTIBLE;
+	ticks = schedule_timeout(ticks);
+
+	if (remain) {
+		tmp.tv_sec = ticks / HZ;
+		tmp.tv_usec = ticks % HZ;
+		if (put_tv32(remain, &tmp))
+			goto fault;
+	}
+
+	return 0;
+ fault:
+	return -EFAULT;
+}
+
+
+struct timex32 {
+	unsigned int modes;	/* mode selector */
+	long offset;		/* time offset (usec) */
+	long freq;		/* frequency offset (scaled ppm) */
+	long maxerror;		/* maximum error (usec) */
+	long esterror;		/* estimated error (usec) */
+	int status;		/* clock command/status */
+	long constant;		/* pll time constant */
+	long precision;		/* clock precision (usec) (read only) */
+	long tolerance;		/* clock frequency tolerance (ppm)
+				 * (read only)
+				 */
+	struct timeval32 time;	/* (read only) */
+	long tick;		/* (modified) usecs between clock ticks */
+
+	long ppsfreq;		/* pps frequency (scaled ppm) (ro) */
+	long jitter;		/* pps jitter (us) (ro) */
+	int shift;		/* interval duration (s) (shift) (ro) */
+	long stabil;		/* pps stability (scaled ppm) (ro) */
+	long jitcnt;		/* jitter limit exceeded (ro) */
+	long calcnt;		/* calibration intervals (ro) */
+	long errcnt;		/* calibration errors (ro) */
+	long stbcnt;		/* stability limit exceeded (ro) */
+
+	int  :32; int  :32; int  :32; int  :32;
+	int  :32; int  :32; int  :32; int  :32;
+	int  :32; int  :32; int  :32; int  :32;
+};
+
+asmlinkage int
+sys_old_adjtimex(struct timex32 __user *txc_p)
+{
+	struct timex txc;
+	int ret;
+
+	/* copy relevant bits of struct timex. */
+	if (copy_from_user(&txc, txc_p, offsetof(struct timex32, time)) ||
+	    copy_from_user(&txc.tick, &txc_p->tick, sizeof(struct timex32) -
+			   offsetof(struct timex32, tick)))
+		return -EFAULT;
+
+	ret = do_adjtimex(&txc);
+	if (ret < 0)
+		return ret;
+
+	/* copy back to timex32 */
+	if (copy_to_user(txc_p, &txc, offsetof(struct timex32, time)) ||
+	    (copy_to_user(&txc_p->tick, &txc.tick, sizeof(struct timex32) -
+			  offsetof(struct timex32, tick))) ||
+	    (put_tv32(&txc_p->time, &txc.time)))
+		return -EFAULT;
+
+	return ret;
+}
+
+/* Get an address range which is currently unmapped. Similar to the
+   generic version except that we know how to honor ADDR_LIMIT_32BIT. */
+
+static unsigned long
+arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
+			 unsigned long limit)
+{
+	struct vm_area_struct *vma = find_vma(current->mm, addr);
+
+	while (1) {
+		/* At this point: (!vma || addr < vma->vm_end). */
+		if (limit - len < addr)
+			return -ENOMEM;
+		if (!vma || addr + len <= vma->vm_start)
+			return addr;
+		addr = vma->vm_end;
+		vma = vma->vm_next;
+	}
+}
+
+unsigned long
+arch_get_unmapped_area(struct file *filp, unsigned long addr,
+		       unsigned long len, unsigned long pgoff,
+		       unsigned long flags)
+{
+	unsigned long limit;
+
+	/* "32 bit" actually means 31 bit, since pointers sign extend. */
+	if (current->personality & ADDR_LIMIT_32BIT)
+		limit = 0x80000000;
+	else
+		limit = TASK_SIZE;
+
+	if (len > limit)
+		return -ENOMEM;
+
+	/* First, see if the given suggestion fits.
+
+	   The OSF/1 loader (/sbin/loader) relies on us returning an
+	   address larger than the requested address if one exists,
+	   which is a terribly broken way to program.
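+
+	   (For reference, the fallback order implemented below: the
+	   caller's hint first, then TASK_UNMAPPED_BASE, and finally
+	   PAGE_SIZE upwards, each probed with the first-fit walk in
+	   arch_get_unmapped_area_1 above.)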
+ + That said, I can see the use in being able to suggest not + merely specific addresses, but regions of memory -- perhaps + this feature should be incorporated into all ports? */ + + if (addr) { + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit); + if (addr != (unsigned long) -ENOMEM) + return addr; + } + + /* Next, try allocating at TASK_UNMAPPED_BASE. */ + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE), + len, limit); + if (addr != (unsigned long) -ENOMEM) + return addr; + + /* Finally, try allocating in low memory. */ + addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit); + + return addr; +} + +#ifdef CONFIG_OSF4_COMPAT + +/* Clear top 32 bits of iov_len in the user's buffer for + compatibility with old versions of OSF/1 where iov_len + was defined as int. */ +static int +osf_fix_iov_len(const struct iovec __user *iov, unsigned long count) +{ + unsigned long i; + + for (i = 0 ; i < count ; i++) { + int __user *iov_len_high = (int __user *)&iov[i].iov_len + 1; + + if (put_user(0, iov_len_high)) + return -EFAULT; + } + return 0; +} + +asmlinkage ssize_t +osf_readv(unsigned long fd, const struct iovec __user * vector, unsigned long count) +{ + if (unlikely(personality(current->personality) == PER_OSF4)) + if (osf_fix_iov_len(vector, count)) + return -EFAULT; + return sys_readv(fd, vector, count); +} + +asmlinkage ssize_t +osf_writev(unsigned long fd, const struct iovec __user * vector, unsigned long count) +{ + if (unlikely(personality(current->personality) == PER_OSF4)) + if (osf_fix_iov_len(vector, count)) + return -EFAULT; + return sys_writev(fd, vector, count); +} + +#endif diff --git a/arch/alpha/kernel/pci-noop.c b/arch/alpha/kernel/pci-noop.c new file mode 100644 index 0000000..582a3519f --- /dev/null +++ b/arch/alpha/kernel/pci-noop.c @@ -0,0 +1,214 @@ +/* + * linux/arch/alpha/kernel/pci-noop.c + * + * Stub PCI interfaces for Jensen-specific kernels. + */ + +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/bootmem.h> +#include <linux/mm.h> +#include <linux/errno.h> +#include <linux/sched.h> +#include <linux/dma-mapping.h> + +#include "proto.h" + + +/* + * The PCI controller list. + */ + +struct pci_controller *hose_head, **hose_tail = &hose_head; +struct pci_controller *pci_isa_hose; + + +struct pci_controller * __init +alloc_pci_controller(void) +{ + struct pci_controller *hose; + + hose = alloc_bootmem(sizeof(*hose)); + + *hose_tail = hose; + hose_tail = &hose->next; + + return hose; +} + +struct resource * __init +alloc_resource(void) +{ + struct resource *res; + + res = alloc_bootmem(sizeof(*res)); + + return res; +} + +asmlinkage long +sys_pciconfig_iobase(long which, unsigned long bus, unsigned long dfn) +{ + struct pci_controller *hose; + + /* from hose or from bus.devfn */ + if (which & IOBASE_FROM_HOSE) { + for (hose = hose_head; hose; hose = hose->next) + if (hose->index == bus) + break; + if (!hose) + return -ENODEV; + } else { + /* Special hook for ISA access. 
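+
+		   From user space this is reached via the
+		   pciconfig_iobase syscall; a sketch of a caller
+		   (constants from the kernel headers, a libc wrapper
+		   assumed):
+
+			base = pciconfig_iobase(IOBASE_DENSE_MEM, 0, 0);
+
+		   where bus == 0, dfn == 0 selects the ISA hose here.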
*/ + if (bus == 0 && dfn == 0) + hose = pci_isa_hose; + else + return -ENODEV; + } + + switch (which & ~IOBASE_FROM_HOSE) { + case IOBASE_HOSE: + return hose->index; + case IOBASE_SPARSE_MEM: + return hose->sparse_mem_base; + case IOBASE_DENSE_MEM: + return hose->dense_mem_base; + case IOBASE_SPARSE_IO: + return hose->sparse_io_base; + case IOBASE_DENSE_IO: + return hose->dense_io_base; + case IOBASE_ROOT_BUS: + return hose->bus->number; + } + + return -EOPNOTSUPP; +} + +asmlinkage long +sys_pciconfig_read(unsigned long bus, unsigned long dfn, + unsigned long off, unsigned long len, void *buf) +{ + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + else + return -ENODEV; +} + +asmlinkage long +sys_pciconfig_write(unsigned long bus, unsigned long dfn, + unsigned long off, unsigned long len, void *buf) +{ + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + else + return -ENODEV; +} + +/* Stubs for the routines in pci_iommu.c: */ + +void * +pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp) +{ + return NULL; +} + +void +pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr, + dma_addr_t dma_addr) +{ +} + +dma_addr_t +pci_map_single(struct pci_dev *pdev, void *cpu_addr, size_t size, + int direction) +{ + return (dma_addr_t) 0; +} + +void +pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size, + int direction) +{ +} + +int +pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents, + int direction) +{ + return 0; +} + +void +pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents, + int direction) +{ +} + +int +pci_dma_supported(struct pci_dev *hwdev, dma_addr_t mask) +{ + return 0; +} + +/* Generic DMA mapping functions: */ + +void * +dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, int gfp) +{ + void *ret; + + if (!dev || *dev->dma_mask >= 0xffffffffUL) + gfp &= ~GFP_DMA; + ret = (void *)__get_free_pages(gfp, get_order(size)); + if (ret) { + memset(ret, 0, size); + *dma_handle = virt_to_bus(ret); + } + return ret; +} + +EXPORT_SYMBOL(dma_alloc_coherent); + +int +dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction direction) +{ + int i; + + for (i = 0; i < nents; i++ ) { + void *va; + + BUG_ON(!sg[i].page); + va = page_address(sg[i].page) + sg[i].offset; + sg_dma_address(sg + i) = (dma_addr_t)virt_to_bus(va); + sg_dma_len(sg + i) = sg[i].length; + } + + return nents; +} + +EXPORT_SYMBOL(dma_map_sg); + +int +dma_set_mask(struct device *dev, u64 mask) +{ + if (!dev->dma_mask || !dma_supported(dev, mask)) + return -EIO; + + *dev->dma_mask = mask; + + return 0; +} + +void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) +{ + return NULL; +} + +void pci_iounmap(struct pci_dev *dev, void __iomem * addr) +{ +} + +EXPORT_SYMBOL(pci_iomap); +EXPORT_SYMBOL(pci_iounmap); diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c new file mode 100644 index 0000000..1f36bbd --- /dev/null +++ b/arch/alpha/kernel/pci.c @@ -0,0 +1,561 @@ +/* + * linux/arch/alpha/kernel/pci.c + * + * Extruded from code written by + * Dave Rusling (david.rusling@reo.mts.dec.com) + * David Mosberger (davidm@cs.arizona.edu) + */ + +/* 2.3.x PCI/resources, 1999 Andrea Arcangeli <andrea@suse.de> */ + +/* + * Nov 2000, Ivan Kokshaysky <ink@jurassic.park.msu.ru> + * PCI-PCI bridges cleanup + */ +#include <linux/config.h> +#include <linux/string.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/ioport.h> +#include <linux/kernel.h> +#include 
<linux/bootmem.h> +#include <linux/module.h> +#include <linux/cache.h> +#include <linux/slab.h> +#include <asm/machvec.h> + +#include "proto.h" +#include "pci_impl.h" + + +/* + * Some string constants used by the various core logics. + */ + +const char *const pci_io_names[] = { + "PCI IO bus 0", "PCI IO bus 1", "PCI IO bus 2", "PCI IO bus 3", + "PCI IO bus 4", "PCI IO bus 5", "PCI IO bus 6", "PCI IO bus 7" +}; + +const char *const pci_mem_names[] = { + "PCI mem bus 0", "PCI mem bus 1", "PCI mem bus 2", "PCI mem bus 3", + "PCI mem bus 4", "PCI mem bus 5", "PCI mem bus 6", "PCI mem bus 7" +}; + +const char pci_hae0_name[] = "HAE0"; + +/* Indicate whether we respect the PCI setup left by console. */ +/* + * Make this long-lived so that we know when shutting down + * whether we probed only or not. + */ +int pci_probe_only; + +/* + * The PCI controller list. + */ + +struct pci_controller *hose_head, **hose_tail = &hose_head; +struct pci_controller *pci_isa_hose; + +/* + * Quirks. + */ + +static void __init +quirk_isa_bridge(struct pci_dev *dev) +{ + dev->class = PCI_CLASS_BRIDGE_ISA << 8; +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82378, quirk_isa_bridge); + +static void __init +quirk_cypress(struct pci_dev *dev) +{ + /* The Notorious Cy82C693 chip. */ + + /* The Cypress IDE controller doesn't support native mode, but it + has programmable addresses of IDE command/control registers. + This violates PCI specifications, confuses the IDE subsystem and + causes resource conflicts between the primary HD_CMD register and + the floppy controller. Ugh. Fix that. */ + if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE) { + dev->resource[0].flags = 0; + dev->resource[1].flags = 0; + } + + /* The Cypress bridge responds on the PCI bus in the address range + 0xffff0000-0xffffffff (conventional x86 BIOS ROM). There is no + way to turn this off. The bridge also supports several extended + BIOS ranges (disabled after power-up), and some consoles do turn + them on. So if we use a large direct-map window, or a large SG + window, we must avoid the entire 0xfff00000-0xffffffff region. */ + else if (dev->class >> 8 == PCI_CLASS_BRIDGE_ISA) { + if (__direct_map_base + __direct_map_size >= 0xfff00000UL) + __direct_map_size = 0xfff00000UL - __direct_map_base; + else { + struct pci_controller *hose = dev->sysdata; + struct pci_iommu_arena *pci = hose->sg_pci; + if (pci && pci->dma_base + pci->size >= 0xfff00000UL) + pci->size = 0xfff00000UL - pci->dma_base; + } + } +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693, quirk_cypress); + +/* Called for each device after PCI setup is done. 
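+   Used here to remember which device is the ISA/EISA bridge and
+   to clamp that bridge's dma_mask to the ISA DMA limit.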
*/ +static void __init +pcibios_fixup_final(struct pci_dev *dev) +{ + unsigned int class = dev->class >> 8; + + if (class == PCI_CLASS_BRIDGE_ISA || class == PCI_CLASS_BRIDGE_EISA) { + dev->dma_mask = MAX_ISA_DMA_ADDRESS - 1; + isa_bridge = dev; + } +} +DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_final); + +/* Just declaring that the power-of-ten prefixes are actually the + power-of-two ones doesn't make it true :) */ +#define KB 1024 +#define MB (1024*KB) +#define GB (1024*MB) + +void +pcibios_align_resource(void *data, struct resource *res, + unsigned long size, unsigned long align) +{ + struct pci_dev *dev = data; + struct pci_controller *hose = dev->sysdata; + unsigned long alignto; + unsigned long start = res->start; + + if (res->flags & IORESOURCE_IO) { + /* Make sure we start at our min on all hoses */ + if (start - hose->io_space->start < PCIBIOS_MIN_IO) + start = PCIBIOS_MIN_IO + hose->io_space->start; + + /* + * Put everything into 0x00-0xff region modulo 0x400 + */ + if (start & 0x300) + start = (start + 0x3ff) & ~0x3ff; + } + else if (res->flags & IORESOURCE_MEM) { + /* Make sure we start at our min on all hoses */ + if (start - hose->mem_space->start < PCIBIOS_MIN_MEM) + start = PCIBIOS_MIN_MEM + hose->mem_space->start; + + /* + * The following holds at least for the Low Cost + * Alpha implementation of the PCI interface: + * + * In sparse memory address space, the first + * octant (16MB) of every 128MB segment is + * aliased to the very first 16 MB of the + * address space (i.e., it aliases the ISA + * memory address space). Thus, we try to + * avoid allocating PCI devices in that range. + * Can be allocated in 2nd-7th octant only. + * Devices that need more than 112MB of + * address space must be accessed through + * dense memory space only! + */ + + /* Align to multiple of size of minimum base. */ + alignto = max(0x1000UL, align); + start = ALIGN(start, alignto); + if (hose->sparse_mem_base && size <= 7 * 16*MB) { + if (((start / (16*MB)) & 0x7) == 0) { + start &= ~(128*MB - 1); + start += 16*MB; + start = ALIGN(start, alignto); + } + if (start/(128*MB) != (start + size - 1)/(128*MB)) { + start &= ~(128*MB - 1); + start += (128 + 16)*MB; + start = ALIGN(start, alignto); + } + } + } + + res->start = start; +} +#undef KB +#undef MB +#undef GB + +static int __init +pcibios_init(void) +{ + if (alpha_mv.init_pci) + alpha_mv.init_pci(); + return 0; +} + +subsys_initcall(pcibios_init); + +char * __init +pcibios_setup(char *str) +{ + return str; +} + +#ifdef ALPHA_RESTORE_SRM_SETUP +static struct pdev_srm_saved_conf *srm_saved_configs; + +void __init +pdev_save_srm_config(struct pci_dev *dev) +{ + struct pdev_srm_saved_conf *tmp; + static int printed = 0; + + if (!alpha_using_srm || pci_probe_only) + return; + + if (!printed) { + printk(KERN_INFO "pci: enabling save/restore of SRM state\n"); + printed = 1; + } + + tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); + if (!tmp) { + printk(KERN_ERR "%s: kmalloc() failed!\n", __FUNCTION__); + return; + } + tmp->next = srm_saved_configs; + tmp->dev = dev; + + pci_save_state(dev); + + srm_saved_configs = tmp; +} + +void +pci_restore_srm_config(void) +{ + struct pdev_srm_saved_conf *tmp; + + /* No need to restore if probed only. */ + if (pci_probe_only) + return; + + /* Restore SRM config. 
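+	   Walk the list built by pdev_save_srm_config() and put each
+	   device's config space back the way the console left it.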
*/ + for (tmp = srm_saved_configs; tmp; tmp = tmp->next) { + pci_restore_state(tmp->dev); + } +} +#endif + +void __init +pcibios_fixup_resource(struct resource *res, struct resource *root) +{ + res->start += root->start; + res->end += root->start; +} + +void __init +pcibios_fixup_device_resources(struct pci_dev *dev, struct pci_bus *bus) +{ + /* Update device resources. */ + struct pci_controller *hose = (struct pci_controller *)bus->sysdata; + int i; + + for (i = 0; i < PCI_NUM_RESOURCES; i++) { + if (!dev->resource[i].start) + continue; + if (dev->resource[i].flags & IORESOURCE_IO) + pcibios_fixup_resource(&dev->resource[i], + hose->io_space); + else if (dev->resource[i].flags & IORESOURCE_MEM) + pcibios_fixup_resource(&dev->resource[i], + hose->mem_space); + } +} + +void __init +pcibios_fixup_bus(struct pci_bus *bus) +{ + /* Propagate hose info into the subordinate devices. */ + + struct pci_controller *hose = bus->sysdata; + struct pci_dev *dev = bus->self; + + if (!dev) { + /* Root bus. */ + u32 pci_mem_end; + u32 sg_base = hose->sg_pci ? hose->sg_pci->dma_base : ~0; + unsigned long end; + + bus->resource[0] = hose->io_space; + bus->resource[1] = hose->mem_space; + + /* Adjust hose mem_space limit to prevent PCI allocations + in the iommu windows. */ + pci_mem_end = min((u32)__direct_map_base, sg_base) - 1; + end = hose->mem_space->start + pci_mem_end; + if (hose->mem_space->end > end) + hose->mem_space->end = end; + } else if (pci_probe_only && + (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { + pci_read_bridge_bases(bus); + pcibios_fixup_device_resources(dev, bus); + } + + list_for_each_entry(dev, &bus->devices, bus_list) { + pdev_save_srm_config(dev); + if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI) + pcibios_fixup_device_resources(dev, bus); + } +} + +void __init +pcibios_update_irq(struct pci_dev *dev, int irq) +{ + pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq); +} + +/* Most Alphas have straight-forward swizzling needs. */ + +u8 __init +common_swizzle(struct pci_dev *dev, u8 *pinp) +{ + u8 pin = *pinp; + + while (dev->bus->parent) { + pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)); + /* Move up the chain of bridges. */ + dev = dev->bus->self; + } + *pinp = pin; + + /* The slot is the slot of the last bridge. */ + return PCI_SLOT(dev->devfn); +} + +void __devinit +pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, + struct resource *res) +{ + struct pci_controller *hose = (struct pci_controller *)dev->sysdata; + unsigned long offset = 0; + + if (res->flags & IORESOURCE_IO) + offset = hose->io_space->start; + else if (res->flags & IORESOURCE_MEM) + offset = hose->mem_space->start; + + region->start = res->start - offset; + region->end = res->end - offset; +} + +#ifdef CONFIG_HOTPLUG +EXPORT_SYMBOL(pcibios_resource_to_bus); +#endif + +int +pcibios_enable_device(struct pci_dev *dev, int mask) +{ + u16 cmd, oldcmd; + int i; + + pci_read_config_word(dev, PCI_COMMAND, &cmd); + oldcmd = cmd; + + for (i = 0; i < PCI_NUM_RESOURCES; i++) { + struct resource *res = &dev->resource[i]; + + if (res->flags & IORESOURCE_IO) + cmd |= PCI_COMMAND_IO; + else if (res->flags & IORESOURCE_MEM) + cmd |= PCI_COMMAND_MEMORY; + } + + if (cmd != oldcmd) { + printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n", + pci_name(dev), cmd); + /* Enable the appropriate bits in the PCI command register. 
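+		   Only write the register back if something actually
+		   changed; the common case costs no config cycle.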
*/ + pci_write_config_word(dev, PCI_COMMAND, cmd); + } + return 0; +} + +/* + * If we set up a device for bus mastering, we need to check the latency + * timer as certain firmware forgets to set it properly, as seen + * on SX164 and LX164 with SRM. + */ +void +pcibios_set_master(struct pci_dev *dev) +{ + u8 lat; + pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat); + if (lat >= 16) return; + printk("PCI: Setting latency timer of device %s to 64\n", + pci_name(dev)); + pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64); +} + +static void __init +pcibios_claim_one_bus(struct pci_bus *b) +{ + struct pci_dev *dev; + struct pci_bus *child_bus; + + list_for_each_entry(dev, &b->devices, bus_list) { + int i; + + for (i = 0; i < PCI_NUM_RESOURCES; i++) { + struct resource *r = &dev->resource[i]; + + if (r->parent || !r->start || !r->flags) + continue; + pci_claim_resource(dev, i); + } + } + + list_for_each_entry(child_bus, &b->children, node) + pcibios_claim_one_bus(child_bus); +} + +static void __init +pcibios_claim_console_setup(void) +{ + struct pci_bus *b; + + list_for_each_entry(b, &pci_root_buses, node) + pcibios_claim_one_bus(b); +} + +void __init +common_init_pci(void) +{ + struct pci_controller *hose; + struct pci_bus *bus; + int next_busno; + int need_domain_info = 0; + + /* Scan all of the recorded PCI controllers. */ + for (next_busno = 0, hose = hose_head; hose; hose = hose->next) { + bus = pci_scan_bus(next_busno, alpha_mv.pci_ops, hose); + hose->bus = bus; + hose->need_domain_info = need_domain_info; + next_busno = bus->subordinate + 1; + /* Don't allow 8-bit bus number overflow inside the hose - + reserve some space for bridges. */ + if (next_busno > 224) { + next_busno = 0; + need_domain_info = 1; + } + } + + if (pci_probe_only) + pcibios_claim_console_setup(); + + pci_assign_unassigned_resources(); + pci_fixup_irqs(alpha_mv.pci_swizzle, alpha_mv.pci_map_irq); +} + + +struct pci_controller * __init +alloc_pci_controller(void) +{ + struct pci_controller *hose; + + hose = alloc_bootmem(sizeof(*hose)); + + *hose_tail = hose; + hose_tail = &hose->next; + + return hose; +} + +struct resource * __init +alloc_resource(void) +{ + struct resource *res; + + res = alloc_bootmem(sizeof(*res)); + + return res; +} + + +/* Provide information on locations of various I/O regions in physical + memory. Do this on a per-card basis so that we choose the right hose. */ + +asmlinkage long +sys_pciconfig_iobase(long which, unsigned long bus, unsigned long dfn) +{ + struct pci_controller *hose; + struct pci_dev *dev; + + /* from hose or from bus.devfn */ + if (which & IOBASE_FROM_HOSE) { + for(hose = hose_head; hose; hose = hose->next) + if (hose->index == bus) break; + if (!hose) return -ENODEV; + } else { + /* Special hook for ISA access. */ + if (bus == 0 && dfn == 0) { + hose = pci_isa_hose; + } else { + dev = pci_find_slot(bus, dfn); + if (!dev) + return -ENODEV; + hose = dev->sysdata; + } + } + + switch (which & ~IOBASE_FROM_HOSE) { + case IOBASE_HOSE: + return hose->index; + case IOBASE_SPARSE_MEM: + return hose->sparse_mem_base; + case IOBASE_DENSE_MEM: + return hose->dense_mem_base; + case IOBASE_SPARSE_IO: + return hose->sparse_io_base; + case IOBASE_DENSE_IO: + return hose->dense_io_base; + case IOBASE_ROOT_BUS: + return hose->bus->number; + } + + return -EOPNOTSUPP; +} + +/* Create an __iomem token from a PCI BAR. Copied from lib/iomap.c with + no changes, since we don't want the other things in that object file. 
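+
+   A minimal usage sketch (illustrative only; the BAR number and
+   the REG_STATUS offset are invented for the example):
+
+	void __iomem *regs = pci_iomap(pdev, 0, 0);
+	if (regs) {
+		u32 status = ioread32(regs + REG_STATUS);
+		...
+		pci_iounmap(pdev, regs);
+	}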
*/ + +void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) +{ + unsigned long start = pci_resource_start(dev, bar); + unsigned long len = pci_resource_len(dev, bar); + unsigned long flags = pci_resource_flags(dev, bar); + + if (!len || !start) + return NULL; + if (maxlen && len > maxlen) + len = maxlen; + if (flags & IORESOURCE_IO) + return ioport_map(start, len); + if (flags & IORESOURCE_MEM) { + /* Not checking IORESOURCE_CACHEABLE because alpha does + not distinguish between ioremap and ioremap_nocache. */ + return ioremap(start, len); + } + return NULL; +} + +/* Destroy that token. Not copied from lib/iomap.c. */ + +void pci_iounmap(struct pci_dev *dev, void __iomem * addr) +{ + if (__is_mmio(addr)) + iounmap(addr); +} + +EXPORT_SYMBOL(pci_iomap); +EXPORT_SYMBOL(pci_iounmap); diff --git a/arch/alpha/kernel/pci_impl.h b/arch/alpha/kernel/pci_impl.h new file mode 100644 index 0000000..f8b7499 --- /dev/null +++ b/arch/alpha/kernel/pci_impl.h @@ -0,0 +1,209 @@ +/* + * linux/arch/alpha/kernel/pci_impl.h + * + * This file contains declarations and inline functions for interfacing + * with the PCI initialization routines. + */ + +struct pci_dev; +struct pci_controller; +struct pci_iommu_arena; + +/* + * We can't just blindly use 64K for machines with EISA busses; they + * may also have PCI-PCI bridges present, and then we'd configure the + * bridge incorrectly. + * + * Also, we start at 0x8000 or 0x9000, in hopes to get all devices' + * IO space areas allocated *before* 0xC000; this is because certain + * BIOSes (Millennium for one) use PCI Config space "mechanism #2" + * accesses to probe the bus. If a device's registers appear at 0xC000, + * it may see an INx/OUTx at that address during BIOS emulation of the + * VGA BIOS, and some cards, notably Adaptec 2940UW, take mortal offense. + */ + +#define EISA_DEFAULT_IO_BASE 0x9000 /* start above 8th slot */ +#define DEFAULT_IO_BASE 0x8000 /* start at 8th slot */ + +/* + * We try to make the DEFAULT_MEM_BASE addresses *always* have more than + * a single bit set. This is so that devices like the broken Myrinet card + * will always have a PCI memory address that will never match a IDSEL + * address in PCI Config space, which can cause problems with early rev cards. + */ + +/* + * An XL is AVANTI (APECS) family, *but* it has only 27 bits of ISA address + * that get passed through the PCI<->ISA bridge chip. Although this causes + * us to set the PCI->Mem window bases lower than normal, we still allocate + * PCI bus devices' memory addresses *below* the low DMA mapping window, + * and hope they fit below 64Mb (to avoid conflicts), and so that they can + * be accessed via SPARSE space. + * + * We accept the risk that a broken Myrinet card will be put into a true XL + * and thus can more easily run into the problem described below. + */ +#define XL_DEFAULT_MEM_BASE ((16+2)*1024*1024) /* 16M to 64M-1 is avail */ + +/* + * APECS and LCA have only 34 bits for physical addresses, thus limiting PCI + * bus memory addresses for SPARSE access to be less than 128Mb. + */ +#define APECS_AND_LCA_DEFAULT_MEM_BASE ((16+2)*1024*1024) + +/* + * Because MCPCIA and T2 core logic support more bits for + * physical addresses, they should allow an expanded range of SPARSE + * memory addresses. However, we do not use them all, in order to + * avoid the HAE manipulation that would be needed. 
+ */ +#define MCPCIA_DEFAULT_MEM_BASE ((32+2)*1024*1024) +#define T2_DEFAULT_MEM_BASE ((16+1)*1024*1024) + +/* + * Because CIA and PYXIS have more bits for physical addresses, + * they support an expanded range of SPARSE memory addresses. + */ +#define DEFAULT_MEM_BASE ((128+16)*1024*1024) + +/* ??? Experimenting with no HAE for CIA. */ +#define CIA_DEFAULT_MEM_BASE ((32+2)*1024*1024) + +#define IRONGATE_DEFAULT_MEM_BASE ((256*8-16)*1024*1024) + +#define DEFAULT_AGP_APER_SIZE (64*1024*1024) + +/* + * A small note about bridges and interrupts. The DECchip 21050 (and + * later) adheres to the PCI-PCI bridge specification. This says that + * the interrupts on the other side of a bridge are swizzled in the + * following manner: + * + * Dev Interrupt Interrupt + * Pin on Pin on + * Device Connector + * + * 4 A A + * B B + * C C + * D D + * + * 5 A B + * B C + * C D + * D A + * + * 6 A C + * B D + * C A + * D B + * + * 7 A D + * B A + * C B + * D C + * + * Where A = pin 1, B = pin 2 and so on and pin=0 = default = A. + * Thus, each swizzle is ((pin-1) + (device#-4)) % 4 + * + * The following code swizzles for exactly one bridge. The routine + * common_swizzle below handles multiple bridges. But there are a + * couple boards that do strange things, so we define this here. + */ + +static inline u8 bridge_swizzle(u8 pin, u8 slot) +{ + return (((pin-1) + slot) % 4) + 1; +} + + +/* The following macro is used to implement the table-based irq mapping + function for all single-bus Alphas. */ + +#define COMMON_TABLE_LOOKUP \ +({ long _ctl_ = -1; \ + if (slot >= min_idsel && slot <= max_idsel && pin < irqs_per_slot) \ + _ctl_ = irq_tab[slot - min_idsel][pin]; \ + _ctl_; }) + + +/* A PCI IOMMU allocation arena. There are typically two of these + regions per bus. */ +/* ??? The 8400 has a 32-byte pte entry, and the entire table apparently + lives directly on the host bridge (no tlb?). We don't support this + machine, but if we ever did, we'd need to parameterize all this quite + a bit further. Probably with per-bus operation tables. */ + +struct pci_iommu_arena +{ + spinlock_t lock; + struct pci_controller *hose; +#define IOMMU_INVALID_PTE 0x2 /* 32:63 bits MBZ */ +#define IOMMU_RESERVED_PTE 0xface + unsigned long *ptes; + dma_addr_t dma_base; + unsigned int size; + unsigned int next_entry; + unsigned int align_entry; +}; + +#if defined(CONFIG_ALPHA_SRM) && \ + (defined(CONFIG_ALPHA_CIA) || defined(CONFIG_ALPHA_LCA)) +# define NEED_SRM_SAVE_RESTORE +#else +# undef NEED_SRM_SAVE_RESTORE +#endif + +#if defined(CONFIG_ALPHA_GENERIC) || defined(NEED_SRM_SAVE_RESTORE) +# define ALPHA_RESTORE_SRM_SETUP +#else +# undef ALPHA_RESTORE_SRM_SETUP +#endif + +#ifdef ALPHA_RESTORE_SRM_SETUP +/* Store PCI device configuration left by SRM here. */ +struct pdev_srm_saved_conf +{ + struct pdev_srm_saved_conf *next; + struct pci_dev *dev; +}; + +extern void pci_restore_srm_config(void); +#else +#define pdev_save_srm_config(dev) do {} while (0) +#define pci_restore_srm_config() do {} while (0) +#endif + +/* The hose list. */ +extern struct pci_controller *hose_head, **hose_tail; +extern struct pci_controller *pci_isa_hose; + +/* Indicate that we trust the console to configure things properly. 
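+   When set, common_init_pci() first claims whatever the firmware
+   assigned (pcibios_claim_console_setup) and only assigns what is
+   still left unassigned.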
*/ +extern int pci_probe_only; + +extern unsigned long alpha_agpgart_size; + +extern void common_init_pci(void); +extern u8 common_swizzle(struct pci_dev *, u8 *); +extern struct pci_controller *alloc_pci_controller(void); +extern struct resource *alloc_resource(void); + +extern struct pci_iommu_arena *iommu_arena_new_node(int, + struct pci_controller *, + dma_addr_t, unsigned long, + unsigned long); +extern struct pci_iommu_arena *iommu_arena_new(struct pci_controller *, + dma_addr_t, unsigned long, + unsigned long); +extern const char *const pci_io_names[]; +extern const char *const pci_mem_names[]; +extern const char pci_hae0_name[]; + +extern unsigned long size_for_memory(unsigned long max); + +extern int iommu_reserve(struct pci_iommu_arena *, long, long); +extern int iommu_release(struct pci_iommu_arena *, long, long); +extern int iommu_bind(struct pci_iommu_arena *, long, long, unsigned long *); +extern int iommu_unbind(struct pci_iommu_arena *, long, long); + + diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c new file mode 100644 index 0000000..7cb23f1 --- /dev/null +++ b/arch/alpha/kernel/pci_iommu.c @@ -0,0 +1,971 @@ +/* + * linux/arch/alpha/kernel/pci_iommu.c + */ + +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/bootmem.h> + +#include <asm/io.h> +#include <asm/hwrpb.h> + +#include "proto.h" +#include "pci_impl.h" + + +#define DEBUG_ALLOC 0 +#if DEBUG_ALLOC > 0 +# define DBGA(args...) printk(KERN_DEBUG args) +#else +# define DBGA(args...) +#endif +#if DEBUG_ALLOC > 1 +# define DBGA2(args...) printk(KERN_DEBUG args) +#else +# define DBGA2(args...) +#endif + +#define DEBUG_NODIRECT 0 +#define DEBUG_FORCEDAC 0 + +#define ISA_DMA_MASK 0x00ffffff + +static inline unsigned long +mk_iommu_pte(unsigned long paddr) +{ + return (paddr >> (PAGE_SHIFT-1)) | 1; +} + +static inline long +calc_npages(long bytes) +{ + return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT; +} + + +/* Return the minimum of MAX or the first power of two larger + than main memory. */ + +unsigned long +size_for_memory(unsigned long max) +{ + unsigned long mem = max_low_pfn << PAGE_SHIFT; + if (mem < max) + max = 1UL << ceil_log2(mem); + return max; +} + +struct pci_iommu_arena * +iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base, + unsigned long window_size, unsigned long align) +{ + unsigned long mem_size; + struct pci_iommu_arena *arena; + + mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long)); + + /* Note that the TLB lookup logic uses bitwise concatenation, + not addition, so the required arena alignment is based on + the size of the window. Retain the align parameter so that + particular systems can over-align the arena. 
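+	   For example, assuming the usual 8Kb Alpha page and 8-byte
+	   ptes, a 1Gb window needs 1Gb / (8Kb / 8) = 1Mb of pte
+	   storage, and the pte table itself must then be 1Mb aligned.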
*/ + if (align < mem_size) + align = mem_size; + + +#ifdef CONFIG_DISCONTIGMEM + + if (!NODE_DATA(nid) || + (NULL == (arena = alloc_bootmem_node(NODE_DATA(nid), + sizeof(*arena))))) { + printk("%s: couldn't allocate arena from node %d\n" + " falling back to system-wide allocation\n", + __FUNCTION__, nid); + arena = alloc_bootmem(sizeof(*arena)); + } + + if (!NODE_DATA(nid) || + (NULL == (arena->ptes = __alloc_bootmem_node(NODE_DATA(nid), + mem_size, + align, + 0)))) { + printk("%s: couldn't allocate arena ptes from node %d\n" + " falling back to system-wide allocation\n", + __FUNCTION__, nid); + arena->ptes = __alloc_bootmem(mem_size, align, 0); + } + +#else /* CONFIG_DISCONTIGMEM */ + + arena = alloc_bootmem(sizeof(*arena)); + arena->ptes = __alloc_bootmem(mem_size, align, 0); + +#endif /* CONFIG_DISCONTIGMEM */ + + spin_lock_init(&arena->lock); + arena->hose = hose; + arena->dma_base = base; + arena->size = window_size; + arena->next_entry = 0; + + /* Align allocations to a multiple of a page size. Not needed + unless there are chip bugs. */ + arena->align_entry = 1; + + return arena; +} + +struct pci_iommu_arena * +iommu_arena_new(struct pci_controller *hose, dma_addr_t base, + unsigned long window_size, unsigned long align) +{ + return iommu_arena_new_node(0, hose, base, window_size, align); +} + +/* Must be called with the arena lock held */ +static long +iommu_arena_find_pages(struct pci_iommu_arena *arena, long n, long mask) +{ + unsigned long *ptes; + long i, p, nent; + + /* Search forward for the first mask-aligned sequence of N free ptes */ + ptes = arena->ptes; + nent = arena->size >> PAGE_SHIFT; + p = (arena->next_entry + mask) & ~mask; + i = 0; + while (i < n && p+i < nent) { + if (ptes[p+i]) + p = (p + i + 1 + mask) & ~mask, i = 0; + else + i = i + 1; + } + + if (i < n) { + /* Reached the end. Flush the TLB and restart the + search from the beginning. */ + alpha_mv.mv_pci_tbi(arena->hose, 0, -1); + + p = 0, i = 0; + while (i < n && p+i < nent) { + if (ptes[p+i]) + p = (p + i + 1 + mask) & ~mask, i = 0; + else + i = i + 1; + } + + if (i < n) + return -1; + } + + /* Success. It's the responsibility of the caller to mark them + in use before releasing the lock */ + return p; +} + +static long +iommu_arena_alloc(struct pci_iommu_arena *arena, long n, unsigned int align) +{ + unsigned long flags; + unsigned long *ptes; + long i, p, mask; + + spin_lock_irqsave(&arena->lock, flags); + + /* Search for N empty ptes */ + ptes = arena->ptes; + mask = max(align, arena->align_entry) - 1; + p = iommu_arena_find_pages(arena, n, mask); + if (p < 0) { + spin_unlock_irqrestore(&arena->lock, flags); + return -1; + } + + /* Success. Mark them all in use, ie not zero and invalid + for the iommu tlb that could load them from under us. + The chip specific bits will fill this in with something + kosher when we return. */ + for (i = 0; i < n; ++i) + ptes[p+i] = IOMMU_INVALID_PTE; + + arena->next_entry = p + n; + spin_unlock_irqrestore(&arena->lock, flags); + + return p; +} + +static void +iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n) +{ + unsigned long *p; + long i; + + p = arena->ptes + ofs; + for (i = 0; i < n; ++i) + p[i] = 0; +} + +/* Map a single buffer of the indicated size for PCI DMA in streaming + mode. The 32-bit PCI bus mastering address to use is returned. + Once the device is given the dma address, the device owns this memory + until either pci_unmap_single or pci_dma_sync_single is performed. 
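+
+   A typical driver-side sequence, as a sketch only (pdev, buf and
+   len stand in for the caller's device and buffer; real code must
+   check for the zero return that signals failure):
+
+	dma_addr_t bus;
+
+	bus = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
+	if (!bus)
+		return -ENOMEM;
+	... hand 'bus' to the device and run the transfer ...
+	pci_unmap_single(pdev, bus, len, PCI_DMA_TODEVICE);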
*/ + +static dma_addr_t +pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size, + int dac_allowed) +{ + struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose; + dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK; + struct pci_iommu_arena *arena; + long npages, dma_ofs, i; + unsigned long paddr; + dma_addr_t ret; + unsigned int align = 0; + + paddr = __pa(cpu_addr); + +#if !DEBUG_NODIRECT + /* First check to see if we can use the direct map window. */ + if (paddr + size + __direct_map_base - 1 <= max_dma + && paddr + size <= __direct_map_size) { + ret = paddr + __direct_map_base; + + DBGA2("pci_map_single: [%p,%lx] -> direct %lx from %p\n", + cpu_addr, size, ret, __builtin_return_address(0)); + + return ret; + } +#endif + + /* Next, use DAC if selected earlier. */ + if (dac_allowed) { + ret = paddr + alpha_mv.pci_dac_offset; + + DBGA2("pci_map_single: [%p,%lx] -> DAC %lx from %p\n", + cpu_addr, size, ret, __builtin_return_address(0)); + + return ret; + } + + /* If the machine doesn't define a pci_tbi routine, we have to + assume it doesn't support sg mapping, and, since we tried to + use direct_map above, it now must be considered an error. */ + if (! alpha_mv.mv_pci_tbi) { + static int been_here = 0; /* Only print the message once. */ + if (!been_here) { + printk(KERN_WARNING "pci_map_single: no HW sg\n"); + been_here = 1; + } + return 0; + } + + arena = hose->sg_pci; + if (!arena || arena->dma_base + arena->size - 1 > max_dma) + arena = hose->sg_isa; + + npages = calc_npages((paddr & ~PAGE_MASK) + size); + + /* Force allocation to 64KB boundary for ISA bridges. */ + if (pdev && pdev == isa_bridge) + align = 8; + dma_ofs = iommu_arena_alloc(arena, npages, align); + if (dma_ofs < 0) { + printk(KERN_WARNING "pci_map_single failed: " + "could not allocate dma page tables\n"); + return 0; + } + + paddr &= PAGE_MASK; + for (i = 0; i < npages; ++i, paddr += PAGE_SIZE) + arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr); + + ret = arena->dma_base + dma_ofs * PAGE_SIZE; + ret += (unsigned long)cpu_addr & ~PAGE_MASK; + + DBGA2("pci_map_single: [%p,%lx] np %ld -> sg %lx from %p\n", + cpu_addr, size, npages, ret, __builtin_return_address(0)); + + return ret; +} + +dma_addr_t +pci_map_single(struct pci_dev *pdev, void *cpu_addr, size_t size, int dir) +{ + int dac_allowed; + + if (dir == PCI_DMA_NONE) + BUG(); + + dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0; + return pci_map_single_1(pdev, cpu_addr, size, dac_allowed); +} + +dma_addr_t +pci_map_page(struct pci_dev *pdev, struct page *page, unsigned long offset, + size_t size, int dir) +{ + int dac_allowed; + + if (dir == PCI_DMA_NONE) + BUG(); + + dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0; + return pci_map_single_1(pdev, (char *)page_address(page) + offset, + size, dac_allowed); +} + +/* Unmap a single streaming mode DMA translation. The DMA_ADDR and + SIZE must match what was provided for in a previous pci_map_single + call. All other usages are undefined. After this call, reads by + the cpu to the buffer are guaranteed to see whatever the device + wrote there. */ + +void +pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size, + int direction) +{ + unsigned long flags; + struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose; + struct pci_iommu_arena *arena; + long dma_ofs, npages; + + if (direction == PCI_DMA_NONE) + BUG(); + + if (dma_addr >= __direct_map_base + && dma_addr < __direct_map_base + __direct_map_size) { + /* Nothing to do. 
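+	   The address came straight out of the direct-map window, so
+	   there are no ptes to tear down.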
*/ + + DBGA2("pci_unmap_single: direct [%lx,%lx] from %p\n", + dma_addr, size, __builtin_return_address(0)); + + return; + } + + if (dma_addr > 0xffffffff) { + DBGA2("pci64_unmap_single: DAC [%lx,%lx] from %p\n", + dma_addr, size, __builtin_return_address(0)); + return; + } + + arena = hose->sg_pci; + if (!arena || dma_addr < arena->dma_base) + arena = hose->sg_isa; + + dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT; + if (dma_ofs * PAGE_SIZE >= arena->size) { + printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %lx " + " base %lx size %x\n", dma_addr, arena->dma_base, + arena->size); + return; + BUG(); + } + + npages = calc_npages((dma_addr & ~PAGE_MASK) + size); + + spin_lock_irqsave(&arena->lock, flags); + + iommu_arena_free(arena, dma_ofs, npages); + + /* If we're freeing ptes above the `next_entry' pointer (they + may have snuck back into the TLB since the last wrap flush), + we need to flush the TLB before reallocating the latter. */ + if (dma_ofs >= arena->next_entry) + alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1); + + spin_unlock_irqrestore(&arena->lock, flags); + + DBGA2("pci_unmap_single: sg [%lx,%lx] np %ld from %p\n", + dma_addr, size, npages, __builtin_return_address(0)); +} + +void +pci_unmap_page(struct pci_dev *pdev, dma_addr_t dma_addr, + size_t size, int direction) +{ + pci_unmap_single(pdev, dma_addr, size, direction); +} + +/* Allocate and map kernel buffer using consistent mode DMA for PCI + device. Returns non-NULL cpu-view pointer to the buffer if + successful and sets *DMA_ADDRP to the pci side dma address as well, + else DMA_ADDRP is undefined. */ + +void * +pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp) +{ + void *cpu_addr; + long order = get_order(size); + int gfp = GFP_ATOMIC; + +try_again: + cpu_addr = (void *)__get_free_pages(gfp, order); + if (! cpu_addr) { + printk(KERN_INFO "pci_alloc_consistent: " + "get_free_pages failed from %p\n", + __builtin_return_address(0)); + /* ??? Really atomic allocation? Otherwise we could play + with vmalloc and sg if we can't find contiguous memory. */ + return NULL; + } + memset(cpu_addr, 0, size); + + *dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0); + if (*dma_addrp == 0) { + free_pages((unsigned long)cpu_addr, order); + if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA)) + return NULL; + /* The address doesn't fit required mask and we + do not have iommu. Try again with GFP_DMA. */ + gfp |= GFP_DMA; + goto try_again; + } + + DBGA2("pci_alloc_consistent: %lx -> [%p,%x] from %p\n", + size, cpu_addr, *dma_addrp, __builtin_return_address(0)); + + return cpu_addr; +} + +/* Free and unmap a consistent DMA buffer. CPU_ADDR and DMA_ADDR must + be values that were returned from pci_alloc_consistent. SIZE must + be the same as what as passed into pci_alloc_consistent. + References to the memory and mappings associated with CPU_ADDR or + DMA_ADDR past this call are illegal. */ + +void +pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr, + dma_addr_t dma_addr) +{ + pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL); + free_pages((unsigned long)cpu_addr, get_order(size)); + + DBGA2("pci_free_consistent: [%x,%lx] from %p\n", + dma_addr, size, __builtin_return_address(0)); +} + + +/* Classify the elements of the scatterlist. Write dma_address + of each element with: + 0 : Followers all physically adjacent. + 1 : Followers all virtually adjacent. + -1 : Not leader, physically adjacent to previous. + -2 : Not leader, virtually adjacent to previous. 
+ Write dma_length of each leader with the combined lengths of + the mergable followers. */ + +#define SG_ENT_VIRT_ADDRESS(SG) (page_address((SG)->page) + (SG)->offset) +#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG)) + +static void +sg_classify(struct scatterlist *sg, struct scatterlist *end, int virt_ok) +{ + unsigned long next_paddr; + struct scatterlist *leader; + long leader_flag, leader_length; + + leader = sg; + leader_flag = 0; + leader_length = leader->length; + next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length; + + for (++sg; sg < end; ++sg) { + unsigned long addr, len; + addr = SG_ENT_PHYS_ADDRESS(sg); + len = sg->length; + + if (next_paddr == addr) { + sg->dma_address = -1; + leader_length += len; + } else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) { + sg->dma_address = -2; + leader_flag = 1; + leader_length += len; + } else { + leader->dma_address = leader_flag; + leader->dma_length = leader_length; + leader = sg; + leader_flag = 0; + leader_length = len; + } + + next_paddr = addr + len; + } + + leader->dma_address = leader_flag; + leader->dma_length = leader_length; +} + +/* Given a scatterlist leader, choose an allocation method and fill + in the blanks. */ + +static int +sg_fill(struct scatterlist *leader, struct scatterlist *end, + struct scatterlist *out, struct pci_iommu_arena *arena, + dma_addr_t max_dma, int dac_allowed) +{ + unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader); + long size = leader->dma_length; + struct scatterlist *sg; + unsigned long *ptes; + long npages, dma_ofs, i; + +#if !DEBUG_NODIRECT + /* If everything is physically contiguous, and the addresses + fall into the direct-map window, use it. */ + if (leader->dma_address == 0 + && paddr + size + __direct_map_base - 1 <= max_dma + && paddr + size <= __direct_map_size) { + out->dma_address = paddr + __direct_map_base; + out->dma_length = size; + + DBGA(" sg_fill: [%p,%lx] -> direct %lx\n", + __va(paddr), size, out->dma_address); + + return 0; + } +#endif + + /* If physically contiguous and DAC is available, use it. */ + if (leader->dma_address == 0 && dac_allowed) { + out->dma_address = paddr + alpha_mv.pci_dac_offset; + out->dma_length = size; + + DBGA(" sg_fill: [%p,%lx] -> DAC %lx\n", + __va(paddr), size, out->dma_address); + + return 0; + } + + /* Otherwise, we'll use the iommu to make the pages virtually + contiguous. */ + + paddr &= ~PAGE_MASK; + npages = calc_npages(paddr + size); + dma_ofs = iommu_arena_alloc(arena, npages, 0); + if (dma_ofs < 0) { + /* If we attempted a direct map above but failed, die. */ + if (leader->dma_address == 0) + return -1; + + /* Otherwise, break up the remaining virtually contiguous + hunks into individual direct maps and retry. */ + sg_classify(leader, end, 0); + return sg_fill(leader, end, out, arena, max_dma, dac_allowed); + } + + out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr; + out->dma_length = size; + + DBGA(" sg_fill: [%p,%lx] -> sg %lx np %ld\n", + __va(paddr), size, out->dma_address, npages); + + /* All virtually contiguous. We need to find the length of each + physically contiguous subsegment to fill in the ptes. 
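+   Followers that sg_classify marked -1 are physically adjacent to
+   their predecessor and fold into the same run of ptes.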
*/ + ptes = &arena->ptes[dma_ofs]; + sg = leader; + do { +#if DEBUG_ALLOC > 0 + struct scatterlist *last_sg = sg; +#endif + + size = sg->length; + paddr = SG_ENT_PHYS_ADDRESS(sg); + + while (sg+1 < end && (int) sg[1].dma_address == -1) { + size += sg[1].length; + sg++; + } + + npages = calc_npages((paddr & ~PAGE_MASK) + size); + + paddr &= PAGE_MASK; + for (i = 0; i < npages; ++i, paddr += PAGE_SIZE) + *ptes++ = mk_iommu_pte(paddr); + +#if DEBUG_ALLOC > 0 + DBGA(" (%ld) [%p,%x] np %ld\n", + last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg), + last_sg->length, npages); + while (++last_sg <= sg) { + DBGA(" (%ld) [%p,%x] cont\n", + last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg), + last_sg->length); + } +#endif + } while (++sg < end && (int) sg->dma_address < 0); + + return 1; +} + +int +pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents, + int direction) +{ + struct scatterlist *start, *end, *out; + struct pci_controller *hose; + struct pci_iommu_arena *arena; + dma_addr_t max_dma; + int dac_allowed; + + if (direction == PCI_DMA_NONE) + BUG(); + + dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0; + + /* Fast path single entry scatterlists. */ + if (nents == 1) { + sg->dma_length = sg->length; + sg->dma_address + = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg), + sg->length, dac_allowed); + return sg->dma_address != 0; + } + + start = sg; + end = sg + nents; + + /* First, prepare information about the entries. */ + sg_classify(sg, end, alpha_mv.mv_pci_tbi != 0); + + /* Second, figure out where we're going to map things. */ + if (alpha_mv.mv_pci_tbi) { + hose = pdev ? pdev->sysdata : pci_isa_hose; + max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK; + arena = hose->sg_pci; + if (!arena || arena->dma_base + arena->size - 1 > max_dma) + arena = hose->sg_isa; + } else { + max_dma = -1; + arena = NULL; + hose = NULL; + } + + /* Third, iterate over the scatterlist leaders and allocate + dma space as needed. */ + for (out = sg; sg < end; ++sg) { + if ((int) sg->dma_address < 0) + continue; + if (sg_fill(sg, end, out, arena, max_dma, dac_allowed) < 0) + goto error; + out++; + } + + /* Mark the end of the list for pci_unmap_sg. */ + if (out < end) + out->dma_length = 0; + + if (out - start == 0) + printk(KERN_WARNING "pci_map_sg failed: no entries?\n"); + DBGA("pci_map_sg: %ld entries\n", out - start); + + return out - start; + + error: + printk(KERN_WARNING "pci_map_sg failed: " + "could not allocate dma page tables\n"); + + /* Some allocation failed while mapping the scatterlist + entries. Unmap them now. */ + if (out > start) + pci_unmap_sg(pdev, start, out - start, direction); + return 0; +} + +/* Unmap a set of streaming mode DMA translations. Again, cpu read + rules concerning calls here are the same as for pci_unmap_single() + above. */ + +void +pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents, + int direction) +{ + unsigned long flags; + struct pci_controller *hose; + struct pci_iommu_arena *arena; + struct scatterlist *end; + dma_addr_t max_dma; + dma_addr_t fbeg, fend; + + if (direction == PCI_DMA_NONE) + BUG(); + + if (! alpha_mv.mv_pci_tbi) + return; + + hose = pdev ? pdev->sysdata : pci_isa_hose; + max_dma = pdev ? 
pdev->dma_mask : ISA_DMA_MASK; + arena = hose->sg_pci; + if (!arena || arena->dma_base + arena->size - 1 > max_dma) + arena = hose->sg_isa; + + fbeg = -1, fend = 0; + + spin_lock_irqsave(&arena->lock, flags); + + for (end = sg + nents; sg < end; ++sg) { + dma64_addr_t addr; + size_t size; + long npages, ofs; + dma_addr_t tend; + + addr = sg->dma_address; + size = sg->dma_length; + if (!size) + break; + + if (addr > 0xffffffff) { + /* It's a DAC address -- nothing to do. */ + DBGA(" (%ld) DAC [%lx,%lx]\n", + sg - end + nents, addr, size); + continue; + } + + if (addr >= __direct_map_base + && addr < __direct_map_base + __direct_map_size) { + /* Nothing to do. */ + DBGA(" (%ld) direct [%lx,%lx]\n", + sg - end + nents, addr, size); + continue; + } + + DBGA(" (%ld) sg [%lx,%lx]\n", + sg - end + nents, addr, size); + + npages = calc_npages((addr & ~PAGE_MASK) + size); + ofs = (addr - arena->dma_base) >> PAGE_SHIFT; + iommu_arena_free(arena, ofs, npages); + + tend = addr + size - 1; + if (fbeg > addr) fbeg = addr; + if (fend < tend) fend = tend; + } + + /* If we're freeing ptes above the `next_entry' pointer (they + may have snuck back into the TLB since the last wrap flush), + we need to flush the TLB before reallocating the latter. */ + if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry) + alpha_mv.mv_pci_tbi(hose, fbeg, fend); + + spin_unlock_irqrestore(&arena->lock, flags); + + DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg)); +} + + +/* Return whether the given PCI device DMA address mask can be + supported properly. */ + +int +pci_dma_supported(struct pci_dev *pdev, u64 mask) +{ + struct pci_controller *hose; + struct pci_iommu_arena *arena; + + /* If there exists a direct map, and the mask fits either + the entire direct mapped space or the total system memory as + shifted by the map base */ + if (__direct_map_size != 0 + && (__direct_map_base + __direct_map_size - 1 <= mask || + __direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask)) + return 1; + + /* Check that we have a scatter-gather arena that fits. */ + hose = pdev ? pdev->sysdata : pci_isa_hose; + arena = hose->sg_isa; + if (arena && arena->dma_base + arena->size - 1 <= mask) + return 1; + arena = hose->sg_pci; + if (arena && arena->dma_base + arena->size - 1 <= mask) + return 1; + + /* As last resort try ZONE_DMA. */ + if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask) + return 1; + + return 0; +} + + +/* + * AGP GART extensions to the IOMMU + */ +int +iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask) +{ + unsigned long flags; + unsigned long *ptes; + long i, p; + + if (!arena) return -EINVAL; + + spin_lock_irqsave(&arena->lock, flags); + + /* Search for N empty ptes. */ + ptes = arena->ptes; + p = iommu_arena_find_pages(arena, pg_count, align_mask); + if (p < 0) { + spin_unlock_irqrestore(&arena->lock, flags); + return -1; + } + + /* Success. Mark them all reserved (ie not zero and invalid) + for the iommu tlb that could load them from under us. + They will be filled in with valid bits by _bind() */ + for (i = 0; i < pg_count; ++i) + ptes[p+i] = IOMMU_RESERVED_PTE; + + arena->next_entry = p + pg_count; + spin_unlock_irqrestore(&arena->lock, flags); + + return p; +} + +int +iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count) +{ + unsigned long *ptes; + long i; + + if (!arena) return -EINVAL; + + ptes = arena->ptes; + + /* Make sure they're all reserved first... 
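+	   anything else means the caller is releasing ptes it never
+	   reserved, so fail with -EBUSY instead of freeing someone
+	   else's mapping.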
*/ + for(i = pg_start; i < pg_start + pg_count; i++) + if (ptes[i] != IOMMU_RESERVED_PTE) + return -EBUSY; + + iommu_arena_free(arena, pg_start, pg_count); + return 0; +} + +int +iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count, + unsigned long *physaddrs) +{ + unsigned long flags; + unsigned long *ptes; + long i, j; + + if (!arena) return -EINVAL; + + spin_lock_irqsave(&arena->lock, flags); + + ptes = arena->ptes; + + for(j = pg_start; j < pg_start + pg_count; j++) { + if (ptes[j] != IOMMU_RESERVED_PTE) { + spin_unlock_irqrestore(&arena->lock, flags); + return -EBUSY; + } + } + + for(i = 0, j = pg_start; i < pg_count; i++, j++) + ptes[j] = mk_iommu_pte(physaddrs[i]); + + spin_unlock_irqrestore(&arena->lock, flags); + + return 0; +} + +int +iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count) +{ + unsigned long *p; + long i; + + if (!arena) return -EINVAL; + + p = arena->ptes + pg_start; + for(i = 0; i < pg_count; i++) + p[i] = IOMMU_RESERVED_PTE; + + return 0; +} + +/* True if the machine supports DAC addressing, and DEV can + make use of it given MASK. */ + +int +pci_dac_dma_supported(struct pci_dev *dev, u64 mask) +{ + dma64_addr_t dac_offset = alpha_mv.pci_dac_offset; + int ok = 1; + + /* If this is not set, the machine doesn't support DAC at all. */ + if (dac_offset == 0) + ok = 0; + + /* The device has to be able to address our DAC bit. */ + if ((dac_offset & dev->dma_mask) != dac_offset) + ok = 0; + + /* If both conditions above are met, we are fine. */ + DBGA("pci_dac_dma_supported %s from %p\n", + ok ? "yes" : "no", __builtin_return_address(0)); + + return ok; +} + +dma64_addr_t +pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, + unsigned long offset, int direction) +{ + return (alpha_mv.pci_dac_offset + + __pa(page_address(page)) + + (dma64_addr_t) offset); +} + +struct page * +pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr) +{ + unsigned long paddr = (dma_addr & PAGE_MASK) - alpha_mv.pci_dac_offset; + return virt_to_page(__va(paddr)); +} + +unsigned long +pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr) +{ + return (dma_addr & ~PAGE_MASK); +} + + +/* Helper for generic DMA-mapping functions. */ + +struct pci_dev * +alpha_gendev_to_pci(struct device *dev) +{ + if (dev && dev->bus == &pci_bus_type) + return to_pci_dev(dev); + + /* Assume that non-PCI devices asking for DMA are either ISA or EISA, + BUG() otherwise. */ + BUG_ON(!isa_bridge); + + /* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA + bridge is bus master then). */ + if (!dev || !dev->dma_mask || !*dev->dma_mask) + return isa_bridge; + + /* For EISA bus masters, return isa_bridge (it might have smaller + dma_mask due to wiring limitations). */ + if (*dev->dma_mask >= isa_bridge->dma_mask) + return isa_bridge; + + /* This assumes ISA bus master with dma_mask 0xffffff. */ + return NULL; +} + +int +dma_set_mask(struct device *dev, u64 mask) +{ + if (!dev->dma_mask || + !pci_dma_supported(alpha_gendev_to_pci(dev), mask)) + return -EIO; + + *dev->dma_mask = mask; + + return 0; +} diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c new file mode 100644 index 0000000..4933f3c --- /dev/null +++ b/arch/alpha/kernel/process.c @@ -0,0 +1,528 @@ +/* + * linux/arch/alpha/kernel/process.c + * + * Copyright (C) 1995 Linus Torvalds + */ + +/* + * This file handles the architecture-dependent parts of process handling. 
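+ * That covers the idle loop, halt and reboot, thread creation,
+ * and the register dumps used for core files.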
+ */ + +#include <linux/config.h> +#include <linux/errno.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/smp.h> +#include <linux/smp_lock.h> +#include <linux/stddef.h> +#include <linux/unistd.h> +#include <linux/ptrace.h> +#include <linux/slab.h> +#include <linux/user.h> +#include <linux/a.out.h> +#include <linux/utsname.h> +#include <linux/time.h> +#include <linux/major.h> +#include <linux/stat.h> +#include <linux/mman.h> +#include <linux/elfcore.h> +#include <linux/reboot.h> +#include <linux/tty.h> +#include <linux/console.h> + +#include <asm/reg.h> +#include <asm/uaccess.h> +#include <asm/system.h> +#include <asm/io.h> +#include <asm/pgtable.h> +#include <asm/hwrpb.h> +#include <asm/fpu.h> + +#include "proto.h" +#include "pci_impl.h" + +void default_idle(void) +{ + barrier(); +} + +void +cpu_idle(void) +{ + while (1) { + void (*idle)(void) = default_idle; + /* FIXME -- EV6 and LCA45 know how to power down + the CPU. */ + + while (!need_resched()) + idle(); + schedule(); + } +} + + +struct halt_info { + int mode; + char *restart_cmd; +}; + +static void +common_shutdown_1(void *generic_ptr) +{ + struct halt_info *how = (struct halt_info *)generic_ptr; + struct percpu_struct *cpup; + unsigned long *pflags, flags; + int cpuid = smp_processor_id(); + + /* No point in taking interrupts anymore. */ + local_irq_disable(); + + cpup = (struct percpu_struct *) + ((unsigned long)hwrpb + hwrpb->processor_offset + + hwrpb->processor_size * cpuid); + pflags = &cpup->flags; + flags = *pflags; + + /* Clear reason to "default"; clear "bootstrap in progress". */ + flags &= ~0x00ff0001UL; + +#ifdef CONFIG_SMP + /* Secondaries halt here. */ + if (cpuid != boot_cpuid) { + flags |= 0x00040000UL; /* "remain halted" */ + *pflags = flags; + clear_bit(cpuid, &cpu_present_mask); + halt(); + } +#endif + + if (how->mode == LINUX_REBOOT_CMD_RESTART) { + if (!how->restart_cmd) { + flags |= 0x00020000UL; /* "cold bootstrap" */ + } else { + /* For SRM, we could probably set environment + variables to get this to work. We'd have to + delay this until after srm_paging_stop unless + we ever got srm_fixup working. + + At the moment, SRM will use the last boot device, + but the file and flags will be the defaults, when + doing a "warm" bootstrap. */ + flags |= 0x00030000UL; /* "warm bootstrap" */ + } + } else { + flags |= 0x00040000UL; /* "remain halted" */ + } + *pflags = flags; + +#ifdef CONFIG_SMP + /* Wait for the secondaries to halt. */ + cpu_clear(boot_cpuid, cpu_possible_map); + while (cpus_weight(cpu_possible_map)) + barrier(); +#endif + + /* If booted from SRM, reset some of the original environment. */ + if (alpha_using_srm) { +#ifdef CONFIG_DUMMY_CONSOLE + /* This has the effect of resetting the VGA video origin. */ + take_over_console(&dummy_con, 0, MAX_NR_CONSOLES-1, 1); +#endif + pci_restore_srm_config(); + set_hae(srm_hae); + } + + if (alpha_mv.kill_arch) + alpha_mv.kill_arch(how->mode); + + if (! alpha_using_srm && how->mode != LINUX_REBOOT_CMD_RESTART) { + /* Unfortunately, since MILO doesn't currently understand + the hwrpb bits above, we can't reliably halt the + processor and keep it halted. So just loop. 
*/ + return; + } + + if (alpha_using_srm) + srm_paging_stop(); + + halt(); +} + +static void +common_shutdown(int mode, char *restart_cmd) +{ + struct halt_info args; + args.mode = mode; + args.restart_cmd = restart_cmd; + on_each_cpu(common_shutdown_1, &args, 1, 0); +} + +void +machine_restart(char *restart_cmd) +{ + common_shutdown(LINUX_REBOOT_CMD_RESTART, restart_cmd); +} + +EXPORT_SYMBOL(machine_restart); + +void +machine_halt(void) +{ + common_shutdown(LINUX_REBOOT_CMD_HALT, NULL); +} + +EXPORT_SYMBOL(machine_halt); + +void +machine_power_off(void) +{ + common_shutdown(LINUX_REBOOT_CMD_POWER_OFF, NULL); +} + +EXPORT_SYMBOL(machine_power_off); + +/* Used by sysrq-p, among others. I don't believe r9-r15 are ever + saved in the context it's used. */ + +void +show_regs(struct pt_regs *regs) +{ + dik_show_regs(regs, NULL); +} + +/* + * Re-start a thread when doing execve() + */ +void +start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp) +{ + set_fs(USER_DS); + regs->pc = pc; + regs->ps = 8; + wrusp(sp); +} + +/* + * Free current thread data structures etc.. + */ +void +exit_thread(void) +{ +} + +void +flush_thread(void) +{ + /* Arrange for each exec'ed process to start off with a clean slate + with respect to the FPU. This is all exceptions disabled. */ + current_thread_info()->ieee_state = 0; + wrfpcr(FPCR_DYN_NORMAL | ieee_swcr_to_fpcr(0)); + + /* Clean slate for TLS. */ + current_thread_info()->pcb.unique = 0; +} + +void +release_thread(struct task_struct *dead_task) +{ +} + +/* + * "alpha_clone()".. By the time we get here, the + * non-volatile registers have also been saved on the + * stack. We do some ugly pointer stuff here.. (see + * also copy_thread) + * + * Notice that "fork()" is implemented in terms of clone, + * with parameters (SIGCHLD, 0). + */ +int +alpha_clone(unsigned long clone_flags, unsigned long usp, + int __user *parent_tid, int __user *child_tid, + unsigned long tls_value, struct pt_regs *regs) +{ + if (!usp) + usp = rdusp(); + + return do_fork(clone_flags, usp, regs, 0, parent_tid, child_tid); +} + +int +alpha_vfork(struct pt_regs *regs) +{ + return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), + regs, 0, NULL, NULL); +} + +/* + * Copy an alpha thread.. + * + * Note the "stack_offset" stuff: when returning to kernel mode, we need + * to have some extra stack-space for the kernel stack that still exists + * after the "ret_from_fork". When returning to user mode, we only want + * the space needed by the syscall stack frame (ie "struct pt_regs"). + * Use the passed "regs" pointer to determine how much space we need + * for a kernel fork(). + */ + +int +copy_thread(int nr, unsigned long clone_flags, unsigned long usp, + unsigned long unused, + struct task_struct * p, struct pt_regs * regs) +{ + extern void ret_from_fork(void); + + struct thread_info *childti = p->thread_info; + struct pt_regs * childregs; + struct switch_stack * childstack, *stack; + unsigned long stack_offset, settls; + + stack_offset = PAGE_SIZE - sizeof(struct pt_regs); + if (!(regs->ps & 8)) + stack_offset = (PAGE_SIZE-1) & (unsigned long) regs; + childregs = (struct pt_regs *) + (stack_offset + PAGE_SIZE + (long) childti); + + *childregs = *regs; + settls = regs->r20; + childregs->r0 = 0; + childregs->r19 = 0; + childregs->r20 = 1; /* OSF/1 has some strange fork() semantics. 
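+				   The child returns with a4 (r20) = 1 and the parent
+				   with a4 = 0, which is how OSF/1 user space tells the
+				   two returns apart.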
*/ + regs->r20 = 0; + stack = ((struct switch_stack *) regs) - 1; + childstack = ((struct switch_stack *) childregs) - 1; + *childstack = *stack; + childstack->r26 = (unsigned long) ret_from_fork; + childti->pcb.usp = usp; + childti->pcb.ksp = (unsigned long) childstack; + childti->pcb.flags = 1; /* set FEN, clear everything else */ + + /* Set a new TLS for the child thread? Peek back into the + syscall arguments that we saved on syscall entry. Oops, + except we'd have clobbered it with the parent/child set + of r20. Read the saved copy. */ + /* Note: if CLONE_SETTLS is not set, then we must inherit the + value from the parent, which will have been set by the block + copy in dup_task_struct. This is non-intuitive, but is + required for proper operation in the case of a threaded + application calling fork. */ + if (clone_flags & CLONE_SETTLS) + childti->pcb.unique = settls; + + return 0; +} + +/* + * Fill in the user structure for an ECOFF core dump. + */ +void +dump_thread(struct pt_regs * pt, struct user * dump) +{ + /* switch stack follows right below pt_regs: */ + struct switch_stack * sw = ((struct switch_stack *) pt) - 1; + + dump->magic = CMAGIC; + dump->start_code = current->mm->start_code; + dump->start_data = current->mm->start_data; + dump->start_stack = rdusp() & ~(PAGE_SIZE - 1); + dump->u_tsize = ((current->mm->end_code - dump->start_code) + >> PAGE_SHIFT); + dump->u_dsize = ((current->mm->brk + PAGE_SIZE-1 - dump->start_data) + >> PAGE_SHIFT); + dump->u_ssize = (current->mm->start_stack - dump->start_stack + + PAGE_SIZE-1) >> PAGE_SHIFT; + + /* + * We store the registers in an order/format that is + * compatible with DEC Unix/OSF/1 as this makes life easier + * for gdb. + */ + dump->regs[EF_V0] = pt->r0; + dump->regs[EF_T0] = pt->r1; + dump->regs[EF_T1] = pt->r2; + dump->regs[EF_T2] = pt->r3; + dump->regs[EF_T3] = pt->r4; + dump->regs[EF_T4] = pt->r5; + dump->regs[EF_T5] = pt->r6; + dump->regs[EF_T6] = pt->r7; + dump->regs[EF_T7] = pt->r8; + dump->regs[EF_S0] = sw->r9; + dump->regs[EF_S1] = sw->r10; + dump->regs[EF_S2] = sw->r11; + dump->regs[EF_S3] = sw->r12; + dump->regs[EF_S4] = sw->r13; + dump->regs[EF_S5] = sw->r14; + dump->regs[EF_S6] = sw->r15; + dump->regs[EF_A3] = pt->r19; + dump->regs[EF_A4] = pt->r20; + dump->regs[EF_A5] = pt->r21; + dump->regs[EF_T8] = pt->r22; + dump->regs[EF_T9] = pt->r23; + dump->regs[EF_T10] = pt->r24; + dump->regs[EF_T11] = pt->r25; + dump->regs[EF_RA] = pt->r26; + dump->regs[EF_T12] = pt->r27; + dump->regs[EF_AT] = pt->r28; + dump->regs[EF_SP] = rdusp(); + dump->regs[EF_PS] = pt->ps; + dump->regs[EF_PC] = pt->pc; + dump->regs[EF_GP] = pt->gp; + dump->regs[EF_A0] = pt->r16; + dump->regs[EF_A1] = pt->r17; + dump->regs[EF_A2] = pt->r18; + memcpy((char *)dump->regs + EF_SIZE, sw->fp, 32 * 8); +} + +/* + * Fill in the user structure for a ELF core dump. 
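+ * The slot order has to line up with the elf_greg_t layout user
+ * space expects: r0-r28, then gp, usp and pc, with slot 32 holding
+ * the thread's unique value as noted below.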
+ */ +void +dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, struct thread_info *ti) +{ + /* switch stack follows right below pt_regs: */ + struct switch_stack * sw = ((struct switch_stack *) pt) - 1; + + dest[ 0] = pt->r0; + dest[ 1] = pt->r1; + dest[ 2] = pt->r2; + dest[ 3] = pt->r3; + dest[ 4] = pt->r4; + dest[ 5] = pt->r5; + dest[ 6] = pt->r6; + dest[ 7] = pt->r7; + dest[ 8] = pt->r8; + dest[ 9] = sw->r9; + dest[10] = sw->r10; + dest[11] = sw->r11; + dest[12] = sw->r12; + dest[13] = sw->r13; + dest[14] = sw->r14; + dest[15] = sw->r15; + dest[16] = pt->r16; + dest[17] = pt->r17; + dest[18] = pt->r18; + dest[19] = pt->r19; + dest[20] = pt->r20; + dest[21] = pt->r21; + dest[22] = pt->r22; + dest[23] = pt->r23; + dest[24] = pt->r24; + dest[25] = pt->r25; + dest[26] = pt->r26; + dest[27] = pt->r27; + dest[28] = pt->r28; + dest[29] = pt->gp; + dest[30] = rdusp(); + dest[31] = pt->pc; + + /* Once upon a time this was the PS value. Which is stupid + since that is always 8 for usermode. Usurped for the more + useful value of the thread's UNIQUE field. */ + dest[32] = ti->pcb.unique; +} + +int +dump_elf_task(elf_greg_t *dest, struct task_struct *task) +{ + struct thread_info *ti; + struct pt_regs *pt; + + ti = task->thread_info; + pt = (struct pt_regs *)((unsigned long)ti + 2*PAGE_SIZE) - 1; + + dump_elf_thread(dest, pt, ti); + + return 1; +} + +int +dump_elf_task_fp(elf_fpreg_t *dest, struct task_struct *task) +{ + struct thread_info *ti; + struct pt_regs *pt; + struct switch_stack *sw; + + ti = task->thread_info; + pt = (struct pt_regs *)((unsigned long)ti + 2*PAGE_SIZE) - 1; + sw = (struct switch_stack *)pt - 1; + + memcpy(dest, sw->fp, 32 * 8); + + return 1; +} + +/* + * sys_execve() executes a new program. + */ +asmlinkage int +do_sys_execve(char __user *ufilename, char __user * __user *argv, + char __user * __user *envp, struct pt_regs *regs) +{ + int error; + char *filename; + + filename = getname(ufilename); + error = PTR_ERR(filename); + if (IS_ERR(filename)) + goto out; + error = do_execve(filename, argv, envp, regs); + putname(filename); +out: + return error; +} + +/* + * Return saved PC of a blocked thread. This assumes the frame + * pointer is the 6th saved long on the kernel stack and that the + * saved return address is the first long in the frame. This all + * holds provided the thread blocked through a call to schedule() ($15 + * is the frame pointer in schedule() and $15 is saved at offset 48 by + * entry.S:do_switch_stack). + * + * Under heavy swap load I've seen this lose in an ugly way. So do + * some extra sanity checking on the ranges we expect these pointers + * to be in so that we can fail gracefully. This is just for ps after + * all. -- r~ + */ + +unsigned long +thread_saved_pc(task_t *t) +{ + unsigned long base = (unsigned long)t->thread_info; + unsigned long fp, sp = t->thread_info->pcb.ksp; + + if (sp > base && sp+6*8 < base + 16*1024) { + fp = ((unsigned long*)sp)[6]; + if (fp > sp && fp < base + 16*1024) + return *(unsigned long *)fp; + } + + return 0; +} + +unsigned long +get_wchan(struct task_struct *p) +{ + unsigned long schedule_frame; + unsigned long pc; + if (!p || p == current || p->state == TASK_RUNNING) + return 0; + /* + * This one depends on the frame size of schedule(). Do a + * "disass schedule" in gdb to find the frame size. Also, the + * code assumes that sleep_on() follows immediately after + * interruptible_sleep_on() and that add_timer() follows + * immediately after interruptible_sleep(). Ugly, isn't it? 
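+ * (Concretely: thread_saved_pc() above pulls the saved frame pointer
+ * out of slot 6 of the switch frame at pcb.ksp, and get_wchan() then
+ * reports slot 12 of schedule()'s frame as the sleeper's pc.)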
+ * Maybe adding a wchan field to task_struct would be better, + * after all... + */ + + pc = thread_saved_pc(p); + if (in_sched_functions(pc)) { + schedule_frame = ((unsigned long *)p->thread_info->pcb.ksp)[6]; + return ((unsigned long *)schedule_frame)[12]; + } + return pc; +} diff --git a/arch/alpha/kernel/proto.h b/arch/alpha/kernel/proto.h new file mode 100644 index 0000000..e1560fb --- /dev/null +++ b/arch/alpha/kernel/proto.h @@ -0,0 +1,210 @@ +#include <linux/config.h> +#include <linux/interrupt.h> + + +/* Prototypes of functions used across modules here in this directory. */ + +#define vucp volatile unsigned char * +#define vusp volatile unsigned short * +#define vip volatile int * +#define vuip volatile unsigned int * +#define vulp volatile unsigned long * + +struct pt_regs; +struct task_struct; +struct pci_dev; +struct pci_controller; + +/* core_apecs.c */ +extern struct pci_ops apecs_pci_ops; +extern void apecs_init_arch(void); +extern void apecs_pci_clr_err(void); +extern void apecs_machine_check(u64, u64, struct pt_regs *); +extern void apecs_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); + +/* core_cia.c */ +extern struct pci_ops cia_pci_ops; +extern void cia_init_pci(void); +extern void cia_init_arch(void); +extern void pyxis_init_arch(void); +extern void cia_kill_arch(int); +extern void cia_machine_check(u64, u64, struct pt_regs *); +extern void cia_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); + +/* core_irongate.c */ +extern struct pci_ops irongate_pci_ops; +extern int irongate_pci_clr_err(void); +extern void irongate_init_arch(void); +extern void irongate_machine_check(u64, u64, struct pt_regs *); +#define irongate_pci_tbi ((void *)0) + +/* core_lca.c */ +extern struct pci_ops lca_pci_ops; +extern void lca_init_arch(void); +extern void lca_machine_check(u64, u64, struct pt_regs *); +extern void lca_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); + +/* core_marvel.c */ +extern struct pci_ops marvel_pci_ops; +extern void marvel_init_arch(void); +extern void marvel_kill_arch(int); +extern void marvel_machine_check(u64, u64, struct pt_regs *); +extern void marvel_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); +extern int marvel_pa_to_nid(unsigned long); +extern int marvel_cpuid_to_nid(int); +extern unsigned long marvel_node_mem_start(int); +extern unsigned long marvel_node_mem_size(int); +extern struct _alpha_agp_info *marvel_agp_info(void); +struct io7 *marvel_find_io7(int pe); +struct io7 *marvel_next_io7(struct io7 *prev); +void io7_clear_errors(struct io7 *io7); + +/* core_mcpcia.c */ +extern struct pci_ops mcpcia_pci_ops; +extern void mcpcia_init_arch(void); +extern void mcpcia_init_hoses(void); +extern void mcpcia_machine_check(u64, u64, struct pt_regs *); +extern void mcpcia_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); + +/* core_polaris.c */ +extern struct pci_ops polaris_pci_ops; +extern int polaris_read_config_dword(struct pci_dev *, int, u32 *); +extern int polaris_write_config_dword(struct pci_dev *, int, u32); +extern void polaris_init_arch(void); +extern void polaris_machine_check(u64, u64, struct pt_regs *); +#define polaris_pci_tbi ((void *)0) + +/* core_t2.c */ +extern struct pci_ops t2_pci_ops; +extern void t2_init_arch(void); +extern void t2_kill_arch(int); +extern void t2_machine_check(u64, u64, struct pt_regs *); +extern void t2_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); + +/* core_titan.c */ +extern struct pci_ops titan_pci_ops; +extern void titan_init_arch(void); +extern void 
titan_kill_arch(int); +extern void titan_machine_check(u64, u64, struct pt_regs *); +extern void titan_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); +extern struct _alpha_agp_info *titan_agp_info(void); + +/* core_tsunami.c */ +extern struct pci_ops tsunami_pci_ops; +extern void tsunami_init_arch(void); +extern void tsunami_kill_arch(int); +extern void tsunami_machine_check(u64, u64, struct pt_regs *); +extern void tsunami_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); + +/* core_wildfire.c */ +extern struct pci_ops wildfire_pci_ops; +extern void wildfire_init_arch(void); +extern void wildfire_kill_arch(int); +extern void wildfire_machine_check(u64, u64, struct pt_regs *); +extern void wildfire_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); +extern int wildfire_pa_to_nid(unsigned long); +extern int wildfire_cpuid_to_nid(int); +extern unsigned long wildfire_node_mem_start(int); +extern unsigned long wildfire_node_mem_size(int); + +/* setup.c */ +extern unsigned long srm_hae; +extern int boot_cpuid; +#ifdef CONFIG_VERBOSE_MCHECK +extern unsigned long alpha_verbose_mcheck; +#endif + +/* srmcons.c */ +#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM) +extern void register_srm_console(void); +extern void unregister_srm_console(void); +#else +#define register_srm_console() +#define unregister_srm_console() +#endif + +/* smp.c */ +extern void setup_smp(void); +extern void handle_ipi(struct pt_regs *); +extern void smp_percpu_timer_interrupt(struct pt_regs *); + +/* bios32.c */ +/* extern void reset_for_srm(void); */ + +/* time.c */ +extern irqreturn_t timer_interrupt(int irq, void *dev, struct pt_regs * regs); +extern void common_init_rtc(void); +extern unsigned long est_cycle_freq; + +/* smc37c93x.c */ +extern void SMC93x_Init(void); + +/* smc37c669.c */ +extern void SMC669_Init(int); + +/* es1888.c */ +extern void es1888_init(void); + +/* ns87312.c */ +extern void ns87312_enable_ide(long ide_base); + +/* ../lib/fpreg.c */ +extern void alpha_write_fp_reg (unsigned long reg, unsigned long val); +extern unsigned long alpha_read_fp_reg (unsigned long reg); + +/* head.S */ +extern void wrmces(unsigned long mces); +extern void cserve_ena(unsigned long); +extern void cserve_dis(unsigned long); +extern void __smp_callin(unsigned long); + +/* entry.S */ +extern void entArith(void); +extern void entIF(void); +extern void entInt(void); +extern void entMM(void); +extern void entSys(void); +extern void entUna(void); +extern void entDbg(void); + +/* ptrace.c */ +extern int ptrace_set_bpt (struct task_struct *child); +extern int ptrace_cancel_bpt (struct task_struct *child); + +/* traps.c */ +extern void dik_show_regs(struct pt_regs *regs, unsigned long *r9_15); +extern void die_if_kernel(char *, struct pt_regs *, long, unsigned long *); + +/* sys_titan.c */ +extern void titan_dispatch_irqs(u64, struct pt_regs *); + +/* ../mm/init.c */ +extern void switch_to_system_map(void); +extern void srm_paging_stop(void); + +/* ../mm/remap.c */ +extern int __alpha_remap_area_pages(unsigned long, unsigned long, + unsigned long, unsigned long); + +/* irq.c */ + +#ifdef CONFIG_SMP +#define mcheck_expected(cpu) (cpu_data[cpu].mcheck_expected) +#define mcheck_taken(cpu) (cpu_data[cpu].mcheck_taken) +#define mcheck_extra(cpu) (cpu_data[cpu].mcheck_extra) +#else +extern struct mcheck_info +{ + unsigned char expected __attribute__((aligned(8))); + unsigned char taken; + unsigned char extra; +} __mcheck_info; + +#define mcheck_expected(cpu) (*((void)(cpu), &__mcheck_info.expected)) 
+#define mcheck_taken(cpu) (*((void)(cpu), &__mcheck_info.taken))
+#define mcheck_extra(cpu) (*((void)(cpu), &__mcheck_info.extra))
+#endif
+
+extern void process_mcheck_info(unsigned long vector, unsigned long la_ptr,
+ struct pt_regs *regs, const char *machine,
+ int expected);
diff --git a/arch/alpha/kernel/ptrace.c b/arch/alpha/kernel/ptrace.c
new file mode 100644
index 0000000..d005831
--- /dev/null
+++ b/arch/alpha/kernel/ptrace.c
@@ -0,0 +1,415 @@
+/* ptrace.c */
+/* By Ross Biro 1/23/92 */
+/* edited by Linus Torvalds */
+/* mangled further by Bob Manson (manson@santafe.edu) */
+/* more mutilation by David Mosberger (davidm@azstarnet.com) */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/user.h>
+#include <linux/slab.h>
+#include <linux/security.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/fpu.h>
+
+#include "proto.h"
+
+#define DEBUG DBG_MEM
+#undef DEBUG
+
+#ifdef DEBUG
+enum {
+ DBG_MEM = (1<<0),
+ DBG_BPT = (1<<1),
+ DBG_MEM_ALL = (1<<2)
+};
+#define DBG(fac,args) {if ((fac) & DEBUG) printk args;}
+#else
+#define DBG(fac,args)
+#endif
+
+#define BREAKINST 0x00000080 /* call_pal bpt */
+
+/*
+ * This does not yet catch signals sent when the child dies
+ * in exit.c or in signal.c.
+ */
+
+/*
+ * Processes always block with the following stack-layout:
+ *
+ * +================================+ <---- task + 2*PAGE_SIZE
+ * | PALcode saved frame (ps, pc, | ^
+ * | gp, a0, a1, a2) | |
+ * +================================+ | struct pt_regs
+ * | | |
+ * | frame generated by SAVE_ALL | |
+ * | | v
+ * +================================+
+ * | | ^
+ * | frame saved by do_switch_stack | | struct switch_stack
+ * | | v
+ * +================================+
+ */
+
+/*
+ * The following table maps a register index into the stack offset at
+ * which the register is saved. Register indices are 0-31 for integer
+ * regs, 32-63 for fp regs, and 64 for the pc. Notice that sp and
+ * zero have no stack-slot and need to be treated specially (see
+ * get_reg/put_reg below).
+ */
+enum {
+ REG_R0 = 0, REG_F0 = 32, REG_FPCR = 63, REG_PC = 64
+};
+
+static int regoff[] = {
+ PT_REG( r0), PT_REG( r1), PT_REG( r2), PT_REG( r3),
+ PT_REG( r4), PT_REG( r5), PT_REG( r6), PT_REG( r7),
+ PT_REG( r8), SW_REG( r9), SW_REG( r10), SW_REG( r11),
+ SW_REG( r12), SW_REG( r13), SW_REG( r14), SW_REG( r15),
+ PT_REG( r16), PT_REG( r17), PT_REG( r18), PT_REG( r19),
+ PT_REG( r20), PT_REG( r21), PT_REG( r22), PT_REG( r23),
+ PT_REG( r24), PT_REG( r25), PT_REG( r26), PT_REG( r27),
+ PT_REG( r28), PT_REG( gp), -1, -1,
+ SW_REG(fp[ 0]), SW_REG(fp[ 1]), SW_REG(fp[ 2]), SW_REG(fp[ 3]),
+ SW_REG(fp[ 4]), SW_REG(fp[ 5]), SW_REG(fp[ 6]), SW_REG(fp[ 7]),
+ SW_REG(fp[ 8]), SW_REG(fp[ 9]), SW_REG(fp[10]), SW_REG(fp[11]),
+ SW_REG(fp[12]), SW_REG(fp[13]), SW_REG(fp[14]), SW_REG(fp[15]),
+ SW_REG(fp[16]), SW_REG(fp[17]), SW_REG(fp[18]), SW_REG(fp[19]),
+ SW_REG(fp[20]), SW_REG(fp[21]), SW_REG(fp[22]), SW_REG(fp[23]),
+ SW_REG(fp[24]), SW_REG(fp[25]), SW_REG(fp[26]), SW_REG(fp[27]),
+ SW_REG(fp[28]), SW_REG(fp[29]), SW_REG(fp[30]), SW_REG(fp[31]),
+ PT_REG( pc)
+};
+
+static unsigned long zero;
+
+/*
+ * Get address of register REGNO in task TASK.
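+ * Most registers live at the regoff[] offset within the saved stack
+ * frames; usp (30) and the TLS "unique" value (65) are fetched from
+ * the PCB instead, while the zero register (31) and out-of-range
+ * indices are pointed at a scratch word that always reads 0.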
+ */ +static unsigned long * +get_reg_addr(struct task_struct * task, unsigned long regno) +{ + unsigned long *addr; + + if (regno == 30) { + addr = &task->thread_info->pcb.usp; + } else if (regno == 65) { + addr = &task->thread_info->pcb.unique; + } else if (regno == 31 || regno > 65) { + zero = 0; + addr = &zero; + } else { + addr = (void *)task->thread_info + regoff[regno]; + } + return addr; +} + +/* + * Get contents of register REGNO in task TASK. + */ +static unsigned long +get_reg(struct task_struct * task, unsigned long regno) +{ + /* Special hack for fpcr -- combine hardware and software bits. */ + if (regno == 63) { + unsigned long fpcr = *get_reg_addr(task, regno); + unsigned long swcr + = task->thread_info->ieee_state & IEEE_SW_MASK; + swcr = swcr_update_status(swcr, fpcr); + return fpcr | swcr; + } + return *get_reg_addr(task, regno); +} + +/* + * Write contents of register REGNO in task TASK. + */ +static int +put_reg(struct task_struct *task, unsigned long regno, unsigned long data) +{ + if (regno == 63) { + task->thread_info->ieee_state + = ((task->thread_info->ieee_state & ~IEEE_SW_MASK) + | (data & IEEE_SW_MASK)); + data = (data & FPCR_DYN_MASK) | ieee_swcr_to_fpcr(data); + } + *get_reg_addr(task, regno) = data; + return 0; +} + +static inline int +read_int(struct task_struct *task, unsigned long addr, int * data) +{ + int copied = access_process_vm(task, addr, data, sizeof(int), 0); + return (copied == sizeof(int)) ? 0 : -EIO; +} + +static inline int +write_int(struct task_struct *task, unsigned long addr, int data) +{ + int copied = access_process_vm(task, addr, &data, sizeof(int), 1); + return (copied == sizeof(int)) ? 0 : -EIO; +} + +/* + * Set breakpoint. + */ +int +ptrace_set_bpt(struct task_struct * child) +{ + int displ, i, res, reg_b, nsaved = 0; + unsigned int insn, op_code; + unsigned long pc; + + pc = get_reg(child, REG_PC); + res = read_int(child, pc, (int *) &insn); + if (res < 0) + return res; + + op_code = insn >> 26; + if (op_code >= 0x30) { + /* + * It's a branch: instead of trying to figure out + * whether the branch will be taken or not, we'll put + * a breakpoint at either location. This is simpler, + * more reliable, and probably not a whole lot slower + * than the alternative approach of emulating the + * branch (emulation can be tricky for fp branches). + */ + displ = ((s32)(insn << 11)) >> 9; + child->thread_info->bpt_addr[nsaved++] = pc + 4; + if (displ) /* guard against unoptimized code */ + child->thread_info->bpt_addr[nsaved++] + = pc + 4 + displ; + DBG(DBG_BPT, ("execing branch\n")); + } else if (op_code == 0x1a) { + reg_b = (insn >> 16) & 0x1f; + child->thread_info->bpt_addr[nsaved++] = get_reg(child, reg_b); + DBG(DBG_BPT, ("execing jump\n")); + } else { + child->thread_info->bpt_addr[nsaved++] = pc + 4; + DBG(DBG_BPT, ("execing normal insn\n")); + } + + /* install breakpoints: */ + for (i = 0; i < nsaved; ++i) { + res = read_int(child, child->thread_info->bpt_addr[i], + (int *) &insn); + if (res < 0) + return res; + child->thread_info->bpt_insn[i] = insn; + DBG(DBG_BPT, (" -> next_pc=%lx\n", + child->thread_info->bpt_addr[i])); + res = write_int(child, child->thread_info->bpt_addr[i], + BREAKINST); + if (res < 0) + return res; + } + child->thread_info->bpt_nsaved = nsaved; + return 0; +} + +/* + * Ensure no single-step breakpoint is pending. Returns non-zero + * value if child was being single-stepped. 
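+ * (bpt_nsaved is 0 when idle, 1 or 2 while breakpoints are planted,
+ * and -1 when PTRACE_SINGLESTEP has been requested but the traps are
+ * not yet written; any non-zero value means the child was stepping.)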
+ */ +int +ptrace_cancel_bpt(struct task_struct * child) +{ + int i, nsaved = child->thread_info->bpt_nsaved; + + child->thread_info->bpt_nsaved = 0; + + if (nsaved > 2) { + printk("ptrace_cancel_bpt: bogus nsaved: %d!\n", nsaved); + nsaved = 2; + } + + for (i = 0; i < nsaved; ++i) { + write_int(child, child->thread_info->bpt_addr[i], + child->thread_info->bpt_insn[i]); + } + return (nsaved != 0); +} + +/* + * Called by kernel/ptrace.c when detaching.. + * + * Make sure the single step bit is not set. + */ +void ptrace_disable(struct task_struct *child) +{ + ptrace_cancel_bpt(child); +} + +asmlinkage long +do_sys_ptrace(long request, long pid, long addr, long data, + struct pt_regs *regs) +{ + struct task_struct *child; + unsigned long tmp; + size_t copied; + long ret; + + lock_kernel(); + DBG(DBG_MEM, ("request=%ld pid=%ld addr=0x%lx data=0x%lx\n", + request, pid, addr, data)); + ret = -EPERM; + if (request == PTRACE_TRACEME) { + /* are we already being traced? */ + if (current->ptrace & PT_PTRACED) + goto out_notsk; + ret = security_ptrace(current->parent, current); + if (ret) + goto out_notsk; + /* set the ptrace bit in the process ptrace flags. */ + current->ptrace |= PT_PTRACED; + ret = 0; + goto out_notsk; + } + if (pid == 1) /* you may not mess with init */ + goto out_notsk; + + ret = -ESRCH; + read_lock(&tasklist_lock); + child = find_task_by_pid(pid); + if (child) + get_task_struct(child); + read_unlock(&tasklist_lock); + if (!child) + goto out_notsk; + + if (request == PTRACE_ATTACH) { + ret = ptrace_attach(child); + goto out; + } + + ret = ptrace_check_attach(child, request == PTRACE_KILL); + if (ret < 0) + goto out; + + switch (request) { + /* When I and D space are separate, these will need to be fixed. */ + case PTRACE_PEEKTEXT: /* read word at location addr. */ + case PTRACE_PEEKDATA: + copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); + ret = -EIO; + if (copied != sizeof(tmp)) + break; + + regs->r0 = 0; /* special return: no errors */ + ret = tmp; + break; + + /* Read register number ADDR. */ + case PTRACE_PEEKUSR: + regs->r0 = 0; /* special return: no errors */ + ret = get_reg(child, addr); + DBG(DBG_MEM, ("peek $%ld->%#lx\n", addr, ret)); + break; + + /* When I and D space are separate, this will have to be fixed. */ + case PTRACE_POKETEXT: /* write the word at location addr. */ + case PTRACE_POKEDATA: + tmp = data; + copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 1); + ret = (copied == sizeof(tmp)) ? 0 : -EIO; + break; + + case PTRACE_POKEUSR: /* write the specified register */ + DBG(DBG_MEM, ("poke $%ld<-%#lx\n", addr, data)); + ret = put_reg(child, addr, data); + break; + + case PTRACE_SYSCALL: + /* continue and stop at next (return from) syscall */ + case PTRACE_CONT: /* restart after signal. */ + ret = -EIO; + if ((unsigned long) data > _NSIG) + break; + if (request == PTRACE_SYSCALL) + set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); + else + clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); + child->exit_code = data; + /* make sure single-step breakpoint is gone. */ + ptrace_cancel_bpt(child); + wake_up_process(child); + ret = 0; + break; + + /* + * Make the child exit. Best I can do is send it a sigkill. + * perhaps it should be put in the status that it wants to + * exit. + */ + case PTRACE_KILL: + ret = 0; + if (child->exit_state == EXIT_ZOMBIE) + break; + child->exit_code = SIGKILL; + /* make sure single-step breakpoint is gone. 
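+ Do this before the child is awakened with SIGKILL; once it is
+ dead there is no further chance to restore the instructions
+ saved under the BREAKINST traps.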
*/ + ptrace_cancel_bpt(child); + wake_up_process(child); + goto out; + + case PTRACE_SINGLESTEP: /* execute single instruction. */ + ret = -EIO; + if ((unsigned long) data > _NSIG) + break; + /* Mark single stepping. */ + child->thread_info->bpt_nsaved = -1; + clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); + child->exit_code = data; + wake_up_process(child); + /* give it a chance to run. */ + ret = 0; + goto out; + + case PTRACE_DETACH: /* detach a process that was attached. */ + ret = ptrace_detach(child, data); + goto out; + + default: + ret = ptrace_request(child, request, addr, data); + goto out; + } + out: + put_task_struct(child); + out_notsk: + unlock_kernel(); + return ret; +} + +asmlinkage void +syscall_trace(void) +{ + if (!test_thread_flag(TIF_SYSCALL_TRACE)) + return; + if (!(current->ptrace & PT_PTRACED)) + return; + /* The 0x80 provides a way for the tracing parent to distinguish + between a syscall stop and SIGTRAP delivery */ + ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) + ? 0x80 : 0)); + + /* + * This isn't the same as continuing with a signal, but it will do + * for normal use. strace only continues with a signal if the + * stopping signal is not SIGTRAP. -brl + */ + if (current->exit_code) { + send_sig(current->exit_code, current, 1); + current->exit_code = 0; + } +} diff --git a/arch/alpha/kernel/semaphore.c b/arch/alpha/kernel/semaphore.c new file mode 100644 index 0000000..8c8aaa2 --- /dev/null +++ b/arch/alpha/kernel/semaphore.c @@ -0,0 +1,224 @@ +/* + * Alpha semaphore implementation. + * + * (C) Copyright 1996 Linus Torvalds + * (C) Copyright 1999, 2000 Richard Henderson + */ + +#include <linux/errno.h> +#include <linux/sched.h> +#include <linux/init.h> + +/* + * This is basically the PPC semaphore scheme ported to use + * the Alpha ll/sc sequences, so see the PPC code for + * credits. + */ + +/* + * Atomically update sem->count. + * This does the equivalent of the following: + * + * old_count = sem->count; + * tmp = MAX(old_count, 0) + incr; + * sem->count = tmp; + * return old_count; + */ +static inline int __sem_update_count(struct semaphore *sem, int incr) +{ + long old_count, tmp = 0; + + __asm__ __volatile__( + "1: ldl_l %0,%2\n" + " cmovgt %0,%0,%1\n" + " addl %1,%3,%1\n" + " stl_c %1,%2\n" + " beq %1,2f\n" + " mb\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count) + : "Ir" (incr), "1" (tmp), "m" (sem->count)); + + return old_count; +} + +/* + * Perform the "down" function. Return zero for semaphore acquired, + * return negative for signalled out of the function. + * + * If called from down, the return is ignored and the wait loop is + * not interruptible. This means that a task waiting on a semaphore + * using "down()" cannot be killed until someone does an "up()" on + * the semaphore. + * + * If called from down_interruptible, the return value gets checked + * upon return. If the return value is negative then the task continues + * with the negative value in the return register (it can be tested by + * the caller). + * + * Either form may be used in conjunction with "up()". + */ + +void __sched +__down_failed(struct semaphore *sem) +{ + struct task_struct *tsk = current; + DECLARE_WAITQUEUE(wait, tsk); + +#ifdef CONFIG_DEBUG_SEMAPHORE + printk("%s(%d): down failed(%p)\n", + tsk->comm, tsk->pid, sem); +#endif + + tsk->state = TASK_UNINTERRUPTIBLE; + wmb(); + add_wait_queue_exclusive(&sem->wait, &wait); + + /* + * Try to get the semaphore. 
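+ * (__sem_update_count(sem, -1) clamps a negative count to zero before
+ * adding the increment and hands back the old value, so each test
+ * below is against the pre-decrement count.)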
If the count is > 0, then we've + * got the semaphore; we decrement count and exit the loop. + * If the count is 0 or negative, we set it to -1, indicating + * that we are asleep, and then sleep. + */ + while (__sem_update_count(sem, -1) <= 0) { + schedule(); + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + } + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; + + /* + * If there are any more sleepers, wake one of them up so + * that it can either get the semaphore, or set count to -1 + * indicating that there are still processes sleeping. + */ + wake_up(&sem->wait); + +#ifdef CONFIG_DEBUG_SEMAPHORE + printk("%s(%d): down acquired(%p)\n", + tsk->comm, tsk->pid, sem); +#endif +} + +int __sched +__down_failed_interruptible(struct semaphore *sem) +{ + struct task_struct *tsk = current; + DECLARE_WAITQUEUE(wait, tsk); + long ret = 0; + +#ifdef CONFIG_DEBUG_SEMAPHORE + printk("%s(%d): down failed(%p)\n", + tsk->comm, tsk->pid, sem); +#endif + + tsk->state = TASK_INTERRUPTIBLE; + wmb(); + add_wait_queue_exclusive(&sem->wait, &wait); + + while (__sem_update_count(sem, -1) <= 0) { + if (signal_pending(current)) { + /* + * A signal is pending - give up trying. + * Set sem->count to 0 if it is negative, + * since we are no longer sleeping. + */ + __sem_update_count(sem, 0); + ret = -EINTR; + break; + } + schedule(); + set_task_state(tsk, TASK_INTERRUPTIBLE); + } + + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; + wake_up(&sem->wait); + +#ifdef CONFIG_DEBUG_SEMAPHORE + printk("%s(%d): down %s(%p)\n", + current->comm, current->pid, + (ret < 0 ? "interrupted" : "acquired"), sem); +#endif + return ret; +} + +void +__up_wakeup(struct semaphore *sem) +{ + /* + * Note that we incremented count in up() before we came here, + * but that was ineffective since the result was <= 0, and + * any negative value of count is equivalent to 0. + * This ends up setting count to 1, unless count is now > 0 + * (i.e. because some other cpu has called up() in the meantime), + * in which case we just increment count. + */ + __sem_update_count(sem, 1); + wake_up(&sem->wait); +} + +void __sched +down(struct semaphore *sem) +{ +#ifdef WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); +#endif +#ifdef CONFIG_DEBUG_SEMAPHORE + printk("%s(%d): down(%p) <count=%d> from %p\n", + current->comm, current->pid, sem, + atomic_read(&sem->count), __builtin_return_address(0)); +#endif + __down(sem); +} + +int __sched +down_interruptible(struct semaphore *sem) +{ +#ifdef WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); +#endif +#ifdef CONFIG_DEBUG_SEMAPHORE + printk("%s(%d): down(%p) <count=%d> from %p\n", + current->comm, current->pid, sem, + atomic_read(&sem->count), __builtin_return_address(0)); +#endif + return __down_interruptible(sem); +} + +int +down_trylock(struct semaphore *sem) +{ + int ret; + +#ifdef WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); +#endif + + ret = __down_trylock(sem); + +#ifdef CONFIG_DEBUG_SEMAPHORE + printk("%s(%d): down_trylock %s from %p\n", + current->comm, current->pid, + ret ? 
"failed" : "acquired", + __builtin_return_address(0)); +#endif + + return ret; +} + +void +up(struct semaphore *sem) +{ +#ifdef WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); +#endif +#ifdef CONFIG_DEBUG_SEMAPHORE + printk("%s(%d): up(%p) <count=%d> from %p\n", + current->comm, current->pid, sem, + atomic_read(&sem->count), __builtin_return_address(0)); +#endif + __up(sem); +} diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c new file mode 100644 index 0000000..b4e5f8f --- /dev/null +++ b/arch/alpha/kernel/setup.c @@ -0,0 +1,1486 @@ +/* + * linux/arch/alpha/kernel/setup.c + * + * Copyright (C) 1995 Linus Torvalds + */ + +/* 2.3.x bootmem, 1999 Andrea Arcangeli <andrea@suse.de> */ + +/* + * Bootup setup stuff. + */ + +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/stddef.h> +#include <linux/unistd.h> +#include <linux/ptrace.h> +#include <linux/slab.h> +#include <linux/user.h> +#include <linux/a.out.h> +#include <linux/tty.h> +#include <linux/delay.h> +#include <linux/config.h> /* CONFIG_ALPHA_LCA etc */ +#include <linux/mc146818rtc.h> +#include <linux/console.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/string.h> +#include <linux/ioport.h> +#include <linux/bootmem.h> +#include <linux/pci.h> +#include <linux/seq_file.h> +#include <linux/root_dev.h> +#include <linux/initrd.h> +#include <linux/eisa.h> +#ifdef CONFIG_MAGIC_SYSRQ +#include <linux/sysrq.h> +#include <linux/reboot.h> +#endif +#include <linux/notifier.h> +#include <asm/setup.h> +#include <asm/io.h> + +extern struct notifier_block *panic_notifier_list; +static int alpha_panic_event(struct notifier_block *, unsigned long, void *); +static struct notifier_block alpha_panic_block = { + alpha_panic_event, + NULL, + INT_MAX /* try to do it first */ +}; + +#include <asm/uaccess.h> +#include <asm/pgtable.h> +#include <asm/system.h> +#include <asm/hwrpb.h> +#include <asm/dma.h> +#include <asm/io.h> +#include <asm/mmu_context.h> +#include <asm/console.h> + +#include "proto.h" +#include "pci_impl.h" + + +struct hwrpb_struct *hwrpb; +unsigned long srm_hae; + +int alpha_l1i_cacheshape; +int alpha_l1d_cacheshape; +int alpha_l2_cacheshape; +int alpha_l3_cacheshape; + +#ifdef CONFIG_VERBOSE_MCHECK +/* 0=minimum, 1=verbose, 2=all */ +/* These can be overridden via the command line, ie "verbose_mcheck=2") */ +unsigned long alpha_verbose_mcheck = CONFIG_VERBOSE_MCHECK_ON; +#endif + +/* Which processor we booted from. */ +int boot_cpuid; + +/* + * Using SRM callbacks for initial console output. This works from + * setup_arch() time through the end of time_init(), as those places + * are under our (Alpha) control. + + * "srmcons" specified in the boot command arguments allows us to + * see kernel messages during the period of time before the true + * console device is "registered" during console_init(). + * As of this version (2.5.59), console_init() will call + * disable_early_printk() as the last action before initializing + * the console drivers. That's the last possible time srmcons can be + * unregistered without interfering with console behavior. + * + * By default, OFF; set it with a bootcommand arg of "srmcons" or + * "console=srm". The meaning of these two args is: + * "srmcons" - early callback prints + * "console=srm" - full callback based console, including early prints + */ +int srmcons_output = 0; + +/* Enforce a memory size limit; useful for testing. By default, none. 
*/ +unsigned long mem_size_limit = 0; + +/* Set AGP GART window size (0 means disabled). */ +unsigned long alpha_agpgart_size = DEFAULT_AGP_APER_SIZE; + +#ifdef CONFIG_ALPHA_GENERIC +struct alpha_machine_vector alpha_mv; +int alpha_using_srm; +#endif + +#define N(a) (sizeof(a)/sizeof(a[0])) + +static struct alpha_machine_vector *get_sysvec(unsigned long, unsigned long, + unsigned long); +static struct alpha_machine_vector *get_sysvec_byname(const char *); +static void get_sysnames(unsigned long, unsigned long, unsigned long, + char **, char **); +static void determine_cpu_caches (unsigned int); + +static char command_line[COMMAND_LINE_SIZE]; + +/* + * The format of "screen_info" is strange, and due to early + * i386-setup code. This is just enough to make the console + * code think we're on a VGA color display. + */ + +struct screen_info screen_info = { + .orig_x = 0, + .orig_y = 25, + .orig_video_cols = 80, + .orig_video_lines = 25, + .orig_video_isVGA = 1, + .orig_video_points = 16 +}; + +/* + * The direct map I/O window, if any. This should be the same + * for all busses, since it's used by virt_to_bus. + */ + +unsigned long __direct_map_base; +unsigned long __direct_map_size; + +/* + * Declare all of the machine vectors. + */ + +/* GCC 2.7.2 (on alpha at least) is lame. It does not support either + __attribute__((weak)) or #pragma weak. Bypass it and talk directly + to the assembler. */ + +#define WEAK(X) \ + extern struct alpha_machine_vector X; \ + asm(".weak "#X) + +WEAK(alcor_mv); +WEAK(alphabook1_mv); +WEAK(avanti_mv); +WEAK(cabriolet_mv); +WEAK(clipper_mv); +WEAK(dp264_mv); +WEAK(eb164_mv); +WEAK(eb64p_mv); +WEAK(eb66_mv); +WEAK(eb66p_mv); +WEAK(eiger_mv); +WEAK(jensen_mv); +WEAK(lx164_mv); +WEAK(lynx_mv); +WEAK(marvel_ev7_mv); +WEAK(miata_mv); +WEAK(mikasa_mv); +WEAK(mikasa_primo_mv); +WEAK(monet_mv); +WEAK(nautilus_mv); +WEAK(noname_mv); +WEAK(noritake_mv); +WEAK(noritake_primo_mv); +WEAK(p2k_mv); +WEAK(pc164_mv); +WEAK(privateer_mv); +WEAK(rawhide_mv); +WEAK(ruffian_mv); +WEAK(rx164_mv); +WEAK(sable_mv); +WEAK(sable_gamma_mv); +WEAK(shark_mv); +WEAK(sx164_mv); +WEAK(takara_mv); +WEAK(titan_mv); +WEAK(webbrick_mv); +WEAK(wildfire_mv); +WEAK(xl_mv); +WEAK(xlt_mv); + +#undef WEAK + +/* + * I/O resources inherited from PeeCees. Except for perhaps the + * turbochannel alphas, everyone has these on some sort of SuperIO chip. + * + * ??? If this becomes less standard, move the struct out into the + * machine vector. + */ + +static void __init +reserve_std_resources(void) +{ + static struct resource standard_io_resources[] = { + { .name = "rtc", .start = -1, .end = -1 }, + { .name = "dma1", .start = 0x00, .end = 0x1f }, + { .name = "pic1", .start = 0x20, .end = 0x3f }, + { .name = "timer", .start = 0x40, .end = 0x5f }, + { .name = "keyboard", .start = 0x60, .end = 0x6f }, + { .name = "dma page reg", .start = 0x80, .end = 0x8f }, + { .name = "pic2", .start = 0xa0, .end = 0xbf }, + { .name = "dma2", .start = 0xc0, .end = 0xdf }, + }; + + struct resource *io = &ioport_resource; + size_t i; + + if (hose_head) { + struct pci_controller *hose; + for (hose = hose_head; hose; hose = hose->next) + if (hose->index == 0) { + io = hose->io_space; + break; + } + } + + /* Fix up for the Jensen's queer RTC placement. 
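+   (The rtc entry was initialized with start = -1 above so that it
+   can be filled in from RTC_PORT(0) at run time rather than being
+   hard-coded like the other legacy ports.)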
*/ + standard_io_resources[0].start = RTC_PORT(0); + standard_io_resources[0].end = RTC_PORT(0) + 0x10; + + for (i = 0; i < N(standard_io_resources); ++i) + request_resource(io, standard_io_resources+i); +} + +#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT) +#define PFN_DOWN(x) ((x) >> PAGE_SHIFT) +#define PFN_PHYS(x) ((x) << PAGE_SHIFT) +#define PFN_MAX PFN_DOWN(0x80000000) +#define for_each_mem_cluster(memdesc, cluster, i) \ + for ((cluster) = (memdesc)->cluster, (i) = 0; \ + (i) < (memdesc)->numclusters; (i)++, (cluster)++) + +static unsigned long __init +get_mem_size_limit(char *s) +{ + unsigned long end = 0; + char *from = s; + + end = simple_strtoul(from, &from, 0); + if ( *from == 'K' || *from == 'k' ) { + end = end << 10; + from++; + } else if ( *from == 'M' || *from == 'm' ) { + end = end << 20; + from++; + } else if ( *from == 'G' || *from == 'g' ) { + end = end << 30; + from++; + } + return end >> PAGE_SHIFT; /* Return the PFN of the limit. */ +} + +#ifdef CONFIG_BLK_DEV_INITRD +void * __init +move_initrd(unsigned long mem_limit) +{ + void *start; + unsigned long size; + + size = initrd_end - initrd_start; + start = __alloc_bootmem(PAGE_ALIGN(size), PAGE_SIZE, 0); + if (!start || __pa(start) + size > mem_limit) { + initrd_start = initrd_end = 0; + return NULL; + } + memmove(start, (void *)initrd_start, size); + initrd_start = (unsigned long)start; + initrd_end = initrd_start + size; + printk("initrd moved to %p\n", start); + return start; +} +#endif + +#ifndef CONFIG_DISCONTIGMEM +static void __init +setup_memory(void *kernel_end) +{ + struct memclust_struct * cluster; + struct memdesc_struct * memdesc; + unsigned long start_kernel_pfn, end_kernel_pfn; + unsigned long bootmap_size, bootmap_pages, bootmap_start; + unsigned long start, end; + unsigned long i; + + /* Find free clusters, and init and free the bootmem accordingly. */ + memdesc = (struct memdesc_struct *) + (hwrpb->mddt_offset + (unsigned long) hwrpb); + + for_each_mem_cluster(memdesc, cluster, i) { + printk("memcluster %lu, usage %01lx, start %8lu, end %8lu\n", + i, cluster->usage, cluster->start_pfn, + cluster->start_pfn + cluster->numpages); + + /* Bit 0 is console/PALcode reserved. Bit 1 is + non-volatile memory -- we might want to mark + this for later. */ + if (cluster->usage & 3) + continue; + + end = cluster->start_pfn + cluster->numpages; + if (end > max_low_pfn) + max_low_pfn = end; + } + + /* + * Except for the NUMA systems (wildfire, marvel) all of the + * Alpha systems we run on support 32GB of memory or less. + * Since the NUMA systems introduce large holes in memory addressing, + * we can get into a situation where there is not enough contiguous + * memory for the memory map. + * + * Limit memory to the first 32GB to limit the NUMA systems to + * memory on their first node (wildfire) or 2 (marvel) to avoid + * not being able to produce the memory map. In order to access + * all of the memory on the NUMA systems, build with discontiguous + * memory support. + * + * If the user specified a memory limit, let that memory limit stand. + */ + if (!mem_size_limit) + mem_size_limit = (32ul * 1024 * 1024 * 1024) >> PAGE_SHIFT; + + if (mem_size_limit && max_low_pfn >= mem_size_limit) + { + printk("setup: forcing memory size to %ldK (from %ldK).\n", + mem_size_limit << (PAGE_SHIFT - 10), + max_low_pfn << (PAGE_SHIFT - 10)); + max_low_pfn = mem_size_limit; + } + + /* Find the bounds of kernel memory. 
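+   The scan below has to find bootmap_pages of contiguous free space
+   that avoids both the kernel image and the reserved clusters; if
+   nothing fits, max_low_pfn is halved and the search retried.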
*/ + start_kernel_pfn = PFN_DOWN(KERNEL_START_PHYS); + end_kernel_pfn = PFN_UP(virt_to_phys(kernel_end)); + bootmap_start = -1; + + try_again: + if (max_low_pfn <= end_kernel_pfn) + panic("not enough memory to boot"); + + /* We need to know how many physically contiguous pages + we'll need for the bootmap. */ + bootmap_pages = bootmem_bootmap_pages(max_low_pfn); + + /* Now find a good region where to allocate the bootmap. */ + for_each_mem_cluster(memdesc, cluster, i) { + if (cluster->usage & 3) + continue; + + start = cluster->start_pfn; + end = start + cluster->numpages; + if (start >= max_low_pfn) + continue; + if (end > max_low_pfn) + end = max_low_pfn; + if (start < start_kernel_pfn) { + if (end > end_kernel_pfn + && end - end_kernel_pfn >= bootmap_pages) { + bootmap_start = end_kernel_pfn; + break; + } else if (end > start_kernel_pfn) + end = start_kernel_pfn; + } else if (start < end_kernel_pfn) + start = end_kernel_pfn; + if (end - start >= bootmap_pages) { + bootmap_start = start; + break; + } + } + + if (bootmap_start == ~0UL) { + max_low_pfn >>= 1; + goto try_again; + } + + /* Allocate the bootmap and mark the whole MM as reserved. */ + bootmap_size = init_bootmem(bootmap_start, max_low_pfn); + + /* Mark the free regions. */ + for_each_mem_cluster(memdesc, cluster, i) { + if (cluster->usage & 3) + continue; + + start = cluster->start_pfn; + end = cluster->start_pfn + cluster->numpages; + if (start >= max_low_pfn) + continue; + if (end > max_low_pfn) + end = max_low_pfn; + if (start < start_kernel_pfn) { + if (end > end_kernel_pfn) { + free_bootmem(PFN_PHYS(start), + (PFN_PHYS(start_kernel_pfn) + - PFN_PHYS(start))); + printk("freeing pages %ld:%ld\n", + start, start_kernel_pfn); + start = end_kernel_pfn; + } else if (end > start_kernel_pfn) + end = start_kernel_pfn; + } else if (start < end_kernel_pfn) + start = end_kernel_pfn; + if (start >= end) + continue; + + free_bootmem(PFN_PHYS(start), PFN_PHYS(end) - PFN_PHYS(start)); + printk("freeing pages %ld:%ld\n", start, end); + } + + /* Reserve the bootmap memory. */ + reserve_bootmem(PFN_PHYS(bootmap_start), bootmap_size); + printk("reserving pages %ld:%ld\n", bootmap_start, bootmap_start+PFN_UP(bootmap_size)); + +#ifdef CONFIG_BLK_DEV_INITRD + initrd_start = INITRD_START; + if (initrd_start) { + initrd_end = initrd_start+INITRD_SIZE; + printk("Initial ramdisk at: 0x%p (%lu bytes)\n", + (void *) initrd_start, INITRD_SIZE); + + if ((void *)initrd_end > phys_to_virt(PFN_PHYS(max_low_pfn))) { + if (!move_initrd(PFN_PHYS(max_low_pfn))) + printk("initrd extends beyond end of memory " + "(0x%08lx > 0x%p)\ndisabling initrd\n", + initrd_end, + phys_to_virt(PFN_PHYS(max_low_pfn))); + } else { + reserve_bootmem(virt_to_phys((void *)initrd_start), + INITRD_SIZE); + } + } +#endif /* CONFIG_BLK_DEV_INITRD */ +} +#else +extern void setup_memory(void *); +#endif /* !CONFIG_DISCONTIGMEM */ + +int __init +page_is_ram(unsigned long pfn) +{ + struct memclust_struct * cluster; + struct memdesc_struct * memdesc; + unsigned long i; + + memdesc = (struct memdesc_struct *) + (hwrpb->mddt_offset + (unsigned long) hwrpb); + for_each_mem_cluster(memdesc, cluster, i) + { + if (pfn >= cluster->start_pfn && + pfn < cluster->start_pfn + cluster->numpages) { + return (cluster->usage & 3) ? 
0 : 1;
+ }
+ }
+
+ return 0;
+}
+
+#undef PFN_UP
+#undef PFN_DOWN
+#undef PFN_PHYS
+#undef PFN_MAX
+
+void __init
+setup_arch(char **cmdline_p)
+{
+ extern char _end[];
+
+ struct alpha_machine_vector *vec = NULL;
+ struct percpu_struct *cpu;
+ char *type_name, *var_name, *p;
+ void *kernel_end = _end; /* end of kernel */
+ char *args = command_line;
+
+ hwrpb = (struct hwrpb_struct*) __va(INIT_HWRPB->phys_addr);
+ boot_cpuid = hard_smp_processor_id();
+
+ /*
+ * Pre-process the system type to make sure it will be valid.
+ *
+ * This may restore real CABRIO and EB66+ family names, ie
+ * EB64+ and EB66.
+ *
+ * Oh, and "white box" AS800 (aka DIGITAL Server 3000 series)
+ * and AS1200 (DIGITAL Server 5000 series) have the type as
+ * the negative of the real one.
+ */
+ if ((long)hwrpb->sys_type < 0) {
+ hwrpb->sys_type = -((long)hwrpb->sys_type);
+ hwrpb_update_checksum(hwrpb);
+ }
+
+ /* Register a call for panic conditions. */
+ notifier_chain_register(&panic_notifier_list, &alpha_panic_block);
+
+#ifdef CONFIG_ALPHA_GENERIC
+ /* Assume that we've booted from SRM if we haven't booted from MILO.
+ Detect the latter by looking for "MILO" in the system serial nr. */
+ alpha_using_srm = strncmp((const char *)hwrpb->ssn, "MILO", 4) != 0;
+#endif
+
+ /* If we are using SRM, we want to allow callbacks
+ as early as possible, so do this NOW, and then
+ they should work immediately thereafter.
+ */
+ kernel_end = callback_init(kernel_end);
+
+ /*
+ * Locate the command line.
+ */
+ /* Hack for Jensen... since we're restricted to 8 or 16 chars for
+ boot flags depending on the boot mode, we need some shorthand.
+ This should do for installation. */
+ if (strcmp(COMMAND_LINE, "INSTALL") == 0) {
+ strlcpy(command_line, "root=/dev/fd0 load_ramdisk=1", sizeof command_line);
+ } else {
+ strlcpy(command_line, COMMAND_LINE, sizeof command_line);
+ }
+ strcpy(saved_command_line, command_line);
+ *cmdline_p = command_line;
+
+ /*
+ * Process command-line arguments.
+ */
+ while ((p = strsep(&args, " \t")) != NULL) {
+ if (!*p) continue;
+ if (strncmp(p, "alpha_mv=", 9) == 0) {
+ vec = get_sysvec_byname(p+9);
+ continue;
+ }
+ if (strncmp(p, "cycle=", 6) == 0) {
+ est_cycle_freq = simple_strtol(p+6, NULL, 0);
+ continue;
+ }
+ if (strncmp(p, "mem=", 4) == 0) {
+ mem_size_limit = get_mem_size_limit(p+4);
+ continue;
+ }
+ if (strncmp(p, "srmcons", 7) == 0) {
+ srmcons_output |= 1;
+ continue;
+ }
+ if (strncmp(p, "console=srm", 11) == 0) {
+ srmcons_output |= 2;
+ continue;
+ }
+ if (strncmp(p, "gartsize=", 9) == 0) {
+ alpha_agpgart_size =
+ get_mem_size_limit(p+9) << PAGE_SHIFT;
+ continue;
+ }
+#ifdef CONFIG_VERBOSE_MCHECK
+ if (strncmp(p, "verbose_mcheck=", 15) == 0) {
+ alpha_verbose_mcheck = simple_strtol(p+15, NULL, 0);
+ continue;
+ }
+#endif
+ }
+
+ /* Replace the command line, now that we've killed it with strsep. */
+ strcpy(command_line, saved_command_line);
+
+ /* If we want SRM console printk echoing early, do it now. */
+ if (alpha_using_srm && srmcons_output) {
+ register_srm_console();
+
+ /*
+ * If "console=srm" was specified, clear the srmcons_output
+ * flag now so that time.c won't unregister_srm_console
+ */
+ if (srmcons_output & 2)
+ srmcons_output = 0;
+ }
+
+#ifdef CONFIG_MAGIC_SYSRQ
+ /* If we're using SRM, make sysrq-b halt back to the prom,
+ not auto-reboot. */
+ if (alpha_using_srm) {
+ struct sysrq_key_op *op = __sysrq_get_key_op('b');
+ op->handler = (void *) machine_halt;
+ }
+#endif
+
+ /*
+ * Identify and reconfigure for the current system.
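+ * The boot processor's percpu slot hangs off the HWRPB; its type
+ * code selects the printable system names and, unless an explicit
+ * "alpha_mv=" argument already chose one, the machine vector too.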
+ */ + cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset); + + get_sysnames(hwrpb->sys_type, hwrpb->sys_variation, + cpu->type, &type_name, &var_name); + if (*var_name == '0') + var_name = ""; + + if (!vec) { + vec = get_sysvec(hwrpb->sys_type, hwrpb->sys_variation, + cpu->type); + } + + if (!vec) { + panic("Unsupported system type: %s%s%s (%ld %ld)\n", + type_name, (*var_name ? " variation " : ""), var_name, + hwrpb->sys_type, hwrpb->sys_variation); + } + if (vec != &alpha_mv) { + alpha_mv = *vec; + } + + printk("Booting " +#ifdef CONFIG_ALPHA_GENERIC + "GENERIC " +#endif + "on %s%s%s using machine vector %s from %s\n", + type_name, (*var_name ? " variation " : ""), + var_name, alpha_mv.vector_name, + (alpha_using_srm ? "SRM" : "MILO")); + + printk("Major Options: " +#ifdef CONFIG_SMP + "SMP " +#endif +#ifdef CONFIG_ALPHA_EV56 + "EV56 " +#endif +#ifdef CONFIG_ALPHA_EV67 + "EV67 " +#endif +#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS + "LEGACY_START " +#endif +#ifdef CONFIG_VERBOSE_MCHECK + "VERBOSE_MCHECK " +#endif + +#ifdef CONFIG_DISCONTIGMEM + "DISCONTIGMEM " +#ifdef CONFIG_NUMA + "NUMA " +#endif +#endif + +#ifdef CONFIG_DEBUG_SPINLOCK + "DEBUG_SPINLOCK " +#endif +#ifdef CONFIG_MAGIC_SYSRQ + "MAGIC_SYSRQ " +#endif + "\n"); + + printk("Command line: %s\n", command_line); + + /* + * Sync up the HAE. + * Save the SRM's current value for restoration. + */ + srm_hae = *alpha_mv.hae_register; + __set_hae(alpha_mv.hae_cache); + + /* Reset enable correctable error reports. */ + wrmces(0x7); + + /* Find our memory. */ + setup_memory(kernel_end); + + /* First guess at cpu cache sizes. Do this before init_arch. */ + determine_cpu_caches(cpu->type); + + /* Initialize the machine. Usually has to do with setting up + DMA windows and the like. */ + if (alpha_mv.init_arch) + alpha_mv.init_arch(); + + /* Reserve standard resources. */ + reserve_std_resources(); + + /* + * Give us a default console. TGA users will see nothing until + * chr_dev_init is called, rather late in the boot sequence. + */ + +#ifdef CONFIG_VT +#if defined(CONFIG_VGA_CONSOLE) + conswitchp = &vga_con; +#elif defined(CONFIG_DUMMY_CONSOLE) + conswitchp = &dummy_con; +#endif +#endif + + /* Default root filesystem to sda2. */ + ROOT_DEV = Root_SDA2; + +#ifdef CONFIG_EISA + /* FIXME: only set this when we actually have EISA in this box? */ + EISA_bus = 1; +#endif + + /* + * Check ASN in HWRPB for validity, report if bad. + * FIXME: how was this failing? Should we trust it instead, + * and copy the value into alpha_mv.max_asn? + */ + + if (hwrpb->max_asn != MAX_ASN) { + printk("Max ASN from HWRPB is bad (0x%lx)\n", hwrpb->max_asn); + } + + /* + * Identify the flock of penguins. 
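+ * That is, probe for secondary CPUs on SMP builds before
+ * paging_init() finishes the bootstrap.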
+ */ + +#ifdef CONFIG_SMP + setup_smp(); +#endif + paging_init(); +} + +void __init +disable_early_printk(void) +{ + if (alpha_using_srm && srmcons_output) { + unregister_srm_console(); + srmcons_output = 0; + } +} + +static char sys_unknown[] = "Unknown"; +static char systype_names[][16] = { + "0", + "ADU", "Cobra", "Ruby", "Flamingo", "Mannequin", "Jensen", + "Pelican", "Morgan", "Sable", "Medulla", "Noname", + "Turbolaser", "Avanti", "Mustang", "Alcor", "Tradewind", + "Mikasa", "EB64", "EB66", "EB64+", "AlphaBook1", + "Rawhide", "K2", "Lynx", "XL", "EB164", "Noritake", + "Cortex", "29", "Miata", "XXM", "Takara", "Yukon", + "Tsunami", "Wildfire", "CUSCO", "Eiger", "Titan", "Marvel" +}; + +static char unofficial_names[][8] = {"100", "Ruffian"}; + +static char api_names[][16] = {"200", "Nautilus"}; + +static char eb164_names[][8] = {"EB164", "PC164", "LX164", "SX164", "RX164"}; +static int eb164_indices[] = {0,0,0,1,1,1,1,1,2,2,2,2,3,3,3,3,4}; + +static char alcor_names[][16] = {"Alcor", "Maverick", "Bret"}; +static int alcor_indices[] = {0,0,0,1,1,1,0,0,0,0,0,0,2,2,2,2,2,2}; + +static char eb64p_names[][16] = {"EB64+", "Cabriolet", "AlphaPCI64"}; +static int eb64p_indices[] = {0,0,1,2}; + +static char eb66_names[][8] = {"EB66", "EB66+"}; +static int eb66_indices[] = {0,0,1}; + +static char marvel_names[][16] = { + "Marvel/EV7" +}; +static int marvel_indices[] = { 0 }; + +static char rawhide_names[][16] = { + "Dodge", "Wrangler", "Durango", "Tincup", "DaVinci" +}; +static int rawhide_indices[] = {0,0,0,1,1,2,2,3,3,4,4}; + +static char titan_names[][16] = { + "DEFAULT", "Privateer", "Falcon", "Granite" +}; +static int titan_indices[] = {0,1,2,2,3}; + +static char tsunami_names[][16] = { + "0", "DP264", "Warhol", "Windjammer", "Monet", "Clipper", + "Goldrush", "Webbrick", "Catamaran", "Brisbane", "Melbourne", + "Flying Clipper", "Shark" +}; +static int tsunami_indices[] = {0,1,2,3,4,5,6,7,8,9,10,11,12}; + +static struct alpha_machine_vector * __init +get_sysvec(unsigned long type, unsigned long variation, unsigned long cpu) +{ + static struct alpha_machine_vector *systype_vecs[] __initdata = + { + NULL, /* 0 */ + NULL, /* ADU */ + NULL, /* Cobra */ + NULL, /* Ruby */ + NULL, /* Flamingo */ + NULL, /* Mannequin */ + &jensen_mv, + NULL, /* Pelican */ + NULL, /* Morgan */ + NULL, /* Sable -- see below. */ + NULL, /* Medulla */ + &noname_mv, + NULL, /* Turbolaser */ + &avanti_mv, + NULL, /* Mustang */ + NULL, /* Alcor, Bret, Maverick. HWRPB inaccurate? */ + NULL, /* Tradewind */ + NULL, /* Mikasa -- see below. */ + NULL, /* EB64 */ + NULL, /* EB66 -- see variation. */ + NULL, /* EB64+ -- see variation. */ + &alphabook1_mv, + &rawhide_mv, + NULL, /* K2 */ + &lynx_mv, /* Lynx */ + &xl_mv, + NULL, /* EB164 -- see variation. */ + NULL, /* Noritake -- see below. */ + NULL, /* Cortex */ + NULL, /* 29 */ + &miata_mv, + NULL, /* XXM */ + &takara_mv, + NULL, /* Yukon */ + NULL, /* Tsunami -- see variation. 
*/ + &wildfire_mv, /* Wildfire */ + NULL, /* CUSCO */ + &eiger_mv, /* Eiger */ + NULL, /* Titan */ + NULL, /* Marvel */ + }; + + static struct alpha_machine_vector *unofficial_vecs[] __initdata = + { + NULL, /* 100 */ + &ruffian_mv, + }; + + static struct alpha_machine_vector *api_vecs[] __initdata = + { + NULL, /* 200 */ + &nautilus_mv, + }; + + static struct alpha_machine_vector *alcor_vecs[] __initdata = + { + &alcor_mv, &xlt_mv, &xlt_mv + }; + + static struct alpha_machine_vector *eb164_vecs[] __initdata = + { + &eb164_mv, &pc164_mv, &lx164_mv, &sx164_mv, &rx164_mv + }; + + static struct alpha_machine_vector *eb64p_vecs[] __initdata = + { + &eb64p_mv, + &cabriolet_mv, + &cabriolet_mv /* AlphaPCI64 */ + }; + + static struct alpha_machine_vector *eb66_vecs[] __initdata = + { + &eb66_mv, + &eb66p_mv + }; + + static struct alpha_machine_vector *marvel_vecs[] __initdata = + { + &marvel_ev7_mv, + }; + + static struct alpha_machine_vector *titan_vecs[] __initdata = + { + &titan_mv, /* default */ + &privateer_mv, /* privateer */ + &titan_mv, /* falcon */ + &privateer_mv, /* granite */ + }; + + static struct alpha_machine_vector *tsunami_vecs[] __initdata = + { + NULL, + &dp264_mv, /* dp264 */ + &dp264_mv, /* warhol */ + &dp264_mv, /* windjammer */ + &monet_mv, /* monet */ + &clipper_mv, /* clipper */ + &dp264_mv, /* goldrush */ + &webbrick_mv, /* webbrick */ + &dp264_mv, /* catamaran */ + NULL, /* brisbane? */ + NULL, /* melbourne? */ + NULL, /* flying clipper? */ + &shark_mv, /* shark */ + }; + + /* ??? Do we need to distinguish between Rawhides? */ + + struct alpha_machine_vector *vec; + + /* Search the system tables first... */ + vec = NULL; + if (type < N(systype_vecs)) { + vec = systype_vecs[type]; + } else if ((type > ST_API_BIAS) && + (type - ST_API_BIAS) < N(api_vecs)) { + vec = api_vecs[type - ST_API_BIAS]; + } else if ((type > ST_UNOFFICIAL_BIAS) && + (type - ST_UNOFFICIAL_BIAS) < N(unofficial_vecs)) { + vec = unofficial_vecs[type - ST_UNOFFICIAL_BIAS]; + } + + /* If we've not found one, try for a variation. */ + + if (!vec) { + /* Member ID is a bit-field. */ + unsigned long member = (variation >> 10) & 0x3f; + + cpu &= 0xffffffff; /* make it usable */ + + switch (type) { + case ST_DEC_ALCOR: + if (member < N(alcor_indices)) + vec = alcor_vecs[alcor_indices[member]]; + break; + case ST_DEC_EB164: + if (member < N(eb164_indices)) + vec = eb164_vecs[eb164_indices[member]]; + /* PC164 may show as EB164 variation with EV56 CPU, + but, since no true EB164 had anything but EV5... 
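+ so an EV56 wearing the EB164 label is taken below to really be a PC164.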
*/ + if (vec == &eb164_mv && cpu == EV56_CPU) + vec = &pc164_mv; + break; + case ST_DEC_EB64P: + if (member < N(eb64p_indices)) + vec = eb64p_vecs[eb64p_indices[member]]; + break; + case ST_DEC_EB66: + if (member < N(eb66_indices)) + vec = eb66_vecs[eb66_indices[member]]; + break; + case ST_DEC_MARVEL: + if (member < N(marvel_indices)) + vec = marvel_vecs[marvel_indices[member]]; + break; + case ST_DEC_TITAN: + vec = titan_vecs[0]; /* default */ + if (member < N(titan_indices)) + vec = titan_vecs[titan_indices[member]]; + break; + case ST_DEC_TSUNAMI: + if (member < N(tsunami_indices)) + vec = tsunami_vecs[tsunami_indices[member]]; + break; + case ST_DEC_1000: + if (cpu == EV5_CPU || cpu == EV56_CPU) + vec = &mikasa_primo_mv; + else + vec = &mikasa_mv; + break; + case ST_DEC_NORITAKE: + if (cpu == EV5_CPU || cpu == EV56_CPU) + vec = &noritake_primo_mv; + else + vec = &noritake_mv; + break; + case ST_DEC_2100_A500: + if (cpu == EV5_CPU || cpu == EV56_CPU) + vec = &sable_gamma_mv; + else + vec = &sable_mv; + break; + } + } + return vec; +} + +static struct alpha_machine_vector * __init +get_sysvec_byname(const char *name) +{ + static struct alpha_machine_vector *all_vecs[] __initdata = + { + &alcor_mv, + &alphabook1_mv, + &avanti_mv, + &cabriolet_mv, + &clipper_mv, + &dp264_mv, + &eb164_mv, + &eb64p_mv, + &eb66_mv, + &eb66p_mv, + &eiger_mv, + &jensen_mv, + &lx164_mv, + &lynx_mv, + &miata_mv, + &mikasa_mv, + &mikasa_primo_mv, + &monet_mv, + &nautilus_mv, + &noname_mv, + &noritake_mv, + &noritake_primo_mv, + &p2k_mv, + &pc164_mv, + &privateer_mv, + &rawhide_mv, + &ruffian_mv, + &rx164_mv, + &sable_mv, + &sable_gamma_mv, + &shark_mv, + &sx164_mv, + &takara_mv, + &webbrick_mv, + &wildfire_mv, + &xl_mv, + &xlt_mv + }; + + size_t i; + + for (i = 0; i < N(all_vecs); ++i) { + struct alpha_machine_vector *mv = all_vecs[i]; + if (strcasecmp(mv->vector_name, name) == 0) + return mv; + } + return NULL; +} + +static void +get_sysnames(unsigned long type, unsigned long variation, unsigned long cpu, + char **type_name, char **variation_name) +{ + unsigned long member; + + /* If not in the tables, make it UNKNOWN, + else set type name to family */ + if (type < N(systype_names)) { + *type_name = systype_names[type]; + } else if ((type > ST_API_BIAS) && + (type - ST_API_BIAS) < N(api_names)) { + *type_name = api_names[type - ST_API_BIAS]; + } else if ((type > ST_UNOFFICIAL_BIAS) && + (type - ST_UNOFFICIAL_BIAS) < N(unofficial_names)) { + *type_name = unofficial_names[type - ST_UNOFFICIAL_BIAS]; + } else { + *type_name = sys_unknown; + *variation_name = sys_unknown; + return; + } + + /* Set variation to "0"; if variation is zero, done. */ + *variation_name = systype_names[0]; + if (variation == 0) { + return; + } + + member = (variation >> 10) & 0x3f; /* member ID is a bit-field */ + + cpu &= 0xffffffff; /* make it usable */ + + switch (type) { /* select by family */ + default: /* default to variation "0" for now */ + break; + case ST_DEC_EB164: + if (member < N(eb164_indices)) + *variation_name = eb164_names[eb164_indices[member]]; + /* PC164 may show as EB164 variation, but with EV56 CPU, + so, since no true EB164 had anything but EV5... 
*/ + if (eb164_indices[member] == 0 && cpu == EV56_CPU) + *variation_name = eb164_names[1]; /* make it PC164 */ + break; + case ST_DEC_ALCOR: + if (member < N(alcor_indices)) + *variation_name = alcor_names[alcor_indices[member]]; + break; + case ST_DEC_EB64P: + if (member < N(eb64p_indices)) + *variation_name = eb64p_names[eb64p_indices[member]]; + break; + case ST_DEC_EB66: + if (member < N(eb66_indices)) + *variation_name = eb66_names[eb66_indices[member]]; + break; + case ST_DEC_MARVEL: + if (member < N(marvel_indices)) + *variation_name = marvel_names[marvel_indices[member]]; + break; + case ST_DEC_RAWHIDE: + if (member < N(rawhide_indices)) + *variation_name = rawhide_names[rawhide_indices[member]]; + break; + case ST_DEC_TITAN: + *variation_name = titan_names[0]; /* default */ + if (member < N(titan_indices)) + *variation_name = titan_names[titan_indices[member]]; + break; + case ST_DEC_TSUNAMI: + if (member < N(tsunami_indices)) + *variation_name = tsunami_names[tsunami_indices[member]]; + break; + } +} + +/* + * A change was made to the HWRPB via an ECO and the following code + * tracks a part of the ECO. In HWRPB versions less than 5, the ECO + * was not implemented in the console firmware. If it's revision 5 or + * greater we can get the name of the platform as an ASCII string from + * the HWRPB. That's what this function does. It checks the revision + * level and if the string is in the HWRPB it returns the address of + * the string--a pointer to the name of the platform. + * + * Returns: + * - Pointer to a ASCII string if it's in the HWRPB + * - Pointer to a blank string if the data is not in the HWRPB. + */ + +static char * +platform_string(void) +{ + struct dsr_struct *dsr; + static char unk_system_string[] = "N/A"; + + /* Go to the console for the string pointer. + * If the rpb_vers is not 5 or greater the rpb + * is old and does not have this data in it. + */ + if (hwrpb->revision < 5) + return (unk_system_string); + else { + /* The Dynamic System Recognition struct + * has the system platform name starting + * after the character count of the string. 
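+ * (sysname_off points at a quadword length count, hence the
+ * sizeof(long) skip below to reach the ASCII name itself.)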
+ */ + dsr = ((struct dsr_struct *) + ((char *)hwrpb + hwrpb->dsr_offset)); + return ((char *)dsr + (dsr->sysname_off + + sizeof(long))); + } +} + +static int +get_nr_processors(struct percpu_struct *cpubase, unsigned long num) +{ + struct percpu_struct *cpu; + unsigned long i; + int count = 0; + + for (i = 0; i < num; i++) { + cpu = (struct percpu_struct *) + ((char *)cpubase + i*hwrpb->processor_size); + if ((cpu->flags & 0x1cc) == 0x1cc) + count++; + } + return count; +} + +static void +show_cache_size (struct seq_file *f, const char *which, int shape) +{ + if (shape == -1) + seq_printf (f, "%s\t\t: n/a\n", which); + else if (shape == 0) + seq_printf (f, "%s\t\t: unknown\n", which); + else + seq_printf (f, "%s\t\t: %dK, %d-way, %db line\n", + which, shape >> 10, shape & 15, + 1 << ((shape >> 4) & 15)); +} + +static int +show_cpuinfo(struct seq_file *f, void *slot) +{ + extern struct unaligned_stat { + unsigned long count, va, pc; + } unaligned[2]; + + static char cpu_names[][8] = { + "EV3", "EV4", "Simulate", "LCA4", "EV5", "EV45", "EV56", + "EV6", "PCA56", "PCA57", "EV67", "EV68CB", "EV68AL", + "EV68CX", "EV7", "EV79", "EV69" + }; + + struct percpu_struct *cpu = slot; + unsigned int cpu_index; + char *cpu_name; + char *systype_name; + char *sysvariation_name; + int nr_processors; + + cpu_index = (unsigned) (cpu->type - 1); + cpu_name = "Unknown"; + if (cpu_index < N(cpu_names)) + cpu_name = cpu_names[cpu_index]; + + get_sysnames(hwrpb->sys_type, hwrpb->sys_variation, + cpu->type, &systype_name, &sysvariation_name); + + nr_processors = get_nr_processors(cpu, hwrpb->nr_processors); + + seq_printf(f, "cpu\t\t\t: Alpha\n" + "cpu model\t\t: %s\n" + "cpu variation\t\t: %ld\n" + "cpu revision\t\t: %ld\n" + "cpu serial number\t: %s\n" + "system type\t\t: %s\n" + "system variation\t: %s\n" + "system revision\t\t: %ld\n" + "system serial number\t: %s\n" + "cycle frequency [Hz]\t: %lu %s\n" + "timer frequency [Hz]\t: %lu.%02lu\n" + "page size [bytes]\t: %ld\n" + "phys. address bits\t: %ld\n" + "max. addr. space #\t: %ld\n" + "BogoMIPS\t\t: %lu.%02lu\n" + "kernel unaligned acc\t: %ld (pc=%lx,va=%lx)\n" + "user unaligned acc\t: %ld (pc=%lx,va=%lx)\n" + "platform string\t\t: %s\n" + "cpus detected\t\t: %d\n", + cpu_name, cpu->variation, cpu->revision, + (char*)cpu->serial_no, + systype_name, sysvariation_name, hwrpb->sys_revision, + (char*)hwrpb->ssn, + est_cycle_freq ? : hwrpb->cycle_freq, + est_cycle_freq ? "est." : "", + hwrpb->intr_freq / 4096, + (100 * hwrpb->intr_freq / 4096) % 100, + hwrpb->pagesize, + hwrpb->pa_bits, + hwrpb->max_asn, + loops_per_jiffy / (500000/HZ), + (loops_per_jiffy / (5000/HZ)) % 100, + unaligned[0].count, unaligned[0].pc, unaligned[0].va, + unaligned[1].count, unaligned[1].pc, unaligned[1].va, + platform_string(), nr_processors); + +#ifdef CONFIG_SMP + seq_printf(f, "cpus active\t\t: %d\n" + "cpu active mask\t\t: %016lx\n", + num_online_cpus(), cpus_addr(cpu_possible_map)[0]); +#endif + + show_cache_size (f, "L1 Icache", alpha_l1i_cacheshape); + show_cache_size (f, "L1 Dcache", alpha_l1d_cacheshape); + show_cache_size (f, "L2 cache", alpha_l2_cacheshape); + show_cache_size (f, "L3 cache", alpha_l3_cacheshape); + + return 0; +} + +static int __init +read_mem_block(int *addr, int stride, int size) +{ + long nloads = size / stride, cnt, tmp; + + __asm__ __volatile__( + " rpcc %0\n" + "1: ldl %3,0(%2)\n" + " subq %1,1,%1\n" + /* Next two XORs introduce an explicit data dependency between + consecutive loads in the loop, which will give us true load + latency. 
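+
+	   In C terms, each iteration behaves roughly like (sketch):
+
+		tmp  = *addr;				/* ldl  */
+		addr = (int *)((long)addr ^ tmp ^ tmp);	/* two xors: net no-op */
+		addr = (int *)((char *)addr + stride);	/* addq */
+
+	   The double XOR leaves the address unchanged but makes each
+	   load's address depend on the previous load's data, so the
+	   loads cannot be overlapped and we time the full latency.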
*/ + " xor %3,%2,%2\n" + " xor %3,%2,%2\n" + " addq %2,%4,%2\n" + " bne %1,1b\n" + " rpcc %3\n" + " subl %3,%0,%0\n" + : "=&r" (cnt), "=&r" (nloads), "=&r" (addr), "=&r" (tmp) + : "r" (stride), "1" (nloads), "2" (addr)); + + return cnt / (size / stride); +} + +#define CSHAPE(totalsize, linesize, assoc) \ + ((totalsize & ~0xff) | (linesize << 4) | assoc) + +/* ??? EV5 supports up to 64M, but did the systems with more than + 16M of BCACHE ever exist? */ +#define MAX_BCACHE_SIZE 16*1024*1024 + +/* Note that the offchip caches are direct mapped on all Alphas. */ +static int __init +external_cache_probe(int minsize, int width) +{ + int cycles, prev_cycles = 1000000; + int stride = 1 << width; + long size = minsize, maxsize = MAX_BCACHE_SIZE * 2; + + if (maxsize > (max_low_pfn + 1) << PAGE_SHIFT) + maxsize = 1 << (floor_log2(max_low_pfn + 1) + PAGE_SHIFT); + + /* Get the first block cached. */ + read_mem_block(__va(0), stride, size); + + while (size < maxsize) { + /* Get an average load latency in cycles. */ + cycles = read_mem_block(__va(0), stride, size); + if (cycles > prev_cycles * 2) { + /* Fine, we exceed the cache. */ + printk("%ldK Bcache detected; load hit latency %d " + "cycles, load miss latency %d cycles\n", + size >> 11, prev_cycles, cycles); + return CSHAPE(size >> 1, width, 1); + } + /* Try to get the next block cached. */ + read_mem_block(__va(size), stride, size); + prev_cycles = cycles; + size <<= 1; + } + return -1; /* No BCACHE found. */ +} + +static void __init +determine_cpu_caches (unsigned int cpu_type) +{ + int L1I, L1D, L2, L3; + + switch (cpu_type) { + case EV4_CPU: + case EV45_CPU: + { + if (cpu_type == EV4_CPU) + L1I = CSHAPE(8*1024, 5, 1); + else + L1I = CSHAPE(16*1024, 5, 1); + L1D = L1I; + L3 = -1; + + /* BIU_CTL is a write-only Abox register. PALcode has a + shadow copy, and may be available from some versions + of the CSERVE PALcall. If we can get it, then + + unsigned long biu_ctl, size; + size = 128*1024 * (1 << ((biu_ctl >> 28) & 7)); + L2 = CSHAPE (size, 5, 1); + + Unfortunately, we can't rely on that. + */ + L2 = external_cache_probe(128*1024, 5); + break; + } + + case LCA4_CPU: + { + unsigned long car, size; + + L1I = L1D = CSHAPE(8*1024, 5, 1); + L3 = -1; + + car = *(vuip) phys_to_virt (0x120000078UL); + size = 64*1024 * (1 << ((car >> 5) & 7)); + /* No typo -- 8 byte cacheline size. Whodathunk. */ + L2 = (car & 1 ? CSHAPE (size, 3, 1) : -1); + break; + } + + case EV5_CPU: + case EV56_CPU: + { + unsigned long sc_ctl, width; + + L1I = L1D = CSHAPE(8*1024, 5, 1); + + /* Check the line size of the Scache. */ + sc_ctl = *(vulp) phys_to_virt (0xfffff000a8UL); + width = sc_ctl & 0x1000 ? 6 : 5; + L2 = CSHAPE (96*1024, width, 3); + + /* BC_CONTROL and BC_CONFIG are write-only IPRs. PALcode + has a shadow copy, and may be available from some versions + of the CSERVE PALcall. If we can get it, then + + unsigned long bc_control, bc_config, size; + size = 1024*1024 * (1 << ((bc_config & 7) - 1)); + L3 = (bc_control & 1 ? CSHAPE (size, width, 1) : -1); + + Unfortunately, we can't rely on that. + */ + L3 = external_cache_probe(1024*1024, width); + break; + } + + case PCA56_CPU: + case PCA57_CPU: + { + unsigned long cbox_config, size; + + if (cpu_type == PCA56_CPU) { + L1I = CSHAPE(16*1024, 6, 1); + L1D = CSHAPE(8*1024, 5, 1); + } else { + L1I = CSHAPE(32*1024, 6, 2); + L1D = CSHAPE(16*1024, 5, 1); + } + L3 = -1; + + cbox_config = *(vulp) phys_to_virt (0xfffff00008UL); + size = 512*1024 * (1 << ((cbox_config >> 12) & 3)); + +#if 0 + L2 = ((cbox_config >> 31) & 1 ? 
CSHAPE (size, 6, 1) : -1); +#else + L2 = external_cache_probe(512*1024, 6); +#endif + break; + } + + case EV6_CPU: + case EV67_CPU: + case EV68CB_CPU: + case EV68AL_CPU: + case EV68CX_CPU: + case EV69_CPU: + L1I = L1D = CSHAPE(64*1024, 6, 2); + L2 = external_cache_probe(1024*1024, 6); + L3 = -1; + break; + + case EV7_CPU: + case EV79_CPU: + L1I = L1D = CSHAPE(64*1024, 6, 2); + L2 = CSHAPE(7*1024*1024/4, 6, 7); + L3 = -1; + break; + + default: + /* Nothing known about this cpu type. */ + L1I = L1D = L2 = L3 = 0; + break; + } + + alpha_l1i_cacheshape = L1I; + alpha_l1d_cacheshape = L1D; + alpha_l2_cacheshape = L2; + alpha_l3_cacheshape = L3; +} + +/* + * We show only CPU #0 info. + */ +static void * +c_start(struct seq_file *f, loff_t *pos) +{ + return *pos ? NULL : (char *)hwrpb + hwrpb->processor_offset; +} + +static void * +c_next(struct seq_file *f, void *v, loff_t *pos) +{ + return NULL; +} + +static void +c_stop(struct seq_file *f, void *v) +{ +} + +struct seq_operations cpuinfo_op = { + .start = c_start, + .next = c_next, + .stop = c_stop, + .show = show_cpuinfo, +}; + + +static int +alpha_panic_event(struct notifier_block *this, unsigned long event, void *ptr) +{ +#if 1 + /* FIXME FIXME FIXME */ + /* If we are using SRM and serial console, just hard halt here. */ + if (alpha_using_srm && srmcons_output) + __halt(); +#endif + return NOTIFY_DONE; +} diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c new file mode 100644 index 0000000..08fe807 --- /dev/null +++ b/arch/alpha/kernel/signal.c @@ -0,0 +1,672 @@ +/* + * linux/arch/alpha/kernel/signal.c + * + * Copyright (C) 1995 Linus Torvalds + * + * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson + */ + +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/signal.h> +#include <linux/errno.h> +#include <linux/wait.h> +#include <linux/ptrace.h> +#include <linux/unistd.h> +#include <linux/mm.h> +#include <linux/smp.h> +#include <linux/smp_lock.h> +#include <linux/stddef.h> +#include <linux/tty.h> +#include <linux/binfmts.h> +#include <linux/bitops.h> + +#include <asm/uaccess.h> +#include <asm/sigcontext.h> +#include <asm/ucontext.h> + +#include "proto.h" + + +#define DEBUG_SIG 0 + +#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) + +asmlinkage void ret_from_sys_call(void); +static int do_signal(sigset_t *, struct pt_regs *, struct switch_stack *, + unsigned long, unsigned long); + + +/* + * The OSF/1 sigprocmask calling sequence is different from the + * C sigprocmask() sequence.. + * + * how: + * 1 - SIG_BLOCK + * 2 - SIG_UNBLOCK + * 3 - SIG_SETMASK + * + * We change the range to -1 .. 1 in order to let gcc easily + * use the conditional move instructions. + * + * Note that we don't need to acquire the kernel lock for SMP + * operation, as all of this is local to this thread. + */ +asmlinkage unsigned long +do_osf_sigprocmask(int how, unsigned long newmask, struct pt_regs *regs) +{ + unsigned long oldmask = -EINVAL; + + if ((unsigned long)how-1 <= 2) { + long sign = how-2; /* -1 .. 
1 */
+		unsigned long block, unblock;
+
+		newmask &= _BLOCKABLE;
+		spin_lock_irq(&current->sighand->siglock);
+		oldmask = current->blocked.sig[0];
+
+		unblock = oldmask & ~newmask;
+		block = oldmask | newmask;
+		if (!sign)
+			block = unblock;
+		if (sign <= 0)
+			newmask = block;
+		if (_NSIG_WORDS > 1 && sign > 0)
+			sigemptyset(&current->blocked);
+		current->blocked.sig[0] = newmask;
+		recalc_sigpending();
+		spin_unlock_irq(&current->sighand->siglock);
+
+		regs->r0 = 0;		/* special no error return */
+	}
+	return oldmask;
+}
+
+asmlinkage int
+osf_sigaction(int sig, const struct osf_sigaction __user *act,
+	      struct osf_sigaction __user *oact)
+{
+	struct k_sigaction new_ka, old_ka;
+	int ret;
+
+	if (act) {
+		old_sigset_t mask;
+		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
+		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
+		    __get_user(new_ka.sa.sa_flags, &act->sa_flags))
+			return -EFAULT;
+		__get_user(mask, &act->sa_mask);
+		siginitset(&new_ka.sa.sa_mask, mask);
+		new_ka.ka_restorer = NULL;
+	}
+
+	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+	if (!ret && oact) {
+		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
+		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
+		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags))
+			return -EFAULT;
+		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+	}
+
+	return ret;
+}
+
+asmlinkage long
+sys_rt_sigaction(int sig, const struct sigaction __user *act,
+		 struct sigaction __user *oact,
+		 size_t sigsetsize, void __user *restorer)
+{
+	struct k_sigaction new_ka, old_ka;
+	int ret;
+
+	/* XXX: Don't preclude handling different sized sigset_t's.  */
+	if (sigsetsize != sizeof(sigset_t))
+		return -EINVAL;
+
+	if (act) {
+		new_ka.ka_restorer = restorer;
+		if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
+			return -EFAULT;
+	}
+
+	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+	if (!ret && oact) {
+		if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
+			return -EFAULT;
+	}
+
+	return ret;
+}
+
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
+ */
+asmlinkage int
+do_sigsuspend(old_sigset_t mask, struct pt_regs *regs, struct switch_stack *sw)
+{
+	sigset_t oldset;
+
+	mask &= _BLOCKABLE;
+	spin_lock_irq(&current->sighand->siglock);
+	oldset = current->blocked;
+	siginitset(&current->blocked, mask);
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+
+	/* Indicate EINTR on return from any possible signal handler,
+	   which will not come back through here, but via sigreturn.  */
+	regs->r0 = EINTR;
+	regs->r19 = 1;
+
+	while (1) {
+		current->state = TASK_INTERRUPTIBLE;
+		schedule();
+		if (do_signal(&oldset, regs, sw, 0, 0))
+			return -EINTR;
+	}
+}
+
+asmlinkage int
+do_rt_sigsuspend(sigset_t __user *uset, size_t sigsetsize,
+		 struct pt_regs *regs, struct switch_stack *sw)
+{
+	sigset_t oldset, set;
+
+	/* XXX: Don't preclude handling different sized sigset_t's.  */
+	if (sigsetsize != sizeof(sigset_t))
+		return -EINVAL;
+	if (copy_from_user(&set, uset, sizeof(set)))
+		return -EFAULT;
+
+	sigdelsetmask(&set, ~_BLOCKABLE);
+	spin_lock_irq(&current->sighand->siglock);
+	oldset = current->blocked;
+	current->blocked = set;
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+
+	/* Indicate EINTR on return from any possible signal handler,
+	   which will not come back through here, but via sigreturn.
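+
+	   (On Alpha, v0 (r0) holds a syscall's return value and a3 (r19)
+	   its error flag, so the registers a handler eventually
+	   sigreturns to already report EINTR.)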
*/ + regs->r0 = EINTR; + regs->r19 = 1; + + while (1) { + current->state = TASK_INTERRUPTIBLE; + schedule(); + if (do_signal(&oldset, regs, sw, 0, 0)) + return -EINTR; + } +} + +asmlinkage int +sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss) +{ + return do_sigaltstack(uss, uoss, rdusp()); +} + +/* + * Do a signal return; undo the signal stack. + */ + +#if _NSIG_WORDS > 1 +# error "Non SA_SIGINFO frame needs rearranging" +#endif + +struct sigframe +{ + struct sigcontext sc; + unsigned int retcode[3]; +}; + +struct rt_sigframe +{ + struct siginfo info; + struct ucontext uc; + unsigned int retcode[3]; +}; + +/* If this changes, userland unwinders that Know Things about our signal + frame will break. Do not undertake lightly. It also implies an ABI + change wrt the size of siginfo_t, which may cause some pain. */ +extern char compile_time_assert + [offsetof(struct rt_sigframe, uc.uc_mcontext) == 176 ? 1 : -1]; + +#define INSN_MOV_R30_R16 0x47fe0410 +#define INSN_LDI_R0 0x201f0000 +#define INSN_CALLSYS 0x00000083 + +static long +restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, + struct switch_stack *sw) +{ + unsigned long usp; + long i, err = __get_user(regs->pc, &sc->sc_pc); + + sw->r26 = (unsigned long) ret_from_sys_call; + + err |= __get_user(regs->r0, sc->sc_regs+0); + err |= __get_user(regs->r1, sc->sc_regs+1); + err |= __get_user(regs->r2, sc->sc_regs+2); + err |= __get_user(regs->r3, sc->sc_regs+3); + err |= __get_user(regs->r4, sc->sc_regs+4); + err |= __get_user(regs->r5, sc->sc_regs+5); + err |= __get_user(regs->r6, sc->sc_regs+6); + err |= __get_user(regs->r7, sc->sc_regs+7); + err |= __get_user(regs->r8, sc->sc_regs+8); + err |= __get_user(sw->r9, sc->sc_regs+9); + err |= __get_user(sw->r10, sc->sc_regs+10); + err |= __get_user(sw->r11, sc->sc_regs+11); + err |= __get_user(sw->r12, sc->sc_regs+12); + err |= __get_user(sw->r13, sc->sc_regs+13); + err |= __get_user(sw->r14, sc->sc_regs+14); + err |= __get_user(sw->r15, sc->sc_regs+15); + err |= __get_user(regs->r16, sc->sc_regs+16); + err |= __get_user(regs->r17, sc->sc_regs+17); + err |= __get_user(regs->r18, sc->sc_regs+18); + err |= __get_user(regs->r19, sc->sc_regs+19); + err |= __get_user(regs->r20, sc->sc_regs+20); + err |= __get_user(regs->r21, sc->sc_regs+21); + err |= __get_user(regs->r22, sc->sc_regs+22); + err |= __get_user(regs->r23, sc->sc_regs+23); + err |= __get_user(regs->r24, sc->sc_regs+24); + err |= __get_user(regs->r25, sc->sc_regs+25); + err |= __get_user(regs->r26, sc->sc_regs+26); + err |= __get_user(regs->r27, sc->sc_regs+27); + err |= __get_user(regs->r28, sc->sc_regs+28); + err |= __get_user(regs->gp, sc->sc_regs+29); + err |= __get_user(usp, sc->sc_regs+30); + wrusp(usp); + + for (i = 0; i < 31; i++) + err |= __get_user(sw->fp[i], sc->sc_fpregs+i); + err |= __get_user(sw->fp[31], &sc->sc_fpcr); + + return err; +} + +/* Note that this syscall is also used by setcontext(3) to install + a given sigcontext. This because it's impossible to set *all* + registers and transfer control from userland. 
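+   (A pure user-level restore would need one register left over to
+   hold the target pc for the final jump, so at most 30 of the 31
+   integer registers could be reloaded; the kernel exit path has no
+   such hole.)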
*/
+
+asmlinkage void
+do_sigreturn(struct sigcontext __user *sc, struct pt_regs *regs,
+	     struct switch_stack *sw)
+{
+	sigset_t set;
+
+	/* Verify that it's a good sigcontext before using it */
+	if (!access_ok(VERIFY_READ, sc, sizeof(*sc)))
+		goto give_sigsegv;
+	if (__get_user(set.sig[0], &sc->sc_mask))
+		goto give_sigsegv;
+
+	sigdelsetmask(&set, ~_BLOCKABLE);
+	spin_lock_irq(&current->sighand->siglock);
+	current->blocked = set;
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+
+	if (restore_sigcontext(sc, regs, sw))
+		goto give_sigsegv;
+
+	/* Send SIGTRAP if we're single-stepping: */
+	if (ptrace_cancel_bpt (current)) {
+		siginfo_t info;
+
+		info.si_signo = SIGTRAP;
+		info.si_errno = 0;
+		info.si_code = TRAP_BRKPT;
+		info.si_addr = (void __user *) regs->pc;
+		info.si_trapno = 0;
+		send_sig_info(SIGTRAP, &info, current);
+	}
+	return;
+
+give_sigsegv:
+	force_sig(SIGSEGV, current);
+}
+
+asmlinkage void
+do_rt_sigreturn(struct rt_sigframe __user *frame, struct pt_regs *regs,
+		struct switch_stack *sw)
+{
+	sigset_t set;
+
+	/* Verify that it's a good ucontext_t before using it */
+	if (!access_ok(VERIFY_READ, &frame->uc, sizeof(frame->uc)))
+		goto give_sigsegv;
+	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+		goto give_sigsegv;
+
+	sigdelsetmask(&set, ~_BLOCKABLE);
+	spin_lock_irq(&current->sighand->siglock);
+	current->blocked = set;
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+
+	if (restore_sigcontext(&frame->uc.uc_mcontext, regs, sw))
+		goto give_sigsegv;
+
+	/* Send SIGTRAP if we're single-stepping: */
+	if (ptrace_cancel_bpt (current)) {
+		siginfo_t info;
+
+		info.si_signo = SIGTRAP;
+		info.si_errno = 0;
+		info.si_code = TRAP_BRKPT;
+		info.si_addr = (void __user *) regs->pc;
+		info.si_trapno = 0;
+		send_sig_info(SIGTRAP, &info, current);
+	}
+	return;
+
+give_sigsegv:
+	force_sig(SIGSEGV, current);
+}
+
+
+/*
+ * Set up a signal frame.
+ */
+
+static inline void __user *
+get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
+{
+	if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && !
on_sig_stack(sp)) + sp = current->sas_ss_sp + current->sas_ss_size; + + return (void __user *)((sp - frame_size) & -32ul); +} + +static long +setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, + struct switch_stack *sw, unsigned long mask, unsigned long sp) +{ + long i, err = 0; + + err |= __put_user(on_sig_stack((unsigned long)sc), &sc->sc_onstack); + err |= __put_user(mask, &sc->sc_mask); + err |= __put_user(regs->pc, &sc->sc_pc); + err |= __put_user(8, &sc->sc_ps); + + err |= __put_user(regs->r0 , sc->sc_regs+0); + err |= __put_user(regs->r1 , sc->sc_regs+1); + err |= __put_user(regs->r2 , sc->sc_regs+2); + err |= __put_user(regs->r3 , sc->sc_regs+3); + err |= __put_user(regs->r4 , sc->sc_regs+4); + err |= __put_user(regs->r5 , sc->sc_regs+5); + err |= __put_user(regs->r6 , sc->sc_regs+6); + err |= __put_user(regs->r7 , sc->sc_regs+7); + err |= __put_user(regs->r8 , sc->sc_regs+8); + err |= __put_user(sw->r9 , sc->sc_regs+9); + err |= __put_user(sw->r10 , sc->sc_regs+10); + err |= __put_user(sw->r11 , sc->sc_regs+11); + err |= __put_user(sw->r12 , sc->sc_regs+12); + err |= __put_user(sw->r13 , sc->sc_regs+13); + err |= __put_user(sw->r14 , sc->sc_regs+14); + err |= __put_user(sw->r15 , sc->sc_regs+15); + err |= __put_user(regs->r16, sc->sc_regs+16); + err |= __put_user(regs->r17, sc->sc_regs+17); + err |= __put_user(regs->r18, sc->sc_regs+18); + err |= __put_user(regs->r19, sc->sc_regs+19); + err |= __put_user(regs->r20, sc->sc_regs+20); + err |= __put_user(regs->r21, sc->sc_regs+21); + err |= __put_user(regs->r22, sc->sc_regs+22); + err |= __put_user(regs->r23, sc->sc_regs+23); + err |= __put_user(regs->r24, sc->sc_regs+24); + err |= __put_user(regs->r25, sc->sc_regs+25); + err |= __put_user(regs->r26, sc->sc_regs+26); + err |= __put_user(regs->r27, sc->sc_regs+27); + err |= __put_user(regs->r28, sc->sc_regs+28); + err |= __put_user(regs->gp , sc->sc_regs+29); + err |= __put_user(sp, sc->sc_regs+30); + err |= __put_user(0, sc->sc_regs+31); + + for (i = 0; i < 31; i++) + err |= __put_user(sw->fp[i], sc->sc_fpregs+i); + err |= __put_user(0, sc->sc_fpregs+31); + err |= __put_user(sw->fp[31], &sc->sc_fpcr); + + err |= __put_user(regs->trap_a0, &sc->sc_traparg_a0); + err |= __put_user(regs->trap_a1, &sc->sc_traparg_a1); + err |= __put_user(regs->trap_a2, &sc->sc_traparg_a2); + + return err; +} + +static void +setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, + struct pt_regs *regs, struct switch_stack * sw) +{ + unsigned long oldsp, r26, err = 0; + struct sigframe __user *frame; + + oldsp = rdusp(); + frame = get_sigframe(ka, oldsp, sizeof(*frame)); + if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) + goto give_sigsegv; + + err |= setup_sigcontext(&frame->sc, regs, sw, set->sig[0], oldsp); + if (err) + goto give_sigsegv; + + /* Set up to return from userspace. If provided, use a stub + already in userspace. */ + if (ka->ka_restorer) { + r26 = (unsigned long) ka->ka_restorer; + } else { + err |= __put_user(INSN_MOV_R30_R16, frame->retcode+0); + err |= __put_user(INSN_LDI_R0+__NR_sigreturn, frame->retcode+1); + err |= __put_user(INSN_CALLSYS, frame->retcode+2); + imb(); + r26 = (unsigned long) frame->retcode; + } + + /* Check that everything was written properly. 
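+	   (err is the OR of every __put_user result above, so a single
+	   faulting store anywhere in the frame makes it nonzero and one
+	   test covers the whole frame.)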
*/ + if (err) + goto give_sigsegv; + + /* "Return" to the handler */ + regs->r26 = r26; + regs->r27 = regs->pc = (unsigned long) ka->sa.sa_handler; + regs->r16 = sig; /* a0: signal number */ + regs->r17 = 0; /* a1: exception code */ + regs->r18 = (unsigned long) &frame->sc; /* a2: sigcontext pointer */ + wrusp((unsigned long) frame); + +#if DEBUG_SIG + printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n", + current->comm, current->pid, frame, regs->pc, regs->r26); +#endif + + return; + +give_sigsegv: + force_sigsegv(sig, current); +} + +static void +setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, + sigset_t *set, struct pt_regs *regs, struct switch_stack * sw) +{ + unsigned long oldsp, r26, err = 0; + struct rt_sigframe __user *frame; + + oldsp = rdusp(); + frame = get_sigframe(ka, oldsp, sizeof(*frame)); + if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) + goto give_sigsegv; + + err |= copy_siginfo_to_user(&frame->info, info); + + /* Create the ucontext. */ + err |= __put_user(0, &frame->uc.uc_flags); + err |= __put_user(0, &frame->uc.uc_link); + err |= __put_user(set->sig[0], &frame->uc.uc_osf_sigmask); + err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); + err |= __put_user(sas_ss_flags(oldsp), &frame->uc.uc_stack.ss_flags); + err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); + err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, sw, + set->sig[0], oldsp); + err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); + if (err) + goto give_sigsegv; + + /* Set up to return from userspace. If provided, use a stub + already in userspace. */ + if (ka->ka_restorer) { + r26 = (unsigned long) ka->ka_restorer; + } else { + err |= __put_user(INSN_MOV_R30_R16, frame->retcode+0); + err |= __put_user(INSN_LDI_R0+__NR_rt_sigreturn, + frame->retcode+1); + err |= __put_user(INSN_CALLSYS, frame->retcode+2); + imb(); + r26 = (unsigned long) frame->retcode; + } + + if (err) + goto give_sigsegv; + + /* "Return" to the handler */ + regs->r26 = r26; + regs->r27 = regs->pc = (unsigned long) ka->sa.sa_handler; + regs->r16 = sig; /* a0: signal number */ + regs->r17 = (unsigned long) &frame->info; /* a1: siginfo pointer */ + regs->r18 = (unsigned long) &frame->uc; /* a2: ucontext pointer */ + wrusp((unsigned long) frame); + +#if DEBUG_SIG + printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n", + current->comm, current->pid, frame, regs->pc, regs->r26); +#endif + + return; + +give_sigsegv: + force_sigsegv(sig, current); +} + + +/* + * OK, we're invoking a handler. 
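+ *
+ * For reference, the three-instruction return stub that setup_frame
+ * and setup_rt_frame may write on the user stack decodes as
+ *
+ *	mov	$30,$16		# 0x47fe0410: a0 = sp (the frame)
+ *	ldi	$0,NR		# 0x201f0000 + __NR_[rt_]sigreturn
+ *	callsys			# 0x00000083
+ *
+ * so a plain return from the handler re-enters the kernel in
+ * [rt_]sigreturn with the frame as its argument.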
+ */
+static inline void
+handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
+	      sigset_t *oldset, struct pt_regs * regs, struct switch_stack *sw)
+{
+	if (ka->sa.sa_flags & SA_SIGINFO)
+		setup_rt_frame(sig, ka, info, oldset, regs, sw);
+	else
+		setup_frame(sig, ka, oldset, regs, sw);
+
+	if (ka->sa.sa_flags & SA_RESETHAND)
+		ka->sa.sa_handler = SIG_DFL;
+
+	if (!(ka->sa.sa_flags & SA_NODEFER)) {
+		spin_lock_irq(&current->sighand->siglock);
+		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+		sigaddset(&current->blocked,sig);
+		recalc_sigpending();
+		spin_unlock_irq(&current->sighand->siglock);
+	}
+}
+
+static inline void
+syscall_restart(unsigned long r0, unsigned long r19,
+		struct pt_regs *regs, struct k_sigaction *ka)
+{
+	switch (regs->r0) {
+	case ERESTARTSYS:
+		if (!(ka->sa.sa_flags & SA_RESTART)) {
+		case ERESTARTNOHAND:
+			regs->r0 = EINTR;
+			break;
+		}
+		/* fallthrough */
+	case ERESTARTNOINTR:
+		regs->r0 = r0;	/* reset v0 and a3 and replay syscall */
+		regs->r19 = r19;
+		regs->pc -= 4;
+		break;
+	case ERESTART_RESTARTBLOCK:
+		current_thread_info()->restart_block.fn = do_no_restart_syscall;
+		regs->r0 = EINTR;
+		break;
+	}
+}
+
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle.  Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ *
+ * Note that we go through the signals twice: once to check the signals that
+ * the kernel can handle, and then we build all the user-level signal handling
+ * stack-frames in one go after that.
+ *
+ * "r0" and "r19" are the registers we need to restore for system call
+ * restart. "r0" is also used as an indicator whether we can restart at
+ * all (if we get here from anything but a syscall return, it will be 0)
+ */
+static int
+do_signal(sigset_t *oldset, struct pt_regs * regs, struct switch_stack * sw,
+	  unsigned long r0, unsigned long r19)
+{
+	siginfo_t info;
+	int signr;
+	unsigned long single_stepping = ptrace_cancel_bpt(current);
+	struct k_sigaction ka;
+
+	if (!oldset)
+		oldset = &current->blocked;
+
+	/* This lets the debugger run, ... */
+	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+	/* ... so re-check the single stepping. */
+	single_stepping |= ptrace_cancel_bpt(current);
+
+	if (signr > 0) {
+		/* Whee!  Actually deliver the signal.  */
+		if (r0) syscall_restart(r0, r19, regs, &ka);
+		handle_signal(signr, &ka, &info, oldset, regs, sw);
+		if (single_stepping)
+			ptrace_set_bpt(current);	/* re-set bpt */
+		return 1;
+	}
+
+	if (r0) {
+		switch (regs->r0) {
+		case ERESTARTNOHAND:
+		case ERESTARTSYS:
+		case ERESTARTNOINTR:
+			/* Reset v0 and a3 and replay syscall.  */
+			regs->r0 = r0;
+			regs->r19 = r19;
+			regs->pc -= 4;
+			break;
+		case ERESTART_RESTARTBLOCK:
+			/* Force v0 to the restart syscall and replay.
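+			   Backing pc up by 4 re-executes the callsys;
+			   with v0 rewritten, the retry enters
+			   sys_restart_syscall(), which re-issues the
+			   interrupted call from its restart_block.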
*/ + regs->r0 = __NR_restart_syscall; + regs->pc -= 4; + break; + } + } + if (single_stepping) + ptrace_set_bpt(current); /* re-set breakpoint */ + + return 0; +} + +void +do_notify_resume(sigset_t *oldset, struct pt_regs *regs, + struct switch_stack *sw, unsigned long r0, + unsigned long r19, unsigned long thread_info_flags) +{ + if (thread_info_flags & _TIF_SIGPENDING) + do_signal(oldset, regs, sw, r0, r19); +} diff --git a/arch/alpha/kernel/smc37c669.c b/arch/alpha/kernel/smc37c669.c new file mode 100644 index 0000000..fd467b2 --- /dev/null +++ b/arch/alpha/kernel/smc37c669.c @@ -0,0 +1,2554 @@ +/* + * SMC 37C669 initialization code + */ +#include <linux/kernel.h> + +#include <linux/slab.h> +#include <linux/mm.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/spinlock.h> + +#include <asm/hwrpb.h> +#include <asm/io.h> +#include <asm/segment.h> + +#if 0 +# define DBG_DEVS(args) printk args +#else +# define DBG_DEVS(args) +#endif + +#define KB 1024 +#define MB (1024*KB) +#define GB (1024*MB) + +#define SMC_DEBUG 0 + +/* File: smcc669_def.h + * + * Copyright (C) 1997 by + * Digital Equipment Corporation, Maynard, Massachusetts. + * All rights reserved. + * + * This software is furnished under a license and may be used and copied + * only in accordance of the terms of such license and with the + * inclusion of the above copyright notice. This software or any other + * copies thereof may not be provided or otherwise made available to any + * other person. No title to and ownership of the software is hereby + * transferred. + * + * The information in this software is subject to change without notice + * and should not be construed as a commitment by Digital Equipment + * Corporation. + * + * Digital assumes no responsibility for the use or reliability of its + * software on equipment which is not supplied by Digital. + * + * + * Abstract: + * + * This file contains header definitions for the SMC37c669 + * Super I/O controller. + * + * Author: + * + * Eric Rasmussen + * + * Modification History: + * + * er 28-Jan-1997 Initial Entry + */ + +#ifndef __SMC37c669_H +#define __SMC37c669_H + +/* +** Macros for handling device IRQs +** +** The mask acts as a flag used in mapping actual ISA IRQs (0 - 15) +** to device IRQs (A - H). +*/ +#define SMC37c669_DEVICE_IRQ_MASK 0x80000000 +#define SMC37c669_DEVICE_IRQ( __i ) \ + ((SMC37c669_DEVICE_IRQ_MASK) | (__i)) +#define SMC37c669_IS_DEVICE_IRQ(__i) \ + (((__i) & (SMC37c669_DEVICE_IRQ_MASK)) == (SMC37c669_DEVICE_IRQ_MASK)) +#define SMC37c669_RAW_DEVICE_IRQ(__i) \ + ((__i) & ~(SMC37c669_DEVICE_IRQ_MASK)) + +/* +** Macros for handling device DRQs +** +** The mask acts as a flag used in mapping actual ISA DMA +** channels to device DMA channels (A - C). 
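+**
+** For example, per the macros below:
+**
+**	SMC37c669_DEVICE_DRQ(0x02)            == 0x80000002
+**	SMC37c669_IS_DEVICE_DRQ(0x80000002)   != 0
+**	SMC37c669_RAW_DEVICE_DRQ(0x80000002)  == 0x02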
+*/ +#define SMC37c669_DEVICE_DRQ_MASK 0x80000000 +#define SMC37c669_DEVICE_DRQ(__d) \ + ((SMC37c669_DEVICE_DRQ_MASK) | (__d)) +#define SMC37c669_IS_DEVICE_DRQ(__d) \ + (((__d) & (SMC37c669_DEVICE_DRQ_MASK)) == (SMC37c669_DEVICE_DRQ_MASK)) +#define SMC37c669_RAW_DEVICE_DRQ(__d) \ + ((__d) & ~(SMC37c669_DEVICE_DRQ_MASK)) + +#define SMC37c669_DEVICE_ID 0x3 + +/* +** SMC37c669 Device Function Definitions +*/ +#define SERIAL_0 0 +#define SERIAL_1 1 +#define PARALLEL_0 2 +#define FLOPPY_0 3 +#define IDE_0 4 +#define NUM_FUNCS 5 + +/* +** Default Device Function Mappings +*/ +#define COM1_BASE 0x3F8 +#define COM1_IRQ 4 +#define COM2_BASE 0x2F8 +#define COM2_IRQ 3 +#define PARP_BASE 0x3BC +#define PARP_IRQ 7 +#define PARP_DRQ 3 +#define FDC_BASE 0x3F0 +#define FDC_IRQ 6 +#define FDC_DRQ 2 + +/* +** Configuration On/Off Key Definitions +*/ +#define SMC37c669_CONFIG_ON_KEY 0x55 +#define SMC37c669_CONFIG_OFF_KEY 0xAA + +/* +** SMC 37c669 Device IRQs +*/ +#define SMC37c669_DEVICE_IRQ_A ( SMC37c669_DEVICE_IRQ( 0x01 ) ) +#define SMC37c669_DEVICE_IRQ_B ( SMC37c669_DEVICE_IRQ( 0x02 ) ) +#define SMC37c669_DEVICE_IRQ_C ( SMC37c669_DEVICE_IRQ( 0x03 ) ) +#define SMC37c669_DEVICE_IRQ_D ( SMC37c669_DEVICE_IRQ( 0x04 ) ) +#define SMC37c669_DEVICE_IRQ_E ( SMC37c669_DEVICE_IRQ( 0x05 ) ) +#define SMC37c669_DEVICE_IRQ_F ( SMC37c669_DEVICE_IRQ( 0x06 ) ) +/* SMC37c669_DEVICE_IRQ_G *** RESERVED ***/ +#define SMC37c669_DEVICE_IRQ_H ( SMC37c669_DEVICE_IRQ( 0x08 ) ) + +/* +** SMC 37c669 Device DMA Channel Definitions +*/ +#define SMC37c669_DEVICE_DRQ_A ( SMC37c669_DEVICE_DRQ( 0x01 ) ) +#define SMC37c669_DEVICE_DRQ_B ( SMC37c669_DEVICE_DRQ( 0x02 ) ) +#define SMC37c669_DEVICE_DRQ_C ( SMC37c669_DEVICE_DRQ( 0x03 ) ) + +/* +** Configuration Register Index Definitions +*/ +#define SMC37c669_CR00_INDEX 0x00 +#define SMC37c669_CR01_INDEX 0x01 +#define SMC37c669_CR02_INDEX 0x02 +#define SMC37c669_CR03_INDEX 0x03 +#define SMC37c669_CR04_INDEX 0x04 +#define SMC37c669_CR05_INDEX 0x05 +#define SMC37c669_CR06_INDEX 0x06 +#define SMC37c669_CR07_INDEX 0x07 +#define SMC37c669_CR08_INDEX 0x08 +#define SMC37c669_CR09_INDEX 0x09 +#define SMC37c669_CR0A_INDEX 0x0A +#define SMC37c669_CR0B_INDEX 0x0B +#define SMC37c669_CR0C_INDEX 0x0C +#define SMC37c669_CR0D_INDEX 0x0D +#define SMC37c669_CR0E_INDEX 0x0E +#define SMC37c669_CR0F_INDEX 0x0F +#define SMC37c669_CR10_INDEX 0x10 +#define SMC37c669_CR11_INDEX 0x11 +#define SMC37c669_CR12_INDEX 0x12 +#define SMC37c669_CR13_INDEX 0x13 +#define SMC37c669_CR14_INDEX 0x14 +#define SMC37c669_CR15_INDEX 0x15 +#define SMC37c669_CR16_INDEX 0x16 +#define SMC37c669_CR17_INDEX 0x17 +#define SMC37c669_CR18_INDEX 0x18 +#define SMC37c669_CR19_INDEX 0x19 +#define SMC37c669_CR1A_INDEX 0x1A +#define SMC37c669_CR1B_INDEX 0x1B +#define SMC37c669_CR1C_INDEX 0x1C +#define SMC37c669_CR1D_INDEX 0x1D +#define SMC37c669_CR1E_INDEX 0x1E +#define SMC37c669_CR1F_INDEX 0x1F +#define SMC37c669_CR20_INDEX 0x20 +#define SMC37c669_CR21_INDEX 0x21 +#define SMC37c669_CR22_INDEX 0x22 +#define SMC37c669_CR23_INDEX 0x23 +#define SMC37c669_CR24_INDEX 0x24 +#define SMC37c669_CR25_INDEX 0x25 +#define SMC37c669_CR26_INDEX 0x26 +#define SMC37c669_CR27_INDEX 0x27 +#define SMC37c669_CR28_INDEX 0x28 +#define SMC37c669_CR29_INDEX 0x29 + +/* +** Configuration Register Alias Definitions +*/ +#define SMC37c669_DEVICE_ID_INDEX SMC37c669_CR0D_INDEX +#define SMC37c669_DEVICE_REVISION_INDEX SMC37c669_CR0E_INDEX +#define SMC37c669_FDC_BASE_ADDRESS_INDEX SMC37c669_CR20_INDEX +#define SMC37c669_IDE_BASE_ADDRESS_INDEX SMC37c669_CR21_INDEX +#define 
SMC37c669_IDE_ALTERNATE_ADDRESS_INDEX SMC37c669_CR22_INDEX +#define SMC37c669_PARALLEL0_BASE_ADDRESS_INDEX SMC37c669_CR23_INDEX +#define SMC37c669_SERIAL0_BASE_ADDRESS_INDEX SMC37c669_CR24_INDEX +#define SMC37c669_SERIAL1_BASE_ADDRESS_INDEX SMC37c669_CR25_INDEX +#define SMC37c669_PARALLEL_FDC_DRQ_INDEX SMC37c669_CR26_INDEX +#define SMC37c669_PARALLEL_FDC_IRQ_INDEX SMC37c669_CR27_INDEX +#define SMC37c669_SERIAL_IRQ_INDEX SMC37c669_CR28_INDEX + +/* +** Configuration Register Definitions +** +** The INDEX (write only) and DATA (read/write) ports are effective +** only when the chip is in the Configuration State. +*/ +typedef struct _SMC37c669_CONFIG_REGS { + unsigned char index_port; + unsigned char data_port; +} SMC37c669_CONFIG_REGS; + +/* +** CR00 - default value 0x28 +** +** IDE_EN (CR00<1:0>): +** 0x - 30ua pull-ups on nIDEEN, nHDCS0, NHDCS1 +** 11 - IRQ_H available as IRQ output, +** IRRX2, IRTX2 available as alternate IR pins +** 10 - nIDEEN, nHDCS0, nHDCS1 used to control IDE +** +** VALID (CR00<7>): +** A high level on this software controlled bit can +** be used to indicate that a valid configuration +** cycle has occurred. The control software must +** take care to set this bit at the appropriate times. +** Set to zero after power up. This bit has no +** effect on any other hardware in the chip. +** +*/ +typedef union _SMC37c669_CR00 { + unsigned char as_uchar; + struct { + unsigned ide_en : 2; /* See note above */ + unsigned reserved1 : 1; /* RAZ */ + unsigned fdc_pwr : 1; /* 1 = supply power to FDC */ + unsigned reserved2 : 3; /* Read as 010b */ + unsigned valid : 1; /* See note above */ + } by_field; +} SMC37c669_CR00; + +/* +** CR01 - default value 0x9C +*/ +typedef union _SMC37c669_CR01 { + unsigned char as_uchar; + struct { + unsigned reserved1 : 2; /* RAZ */ + unsigned ppt_pwr : 1; /* 1 = supply power to PPT */ + unsigned ppt_mode : 1; /* 1 = Printer mode, 0 = EPP */ + unsigned reserved2 : 1; /* Read as 1 */ + unsigned reserved3 : 2; /* RAZ */ + unsigned lock_crx: 1; /* Lock CR00 - CR18 */ + } by_field; +} SMC37c669_CR01; + +/* +** CR02 - default value 0x88 +*/ +typedef union _SMC37c669_CR02 { + unsigned char as_uchar; + struct { + unsigned reserved1 : 3; /* RAZ */ + unsigned uart1_pwr : 1; /* 1 = supply power to UART1 */ + unsigned reserved2 : 3; /* RAZ */ + unsigned uart2_pwr : 1; /* 1 = supply power to UART2 */ + } by_field; +} SMC37c669_CR02; + +/* +** CR03 - default value 0x78 +** +** CR03<7> CR03<2> Pin 94 +** ------- ------- ------ +** 0 X DRV2 (input) +** 1 0 ADRX +** 1 1 IRQ_B +** +** CR03<6> CR03<5> Op Mode +** ------- ------- ------- +** 0 0 Model 30 +** 0 1 PS/2 +** 1 0 Reserved +** 1 1 AT Mode +*/ +typedef union _SMC37c669_CR03 { + unsigned char as_uchar; + struct { + unsigned pwrgd_gamecs : 1; /* 1 = PWRGD, 0 = GAMECS */ + unsigned fdc_mode2 : 1; /* 1 = Enhanced Mode 2 */ + unsigned pin94_0 : 1; /* See note above */ + unsigned reserved1 : 1; /* RAZ */ + unsigned drvden : 1; /* 1 = high, 0 - output */ + unsigned op_mode : 2; /* See note above */ + unsigned pin94_1 : 1; /* See note above */ + } by_field; +} SMC37c669_CR03; + +/* +** CR04 - default value 0x00 +** +** PP_EXT_MODE: +** If CR01<PP_MODE> = 0 and PP_EXT_MODE = +** 00 - Standard and Bidirectional +** 01 - EPP mode and SPP +** 10 - ECP mode +** In this mode, 2 drives can be supported +** directly, 3 or 4 drives must use external +** 4 drive support. SPP can be selected +** through the ECR register of ECP as mode 000. 
+** 11 - ECP mode and EPP mode +** In this mode, 2 drives can be supported +** directly, 3 or 4 drives must use external +** 4 drive support. SPP can be selected +** through the ECR register of ECP as mode 000. +** In this mode, EPP can be selected through +** the ECR register of ECP as mode 100. +** +** PP_FDC: +** 00 - Normal +** 01 - PPFD1 +** 10 - PPFD2 +** 11 - Reserved +** +** MIDI1: +** Serial Clock Select: +** A low level on this bit disables MIDI support, +** clock = divide by 13. A high level on this +** bit enables MIDI support, clock = divide by 12. +** +** MIDI operates at 31.25 Kbps which can be derived +** from 125 KHz (24 MHz / 12 = 2 MHz, 2 MHz / 16 = 125 KHz) +** +** ALT_IO: +** 0 - Use pins IRRX, IRTX +** 1 - Use pins IRRX2, IRTX2 +** +** If this bit is set, the IR receive and transmit +** functions will not be available on pins 25 and 26 +** unless CR00<IDE_EN> = 11. +*/ +typedef union _SMC37c669_CR04 { + unsigned char as_uchar; + struct { + unsigned ppt_ext_mode : 2; /* See note above */ + unsigned ppt_fdc : 2; /* See note above */ + unsigned midi1 : 1; /* See note above */ + unsigned midi2 : 1; /* See note above */ + unsigned epp_type : 1; /* 0 = EPP 1.9, 1 = EPP 1.7 */ + unsigned alt_io : 1; /* See note above */ + } by_field; +} SMC37c669_CR04; + +/* +** CR05 - default value 0x00 +** +** DEN_SEL: +** 00 - Densel output normal +** 01 - Reserved +** 10 - Densel output 1 +** 11 - Densel output 0 +** +*/ +typedef union _SMC37c669_CR05 { + unsigned char as_uchar; + struct { + unsigned reserved1 : 2; /* RAZ */ + unsigned fdc_dma_mode : 1; /* 0 = burst, 1 = non-burst */ + unsigned den_sel : 2; /* See note above */ + unsigned swap_drv : 1; /* Swap the FDC motor selects */ + unsigned extx4 : 1; /* 0 = 2 drive, 1 = external 4 drive decode */ + unsigned reserved2 : 1; /* RAZ */ + } by_field; +} SMC37c669_CR05; + +/* +** CR06 - default value 0xFF +*/ +typedef union _SMC37c669_CR06 { + unsigned char as_uchar; + struct { + unsigned floppy_a : 2; /* Type of floppy drive A */ + unsigned floppy_b : 2; /* Type of floppy drive B */ + unsigned floppy_c : 2; /* Type of floppy drive C */ + unsigned floppy_d : 2; /* Type of floppy drive D */ + } by_field; +} SMC37c669_CR06; + +/* +** CR07 - default value 0x00 +** +** Auto Power Management CR07<7:4>: +** 0 - Auto Powerdown disabled (default) +** 1 - Auto Powerdown enabled +** +** This bit is reset to the default state by POR or +** a hardware reset. 
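+**
+** A sketch of the read-modify-write pattern used with these bitfield
+** unions throughout this file (helpers defined further below):
+**
+**	SMC37c669_CR07 cr07;
+**	cr07.as_uchar = SMC37c669_read_config( SMC37c669_CR07_INDEX );
+**	cr07.by_field.uart1_en = 1;
+**	SMC37c669_write_config( SMC37c669_CR07_INDEX, cr07.as_uchar );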
+**
+*/
+typedef union _SMC37c669_CR07 {
+    unsigned char as_uchar;
+    struct {
+	unsigned floppy_boot : 2;	/* 0 = A:, 1 = B: */
+	unsigned reserved1 : 2;		/* RAZ */
+	unsigned ppt_en : 1;		/* See note above */
+	unsigned uart1_en : 1;		/* See note above */
+	unsigned uart2_en : 1;		/* See note above */
+	unsigned fdc_en : 1;		/* See note above */
+    } by_field;
+} SMC37c669_CR07;
+
+/*
+** CR08 - default value 0x00
+*/
+typedef union _SMC37c669_CR08 {
+    unsigned char as_uchar;
+    struct {
+	unsigned zero : 4;		/* 0 */
+	unsigned addrx7_4 : 4;		/* ADR<7:3> for ADRx decode */
+    } by_field;
+} SMC37c669_CR08;
+
+/*
+** CR09 - default value 0x00
+**
+** ADRx_CONFIG:
+**	00 - ADRx disabled
+**	01 - 1 byte decode A<3:0> = 0000b
+**	10 - 8 byte block decode A<3:0> = 0XXXb
+**	11 - 16 byte block decode A<3:0> = XXXXb
+**
+*/
+typedef union _SMC37c669_CR09 {
+    unsigned char as_uchar;
+    struct {
+	unsigned adra8 : 3;		/* ADR<10:8> for ADRx decode */
+	unsigned reserved1 : 3;
+	unsigned adrx_config : 2;	/* See note above */
+    } by_field;
+} SMC37c669_CR09;
+
+/*
+** CR0A - default value 0x00
+*/
+typedef union _SMC37c669_CR0A {
+    unsigned char as_uchar;
+    struct {
+	unsigned ecp_fifo_threshold : 4;
+	unsigned reserved1 : 4;
+    } by_field;
+} SMC37c669_CR0A;
+
+/*
+** CR0B - default value 0x00
+*/
+typedef union _SMC37c669_CR0B {
+    unsigned char as_uchar;
+    struct {
+	unsigned fdd0_drtx : 2;		/* FDD0 Data Rate Table */
+	unsigned fdd1_drtx : 2;		/* FDD1 Data Rate Table */
+	unsigned fdd2_drtx : 2;		/* FDD2 Data Rate Table */
+	unsigned fdd3_drtx : 2;		/* FDD3 Data Rate Table */
+    } by_field;
+} SMC37c669_CR0B;
+
+/*
+** CR0C - default value 0x00
+**
+** UART2_MODE:
+**	000 - Standard (default)
+**	001 - IrDA (HPSIR)
+**	010 - Amplitude Shift Keyed IR @500 KHz
+**	011 - Reserved
+**	1xx - Reserved
+**
+*/
+typedef union _SMC37c669_CR0C {
+    unsigned char as_uchar;
+    struct {
+	unsigned uart2_rcv_polarity : 1;	/* 1 = invert RX */
+	unsigned uart2_xmit_polarity : 1;	/* 1 = invert TX */
+	unsigned uart2_duplex : 1;		/* 1 = full, 0 = half */
+	unsigned uart2_mode : 3;		/* See note above */
+	unsigned uart1_speed : 1;		/* 1 = high speed enabled */
+	unsigned uart2_speed : 1;		/* 1 = high speed enabled */
+    } by_field;
+} SMC37c669_CR0C;
+
+/*
+** CR0D - default value 0x03
+**
+** Device ID Register - read only
+*/
+typedef union _SMC37c669_CR0D {
+    unsigned char as_uchar;
+    struct {
+	unsigned device_id : 8;		/* Returns 0x3 in this field */
+    } by_field;
+} SMC37c669_CR0D;
+
+/*
+** CR0E - default value 0x02
+**
+** Device Revision Register - read only
+*/
+typedef union _SMC37c669_CR0E {
+    unsigned char as_uchar;
+    struct {
+	unsigned device_rev : 8;	/* Returns 0x2 in this field */
+    } by_field;
+} SMC37c669_CR0E;
+
+/*
+** CR0F - default value 0x00
+*/
+typedef union _SMC37c669_CR0F {
+    unsigned char as_uchar;
+    struct {
+	unsigned test0 : 1;		/* Reserved - set to 0 */
+	unsigned test1 : 1;		/* Reserved - set to 0 */
+	unsigned test2 : 1;		/* Reserved - set to 0 */
+	unsigned test3 : 1;		/* Reserved - set to 0 */
+	unsigned test4 : 1;		/* Reserved - set to 0 */
+	unsigned test5 : 1;		/* Reserved - set to 0 */
+	unsigned test6 : 1;		/* Reserved - set to 0 */
+	unsigned test7 : 1;		/* Reserved - set to 0 */
+    } by_field;
+} SMC37c669_CR0F;
+
+/*
+** CR10 - default value 0x00
+*/
+typedef union _SMC37c669_CR10 {
+    unsigned char as_uchar;
+    struct {
+	unsigned reserved1 : 3;		/* RAZ */
+	unsigned pll_gain : 1;		/* 1 = 3V, 2 = 5V operation */
+	unsigned pll_stop : 1;		/* 1 = stop PLLs */
+	unsigned ace_stop : 1;		/* 1 = stop UART clocks */
+	
unsigned pll_clock_ctrl : 1; /* 0 = 14.318 MHz, 1 = 24 MHz */ + unsigned ir_test : 1; /* Enable IR test mode */ + } by_field; +} SMC37c669_CR10; + +/* +** CR11 - default value 0x00 +*/ +typedef union _SMC37c669_CR11 { + unsigned char as_uchar; + struct { + unsigned ir_loopback : 1; /* Internal IR loop back */ + unsigned test_10ms : 1; /* Test 10ms autopowerdown FDC timeout */ + unsigned reserved1 : 6; /* RAZ */ + } by_field; +} SMC37c669_CR11; + +/* +** CR12 - CR1D are reserved registers +*/ + +/* +** CR1E - default value 0x80 +** +** GAMECS: +** 00 - GAMECS disabled +** 01 - 1 byte decode ADR<3:0> = 0001b +** 10 - 8 byte block decode ADR<3:0> = 0XXXb +** 11 - 16 byte block decode ADR<3:0> = XXXXb +** +*/ +typedef union _SMC37c66_CR1E { + unsigned char as_uchar; + struct { + unsigned gamecs_config: 2; /* See note above */ + unsigned gamecs_addr9_4 : 6; /* GAMECS Addr<9:4> */ + } by_field; +} SMC37c669_CR1E; + +/* +** CR1F - default value 0x00 +** +** DT0 DT1 DRVDEN0 DRVDEN1 Drive Type +** --- --- ------- ------- ---------- +** 0 0 DENSEL DRATE0 4/2/1 MB 3.5" +** 2/1 MB 5.25" +** 2/1.6/1 MB 3.5" (3-mode) +** 0 1 DRATE1 DRATE0 +** 1 0 nDENSEL DRATE0 PS/2 +** 1 1 DRATE0 DRATE1 +** +** Note: DENSEL, DRATE1, and DRATE0 map onto two output +** pins - DRVDEN0 and DRVDEN1. +** +*/ +typedef union _SMC37c669_CR1F { + unsigned char as_uchar; + struct { + unsigned fdd0_drive_type : 2; /* FDD0 drive type */ + unsigned fdd1_drive_type : 2; /* FDD1 drive type */ + unsigned fdd2_drive_type : 2; /* FDD2 drive type */ + unsigned fdd3_drive_type : 2; /* FDD3 drive type */ + } by_field; +} SMC37c669_CR1F; + +/* +** CR20 - default value 0x3C +** +** FDC Base Address Register +** - To disable this decode set Addr<9:8> = 0 +** - A<10> = 0, A<3:0> = 0XXXb to access. +** +*/ +typedef union _SMC37c669_CR20 { + unsigned char as_uchar; + struct { + unsigned zero : 2; /* 0 */ + unsigned addr9_4 : 6; /* FDC Addr<9:4> */ + } by_field; +} SMC37c669_CR20; + +/* +** CR21 - default value 0x3C +** +** IDE Base Address Register +** - To disable this decode set Addr<9:8> = 0 +** - A<10> = 0, A<3:0> = 0XXXb to access. +** +*/ +typedef union _SMC37c669_CR21 { + unsigned char as_uchar; + struct { + unsigned zero : 2; /* 0 */ + unsigned addr9_4 : 6; /* IDE Addr<9:4> */ + } by_field; +} SMC37c669_CR21; + +/* +** CR22 - default value 0x3D +** +** IDE Alternate Status Base Address Register +** - To disable this decode set Addr<9:8> = 0 +** - A<10> = 0, A<3:0> = 0110b to access. +** +*/ +typedef union _SMC37c669_CR22 { + unsigned char as_uchar; + struct { + unsigned zero : 2; /* 0 */ + unsigned addr9_4 : 6; /* IDE Alt Status Addr<9:4> */ + } by_field; +} SMC37c669_CR22; + +/* +** CR23 - default value 0x00 +** +** Parallel Port Base Address Register +** - To disable this decode set Addr<9:8> = 0 +** - A<10> = 0 to access. +** - If EPP is enabled, A<2:0> = XXXb to access. +** If EPP is NOT enabled, A<1:0> = XXb to access +** +*/ +typedef union _SMC37c669_CR23 { + unsigned char as_uchar; + struct { + unsigned addr9_2 : 8; /* Parallel Port Addr<9:2> */ + } by_field; +} SMC37c669_CR23; + +/* +** CR24 - default value 0x00 +** +** UART1 Base Address Register +** - To disable this decode set Addr<9:8> = 0 +** - A<10> = 0, A<2:0> = XXXb to access. 
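+**
+** Worked example: mapping UART1 at the conventional COM1_BASE of
+** 0x3F8 means programming addr9_3 = 0x3F8 >> 3 = 0x7F, which is
+** exactly what SMC37c669_enable_device() writes into this register.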
+** +*/ +typedef union _SMC37c669_CR24 { + unsigned char as_uchar; + struct { + unsigned zero : 1; /* 0 */ + unsigned addr9_3 : 7; /* UART1 Addr<9:3> */ + } by_field; +} SMC37c669_CR24; + +/* +** CR25 - default value 0x00 +** +** UART2 Base Address Register +** - To disable this decode set Addr<9:8> = 0 +** - A<10> = 0, A<2:0> = XXXb to access. +** +*/ +typedef union _SMC37c669_CR25 { + unsigned char as_uchar; + struct { + unsigned zero : 1; /* 0 */ + unsigned addr9_3 : 7; /* UART2 Addr<9:3> */ + } by_field; +} SMC37c669_CR25; + +/* +** CR26 - default value 0x00 +** +** Parallel Port / FDC DMA Select Register +** +** D3 - D0 DMA +** D7 - D4 Selected +** ------- -------- +** 0000 None +** 0001 DMA_A +** 0010 DMA_B +** 0011 DMA_C +** +*/ +typedef union _SMC37c669_CR26 { + unsigned char as_uchar; + struct { + unsigned ppt_drq : 4; /* See note above */ + unsigned fdc_drq : 4; /* See note above */ + } by_field; +} SMC37c669_CR26; + +/* +** CR27 - default value 0x00 +** +** Parallel Port / FDC IRQ Select Register +** +** D3 - D0 IRQ +** D7 - D4 Selected +** ------- -------- +** 0000 None +** 0001 IRQ_A +** 0010 IRQ_B +** 0011 IRQ_C +** 0100 IRQ_D +** 0101 IRQ_E +** 0110 IRQ_F +** 0111 Reserved +** 1000 IRQ_H +** +** Any unselected IRQ REQ is in tristate +** +*/ +typedef union _SMC37c669_CR27 { + unsigned char as_uchar; + struct { + unsigned ppt_irq : 4; /* See note above */ + unsigned fdc_irq : 4; /* See note above */ + } by_field; +} SMC37c669_CR27; + +/* +** CR28 - default value 0x00 +** +** UART IRQ Select Register +** +** D3 - D0 IRQ +** D7 - D4 Selected +** ------- -------- +** 0000 None +** 0001 IRQ_A +** 0010 IRQ_B +** 0011 IRQ_C +** 0100 IRQ_D +** 0101 IRQ_E +** 0110 IRQ_F +** 0111 Reserved +** 1000 IRQ_H +** 1111 share with UART1 (only for UART2) +** +** Any unselected IRQ REQ is in tristate +** +** To share an IRQ between UART1 and UART2, set +** UART1 to use the desired IRQ and set UART2 to +** 0xF to enable sharing mechanism. +** +*/ +typedef union _SMC37c669_CR28 { + unsigned char as_uchar; + struct { + unsigned uart2_irq : 4; /* See note above */ + unsigned uart1_irq : 4; /* See note above */ + } by_field; +} SMC37c669_CR28; + +/* +** CR29 - default value 0x00 +** +** IRQIN IRQ Select Register +** +** D3 - D0 IRQ +** D7 - D4 Selected +** ------- -------- +** 0000 None +** 0001 IRQ_A +** 0010 IRQ_B +** 0011 IRQ_C +** 0100 IRQ_D +** 0101 IRQ_E +** 0110 IRQ_F +** 0111 Reserved +** 1000 IRQ_H +** +** Any unselected IRQ REQ is in tristate +** +*/ +typedef union _SMC37c669_CR29 { + unsigned char as_uchar; + struct { + unsigned irqin_irq : 4; /* See note above */ + unsigned reserved1 : 4; /* RAZ */ + } by_field; +} SMC37c669_CR29; + +/* +** Aliases of Configuration Register formats (should match +** the set of index aliases). +** +** Note that CR24 and CR25 have the same format and are the +** base address registers for UART1 and UART2. Because of +** this we only define 1 alias here - for CR24 - as the serial +** base address register. +** +** Note that CR21 and CR22 have the same format and are the +** base address and alternate status address registers for +** the IDE controller. Because of this we only define 1 alias +** here - for CR21 - as the IDE address register. 
+** +*/ +typedef SMC37c669_CR0D SMC37c669_DEVICE_ID_REGISTER; +typedef SMC37c669_CR0E SMC37c669_DEVICE_REVISION_REGISTER; +typedef SMC37c669_CR20 SMC37c669_FDC_BASE_ADDRESS_REGISTER; +typedef SMC37c669_CR21 SMC37c669_IDE_ADDRESS_REGISTER; +typedef SMC37c669_CR23 SMC37c669_PARALLEL_BASE_ADDRESS_REGISTER; +typedef SMC37c669_CR24 SMC37c669_SERIAL_BASE_ADDRESS_REGISTER; +typedef SMC37c669_CR26 SMC37c669_PARALLEL_FDC_DRQ_REGISTER; +typedef SMC37c669_CR27 SMC37c669_PARALLEL_FDC_IRQ_REGISTER; +typedef SMC37c669_CR28 SMC37c669_SERIAL_IRQ_REGISTER; + +/* +** ISA/Device IRQ Translation Table Entry Definition +*/ +typedef struct _SMC37c669_IRQ_TRANSLATION_ENTRY { + int device_irq; + int isa_irq; +} SMC37c669_IRQ_TRANSLATION_ENTRY; + +/* +** ISA/Device DMA Translation Table Entry Definition +*/ +typedef struct _SMC37c669_DRQ_TRANSLATION_ENTRY { + int device_drq; + int isa_drq; +} SMC37c669_DRQ_TRANSLATION_ENTRY; + +/* +** External Interface Function Prototype Declarations +*/ + +SMC37c669_CONFIG_REGS *SMC37c669_detect( + int +); + +unsigned int SMC37c669_enable_device( + unsigned int func +); + +unsigned int SMC37c669_disable_device( + unsigned int func +); + +unsigned int SMC37c669_configure_device( + unsigned int func, + int port, + int irq, + int drq +); + +void SMC37c669_display_device_info( + void +); + +#endif /* __SMC37c669_H */ + +/* file: smcc669.c + * + * Copyright (C) 1997 by + * Digital Equipment Corporation, Maynard, Massachusetts. + * All rights reserved. + * + * This software is furnished under a license and may be used and copied + * only in accordance of the terms of such license and with the + * inclusion of the above copyright notice. This software or any other + * copies thereof may not be provided or otherwise made available to any + * other person. No title to and ownership of the software is hereby + * transferred. + * + * The information in this software is subject to change without notice + * and should not be construed as a commitment by digital equipment + * corporation. + * + * Digital assumes no responsibility for the use or reliability of its + * software on equipment which is not supplied by digital. + */ + +/* + *++ + * FACILITY: + * + * Alpha SRM Console Firmware + * + * MODULE DESCRIPTION: + * + * SMC37c669 Super I/O controller configuration routines. + * + * AUTHORS: + * + * Eric Rasmussen + * + * CREATION DATE: + * + * 28-Jan-1997 + * + * MODIFICATION HISTORY: + * + * er 01-May-1997 Fixed pointer conversion errors in + * SMC37c669_get_device_config(). + * er 28-Jan-1997 Initial version. + * + *-- + */ +#if 0 +/* $INCLUDE_OPTIONS$ */ +#include "cp$inc:platform_io.h" +/* $INCLUDE_OPTIONS_END$ */ +#include "cp$src:common.h" +#include "cp$inc:prototypes.h" +#include "cp$src:kernel_def.h" +#include "cp$src:msg_def.h" +#include "cp$src:smcc669_def.h" +/* Platform-specific includes */ +#include "cp$src:platform.h" +#endif + +#ifndef TRUE +#define TRUE 1 +#endif +#ifndef FALSE +#define FALSE 0 +#endif + +#define wb( _x_, _y_ ) outb( _y_, (unsigned int)((unsigned long)_x_) ) +#define rb( _x_ ) inb( (unsigned int)((unsigned long)_x_) ) + +/* +** Local storage for device configuration information. +** +** Since the SMC37c669 does not provide an explicit +** mechanism for enabling/disabling individual device +** functions, other than unmapping the device, local +** storage for device configuration information is +** allocated here for use in implementing our own +** function enable/disable scheme. 
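+**
+** In outline: SMC37c669_configure_device() records the desired
+** port/IRQ/DRQ in local_config[func]; the enable routine pushes
+** that shadow copy into the chip's mapping registers; disabling
+** only clears the chip, so a later enable can restore the prior
+** mapping straight from the shadow copy.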
+*/ +static struct DEVICE_CONFIG { + unsigned int port1; + unsigned int port2; + int irq; + int drq; +} local_config [NUM_FUNCS]; + +/* +** List of all possible addresses for the Super I/O chip +*/ +static unsigned long SMC37c669_Addresses[] __initdata = + { + 0x3F0UL, /* Primary address */ + 0x370UL, /* Secondary address */ + 0UL /* End of list */ + }; + +/* +** Global Pointer to the Super I/O device +*/ +static SMC37c669_CONFIG_REGS *SMC37c669 __initdata = NULL; + +/* +** IRQ Translation Table +** +** The IRQ translation table is a list of SMC37c669 device +** and standard ISA IRQs. +** +*/ +static SMC37c669_IRQ_TRANSLATION_ENTRY *SMC37c669_irq_table __initdata; + +/* +** The following definition is for the default IRQ +** translation table. +*/ +static SMC37c669_IRQ_TRANSLATION_ENTRY SMC37c669_default_irq_table[] +__initdata = + { + { SMC37c669_DEVICE_IRQ_A, -1 }, + { SMC37c669_DEVICE_IRQ_B, -1 }, + { SMC37c669_DEVICE_IRQ_C, 7 }, + { SMC37c669_DEVICE_IRQ_D, 6 }, + { SMC37c669_DEVICE_IRQ_E, 4 }, + { SMC37c669_DEVICE_IRQ_F, 3 }, + { SMC37c669_DEVICE_IRQ_H, -1 }, + { -1, -1 } /* End of table */ + }; + +/* +** The following definition is for the MONET (XP1000) IRQ +** translation table. +*/ +static SMC37c669_IRQ_TRANSLATION_ENTRY SMC37c669_monet_irq_table[] +__initdata = + { + { SMC37c669_DEVICE_IRQ_A, -1 }, + { SMC37c669_DEVICE_IRQ_B, -1 }, + { SMC37c669_DEVICE_IRQ_C, 6 }, + { SMC37c669_DEVICE_IRQ_D, 7 }, + { SMC37c669_DEVICE_IRQ_E, 4 }, + { SMC37c669_DEVICE_IRQ_F, 3 }, + { SMC37c669_DEVICE_IRQ_H, -1 }, + { -1, -1 } /* End of table */ + }; + +static SMC37c669_IRQ_TRANSLATION_ENTRY *SMC37c669_irq_tables[] __initdata = + { + SMC37c669_default_irq_table, + SMC37c669_monet_irq_table + }; + +/* +** DRQ Translation Table +** +** The DRQ translation table is a list of SMC37c669 device and +** ISA DMA channels. +** +*/ +static SMC37c669_DRQ_TRANSLATION_ENTRY *SMC37c669_drq_table __initdata; + +/* +** The following definition is the default DRQ +** translation table. +*/ +static SMC37c669_DRQ_TRANSLATION_ENTRY SMC37c669_default_drq_table[] +__initdata = + { + { SMC37c669_DEVICE_DRQ_A, 2 }, + { SMC37c669_DEVICE_DRQ_B, 3 }, + { SMC37c669_DEVICE_DRQ_C, -1 }, + { -1, -1 } /* End of table */ + }; + +/* +** Local Function Prototype Declarations +*/ + +static unsigned int SMC37c669_is_device_enabled( + unsigned int func +); + +#if 0 +static unsigned int SMC37c669_get_device_config( + unsigned int func, + int *port, + int *irq, + int *drq +); +#endif + +static void SMC37c669_config_mode( + unsigned int enable +); + +static unsigned char SMC37c669_read_config( + unsigned char index +); + +static void SMC37c669_write_config( + unsigned char index, + unsigned char data +); + +static void SMC37c669_init_local_config( void ); + +static struct DEVICE_CONFIG *SMC37c669_get_config( + unsigned int func +); + +static int SMC37c669_xlate_irq( + int irq +); + +static int SMC37c669_xlate_drq( + int drq +); + +static __cacheline_aligned DEFINE_SPINLOCK(smc_lock); + +/* +**++ +** FUNCTIONAL DESCRIPTION: +** +** This function detects the presence of an SMC37c669 Super I/O +** controller. +** +** FORMAL PARAMETERS: +** +** None +** +** RETURN VALUE: +** +** Returns a pointer to the device if found, otherwise, +** the NULL pointer is returned. 
+**
+**  SIDE EFFECTS:
+**
+**      None
+**
+**--
+*/
+SMC37c669_CONFIG_REGS * __init SMC37c669_detect( int index )
+{
+    int i;
+    SMC37c669_DEVICE_ID_REGISTER id;
+
+    for ( i = 0;  SMC37c669_Addresses[i] != 0;  i++ ) {
+/*
+** Initialize the device pointer even though we don't yet know if
+** the controller is at this address.  The support functions access
+** the controller through this device pointer so we need to set it
+** even when we are looking ...
+*/
+	SMC37c669 = ( SMC37c669_CONFIG_REGS * )SMC37c669_Addresses[i];
+/*
+** Enter configuration mode
+*/
+	SMC37c669_config_mode( TRUE );
+/*
+** Read the device id
+*/
+	id.as_uchar = SMC37c669_read_config( SMC37c669_DEVICE_ID_INDEX );
+/*
+** Exit configuration mode
+*/
+	SMC37c669_config_mode( FALSE );
+/*
+** Does the device id match?  If so, assume we have found an
+** SMC37c669 controller at this address.
+*/
+	if ( id.by_field.device_id == SMC37c669_DEVICE_ID ) {
+/*
+** Initialize the IRQ and DRQ translation tables.
+*/
+	    SMC37c669_irq_table = SMC37c669_irq_tables[ index ];
+	    SMC37c669_drq_table = SMC37c669_default_drq_table;
+/*
+** erfix
+**
+** If the platform can't use the IRQ and DRQ defaults set up in this
+** file, it should call a platform-specific external routine at this
+** point to reset the IRQ and DRQ translation table pointers to point
+** at the appropriate tables for the platform.  If the defaults are
+** acceptable, then the external routine should do nothing.
+*/
+
+/*
+** Put the chip back into configuration mode
+*/
+	    SMC37c669_config_mode( TRUE );
+/*
+** Initialize local storage for configuration information
+*/
+	    SMC37c669_init_local_config( );
+/*
+** Exit configuration mode
+*/
+	    SMC37c669_config_mode( FALSE );
+/*
+** SMC37c669 controller found, break out of search loop
+*/
+	    break;
+	}
+	else {
+/*
+** Otherwise, we did not find an SMC37c669 controller at this
+** address so set the device pointer to NULL.
+*/
+	    SMC37c669 = NULL;
+	}
+    }
+    return SMC37c669;
+}
+
+
+/*
+**++
+**  FUNCTIONAL DESCRIPTION:
+**
+**      This function enables an SMC37c669 device function.
+**
+**  FORMAL PARAMETERS:
+**
+**      func:
+**          Which device function to enable
+**
+**  RETURN VALUE:
+**
+**      Returns TRUE if the device function was enabled, otherwise, FALSE
+**
+**  SIDE EFFECTS:
+**
+**      {@description or none@}
+**
+**  DESIGN:
+**
+**      Enabling a device function in the SMC37c669 controller involves
+**	setting all of its mappings (port, irq, drq ...).  A local
+**	"shadow" copy of the device configuration is kept so we can
+**	just set each mapping to what the local copy says.
+**
+**	This function ALWAYS updates the local shadow configuration of
+**	the device function being enabled, even if the device is already
+**	enabled.  To avoid replication of code, functions such as
+**	configure_device set up the local copy and then call this
+**	function to update the real device.
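+**
+**	A hypothetical caller (platform setup code) might therefore do:
+**
+**	    SMC37c669_configure_device( SERIAL_0, COM1_BASE, COM1_IRQ, -1 );
+**	    SMC37c669_enable_device( SERIAL_0 );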
+** +**-- +*/ +unsigned int __init SMC37c669_enable_device ( unsigned int func ) +{ + unsigned int ret_val = FALSE; +/* +** Put the device into configuration mode +*/ + SMC37c669_config_mode( TRUE ); + switch ( func ) { + case SERIAL_0: + { + SMC37c669_SERIAL_BASE_ADDRESS_REGISTER base_addr; + SMC37c669_SERIAL_IRQ_REGISTER irq; +/* +** Enable the serial 1 IRQ mapping +*/ + irq.as_uchar = + SMC37c669_read_config( SMC37c669_SERIAL_IRQ_INDEX ); + + irq.by_field.uart1_irq = + SMC37c669_RAW_DEVICE_IRQ( + SMC37c669_xlate_irq( local_config[ func ].irq ) + ); + + SMC37c669_write_config( SMC37c669_SERIAL_IRQ_INDEX, irq.as_uchar ); +/* +** Enable the serial 1 port base address mapping +*/ + base_addr.as_uchar = 0; + base_addr.by_field.addr9_3 = local_config[ func ].port1 >> 3; + + SMC37c669_write_config( + SMC37c669_SERIAL0_BASE_ADDRESS_INDEX, + base_addr.as_uchar + ); + ret_val = TRUE; + break; + } + case SERIAL_1: + { + SMC37c669_SERIAL_BASE_ADDRESS_REGISTER base_addr; + SMC37c669_SERIAL_IRQ_REGISTER irq; +/* +** Enable the serial 2 IRQ mapping +*/ + irq.as_uchar = + SMC37c669_read_config( SMC37c669_SERIAL_IRQ_INDEX ); + + irq.by_field.uart2_irq = + SMC37c669_RAW_DEVICE_IRQ( + SMC37c669_xlate_irq( local_config[ func ].irq ) + ); + + SMC37c669_write_config( SMC37c669_SERIAL_IRQ_INDEX, irq.as_uchar ); +/* +** Enable the serial 2 port base address mapping +*/ + base_addr.as_uchar = 0; + base_addr.by_field.addr9_3 = local_config[ func ].port1 >> 3; + + SMC37c669_write_config( + SMC37c669_SERIAL1_BASE_ADDRESS_INDEX, + base_addr.as_uchar + ); + ret_val = TRUE; + break; + } + case PARALLEL_0: + { + SMC37c669_PARALLEL_BASE_ADDRESS_REGISTER base_addr; + SMC37c669_PARALLEL_FDC_IRQ_REGISTER irq; + SMC37c669_PARALLEL_FDC_DRQ_REGISTER drq; +/* +** Enable the parallel port DMA channel mapping +*/ + drq.as_uchar = + SMC37c669_read_config( SMC37c669_PARALLEL_FDC_DRQ_INDEX ); + + drq.by_field.ppt_drq = + SMC37c669_RAW_DEVICE_DRQ( + SMC37c669_xlate_drq( local_config[ func ].drq ) + ); + + SMC37c669_write_config( + SMC37c669_PARALLEL_FDC_DRQ_INDEX, + drq.as_uchar + ); +/* +** Enable the parallel port IRQ mapping +*/ + irq.as_uchar = + SMC37c669_read_config( SMC37c669_PARALLEL_FDC_IRQ_INDEX ); + + irq.by_field.ppt_irq = + SMC37c669_RAW_DEVICE_IRQ( + SMC37c669_xlate_irq( local_config[ func ].irq ) + ); + + SMC37c669_write_config( + SMC37c669_PARALLEL_FDC_IRQ_INDEX, + irq.as_uchar + ); +/* +** Enable the parallel port base address mapping +*/ + base_addr.as_uchar = 0; + base_addr.by_field.addr9_2 = local_config[ func ].port1 >> 2; + + SMC37c669_write_config( + SMC37c669_PARALLEL0_BASE_ADDRESS_INDEX, + base_addr.as_uchar + ); + ret_val = TRUE; + break; + } + case FLOPPY_0: + { + SMC37c669_FDC_BASE_ADDRESS_REGISTER base_addr; + SMC37c669_PARALLEL_FDC_IRQ_REGISTER irq; + SMC37c669_PARALLEL_FDC_DRQ_REGISTER drq; +/* +** Enable the floppy controller DMA channel mapping +*/ + drq.as_uchar = + SMC37c669_read_config( SMC37c669_PARALLEL_FDC_DRQ_INDEX ); + + drq.by_field.fdc_drq = + SMC37c669_RAW_DEVICE_DRQ( + SMC37c669_xlate_drq( local_config[ func ].drq ) + ); + + SMC37c669_write_config( + SMC37c669_PARALLEL_FDC_DRQ_INDEX, + drq.as_uchar + ); +/* +** Enable the floppy controller IRQ mapping +*/ + irq.as_uchar = + SMC37c669_read_config( SMC37c669_PARALLEL_FDC_IRQ_INDEX ); + + irq.by_field.fdc_irq = + SMC37c669_RAW_DEVICE_IRQ( + SMC37c669_xlate_irq( local_config[ func ].irq ) + ); + + SMC37c669_write_config( + SMC37c669_PARALLEL_FDC_IRQ_INDEX, + irq.as_uchar + ); +/* +** Enable the floppy controller base address mapping +*/ + 
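The base-address registers written above keep only the high bits of a 10-bit ISA port (addr<9:3> for the UARTs, addr<9:2> for the parallel port, addr<9:4> for the floppy and IDE functions), so each port must be naturally aligned to its register-window size. A standalone sketch of the encoding, runnable anywhere:

/* Sketch of the SMC37c669 base-address encodings: only the high bits
 * of the 10-bit ISA port fit in the configuration register. */
#include <stdio.h>

int main(void)
{
    printf("uart 0x3F8 -> reg 0x%02X\n", 0x3F8 >> 3);  /* addr9_3: 0x7F */
    printf("lpt  0x378 -> reg 0x%02X\n", 0x378 >> 2);  /* addr9_2: 0xDE */
    printf("fdc  0x3F0 -> reg 0x%02X\n", 0x3F0 >> 4);  /* addr9_4: 0x3F */
    return 0;
}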
base_addr.as_uchar = 0;
+        base_addr.by_field.addr9_4 = local_config[ func ].port1 >> 4;
+
+        SMC37c669_write_config(
+            SMC37c669_FDC_BASE_ADDRESS_INDEX,
+            base_addr.as_uchar
+        );
+        ret_val = TRUE;
+        break;
+    }
+    case IDE_0:
+    {
+        SMC37c669_IDE_ADDRESS_REGISTER ide_addr;
+/*
+** Enable the IDE alternate status base address mapping
+*/
+        ide_addr.as_uchar = 0;
+        ide_addr.by_field.addr9_4 = local_config[ func ].port2 >> 4;
+
+        SMC37c669_write_config(
+            SMC37c669_IDE_ALTERNATE_ADDRESS_INDEX,
+            ide_addr.as_uchar
+        );
+/*
+** Enable the IDE controller base address mapping
+*/
+        ide_addr.as_uchar = 0;
+        ide_addr.by_field.addr9_4 = local_config[ func ].port1 >> 4;
+
+        SMC37c669_write_config(
+            SMC37c669_IDE_BASE_ADDRESS_INDEX,
+            ide_addr.as_uchar
+        );
+        ret_val = TRUE;
+        break;
+    }
+    }
+/*
+** Exit configuration mode and return
+*/
+    SMC37c669_config_mode( FALSE );
+
+    return ret_val;
+}
+
+
+/*
+**++
+** FUNCTIONAL DESCRIPTION:
+**
+**      This function disables a device function within the
+**      SMC37c669 Super I/O controller.
+**
+** FORMAL PARAMETERS:
+**
+**      func:
+**          Which function to disable
+**
+** RETURN VALUE:
+**
+**      Returns TRUE if the device function was disabled, otherwise, FALSE
+**
+** SIDE EFFECTS:
+**
+**      {@description or none@}
+**
+** DESIGN:
+**
+**      Disabling a function in the SMC37c669 device involves
+**      disabling all the function's mappings (port, irq, drq ...).
+**      A shadow copy of the device configuration is maintained
+**      in local storage so we don't need to worry about saving the
+**      current configuration information.
+**
+**--
+*/
+unsigned int __init SMC37c669_disable_device ( unsigned int func )
+{
+    unsigned int ret_val = FALSE;
+
+/*
+** Put the device into configuration mode
+*/
+    SMC37c669_config_mode( TRUE );
+    switch ( func ) {
+    case SERIAL_0:
+    {
+        SMC37c669_SERIAL_BASE_ADDRESS_REGISTER base_addr;
+        SMC37c669_SERIAL_IRQ_REGISTER irq;
+/*
+** Disable the serial 1 IRQ mapping
+*/
+        irq.as_uchar =
+            SMC37c669_read_config( SMC37c669_SERIAL_IRQ_INDEX );
+
+        irq.by_field.uart1_irq = 0;
+
+        SMC37c669_write_config( SMC37c669_SERIAL_IRQ_INDEX, irq.as_uchar );
+/*
+** Disable the serial 1 port base address mapping
+*/
+        base_addr.as_uchar = 0;
+        SMC37c669_write_config(
+            SMC37c669_SERIAL0_BASE_ADDRESS_INDEX,
+            base_addr.as_uchar
+        );
+        ret_val = TRUE;
+        break;
+    }
+    case SERIAL_1:
+    {
+        SMC37c669_SERIAL_BASE_ADDRESS_REGISTER base_addr;
+        SMC37c669_SERIAL_IRQ_REGISTER irq;
+/*
+** Disable the serial 2 IRQ mapping
+*/
+        irq.as_uchar =
+            SMC37c669_read_config( SMC37c669_SERIAL_IRQ_INDEX );
+
+        irq.by_field.uart2_irq = 0;
+
+        SMC37c669_write_config( SMC37c669_SERIAL_IRQ_INDEX, irq.as_uchar );
+/*
+** Disable the serial 2 port base address mapping
+*/
+        base_addr.as_uchar = 0;
+
+        SMC37c669_write_config(
+            SMC37c669_SERIAL1_BASE_ADDRESS_INDEX,
+            base_addr.as_uchar
+        );
+        ret_val = TRUE;
+        break;
+    }
+    case PARALLEL_0:
+    {
+        SMC37c669_PARALLEL_BASE_ADDRESS_REGISTER base_addr;
+        SMC37c669_PARALLEL_FDC_IRQ_REGISTER irq;
+        SMC37c669_PARALLEL_FDC_DRQ_REGISTER drq;
+/*
+** Disable the parallel port DMA channel mapping
+*/
+        drq.as_uchar =
+            SMC37c669_read_config( SMC37c669_PARALLEL_FDC_DRQ_INDEX );
+
+        drq.by_field.ppt_drq = 0;
+
+        SMC37c669_write_config(
+            SMC37c669_PARALLEL_FDC_DRQ_INDEX,
+            drq.as_uchar
+        );
+/*
+** Disable the parallel port IRQ mapping
+*/
+        irq.as_uchar =
+            SMC37c669_read_config( SMC37c669_PARALLEL_FDC_IRQ_INDEX );
+
+        irq.by_field.ppt_irq = 0;
+
+        SMC37c669_write_config(
+            SMC37c669_PARALLEL_FDC_IRQ_INDEX,
+            irq.as_uchar
+        );
+/*
+** Disable the parallel port base address
mapping +*/ + base_addr.as_uchar = 0; + + SMC37c669_write_config( + SMC37c669_PARALLEL0_BASE_ADDRESS_INDEX, + base_addr.as_uchar + ); + ret_val = TRUE; + break; + } + case FLOPPY_0: + { + SMC37c669_FDC_BASE_ADDRESS_REGISTER base_addr; + SMC37c669_PARALLEL_FDC_IRQ_REGISTER irq; + SMC37c669_PARALLEL_FDC_DRQ_REGISTER drq; +/* +** Disable the floppy controller DMA channel mapping +*/ + drq.as_uchar = + SMC37c669_read_config( SMC37c669_PARALLEL_FDC_DRQ_INDEX ); + + drq.by_field.fdc_drq = 0; + + SMC37c669_write_config( + SMC37c669_PARALLEL_FDC_DRQ_INDEX, + drq.as_uchar + ); +/* +** Disable the floppy controller IRQ mapping +*/ + irq.as_uchar = + SMC37c669_read_config( SMC37c669_PARALLEL_FDC_IRQ_INDEX ); + + irq.by_field.fdc_irq = 0; + + SMC37c669_write_config( + SMC37c669_PARALLEL_FDC_IRQ_INDEX, + irq.as_uchar + ); +/* +** Disable the floppy controller base address mapping +*/ + base_addr.as_uchar = 0; + + SMC37c669_write_config( + SMC37c669_FDC_BASE_ADDRESS_INDEX, + base_addr.as_uchar + ); + ret_val = TRUE; + break; + } + case IDE_0: + { + SMC37c669_IDE_ADDRESS_REGISTER ide_addr; +/* +** Disable the IDE alternate status base address mapping +*/ + ide_addr.as_uchar = 0; + + SMC37c669_write_config( + SMC37c669_IDE_ALTERNATE_ADDRESS_INDEX, + ide_addr.as_uchar + ); +/* +** Disable the IDE controller base address mapping +*/ + ide_addr.as_uchar = 0; + + SMC37c669_write_config( + SMC37c669_IDE_BASE_ADDRESS_INDEX, + ide_addr.as_uchar + ); + ret_val = TRUE; + break; + } + } +/* +** Exit configuration mode and return +*/ + SMC37c669_config_mode( FALSE ); + + return ret_val; +} + + +/* +**++ +** FUNCTIONAL DESCRIPTION: +** +** This function configures a device function within the +** SMC37c669 Super I/O controller. +** +** FORMAL PARAMETERS: +** +** func: +** Which device function +** +** port: +** I/O port for the function to use +** +** irq: +** IRQ for the device function to use +** +** drq: +** DMA channel for the device function to use +** +** RETURN VALUE: +** +** Returns TRUE if the device function was configured, +** otherwise, FALSE. +** +** SIDE EFFECTS: +** +** {@description or none@} +** +** DESIGN: +** +** If this function returns TRUE, the local shadow copy of +** the configuration is also updated. If the device function +** is currently disabled, only the local shadow copy is +** updated and the actual device function will be updated +** if/when it is enabled. +** +**-- +*/ +unsigned int __init SMC37c669_configure_device ( + unsigned int func, + int port, + int irq, + int drq ) +{ + struct DEVICE_CONFIG *cp; + +/* +** Check for a valid configuration +*/ + if ( ( cp = SMC37c669_get_config ( func ) ) != NULL ) { +/* +** Configuration is valid, update the local shadow copy +*/ + if ( ( drq & ~0xFF ) == 0 ) { + cp->drq = drq; + } + if ( ( irq & ~0xFF ) == 0 ) { + cp->irq = irq; + } + if ( ( port & ~0xFFFF ) == 0 ) { + cp->port1 = port; + } +/* +** If the device function is enabled, update the actual +** device configuration. +*/ + if ( SMC37c669_is_device_enabled( func ) ) { + SMC37c669_enable_device( func ); + } + return TRUE; + } + return FALSE; +} + + +/* +**++ +** FUNCTIONAL DESCRIPTION: +** +** This function determines whether a device function +** within the SMC37c669 controller is enabled. 
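One subtlety in SMC37c669_configure_device() above: the range checks double as "leave unchanged" tests. A negative argument such as -1 has its high bits set, fails the mask test, and leaves the shadow field alone, which is why SMC669_Init() later can pass -1 as the DRQ for the serial ports. A minimal standalone illustration:

/* The "-1 means keep the old value" convention: -1 fails the
 * (value & ~mask) == 0 range check, so the field is not updated. */
#include <stdio.h>

static void update(int *field, int value, int mask)
{
    if ((value & ~mask) == 0)
        *field = value;         /* in range: accept the new value   */
}                               /* out of range (e.g. -1): keep old */

int main(void)
{
    int irq = 4;
    update(&irq, 7, 0xFF);   printf("irq = %d\n", irq);  /* prints 7 */
    update(&irq, -1, 0xFF);  printf("irq = %d\n", irq);  /* still 7  */
    return 0;
}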
+**
+** FORMAL PARAMETERS:
+**
+**      func:
+**          Which device function
+**
+** RETURN VALUE:
+**
+**      Returns TRUE if the device function is enabled, otherwise, FALSE
+**
+** SIDE EFFECTS:
+**
+**      {@description or none@}
+**
+** DESIGN:
+**
+**      To check whether a device is enabled we will only look at
+**      the port base address mapping. According to the SMC37c669
+**      specification, all of the port base address mappings are
+**      disabled if the addr<9:8> (bits <7:6> of the register) are
+**      zero.
+**
+**--
+*/
+static unsigned int __init SMC37c669_is_device_enabled ( unsigned int func )
+{
+    unsigned char base_addr = 0;
+    unsigned int dev_ok = FALSE;
+    unsigned int ret_val = FALSE;
+/*
+** Enter configuration mode
+*/
+    SMC37c669_config_mode( TRUE );
+
+    switch ( func ) {
+    case SERIAL_0:
+        base_addr =
+            SMC37c669_read_config( SMC37c669_SERIAL0_BASE_ADDRESS_INDEX );
+        dev_ok = TRUE;
+        break;
+    case SERIAL_1:
+        base_addr =
+            SMC37c669_read_config( SMC37c669_SERIAL1_BASE_ADDRESS_INDEX );
+        dev_ok = TRUE;
+        break;
+    case PARALLEL_0:
+        base_addr =
+            SMC37c669_read_config( SMC37c669_PARALLEL0_BASE_ADDRESS_INDEX );
+        dev_ok = TRUE;
+        break;
+    case FLOPPY_0:
+        base_addr =
+            SMC37c669_read_config( SMC37c669_FDC_BASE_ADDRESS_INDEX );
+        dev_ok = TRUE;
+        break;
+    case IDE_0:
+        base_addr =
+            SMC37c669_read_config( SMC37c669_IDE_BASE_ADDRESS_INDEX );
+        dev_ok = TRUE;
+        break;
+    }
+/*
+** If we have a valid device, check base_addr<7:6> to see if the
+** device is enabled (mapped).
+*/
+    if ( ( dev_ok ) && ( ( base_addr & 0xC0 ) != 0 ) ) {
+/*
+** The mapping is not disabled, so assume that the function is
+** enabled.
+*/
+        ret_val = TRUE;
+    }
+/*
+** Exit configuration mode
+*/
+    SMC37c669_config_mode( FALSE );
+
+    return ret_val;
+}
+
+
+#if 0
+/*
+**++
+** FUNCTIONAL DESCRIPTION:
+**
+**      This function retrieves the configuration information of a
+**      device function within the SMC37c669 Super I/O controller.
+**
+** FORMAL PARAMETERS:
+**
+**      func:
+**          Which device function
+**
+**      port:
+**          I/O port returned
+**
+**      irq:
+**          IRQ returned
+**
+**      drq:
+**          DMA channel returned
+**
+** RETURN VALUE:
+**
+**      Returns TRUE if the device configuration was successfully
+**      retrieved, otherwise, FALSE.
+**
+** SIDE EFFECTS:
+**
+**      The data pointed to by the port, irq, and drq parameters
+**      may be modified even if the configuration is not successfully
+**      retrieved.
+**
+** DESIGN:
+**
+**      The device configuration is fetched from the local shadow
+**      copy. Any unused parameters will be set to -1. Any
+**      parameter which is not desired can specify the NULL
+**      pointer.
+**
+**--
+*/
+static unsigned int __init SMC37c669_get_device_config (
+    unsigned int func,
+    int *port,
+    int *irq,
+    int *drq )
+{
+    struct DEVICE_CONFIG *cp;
+    unsigned int ret_val = FALSE;
+/*
+** Check for a valid device configuration
+*/
+    if ( ( cp = SMC37c669_get_config( func ) ) != NULL ) {
+        if ( drq != NULL ) {
+            *drq = cp->drq;
+            ret_val = TRUE;
+        }
+        if ( irq != NULL ) {
+            *irq = cp->irq;
+            ret_val = TRUE;
+        }
+        if ( port != NULL ) {
+            *port = cp->port1;
+            ret_val = TRUE;
+        }
+    }
+    return ret_val;
+}
+#endif
+
+
+/*
+**++
+** FUNCTIONAL DESCRIPTION:
+**
+**      This function displays the current state of the SMC37c669
+**      Super I/O controller's device functions.
+** +** FORMAL PARAMETERS: +** +** None +** +** RETURN VALUE: +** +** None +** +** SIDE EFFECTS: +** +** None +** +**-- +*/ +void __init SMC37c669_display_device_info ( void ) +{ + if ( SMC37c669_is_device_enabled( SERIAL_0 ) ) { + printk( " Serial 0: Enabled [ Port 0x%x, IRQ %d ]\n", + local_config[ SERIAL_0 ].port1, + local_config[ SERIAL_0 ].irq + ); + } + else { + printk( " Serial 0: Disabled\n" ); + } + + if ( SMC37c669_is_device_enabled( SERIAL_1 ) ) { + printk( " Serial 1: Enabled [ Port 0x%x, IRQ %d ]\n", + local_config[ SERIAL_1 ].port1, + local_config[ SERIAL_1 ].irq + ); + } + else { + printk( " Serial 1: Disabled\n" ); + } + + if ( SMC37c669_is_device_enabled( PARALLEL_0 ) ) { + printk( " Parallel: Enabled [ Port 0x%x, IRQ %d/%d ]\n", + local_config[ PARALLEL_0 ].port1, + local_config[ PARALLEL_0 ].irq, + local_config[ PARALLEL_0 ].drq + ); + } + else { + printk( " Parallel: Disabled\n" ); + } + + if ( SMC37c669_is_device_enabled( FLOPPY_0 ) ) { + printk( " Floppy Ctrl: Enabled [ Port 0x%x, IRQ %d/%d ]\n", + local_config[ FLOPPY_0 ].port1, + local_config[ FLOPPY_0 ].irq, + local_config[ FLOPPY_0 ].drq + ); + } + else { + printk( " Floppy Ctrl: Disabled\n" ); + } + + if ( SMC37c669_is_device_enabled( IDE_0 ) ) { + printk( " IDE 0: Enabled [ Port 0x%x, IRQ %d ]\n", + local_config[ IDE_0 ].port1, + local_config[ IDE_0 ].irq + ); + } + else { + printk( " IDE 0: Disabled\n" ); + } +} + + +/* +**++ +** FUNCTIONAL DESCRIPTION: +** +** This function puts the SMC37c669 Super I/O controller into, +** and takes it out of, configuration mode. +** +** FORMAL PARAMETERS: +** +** enable: +** TRUE to enter configuration mode, FALSE to exit. +** +** RETURN VALUE: +** +** None +** +** SIDE EFFECTS: +** +** The SMC37c669 controller may be left in configuration mode. +** +**-- +*/ +static void __init SMC37c669_config_mode( + unsigned int enable ) +{ + if ( enable ) { +/* +** To enter configuration mode, two writes in succession to the index +** port are required. If a write to another address or port occurs +** between these two writes, the chip does not enter configuration +** mode. Therefore, a spinlock is placed around the two writes to +** guarantee that they complete uninterrupted. +*/ + spin_lock(&smc_lock); + wb( &SMC37c669->index_port, SMC37c669_CONFIG_ON_KEY ); + wb( &SMC37c669->index_port, SMC37c669_CONFIG_ON_KEY ); + spin_unlock(&smc_lock); + } + else { + wb( &SMC37c669->index_port, SMC37c669_CONFIG_OFF_KEY ); + } +} + +/* +**++ +** FUNCTIONAL DESCRIPTION: +** +** This function reads an SMC37c669 Super I/O controller +** configuration register. This function assumes that the +** device is already in configuration mode. +** +** FORMAL PARAMETERS: +** +** index: +** Index value of configuration register to read +** +** RETURN VALUE: +** +** Data read from configuration register +** +** SIDE EFFECTS: +** +** None +** +**-- +*/ +static unsigned char __init SMC37c669_read_config( + unsigned char index ) +{ + unsigned char data; + + wb( &SMC37c669->index_port, index ); + data = rb( &SMC37c669->data_port ); + return data; +} + +/* +**++ +** FUNCTIONAL DESCRIPTION: +** +** This function writes an SMC37c669 Super I/O controller +** configuration register. This function assumes that the +** device is already in configuration mode. 
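The whole configuration interface is the index/data pair shown in config_mode() and read_config(): write the key byte twice to enter configuration mode, write a register index to the index port, then read or write the data port. On Alpha the driver goes through wb()/rb() on a mapped address; on a PC-style machine the same sequence could be sketched from user space as below (assumes x86 <sys/io.h>, root privileges, and an illustrative device-ID index of 0x0D; treat all constants as placeholders, not chip documentation):

/* User-space sketch of the index/data configuration protocol.
 * Constants are illustrative; this is not the kernel driver. */
#include <stdio.h>
#include <sys/io.h>

#define CFG_INDEX 0x3F0             /* index port; data port is +1  */
#define ON_KEY    0x55
#define OFF_KEY   0xAA

int main(void)
{
    if (ioperm(CFG_INDEX, 2, 1)) { perror("ioperm"); return 1; }
    outb(ON_KEY, CFG_INDEX);        /* two back-to-back writes ...   */
    outb(ON_KEY, CFG_INDEX);        /* ... enter configuration mode  */
    outb(0x0D, CFG_INDEX);          /* select the device-ID register */
    printf("device id: 0x%02x\n", inb(CFG_INDEX + 1));
    outb(OFF_KEY, CFG_INDEX);       /* leave configuration mode      */
    return 0;
}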
+** +** FORMAL PARAMETERS: +** +** index: +** Index of configuration register to write +** +** data: +** Data to be written +** +** RETURN VALUE: +** +** None +** +** SIDE EFFECTS: +** +** None +** +**-- +*/ +static void __init SMC37c669_write_config( + unsigned char index, + unsigned char data ) +{ + wb( &SMC37c669->index_port, index ); + wb( &SMC37c669->data_port, data ); +} + + +/* +**++ +** FUNCTIONAL DESCRIPTION: +** +** This function initializes the local device +** configuration storage. This function assumes +** that the device is already in configuration +** mode. +** +** FORMAL PARAMETERS: +** +** None +** +** RETURN VALUE: +** +** None +** +** SIDE EFFECTS: +** +** Local storage for device configuration information +** is initialized. +** +**-- +*/ +static void __init SMC37c669_init_local_config ( void ) +{ + SMC37c669_SERIAL_BASE_ADDRESS_REGISTER uart_base; + SMC37c669_SERIAL_IRQ_REGISTER uart_irqs; + SMC37c669_PARALLEL_BASE_ADDRESS_REGISTER ppt_base; + SMC37c669_PARALLEL_FDC_IRQ_REGISTER ppt_fdc_irqs; + SMC37c669_PARALLEL_FDC_DRQ_REGISTER ppt_fdc_drqs; + SMC37c669_FDC_BASE_ADDRESS_REGISTER fdc_base; + SMC37c669_IDE_ADDRESS_REGISTER ide_base; + SMC37c669_IDE_ADDRESS_REGISTER ide_alt; + +/* +** Get serial port 1 base address +*/ + uart_base.as_uchar = + SMC37c669_read_config( SMC37c669_SERIAL0_BASE_ADDRESS_INDEX ); +/* +** Get IRQs for serial ports 1 & 2 +*/ + uart_irqs.as_uchar = + SMC37c669_read_config( SMC37c669_SERIAL_IRQ_INDEX ); +/* +** Store local configuration information for serial port 1 +*/ + local_config[SERIAL_0].port1 = uart_base.by_field.addr9_3 << 3; + local_config[SERIAL_0].irq = + SMC37c669_xlate_irq( + SMC37c669_DEVICE_IRQ( uart_irqs.by_field.uart1_irq ) + ); +/* +** Get serial port 2 base address +*/ + uart_base.as_uchar = + SMC37c669_read_config( SMC37c669_SERIAL1_BASE_ADDRESS_INDEX ); +/* +** Store local configuration information for serial port 2 +*/ + local_config[SERIAL_1].port1 = uart_base.by_field.addr9_3 << 3; + local_config[SERIAL_1].irq = + SMC37c669_xlate_irq( + SMC37c669_DEVICE_IRQ( uart_irqs.by_field.uart2_irq ) + ); +/* +** Get parallel port base address +*/ + ppt_base.as_uchar = + SMC37c669_read_config( SMC37c669_PARALLEL0_BASE_ADDRESS_INDEX ); +/* +** Get IRQs for parallel port and floppy controller +*/ + ppt_fdc_irqs.as_uchar = + SMC37c669_read_config( SMC37c669_PARALLEL_FDC_IRQ_INDEX ); +/* +** Get DRQs for parallel port and floppy controller +*/ + ppt_fdc_drqs.as_uchar = + SMC37c669_read_config( SMC37c669_PARALLEL_FDC_DRQ_INDEX ); +/* +** Store local configuration information for parallel port +*/ + local_config[PARALLEL_0].port1 = ppt_base.by_field.addr9_2 << 2; + local_config[PARALLEL_0].irq = + SMC37c669_xlate_irq( + SMC37c669_DEVICE_IRQ( ppt_fdc_irqs.by_field.ppt_irq ) + ); + local_config[PARALLEL_0].drq = + SMC37c669_xlate_drq( + SMC37c669_DEVICE_DRQ( ppt_fdc_drqs.by_field.ppt_drq ) + ); +/* +** Get floppy controller base address +*/ + fdc_base.as_uchar = + SMC37c669_read_config( SMC37c669_FDC_BASE_ADDRESS_INDEX ); +/* +** Store local configuration information for floppy controller +*/ + local_config[FLOPPY_0].port1 = fdc_base.by_field.addr9_4 << 4; + local_config[FLOPPY_0].irq = + SMC37c669_xlate_irq( + SMC37c669_DEVICE_IRQ( ppt_fdc_irqs.by_field.fdc_irq ) + ); + local_config[FLOPPY_0].drq = + SMC37c669_xlate_drq( + SMC37c669_DEVICE_DRQ( ppt_fdc_drqs.by_field.fdc_drq ) + ); +/* +** Get IDE controller base address +*/ + ide_base.as_uchar = + SMC37c669_read_config( SMC37c669_IDE_BASE_ADDRESS_INDEX ); +/* +** Get IDE alternate status 
base address +*/ + ide_alt.as_uchar = + SMC37c669_read_config( SMC37c669_IDE_ALTERNATE_ADDRESS_INDEX ); +/* +** Store local configuration information for IDE controller +*/ + local_config[IDE_0].port1 = ide_base.by_field.addr9_4 << 4; + local_config[IDE_0].port2 = ide_alt.by_field.addr9_4 << 4; + local_config[IDE_0].irq = 14; +} + + +/* +**++ +** FUNCTIONAL DESCRIPTION: +** +** This function returns a pointer to the local shadow +** configuration of the requested device function. +** +** FORMAL PARAMETERS: +** +** func: +** Which device function +** +** RETURN VALUE: +** +** Returns a pointer to the DEVICE_CONFIG structure for the +** requested function, otherwise, NULL. +** +** SIDE EFFECTS: +** +** {@description or none@} +** +**-- +*/ +static struct DEVICE_CONFIG * __init SMC37c669_get_config( unsigned int func ) +{ + struct DEVICE_CONFIG *cp = NULL; + + switch ( func ) { + case SERIAL_0: + cp = &local_config[ SERIAL_0 ]; + break; + case SERIAL_1: + cp = &local_config[ SERIAL_1 ]; + break; + case PARALLEL_0: + cp = &local_config[ PARALLEL_0 ]; + break; + case FLOPPY_0: + cp = &local_config[ FLOPPY_0 ]; + break; + case IDE_0: + cp = &local_config[ IDE_0 ]; + break; + } + return cp; +} + +/* +**++ +** FUNCTIONAL DESCRIPTION: +** +** This function translates IRQs back and forth between ISA +** IRQs and SMC37c669 device IRQs. +** +** FORMAL PARAMETERS: +** +** irq: +** The IRQ to translate +** +** RETURN VALUE: +** +** Returns the translated IRQ, otherwise, returns -1. +** +** SIDE EFFECTS: +** +** {@description or none@} +** +**-- +*/ +static int __init SMC37c669_xlate_irq ( int irq ) +{ + int i, translated_irq = -1; + + if ( SMC37c669_IS_DEVICE_IRQ( irq ) ) { +/* +** We are translating a device IRQ to an ISA IRQ +*/ + for ( i = 0; ( SMC37c669_irq_table[i].device_irq != -1 ) || ( SMC37c669_irq_table[i].isa_irq != -1 ); i++ ) { + if ( irq == SMC37c669_irq_table[i].device_irq ) { + translated_irq = SMC37c669_irq_table[i].isa_irq; + break; + } + } + } + else { +/* +** We are translating an ISA IRQ to a device IRQ +*/ + for ( i = 0; ( SMC37c669_irq_table[i].isa_irq != -1 ) || ( SMC37c669_irq_table[i].device_irq != -1 ); i++ ) { + if ( irq == SMC37c669_irq_table[i].isa_irq ) { + translated_irq = SMC37c669_irq_table[i].device_irq; + break; + } + } + } + return translated_irq; +} + + +/* +**++ +** FUNCTIONAL DESCRIPTION: +** +** This function translates DMA channels back and forth between +** ISA DMA channels and SMC37c669 device DMA channels. 
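Both directions of SMC37c669_xlate_irq(), and the DRQ twin that follows, are one pattern: scan a pair table until the { -1, -1 } sentinel and return the opposite column. Note that the loop condition uses ||, so an entry like { SMC37c669_DEVICE_IRQ_A, -1 } (a real pin that is simply unrouted) does not stop the scan. The pattern in generic form:

/* Generic sentinel-terminated pair-table lookup, the shape shared by
 * the IRQ and DRQ translators. Only a { -1, -1 } entry ends the scan. */
struct xlate_entry { int from, to; };

static int xlate_lookup(const struct xlate_entry *t, int key)
{
    for (; t->from != -1 || t->to != -1; t++)
        if (t->from == key)
            return t->to;
    return -1;                  /* no translation found */
}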
+** +** FORMAL PARAMETERS: +** +** drq: +** The DMA channel to translate +** +** RETURN VALUE: +** +** Returns the translated DMA channel, otherwise, returns -1 +** +** SIDE EFFECTS: +** +** {@description or none@} +** +**-- +*/ +static int __init SMC37c669_xlate_drq ( int drq ) +{ + int i, translated_drq = -1; + + if ( SMC37c669_IS_DEVICE_DRQ( drq ) ) { +/* +** We are translating a device DMA channel to an ISA DMA channel +*/ + for ( i = 0; ( SMC37c669_drq_table[i].device_drq != -1 ) || ( SMC37c669_drq_table[i].isa_drq != -1 ); i++ ) { + if ( drq == SMC37c669_drq_table[i].device_drq ) { + translated_drq = SMC37c669_drq_table[i].isa_drq; + break; + } + } + } + else { +/* +** We are translating an ISA DMA channel to a device DMA channel +*/ + for ( i = 0; ( SMC37c669_drq_table[i].isa_drq != -1 ) || ( SMC37c669_drq_table[i].device_drq != -1 ); i++ ) { + if ( drq == SMC37c669_drq_table[i].isa_drq ) { + translated_drq = SMC37c669_drq_table[i].device_drq; + break; + } + } + } + return translated_drq; +} + +#if 0 +int __init smcc669_init ( void ) +{ + struct INODE *ip; + + allocinode( smc_ddb.name, 1, &ip ); + ip->dva = &smc_ddb; + ip->attr = ATTR$M_WRITE | ATTR$M_READ; + ip->len[0] = 0x30; + ip->misc = 0; + INODE_UNLOCK( ip ); + + return msg_success; +} + +int __init smcc669_open( struct FILE *fp, char *info, char *next, char *mode ) +{ + struct INODE *ip; +/* +** Allow multiple readers but only one writer. ip->misc keeps track +** of the number of writers +*/ + ip = fp->ip; + INODE_LOCK( ip ); + if ( fp->mode & ATTR$M_WRITE ) { + if ( ip->misc ) { + INODE_UNLOCK( ip ); + return msg_failure; /* too many writers */ + } + ip->misc++; + } +/* +** Treat the information field as a byte offset +*/ + *fp->offset = xtoi( info ); + INODE_UNLOCK( ip ); + + return msg_success; +} + +int __init smcc669_close( struct FILE *fp ) +{ + struct INODE *ip; + + ip = fp->ip; + if ( fp->mode & ATTR$M_WRITE ) { + INODE_LOCK( ip ); + ip->misc--; + INODE_UNLOCK( ip ); + } + return msg_success; +} + +int __init smcc669_read( struct FILE *fp, int size, int number, unsigned char *buf ) +{ + int i; + int length; + int nbytes; + struct INODE *ip; + +/* +** Always access a byte at a time +*/ + ip = fp->ip; + length = size * number; + nbytes = 0; + + SMC37c669_config_mode( TRUE ); + for ( i = 0; i < length; i++ ) { + if ( !inrange( *fp->offset, 0, ip->len[0] ) ) + break; + *buf++ = SMC37c669_read_config( *fp->offset ); + *fp->offset += 1; + nbytes++; + } + SMC37c669_config_mode( FALSE ); + return nbytes; +} + +int __init smcc669_write( struct FILE *fp, int size, int number, unsigned char *buf ) +{ + int i; + int length; + int nbytes; + struct INODE *ip; +/* +** Always access a byte at a time +*/ + ip = fp->ip; + length = size * number; + nbytes = 0; + + SMC37c669_config_mode( TRUE ); + for ( i = 0; i < length; i++ ) { + if ( !inrange( *fp->offset, 0, ip->len[0] ) ) + break; + SMC37c669_write_config( *fp->offset, *buf ); + *fp->offset += 1; + buf++; + nbytes++; + } + SMC37c669_config_mode( FALSE ); + return nbytes; +} +#endif + +void __init +SMC37c669_dump_registers(void) +{ + int i; + for (i = 0; i <= 0x29; i++) + printk("-- CR%02x : %02x\n", i, SMC37c669_read_config(i)); +} +/*+ + * ============================================================================ + * = SMC_init - SMC37c669 Super I/O controller initialization = + * ============================================================================ + * + * OVERVIEW: + * + * This routine configures and enables device functions on the + * SMC37c669 Super I/O controller. 
+ * + * FORM OF CALL: + * + * SMC_init( ); + * + * RETURNS: + * + * Nothing + * + * ARGUMENTS: + * + * None + * + * SIDE EFFECTS: + * + * None + * + */ +void __init SMC669_Init ( int index ) +{ + SMC37c669_CONFIG_REGS *SMC_base; + unsigned long flags; + + local_irq_save(flags); + if ( ( SMC_base = SMC37c669_detect( index ) ) != NULL ) { +#if SMC_DEBUG + SMC37c669_config_mode( TRUE ); + SMC37c669_dump_registers( ); + SMC37c669_config_mode( FALSE ); + SMC37c669_display_device_info( ); +#endif + SMC37c669_disable_device( SERIAL_0 ); + SMC37c669_configure_device( + SERIAL_0, + COM1_BASE, + COM1_IRQ, + -1 + ); + SMC37c669_enable_device( SERIAL_0 ); + + SMC37c669_disable_device( SERIAL_1 ); + SMC37c669_configure_device( + SERIAL_1, + COM2_BASE, + COM2_IRQ, + -1 + ); + SMC37c669_enable_device( SERIAL_1 ); + + SMC37c669_disable_device( PARALLEL_0 ); + SMC37c669_configure_device( + PARALLEL_0, + PARP_BASE, + PARP_IRQ, + PARP_DRQ + ); + SMC37c669_enable_device( PARALLEL_0 ); + + SMC37c669_disable_device( FLOPPY_0 ); + SMC37c669_configure_device( + FLOPPY_0, + FDC_BASE, + FDC_IRQ, + FDC_DRQ + ); + SMC37c669_enable_device( FLOPPY_0 ); + + /* Wake up sometimes forgotten floppy, especially on DP264. */ + outb(0xc, 0x3f2); + + SMC37c669_disable_device( IDE_0 ); + +#if SMC_DEBUG + SMC37c669_config_mode( TRUE ); + SMC37c669_dump_registers( ); + SMC37c669_config_mode( FALSE ); + SMC37c669_display_device_info( ); +#endif + local_irq_restore(flags); + printk( "SMC37c669 Super I/O Controller found @ 0x%lx\n", + (unsigned long) SMC_base ); + } + else { + local_irq_restore(flags); +#if SMC_DEBUG + printk( "No SMC37c669 Super I/O Controller found\n" ); +#endif + } +} diff --git a/arch/alpha/kernel/smc37c93x.c b/arch/alpha/kernel/smc37c93x.c new file mode 100644 index 0000000..421e51e --- /dev/null +++ b/arch/alpha/kernel/smc37c93x.c @@ -0,0 +1,277 @@ +/* + * SMC 37C93X initialization code + */ + +#include <linux/config.h> +#include <linux/kernel.h> + +#include <linux/slab.h> +#include <linux/mm.h> +#include <linux/init.h> +#include <linux/delay.h> + +#include <asm/hwrpb.h> +#include <asm/io.h> +#include <asm/segment.h> + +#define SMC_DEBUG 0 + +#if SMC_DEBUG +# define DBG_DEVS(args) printk args +#else +# define DBG_DEVS(args) +#endif + +#define KB 1024 +#define MB (1024*KB) +#define GB (1024*MB) + +/* device "activate" register contents */ +#define DEVICE_ON 1 +#define DEVICE_OFF 0 + +/* configuration on/off keys */ +#define CONFIG_ON_KEY 0x55 +#define CONFIG_OFF_KEY 0xaa + +/* configuration space device definitions */ +#define FDC 0 +#define IDE1 1 +#define IDE2 2 +#define PARP 3 +#define SER1 4 +#define SER2 5 +#define RTCL 6 +#define KYBD 7 +#define AUXIO 8 + +/* Chip register offsets from base */ +#define CONFIG_CONTROL 0x02 +#define INDEX_ADDRESS 0x03 +#define LOGICAL_DEVICE_NUMBER 0x07 +#define DEVICE_ID 0x20 +#define DEVICE_REV 0x21 +#define POWER_CONTROL 0x22 +#define POWER_MGMT 0x23 +#define OSC 0x24 + +#define ACTIVATE 0x30 +#define ADDR_HI 0x60 +#define ADDR_LO 0x61 +#define INTERRUPT_SEL 0x70 +#define INTERRUPT_SEL_2 0x72 /* KYBD/MOUS only */ +#define DMA_CHANNEL_SEL 0x74 /* FDC/PARP only */ + +#define FDD_MODE_REGISTER 0x90 +#define FDD_OPTION_REGISTER 0x91 + +/* values that we read back that are expected ... 
*/ +#define VALID_DEVICE_ID 2 + +/* default device addresses */ +#define KYBD_INTERRUPT 1 +#define MOUS_INTERRUPT 12 +#define COM2_BASE 0x2f8 +#define COM2_INTERRUPT 3 +#define COM1_BASE 0x3f8 +#define COM1_INTERRUPT 4 +#define PARP_BASE 0x3bc +#define PARP_INTERRUPT 7 + +static unsigned long __init SMCConfigState(unsigned long baseAddr) +{ + unsigned char devId; + unsigned char devRev; + + unsigned long configPort; + unsigned long indexPort; + unsigned long dataPort; + + int i; + + configPort = indexPort = baseAddr; + dataPort = configPort + 1; + +#define NUM_RETRIES 5 + + for (i = 0; i < NUM_RETRIES; i++) + { + outb(CONFIG_ON_KEY, configPort); + outb(CONFIG_ON_KEY, configPort); + outb(DEVICE_ID, indexPort); + devId = inb(dataPort); + if (devId == VALID_DEVICE_ID) { + outb(DEVICE_REV, indexPort); + devRev = inb(dataPort); + break; + } + else + udelay(100); + } + return (i != NUM_RETRIES) ? baseAddr : 0L; +} + +static void __init SMCRunState(unsigned long baseAddr) +{ + outb(CONFIG_OFF_KEY, baseAddr); +} + +static unsigned long __init SMCDetectUltraIO(void) +{ + unsigned long baseAddr; + + baseAddr = 0x3F0; + if ( ( baseAddr = SMCConfigState( baseAddr ) ) == 0x3F0 ) { + return( baseAddr ); + } + baseAddr = 0x370; + if ( ( baseAddr = SMCConfigState( baseAddr ) ) == 0x370 ) { + return( baseAddr ); + } + return( ( unsigned long )0 ); +} + +static void __init SMCEnableDevice(unsigned long baseAddr, + unsigned long device, + unsigned long portaddr, + unsigned long interrupt) +{ + unsigned long indexPort; + unsigned long dataPort; + + indexPort = baseAddr; + dataPort = baseAddr + 1; + + outb(LOGICAL_DEVICE_NUMBER, indexPort); + outb(device, dataPort); + + outb(ADDR_LO, indexPort); + outb(( portaddr & 0xFF ), dataPort); + + outb(ADDR_HI, indexPort); + outb((portaddr >> 8) & 0xFF, dataPort); + + outb(INTERRUPT_SEL, indexPort); + outb(interrupt, dataPort); + + outb(ACTIVATE, indexPort); + outb(DEVICE_ON, dataPort); +} + +static void __init SMCEnableKYBD(unsigned long baseAddr) +{ + unsigned long indexPort; + unsigned long dataPort; + + indexPort = baseAddr; + dataPort = baseAddr + 1; + + outb(LOGICAL_DEVICE_NUMBER, indexPort); + outb(KYBD, dataPort); + + outb(INTERRUPT_SEL, indexPort); /* Primary interrupt select */ + outb(KYBD_INTERRUPT, dataPort); + + outb(INTERRUPT_SEL_2, indexPort); /* Secondary interrupt select */ + outb(MOUS_INTERRUPT, dataPort); + + outb(ACTIVATE, indexPort); + outb(DEVICE_ON, dataPort); +} + +static void __init SMCEnableFDC(unsigned long baseAddr) +{ + unsigned long indexPort; + unsigned long dataPort; + + unsigned char oldValue; + + indexPort = baseAddr; + dataPort = baseAddr + 1; + + outb(LOGICAL_DEVICE_NUMBER, indexPort); + outb(FDC, dataPort); + + outb(FDD_MODE_REGISTER, indexPort); + oldValue = inb(dataPort); + + oldValue |= 0x0E; /* Enable burst mode */ + outb(oldValue, dataPort); + + outb(INTERRUPT_SEL, indexPort); /* Primary interrupt select */ + outb(0x06, dataPort ); + + outb(DMA_CHANNEL_SEL, indexPort); /* DMA channel select */ + outb(0x02, dataPort); + + outb(ACTIVATE, indexPort); + outb(DEVICE_ON, dataPort); +} + +#if SMC_DEBUG +static void __init SMCReportDeviceStatus(unsigned long baseAddr) +{ + unsigned long indexPort; + unsigned long dataPort; + unsigned char currentControl; + + indexPort = baseAddr; + dataPort = baseAddr + 1; + + outb(POWER_CONTROL, indexPort); + currentControl = inb(dataPort); + + printk(currentControl & (1 << FDC) + ? "\t+FDC Enabled\n" : "\t-FDC Disabled\n"); + printk(currentControl & (1 << IDE1) + ? 
"\t+IDE1 Enabled\n" : "\t-IDE1 Disabled\n"); + printk(currentControl & (1 << IDE2) + ? "\t+IDE2 Enabled\n" : "\t-IDE2 Disabled\n"); + printk(currentControl & (1 << PARP) + ? "\t+PARP Enabled\n" : "\t-PARP Disabled\n"); + printk(currentControl & (1 << SER1) + ? "\t+SER1 Enabled\n" : "\t-SER1 Disabled\n"); + printk(currentControl & (1 << SER2) + ? "\t+SER2 Enabled\n" : "\t-SER2 Disabled\n"); + + printk( "\n" ); +} +#endif + +int __init SMC93x_Init(void) +{ + unsigned long SMCUltraBase; + unsigned long flags; + + local_irq_save(flags); + if ((SMCUltraBase = SMCDetectUltraIO()) != 0UL) { +#if SMC_DEBUG + SMCReportDeviceStatus(SMCUltraBase); +#endif + SMCEnableDevice(SMCUltraBase, SER1, COM1_BASE, COM1_INTERRUPT); + DBG_DEVS(("SMC FDC37C93X: SER1 done\n")); + SMCEnableDevice(SMCUltraBase, SER2, COM2_BASE, COM2_INTERRUPT); + DBG_DEVS(("SMC FDC37C93X: SER2 done\n")); + SMCEnableDevice(SMCUltraBase, PARP, PARP_BASE, PARP_INTERRUPT); + DBG_DEVS(("SMC FDC37C93X: PARP done\n")); + /* On PC164, IDE on the SMC is not enabled; + CMD646 (PCI) on MB */ + SMCEnableKYBD(SMCUltraBase); + DBG_DEVS(("SMC FDC37C93X: KYB done\n")); + SMCEnableFDC(SMCUltraBase); + DBG_DEVS(("SMC FDC37C93X: FDC done\n")); +#if SMC_DEBUG + SMCReportDeviceStatus(SMCUltraBase); +#endif + SMCRunState(SMCUltraBase); + local_irq_restore(flags); + printk("SMC FDC37C93X Ultra I/O Controller found @ 0x%lx\n", + SMCUltraBase); + return 1; + } + else { + local_irq_restore(flags); + DBG_DEVS(("No SMC FDC37C93X Ultra I/O Controller found\n")); + return 0; + } +} diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c new file mode 100644 index 0000000..8f1e785 --- /dev/null +++ b/arch/alpha/kernel/smp.c @@ -0,0 +1,1163 @@ +/* + * linux/arch/alpha/kernel/smp.c + * + * 2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com) + * Renamed modified smp_call_function to smp_call_function_on_cpu() + * Created an function that conforms to the old calling convention + * of smp_call_function(). + * + * This is helpful for DCPI. + * + */ + +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/kernel_stat.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/threads.h> +#include <linux/smp.h> +#include <linux/smp_lock.h> +#include <linux/interrupt.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/spinlock.h> +#include <linux/irq.h> +#include <linux/cache.h> +#include <linux/profile.h> +#include <linux/bitops.h> + +#include <asm/hwrpb.h> +#include <asm/ptrace.h> +#include <asm/atomic.h> + +#include <asm/io.h> +#include <asm/irq.h> +#include <asm/pgtable.h> +#include <asm/pgalloc.h> +#include <asm/mmu_context.h> +#include <asm/tlbflush.h> + +#include "proto.h" +#include "irq_impl.h" + + +#define DEBUG_SMP 0 +#if DEBUG_SMP +#define DBGS(args) printk args +#else +#define DBGS(args) +#endif + +/* A collection of per-processor data. */ +struct cpuinfo_alpha cpu_data[NR_CPUS]; + +/* A collection of single bit ipi messages. */ +static struct { + unsigned long bits ____cacheline_aligned; +} ipi_data[NR_CPUS] __cacheline_aligned; + +enum ipi_message_type { + IPI_RESCHEDULE, + IPI_CALL_FUNC, + IPI_CPU_STOP, +}; + +/* Set to a secondary's cpuid when it comes online. */ +static int smp_secondary_alive __initdata = 0; + +/* Which cpus ids came online. 
*/ +cpumask_t cpu_present_mask; +cpumask_t cpu_online_map; + +EXPORT_SYMBOL(cpu_online_map); + +/* cpus reported in the hwrpb */ +static unsigned long hwrpb_cpu_present_mask __initdata = 0; + +int smp_num_probed; /* Internal processor count */ +int smp_num_cpus = 1; /* Number that came online. */ + +extern void calibrate_delay(void); + + + +/* + * Called by both boot and secondaries to move global data into + * per-processor storage. + */ +static inline void __init +smp_store_cpu_info(int cpuid) +{ + cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy; + cpu_data[cpuid].last_asn = ASN_FIRST_VERSION; + cpu_data[cpuid].need_new_asn = 0; + cpu_data[cpuid].asn_lock = 0; +} + +/* + * Ideally sets up per-cpu profiling hooks. Doesn't do much now... + */ +static inline void __init +smp_setup_percpu_timer(int cpuid) +{ + cpu_data[cpuid].prof_counter = 1; + cpu_data[cpuid].prof_multiplier = 1; +} + +static void __init +wait_boot_cpu_to_stop(int cpuid) +{ + unsigned long stop = jiffies + 10*HZ; + + while (time_before(jiffies, stop)) { + if (!smp_secondary_alive) + return; + barrier(); + } + + printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid); + for (;;) + barrier(); +} + +/* + * Where secondaries begin a life of C. + */ +void __init +smp_callin(void) +{ + int cpuid = hard_smp_processor_id(); + + if (cpu_test_and_set(cpuid, cpu_online_map)) { + printk("??, cpu 0x%x already present??\n", cpuid); + BUG(); + } + + /* Turn on machine checks. */ + wrmces(7); + + /* Set trap vectors. */ + trap_init(); + + /* Set interrupt vector. */ + wrent(entInt, 0); + + /* Get our local ticker going. */ + smp_setup_percpu_timer(cpuid); + + /* Call platform-specific callin, if specified */ + if (alpha_mv.smp_callin) alpha_mv.smp_callin(); + + /* All kernel threads share the same mm context. */ + atomic_inc(&init_mm.mm_count); + current->active_mm = &init_mm; + + /* Must have completely accurate bogos. */ + local_irq_enable(); + + /* Wait boot CPU to stop with irq enabled before running + calibrate_delay. */ + wait_boot_cpu_to_stop(cpuid); + mb(); + calibrate_delay(); + + smp_store_cpu_info(cpuid); + /* Allow master to continue only after we written loops_per_jiffy. */ + wmb(); + smp_secondary_alive = 1; + + DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n", + cpuid, current, current->active_mm)); + + /* Do nothing. */ + cpu_idle(); +} + +/* Wait until hwrpb->txrdy is clear for cpu. Return -1 on timeout. */ +static int __init +wait_for_txrdy (unsigned long cpumask) +{ + unsigned long timeout; + + if (!(hwrpb->txrdy & cpumask)) + return 0; + + timeout = jiffies + 10*HZ; + while (time_before(jiffies, timeout)) { + if (!(hwrpb->txrdy & cpumask)) + return 0; + udelay(10); + barrier(); + } + + return -1; +} + +/* + * Send a message to a secondary's console. "START" is one such + * interesting message. 
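wait_boot_cpu_to_stop() and wait_for_txrdy() above both poll a condition against a jiffies deadline rather than blocking. The same shape in portable C, with CLOCK_MONOTONIC standing in for jiffies:

/* Deadline-polling loop, the pattern behind the "jiffies + 10*HZ" waits. */
#include <stdbool.h>
#include <time.h>

static double now(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return ts.tv_sec + ts.tv_nsec / 1e9;
}

static bool wait_for(bool (*cond)(void), double seconds)
{
    double deadline = now() + seconds;
    while (now() < deadline)
        if (cond())
            return true;        /* condition met in time */
    return false;               /* timed out */
}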
;-) + */ +static void __init +send_secondary_console_msg(char *str, int cpuid) +{ + struct percpu_struct *cpu; + register char *cp1, *cp2; + unsigned long cpumask; + size_t len; + + cpu = (struct percpu_struct *) + ((char*)hwrpb + + hwrpb->processor_offset + + cpuid * hwrpb->processor_size); + + cpumask = (1UL << cpuid); + if (wait_for_txrdy(cpumask)) + goto timeout; + + cp2 = str; + len = strlen(cp2); + *(unsigned int *)&cpu->ipc_buffer[0] = len; + cp1 = (char *) &cpu->ipc_buffer[1]; + memcpy(cp1, cp2, len); + + /* atomic test and set */ + wmb(); + set_bit(cpuid, &hwrpb->rxrdy); + + if (wait_for_txrdy(cpumask)) + goto timeout; + return; + + timeout: + printk("Processor %x not ready\n", cpuid); +} + +/* + * A secondary console wants to send a message. Receive it. + */ +static void +recv_secondary_console_msg(void) +{ + int mycpu, i, cnt; + unsigned long txrdy = hwrpb->txrdy; + char *cp1, *cp2, buf[80]; + struct percpu_struct *cpu; + + DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy)); + + mycpu = hard_smp_processor_id(); + + for (i = 0; i < NR_CPUS; i++) { + if (!(txrdy & (1UL << i))) + continue; + + DBGS(("recv_secondary_console_msg: " + "TXRDY contains CPU %d.\n", i)); + + cpu = (struct percpu_struct *) + ((char*)hwrpb + + hwrpb->processor_offset + + i * hwrpb->processor_size); + + DBGS(("recv_secondary_console_msg: on %d from %d" + " HALT_REASON 0x%lx FLAGS 0x%lx\n", + mycpu, i, cpu->halt_reason, cpu->flags)); + + cnt = cpu->ipc_buffer[0] >> 32; + if (cnt <= 0 || cnt >= 80) + strcpy(buf, "<<< BOGUS MSG >>>"); + else { + cp1 = (char *) &cpu->ipc_buffer[11]; + cp2 = buf; + strcpy(cp2, cp1); + + while ((cp2 = strchr(cp2, '\r')) != 0) { + *cp2 = ' '; + if (cp2[1] == '\n') + cp2[1] = ' '; + } + } + + DBGS((KERN_INFO "recv_secondary_console_msg: on %d " + "message is '%s'\n", mycpu, buf)); + } + + hwrpb->txrdy = 0; +} + +/* + * Convince the console to have a secondary cpu begin execution. + */ +static int __init +secondary_cpu_start(int cpuid, struct task_struct *idle) +{ + struct percpu_struct *cpu; + struct pcb_struct *hwpcb, *ipcb; + unsigned long timeout; + + cpu = (struct percpu_struct *) + ((char*)hwrpb + + hwrpb->processor_offset + + cpuid * hwrpb->processor_size); + hwpcb = (struct pcb_struct *) cpu->hwpcb; + ipcb = &idle->thread_info->pcb; + + /* Initialize the CPU's HWPCB to something just good enough for + us to get started. Immediately after starting, we'll swpctx + to the target idle task's pcb. Reuse the stack in the mean + time. Precalculate the target PCBB. */ + hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16; + hwpcb->usp = 0; + hwpcb->ptbr = ipcb->ptbr; + hwpcb->pcc = 0; + hwpcb->asn = 0; + hwpcb->unique = virt_to_phys(ipcb); + hwpcb->flags = ipcb->flags; + hwpcb->res1 = hwpcb->res2 = 0; + +#if 0 + DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n", + hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique)); +#endif + DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n", + cpuid, idle->state, ipcb->flags)); + + /* Setup HWRPB fields that SRM uses to activate secondary CPU */ + hwrpb->CPU_restart = __smp_callin; + hwrpb->CPU_restart_data = (unsigned long) __smp_callin; + + /* Recalculate and update the HWRPB checksum */ + hwrpb_update_checksum(hwrpb); + + /* + * Send a "start" command to the specified processor. 
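Several routines above compute a CPU's slot in the HWRPB by hand: the base pointer plus processor_offset plus cpuid times processor_size. Because the stride is only known at run time, plain array indexing cannot be used; the idiom in isolation:

/* Variable-stride array indexing, the form of every
 * "(struct percpu_struct *)((char *)hwrpb + ...)" expression above. */
static inline void *stride_slot(void *base, unsigned long offset,
                                unsigned long stride, unsigned int cpu)
{
    return (char *)base + offset + (unsigned long)cpu * stride;
}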
+ */ + + /* SRM III 3.4.1.3 */ + cpu->flags |= 0x22; /* turn on Context Valid and Restart Capable */ + cpu->flags &= ~1; /* turn off Bootstrap In Progress */ + wmb(); + + send_secondary_console_msg("START\r\n", cpuid); + + /* Wait 10 seconds for an ACK from the console. */ + timeout = jiffies + 10*HZ; + while (time_before(jiffies, timeout)) { + if (cpu->flags & 1) + goto started; + udelay(10); + barrier(); + } + printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid); + return -1; + + started: + DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid)); + return 0; +} + +/* + * Bring one cpu online. + */ +static int __init +smp_boot_one_cpu(int cpuid) +{ + struct task_struct *idle; + unsigned long timeout; + + /* Cook up an idler for this guy. Note that the address we + give to kernel_thread is irrelevant -- it's going to start + where HWRPB.CPU_restart says to start. But this gets all + the other task-y sort of data structures set up like we + wish. We can't use kernel_thread since we must avoid + rescheduling the child. */ + idle = fork_idle(cpuid); + if (IS_ERR(idle)) + panic("failed fork for CPU %d", cpuid); + + DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n", + cpuid, idle->state, idle->flags)); + + /* Signal the secondary to wait a moment. */ + smp_secondary_alive = -1; + + /* Whirrr, whirrr, whirrrrrrrrr... */ + if (secondary_cpu_start(cpuid, idle)) + return -1; + + /* Notify the secondary CPU it can run calibrate_delay. */ + mb(); + smp_secondary_alive = 0; + + /* We've been acked by the console; wait one second for + the task to start up for real. */ + timeout = jiffies + 1*HZ; + while (time_before(jiffies, timeout)) { + if (smp_secondary_alive == 1) + goto alive; + udelay(10); + barrier(); + } + + /* We failed to boot the CPU. */ + + printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid); + return -1; + + alive: + /* Another "Red Snapper". */ + return 0; +} + +/* + * Called from setup_arch. Detect an SMP system and which processors + * are present. + */ +void __init +setup_smp(void) +{ + struct percpu_struct *cpubase, *cpu; + unsigned long i; + + if (boot_cpuid != 0) { + printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n", + boot_cpuid); + } + + if (hwrpb->nr_processors > 1) { + int boot_cpu_palrev; + + DBGS(("setup_smp: nr_processors %ld\n", + hwrpb->nr_processors)); + + cpubase = (struct percpu_struct *) + ((char*)hwrpb + hwrpb->processor_offset); + boot_cpu_palrev = cpubase->pal_revision; + + for (i = 0; i < hwrpb->nr_processors; i++) { + cpu = (struct percpu_struct *) + ((char *)cpubase + i*hwrpb->processor_size); + if ((cpu->flags & 0x1cc) == 0x1cc) { + smp_num_probed++; + /* Assume here that "whami" == index */ + hwrpb_cpu_present_mask |= (1UL << i); + cpu->pal_revision = boot_cpu_palrev; + } + + DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n", + i, cpu->flags, cpu->type)); + DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n", + i, cpu->pal_revision)); + } + } else { + smp_num_probed = 1; + hwrpb_cpu_present_mask = (1UL << boot_cpuid); + } + cpu_present_mask = cpumask_of_cpu(boot_cpuid); + + printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n", + smp_num_probed, hwrpb_cpu_present_mask); +} + +/* + * Called by smp_init prepare the secondaries + */ +void __init +smp_prepare_cpus(unsigned int max_cpus) +{ + int cpu_count, i; + + /* Take care of some initial bookkeeping. 
*/ + memset(ipi_data, 0, sizeof(ipi_data)); + + current_thread_info()->cpu = boot_cpuid; + + smp_store_cpu_info(boot_cpuid); + smp_setup_percpu_timer(boot_cpuid); + + /* Nothing to do on a UP box, or when told not to. */ + if (smp_num_probed == 1 || max_cpus == 0) { + cpu_present_mask = cpumask_of_cpu(boot_cpuid); + printk(KERN_INFO "SMP mode deactivated.\n"); + return; + } + + printk(KERN_INFO "SMP starting up secondaries.\n"); + + cpu_count = 1; + for (i = 0; (i < NR_CPUS) && (cpu_count < max_cpus); i++) { + if (i == boot_cpuid) + continue; + + if (((hwrpb_cpu_present_mask >> i) & 1) == 0) + continue; + + cpu_set(i, cpu_possible_map); + cpu_count++; + } + + smp_num_cpus = cpu_count; +} + +void __devinit +smp_prepare_boot_cpu(void) +{ + /* + * Mark the boot cpu (current cpu) as both present and online + */ + cpu_set(smp_processor_id(), cpu_present_mask); + cpu_set(smp_processor_id(), cpu_online_map); +} + +int __devinit +__cpu_up(unsigned int cpu) +{ + smp_boot_one_cpu(cpu); + + return cpu_online(cpu) ? 0 : -ENOSYS; +} + +void __init +smp_cpus_done(unsigned int max_cpus) +{ + int cpu; + unsigned long bogosum = 0; + + for(cpu = 0; cpu < NR_CPUS; cpu++) + if (cpu_online(cpu)) + bogosum += cpu_data[cpu].loops_per_jiffy; + + printk(KERN_INFO "SMP: Total of %d processors activated " + "(%lu.%02lu BogoMIPS).\n", + num_online_cpus(), + (bogosum + 2500) / (500000/HZ), + ((bogosum + 2500) / (5000/HZ)) % 100); +} + + +void +smp_percpu_timer_interrupt(struct pt_regs *regs) +{ + int cpu = smp_processor_id(); + unsigned long user = user_mode(regs); + struct cpuinfo_alpha *data = &cpu_data[cpu]; + + /* Record kernel PC. */ + profile_tick(CPU_PROFILING, regs); + + if (!--data->prof_counter) { + /* We need to make like a normal interrupt -- otherwise + timer interrupts ignore the global interrupt lock, + which would be a Bad Thing. */ + irq_enter(); + + update_process_times(user); + + data->prof_counter = data->prof_multiplier; + + irq_exit(); + } +} + +int __init +setup_profiling_timer(unsigned int multiplier) +{ + return -EINVAL; +} + + +static void +send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation) +{ + int i; + + mb(); + for_each_cpu_mask(i, to_whom) + set_bit(operation, &ipi_data[i].bits); + + mb(); + for_each_cpu_mask(i, to_whom) + wripir(i); +} + +/* Structure and data for smp_call_function. This is designed to + minimize static memory requirements. Plus it looks cleaner. */ + +struct smp_call_struct { + void (*func) (void *info); + void *info; + long wait; + atomic_t unstarted_count; + atomic_t unfinished_count; +}; + +static struct smp_call_struct *smp_call_function_data; + +/* Atomicly drop data into a shared pointer. The pointer is free if + it is initially locked. If retry, spin until free. */ + +static int +pointer_lock (void *lock, void *data, int retry) +{ + void *old, *tmp; + + mb(); + again: + /* Compare and swap with zero. */ + asm volatile ( + "1: ldq_l %0,%1\n" + " mov %3,%2\n" + " bne %0,2f\n" + " stq_c %2,%1\n" + " beq %2,1b\n" + "2:" + : "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp) + : "r"(data) + : "memory"); + + if (old == 0) + return 0; + if (! retry) + return -EBUSY; + + while (*(void **)lock) + barrier(); + goto again; +} + +void +handle_ipi(struct pt_regs *regs) +{ + int this_cpu = smp_processor_id(); + unsigned long *pending_ipis = &ipi_data[this_cpu].bits; + unsigned long ops; + +#if 0 + DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n", + this_cpu, *pending_ipis, regs->pc)); +#endif + + mb(); /* Order interrupt and bit testing. 
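pointer_lock() above is a compare-and-swap-with-zero built from Alpha's ldq_l/stq_c pair: install the data pointer only if the slot is currently NULL, optionally spinning until it is. A portable C11 sketch of the same contract (an illustration, not the kernel's code):

/* C11 rendering of pointer_lock(): claim a shared pointer slot by
 * CAS-ing it from NULL; spin for it only if the caller asked to. */
#include <errno.h>
#include <stdatomic.h>

static int pointer_lock_c11(_Atomic(void *) *lock, void *data, int retry)
{
    for (;;) {
        void *expected = NULL;
        if (atomic_compare_exchange_strong(lock, &expected, data))
            return 0;           /* slot was free and is now ours   */
        if (!retry)
            return -EBUSY;      /* busy, and the caller won't wait */
        while (atomic_load(lock) != NULL)
            ;                   /* spin until the holder clears it */
    }
}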
*/
+    while ((ops = xchg(pending_ipis, 0)) != 0) {
+        mb();    /* Order bit clearing and data access. */
+        do {
+            unsigned long which;
+
+            which = ops & -ops;
+            ops &= ~which;
+            which = __ffs(which);
+
+            switch (which) {
+            case IPI_RESCHEDULE:
+                /* Reschedule callback. Everything to be done
+                   is done by the interrupt return path. */
+                break;
+
+            case IPI_CALL_FUNC:
+            {
+                struct smp_call_struct *data;
+                void (*func)(void *info);
+                void *info;
+                int wait;
+
+                data = smp_call_function_data;
+                func = data->func;
+                info = data->info;
+                wait = data->wait;
+
+                /* Notify the sending CPU that the data has been
+                   received, and execution is about to begin. */
+                mb();
+                atomic_dec (&data->unstarted_count);
+
+                /* At this point the structure may be gone unless
+                   wait is true. */
+                (*func)(info);
+
+                /* Notify the sending CPU that the task is done. */
+                mb();
+                if (wait) atomic_dec (&data->unfinished_count);
+                break;
+            }
+
+            case IPI_CPU_STOP:
+                halt();
+
+            default:
+                printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
+                       this_cpu, which);
+                break;
+            }
+        } while (ops);
+
+        mb();    /* Order data access and bit testing. */
+    }
+
+    cpu_data[this_cpu].ipi_count++;
+
+    if (hwrpb->txrdy)
+        recv_secondary_console_msg();
+}
+
+void
+smp_send_reschedule(int cpu)
+{
+#ifdef DEBUG_IPI_MSG
+    if (cpu == hard_smp_processor_id())
+        printk(KERN_WARNING
+               "smp_send_reschedule: Sending IPI to self.\n");
+#endif
+    send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
+}
+
+void
+smp_send_stop(void)
+{
+    cpumask_t to_whom = cpu_possible_map;
+    cpu_clear(smp_processor_id(), to_whom);
+#ifdef DEBUG_IPI_MSG
+    if (hard_smp_processor_id() != boot_cpu_id)
+        printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
+#endif
+    send_ipi_message(to_whom, IPI_CPU_STOP);
+}
+
+/*
+ * Run a function on all other CPUs.
+ *  <func>   The function to run. This must be fast and non-blocking.
+ *  <info>   An arbitrary pointer to pass to the function.
+ *  <retry>  If true, keep retrying until ready.
+ *  <wait>   If true, wait until function has completed on other CPUs.
+ *  [RETURNS] 0 on success, else a negative status code.
+ *
+ * Does not return until remote CPUs are nearly ready to execute <func>,
+ * are executing it, or have executed it.
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+
+int
+smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry,
+                          int wait, cpumask_t to_whom)
+{
+    struct smp_call_struct data;
+    unsigned long timeout;
+    int num_cpus_to_call;
+
+    /* Can deadlock when called with interrupts disabled */
+    WARN_ON(irqs_disabled());
+
+    data.func = func;
+    data.info = info;
+    data.wait = wait;
+
+    cpu_clear(smp_processor_id(), to_whom);
+    num_cpus_to_call = cpus_weight(to_whom);
+
+    atomic_set(&data.unstarted_count, num_cpus_to_call);
+    atomic_set(&data.unfinished_count, num_cpus_to_call);
+
+    /* Acquire the smp_call_function_data mutex. */
+    if (pointer_lock(&smp_call_function_data, &data, retry))
+        return -EBUSY;
+
+    /* Send a message to the requested CPUs. */
+    send_ipi_message(to_whom, IPI_CALL_FUNC);
+
+    /* Wait for a minimal response. */
+    timeout = jiffies + HZ;
+    while (atomic_read (&data.unstarted_count) > 0
+           && time_before (jiffies, timeout))
+        barrier();
+
+    /* If there's no response yet, log a message but allow a longer
+     * timeout period -- if we get a response this time, log
+     * a message saying when we got it.
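handle_ipi() above drains its message word one bit at a time: grab all pending bits with xchg(), then repeatedly isolate the lowest set bit with `ops & -ops` and clear it. The bit-walking idiom on its own (assumes a GCC-style __builtin_ctzl, the portable cousin of the kernel's __ffs):

/* Walking set bits from lowest to highest, as handle_ipi() does. */
#include <stdio.h>

static void drain(unsigned long ops)
{
    while (ops) {
        unsigned long lowest = ops & -ops;   /* isolate lowest set bit */
        ops &= ~lowest;                      /* clear it               */
        printf("message %d\n", __builtin_ctzl(lowest));
    }
}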
+ */ + if (atomic_read(&data.unstarted_count) > 0) { + long start_time = jiffies; + printk(KERN_ERR "%s: initial timeout -- trying long wait\n", + __FUNCTION__); + timeout = jiffies + 30 * HZ; + while (atomic_read(&data.unstarted_count) > 0 + && time_before(jiffies, timeout)) + barrier(); + if (atomic_read(&data.unstarted_count) <= 0) { + long delta = jiffies - start_time; + printk(KERN_ERR + "%s: response %ld.%ld seconds into long wait\n", + __FUNCTION__, delta / HZ, + (100 * (delta - ((delta / HZ) * HZ))) / HZ); + } + } + + /* We either got one or timed out -- clear the lock. */ + mb(); + smp_call_function_data = NULL; + + /* + * If after both the initial and long timeout periods we still don't + * have a response, something is very wrong... + */ + BUG_ON(atomic_read (&data.unstarted_count) > 0); + + /* Wait for a complete response, if needed. */ + if (wait) { + while (atomic_read (&data.unfinished_count) > 0) + barrier(); + } + + return 0; +} + +int +smp_call_function (void (*func) (void *info), void *info, int retry, int wait) +{ + return smp_call_function_on_cpu (func, info, retry, wait, + cpu_online_map); +} + +static void +ipi_imb(void *ignored) +{ + imb(); +} + +void +smp_imb(void) +{ + /* Must wait other processors to flush their icache before continue. */ + if (on_each_cpu(ipi_imb, NULL, 1, 1)) + printk(KERN_CRIT "smp_imb: timed out\n"); +} + +static void +ipi_flush_tlb_all(void *ignored) +{ + tbia(); +} + +void +flush_tlb_all(void) +{ + /* Although we don't have any data to pass, we do want to + synchronize with the other processors. */ + if (on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1)) { + printk(KERN_CRIT "flush_tlb_all: timed out\n"); + } +} + +#define asn_locked() (cpu_data[smp_processor_id()].asn_lock) + +static void +ipi_flush_tlb_mm(void *x) +{ + struct mm_struct *mm = (struct mm_struct *) x; + if (mm == current->active_mm && !asn_locked()) + flush_tlb_current(mm); + else + flush_tlb_other(mm); +} + +void +flush_tlb_mm(struct mm_struct *mm) +{ + preempt_disable(); + + if (mm == current->active_mm) { + flush_tlb_current(mm); + if (atomic_read(&mm->mm_users) <= 1) { + int cpu, this_cpu = smp_processor_id(); + for (cpu = 0; cpu < NR_CPUS; cpu++) { + if (!cpu_online(cpu) || cpu == this_cpu) + continue; + if (mm->context[cpu]) + mm->context[cpu] = 0; + } + preempt_enable(); + return; + } + } + + if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) { + printk(KERN_CRIT "flush_tlb_mm: timed out\n"); + } + + preempt_enable(); +} + +struct flush_tlb_page_struct { + struct vm_area_struct *vma; + struct mm_struct *mm; + unsigned long addr; +}; + +static void +ipi_flush_tlb_page(void *x) +{ + struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x; + struct mm_struct * mm = data->mm; + + if (mm == current->active_mm && !asn_locked()) + flush_tlb_current_page(mm, data->vma, data->addr); + else + flush_tlb_other(mm); +} + +void +flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) +{ + struct flush_tlb_page_struct data; + struct mm_struct *mm = vma->vm_mm; + + preempt_disable(); + + if (mm == current->active_mm) { + flush_tlb_current_page(mm, vma, addr); + if (atomic_read(&mm->mm_users) <= 1) { + int cpu, this_cpu = smp_processor_id(); + for (cpu = 0; cpu < NR_CPUS; cpu++) { + if (!cpu_online(cpu) || cpu == this_cpu) + continue; + if (mm->context[cpu]) + mm->context[cpu] = 0; + } + preempt_enable(); + return; + } + } + + data.vma = vma; + data.mm = mm; + data.addr = addr; + + if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) { + printk(KERN_CRIT 
"flush_tlb_page: timed out\n"); + } + + preempt_enable(); +} + +void +flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) +{ + /* On the Alpha we always flush the whole user tlb. */ + flush_tlb_mm(vma->vm_mm); +} + +static void +ipi_flush_icache_page(void *x) +{ + struct mm_struct *mm = (struct mm_struct *) x; + if (mm == current->active_mm && !asn_locked()) + __load_new_mm_context(mm); + else + flush_tlb_other(mm); +} + +void +flush_icache_user_range(struct vm_area_struct *vma, struct page *page, + unsigned long addr, int len) +{ + struct mm_struct *mm = vma->vm_mm; + + if ((vma->vm_flags & VM_EXEC) == 0) + return; + + preempt_disable(); + + if (mm == current->active_mm) { + __load_new_mm_context(mm); + if (atomic_read(&mm->mm_users) <= 1) { + int cpu, this_cpu = smp_processor_id(); + for (cpu = 0; cpu < NR_CPUS; cpu++) { + if (!cpu_online(cpu) || cpu == this_cpu) + continue; + if (mm->context[cpu]) + mm->context[cpu] = 0; + } + preempt_enable(); + return; + } + } + + if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) { + printk(KERN_CRIT "flush_icache_page: timed out\n"); + } + + preempt_enable(); +} + +#ifdef CONFIG_DEBUG_SPINLOCK +void +_raw_spin_unlock(spinlock_t * lock) +{ + mb(); + lock->lock = 0; + + lock->on_cpu = -1; + lock->previous = NULL; + lock->task = NULL; + lock->base_file = "none"; + lock->line_no = 0; +} + +void +debug_spin_lock(spinlock_t * lock, const char *base_file, int line_no) +{ + long tmp; + long stuck; + void *inline_pc = __builtin_return_address(0); + unsigned long started = jiffies; + int printed = 0; + int cpu = smp_processor_id(); + + stuck = 1L << 30; + try_again: + + /* Use sub-sections to put the actual loop at the end + of this object file's text section so as to perfect + branch prediction. */ + __asm__ __volatile__( + "1: ldl_l %0,%1\n" + " subq %2,1,%2\n" + " blbs %0,2f\n" + " or %0,1,%0\n" + " stl_c %0,%1\n" + " beq %0,3f\n" + "4: mb\n" + ".subsection 2\n" + "2: ldl %0,%1\n" + " subq %2,1,%2\n" + "3: blt %2,4b\n" + " blbs %0,2b\n" + " br 1b\n" + ".previous" + : "=r" (tmp), "=m" (lock->lock), "=r" (stuck) + : "1" (lock->lock), "2" (stuck) : "memory"); + + if (stuck < 0) { + printk(KERN_WARNING + "%s:%d spinlock stuck in %s at %p(%d)" + " owner %s at %p(%d) %s:%d\n", + base_file, line_no, + current->comm, inline_pc, cpu, + lock->task->comm, lock->previous, + lock->on_cpu, lock->base_file, lock->line_no); + stuck = 1L << 36; + printed = 1; + goto try_again; + } + + /* Exiting. Got the lock. 
 */
+	lock->on_cpu = cpu;
+	lock->previous = inline_pc;
+	lock->task = current;
+	lock->base_file = base_file;
+	lock->line_no = line_no;
+
+	if (printed) {
+		printk(KERN_WARNING
+		       "%s:%d spinlock grabbed in %s at %p(%d) %ld ticks\n",
+		       base_file, line_no, current->comm, inline_pc,
+		       cpu, jiffies - started);
+	}
+}
+
+int
+debug_spin_trylock(spinlock_t * lock, const char *base_file, int line_no)
+{
+	int ret;
+	if ((ret = !test_and_set_bit(0, lock))) {
+		lock->on_cpu = smp_processor_id();
+		lock->previous = __builtin_return_address(0);
+		lock->task = current;
+	} else {
+		lock->base_file = base_file;
+		lock->line_no = line_no;
+	}
+	return ret;
+}
+#endif /* CONFIG_DEBUG_SPINLOCK */
+
+#ifdef CONFIG_DEBUG_RWLOCK
+void _raw_write_lock(rwlock_t * lock)
+{
+	long regx, regy;
+	int stuck_lock, stuck_reader;
+	void *inline_pc = __builtin_return_address(0);
+
+ try_again:
+
+	stuck_lock = 1<<30;
+	stuck_reader = 1<<30;
+
+	__asm__ __volatile__(
+	"1:	ldl_l	%1,%0\n"
+	"	blbs	%1,6f\n"
+	"	blt	%1,8f\n"
+	"	mov	1,%1\n"
+	"	stl_c	%1,%0\n"
+	"	beq	%1,6f\n"
+	"4:	mb\n"
+	".subsection 2\n"
+	"6:	blt	%3,4b	# debug\n"
+	"	subl	%3,1,%3	# debug\n"
+	"	ldl	%1,%0\n"
+	"	blbs	%1,6b\n"
+	"8:	blt	%4,4b	# debug\n"
+	"	subl	%4,1,%4	# debug\n"
+	"	ldl	%1,%0\n"
+	"	blt	%1,8b\n"
+	"	br	1b\n"
+	".previous"
+	: "=m" (*(volatile int *)lock), "=&r" (regx), "=&r" (regy),
+	  "=&r" (stuck_lock), "=&r" (stuck_reader)
+	: "0" (*(volatile int *)lock), "3" (stuck_lock), "4" (stuck_reader) : "memory");
+
+	if (stuck_lock < 0) {
+		printk(KERN_WARNING "write_lock stuck at %p\n", inline_pc);
+		goto try_again;
+	}
+	if (stuck_reader < 0) {
+		printk(KERN_WARNING "write_lock stuck on readers at %p\n",
+		       inline_pc);
+		goto try_again;
+	}
+}
+
+void _raw_read_lock(rwlock_t * lock)
+{
+	long regx;
+	int stuck_lock;
+	void *inline_pc = __builtin_return_address(0);
+
+ try_again:
+
+	stuck_lock = 1<<30;
+
+	__asm__ __volatile__(
+	"1:	ldl_l	%1,%0;"
+	"	blbs	%1,6f;"
+	"	subl	%1,2,%1;"
+	"	stl_c	%1,%0;"
+	"	beq	%1,6f;"
+	"4:	mb\n"
+	".subsection 2\n"
+	"6:	ldl	%1,%0;"
+	"	blt	%2,4b	# debug\n"
+	"	subl	%2,1,%2	# debug\n"
+	"	blbs	%1,6b;"
+	"	br	1b\n"
+	".previous"
+	: "=m" (*(volatile int *)lock), "=&r" (regx), "=&r" (stuck_lock)
+	: "0" (*(volatile int *)lock), "2" (stuck_lock) : "memory");
+
+	if (stuck_lock < 0) {
+		printk(KERN_WARNING "read_lock stuck at %p\n", inline_pc);
+		goto try_again;
+	}
+}
+#endif /* CONFIG_DEBUG_RWLOCK */
diff --git a/arch/alpha/kernel/srm_env.c b/arch/alpha/kernel/srm_env.c
new file mode 100644
index 0000000..5c98fc8
--- /dev/null
+++ b/arch/alpha/kernel/srm_env.c
@@ -0,0 +1,335 @@
+/*
+ * srm_env.c - Access to SRM environment
+ *             variables through Linux's procfs
+ *
+ * Copyright (C) 2001-2002 Jan-Benedict Glaw <jbglaw@lug-owl.de>
+ *
+ * This driver is essentially a modified version of Erik Mouw's
+ * Documentation/DocBook/procfs_example.c, so: thank
+ * you, Erik! He can be reached via email at
+ * <J.A.K.Mouw@its.tudelft.nl>. It is based on an idea
+ * provided by DEC^WCompaq^WIntel's "Jumpstart" CD. They
+ * included a patch like this as well. Thanks for the idea!
+ *
+ * This program is free software; you can redistribute
+ * it and/or modify it under the terms of the GNU General
+ * Public License version 2 as published by the Free Software
+ * Foundation.
+ *
+ * This program is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more
+ * details.
+ * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place, + * Suite 330, Boston, MA 02111-1307 USA + * + */ + +/* + * Changelog + * ~~~~~~~~~ + * + * Thu, 22 Aug 2002 15:10:43 +0200 + * - Update Config.help entry. I got a number of emails asking + * me to tell their senders if they could make use of this + * piece of code... So: "SRM is something like BIOS for your + * Alpha" + * - Update code formatting a bit to better conform CodingStyle + * rules. + * - So this is v0.0.5, with no changes (except formatting) + * + * Wed, 22 May 2002 00:11:21 +0200 + * - Fix typo on comment (SRC -> SRM) + * - Call this "Version 0.0.4" + * + * Tue, 9 Apr 2002 18:44:40 +0200 + * - Implement access by variable name and additionally + * by number. This is done by creating two subdirectories + * where one holds all names (like the old directory + * did) and the other holding 256 files named like "0", + * "1" and so on. + * - Call this "Version 0.0.3" + * + */ + +#include <linux/kernel.h> +#include <linux/config.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/proc_fs.h> +#include <asm/console.h> +#include <asm/uaccess.h> +#include <asm/machvec.h> + +#define BASE_DIR "srm_environment" /* Subdir in /proc/ */ +#define NAMED_DIR "named_variables" /* Subdir for known variables */ +#define NUMBERED_DIR "numbered_variables" /* Subdir for all variables */ +#define VERSION "0.0.5" /* Module version */ +#define NAME "srm_env" /* Module name */ + +MODULE_AUTHOR("Jan-Benedict Glaw <jbglaw@lug-owl.de>"); +MODULE_DESCRIPTION("Accessing Alpha SRM environment through procfs interface"); +MODULE_LICENSE("GPL"); + +typedef struct _srm_env { + char *name; + unsigned long id; + struct proc_dir_entry *proc_entry; +} srm_env_t; + +static struct proc_dir_entry *base_dir; +static struct proc_dir_entry *named_dir; +static struct proc_dir_entry *numbered_dir; +static char number[256][4]; + +static srm_env_t srm_named_entries[] = { + { "auto_action", ENV_AUTO_ACTION }, + { "boot_dev", ENV_BOOT_DEV }, + { "bootdef_dev", ENV_BOOTDEF_DEV }, + { "booted_dev", ENV_BOOTED_DEV }, + { "boot_file", ENV_BOOT_FILE }, + { "booted_file", ENV_BOOTED_FILE }, + { "boot_osflags", ENV_BOOT_OSFLAGS }, + { "booted_osflags", ENV_BOOTED_OSFLAGS }, + { "boot_reset", ENV_BOOT_RESET }, + { "dump_dev", ENV_DUMP_DEV }, + { "enable_audit", ENV_ENABLE_AUDIT }, + { "license", ENV_LICENSE }, + { "char_set", ENV_CHAR_SET }, + { "language", ENV_LANGUAGE }, + { "tty_dev", ENV_TTY_DEV }, + { NULL, 0 }, +}; +static srm_env_t srm_numbered_entries[256]; + + + +static int +srm_env_read(char *page, char **start, off_t off, int count, int *eof, + void *data) +{ + int nbytes; + unsigned long ret; + srm_env_t *entry; + + if(off != 0) + return -EFAULT; + + entry = (srm_env_t *) data; + ret = callback_getenv(entry->id, page, count); + + if((ret >> 61) == 0) + nbytes = (int) ret; + else + nbytes = -EFAULT; + + return nbytes; +} + + +static int +srm_env_write(struct file *file, const char __user *buffer, unsigned long count, + void *data) +{ + int res; + srm_env_t *entry; + char *buf = (char *) __get_free_page(GFP_USER); + unsigned long ret1, ret2; + + entry = (srm_env_t *) data; + + if (!buf) + return -ENOMEM; + + res = -EINVAL; + if (count >= PAGE_SIZE) + goto out; + + res = -EFAULT; + if (copy_from_user(buf, buffer, count)) + goto out; + buf[count] = '\0'; + + ret1 = callback_setenv(entry->id, buf, count); + if ((ret1 >> 61) == 0) { + do + 
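	/*
	 * A note on the "(ret >> 61)" tests in this file: SRM console
	 * callbacks return a 64-bit value whose top three bits are a
	 * status code and whose low 61 bits carry a count, matching
	 * the srmcons_result union that srmcons.c uses to decode the
	 * same convention.  By hand, that decoding is:
	 *
	 *	unsigned long status = ret >> 61;
	 *	unsigned long count  = ret & ((1UL << 61) - 1);
	 *
	 * status == 0 means success, with "count" bytes transferred.
	 */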
ret2 = callback_save_env(); + while((ret2 >> 61) == 1); + res = (int) ret1; + } + + out: + free_page((unsigned long)buf); + return res; +} + +static void +srm_env_cleanup(void) +{ + srm_env_t *entry; + unsigned long var_num; + + if(base_dir) { + /* + * Remove named entries + */ + if(named_dir) { + entry = srm_named_entries; + while(entry->name != NULL && entry->id != 0) { + if(entry->proc_entry) { + remove_proc_entry(entry->name, + named_dir); + entry->proc_entry = NULL; + } + entry++; + } + remove_proc_entry(NAMED_DIR, base_dir); + } + + /* + * Remove numbered entries + */ + if(numbered_dir) { + for(var_num = 0; var_num <= 255; var_num++) { + entry = &srm_numbered_entries[var_num]; + + if(entry->proc_entry) { + remove_proc_entry(entry->name, + numbered_dir); + entry->proc_entry = NULL; + entry->name = NULL; + } + } + remove_proc_entry(NUMBERED_DIR, base_dir); + } + + remove_proc_entry(BASE_DIR, NULL); + } + + return; +} + + +static int __init +srm_env_init(void) +{ + srm_env_t *entry; + unsigned long var_num; + + /* + * Check system + */ + if(!alpha_using_srm) { + printk(KERN_INFO "%s: This Alpha system doesn't " + "know about SRM (or you've booted " + "SRM->MILO->Linux, which gets " + "misdetected)...\n", __FUNCTION__); + return -ENODEV; + } + + /* + * Init numbers + */ + for(var_num = 0; var_num <= 255; var_num++) + sprintf(number[var_num], "%ld", var_num); + + /* + * Create base directory + */ + base_dir = proc_mkdir(BASE_DIR, NULL); + if(base_dir == NULL) { + printk(KERN_ERR "Couldn't create base dir /proc/%s\n", + BASE_DIR); + goto cleanup; + } + base_dir->owner = THIS_MODULE; + + /* + * Create per-name subdirectory + */ + named_dir = proc_mkdir(NAMED_DIR, base_dir); + if(named_dir == NULL) { + printk(KERN_ERR "Couldn't create dir /proc/%s/%s\n", + BASE_DIR, NAMED_DIR); + goto cleanup; + } + named_dir->owner = THIS_MODULE; + + /* + * Create per-number subdirectory + */ + numbered_dir = proc_mkdir(NUMBERED_DIR, base_dir); + if(numbered_dir == NULL) { + printk(KERN_ERR "Couldn't create dir /proc/%s/%s\n", + BASE_DIR, NUMBERED_DIR); + goto cleanup; + + } + numbered_dir->owner = THIS_MODULE; + + /* + * Create all named nodes + */ + entry = srm_named_entries; + while(entry->name != NULL && entry->id != 0) { + entry->proc_entry = create_proc_entry(entry->name, + 0644, named_dir); + if(entry->proc_entry == NULL) + goto cleanup; + + entry->proc_entry->data = (void *) entry; + entry->proc_entry->owner = THIS_MODULE; + entry->proc_entry->read_proc = srm_env_read; + entry->proc_entry->write_proc = srm_env_write; + + entry++; + } + + /* + * Create all numbered nodes + */ + for(var_num = 0; var_num <= 255; var_num++) { + entry = &srm_numbered_entries[var_num]; + entry->name = number[var_num]; + + entry->proc_entry = create_proc_entry(entry->name, + 0644, numbered_dir); + if(entry->proc_entry == NULL) + goto cleanup; + + entry->id = var_num; + entry->proc_entry->data = (void *) entry; + entry->proc_entry->owner = THIS_MODULE; + entry->proc_entry->read_proc = srm_env_read; + entry->proc_entry->write_proc = srm_env_write; + } + + printk(KERN_INFO "%s: version %s loaded successfully\n", NAME, + VERSION); + + return 0; + +cleanup: + srm_env_cleanup(); + + return -ENOMEM; +} + + +static void __exit +srm_env_exit(void) +{ + srm_env_cleanup(); + printk(KERN_INFO "%s: unloaded successfully\n", NAME); + + return; +} + + +module_init(srm_env_init); +module_exit(srm_env_exit); + diff --git a/arch/alpha/kernel/srmcons.c b/arch/alpha/kernel/srmcons.c new file mode 100644 index 0000000..3b30d4f --- /dev/null 
+++ b/arch/alpha/kernel/srmcons.c @@ -0,0 +1,326 @@ +/* + * linux/arch/alpha/kernel/srmcons.c + * + * Callback based driver for SRM Console console device. + * (TTY driver and console driver) + */ + +#include <linux/config.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/console.h> +#include <linux/delay.h> +#include <linux/mm.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/timer.h> +#include <linux/tty.h> +#include <linux/tty_driver.h> +#include <linux/tty_flip.h> + +#include <asm/console.h> +#include <asm/uaccess.h> + + +static DEFINE_SPINLOCK(srmcons_callback_lock); +static int srm_is_registered_console = 0; + +/* + * The TTY driver + */ +#define MAX_SRM_CONSOLE_DEVICES 1 /* only support 1 console device */ + +struct srmcons_private { + struct tty_struct *tty; + struct timer_list timer; + spinlock_t lock; +}; + +typedef union _srmcons_result { + struct { + unsigned long c :61; + unsigned long status :3; + } bits; + long as_long; +} srmcons_result; + +/* called with callback_lock held */ +static int +srmcons_do_receive_chars(struct tty_struct *tty) +{ + srmcons_result result; + int count = 0, loops = 0; + + do { + result.as_long = callback_getc(0); + if (result.bits.status < 2) { + tty_insert_flip_char(tty, (char)result.bits.c, 0); + count++; + } + } while((result.bits.status & 1) && (++loops < 10)); + + if (count) + tty_schedule_flip(tty); + + return count; +} + +static void +srmcons_receive_chars(unsigned long data) +{ + struct srmcons_private *srmconsp = (struct srmcons_private *)data; + unsigned long flags; + int incr = 10; + + local_irq_save(flags); + if (spin_trylock(&srmcons_callback_lock)) { + if (!srmcons_do_receive_chars(srmconsp->tty)) + incr = 100; + spin_unlock(&srmcons_callback_lock); + } + + spin_lock(&srmconsp->lock); + if (srmconsp->tty) { + srmconsp->timer.expires = jiffies + incr; + add_timer(&srmconsp->timer); + } + spin_unlock(&srmconsp->lock); + + local_irq_restore(flags); +} + +/* called with callback_lock held */ +static int +srmcons_do_write(struct tty_struct *tty, const char *buf, int count) +{ + static char str_cr[1] = "\r"; + long c, remaining = count; + srmcons_result result; + char *cur; + int need_cr; + + for (cur = (char *)buf; remaining > 0; ) { + need_cr = 0; + /* + * Break it up into reasonable size chunks to allow a chance + * for input to get in + */ + for (c = 0; c < min_t(long, 128L, remaining) && !need_cr; c++) + if (cur[c] == '\n') + need_cr = 1; + + while (c > 0) { + result.as_long = callback_puts(0, cur, c); + c -= result.bits.c; + remaining -= result.bits.c; + cur += result.bits.c; + + /* + * Check for pending input iff a tty was provided + */ + if (tty) + srmcons_do_receive_chars(tty); + } + + while (need_cr) { + result.as_long = callback_puts(0, str_cr, 1); + if (result.bits.c > 0) + need_cr = 0; + } + } + return count; +} + +static int +srmcons_write(struct tty_struct *tty, + const unsigned char *buf, int count) +{ + unsigned long flags; + + spin_lock_irqsave(&srmcons_callback_lock, flags); + srmcons_do_write(tty, (const char *) buf, count); + spin_unlock_irqrestore(&srmcons_callback_lock, flags); + + return count; +} + +static int +srmcons_write_room(struct tty_struct *tty) +{ + return 512; +} + +static int +srmcons_chars_in_buffer(struct tty_struct *tty) +{ + return 0; +} + +static int +srmcons_get_private_struct(struct srmcons_private **ps) +{ + static struct srmcons_private *srmconsp = NULL; + static DEFINE_SPINLOCK(srmconsp_lock); + unsigned long flags; + int retval = 0; + + if (srmconsp 
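/*
 * The receive side above is driven by a self-rearming timer rather
 * than an interrupt: srmcons_receive_chars() polls the console
 * through callback_getc() and requeues itself every 10 jiffies,
 * backing off to 100 jiffies when nothing arrived.  The pattern, as
 * a free-standing sketch (my_timer and work_available are
 * illustrative, not part of this driver):
 *
 *	static void
 *	my_poll(unsigned long data)
 *	{
 *		int incr = work_available() ? 10 : 100;
 *
 *		my_timer.expires = jiffies + incr;
 *		add_timer(&my_timer);
 *	}
 */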
== NULL) { + spin_lock_irqsave(&srmconsp_lock, flags); + + srmconsp = kmalloc(sizeof(*srmconsp), GFP_KERNEL); + if (srmconsp == NULL) + retval = -ENOMEM; + else { + srmconsp->tty = NULL; + spin_lock_init(&srmconsp->lock); + init_timer(&srmconsp->timer); + } + + spin_unlock_irqrestore(&srmconsp_lock, flags); + } + + *ps = srmconsp; + return retval; +} + +static int +srmcons_open(struct tty_struct *tty, struct file *filp) +{ + struct srmcons_private *srmconsp; + unsigned long flags; + int retval; + + retval = srmcons_get_private_struct(&srmconsp); + if (retval) + return retval; + + spin_lock_irqsave(&srmconsp->lock, flags); + + if (!srmconsp->tty) { + tty->driver_data = srmconsp; + + srmconsp->tty = tty; + srmconsp->timer.function = srmcons_receive_chars; + srmconsp->timer.data = (unsigned long)srmconsp; + srmconsp->timer.expires = jiffies + 10; + add_timer(&srmconsp->timer); + } + + spin_unlock_irqrestore(&srmconsp->lock, flags); + + return 0; +} + +static void +srmcons_close(struct tty_struct *tty, struct file *filp) +{ + struct srmcons_private *srmconsp = tty->driver_data; + unsigned long flags; + + spin_lock_irqsave(&srmconsp->lock, flags); + + if (tty->count == 1) { + srmconsp->tty = NULL; + del_timer(&srmconsp->timer); + } + + spin_unlock_irqrestore(&srmconsp->lock, flags); +} + + +static struct tty_driver *srmcons_driver; + +static struct tty_operations srmcons_ops = { + .open = srmcons_open, + .close = srmcons_close, + .write = srmcons_write, + .write_room = srmcons_write_room, + .chars_in_buffer= srmcons_chars_in_buffer, +}; + +static int __init +srmcons_init(void) +{ + if (srm_is_registered_console) { + struct tty_driver *driver; + int err; + + driver = alloc_tty_driver(MAX_SRM_CONSOLE_DEVICES); + if (!driver) + return -ENOMEM; + driver->driver_name = "srm"; + driver->name = "srm"; + driver->major = 0; /* dynamic */ + driver->minor_start = 0; + driver->type = TTY_DRIVER_TYPE_SYSTEM; + driver->subtype = SYSTEM_TYPE_SYSCONS; + driver->init_termios = tty_std_termios; + tty_set_operations(driver, &srmcons_ops); + err = tty_register_driver(driver); + if (err) { + put_tty_driver(driver); + return err; + } + srmcons_driver = driver; + } + + return -ENODEV; +} + +module_init(srmcons_init); + + +/* + * The console driver + */ +static void +srm_console_write(struct console *co, const char *s, unsigned count) +{ + unsigned long flags; + + spin_lock_irqsave(&srmcons_callback_lock, flags); + srmcons_do_write(NULL, s, count); + spin_unlock_irqrestore(&srmcons_callback_lock, flags); +} + +static struct tty_driver * +srm_console_device(struct console *co, int *index) +{ + *index = co->index; + return srmcons_driver; +} + +static int __init +srm_console_setup(struct console *co, char *options) +{ + return 0; +} + +static struct console srmcons = { + .name = "srm", + .write = srm_console_write, + .device = srm_console_device, + .setup = srm_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, +}; + +void __init +register_srm_console(void) +{ + if (!srm_is_registered_console) { + callback_open_console(); + register_console(&srmcons); + srm_is_registered_console = 1; + } +} + +void __init +unregister_srm_console(void) +{ + if (srm_is_registered_console) { + callback_close_console(); + unregister_console(&srmcons); + srm_is_registered_console = 0; + } +} diff --git a/arch/alpha/kernel/sys_alcor.c b/arch/alpha/kernel/sys_alcor.c new file mode 100644 index 0000000..145dcde --- /dev/null +++ b/arch/alpha/kernel/sys_alcor.c @@ -0,0 +1,326 @@ +/* + * linux/arch/alpha/kernel/sys_alcor.c + * + * 
Copyright (C) 1995 David A Rusling + * Copyright (C) 1996 Jay A Estabrook + * Copyright (C) 1998, 1999 Richard Henderson + * + * Code supporting the ALCOR and XLT (XL-300/366/433). + */ + +#include <linux/config.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/reboot.h> +#include <linux/bitops.h> + +#include <asm/ptrace.h> +#include <asm/system.h> +#include <asm/io.h> +#include <asm/dma.h> +#include <asm/mmu_context.h> +#include <asm/irq.h> +#include <asm/pgtable.h> +#include <asm/core_cia.h> +#include <asm/tlbflush.h> + +#include "proto.h" +#include "irq_impl.h" +#include "pci_impl.h" +#include "machvec_impl.h" + + +/* Note mask bit is true for ENABLED irqs. */ +static unsigned long cached_irq_mask; + +static inline void +alcor_update_irq_hw(unsigned long mask) +{ + *(vuip)GRU_INT_MASK = mask; + mb(); +} + +static inline void +alcor_enable_irq(unsigned int irq) +{ + alcor_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16)); +} + +static void +alcor_disable_irq(unsigned int irq) +{ + alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16))); +} + +static void +alcor_mask_and_ack_irq(unsigned int irq) +{ + alcor_disable_irq(irq); + + /* On ALCOR/XLT, need to dismiss interrupt via GRU. */ + *(vuip)GRU_INT_CLEAR = 1 << (irq - 16); mb(); + *(vuip)GRU_INT_CLEAR = 0; mb(); +} + +static unsigned int +alcor_startup_irq(unsigned int irq) +{ + alcor_enable_irq(irq); + return 0; +} + +static void +alcor_isa_mask_and_ack_irq(unsigned int irq) +{ + i8259a_mask_and_ack_irq(irq); + + /* On ALCOR/XLT, need to dismiss interrupt via GRU. */ + *(vuip)GRU_INT_CLEAR = 0x80000000; mb(); + *(vuip)GRU_INT_CLEAR = 0; mb(); +} + +static void +alcor_end_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + alcor_enable_irq(irq); +} + +static struct hw_interrupt_type alcor_irq_type = { + .typename = "ALCOR", + .startup = alcor_startup_irq, + .shutdown = alcor_disable_irq, + .enable = alcor_enable_irq, + .disable = alcor_disable_irq, + .ack = alcor_mask_and_ack_irq, + .end = alcor_end_irq, +}; + +static void +alcor_device_interrupt(unsigned long vector, struct pt_regs *regs) +{ + unsigned long pld; + unsigned int i; + + /* Read the interrupt summary register of the GRU */ + pld = (*(vuip)GRU_INT_REQ) & GRU_INT_REQ_BITS; + + /* + * Now for every possible bit set, work through them and call + * the appropriate interrupt handler. + */ + while (pld) { + i = ffz(~pld); + pld &= pld - 1; /* clear least bit set */ + if (i == 31) { + isa_device_interrupt(vector, regs); + } else { + handle_irq(16 + i, regs); + } + } +} + +static void __init +alcor_init_irq(void) +{ + long i; + + if (alpha_using_srm) + alpha_mv.device_interrupt = srm_device_interrupt; + + *(vuip)GRU_INT_MASK = 0; mb(); /* all disabled */ + *(vuip)GRU_INT_EDGE = 0; mb(); /* all are level */ + *(vuip)GRU_INT_HILO = 0x80000000U; mb(); /* ISA only HI */ + *(vuip)GRU_INT_CLEAR = 0; mb(); /* all clear */ + + for (i = 16; i < 48; ++i) { + /* On Alcor, at least, lines 20..30 are not connected + and can generate spurrious interrupts if we turn them + on while IRQ probing. */ + if (i >= 16+20 && i <= 16+30) + continue; + irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; + irq_desc[i].handler = &alcor_irq_type; + } + i8259a_irq_type.ack = alcor_isa_mask_and_ack_irq; + + init_i8259a_irqs(); + common_init_isa_dma(); + + setup_irq(16+31, &isa_cascade_irqaction); +} + + +/* + * PCI Fixup configuration. 
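 *
 * (A note on the dispatch loop in alcor_device_interrupt() above:
 * "i = ffz(~pld)" finds the lowest set bit -- find-first-zero of the
 * complement -- and "pld &= pld - 1" clears exactly that bit, so
 * each pending interrupt is handled once.  For example, pld = 0x14
 * dispatches bit 2, then bit 4, and the loop exits.)
 *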
+ *
+ * Summary @ GRU_INT_REQ:
+ * Bit      Meaning
+ *  0       Interrupt Line A from slot 2
+ *  1       Interrupt Line B from slot 2
+ *  2       Interrupt Line C from slot 2
+ *  3       Interrupt Line D from slot 2
+ *  4       Interrupt Line A from slot 1
+ *  5       Interrupt Line B from slot 1
+ *  6       Interrupt Line C from slot 1
+ *  7       Interrupt Line D from slot 1
+ *  8       Interrupt Line A from slot 0
+ *  9       Interrupt Line B from slot 0
+ * 10       Interrupt Line C from slot 0
+ * 11       Interrupt Line D from slot 0
+ * 12       Interrupt Line A from slot 4
+ * 13       Interrupt Line B from slot 4
+ * 14       Interrupt Line C from slot 4
+ * 15       Interrupt Line D from slot 4
+ * 16       Interrupt Line A from slot 3
+ * 17       Interrupt Line B from slot 3
+ * 18       Interrupt Line C from slot 3
+ * 19       Interrupt Line D from slot 3
+ * 20-30    Reserved
+ * 31       EISA interrupt
+ *
+ * The device to slot mapping looks like:
+ *
+ * Slot     Device
+ *  6       built-in TULIP (XLT only)
+ *  7       PCI on board slot 0
+ *  8       PCI on board slot 3
+ *  9       PCI on board slot 4
+ * 10       PCEB (PCI-EISA bridge)
+ * 11       PCI on board slot 2
+ * 12       PCI on board slot 1
+ *
+ *
+ * This two-layered interrupt approach means that we allocate IRQ 16 and
+ * above for PCI interrupts.  The IRQ relates to which bit the interrupt
+ * comes in on.  This makes interrupt processing much easier.
+ */
+
+static int __init
+alcor_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+	static char irq_tab[7][5] __initdata = {
+		/*INT      INTA     INTB     INTC     INTD */
+		/* note: IDSEL 17 is XLT only */
+		{16+13, 16+13, 16+13, 16+13, 16+13},	/* IdSel 17,  TULIP  */
+		{ 16+8,  16+8,  16+9, 16+10, 16+11},	/* IdSel 18,  slot 0 */
+		{16+16, 16+16, 16+17, 16+18, 16+19},	/* IdSel 19,  slot 3 */
+		{16+12, 16+12, 16+13, 16+14, 16+15},	/* IdSel 20,  slot 4 */
+		{   -1,    -1,    -1,    -1,    -1},	/* IdSel 21,  PCEB   */
+		{ 16+0,  16+0,  16+1,  16+2,  16+3},	/* IdSel 22,  slot 2 */
+		{ 16+4,  16+4,  16+5,  16+6,  16+7},	/* IdSel 23,  slot 1 */
+	};
+	const long min_idsel = 6, max_idsel = 12, irqs_per_slot = 5;
+	return COMMON_TABLE_LOOKUP;
+}
+
+static void
+alcor_kill_arch(int mode)
+{
+	cia_kill_arch(mode);
+
+#ifndef ALPHA_RESTORE_SRM_SETUP
+	switch(mode) {
+	case LINUX_REBOOT_CMD_RESTART:
+		/* Who said DEC engineers have no sense of humor? ;-) */
+		if (alpha_using_srm) {
+			*(vuip) GRU_RESET = 0x0000dead;
+			mb();
+		}
+		break;
+	case LINUX_REBOOT_CMD_HALT:
+		break;
+	case LINUX_REBOOT_CMD_POWER_OFF:
+		break;
+	}
+
+	halt();
+#endif
+}
+
+static void __init
+alcor_init_pci(void)
+{
+	struct pci_dev *dev;
+
+	cia_init_pci();
+
+	/*
+	 * Now we can look to see if we are really running on an XLT-type
+	 * motherboard, by looking for a 21040 TULIP in slot 6, which is
+	 * built into XLT and BRET/MAVERICK, but not available on ALCOR.
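	 *
	 * (As for the irq_tab lookup in alcor_map_irq() above:
	 * COMMON_TABLE_LOOKUP, from pci_impl.h, presumably reduces to
	 * something like
	 *
	 *	slot >= min_idsel && slot <= max_idsel && pin < irqs_per_slot
	 *		? irq_tab[slot - min_idsel][pin] : -1
	 *
	 * so pin 1 (INTA) of the Alcor device at device-function slot 8
	 * resolves to irq_tab[8 - 6][1] = 16 + 16, i.e. IRQ 32.)
	 *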
+ */ + dev = pci_find_device(PCI_VENDOR_ID_DEC, + PCI_DEVICE_ID_DEC_TULIP, + NULL); + if (dev && dev->devfn == PCI_DEVFN(6,0)) { + alpha_mv.sys.cia.gru_int_req_bits = XLT_GRU_INT_REQ_BITS; + printk(KERN_INFO "%s: Detected AS500 or XLT motherboard.\n", + __FUNCTION__); + } +} + + +/* + * The System Vectors + */ + +struct alpha_machine_vector alcor_mv __initmv = { + .vector_name = "Alcor", + DO_EV5_MMU, + DO_DEFAULT_RTC, + DO_CIA_IO, + .machine_check = cia_machine_check, + .max_isa_dma_address = ALPHA_ALCOR_MAX_ISA_DMA_ADDRESS, + .min_io_address = EISA_DEFAULT_IO_BASE, + .min_mem_address = CIA_DEFAULT_MEM_BASE, + + .nr_irqs = 48, + .device_interrupt = alcor_device_interrupt, + + .init_arch = cia_init_arch, + .init_irq = alcor_init_irq, + .init_rtc = common_init_rtc, + .init_pci = alcor_init_pci, + .kill_arch = alcor_kill_arch, + .pci_map_irq = alcor_map_irq, + .pci_swizzle = common_swizzle, + + .sys = { .cia = { + .gru_int_req_bits = ALCOR_GRU_INT_REQ_BITS + }} +}; +ALIAS_MV(alcor) + +struct alpha_machine_vector xlt_mv __initmv = { + .vector_name = "XLT", + DO_EV5_MMU, + DO_DEFAULT_RTC, + DO_CIA_IO, + .machine_check = cia_machine_check, + .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, + .min_io_address = EISA_DEFAULT_IO_BASE, + .min_mem_address = CIA_DEFAULT_MEM_BASE, + + .nr_irqs = 48, + .device_interrupt = alcor_device_interrupt, + + .init_arch = cia_init_arch, + .init_irq = alcor_init_irq, + .init_rtc = common_init_rtc, + .init_pci = alcor_init_pci, + .kill_arch = alcor_kill_arch, + .pci_map_irq = alcor_map_irq, + .pci_swizzle = common_swizzle, + + .sys = { .cia = { + .gru_int_req_bits = XLT_GRU_INT_REQ_BITS + }} +}; + +/* No alpha_mv alias for XLT, since we compile it in unconditionally + with ALCOR; setup_arch knows how to cope. */ diff --git a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c new file mode 100644 index 0000000..8e3374d --- /dev/null +++ b/arch/alpha/kernel/sys_cabriolet.c @@ -0,0 +1,448 @@ +/* + * linux/arch/alpha/kernel/sys_cabriolet.c + * + * Copyright (C) 1995 David A Rusling + * Copyright (C) 1996 Jay A Estabrook + * Copyright (C) 1998, 1999, 2000 Richard Henderson + * + * Code supporting the Cabriolet (AlphaPC64), EB66+, and EB164, + * PC164 and LX164. + */ + +#include <linux/config.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/bitops.h> + +#include <asm/ptrace.h> +#include <asm/system.h> +#include <asm/dma.h> +#include <asm/irq.h> +#include <asm/mmu_context.h> +#include <asm/io.h> +#include <asm/pgtable.h> +#include <asm/core_apecs.h> +#include <asm/core_cia.h> +#include <asm/core_lca.h> +#include <asm/tlbflush.h> + +#include "proto.h" +#include "irq_impl.h" +#include "pci_impl.h" +#include "machvec_impl.h" + + +/* Note mask bit is true for DISABLED irqs. 
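   (This is the opposite convention from sys_alcor.c, where a set bit
   means ENABLED; each simply mirrors its hardware mask register.)
   As a worked example of cabriolet_update_irq_hw() below: for
   irq 20, ofs = (20 - 16) / 8 = 0, so the low byte of (mask >> 16)
   is written to port 0x804.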
*/ +static unsigned long cached_irq_mask = ~0UL; + +static inline void +cabriolet_update_irq_hw(unsigned int irq, unsigned long mask) +{ + int ofs = (irq - 16) / 8; + outb(mask >> (16 + ofs * 8), 0x804 + ofs); +} + +static inline void +cabriolet_enable_irq(unsigned int irq) +{ + cabriolet_update_irq_hw(irq, cached_irq_mask &= ~(1UL << irq)); +} + +static void +cabriolet_disable_irq(unsigned int irq) +{ + cabriolet_update_irq_hw(irq, cached_irq_mask |= 1UL << irq); +} + +static unsigned int +cabriolet_startup_irq(unsigned int irq) +{ + cabriolet_enable_irq(irq); + return 0; /* never anything pending */ +} + +static void +cabriolet_end_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + cabriolet_enable_irq(irq); +} + +static struct hw_interrupt_type cabriolet_irq_type = { + .typename = "CABRIOLET", + .startup = cabriolet_startup_irq, + .shutdown = cabriolet_disable_irq, + .enable = cabriolet_enable_irq, + .disable = cabriolet_disable_irq, + .ack = cabriolet_disable_irq, + .end = cabriolet_end_irq, +}; + +static void +cabriolet_device_interrupt(unsigned long v, struct pt_regs *r) +{ + unsigned long pld; + unsigned int i; + + /* Read the interrupt summary registers */ + pld = inb(0x804) | (inb(0x805) << 8) | (inb(0x806) << 16); + + /* + * Now for every possible bit set, work through them and call + * the appropriate interrupt handler. + */ + while (pld) { + i = ffz(~pld); + pld &= pld - 1; /* clear least bit set */ + if (i == 4) { + isa_device_interrupt(v, r); + } else { + handle_irq(16 + i, r); + } + } +} + +static void __init +common_init_irq(void (*srm_dev_int)(unsigned long v, struct pt_regs *r)) +{ + init_i8259a_irqs(); + + if (alpha_using_srm) { + alpha_mv.device_interrupt = srm_dev_int; + init_srm_irqs(35, 0); + } + else { + long i; + + outb(0xff, 0x804); + outb(0xff, 0x805); + outb(0xff, 0x806); + + for (i = 16; i < 35; ++i) { + irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; + irq_desc[i].handler = &cabriolet_irq_type; + } + } + + common_init_isa_dma(); + setup_irq(16+4, &isa_cascade_irqaction); +} + +#ifndef CONFIG_ALPHA_PC164 +static void __init +cabriolet_init_irq(void) +{ + common_init_irq(srm_device_interrupt); +} +#endif + +#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_PC164) +/* In theory, the PC164 has the same interrupt hardware as the other + Cabriolet based systems. However, something got screwed up late + in the development cycle which broke the interrupt masking hardware. + Repeat, it is not possible to mask and ack interrupts. At all. + + In an attempt to work around this, while processing interrupts, + we do not allow the IPL to drop below what it is currently. This + prevents the possibility of recursion. + + ??? Another option might be to force all PCI devices to use edge + triggered rather than level triggered interrupts. That might be + too invasive though. */ + +static void +pc164_srm_device_interrupt(unsigned long v, struct pt_regs *r) +{ + __min_ipl = getipl(); + srm_device_interrupt(v, r); + __min_ipl = 0; +} + +static void +pc164_device_interrupt(unsigned long v, struct pt_regs *r) +{ + __min_ipl = getipl(); + cabriolet_device_interrupt(v, r); + __min_ipl = 0; +} + +static void __init +pc164_init_irq(void) +{ + common_init_irq(pc164_srm_device_interrupt); +} +#endif + +/* + * The EB66+ is very similar to the EB66 except that it does not have + * the on-board NCR and Tulip chips. 
In the code below, I have used + * slot number to refer to the id select line and *not* the slot + * number used in the EB66+ documentation. However, in the table, + * I've given the slot number, the id select line and the Jxx number + * that's printed on the board. The interrupt pins from the PCI slots + * are wired into 3 interrupt summary registers at 0x804, 0x805 and + * 0x806 ISA. + * + * In the table, -1 means don't assign an IRQ number. This is usually + * because it is the Saturn IO (SIO) PCI/ISA Bridge Chip. + */ + +static inline int __init +eb66p_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + static char irq_tab[5][5] __initdata = { + /*INT INTA INTB INTC INTD */ + {16+0, 16+0, 16+5, 16+9, 16+13}, /* IdSel 6, slot 0, J25 */ + {16+1, 16+1, 16+6, 16+10, 16+14}, /* IdSel 7, slot 1, J26 */ + { -1, -1, -1, -1, -1}, /* IdSel 8, SIO */ + {16+2, 16+2, 16+7, 16+11, 16+15}, /* IdSel 9, slot 2, J27 */ + {16+3, 16+3, 16+8, 16+12, 16+6} /* IdSel 10, slot 3, J28 */ + }; + const long min_idsel = 6, max_idsel = 10, irqs_per_slot = 5; + return COMMON_TABLE_LOOKUP; +} + + +/* + * The AlphaPC64 is very similar to the EB66+ except that its slots + * are numbered differently. In the code below, I have used slot + * number to refer to the id select line and *not* the slot number + * used in the AlphaPC64 documentation. However, in the table, I've + * given the slot number, the id select line and the Jxx number that's + * printed on the board. The interrupt pins from the PCI slots are + * wired into 3 interrupt summary registers at 0x804, 0x805 and 0x806 + * ISA. + * + * In the table, -1 means don't assign an IRQ number. This is usually + * because it is the Saturn IO (SIO) PCI/ISA Bridge Chip. + */ + +static inline int __init +cabriolet_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + static char irq_tab[5][5] __initdata = { + /*INT INTA INTB INTC INTD */ + { 16+2, 16+2, 16+7, 16+11, 16+15}, /* IdSel 5, slot 2, J21 */ + { 16+0, 16+0, 16+5, 16+9, 16+13}, /* IdSel 6, slot 0, J19 */ + { 16+1, 16+1, 16+6, 16+10, 16+14}, /* IdSel 7, slot 1, J20 */ + { -1, -1, -1, -1, -1}, /* IdSel 8, SIO */ + { 16+3, 16+3, 16+8, 16+12, 16+16} /* IdSel 9, slot 3, J22 */ + }; + const long min_idsel = 5, max_idsel = 9, irqs_per_slot = 5; + return COMMON_TABLE_LOOKUP; +} + +static inline void __init +cabriolet_init_pci(void) +{ + common_init_pci(); + ns87312_enable_ide(0x398); +} + +static inline void __init +cia_cab_init_pci(void) +{ + cia_init_pci(); + ns87312_enable_ide(0x398); +} + +/* + * The PC164 and LX164 have 19 PCI interrupts, four from each of the four + * PCI slots, the SIO, PCI/IDE, and USB. + * + * Each of the interrupts can be individually masked. This is + * accomplished by setting the appropriate bit in the mask register. + * A bit is set by writing a "1" to the desired position in the mask + * register and cleared by writing a "0". There are 3 mask registers + * located at ISA address 804h, 805h and 806h. + * + * An I/O read at ISA address 804h, 805h, 806h will return the + * state of the 11 PCI interrupts and not the state of the MASKED + * interrupts. + * + * Note: A write to I/O 804h, 805h, and 806h the mask register will be + * updated. 
+ * + * + * ISA DATA<7:0> + * ISA +--------------------------------------------------------------+ + * ADDRESS | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | + * +==============================================================+ + * 0x804 | INTB0 | USB | IDE | SIO | INTA3 |INTA2 | INTA1 | INTA0 | + * +--------------------------------------------------------------+ + * 0x805 | INTD0 | INTC3 | INTC2 | INTC1 | INTC0 |INTB3 | INTB2 | INTB1 | + * +--------------------------------------------------------------+ + * 0x806 | Rsrv | Rsrv | Rsrv | Rsrv | Rsrv |INTD3 | INTD2 | INTD1 | + * +--------------------------------------------------------------+ + * * Rsrv = reserved bits + * Note: The mask register is write-only. + * + * IdSel + * 5 32 bit PCI option slot 2 + * 6 64 bit PCI option slot 0 + * 7 64 bit PCI option slot 1 + * 8 Saturn I/O + * 9 32 bit PCI option slot 3 + * 10 USB + * 11 IDE + * + */ + +static inline int __init +alphapc164_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + static char irq_tab[7][5] __initdata = { + /*INT INTA INTB INTC INTD */ + { 16+2, 16+2, 16+9, 16+13, 16+17}, /* IdSel 5, slot 2, J20 */ + { 16+0, 16+0, 16+7, 16+11, 16+15}, /* IdSel 6, slot 0, J29 */ + { 16+1, 16+1, 16+8, 16+12, 16+16}, /* IdSel 7, slot 1, J26 */ + { -1, -1, -1, -1, -1}, /* IdSel 8, SIO */ + { 16+3, 16+3, 16+10, 16+14, 16+18}, /* IdSel 9, slot 3, J19 */ + { 16+6, 16+6, 16+6, 16+6, 16+6}, /* IdSel 10, USB */ + { 16+5, 16+5, 16+5, 16+5, 16+5} /* IdSel 11, IDE */ + }; + const long min_idsel = 5, max_idsel = 11, irqs_per_slot = 5; + return COMMON_TABLE_LOOKUP; +} + +static inline void __init +alphapc164_init_pci(void) +{ + cia_init_pci(); + SMC93x_Init(); +} + + +/* + * The System Vector + */ + +#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_CABRIOLET) +struct alpha_machine_vector cabriolet_mv __initmv = { + .vector_name = "Cabriolet", + DO_EV4_MMU, + DO_DEFAULT_RTC, + DO_APECS_IO, + .machine_check = apecs_machine_check, + .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, + .min_io_address = DEFAULT_IO_BASE, + .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE, + + .nr_irqs = 35, + .device_interrupt = cabriolet_device_interrupt, + + .init_arch = apecs_init_arch, + .init_irq = cabriolet_init_irq, + .init_rtc = common_init_rtc, + .init_pci = cabriolet_init_pci, + .pci_map_irq = cabriolet_map_irq, + .pci_swizzle = common_swizzle, +}; +#ifndef CONFIG_ALPHA_EB64P +ALIAS_MV(cabriolet) +#endif +#endif + +#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EB164) +struct alpha_machine_vector eb164_mv __initmv = { + .vector_name = "EB164", + DO_EV5_MMU, + DO_DEFAULT_RTC, + DO_CIA_IO, + .machine_check = cia_machine_check, + .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, + .min_io_address = DEFAULT_IO_BASE, + .min_mem_address = CIA_DEFAULT_MEM_BASE, + + .nr_irqs = 35, + .device_interrupt = cabriolet_device_interrupt, + + .init_arch = cia_init_arch, + .init_irq = cabriolet_init_irq, + .init_rtc = common_init_rtc, + .init_pci = cia_cab_init_pci, + .kill_arch = cia_kill_arch, + .pci_map_irq = cabriolet_map_irq, + .pci_swizzle = common_swizzle, +}; +ALIAS_MV(eb164) +#endif + +#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EB66P) +struct alpha_machine_vector eb66p_mv __initmv = { + .vector_name = "EB66+", + DO_EV4_MMU, + DO_DEFAULT_RTC, + DO_LCA_IO, + .machine_check = lca_machine_check, + .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, + .min_io_address = DEFAULT_IO_BASE, + .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE, + + .nr_irqs = 35, + .device_interrupt = cabriolet_device_interrupt, + + 
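/*
 * (A note on these alpha_machine_vector definitions: each board
 * declares one, __initmv places it in an init section, and on a
 * CONFIG_ALPHA_GENERIC kernel setup_arch() selects the vector that
 * matches the system type reported by the HWRPB.  On a
 * single-platform build, ALIAS_MV() presumably provides the alias
 * that makes the chosen vector visible as the global "alpha_mv";
 * see machvec_impl.h for the details.)
 */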
.init_arch = lca_init_arch, + .init_irq = cabriolet_init_irq, + .init_rtc = common_init_rtc, + .init_pci = cabriolet_init_pci, + .pci_map_irq = eb66p_map_irq, + .pci_swizzle = common_swizzle, +}; +ALIAS_MV(eb66p) +#endif + +#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_LX164) +struct alpha_machine_vector lx164_mv __initmv = { + .vector_name = "LX164", + DO_EV5_MMU, + DO_DEFAULT_RTC, + DO_PYXIS_IO, + .machine_check = cia_machine_check, + .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, + .min_io_address = DEFAULT_IO_BASE, + .min_mem_address = DEFAULT_MEM_BASE, + .pci_dac_offset = PYXIS_DAC_OFFSET, + + .nr_irqs = 35, + .device_interrupt = cabriolet_device_interrupt, + + .init_arch = pyxis_init_arch, + .init_irq = cabriolet_init_irq, + .init_rtc = common_init_rtc, + .init_pci = alphapc164_init_pci, + .kill_arch = cia_kill_arch, + .pci_map_irq = alphapc164_map_irq, + .pci_swizzle = common_swizzle, +}; +ALIAS_MV(lx164) +#endif + +#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_PC164) +struct alpha_machine_vector pc164_mv __initmv = { + .vector_name = "PC164", + DO_EV5_MMU, + DO_DEFAULT_RTC, + DO_CIA_IO, + .machine_check = cia_machine_check, + .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, + .min_io_address = DEFAULT_IO_BASE, + .min_mem_address = CIA_DEFAULT_MEM_BASE, + + .nr_irqs = 35, + .device_interrupt = pc164_device_interrupt, + + .init_arch = cia_init_arch, + .init_irq = pc164_init_irq, + .init_rtc = common_init_rtc, + .init_pci = alphapc164_init_pci, + .kill_arch = cia_kill_arch, + .pci_map_irq = alphapc164_map_irq, + .pci_swizzle = common_swizzle, +}; +ALIAS_MV(pc164) +#endif diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c new file mode 100644 index 0000000..9e36b07 --- /dev/null +++ b/arch/alpha/kernel/sys_dp264.c @@ -0,0 +1,689 @@ +/* + * linux/arch/alpha/kernel/sys_dp264.c + * + * Copyright (C) 1995 David A Rusling + * Copyright (C) 1996, 1999 Jay A Estabrook + * Copyright (C) 1998, 1999 Richard Henderson + * + * Modified by Christopher C. Chimelis, 2001 to + * add support for the addition of Shark to the + * Tsunami family. + * + * Code supporting the DP264 (EV6+TSUNAMI). + */ + +#include <linux/config.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/bitops.h> + +#include <asm/ptrace.h> +#include <asm/system.h> +#include <asm/dma.h> +#include <asm/irq.h> +#include <asm/mmu_context.h> +#include <asm/io.h> +#include <asm/pgtable.h> +#include <asm/core_tsunami.h> +#include <asm/hwrpb.h> +#include <asm/tlbflush.h> + +#include "proto.h" +#include "irq_impl.h" +#include "pci_impl.h" +#include "machvec_impl.h" + + +/* Note mask bit is true for ENABLED irqs. 
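   tsunami_update_irq_hw() below fans this single cached mask out to
   the per-CPU DIMx interrupt-mask CSRs, filtering each copy through
   cpu_irq_affinity[] and steering the ISA enable bit (1UL << 55) to
   the boot CPU.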
*/ +static unsigned long cached_irq_mask; +/* dp264 boards handle at max four CPUs */ +static unsigned long cpu_irq_affinity[4] = { 0UL, 0UL, 0UL, 0UL }; + +DEFINE_SPINLOCK(dp264_irq_lock); + +static void +tsunami_update_irq_hw(unsigned long mask) +{ + register tsunami_cchip *cchip = TSUNAMI_cchip; + unsigned long isa_enable = 1UL << 55; + register int bcpu = boot_cpuid; + +#ifdef CONFIG_SMP + volatile unsigned long *dim0, *dim1, *dim2, *dim3; + unsigned long mask0, mask1, mask2, mask3, dummy; + + mask &= ~isa_enable; + mask0 = mask & cpu_irq_affinity[0]; + mask1 = mask & cpu_irq_affinity[1]; + mask2 = mask & cpu_irq_affinity[2]; + mask3 = mask & cpu_irq_affinity[3]; + + if (bcpu == 0) mask0 |= isa_enable; + else if (bcpu == 1) mask1 |= isa_enable; + else if (bcpu == 2) mask2 |= isa_enable; + else mask3 |= isa_enable; + + dim0 = &cchip->dim0.csr; + dim1 = &cchip->dim1.csr; + dim2 = &cchip->dim2.csr; + dim3 = &cchip->dim3.csr; + if (!cpu_possible(0)) dim0 = &dummy; + if (!cpu_possible(1)) dim1 = &dummy; + if (!cpu_possible(2)) dim2 = &dummy; + if (!cpu_possible(3)) dim3 = &dummy; + + *dim0 = mask0; + *dim1 = mask1; + *dim2 = mask2; + *dim3 = mask3; + mb(); + *dim0; + *dim1; + *dim2; + *dim3; +#else + volatile unsigned long *dimB; + if (bcpu == 0) dimB = &cchip->dim0.csr; + else if (bcpu == 1) dimB = &cchip->dim1.csr; + else if (bcpu == 2) dimB = &cchip->dim2.csr; + else dimB = &cchip->dim3.csr; + + *dimB = mask | isa_enable; + mb(); + *dimB; +#endif +} + +static void +dp264_enable_irq(unsigned int irq) +{ + spin_lock(&dp264_irq_lock); + cached_irq_mask |= 1UL << irq; + tsunami_update_irq_hw(cached_irq_mask); + spin_unlock(&dp264_irq_lock); +} + +static void +dp264_disable_irq(unsigned int irq) +{ + spin_lock(&dp264_irq_lock); + cached_irq_mask &= ~(1UL << irq); + tsunami_update_irq_hw(cached_irq_mask); + spin_unlock(&dp264_irq_lock); +} + +static unsigned int +dp264_startup_irq(unsigned int irq) +{ + dp264_enable_irq(irq); + return 0; /* never anything pending */ +} + +static void +dp264_end_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + dp264_enable_irq(irq); +} + +static void +clipper_enable_irq(unsigned int irq) +{ + spin_lock(&dp264_irq_lock); + cached_irq_mask |= 1UL << (irq - 16); + tsunami_update_irq_hw(cached_irq_mask); + spin_unlock(&dp264_irq_lock); +} + +static void +clipper_disable_irq(unsigned int irq) +{ + spin_lock(&dp264_irq_lock); + cached_irq_mask &= ~(1UL << (irq - 16)); + tsunami_update_irq_hw(cached_irq_mask); + spin_unlock(&dp264_irq_lock); +} + +static unsigned int +clipper_startup_irq(unsigned int irq) +{ + clipper_enable_irq(irq); + return 0; /* never anything pending */ +} + +static void +clipper_end_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + clipper_enable_irq(irq); +} + +static void +cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity) +{ + int cpu; + + for (cpu = 0; cpu < 4; cpu++) { + unsigned long aff = cpu_irq_affinity[cpu]; + if (cpu_isset(cpu, affinity)) + aff |= 1UL << irq; + else + aff &= ~(1UL << irq); + cpu_irq_affinity[cpu] = aff; + } +} + +static void +dp264_set_affinity(unsigned int irq, cpumask_t affinity) +{ + spin_lock(&dp264_irq_lock); + cpu_set_irq_affinity(irq, affinity); + tsunami_update_irq_hw(cached_irq_mask); + spin_unlock(&dp264_irq_lock); +} + +static void +clipper_set_affinity(unsigned int irq, cpumask_t affinity) +{ + spin_lock(&dp264_irq_lock); + cpu_set_irq_affinity(irq - 16, affinity); + tsunami_update_irq_hw(cached_irq_mask); + 
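	/*
	 * (A worked example of cpu_set_irq_affinity() above: binding
	 * mask bit 20 to CPUs {0, 2} sets bit 20 in cpu_irq_affinity[0]
	 * and [2] and clears it in [1] and [3]; the
	 * tsunami_update_irq_hw() call that follows then writes
	 * DIM0/DIM2 with that bit set and DIM1/DIM3 with it clear.)
	 */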
spin_unlock(&dp264_irq_lock); +} + +static struct hw_interrupt_type dp264_irq_type = { + .typename = "DP264", + .startup = dp264_startup_irq, + .shutdown = dp264_disable_irq, + .enable = dp264_enable_irq, + .disable = dp264_disable_irq, + .ack = dp264_disable_irq, + .end = dp264_end_irq, + .set_affinity = dp264_set_affinity, +}; + +static struct hw_interrupt_type clipper_irq_type = { + .typename = "CLIPPER", + .startup = clipper_startup_irq, + .shutdown = clipper_disable_irq, + .enable = clipper_enable_irq, + .disable = clipper_disable_irq, + .ack = clipper_disable_irq, + .end = clipper_end_irq, + .set_affinity = clipper_set_affinity, +}; + +static void +dp264_device_interrupt(unsigned long vector, struct pt_regs * regs) +{ +#if 1 + printk("dp264_device_interrupt: NOT IMPLEMENTED YET!! \n"); +#else + unsigned long pld; + unsigned int i; + + /* Read the interrupt summary register of TSUNAMI */ + pld = TSUNAMI_cchip->dir0.csr; + + /* + * Now for every possible bit set, work through them and call + * the appropriate interrupt handler. + */ + while (pld) { + i = ffz(~pld); + pld &= pld - 1; /* clear least bit set */ + if (i == 55) + isa_device_interrupt(vector, regs); + else + handle_irq(16 + i, 16 + i, regs); +#if 0 + TSUNAMI_cchip->dir0.csr = 1UL << i; mb(); + tmp = TSUNAMI_cchip->dir0.csr; +#endif + } +#endif +} + +static void +dp264_srm_device_interrupt(unsigned long vector, struct pt_regs * regs) +{ + int irq; + + irq = (vector - 0x800) >> 4; + + /* + * The SRM console reports PCI interrupts with a vector calculated by: + * + * 0x900 + (0x10 * DRIR-bit) + * + * So bit 16 shows up as IRQ 32, etc. + * + * On DP264/BRICK/MONET, we adjust it down by 16 because at least + * that many of the low order bits of the DRIR are not used, and + * so we don't count them. + */ + if (irq >= 32) + irq -= 16; + + handle_irq(irq, regs); +} + +static void +clipper_srm_device_interrupt(unsigned long vector, struct pt_regs * regs) +{ + int irq; + + irq = (vector - 0x800) >> 4; + +/* + * The SRM console reports PCI interrupts with a vector calculated by: + * + * 0x900 + (0x10 * DRIR-bit) + * + * So bit 16 shows up as IRQ 32, etc. + * + * CLIPPER uses bits 8-47 for PCI interrupts, so we do not need + * to scale down the vector reported, we just use it. + * + * Eg IRQ 24 is DRIR bit 8, etc, etc + */ + handle_irq(irq, regs); +} + +static void __init +init_tsunami_irqs(struct hw_interrupt_type * ops, int imin, int imax) +{ + long i; + for (i = imin; i <= imax; ++i) { + irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; + irq_desc[i].handler = ops; + } +} + +static void __init +dp264_init_irq(void) +{ + outb(0, DMA1_RESET_REG); + outb(0, DMA2_RESET_REG); + outb(DMA_MODE_CASCADE, DMA2_MODE_REG); + outb(0, DMA2_MASK_REG); + + if (alpha_using_srm) + alpha_mv.device_interrupt = dp264_srm_device_interrupt; + + tsunami_update_irq_hw(0); + + init_i8259a_irqs(); + init_tsunami_irqs(&dp264_irq_type, 16, 47); +} + +static void __init +clipper_init_irq(void) +{ + outb(0, DMA1_RESET_REG); + outb(0, DMA2_RESET_REG); + outb(DMA_MODE_CASCADE, DMA2_MODE_REG); + outb(0, DMA2_MASK_REG); + + if (alpha_using_srm) + alpha_mv.device_interrupt = clipper_srm_device_interrupt; + + tsunami_update_irq_hw(0); + + init_i8259a_irqs(); + init_tsunami_irqs(&clipper_irq_type, 24, 63); +} + + +/* + * PCI Fixup configuration. 
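 *
 * (A worked example of the SRM vector decoding in
 * dp264_srm_device_interrupt() above: SRM delivers vector
 * 0x900 + 0x10 * bit, so DRIR bit 16 arrives as vector 0xa00;
 * (0xa00 - 0x800) >> 4 = 32, and the "irq -= 16" adjustment maps it
 * to IRQ 16, the first PCI interrupt after the 16 ISA ones.)
 *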
+ * + * Summary @ TSUNAMI_CSR_DIM0: + * Bit Meaning + * 0-17 Unused + *18 Interrupt SCSI B (Adaptec 7895 builtin) + *19 Interrupt SCSI A (Adaptec 7895 builtin) + *20 Interrupt Line D from slot 2 PCI0 + *21 Interrupt Line C from slot 2 PCI0 + *22 Interrupt Line B from slot 2 PCI0 + *23 Interrupt Line A from slot 2 PCI0 + *24 Interrupt Line D from slot 1 PCI0 + *25 Interrupt Line C from slot 1 PCI0 + *26 Interrupt Line B from slot 1 PCI0 + *27 Interrupt Line A from slot 1 PCI0 + *28 Interrupt Line D from slot 0 PCI0 + *29 Interrupt Line C from slot 0 PCI0 + *30 Interrupt Line B from slot 0 PCI0 + *31 Interrupt Line A from slot 0 PCI0 + * + *32 Interrupt Line D from slot 3 PCI1 + *33 Interrupt Line C from slot 3 PCI1 + *34 Interrupt Line B from slot 3 PCI1 + *35 Interrupt Line A from slot 3 PCI1 + *36 Interrupt Line D from slot 2 PCI1 + *37 Interrupt Line C from slot 2 PCI1 + *38 Interrupt Line B from slot 2 PCI1 + *39 Interrupt Line A from slot 2 PCI1 + *40 Interrupt Line D from slot 1 PCI1 + *41 Interrupt Line C from slot 1 PCI1 + *42 Interrupt Line B from slot 1 PCI1 + *43 Interrupt Line A from slot 1 PCI1 + *44 Interrupt Line D from slot 0 PCI1 + *45 Interrupt Line C from slot 0 PCI1 + *46 Interrupt Line B from slot 0 PCI1 + *47 Interrupt Line A from slot 0 PCI1 + *48-52 Unused + *53 PCI0 NMI (from Cypress) + *54 PCI0 SMI INT (from Cypress) + *55 PCI0 ISA Interrupt (from Cypress) + *56-60 Unused + *61 PCI1 Bus Error + *62 PCI0 Bus Error + *63 Reserved + * + * IdSel + * 5 Cypress Bridge I/O + * 6 SCSI Adaptec builtin + * 7 64 bit PCI option slot 0 (all busses) + * 8 64 bit PCI option slot 1 (all busses) + * 9 64 bit PCI option slot 2 (all busses) + * 10 64 bit PCI option slot 3 (not bus 0) + */ + +static int __init +dp264_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + static char irq_tab[6][5] __initdata = { + /*INT INTA INTB INTC INTD */ + { -1, -1, -1, -1, -1}, /* IdSel 5 ISA Bridge */ + { 16+ 3, 16+ 3, 16+ 2, 16+ 2, 16+ 2}, /* IdSel 6 SCSI builtin*/ + { 16+15, 16+15, 16+14, 16+13, 16+12}, /* IdSel 7 slot 0 */ + { 16+11, 16+11, 16+10, 16+ 9, 16+ 8}, /* IdSel 8 slot 1 */ + { 16+ 7, 16+ 7, 16+ 6, 16+ 5, 16+ 4}, /* IdSel 9 slot 2 */ + { 16+ 3, 16+ 3, 16+ 2, 16+ 1, 16+ 0} /* IdSel 10 slot 3 */ + }; + const long min_idsel = 5, max_idsel = 10, irqs_per_slot = 5; + + struct pci_controller *hose = dev->sysdata; + int irq = COMMON_TABLE_LOOKUP; + + if (irq > 0) { + irq += 16 * hose->index; + } else { + /* ??? The Contaq IDE controller on the ISA bridge uses + "legacy" interrupts 14 and 15. I don't know if anything + can wind up at the same slot+pin on hose1, so we'll + just have to trust whatever value the console might + have assigned. 
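	   As a worked example of the hose adjustment above: INTA
	   (pin 1) from the device at slot 7 on hose 1 looks up
	   irq_tab[7 - 5][1] = 16 + 15 = 31, then gains
	   16 * hose->index, giving IRQ 47.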
*/ + + u8 irq8; + pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq8); + irq = irq8; + } + + return irq; +} + +static int __init +monet_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + static char irq_tab[13][5] __initdata = { + /*INT INTA INTB INTC INTD */ + { 45, 45, 45, 45, 45}, /* IdSel 3 21143 PCI1 */ + { -1, -1, -1, -1, -1}, /* IdSel 4 unused */ + { -1, -1, -1, -1, -1}, /* IdSel 5 unused */ + { 47, 47, 47, 47, 47}, /* IdSel 6 SCSI PCI1 */ + { -1, -1, -1, -1, -1}, /* IdSel 7 ISA Bridge */ + { -1, -1, -1, -1, -1}, /* IdSel 8 P2P PCI1 */ +#if 1 + { 28, 28, 29, 30, 31}, /* IdSel 14 slot 4 PCI2*/ + { 24, 24, 25, 26, 27}, /* IdSel 15 slot 5 PCI2*/ +#else + { -1, -1, -1, -1, -1}, /* IdSel 9 unused */ + { -1, -1, -1, -1, -1}, /* IdSel 10 unused */ +#endif + { 40, 40, 41, 42, 43}, /* IdSel 11 slot 1 PCI0*/ + { 36, 36, 37, 38, 39}, /* IdSel 12 slot 2 PCI0*/ + { 32, 32, 33, 34, 35}, /* IdSel 13 slot 3 PCI0*/ + { 28, 28, 29, 30, 31}, /* IdSel 14 slot 4 PCI2*/ + { 24, 24, 25, 26, 27} /* IdSel 15 slot 5 PCI2*/ + }; + const long min_idsel = 3, max_idsel = 15, irqs_per_slot = 5; + return COMMON_TABLE_LOOKUP; +} + +static u8 __init +monet_swizzle(struct pci_dev *dev, u8 *pinp) +{ + struct pci_controller *hose = dev->sysdata; + int slot, pin = *pinp; + + if (!dev->bus->parent) { + slot = PCI_SLOT(dev->devfn); + } + /* Check for the built-in bridge on hose 1. */ + else if (hose->index == 1 && PCI_SLOT(dev->bus->self->devfn) == 8) { + slot = PCI_SLOT(dev->devfn); + } else { + /* Must be a card-based bridge. */ + do { + /* Check for built-in bridge on hose 1. */ + if (hose->index == 1 && + PCI_SLOT(dev->bus->self->devfn) == 8) { + slot = PCI_SLOT(dev->devfn); + break; + } + pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)) ; + + /* Move up the chain of bridges. */ + dev = dev->bus->self; + /* Slot of the next bridge. 
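		   bridge_swizzle() applies the standard PCI INTx
		   rotation for devices behind a bridge -- presumably
		   ((pin - 1 + slot) % 4) + 1 -- so INTA (1) from
		   slot 2 behind a bridge emerges as INTC (3) upstream;
		   the loop keeps rotating until it reaches the root
		   bus or the built-in bridge on hose 1.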
*/ + slot = PCI_SLOT(dev->devfn); + } while (dev->bus->self); + } + *pinp = pin; + return slot; +} + +static int __init +webbrick_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + static char irq_tab[13][5] __initdata = { + /*INT INTA INTB INTC INTD */ + { -1, -1, -1, -1, -1}, /* IdSel 7 ISA Bridge */ + { -1, -1, -1, -1, -1}, /* IdSel 8 unused */ + { 29, 29, 29, 29, 29}, /* IdSel 9 21143 #1 */ + { -1, -1, -1, -1, -1}, /* IdSel 10 unused */ + { 30, 30, 30, 30, 30}, /* IdSel 11 21143 #2 */ + { -1, -1, -1, -1, -1}, /* IdSel 12 unused */ + { -1, -1, -1, -1, -1}, /* IdSel 13 unused */ + { 35, 35, 34, 33, 32}, /* IdSel 14 slot 0 */ + { 39, 39, 38, 37, 36}, /* IdSel 15 slot 1 */ + { 43, 43, 42, 41, 40}, /* IdSel 16 slot 2 */ + { 47, 47, 46, 45, 44}, /* IdSel 17 slot 3 */ + }; + const long min_idsel = 7, max_idsel = 17, irqs_per_slot = 5; + return COMMON_TABLE_LOOKUP; +} + +static int __init +clipper_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + static char irq_tab[7][5] __initdata = { + /*INT INTA INTB INTC INTD */ + { 16+ 8, 16+ 8, 16+ 9, 16+10, 16+11}, /* IdSel 1 slot 1 */ + { 16+12, 16+12, 16+13, 16+14, 16+15}, /* IdSel 2 slot 2 */ + { 16+16, 16+16, 16+17, 16+18, 16+19}, /* IdSel 3 slot 3 */ + { 16+20, 16+20, 16+21, 16+22, 16+23}, /* IdSel 4 slot 4 */ + { 16+24, 16+24, 16+25, 16+26, 16+27}, /* IdSel 5 slot 5 */ + { 16+28, 16+28, 16+29, 16+30, 16+31}, /* IdSel 6 slot 6 */ + { -1, -1, -1, -1, -1} /* IdSel 7 ISA Bridge */ + }; + const long min_idsel = 1, max_idsel = 7, irqs_per_slot = 5; + + struct pci_controller *hose = dev->sysdata; + int irq = COMMON_TABLE_LOOKUP; + + if (irq > 0) + irq += 16 * hose->index; + + return irq; +} + +static void __init +dp264_init_pci(void) +{ + common_init_pci(); + SMC669_Init(0); +} + +static void __init +monet_init_pci(void) +{ + common_init_pci(); + SMC669_Init(1); + es1888_init(); +} + +static void __init +webbrick_init_arch(void) +{ + tsunami_init_arch(); + + /* Tsunami caches 4 PTEs at a time; DS10 has only 1 hose. 
*/ + hose_head->sg_isa->align_entry = 4; + hose_head->sg_pci->align_entry = 4; +} + + +/* + * The System Vectors + */ + +struct alpha_machine_vector dp264_mv __initmv = { + .vector_name = "DP264", + DO_EV6_MMU, + DO_DEFAULT_RTC, + DO_TSUNAMI_IO, + .machine_check = tsunami_machine_check, + .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, + .min_io_address = DEFAULT_IO_BASE, + .min_mem_address = DEFAULT_MEM_BASE, + .pci_dac_offset = TSUNAMI_DAC_OFFSET, + + .nr_irqs = 64, + .device_interrupt = dp264_device_interrupt, + + .init_arch = tsunami_init_arch, + .init_irq = dp264_init_irq, + .init_rtc = common_init_rtc, + .init_pci = dp264_init_pci, + .kill_arch = tsunami_kill_arch, + .pci_map_irq = dp264_map_irq, + .pci_swizzle = common_swizzle, +}; +ALIAS_MV(dp264) + +struct alpha_machine_vector monet_mv __initmv = { + .vector_name = "Monet", + DO_EV6_MMU, + DO_DEFAULT_RTC, + DO_TSUNAMI_IO, + .machine_check = tsunami_machine_check, + .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, + .min_io_address = DEFAULT_IO_BASE, + .min_mem_address = DEFAULT_MEM_BASE, + .pci_dac_offset = TSUNAMI_DAC_OFFSET, + + .nr_irqs = 64, + .device_interrupt = dp264_device_interrupt, + + .init_arch = tsunami_init_arch, + .init_irq = dp264_init_irq, + .init_rtc = common_init_rtc, + .init_pci = monet_init_pci, + .kill_arch = tsunami_kill_arch, + .pci_map_irq = monet_map_irq, + .pci_swizzle = monet_swizzle, +}; + +struct alpha_machine_vector webbrick_mv __initmv = { + .vector_name = "Webbrick", + DO_EV6_MMU, + DO_DEFAULT_RTC, + DO_TSUNAMI_IO, + .machine_check = tsunami_machine_check, + .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, + .min_io_address = DEFAULT_IO_BASE, + .min_mem_address = DEFAULT_MEM_BASE, + .pci_dac_offset = TSUNAMI_DAC_OFFSET, + + .nr_irqs = 64, + .device_interrupt = dp264_device_interrupt, + + .init_arch = webbrick_init_arch, + .init_irq = dp264_init_irq, + .init_rtc = common_init_rtc, + .init_pci = common_init_pci, + .kill_arch = tsunami_kill_arch, + .pci_map_irq = webbrick_map_irq, + .pci_swizzle = common_swizzle, +}; + +struct alpha_machine_vector clipper_mv __initmv = { + .vector_name = "Clipper", + DO_EV6_MMU, + DO_DEFAULT_RTC, + DO_TSUNAMI_IO, + .machine_check = tsunami_machine_check, + .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, + .min_io_address = DEFAULT_IO_BASE, + .min_mem_address = DEFAULT_MEM_BASE, + .pci_dac_offset = TSUNAMI_DAC_OFFSET, + + .nr_irqs = 64, + .device_interrupt = dp264_device_interrupt, + + .init_arch = tsunami_init_arch, + .init_irq = clipper_init_irq, + .init_rtc = common_init_rtc, + .init_pci = common_init_pci, + .kill_arch = tsunami_kill_arch, + .pci_map_irq = clipper_map_irq, + .pci_swizzle = common_swizzle, +}; + +/* Sharks strongly resemble Clipper, at least as far + * as interrupt routing, etc, so we're using the + * same functions as Clipper does + */ + +struct alpha_machine_vector shark_mv __initmv = { + .vector_name = "Shark", + DO_EV6_MMU, + DO_DEFAULT_RTC, + DO_TSUNAMI_IO, + .machine_check = tsunami_machine_check, + .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, + .min_io_address = DEFAULT_IO_BASE, + .min_mem_address = DEFAULT_MEM_BASE, + .pci_dac_offset = TSUNAMI_DAC_OFFSET, + + .nr_irqs = 64, + .device_interrupt = dp264_device_interrupt, + + .init_arch = tsunami_init_arch, + .init_irq = clipper_init_irq, + .init_rtc = common_init_rtc, + .init_pci = common_init_pci, + .kill_arch = tsunami_kill_arch, + .pci_map_irq = clipper_map_irq, + .pci_swizzle = common_swizzle, +}; + +/* No alpha_mv alias for webbrick/monet/clipper, since we compile them + in 
unconditionally with DP264; setup_arch knows how to cope. */ diff --git a/arch/alpha/kernel/sys_eb64p.c b/arch/alpha/kernel/sys_eb64p.c new file mode 100644 index 0000000..61a79c3 --- /dev/null +++ b/arch/alpha/kernel/sys_eb64p.c @@ -0,0 +1,256 @@ +/* + * linux/arch/alpha/kernel/sys_eb64p.c + * + * Copyright (C) 1995 David A Rusling + * Copyright (C) 1996 Jay A Estabrook + * Copyright (C) 1998, 1999 Richard Henderson + * + * Code supporting the EB64+ and EB66. + */ + +#include <linux/config.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/bitops.h> + +#include <asm/ptrace.h> +#include <asm/system.h> +#include <asm/dma.h> +#include <asm/irq.h> +#include <asm/mmu_context.h> +#include <asm/io.h> +#include <asm/pgtable.h> +#include <asm/core_apecs.h> +#include <asm/core_lca.h> +#include <asm/hwrpb.h> +#include <asm/tlbflush.h> + +#include "proto.h" +#include "irq_impl.h" +#include "pci_impl.h" +#include "machvec_impl.h" + + +/* Note mask bit is true for DISABLED irqs. */ +static unsigned int cached_irq_mask = -1; + +static inline void +eb64p_update_irq_hw(unsigned int irq, unsigned long mask) +{ + outb(mask >> (irq >= 24 ? 24 : 16), (irq >= 24 ? 0x27 : 0x26)); +} + +static inline void +eb64p_enable_irq(unsigned int irq) +{ + eb64p_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq)); +} + +static void +eb64p_disable_irq(unsigned int irq) +{ + eb64p_update_irq_hw(irq, cached_irq_mask |= 1 << irq); +} + +static unsigned int +eb64p_startup_irq(unsigned int irq) +{ + eb64p_enable_irq(irq); + return 0; /* never anything pending */ +} + +static void +eb64p_end_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + eb64p_enable_irq(irq); +} + +static struct hw_interrupt_type eb64p_irq_type = { + .typename = "EB64P", + .startup = eb64p_startup_irq, + .shutdown = eb64p_disable_irq, + .enable = eb64p_enable_irq, + .disable = eb64p_disable_irq, + .ack = eb64p_disable_irq, + .end = eb64p_end_irq, +}; + +static void +eb64p_device_interrupt(unsigned long vector, struct pt_regs *regs) +{ + unsigned long pld; + unsigned int i; + + /* Read the interrupt summary registers */ + pld = inb(0x26) | (inb(0x27) << 8); + + /* + * Now, for every possible bit set, work through + * them and call the appropriate interrupt handler. + */ + while (pld) { + i = ffz(~pld); + pld &= pld - 1; /* clear least bit set */ + + if (i == 5) { + isa_device_interrupt(vector, regs); + } else { + handle_irq(16 + i, regs); + } + } +} + +static void __init +eb64p_init_irq(void) +{ + long i; + +#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_CABRIOLET) + /* + * CABRIO SRM may not set variation correctly, so here we test + * the high word of the interrupt summary register for the RAZ + * bits, and hope that a true EB64+ would read all ones... + */ + if (inw(0x806) != 0xffff) { + extern struct alpha_machine_vector cabriolet_mv; + + printk("Detected Cabriolet: correcting HWRPB.\n"); + + hwrpb->sys_variation |= 2L << 10; + hwrpb_update_checksum(hwrpb); + + alpha_mv = cabriolet_mv; + alpha_mv.init_irq(); + return; + } +#endif /* GENERIC */ + + outb(0xff, 0x26); + outb(0xff, 0x27); + + init_i8259a_irqs(); + + for (i = 16; i < 32; ++i) { + irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; + irq_desc[i].handler = &eb64p_irq_type; + } + + common_init_isa_dma(); + setup_irq(16+5, &isa_cascade_irqaction); +} + +/* + * PCI Fixup configuration. 
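 *
 * (A worked example of eb64p_update_irq_hw() above: PCI irqs start
 * at 16, with mask bits 16-23 behind the register at 0x26 and bits
 * 24-31 behind 0x27.  Masking irq 18, for instance, sets bit 18 in
 * cached_irq_mask and writes the low byte of (mask >> 16) to port
 * 0x26.)
 *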
+ * + * There are two 8 bit external summary registers as follows: + * + * Summary @ 0x26: + * Bit Meaning + * 0 Interrupt Line A from slot 0 + * 1 Interrupt Line A from slot 1 + * 2 Interrupt Line B from slot 0 + * 3 Interrupt Line B from slot 1 + * 4 Interrupt Line C from slot 0 + * 5 Interrupt line from the two ISA PICs + * 6 Tulip + * 7 NCR SCSI + * + * Summary @ 0x27 + * Bit Meaning + * 0 Interrupt Line C from slot 1 + * 1 Interrupt Line D from slot 0 + * 2 Interrupt Line D from slot 1 + * 3 RAZ + * 4 RAZ + * 5 RAZ + * 6 RAZ + * 7 RAZ + * + * The device to slot mapping looks like: + * + * Slot Device + * 5 NCR SCSI controller + * 6 PCI on board slot 0 + * 7 PCI on board slot 1 + * 8 Intel SIO PCI-ISA bridge chip + * 9 Tulip - DECchip 21040 Ethernet controller + * + * + * This two layered interrupt approach means that we allocate IRQ 16 and + * above for PCI interrupts. The IRQ relates to which bit the interrupt + * comes in on. This makes interrupt processing much easier. + */ + +static int __init +eb64p_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + static char irq_tab[5][5] __initdata = { + /*INT INTA INTB INTC INTD */ + {16+7, 16+7, 16+7, 16+7, 16+7}, /* IdSel 5, slot ?, ?? */ + {16+0, 16+0, 16+2, 16+4, 16+9}, /* IdSel 6, slot ?, ?? */ + {16+1, 16+1, 16+3, 16+8, 16+10}, /* IdSel 7, slot ?, ?? */ + { -1, -1, -1, -1, -1}, /* IdSel 8, SIO */ + {16+6, 16+6, 16+6, 16+6, 16+6}, /* IdSel 9, TULIP */ + }; + const long min_idsel = 5, max_idsel = 9, irqs_per_slot = 5; + return COMMON_TABLE_LOOKUP; +} + + +/* + * The System Vector + */ + +#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EB64P) +struct alpha_machine_vector eb64p_mv __initmv = { + .vector_name = "EB64+", + DO_EV4_MMU, + DO_DEFAULT_RTC, + DO_APECS_IO, + .machine_check = apecs_machine_check, + .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, + .min_io_address = DEFAULT_IO_BASE, + .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE, + + .nr_irqs = 32, + .device_interrupt = eb64p_device_interrupt, + + .init_arch = apecs_init_arch, + .init_irq = eb64p_init_irq, + .init_rtc = common_init_rtc, + .init_pci = common_init_pci, + .kill_arch = NULL, + .pci_map_irq = eb64p_map_irq, + .pci_swizzle = common_swizzle, +}; +ALIAS_MV(eb64p) +#endif + +#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EB66) +struct alpha_machine_vector eb66_mv __initmv = { + .vector_name = "EB66", + DO_EV4_MMU, + DO_DEFAULT_RTC, + DO_LCA_IO, + .machine_check = lca_machine_check, + .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, + .min_io_address = DEFAULT_IO_BASE, + .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE, + + .nr_irqs = 32, + .device_interrupt = eb64p_device_interrupt, + + .init_arch = lca_init_arch, + .init_irq = eb64p_init_irq, + .init_rtc = common_init_rtc, + .init_pci = common_init_pci, + .pci_map_irq = eb64p_map_irq, + .pci_swizzle = common_swizzle, +}; +ALIAS_MV(eb66) +#endif diff --git a/arch/alpha/kernel/sys_eiger.c b/arch/alpha/kernel/sys_eiger.c new file mode 100644 index 0000000..bd6e5f0 --- /dev/null +++ b/arch/alpha/kernel/sys_eiger.c @@ -0,0 +1,242 @@ +/* + * linux/arch/alpha/kernel/sys_eiger.c + * + * Copyright (C) 1995 David A Rusling + * Copyright (C) 1996, 1999 Jay A Estabrook + * Copyright (C) 1998, 1999 Richard Henderson + * Copyright (C) 1999 Iain Grant + * + * Code supporting the EIGER (EV6+TSUNAMI). 
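+ */
+
+/*
+ * Editor's sketch (not part of the original patch): eb64p_map_irq()
+ * above shows the pattern behind COMMON_TABLE_LOOKUP -- index a
+ * per-IdSel row with the interrupt pin, where -1 means "nothing
+ * routed".  A minimal self-contained version of the lookup itself:
+ */
+static int demo_table_lookup(long slot, int pin)
+{
+	static const int irq_tab[5][5] = {
+		/*INT    INTA   INTB   INTC   INTD  */
+		{16+7,  16+7,  16+7,  16+7,  16+7 },	/* IdSel 5, SCSI   */
+		{16+0,  16+0,  16+2,  16+4,  16+9 },	/* IdSel 6, slot 0 */
+		{16+1,  16+1,  16+3,  16+8,  16+10},	/* IdSel 7, slot 1 */
+		{  -1,    -1,    -1,    -1,    -1 },	/* IdSel 8, SIO    */
+		{16+6,  16+6,  16+6,  16+6,  16+6 },	/* IdSel 9, TULIP  */
+	};
+	const long min_idsel = 5, max_idsel = 9;
+
+	if (slot < min_idsel || slot > max_idsel || pin < 0 || pin > 4)
+		return -1;
+	return irq_tab[slot - min_idsel][pin];
+}
+
+/*
+ * (sys_eiger.c file header resumes)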
+ */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/bitops.h> + +#include <asm/ptrace.h> +#include <asm/system.h> +#include <asm/dma.h> +#include <asm/irq.h> +#include <asm/mmu_context.h> +#include <asm/io.h> +#include <asm/pci.h> +#include <asm/pgtable.h> +#include <asm/core_tsunami.h> +#include <asm/hwrpb.h> +#include <asm/tlbflush.h> + +#include "proto.h" +#include "irq_impl.h" +#include "pci_impl.h" +#include "machvec_impl.h" + + +/* Note that this interrupt code is identical to TAKARA. */ + +/* Note mask bit is true for DISABLED irqs. */ +static unsigned long cached_irq_mask[2] = { -1, -1 }; + +static inline void +eiger_update_irq_hw(unsigned long irq, unsigned long mask) +{ + int regaddr; + + mask = (irq >= 64 ? mask << 16 : mask >> ((irq - 16) & 0x30)); + regaddr = 0x510 + (((irq - 16) >> 2) & 0x0c); + outl(mask & 0xffff0000UL, regaddr); +} + +static inline void +eiger_enable_irq(unsigned int irq) +{ + unsigned long mask; + mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63))); + eiger_update_irq_hw(irq, mask); +} + +static void +eiger_disable_irq(unsigned int irq) +{ + unsigned long mask; + mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63)); + eiger_update_irq_hw(irq, mask); +} + +static unsigned int +eiger_startup_irq(unsigned int irq) +{ + eiger_enable_irq(irq); + return 0; /* never anything pending */ +} + +static void +eiger_end_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + eiger_enable_irq(irq); +} + +static struct hw_interrupt_type eiger_irq_type = { + .typename = "EIGER", + .startup = eiger_startup_irq, + .shutdown = eiger_disable_irq, + .enable = eiger_enable_irq, + .disable = eiger_disable_irq, + .ack = eiger_disable_irq, + .end = eiger_end_irq, +}; + +static void +eiger_device_interrupt(unsigned long vector, struct pt_regs * regs) +{ + unsigned intstatus; + + /* + * The PALcode will have passed us vectors 0x800 or 0x810, + * which are fairly arbitrary values and serve only to tell + * us whether an interrupt has come in on IRQ0 or IRQ1. If + * it's IRQ1 it's a PCI interrupt; if it's IRQ0, it's + * probably ISA, but PCI interrupts can come through IRQ0 + * as well if the interrupt controller isn't in accelerated + * mode. + * + * OTOH, the accelerator thing doesn't seem to be working + * overly well, so what we'll do instead is try directly + * examining the Master Interrupt Register to see if it's a + * PCI interrupt, and if _not_ then we'll pass it on to the + * ISA handler. + */ + + intstatus = inw(0x500) & 15; + if (intstatus) { + /* + * This is a PCI interrupt. Check each bit and + * despatch an interrupt if it's set. 
+ */ + + if (intstatus & 8) handle_irq(16+3, regs); + if (intstatus & 4) handle_irq(16+2, regs); + if (intstatus & 2) handle_irq(16+1, regs); + if (intstatus & 1) handle_irq(16+0, regs); + } else { + isa_device_interrupt(vector, regs); + } +} + +static void +eiger_srm_device_interrupt(unsigned long vector, struct pt_regs * regs) +{ + int irq = (vector - 0x800) >> 4; + handle_irq(irq, regs); +} + +static void __init +eiger_init_irq(void) +{ + long i; + + outb(0, DMA1_RESET_REG); + outb(0, DMA2_RESET_REG); + outb(DMA_MODE_CASCADE, DMA2_MODE_REG); + outb(0, DMA2_MASK_REG); + + if (alpha_using_srm) + alpha_mv.device_interrupt = eiger_srm_device_interrupt; + + for (i = 16; i < 128; i += 16) + eiger_update_irq_hw(i, -1); + + init_i8259a_irqs(); + + for (i = 16; i < 128; ++i) { + irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; + irq_desc[i].handler = &eiger_irq_type; + } +} + +static int __init +eiger_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + u8 irq_orig; + + /* The SRM console has already calculated out the IRQ value's for + option cards. As this works lets just read in the value already + set and change it to a useable value by Linux. + + All the IRQ values generated by the console are greater than 90, + so we subtract 80 because it is (90 - allocated ISA IRQ's). */ + + pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq_orig); + + return irq_orig - 0x80; +} + +static u8 __init +eiger_swizzle(struct pci_dev *dev, u8 *pinp) +{ + struct pci_controller *hose = dev->sysdata; + int slot, pin = *pinp; + int bridge_count = 0; + + /* Find the number of backplane bridges. */ + int backplane = inw(0x502) & 0x0f; + + switch (backplane) + { + case 0x00: bridge_count = 0; break; /* No bridges */ + case 0x01: bridge_count = 1; break; /* 1 */ + case 0x03: bridge_count = 2; break; /* 2 */ + case 0x07: bridge_count = 3; break; /* 3 */ + case 0x0f: bridge_count = 4; break; /* 4 */ + }; + + slot = PCI_SLOT(dev->devfn); + while (dev->bus->self) { + /* Check for built-in bridges on hose 0. */ + if (hose->index == 0 + && (PCI_SLOT(dev->bus->self->devfn) + > 20 - bridge_count)) { + slot = PCI_SLOT(dev->devfn); + break; + } + /* Must be a card-based bridge. */ + pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)); + + /* Move up the chain of bridges. */ + dev = dev->bus->self; + } + *pinp = pin; + return slot; +} + +/* + * The System Vectors + */ + +struct alpha_machine_vector eiger_mv __initmv = { + .vector_name = "Eiger", + DO_EV6_MMU, + DO_DEFAULT_RTC, + DO_TSUNAMI_IO, + .machine_check = tsunami_machine_check, + .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, + .min_io_address = DEFAULT_IO_BASE, + .min_mem_address = DEFAULT_MEM_BASE, + .pci_dac_offset = TSUNAMI_DAC_OFFSET, + + .nr_irqs = 128, + .device_interrupt = eiger_device_interrupt, + + .init_arch = tsunami_init_arch, + .init_irq = eiger_init_irq, + .init_rtc = common_init_rtc, + .init_pci = common_init_pci, + .kill_arch = tsunami_kill_arch, + .pci_map_irq = eiger_map_irq, + .pci_swizzle = eiger_swizzle, +}; +ALIAS_MV(eiger) diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c new file mode 100644 index 0000000..fcabb7c --- /dev/null +++ b/arch/alpha/kernel/sys_jensen.c @@ -0,0 +1,274 @@ +/* + * linux/arch/alpha/kernel/sys_jensen.c + * + * Copyright (C) 1995 Linus Torvalds + * Copyright (C) 1998, 1999 Richard Henderson + * + * Code supporting the Jensen. 
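+ */
+
+/*
+ * Editorial aside (not part of the original patch): eiger_swizzle()
+ * above walks up the bridge chain, rotating the interrupt pin at every
+ * card-based PCI-to-PCI bridge.  The conventional rotation -- what
+ * bridge_swizzle() is expected to compute -- is sketched below; pins
+ * are 1..4 (INTA..INTD).
+ */
+static unsigned char demo_bridge_swizzle(unsigned char pin,
+					 unsigned char slot)
+{
+	/* INTx of a device behind a bridge appears as INT((x + slot) % 4)
+	   on the parent bus, keeping the 1-based pin numbering. */
+	return (((pin - 1) + slot) % 4) + 1;
+}
+
+/*
+ * (sys_jensen.c file header resumes)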
+ */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/pci.h> +#include <linux/init.h> + +#include <asm/ptrace.h> +#include <asm/system.h> + +#define __EXTERN_INLINE inline +#include <asm/io.h> +#include <asm/jensen.h> +#undef __EXTERN_INLINE + +#include <asm/dma.h> +#include <asm/irq.h> +#include <asm/mmu_context.h> +#include <asm/pgtable.h> +#include <asm/tlbflush.h> + +#include "proto.h" +#include "irq_impl.h" +#include "pci_impl.h" +#include "machvec_impl.h" + + +/* + * Jensen is special: the vector is 0x8X0 for EISA interrupt X, and + * 0x9X0 for the local motherboard interrupts. + * + * Note especially that those local interrupts CANNOT be masked, + * which causes much of the pain below... + * + * 0x660 - NMI + * + * 0x800 - IRQ0 interval timer (not used, as we use the RTC timer) + * 0x810 - IRQ1 line printer (duh..) + * 0x860 - IRQ6 floppy disk + * + * 0x900 - COM1 + * 0x920 - COM2 + * 0x980 - keyboard + * 0x990 - mouse + * + * PCI-based systems are more sane: they don't have the local + * interrupts at all, and have only normal PCI interrupts from + * devices. Happily it's easy enough to do a sane mapping from the + * Jensen. + * + * Note that this means that we may have to do a hardware + * "local_op" to a different interrupt than we report to the rest of the + * world. + */ + +static unsigned int +jensen_local_startup(unsigned int irq) +{ + /* the parport is really hw IRQ 1, silly Jensen. */ + if (irq == 7) + i8259a_startup_irq(1); + else + /* + * For all true local interrupts, set the flag that prevents + * the IPL from being dropped during handler processing. + */ + if (irq_desc[irq].action) + irq_desc[irq].action->flags |= SA_INTERRUPT; + return 0; +} + +static void +jensen_local_shutdown(unsigned int irq) +{ + /* the parport is really hw IRQ 1, silly Jensen. */ + if (irq == 7) + i8259a_disable_irq(1); +} + +static void +jensen_local_enable(unsigned int irq) +{ + /* the parport is really hw IRQ 1, silly Jensen. */ + if (irq == 7) + i8259a_enable_irq(1); +} + +static void +jensen_local_disable(unsigned int irq) +{ + /* the parport is really hw IRQ 1, silly Jensen. */ + if (irq == 7) + i8259a_disable_irq(1); +} + +static void +jensen_local_ack(unsigned int irq) +{ + /* the parport is really hw IRQ 1, silly Jensen. */ + if (irq == 7) + i8259a_mask_and_ack_irq(1); +} + +static void +jensen_local_end(unsigned int irq) +{ + /* the parport is really hw IRQ 1, silly Jensen. */ + if (irq == 7) + i8259a_end_irq(1); +} + +static struct hw_interrupt_type jensen_local_irq_type = { + .typename = "LOCAL", + .startup = jensen_local_startup, + .shutdown = jensen_local_shutdown, + .enable = jensen_local_enable, + .disable = jensen_local_disable, + .ack = jensen_local_ack, + .end = jensen_local_end, +}; + +static void +jensen_device_interrupt(unsigned long vector, struct pt_regs * regs) +{ + int irq; + + switch (vector) { + case 0x660: + printk("Whee.. NMI received. Probable hardware error\n"); + printk("61=%02x, 461=%02x\n", inb(0x61), inb(0x461)); + return; + + /* local device interrupts: */ + case 0x900: irq = 4; break; /* com1 -> irq 4 */ + case 0x920: irq = 3; break; /* com2 -> irq 3 */ + case 0x980: irq = 1; break; /* kbd -> irq 1 */ + case 0x990: irq = 9; break; /* mouse -> irq 9 */ + + default: + if (vector > 0x900) { + printk("Unknown local interrupt %lx\n", vector); + return; + } + + irq = (vector - 0x800) >> 4; + if (irq == 1) + irq = 7; + break; + } + + /* If there is no handler yet... 
*/ + if (irq_desc[irq].action == NULL) { + /* If it is a local interrupt that cannot be masked... */ + if (vector >= 0x900) + { + /* Clear keyboard/mouse state */ + inb(0x64); + inb(0x60); + /* Reset serial ports */ + inb(0x3fa); + inb(0x2fa); + outb(0x0c, 0x3fc); + outb(0x0c, 0x2fc); + /* Clear NMI */ + outb(0,0x61); + outb(0,0x461); + } + } + +#if 0 + /* A useful bit of code to find out if an interrupt is going wild. */ + { + static unsigned int last_msg = 0, last_cc = 0; + static int last_irq = -1, count = 0; + unsigned int cc; + + __asm __volatile("rpcc %0" : "=r"(cc)); + ++count; +#define JENSEN_CYCLES_PER_SEC (150000000) + if (cc - last_msg > ((JENSEN_CYCLES_PER_SEC) * 3) || + irq != last_irq) { + printk(KERN_CRIT " irq %d count %d cc %u @ %lx\n", + irq, count, cc-last_cc, regs->pc); + count = 0; + last_msg = cc; + last_irq = irq; + } + last_cc = cc; + } +#endif + + handle_irq(irq, regs); +} + +static void __init +jensen_init_irq(void) +{ + init_i8259a_irqs(); + + irq_desc[1].handler = &jensen_local_irq_type; + irq_desc[4].handler = &jensen_local_irq_type; + irq_desc[3].handler = &jensen_local_irq_type; + irq_desc[7].handler = &jensen_local_irq_type; + irq_desc[9].handler = &jensen_local_irq_type; + + common_init_isa_dma(); +} + +static void __init +jensen_init_arch(void) +{ + struct pci_controller *hose; +#ifdef CONFIG_PCI + static struct pci_dev fake_isa_bridge = { .dma_mask = 0xffffffffUL, }; + + isa_bridge = &fake_isa_bridge; +#endif + + /* Create a hose so that we can report i/o base addresses to + userland. */ + + pci_isa_hose = hose = alloc_pci_controller(); + hose->io_space = &ioport_resource; + hose->mem_space = &iomem_resource; + hose->index = 0; + + hose->sparse_mem_base = EISA_MEM - IDENT_ADDR; + hose->dense_mem_base = 0; + hose->sparse_io_base = EISA_IO - IDENT_ADDR; + hose->dense_io_base = 0; + + hose->sg_isa = hose->sg_pci = NULL; + __direct_map_base = 0; + __direct_map_size = 0xffffffff; +} + +static void +jensen_machine_check (u64 vector, u64 la, struct pt_regs *regs) +{ + printk(KERN_CRIT "Machine check\n"); +} + + +/* + * The System Vector + */ + +struct alpha_machine_vector jensen_mv __initmv = { + .vector_name = "Jensen", + DO_EV4_MMU, + IO_LITE(JENSEN,jensen), + .machine_check = jensen_machine_check, + .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, + .rtc_port = 0x170, + + .nr_irqs = 16, + .device_interrupt = jensen_device_interrupt, + + .init_arch = jensen_init_arch, + .init_irq = jensen_init_irq, + .init_rtc = common_init_rtc, + .init_pci = NULL, + .kill_arch = NULL, +}; +ALIAS_MV(jensen) diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c new file mode 100644 index 0000000..8047278 --- /dev/null +++ b/arch/alpha/kernel/sys_marvel.c @@ -0,0 +1,499 @@ +/* + * linux/arch/alpha/kernel/sys_marvel.c + * + * Marvel / IO7 support + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/bitops.h> + +#include <asm/ptrace.h> +#include <asm/system.h> +#include <asm/dma.h> +#include <asm/irq.h> +#include <asm/mmu_context.h> +#include <asm/io.h> +#include <asm/pgtable.h> +#include <asm/core_marvel.h> +#include <asm/hwrpb.h> +#include <asm/tlbflush.h> + +#include "proto.h" +#include "err_impl.h" +#include "irq_impl.h" +#include "pci_impl.h" +#include "machvec_impl.h" + +#if NR_IRQS < MARVEL_NR_IRQS +# error NR_IRQS < MARVEL_NR_IRQS !!! +#endif + + +/* + * Interrupt handling. 
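+ */
+
+/*
+ * Illustrative aside (not part of the original patch): the Jensen
+ * decoding above maps PALcode vectors back to i8259-style IRQ numbers:
+ * EISA vectors are 0x8X0 for IRQ X, while the unmaskable local
+ * interrupts (0x9X0) get hand-assigned numbers.  A standalone sketch
+ * of that mapping, where -1 stands for "not a device interrupt":
+ */
+static int demo_jensen_vector_to_irq(unsigned long vector)
+{
+	switch (vector) {
+	case 0x900: return 4;			/* COM1 */
+	case 0x920: return 3;			/* COM2 */
+	case 0x980: return 1;			/* keyboard */
+	case 0x990: return 9;			/* mouse */
+	default:
+		if (vector > 0x900)
+			return -1;		/* unknown local vector */
+		/* EISA: vector = 0x800 + (irq << 4); IRQ1 is remapped
+		   to 7 because the parport really lives on hw IRQ 1. */
+		{
+			int irq = (vector - 0x800) >> 4;
+			return irq == 1 ? 7 : irq;
+		}
+	}
+}
+
+/*
+ * (sys_marvel.c interrupt-handling section resumes)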
+ */ +static void +io7_device_interrupt(unsigned long vector, struct pt_regs * regs) +{ + unsigned int pid; + unsigned int irq; + + /* + * Vector is 0x800 + (interrupt) + * + * where (interrupt) is: + * + * ...16|15 14|13 4|3 0 + * -----+-----+--------+--- + * PE | 0 | irq | 0 + * + * where (irq) is + * + * 0x0800 - 0x0ff0 - 0x0800 + (LSI id << 4) + * 0x1000 - 0x2ff0 - 0x1000 + (MSI_DAT<8:0> << 4) + */ + pid = vector >> 16; + irq = ((vector & 0xffff) - 0x800) >> 4; + + irq += 16; /* offset for legacy */ + irq &= MARVEL_IRQ_VEC_IRQ_MASK; /* not too many bits */ + irq |= pid << MARVEL_IRQ_VEC_PE_SHIFT; /* merge the pid */ + + handle_irq(irq, regs); +} + +static volatile unsigned long * +io7_get_irq_ctl(unsigned int irq, struct io7 **pio7) +{ + volatile unsigned long *ctl; + unsigned int pid; + struct io7 *io7; + + pid = irq >> MARVEL_IRQ_VEC_PE_SHIFT; + + if (!(io7 = marvel_find_io7(pid))) { + printk(KERN_ERR + "%s for nonexistent io7 -- vec %x, pid %d\n", + __FUNCTION__, irq, pid); + return NULL; + } + + irq &= MARVEL_IRQ_VEC_IRQ_MASK; /* isolate the vector */ + irq -= 16; /* subtract legacy bias */ + + if (irq >= 0x180) { + printk(KERN_ERR + "%s for invalid irq -- pid %d adjusted irq %x\n", + __FUNCTION__, pid, irq); + return NULL; + } + + ctl = &io7->csrs->PO7_LSI_CTL[irq & 0xff].csr; /* assume LSI */ + if (irq >= 0x80) /* MSI */ + ctl = &io7->csrs->PO7_MSI_CTL[((irq - 0x80) >> 5) & 0x0f].csr; + + if (pio7) *pio7 = io7; + return ctl; +} + +static void +io7_enable_irq(unsigned int irq) +{ + volatile unsigned long *ctl; + struct io7 *io7; + + ctl = io7_get_irq_ctl(irq, &io7); + if (!ctl || !io7) { + printk(KERN_ERR "%s: get_ctl failed for irq %x\n", + __FUNCTION__, irq); + return; + } + + spin_lock(&io7->irq_lock); + *ctl |= 1UL << 24; + mb(); + *ctl; + spin_unlock(&io7->irq_lock); +} + +static void +io7_disable_irq(unsigned int irq) +{ + volatile unsigned long *ctl; + struct io7 *io7; + + ctl = io7_get_irq_ctl(irq, &io7); + if (!ctl || !io7) { + printk(KERN_ERR "%s: get_ctl failed for irq %x\n", + __FUNCTION__, irq); + return; + } + + spin_lock(&io7->irq_lock); + *ctl &= ~(1UL << 24); + mb(); + *ctl; + spin_unlock(&io7->irq_lock); +} + +static unsigned int +io7_startup_irq(unsigned int irq) +{ + io7_enable_irq(irq); + return 0; /* never anything pending */ +} + +static void +io7_end_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + io7_enable_irq(irq); +} + +static void +marvel_irq_noop(unsigned int irq) +{ + return; +} + +static unsigned int +marvel_irq_noop_return(unsigned int irq) +{ + return 0; +} + +static struct hw_interrupt_type marvel_legacy_irq_type = { + .typename = "LEGACY", + .startup = marvel_irq_noop_return, + .shutdown = marvel_irq_noop, + .enable = marvel_irq_noop, + .disable = marvel_irq_noop, + .ack = marvel_irq_noop, + .end = marvel_irq_noop, +}; + +static struct hw_interrupt_type io7_lsi_irq_type = { + .typename = "LSI", + .startup = io7_startup_irq, + .shutdown = io7_disable_irq, + .enable = io7_enable_irq, + .disable = io7_disable_irq, + .ack = io7_disable_irq, + .end = io7_end_irq, +}; + +static struct hw_interrupt_type io7_msi_irq_type = { + .typename = "MSI", + .startup = io7_startup_irq, + .shutdown = io7_disable_irq, + .enable = io7_enable_irq, + .disable = io7_disable_irq, + .ack = marvel_irq_noop, + .end = io7_end_irq, +}; + +static void +io7_redirect_irq(struct io7 *io7, + volatile unsigned long *csr, + unsigned int where) +{ + unsigned long val; + + val = *csr; + val &= ~(0x1ffUL << 24); /* clear the target pid */ + val 
|= ((unsigned long)where << 24); /* set the new target pid */ + + *csr = val; + mb(); + *csr; +} + +static void +io7_redirect_one_lsi(struct io7 *io7, unsigned int which, unsigned int where) +{ + unsigned long val; + + /* + * LSI_CTL has target PID @ 14 + */ + val = io7->csrs->PO7_LSI_CTL[which].csr; + val &= ~(0x1ffUL << 14); /* clear the target pid */ + val |= ((unsigned long)where << 14); /* set the new target pid */ + + io7->csrs->PO7_LSI_CTL[which].csr = val; + mb(); + io7->csrs->PO7_LSI_CTL[which].csr; +} + +static void +io7_redirect_one_msi(struct io7 *io7, unsigned int which, unsigned int where) +{ + unsigned long val; + + /* + * MSI_CTL has target PID @ 14 + */ + val = io7->csrs->PO7_MSI_CTL[which].csr; + val &= ~(0x1ffUL << 14); /* clear the target pid */ + val |= ((unsigned long)where << 14); /* set the new target pid */ + + io7->csrs->PO7_MSI_CTL[which].csr = val; + mb(); + io7->csrs->PO7_MSI_CTL[which].csr; +} + +static void __init +init_one_io7_lsi(struct io7 *io7, unsigned int which, unsigned int where) +{ + /* + * LSI_CTL has target PID @ 14 + */ + io7->csrs->PO7_LSI_CTL[which].csr = ((unsigned long)where << 14); + mb(); + io7->csrs->PO7_LSI_CTL[which].csr; +} + +static void __init +init_one_io7_msi(struct io7 *io7, unsigned int which, unsigned int where) +{ + /* + * MSI_CTL has target PID @ 14 + */ + io7->csrs->PO7_MSI_CTL[which].csr = ((unsigned long)where << 14); + mb(); + io7->csrs->PO7_MSI_CTL[which].csr; +} + +static void __init +init_io7_irqs(struct io7 *io7, + struct hw_interrupt_type *lsi_ops, + struct hw_interrupt_type *msi_ops) +{ + long base = (io7->pe << MARVEL_IRQ_VEC_PE_SHIFT) + 16; + long i; + + printk("Initializing interrupts for IO7 at PE %u - base %lx\n", + io7->pe, base); + + /* + * Where should interrupts from this IO7 go? + * + * They really should be sent to the local CPU to avoid having to + * traverse the mesh, but if it's not an SMP kernel, they have to + * go to the boot CPU. Send them all to the boot CPU for now, + * as each secondary starts, it can redirect it's local device + * interrupts. + */ + printk(" Interrupts reported to CPU at PE %u\n", boot_cpuid); + + spin_lock(&io7->irq_lock); + + /* set up the error irqs */ + io7_redirect_irq(io7, &io7->csrs->HLT_CTL.csr, boot_cpuid); + io7_redirect_irq(io7, &io7->csrs->HPI_CTL.csr, boot_cpuid); + io7_redirect_irq(io7, &io7->csrs->CRD_CTL.csr, boot_cpuid); + io7_redirect_irq(io7, &io7->csrs->STV_CTL.csr, boot_cpuid); + io7_redirect_irq(io7, &io7->csrs->HEI_CTL.csr, boot_cpuid); + + /* Set up the lsi irqs. */ + for (i = 0; i < 128; ++i) { + irq_desc[base + i].status = IRQ_DISABLED | IRQ_LEVEL; + irq_desc[base + i].handler = lsi_ops; + } + + /* Disable the implemented irqs in hardware. */ + for (i = 0; i < 0x60; ++i) + init_one_io7_lsi(io7, i, boot_cpuid); + + init_one_io7_lsi(io7, 0x74, boot_cpuid); + init_one_io7_lsi(io7, 0x75, boot_cpuid); + + + /* Set up the msi irqs. */ + for (i = 128; i < (128 + 512); ++i) { + irq_desc[base + i].status = IRQ_DISABLED | IRQ_LEVEL; + irq_desc[base + i].handler = msi_ops; + } + + for (i = 0; i < 16; ++i) + init_one_io7_msi(io7, i, boot_cpuid); + + spin_unlock(&io7->irq_lock); +} + +static void __init +marvel_init_irq(void) +{ + int i; + struct io7 *io7 = NULL; + + /* Reserve the legacy irqs. */ + for (i = 0; i < 16; ++i) { + irq_desc[i].status = IRQ_DISABLED; + irq_desc[i].handler = &marvel_legacy_irq_type; + } + + /* Init the io7 irqs. 
*/ + for (io7 = NULL; (io7 = marvel_next_io7(io7)) != NULL; ) + init_io7_irqs(io7, &io7_lsi_irq_type, &io7_msi_irq_type); +} + +static int +marvel_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + struct pci_controller *hose = dev->sysdata; + struct io7_port *io7_port = hose->sysdata; + struct io7 *io7 = io7_port->io7; + int msi_loc, msi_data_off; + u16 msg_ctl; + u16 msg_dat; + u8 intline; + int irq; + + pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline); + irq = intline; + + msi_loc = pci_find_capability(dev, PCI_CAP_ID_MSI); + msg_ctl = 0; + if (msi_loc) + pci_read_config_word(dev, msi_loc + PCI_MSI_FLAGS, &msg_ctl); + + if (msg_ctl & PCI_MSI_FLAGS_ENABLE) { + msi_data_off = PCI_MSI_DATA_32; + if (msg_ctl & PCI_MSI_FLAGS_64BIT) + msi_data_off = PCI_MSI_DATA_64; + pci_read_config_word(dev, msi_loc + msi_data_off, &msg_dat); + + irq = msg_dat & 0x1ff; /* we use msg_data<8:0> */ + irq += 0x80; /* offset for lsi */ + +#if 1 + printk("PCI:%d:%d:%d (hose %d) [%s] is using MSI\n", + dev->bus->number, + PCI_SLOT(dev->devfn), + PCI_FUNC(dev->devfn), + hose->index, + pci_pretty_name (dev)); + printk(" %d message(s) from 0x%04x\n", + 1 << ((msg_ctl & PCI_MSI_FLAGS_QSIZE) >> 4), + msg_dat); + printk(" reporting on %d IRQ(s) from %d (0x%x)\n", + 1 << ((msg_ctl & PCI_MSI_FLAGS_QSIZE) >> 4), + (irq + 16) | (io7->pe << MARVEL_IRQ_VEC_PE_SHIFT), + (irq + 16) | (io7->pe << MARVEL_IRQ_VEC_PE_SHIFT)); +#endif + +#if 0 + pci_write_config_word(dev, msi_loc + PCI_MSI_FLAGS, + msg_ctl & ~PCI_MSI_FLAGS_ENABLE); + pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline); + irq = intline; + + printk(" forcing LSI interrupt on irq %d [0x%x]\n", irq, irq); +#endif + } + + irq += 16; /* offset for legacy */ + irq |= io7->pe << MARVEL_IRQ_VEC_PE_SHIFT; /* merge the pid */ + + return irq; +} + +static void __init +marvel_init_pci(void) +{ + struct io7 *io7; + + marvel_register_error_handlers(); + + pci_probe_only = 1; + common_init_pci(); + +#ifdef CONFIG_VGA_HOSE + locate_and_init_vga(NULL); +#endif + + /* Clear any io7 errors. */ + for (io7 = NULL; (io7 = marvel_next_io7(io7)) != NULL; ) + io7_clear_errors(io7); +} + +static void +marvel_init_rtc(void) +{ + init_rtc_irq(); +} + +static void +marvel_smp_callin(void) +{ + int cpuid = hard_smp_processor_id(); + struct io7 *io7 = marvel_find_io7(cpuid); + unsigned int i; + + if (!io7) + return; + + /* + * There is a local IO7 - redirect all of its interrupts here. + */ + printk("Redirecting IO7 interrupts to local CPU at PE %u\n", cpuid); + + /* Redirect the error IRQS here. */ + io7_redirect_irq(io7, &io7->csrs->HLT_CTL.csr, cpuid); + io7_redirect_irq(io7, &io7->csrs->HPI_CTL.csr, cpuid); + io7_redirect_irq(io7, &io7->csrs->CRD_CTL.csr, cpuid); + io7_redirect_irq(io7, &io7->csrs->STV_CTL.csr, cpuid); + io7_redirect_irq(io7, &io7->csrs->HEI_CTL.csr, cpuid); + + /* Redirect the implemented LSIs here. */ + for (i = 0; i < 0x60; ++i) + io7_redirect_one_lsi(io7, i, cpuid); + + io7_redirect_one_lsi(io7, 0x74, cpuid); + io7_redirect_one_lsi(io7, 0x75, cpuid); + + /* Redirect the MSIs here. 
*/ + for (i = 0; i < 16; ++i) + io7_redirect_one_msi(io7, i, cpuid); +} + +/* + * System Vectors + */ +struct alpha_machine_vector marvel_ev7_mv __initmv = { + .vector_name = "MARVEL/EV7", + DO_EV7_MMU, + DO_DEFAULT_RTC, + DO_MARVEL_IO, + .machine_check = marvel_machine_check, + .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, + .min_io_address = DEFAULT_IO_BASE, + .min_mem_address = DEFAULT_MEM_BASE, + .pci_dac_offset = IO7_DAC_OFFSET, + + .nr_irqs = MARVEL_NR_IRQS, + .device_interrupt = io7_device_interrupt, + + .agp_info = marvel_agp_info, + + .smp_callin = marvel_smp_callin, + .init_arch = marvel_init_arch, + .init_irq = marvel_init_irq, + .init_rtc = marvel_init_rtc, + .init_pci = marvel_init_pci, + .kill_arch = marvel_kill_arch, + .pci_map_irq = marvel_map_irq, + .pci_swizzle = common_swizzle, + + .pa_to_nid = marvel_pa_to_nid, + .cpuid_to_nid = marvel_cpuid_to_nid, + .node_mem_start = marvel_node_mem_start, + .node_mem_size = marvel_node_mem_size, +}; +ALIAS_MV(marvel_ev7) diff --git a/arch/alpha/kernel/sys_miata.c b/arch/alpha/kernel/sys_miata.c new file mode 100644 index 0000000..61ac56f --- /dev/null +++ b/arch/alpha/kernel/sys_miata.c @@ -0,0 +1,289 @@ +/* + * linux/arch/alpha/kernel/sys_miata.c + * + * Copyright (C) 1995 David A Rusling + * Copyright (C) 1996 Jay A Estabrook + * Copyright (C) 1998, 1999, 2000 Richard Henderson + * + * Code supporting the MIATA (EV56+PYXIS). + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/reboot.h> + +#include <asm/ptrace.h> +#include <asm/system.h> +#include <asm/dma.h> +#include <asm/irq.h> +#include <asm/mmu_context.h> +#include <asm/io.h> +#include <asm/pgtable.h> +#include <asm/core_cia.h> +#include <asm/tlbflush.h> + +#include "proto.h" +#include "irq_impl.h" +#include "pci_impl.h" +#include "machvec_impl.h" + + +static void +miata_srm_device_interrupt(unsigned long vector, struct pt_regs * regs) +{ + int irq; + + irq = (vector - 0x800) >> 4; + + /* + * I really hate to do this, but the MIATA SRM console ignores the + * low 8 bits in the interrupt summary register, and reports the + * vector 0x80 *lower* than I expected from the bit numbering in + * the documentation. + * This was done because the low 8 summary bits really aren't used + * for reporting any interrupts (the PCI-ISA bridge, bit 7, isn't + * used for this purpose, as PIC interrupts are delivered as the + * vectors 0x800-0x8f0). + * But I really don't want to change the fixup code for allocation + * of IRQs, nor the alpha_irq_mask maintenance stuff, both of which + * look nice and clean now. + * So, here's this grotty hack... :-( + */ + if (irq >= 16) + irq = irq + 8; + + handle_irq(irq, regs); +} + +static void __init +miata_init_irq(void) +{ + if (alpha_using_srm) + alpha_mv.device_interrupt = miata_srm_device_interrupt; + +#if 0 + /* These break on MiataGL so we'll try not to do it at all. */ + *(vulp)PYXIS_INT_HILO = 0x000000B2UL; mb(); /* ISA/NMI HI */ + *(vulp)PYXIS_RT_COUNT = 0UL; mb(); /* clear count */ +#endif + + init_i8259a_irqs(); + + /* Not interested in the bogus interrupts (3,10), Fan Fault (0), + NMI (1), or EIDE (9). + + We also disable the risers (4,5), since we don't know how to + route the interrupts behind the bridge. */ + init_pyxis_irqs(0x63b0000); + + common_init_isa_dma(); + setup_irq(16+2, &halt_switch_irqaction); /* SRM only? */ + setup_irq(16+6, &timer_cascade_irqaction); +} + + +/* + * PCI Fixup configuration. 
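+ */
+
+/*
+ * Editor's sketch (not part of the original patch): marvel_map_irq()
+ * above composes a global IRQ number from three fields -- the 9-bit
+ * MSI data (or the console-assigned intline), a +16 bias past the 16
+ * legacy i8259 vectors, and the IO7's PE number in the high bits.
+ * A standalone version of the composition; the shift value is an
+ * assumption standing in for MARVEL_IRQ_VEC_PE_SHIFT as used above.
+ */
+#define DEMO_PE_SHIFT	10	/* assumed MARVEL_IRQ_VEC_PE_SHIFT */
+
+static unsigned demo_marvel_compose_irq(unsigned pe, unsigned msi_data)
+{
+	unsigned irq = (msi_data & 0x1ff)	/* MSI_DAT<8:0>       */
+		       + 0x80			/* past the LSI range */
+		       + 16;			/* past legacy IRQs   */
+	return irq | (pe << DEMO_PE_SHIFT);
+}
+
+/*
+ * PCI Fixup configuration (continued):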
+ * + * Summary @ PYXIS_INT_REQ: + * Bit Meaning + * 0 Fan Fault + * 1 NMI + * 2 Halt/Reset switch + * 3 none + * 4 CID0 (Riser ID) + * 5 CID1 (Riser ID) + * 6 Interval timer + * 7 PCI-ISA Bridge + * 8 Ethernet + * 9 EIDE (deprecated, ISA 14/15 used) + *10 none + *11 USB + *12 Interrupt Line A from slot 4 + *13 Interrupt Line B from slot 4 + *14 Interrupt Line C from slot 4 + *15 Interrupt Line D from slot 4 + *16 Interrupt Line A from slot 5 + *17 Interrupt line B from slot 5 + *18 Interrupt Line C from slot 5 + *19 Interrupt Line D from slot 5 + *20 Interrupt Line A from slot 1 + *21 Interrupt Line B from slot 1 + *22 Interrupt Line C from slot 1 + *23 Interrupt Line D from slot 1 + *24 Interrupt Line A from slot 2 + *25 Interrupt Line B from slot 2 + *26 Interrupt Line C from slot 2 + *27 Interrupt Line D from slot 2 + *27 Interrupt Line A from slot 3 + *29 Interrupt Line B from slot 3 + *30 Interrupt Line C from slot 3 + *31 Interrupt Line D from slot 3 + * + * The device to slot mapping looks like: + * + * Slot Device + * 3 DC21142 Ethernet + * 4 EIDE CMD646 + * 5 none + * 6 USB + * 7 PCI-ISA bridge + * 8 PCI-PCI Bridge (SBU Riser) + * 9 none + * 10 none + * 11 PCI on board slot 4 (SBU Riser) + * 12 PCI on board slot 5 (SBU Riser) + * + * These are behind the bridge, so I'm not sure what to do... + * + * 13 PCI on board slot 1 (SBU Riser) + * 14 PCI on board slot 2 (SBU Riser) + * 15 PCI on board slot 3 (SBU Riser) + * + * + * This two layered interrupt approach means that we allocate IRQ 16 and + * above for PCI interrupts. The IRQ relates to which bit the interrupt + * comes in on. This makes interrupt processing much easier. + */ + +static int __init +miata_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + static char irq_tab[18][5] __initdata = { + /*INT INTA INTB INTC INTD */ + {16+ 8, 16+ 8, 16+ 8, 16+ 8, 16+ 8}, /* IdSel 14, DC21142 */ + { -1, -1, -1, -1, -1}, /* IdSel 15, EIDE */ + { -1, -1, -1, -1, -1}, /* IdSel 16, none */ + { -1, -1, -1, -1, -1}, /* IdSel 17, none */ + { -1, -1, -1, -1, -1}, /* IdSel 18, PCI-ISA */ + { -1, -1, -1, -1, -1}, /* IdSel 19, PCI-PCI */ + { -1, -1, -1, -1, -1}, /* IdSel 20, none */ + { -1, -1, -1, -1, -1}, /* IdSel 21, none */ + {16+12, 16+12, 16+13, 16+14, 16+15}, /* IdSel 22, slot 4 */ + {16+16, 16+16, 16+17, 16+18, 16+19}, /* IdSel 23, slot 5 */ + /* the next 7 are actually on PCI bus 1, across the bridge */ + {16+11, 16+11, 16+11, 16+11, 16+11}, /* IdSel 24, QLISP/GL*/ + { -1, -1, -1, -1, -1}, /* IdSel 25, none */ + { -1, -1, -1, -1, -1}, /* IdSel 26, none */ + { -1, -1, -1, -1, -1}, /* IdSel 27, none */ + {16+20, 16+20, 16+21, 16+22, 16+23}, /* IdSel 28, slot 1 */ + {16+24, 16+24, 16+25, 16+26, 16+27}, /* IdSel 29, slot 2 */ + {16+28, 16+28, 16+29, 16+30, 16+31}, /* IdSel 30, slot 3 */ + /* This bridge is on the main bus of the later orig MIATA */ + { -1, -1, -1, -1, -1}, /* IdSel 31, PCI-PCI */ + }; + const long min_idsel = 3, max_idsel = 20, irqs_per_slot = 5; + + /* the USB function of the 82c693 has it's interrupt connected to + the 2nd 8259 controller. So we have to check for it first. */ + + if((slot == 7) && (PCI_FUNC(dev->devfn) == 3)) { + u8 irq=0; + + if(pci_read_config_byte(pci_find_slot(dev->bus->number, dev->devfn & ~(7)), 0x40,&irq)!=PCIBIOS_SUCCESSFUL) + return -1; + else + return irq; + } + + return COMMON_TABLE_LOOKUP; +} + +static u8 __init +miata_swizzle(struct pci_dev *dev, u8 *pinp) +{ + int slot, pin = *pinp; + + if (dev->bus->number == 0) { + slot = PCI_SLOT(dev->devfn); + } + /* Check for the built-in bridge. 
*/ + else if ((PCI_SLOT(dev->bus->self->devfn) == 8) || + (PCI_SLOT(dev->bus->self->devfn) == 20)) { + slot = PCI_SLOT(dev->devfn) + 9; + } + else + { + /* Must be a card-based bridge. */ + do { + if ((PCI_SLOT(dev->bus->self->devfn) == 8) || + (PCI_SLOT(dev->bus->self->devfn) == 20)) { + slot = PCI_SLOT(dev->devfn) + 9; + break; + } + pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)); + + /* Move up the chain of bridges. */ + dev = dev->bus->self; + /* Slot of the next bridge. */ + slot = PCI_SLOT(dev->devfn); + } while (dev->bus->self); + } + *pinp = pin; + return slot; +} + +static void __init +miata_init_pci(void) +{ + cia_init_pci(); + SMC669_Init(0); /* it might be a GL (fails harmlessly if not) */ + es1888_init(); +} + +static void +miata_kill_arch(int mode) +{ + cia_kill_arch(mode); + +#ifndef ALPHA_RESTORE_SRM_SETUP + switch(mode) { + case LINUX_REBOOT_CMD_RESTART: + /* Who said DEC engineers have no sense of humor? ;-) */ + if (alpha_using_srm) { + *(vuip) PYXIS_RESET = 0x0000dead; + mb(); + } + break; + case LINUX_REBOOT_CMD_HALT: + break; + case LINUX_REBOOT_CMD_POWER_OFF: + break; + } + + halt(); +#endif +} + + +/* + * The System Vector + */ + +struct alpha_machine_vector miata_mv __initmv = { + .vector_name = "Miata", + DO_EV5_MMU, + DO_DEFAULT_RTC, + DO_PYXIS_IO, + .machine_check = cia_machine_check, + .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, + .min_io_address = DEFAULT_IO_BASE, + .min_mem_address = DEFAULT_MEM_BASE, + .pci_dac_offset = PYXIS_DAC_OFFSET, + + .nr_irqs = 48, + .device_interrupt = pyxis_device_interrupt, + + .init_arch = pyxis_init_arch, + .init_irq = miata_init_irq, + .init_rtc = common_init_rtc, + .init_pci = miata_init_pci, + .kill_arch = miata_kill_arch, + .pci_map_irq = miata_map_irq, + .pci_swizzle = miata_swizzle, +}; +ALIAS_MV(miata) diff --git a/arch/alpha/kernel/sys_mikasa.c b/arch/alpha/kernel/sys_mikasa.c new file mode 100644 index 0000000..d78a0da --- /dev/null +++ b/arch/alpha/kernel/sys_mikasa.c @@ -0,0 +1,265 @@ +/* + * linux/arch/alpha/kernel/sys_mikasa.c + * + * Copyright (C) 1995 David A Rusling + * Copyright (C) 1996 Jay A Estabrook + * Copyright (C) 1998, 1999 Richard Henderson + * + * Code supporting the MIKASA (AlphaServer 1000). + */ + +#include <linux/config.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/bitops.h> + +#include <asm/ptrace.h> +#include <asm/system.h> +#include <asm/dma.h> +#include <asm/irq.h> +#include <asm/mmu_context.h> +#include <asm/io.h> +#include <asm/pgtable.h> +#include <asm/core_apecs.h> +#include <asm/core_cia.h> +#include <asm/tlbflush.h> + +#include "proto.h" +#include "irq_impl.h" +#include "pci_impl.h" +#include "machvec_impl.h" + + +/* Note mask bit is true for ENABLED irqs. 
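+ */
+
+/*
+ * Editorial aside (not part of the original patch): note the polarity
+ * flip relative to EB64+ above -- there a set bit meant DISABLED,
+ * here a set bit means ENABLED, so enable and disable swap the |= and
+ * &= roles.  A compilable sketch of the enabled-mask variant:
+ */
+static int demo_enabled_mask;			/* set bit == enabled */
+
+static int demo_mask_enable(int irq)
+{
+	return demo_enabled_mask |= 1 << (irq - 16);
+}
+
+static int demo_mask_disable(int irq)
+{
+	return demo_enabled_mask &= ~(1 << (irq - 16));
+}
+
+/*
+ * As noted: the mask bit is true for ENABLED irqs.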
*/ +static int cached_irq_mask; + +static inline void +mikasa_update_irq_hw(int mask) +{ + outw(mask, 0x536); +} + +static inline void +mikasa_enable_irq(unsigned int irq) +{ + mikasa_update_irq_hw(cached_irq_mask |= 1 << (irq - 16)); +} + +static void +mikasa_disable_irq(unsigned int irq) +{ + mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (irq - 16))); +} + +static unsigned int +mikasa_startup_irq(unsigned int irq) +{ + mikasa_enable_irq(irq); + return 0; +} + +static void +mikasa_end_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + mikasa_enable_irq(irq); +} + +static struct hw_interrupt_type mikasa_irq_type = { + .typename = "MIKASA", + .startup = mikasa_startup_irq, + .shutdown = mikasa_disable_irq, + .enable = mikasa_enable_irq, + .disable = mikasa_disable_irq, + .ack = mikasa_disable_irq, + .end = mikasa_end_irq, +}; + +static void +mikasa_device_interrupt(unsigned long vector, struct pt_regs *regs) +{ + unsigned long pld; + unsigned int i; + + /* Read the interrupt summary registers */ + pld = (((~inw(0x534) & 0x0000ffffUL) << 16) + | (((unsigned long) inb(0xa0)) << 8) + | inb(0x20)); + + /* + * Now for every possible bit set, work through them and call + * the appropriate interrupt handler. + */ + while (pld) { + i = ffz(~pld); + pld &= pld - 1; /* clear least bit set */ + if (i < 16) { + isa_device_interrupt(vector, regs); + } else { + handle_irq(i, regs); + } + } +} + +static void __init +mikasa_init_irq(void) +{ + long i; + + if (alpha_using_srm) + alpha_mv.device_interrupt = srm_device_interrupt; + + mikasa_update_irq_hw(0); + + for (i = 16; i < 32; ++i) { + irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; + irq_desc[i].handler = &mikasa_irq_type; + } + + init_i8259a_irqs(); + common_init_isa_dma(); +} + + +/* + * PCI Fixup configuration. + * + * Summary @ 0x536: + * Bit Meaning + * 0 Interrupt Line A from slot 0 + * 1 Interrupt Line B from slot 0 + * 2 Interrupt Line C from slot 0 + * 3 Interrupt Line D from slot 0 + * 4 Interrupt Line A from slot 1 + * 5 Interrupt line B from slot 1 + * 6 Interrupt Line C from slot 1 + * 7 Interrupt Line D from slot 1 + * 8 Interrupt Line A from slot 2 + * 9 Interrupt Line B from slot 2 + *10 Interrupt Line C from slot 2 + *11 Interrupt Line D from slot 2 + *12 NCR 810 SCSI + *13 Power Supply Fail + *14 Temperature Warn + *15 Reserved + * + * The device to slot mapping looks like: + * + * Slot Device + * 6 NCR SCSI controller + * 7 Intel PCI-EISA bridge chip + * 11 PCI on board slot 0 + * 12 PCI on board slot 1 + * 13 PCI on board slot 2 + * + * + * This two layered interrupt approach means that we allocate IRQ 16 and + * above for PCI interrupts. The IRQ relates to which bit the interrupt + * comes in on. This makes interrupt processing much easier. + */ + +static int __init +mikasa_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + static char irq_tab[8][5] __initdata = { + /*INT INTA INTB INTC INTD */ + {16+12, 16+12, 16+12, 16+12, 16+12}, /* IdSel 17, SCSI */ + { -1, -1, -1, -1, -1}, /* IdSel 18, PCEB */ + { -1, -1, -1, -1, -1}, /* IdSel 19, ???? */ + { -1, -1, -1, -1, -1}, /* IdSel 20, ???? */ + { -1, -1, -1, -1, -1}, /* IdSel 21, ???? 
*/ + { 16+0, 16+0, 16+1, 16+2, 16+3}, /* IdSel 22, slot 0 */ + { 16+4, 16+4, 16+5, 16+6, 16+7}, /* IdSel 23, slot 1 */ + { 16+8, 16+8, 16+9, 16+10, 16+11}, /* IdSel 24, slot 2 */ + }; + const long min_idsel = 6, max_idsel = 13, irqs_per_slot = 5; + return COMMON_TABLE_LOOKUP; +} + + +#if defined(CONFIG_ALPHA_GENERIC) || !defined(CONFIG_ALPHA_PRIMO) +static void +mikasa_apecs_machine_check(unsigned long vector, unsigned long la_ptr, + struct pt_regs * regs) +{ +#define MCHK_NO_DEVSEL 0x205U +#define MCHK_NO_TABT 0x204U + + struct el_common *mchk_header; + unsigned int code; + + mchk_header = (struct el_common *)la_ptr; + + /* Clear the error before any reporting. */ + mb(); + mb(); /* magic */ + draina(); + apecs_pci_clr_err(); + wrmces(0x7); + mb(); + + code = mchk_header->code; + process_mcheck_info(vector, la_ptr, regs, "MIKASA APECS", + (mcheck_expected(0) + && (code == MCHK_NO_DEVSEL + || code == MCHK_NO_TABT))); +} +#endif + + +/* + * The System Vector + */ + +#if defined(CONFIG_ALPHA_GENERIC) || !defined(CONFIG_ALPHA_PRIMO) +struct alpha_machine_vector mikasa_mv __initmv = { + .vector_name = "Mikasa", + DO_EV4_MMU, + DO_DEFAULT_RTC, + DO_APECS_IO, + .machine_check = mikasa_apecs_machine_check, + .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, + .min_io_address = DEFAULT_IO_BASE, + .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE, + + .nr_irqs = 32, + .device_interrupt = mikasa_device_interrupt, + + .init_arch = apecs_init_arch, + .init_irq = mikasa_init_irq, + .init_rtc = common_init_rtc, + .init_pci = common_init_pci, + .pci_map_irq = mikasa_map_irq, + .pci_swizzle = common_swizzle, +}; +ALIAS_MV(mikasa) +#endif + +#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_PRIMO) +struct alpha_machine_vector mikasa_primo_mv __initmv = { + .vector_name = "Mikasa-Primo", + DO_EV5_MMU, + DO_DEFAULT_RTC, + DO_CIA_IO, + .machine_check = cia_machine_check, + .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, + .min_io_address = DEFAULT_IO_BASE, + .min_mem_address = CIA_DEFAULT_MEM_BASE, + + .nr_irqs = 32, + .device_interrupt = mikasa_device_interrupt, + + .init_arch = cia_init_arch, + .init_irq = mikasa_init_irq, + .init_rtc = common_init_rtc, + .init_pci = cia_init_pci, + .kill_arch = cia_kill_arch, + .pci_map_irq = mikasa_map_irq, + .pci_swizzle = common_swizzle, +}; +ALIAS_MV(mikasa_primo) +#endif diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c new file mode 100644 index 0000000..c0d696e --- /dev/null +++ b/arch/alpha/kernel/sys_nautilus.c @@ -0,0 +1,269 @@ +/* + * linux/arch/alpha/kernel/sys_nautilus.c + * + * Copyright (C) 1995 David A Rusling + * Copyright (C) 1998 Richard Henderson + * Copyright (C) 1999 Alpha Processor, Inc., + * (David Daniel, Stig Telfer, Soohoon Lee) + * + * Code supporting NAUTILUS systems. 
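+ */
+
+/*
+ * Illustrative aside (not part of the original patch):
+ * mikasa_apecs_machine_check() above only treats a machine check as
+ * benign when one was expected (a config-space probe poking at a
+ * nonexistent device) and the logged code matches a known probe
+ * failure.  The predicate reduces to something like this sketch:
+ */
+#define DEMO_MCHK_NO_DEVSEL	0x205U
+#define DEMO_MCHK_NO_TABT	0x204U
+
+static int demo_mcheck_is_expected(int probing, unsigned int code)
+{
+	return probing && (code == DEMO_MCHK_NO_DEVSEL ||
+			   code == DEMO_MCHK_NO_TABT);
+}
+
+/*
+ * (sys_nautilus.c file header resumes)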
+ * + * + * NAUTILUS has the following I/O features: + * + * a) Driven by AMD 751 aka IRONGATE (northbridge): + * 4 PCI slots + * 1 AGP slot + * + * b) Driven by ALI M1543C (southbridge) + * 2 ISA slots + * 2 IDE connectors + * 1 dual drive capable FDD controller + * 2 serial ports + * 1 ECP/EPP/SP parallel port + * 2 USB ports + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/reboot.h> +#include <linux/bootmem.h> +#include <linux/bitops.h> + +#include <asm/ptrace.h> +#include <asm/system.h> +#include <asm/dma.h> +#include <asm/irq.h> +#include <asm/mmu_context.h> +#include <asm/io.h> +#include <asm/pci.h> +#include <asm/pgtable.h> +#include <asm/core_irongate.h> +#include <asm/hwrpb.h> +#include <asm/tlbflush.h> + +#include "proto.h" +#include "err_impl.h" +#include "irq_impl.h" +#include "pci_impl.h" +#include "machvec_impl.h" + + +static void __init +nautilus_init_irq(void) +{ + if (alpha_using_srm) { + alpha_mv.device_interrupt = srm_device_interrupt; + } + + init_i8259a_irqs(); + common_init_isa_dma(); +} + +static int __init +nautilus_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + /* Preserve the IRQ set up by the console. */ + + u8 irq; + pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq); + return irq; +} + +void +nautilus_kill_arch(int mode) +{ + struct pci_bus *bus = pci_isa_hose->bus; + u32 pmuport; + int off; + + switch (mode) { + case LINUX_REBOOT_CMD_RESTART: + if (! alpha_using_srm) { + u8 t8; + pci_bus_read_config_byte(bus, 0x38, 0x43, &t8); + pci_bus_write_config_byte(bus, 0x38, 0x43, t8 | 0x80); + outb(1, 0x92); + outb(0, 0x92); + /* NOTREACHED */ + } + break; + + case LINUX_REBOOT_CMD_POWER_OFF: + /* Assume M1543C */ + off = 0x2000; /* SLP_TYPE = 0, SLP_EN = 1 */ + pci_bus_read_config_dword(bus, 0x88, 0x10, &pmuport); + if (!pmuport) { + /* M1535D/D+ */ + off = 0x3400; /* SLP_TYPE = 5, SLP_EN = 1 */ + pci_bus_read_config_dword(bus, 0x88, 0xe0, &pmuport); + } + pmuport &= 0xfffe; + outw(0xffff, pmuport); /* Clear pending events. */ + outw(off, pmuport + 4); + /* NOTREACHED */ + break; + } +} + +/* Perform analysis of a machine check that arrived from the system (NMI) */ + +static void +naut_sys_machine_check(unsigned long vector, unsigned long la_ptr, + struct pt_regs *regs) +{ + printk("PC %lx RA %lx\n", regs->pc, regs->r26); + irongate_pci_clr_err(); +} + +/* Machine checks can come from two sources - those on the CPU and those + in the system. They are analysed separately but all starts here. */ + +void +nautilus_machine_check(unsigned long vector, unsigned long la_ptr, + struct pt_regs *regs) +{ + char *mchk_class; + + /* Now for some analysis. Machine checks fall into two classes -- + those picked up by the system, and those picked up by the CPU. + Add to that the two levels of severity - correctable or not. */ + + if (vector == SCB_Q_SYSMCHK + && ((IRONGATE0->dramms & 0x300) == 0x300)) { + unsigned long nmi_ctl; + + /* Clear ALI NMI */ + nmi_ctl = inb(0x61); + nmi_ctl |= 0x0c; + outb(nmi_ctl, 0x61); + nmi_ctl &= ~0x0c; + outb(nmi_ctl, 0x61); + + /* Write again clears error bits. */ + IRONGATE0->stat_cmd = IRONGATE0->stat_cmd & ~0x100; + mb(); + IRONGATE0->stat_cmd; + + /* Write again clears error bits. 
*/ + IRONGATE0->dramms = IRONGATE0->dramms; + mb(); + IRONGATE0->dramms; + + draina(); + wrmces(0x7); + mb(); + return; + } + + if (vector == SCB_Q_SYSERR) + mchk_class = "Correctable"; + else if (vector == SCB_Q_SYSMCHK) + mchk_class = "Fatal"; + else { + ev6_machine_check(vector, la_ptr, regs); + return; + } + + printk(KERN_CRIT "NAUTILUS Machine check 0x%lx " + "[%s System Machine Check (NMI)]\n", + vector, mchk_class); + + naut_sys_machine_check(vector, la_ptr, regs); + + /* Tell the PALcode to clear the machine check */ + draina(); + wrmces(0x7); + mb(); +} + +extern void free_reserved_mem(void *, void *); + +static struct resource irongate_mem = { + .name = "Irongate PCI MEM", + .flags = IORESOURCE_MEM, +}; + +void __init +nautilus_init_pci(void) +{ + struct pci_controller *hose = hose_head; + struct pci_bus *bus; + struct pci_dev *irongate; + unsigned long bus_align, bus_size, pci_mem; + unsigned long memtop = max_low_pfn << PAGE_SHIFT; + + /* Scan our single hose. */ + bus = pci_scan_bus(0, alpha_mv.pci_ops, hose); + hose->bus = bus; + + irongate = pci_find_slot(0, 0); + bus->self = irongate; + bus->resource[1] = &irongate_mem; + + pci_bus_size_bridges(bus); + + /* IO port range. */ + bus->resource[0]->start = 0; + bus->resource[0]->end = 0xffff; + + /* Set up PCI memory range - limit is hardwired to 0xffffffff, + base must be at aligned to 16Mb. */ + bus_align = bus->resource[1]->start; + bus_size = bus->resource[1]->end + 1 - bus_align; + if (bus_align < 0x1000000UL) + bus_align = 0x1000000UL; + + pci_mem = (0x100000000UL - bus_size) & -bus_align; + + bus->resource[1]->start = pci_mem; + bus->resource[1]->end = 0xffffffffUL; + if (request_resource(&iomem_resource, bus->resource[1]) < 0) + printk(KERN_ERR "Failed to request MEM on hose 0\n"); + + if (pci_mem < memtop) + memtop = pci_mem; + if (memtop > alpha_mv.min_mem_address) { + free_reserved_mem(__va(alpha_mv.min_mem_address), + __va(memtop)); + printk("nautilus_init_pci: %ldk freed\n", + (memtop - alpha_mv.min_mem_address) >> 10); + } + + if ((IRONGATE0->dev_vendor >> 16) > 0x7006) /* Albacore? */ + IRONGATE0->pci_mem = pci_mem; + + pci_bus_assign_resources(bus); + pci_fixup_irqs(alpha_mv.pci_swizzle, alpha_mv.pci_map_irq); +} + +/* + * The System Vectors + */ + +struct alpha_machine_vector nautilus_mv __initmv = { + .vector_name = "Nautilus", + DO_EV6_MMU, + DO_DEFAULT_RTC, + DO_IRONGATE_IO, + .machine_check = nautilus_machine_check, + .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, + .min_io_address = DEFAULT_IO_BASE, + .min_mem_address = IRONGATE_DEFAULT_MEM_BASE, + + .nr_irqs = 16, + .device_interrupt = isa_device_interrupt, + + .init_arch = irongate_init_arch, + .init_irq = nautilus_init_irq, + .init_rtc = common_init_rtc, + .init_pci = nautilus_init_pci, + .kill_arch = nautilus_kill_arch, + .pci_map_irq = nautilus_map_irq, + .pci_swizzle = common_swizzle, +}; +ALIAS_MV(nautilus) diff --git a/arch/alpha/kernel/sys_noritake.c b/arch/alpha/kernel/sys_noritake.c new file mode 100644 index 0000000..65061f5 --- /dev/null +++ b/arch/alpha/kernel/sys_noritake.c @@ -0,0 +1,347 @@ +/* + * linux/arch/alpha/kernel/sys_noritake.c + * + * Copyright (C) 1995 David A Rusling + * Copyright (C) 1996 Jay A Estabrook + * Copyright (C) 1998, 1999 Richard Henderson + * + * Code supporting the NORITAKE (AlphaServer 1000A), + * CORELLE (AlphaServer 800), and ALCOR Primo (AlphaStation 600A). 
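+ */
+
+/*
+ * Editor's sketch (not part of the original patch): nautilus_init_pci()
+ * above computes the highest alignment-respecting base for a PCI memory
+ * window that must end at 4GB: (limit - size) & -align rounds the
+ * candidate base down to a multiple of align (align must be a power of
+ * two).  A tiny worked version:
+ */
+#include <stdint.h>
+
+static uint64_t demo_window_base(uint64_t size, uint64_t align)
+{
+	/* e.g. size = 0x1800000, align = 0x1000000:
+	   0x100000000 - 0x1800000 = 0xfe800000, rounded down to
+	   0xfe000000 -- a 16MB-aligned base leaving room below 4GB. */
+	return (0x100000000ULL - size) & ~(align - 1);
+}
+
+/*
+ * (sys_noritake.c file header resumes)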
+ */ + +#include <linux/config.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/bitops.h> + +#include <asm/ptrace.h> +#include <asm/system.h> +#include <asm/dma.h> +#include <asm/irq.h> +#include <asm/mmu_context.h> +#include <asm/io.h> +#include <asm/pgtable.h> +#include <asm/core_apecs.h> +#include <asm/core_cia.h> +#include <asm/tlbflush.h> + +#include "proto.h" +#include "irq_impl.h" +#include "pci_impl.h" +#include "machvec_impl.h" + +/* Note mask bit is true for ENABLED irqs. */ +static int cached_irq_mask; + +static inline void +noritake_update_irq_hw(int irq, int mask) +{ + int port = 0x54a; + if (irq >= 32) { + mask >>= 16; + port = 0x54c; + } + outw(mask, port); +} + +static void +noritake_enable_irq(unsigned int irq) +{ + noritake_update_irq_hw(irq, cached_irq_mask |= 1 << (irq - 16)); +} + +static void +noritake_disable_irq(unsigned int irq) +{ + noritake_update_irq_hw(irq, cached_irq_mask &= ~(1 << (irq - 16))); +} + +static unsigned int +noritake_startup_irq(unsigned int irq) +{ + noritake_enable_irq(irq); + return 0; +} + +static struct hw_interrupt_type noritake_irq_type = { + .typename = "NORITAKE", + .startup = noritake_startup_irq, + .shutdown = noritake_disable_irq, + .enable = noritake_enable_irq, + .disable = noritake_disable_irq, + .ack = noritake_disable_irq, + .end = noritake_enable_irq, +}; + +static void +noritake_device_interrupt(unsigned long vector, struct pt_regs *regs) +{ + unsigned long pld; + unsigned int i; + + /* Read the interrupt summary registers of NORITAKE */ + pld = (((unsigned long) inw(0x54c) << 32) + | ((unsigned long) inw(0x54a) << 16) + | ((unsigned long) inb(0xa0) << 8) + | inb(0x20)); + + /* + * Now for every possible bit set, work through them and call + * the appropriate interrupt handler. + */ + while (pld) { + i = ffz(~pld); + pld &= pld - 1; /* clear least bit set */ + if (i < 16) { + isa_device_interrupt(vector, regs); + } else { + handle_irq(i, regs); + } + } +} + +static void +noritake_srm_device_interrupt(unsigned long vector, struct pt_regs * regs) +{ + int irq; + + irq = (vector - 0x800) >> 4; + + /* + * I really hate to do this, too, but the NORITAKE SRM console also + * reports PCI vectors *lower* than I expected from the bit numbers + * in the documentation. + * But I really don't want to change the fixup code for allocation + * of IRQs, nor the alpha_irq_mask maintenance stuff, both of which + * look nice and clean now. + * So, here's this additional grotty hack... :-( + */ + if (irq >= 16) + irq = irq + 1; + + handle_irq(irq, regs); +} + +static void __init +noritake_init_irq(void) +{ + long i; + + if (alpha_using_srm) + alpha_mv.device_interrupt = noritake_srm_device_interrupt; + + outw(0, 0x54a); + outw(0, 0x54c); + + for (i = 16; i < 48; ++i) { + irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; + irq_desc[i].handler = &noritake_irq_type; + } + + init_i8259a_irqs(); + common_init_isa_dma(); +} + + +/* + * PCI Fixup configuration. 
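+ */
+
+/*
+ * Illustrative aside (not part of the original patch):
+ * noritake_device_interrupt() above packs four narrow status reads
+ * into one 64-bit word so a single scan loop can service everything --
+ * the two i8259 PICs in the low 16 bits, the two PCI summary registers
+ * above them.  A standalone sketch of the packing, with the register
+ * values passed in instead of read via inb()/inw():
+ */
+#include <stdint.h>
+
+static uint64_t demo_pack_summary(uint16_t sum2, uint16_t sum1,
+				  uint8_t pic2, uint8_t pic1)
+{
+	return ((uint64_t)sum2 << 32) | ((uint64_t)sum1 << 16)
+	       | ((uint64_t)pic2 << 8) | pic1;
+}
+
+/*
+ * PCI Fixup configuration (continued):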
+ * + * Summary @ 0x542, summary register #1: + * Bit Meaning + * 0 All valid ints from summary regs 2 & 3 + * 1 QLOGIC ISP1020A SCSI + * 2 Interrupt Line A from slot 0 + * 3 Interrupt Line B from slot 0 + * 4 Interrupt Line A from slot 1 + * 5 Interrupt line B from slot 1 + * 6 Interrupt Line A from slot 2 + * 7 Interrupt Line B from slot 2 + * 8 Interrupt Line A from slot 3 + * 9 Interrupt Line B from slot 3 + *10 Interrupt Line A from slot 4 + *11 Interrupt Line B from slot 4 + *12 Interrupt Line A from slot 5 + *13 Interrupt Line B from slot 5 + *14 Interrupt Line A from slot 6 + *15 Interrupt Line B from slot 6 + * + * Summary @ 0x544, summary register #2: + * Bit Meaning + * 0 OR of all unmasked ints in SR #2 + * 1 OR of secondary bus ints + * 2 Interrupt Line C from slot 0 + * 3 Interrupt Line D from slot 0 + * 4 Interrupt Line C from slot 1 + * 5 Interrupt line D from slot 1 + * 6 Interrupt Line C from slot 2 + * 7 Interrupt Line D from slot 2 + * 8 Interrupt Line C from slot 3 + * 9 Interrupt Line D from slot 3 + *10 Interrupt Line C from slot 4 + *11 Interrupt Line D from slot 4 + *12 Interrupt Line C from slot 5 + *13 Interrupt Line D from slot 5 + *14 Interrupt Line C from slot 6 + *15 Interrupt Line D from slot 6 + * + * The device to slot mapping looks like: + * + * Slot Device + * 7 Intel PCI-EISA bridge chip + * 8 DEC PCI-PCI bridge chip + * 11 PCI on board slot 0 + * 12 PCI on board slot 1 + * 13 PCI on board slot 2 + * + * + * This two layered interrupt approach means that we allocate IRQ 16 and + * above for PCI interrupts. The IRQ relates to which bit the interrupt + * comes in on. This makes interrupt processing much easier. + */ + +static int __init +noritake_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + static char irq_tab[15][5] __initdata = { + /*INT INTA INTB INTC INTD */ + /* note: IDSELs 16, 17, and 25 are CORELLE only */ + { 16+1, 16+1, 16+1, 16+1, 16+1}, /* IdSel 16, QLOGIC */ + { -1, -1, -1, -1, -1}, /* IdSel 17, S3 Trio64 */ + { -1, -1, -1, -1, -1}, /* IdSel 18, PCEB */ + { -1, -1, -1, -1, -1}, /* IdSel 19, PPB */ + { -1, -1, -1, -1, -1}, /* IdSel 20, ???? */ + { -1, -1, -1, -1, -1}, /* IdSel 21, ???? */ + { 16+2, 16+2, 16+3, 32+2, 32+3}, /* IdSel 22, slot 0 */ + { 16+4, 16+4, 16+5, 32+4, 32+5}, /* IdSel 23, slot 1 */ + { 16+6, 16+6, 16+7, 32+6, 32+7}, /* IdSel 24, slot 2 */ + { 16+8, 16+8, 16+9, 32+8, 32+9}, /* IdSel 25, slot 3 */ + /* The following 5 are actually on PCI bus 1, which is + across the built-in bridge of the NORITAKE only. */ + { 16+1, 16+1, 16+1, 16+1, 16+1}, /* IdSel 16, QLOGIC */ + { 16+8, 16+8, 16+9, 32+8, 32+9}, /* IdSel 17, slot 3 */ + {16+10, 16+10, 16+11, 32+10, 32+11}, /* IdSel 18, slot 4 */ + {16+12, 16+12, 16+13, 32+12, 32+13}, /* IdSel 19, slot 5 */ + {16+14, 16+14, 16+15, 32+14, 32+15}, /* IdSel 20, slot 6 */ + }; + const long min_idsel = 5, max_idsel = 19, irqs_per_slot = 5; + return COMMON_TABLE_LOOKUP; +} + +static u8 __init +noritake_swizzle(struct pci_dev *dev, u8 *pinp) +{ + int slot, pin = *pinp; + + if (dev->bus->number == 0) { + slot = PCI_SLOT(dev->devfn); + } + /* Check for the built-in bridge */ + else if (PCI_SLOT(dev->bus->self->devfn) == 8) { + slot = PCI_SLOT(dev->devfn) + 15; /* WAG! */ + } + else + { + /* Must be a card-based bridge. */ + do { + if (PCI_SLOT(dev->bus->self->devfn) == 8) { + slot = PCI_SLOT(dev->devfn) + 15; + break; + } + pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)) ; + + /* Move up the chain of bridges. */ + dev = dev->bus->self; + /* Slot of the next bridge. 
*/ + slot = PCI_SLOT(dev->devfn); + } while (dev->bus->self); + } + *pinp = pin; + return slot; +} + +#if defined(CONFIG_ALPHA_GENERIC) || !defined(CONFIG_ALPHA_PRIMO) +static void +noritake_apecs_machine_check(unsigned long vector, unsigned long la_ptr, + struct pt_regs * regs) +{ +#define MCHK_NO_DEVSEL 0x205U +#define MCHK_NO_TABT 0x204U + + struct el_common *mchk_header; + unsigned int code; + + mchk_header = (struct el_common *)la_ptr; + + /* Clear the error before any reporting. */ + mb(); + mb(); /* magic */ + draina(); + apecs_pci_clr_err(); + wrmces(0x7); + mb(); + + code = mchk_header->code; + process_mcheck_info(vector, la_ptr, regs, "NORITAKE APECS", + (mcheck_expected(0) + && (code == MCHK_NO_DEVSEL + || code == MCHK_NO_TABT))); +} +#endif + + +/* + * The System Vectors + */ + +#if defined(CONFIG_ALPHA_GENERIC) || !defined(CONFIG_ALPHA_PRIMO) +struct alpha_machine_vector noritake_mv __initmv = { + .vector_name = "Noritake", + DO_EV4_MMU, + DO_DEFAULT_RTC, + DO_APECS_IO, + .machine_check = noritake_apecs_machine_check, + .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, + .min_io_address = EISA_DEFAULT_IO_BASE, + .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE, + + .nr_irqs = 48, + .device_interrupt = noritake_device_interrupt, + + .init_arch = apecs_init_arch, + .init_irq = noritake_init_irq, + .init_rtc = common_init_rtc, + .init_pci = common_init_pci, + .pci_map_irq = noritake_map_irq, + .pci_swizzle = noritake_swizzle, +}; +ALIAS_MV(noritake) +#endif + +#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_PRIMO) +struct alpha_machine_vector noritake_primo_mv __initmv = { + .vector_name = "Noritake-Primo", + DO_EV5_MMU, + DO_DEFAULT_RTC, + DO_CIA_IO, + .machine_check = cia_machine_check, + .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, + .min_io_address = EISA_DEFAULT_IO_BASE, + .min_mem_address = CIA_DEFAULT_MEM_BASE, + + .nr_irqs = 48, + .device_interrupt = noritake_device_interrupt, + + .init_arch = cia_init_arch, + .init_irq = noritake_init_irq, + .init_rtc = common_init_rtc, + .init_pci = cia_init_pci, + .kill_arch = cia_kill_arch, + .pci_map_irq = noritake_map_irq, + .pci_swizzle = noritake_swizzle, +}; +ALIAS_MV(noritake_primo) +#endif diff --git a/arch/alpha/kernel/sys_rawhide.c b/arch/alpha/kernel/sys_rawhide.c new file mode 100644 index 0000000..05888a0 --- /dev/null +++ b/arch/alpha/kernel/sys_rawhide.c @@ -0,0 +1,270 @@ +/* + * linux/arch/alpha/kernel/sys_rawhide.c + * + * Copyright (C) 1995 David A Rusling + * Copyright (C) 1996 Jay A Estabrook + * Copyright (C) 1998, 1999 Richard Henderson + * + * Code supporting the RAWHIDE. + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/pci.h> +#include <linux/init.h> + +#include <asm/ptrace.h> +#include <asm/system.h> +#include <asm/dma.h> +#include <asm/irq.h> +#include <asm/mmu_context.h> +#include <asm/io.h> +#include <asm/pgtable.h> +#include <asm/core_mcpcia.h> +#include <asm/tlbflush.h> + +#include "proto.h" +#include "irq_impl.h" +#include "pci_impl.h" +#include "machvec_impl.h" + + +/* + * HACK ALERT! only the boot cpu is used for interrupts. + */ + + +/* Note mask bit is true for ENABLED irqs. 
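+ */
+
+/*
+ * Editorial aside (not part of the original patch): the RAWHIDE
+ * handlers below fold a flat IRQ number into a (hose, bit) pair --
+ * 16 legacy IRQs first, then 24 interrupt bits per MCPCIA hose.
+ * A standalone sketch of the decomposition the enable/disable paths
+ * perform inline:
+ */
+static void demo_rawhide_split_irq(unsigned irq,
+				   unsigned *hose, unsigned *bit)
+{
+	irq -= 16;		/* skip the legacy i8259 range */
+	*hose = irq / 24;	/* 24 interrupt bits per hose  */
+	*bit  = irq % 24;
+}
+
+/*
+ * Note mask bit is true for ENABLED irqs.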
*/ + +static unsigned int hose_irq_masks[4] = { + 0xff0000, 0xfe0000, 0xff0000, 0xff0000 +}; +static unsigned int cached_irq_masks[4]; +DEFINE_SPINLOCK(rawhide_irq_lock); + +static inline void +rawhide_update_irq_hw(int hose, int mask) +{ + *(vuip)MCPCIA_INT_MASK0(MCPCIA_HOSE2MID(hose)) = mask; + mb(); + *(vuip)MCPCIA_INT_MASK0(MCPCIA_HOSE2MID(hose)); +} + +static inline void +rawhide_enable_irq(unsigned int irq) +{ + unsigned int mask, hose; + + irq -= 16; + hose = irq / 24; + irq -= hose * 24; + mask = 1 << irq; + + spin_lock(&rawhide_irq_lock); + mask |= cached_irq_masks[hose]; + cached_irq_masks[hose] = mask; + rawhide_update_irq_hw(hose, mask); + spin_unlock(&rawhide_irq_lock); +} + +static void +rawhide_disable_irq(unsigned int irq) +{ + unsigned int mask, hose; + + irq -= 16; + hose = irq / 24; + irq -= hose * 24; + mask = ~(1 << irq) | hose_irq_masks[hose]; + + spin_lock(&rawhide_irq_lock); + mask &= cached_irq_masks[hose]; + cached_irq_masks[hose] = mask; + rawhide_update_irq_hw(hose, mask); + spin_unlock(&rawhide_irq_lock); +} + +static void +rawhide_mask_and_ack_irq(unsigned int irq) +{ + unsigned int mask, mask1, hose; + + irq -= 16; + hose = irq / 24; + irq -= hose * 24; + mask1 = 1 << irq; + mask = ~mask1 | hose_irq_masks[hose]; + + spin_lock(&rawhide_irq_lock); + + mask &= cached_irq_masks[hose]; + cached_irq_masks[hose] = mask; + rawhide_update_irq_hw(hose, mask); + + /* Clear the interrupt. */ + *(vuip)MCPCIA_INT_REQ(MCPCIA_HOSE2MID(hose)) = mask1; + + spin_unlock(&rawhide_irq_lock); +} + +static unsigned int +rawhide_startup_irq(unsigned int irq) +{ + rawhide_enable_irq(irq); + return 0; +} + +static void +rawhide_end_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + rawhide_enable_irq(irq); +} + +static struct hw_interrupt_type rawhide_irq_type = { + .typename = "RAWHIDE", + .startup = rawhide_startup_irq, + .shutdown = rawhide_disable_irq, + .enable = rawhide_enable_irq, + .disable = rawhide_disable_irq, + .ack = rawhide_mask_and_ack_irq, + .end = rawhide_end_irq, +}; + +static void +rawhide_srm_device_interrupt(unsigned long vector, struct pt_regs * regs) +{ + int irq; + + irq = (vector - 0x800) >> 4; + + /* + * The RAWHIDE SRM console reports PCI interrupts with a vector + * 0x80 *higher* than one might expect, as PCI IRQ 0 (ie bit 0) + * shows up as IRQ 24, etc, etc. We adjust it down by 8 to have + * it line up with the actual bit numbers from the REQ registers, + * which is how we manage the interrupts/mask. Sigh... + * + * Also, PCI #1 interrupts are offset some more... :-( + */ + + if (irq == 52) { + /* SCSI on PCI1 is special. */ + irq = 72; + } + + /* Adjust by which hose it is from. */ + irq -= ((irq + 16) >> 2) & 0x38; + + handle_irq(irq, regs); +} + +static void __init +rawhide_init_irq(void) +{ + struct pci_controller *hose; + long i; + + mcpcia_init_hoses(); + + for (hose = hose_head; hose; hose = hose->next) { + unsigned int h = hose->index; + unsigned int mask = hose_irq_masks[h]; + + cached_irq_masks[h] = mask; + *(vuip)MCPCIA_INT_MASK0(MCPCIA_HOSE2MID(h)) = mask; + *(vuip)MCPCIA_INT_MASK1(MCPCIA_HOSE2MID(h)) = 0; + } + + for (i = 16; i < 128; ++i) { + irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; + irq_desc[i].handler = &rawhide_irq_type; + } + + init_i8259a_irqs(); + common_init_isa_dma(); +} + +/* + * PCI Fixup configuration. 
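+ *
+ * (Worked example of the vector fixup above: SRM reports hose 0,
+ * REQ bit 0 as IRQ 24, and 24 - (((24 + 16) >> 2) & 0x38) = 16, the
+ * first PCI kernel IRQ.  Hose 1, REQ bit 0 arrives as IRQ 56 and
+ * becomes 56 - 16 = 40 = 16 + 24.)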
+ * + * Summary @ MCPCIA_PCI0_INT_REQ: + * Bit Meaning + * 0 Interrupt Line A from slot 2 PCI0 + * 1 Interrupt Line B from slot 2 PCI0 + * 2 Interrupt Line C from slot 2 PCI0 + * 3 Interrupt Line D from slot 2 PCI0 + * 4 Interrupt Line A from slot 3 PCI0 + * 5 Interrupt Line B from slot 3 PCI0 + * 6 Interrupt Line C from slot 3 PCI0 + * 7 Interrupt Line D from slot 3 PCI0 + * 8 Interrupt Line A from slot 4 PCI0 + * 9 Interrupt Line B from slot 4 PCI0 + * 10 Interrupt Line C from slot 4 PCI0 + * 11 Interrupt Line D from slot 4 PCI0 + * 12 Interrupt Line A from slot 5 PCI0 + * 13 Interrupt Line B from slot 5 PCI0 + * 14 Interrupt Line C from slot 5 PCI0 + * 15 Interrupt Line D from slot 5 PCI0 + * 16 EISA interrupt (PCI 0) or SCSI interrupt (PCI 1) + * 17-23 NA + * + * IdSel + * 1 EISA bridge (PCI bus 0 only) + * 2 PCI option slot 2 + * 3 PCI option slot 3 + * 4 PCI option slot 4 + * 5 PCI option slot 5 + * + */ + +static int __init +rawhide_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + static char irq_tab[5][5] __initdata = { + /*INT INTA INTB INTC INTD */ + { 16+16, 16+16, 16+16, 16+16, 16+16}, /* IdSel 1 SCSI PCI 1 */ + { 16+ 0, 16+ 0, 16+ 1, 16+ 2, 16+ 3}, /* IdSel 2 slot 2 */ + { 16+ 4, 16+ 4, 16+ 5, 16+ 6, 16+ 7}, /* IdSel 3 slot 3 */ + { 16+ 8, 16+ 8, 16+ 9, 16+10, 16+11}, /* IdSel 4 slot 4 */ + { 16+12, 16+12, 16+13, 16+14, 16+15} /* IdSel 5 slot 5 */ + }; + const long min_idsel = 1, max_idsel = 5, irqs_per_slot = 5; + + struct pci_controller *hose = dev->sysdata; + int irq = COMMON_TABLE_LOOKUP; + if (irq >= 0) + irq += 24 * hose->index; + return irq; +} + + +/* + * The System Vector + */ + +struct alpha_machine_vector rawhide_mv __initmv = { + .vector_name = "Rawhide", + DO_EV5_MMU, + DO_DEFAULT_RTC, + DO_MCPCIA_IO, + .machine_check = mcpcia_machine_check, + .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, + .min_io_address = DEFAULT_IO_BASE, + .min_mem_address = MCPCIA_DEFAULT_MEM_BASE, + .pci_dac_offset = MCPCIA_DAC_OFFSET, + + .nr_irqs = 128, + .device_interrupt = rawhide_srm_device_interrupt, + + .init_arch = mcpcia_init_arch, + .init_irq = rawhide_init_irq, + .init_rtc = common_init_rtc, + .init_pci = common_init_pci, + .kill_arch = NULL, + .pci_map_irq = rawhide_map_irq, + .pci_swizzle = common_swizzle, +}; +ALIAS_MV(rawhide) diff --git a/arch/alpha/kernel/sys_ruffian.c b/arch/alpha/kernel/sys_ruffian.c new file mode 100644 index 0000000..78c30de --- /dev/null +++ b/arch/alpha/kernel/sys_ruffian.c @@ -0,0 +1,240 @@ +/* + * linux/arch/alpha/kernel/sys_ruffian.c + * + * Copyright (C) 1995 David A Rusling + * Copyright (C) 1996 Jay A Estabrook + * Copyright (C) 1998, 1999, 2000 Richard Henderson + * + * Code supporting the RUFFIAN. 
+ */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/pci.h> +#include <linux/ioport.h> +#include <linux/init.h> + +#include <asm/ptrace.h> +#include <asm/system.h> +#include <asm/dma.h> +#include <asm/irq.h> +#include <asm/mmu_context.h> +#include <asm/io.h> +#include <asm/pgtable.h> +#include <asm/core_cia.h> +#include <asm/tlbflush.h> +#include <asm/8253pit.h> + +#include "proto.h" +#include "irq_impl.h" +#include "pci_impl.h" +#include "machvec_impl.h" + + +static void __init +ruffian_init_irq(void) +{ + /* Invert 6&7 for i82371 */ + *(vulp)PYXIS_INT_HILO = 0x000000c0UL; mb(); + *(vulp)PYXIS_INT_CNFG = 0x00002064UL; mb(); /* all clear */ + + outb(0x11,0xA0); + outb(0x08,0xA1); + outb(0x02,0xA1); + outb(0x01,0xA1); + outb(0xFF,0xA1); + + outb(0x11,0x20); + outb(0x00,0x21); + outb(0x04,0x21); + outb(0x01,0x21); + outb(0xFF,0x21); + + /* Finish writing the 82C59A PIC Operation Control Words */ + outb(0x20,0xA0); + outb(0x20,0x20); + + init_i8259a_irqs(); + + /* Not interested in the bogus interrupts (0,3,6), + NMI (1), HALT (2), flash (5), or 21142 (8). */ + init_pyxis_irqs(0x16f0000); + + common_init_isa_dma(); +} + +#define RUFFIAN_LATCH ((PIT_TICK_RATE + HZ / 2) / HZ) + +static void __init +ruffian_init_rtc(void) +{ + /* Ruffian does not have the RTC connected to the CPU timer + interrupt. Instead, it uses the PIT connected to IRQ 0. */ + + /* Setup interval timer. */ + outb(0x34, 0x43); /* binary, mode 2, LSB/MSB, ch 0 */ + outb(RUFFIAN_LATCH & 0xff, 0x40); /* LSB */ + outb(RUFFIAN_LATCH >> 8, 0x40); /* MSB */ + + outb(0xb6, 0x43); /* pit counter 2: speaker */ + outb(0x31, 0x42); + outb(0x13, 0x42); + + setup_irq(0, &timer_irqaction); +} + +static void +ruffian_kill_arch (int mode) +{ + cia_kill_arch(mode); +#if 0 + /* This only causes re-entry to ARCSBIOS */ + /* Perhaps this works for other PYXIS as well? */ + *(vuip) PYXIS_RESET = 0x0000dead; + mb(); +#endif +} + +/* + * Interrupt routing: + * + * Primary bus + * IdSel INTA INTB INTC INTD + * 21052 13 - - - - + * SIO 14 23 - - - + * 21143 15 44 - - - + * Slot 0 17 43 42 41 40 + * + * Secondary bus + * IdSel INTA INTB INTC INTD + * Slot 0 8 (18) 19 18 17 16 + * Slot 1 9 (19) 31 30 29 28 + * Slot 2 10 (20) 27 26 25 24 + * Slot 3 11 (21) 39 38 37 36 + * Slot 4 12 (22) 35 34 33 32 + * 53c875 13 (23) 20 - - - + * + */ + +static int __init +ruffian_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + static char irq_tab[11][5] __initdata = { + /*INT INTA INTB INTC INTD */ + {-1, -1, -1, -1, -1}, /* IdSel 13, 21052 */ + {-1, -1, -1, -1, -1}, /* IdSel 14, SIO */ + {44, 44, 44, 44, 44}, /* IdSel 15, 21143 */ + {-1, -1, -1, -1, -1}, /* IdSel 16, none */ + {43, 43, 42, 41, 40}, /* IdSel 17, 64-bit slot */ + /* the next 6 are actually on PCI bus 1, across the bridge */ + {19, 19, 18, 17, 16}, /* IdSel 8, slot 0 */ + {31, 31, 30, 29, 28}, /* IdSel 9, slot 1 */ + {27, 27, 26, 25, 24}, /* IdSel 10, slot 2 */ + {39, 39, 38, 37, 36}, /* IdSel 11, slot 3 */ + {35, 35, 34, 33, 32}, /* IdSel 12, slot 4 */ + {20, 20, 20, 20, 20}, /* IdSel 13, 53c875 */ + }; + const long min_idsel = 13, max_idsel = 23, irqs_per_slot = 5; + return COMMON_TABLE_LOOKUP; +} + +static u8 __init +ruffian_swizzle(struct pci_dev *dev, u8 *pinp) +{ + int slot, pin = *pinp; + + if (dev->bus->number == 0) { + slot = PCI_SLOT(dev->devfn); + } + /* Check for the built-in bridge. 
*/ + else if (PCI_SLOT(dev->bus->self->devfn) == 13) { + slot = PCI_SLOT(dev->devfn) + 10; + } + else + { + /* Must be a card-based bridge. */ + do { + if (PCI_SLOT(dev->bus->self->devfn) == 13) { + slot = PCI_SLOT(dev->devfn) + 10; + break; + } + pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)); + + /* Move up the chain of bridges. */ + dev = dev->bus->self; + /* Slot of the next bridge. */ + slot = PCI_SLOT(dev->devfn); + } while (dev->bus->self); + } + *pinp = pin; + return slot; +} + +#ifdef BUILDING_FOR_MILO +/* + * The DeskStation Ruffian motherboard firmware does not place + * the memory size in the PALimpure area. Therefore, we use + * the Bank Configuration Registers in PYXIS to obtain the size. + */ +static unsigned long __init +ruffian_get_bank_size(unsigned long offset) +{ + unsigned long bank_addr, bank, ret = 0; + + /* Valid offsets are: 0x800, 0x840 and 0x880 + since Ruffian only uses three banks. */ + bank_addr = (unsigned long)PYXIS_MCR + offset; + bank = *(vulp)bank_addr; + + /* Check BANK_ENABLE */ + if (bank & 0x01) { + static unsigned long size[] __initdata = { + 0x40000000UL, /* 0x00, 1G */ + 0x20000000UL, /* 0x02, 512M */ + 0x10000000UL, /* 0x04, 256M */ + 0x08000000UL, /* 0x06, 128M */ + 0x04000000UL, /* 0x08, 64M */ + 0x02000000UL, /* 0x0a, 32M */ + 0x01000000UL, /* 0x0c, 16M */ + 0x00800000UL, /* 0x0e, 8M */ + 0x80000000UL, /* 0x10, 2G */ + }; + + bank = (bank & 0x1e) >> 1; + if (bank < sizeof(size)/sizeof(*size)) + ret = size[bank]; + } + + return ret; +} +#endif /* BUILDING_FOR_MILO */ + +/* + * The System Vector + */ + +struct alpha_machine_vector ruffian_mv __initmv = { + .vector_name = "Ruffian", + DO_EV5_MMU, + DO_DEFAULT_RTC, + DO_PYXIS_IO, + .machine_check = cia_machine_check, + .max_isa_dma_address = ALPHA_RUFFIAN_MAX_ISA_DMA_ADDRESS, + .min_io_address = DEFAULT_IO_BASE, + .min_mem_address = DEFAULT_MEM_BASE, + .pci_dac_offset = PYXIS_DAC_OFFSET, + + .nr_irqs = 48, + .device_interrupt = pyxis_device_interrupt, + + .init_arch = pyxis_init_arch, + .init_irq = ruffian_init_irq, + .init_rtc = ruffian_init_rtc, + .init_pci = cia_init_pci, + .kill_arch = ruffian_kill_arch, + .pci_map_irq = ruffian_map_irq, + .pci_swizzle = ruffian_swizzle, +}; +ALIAS_MV(ruffian) diff --git a/arch/alpha/kernel/sys_rx164.c b/arch/alpha/kernel/sys_rx164.c new file mode 100644 index 0000000..5840424 --- /dev/null +++ b/arch/alpha/kernel/sys_rx164.c @@ -0,0 +1,220 @@ +/* + * linux/arch/alpha/kernel/sys_rx164.c + * + * Copyright (C) 1995 David A Rusling + * Copyright (C) 1996 Jay A Estabrook + * Copyright (C) 1998, 1999 Richard Henderson + * + * Code supporting the RX164 (PCA56+POLARIS). + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/bitops.h> + +#include <asm/ptrace.h> +#include <asm/system.h> +#include <asm/dma.h> +#include <asm/irq.h> +#include <asm/mmu_context.h> +#include <asm/io.h> +#include <asm/pgtable.h> +#include <asm/core_polaris.h> +#include <asm/tlbflush.h> + +#include "proto.h" +#include "irq_impl.h" +#include "pci_impl.h" +#include "machvec_impl.h" + + +/* Note mask bit is true for ENABLED irqs. 
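+   E.g. enabling kernel IRQ 21 sets bit 5 (21 - 16) of the mask that
+   rx164_update_irq_hw() writes to the Polaris register at dense
+   config offset 0x74.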
*/ +static unsigned long cached_irq_mask; + +static inline void +rx164_update_irq_hw(unsigned long mask) +{ + volatile unsigned int *irq_mask; + + irq_mask = (void *)(POLARIS_DENSE_CONFIG_BASE + 0x74); + *irq_mask = mask; + mb(); + *irq_mask; +} + +static inline void +rx164_enable_irq(unsigned int irq) +{ + rx164_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16)); +} + +static void +rx164_disable_irq(unsigned int irq) +{ + rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16))); +} + +static unsigned int +rx164_startup_irq(unsigned int irq) +{ + rx164_enable_irq(irq); + return 0; +} + +static void +rx164_end_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + rx164_enable_irq(irq); +} + +static struct hw_interrupt_type rx164_irq_type = { + .typename = "RX164", + .startup = rx164_startup_irq, + .shutdown = rx164_disable_irq, + .enable = rx164_enable_irq, + .disable = rx164_disable_irq, + .ack = rx164_disable_irq, + .end = rx164_end_irq, +}; + +static void +rx164_device_interrupt(unsigned long vector, struct pt_regs *regs) +{ + unsigned long pld; + volatile unsigned int *dirr; + long i; + + /* Read the interrupt summary register. On Polaris, this is + the DIRR register in PCI config space (offset 0x84). */ + dirr = (void *)(POLARIS_DENSE_CONFIG_BASE + 0x84); + pld = *dirr; + + /* + * Now for every possible bit set, work through them and call + * the appropriate interrupt handler. + */ + while (pld) { + i = ffz(~pld); + pld &= pld - 1; /* clear least bit set */ + if (i == 20) { + isa_no_iack_sc_device_interrupt(vector, regs); + } else { + handle_irq(16+i, regs); + } + } +} + +static void __init +rx164_init_irq(void) +{ + long i; + + rx164_update_irq_hw(0); + for (i = 16; i < 40; ++i) { + irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; + irq_desc[i].handler = &rx164_irq_type; + } + + init_i8259a_irqs(); + common_init_isa_dma(); + + setup_irq(16+20, &isa_cascade_irqaction); +} + + +/* + * The RX164 changed its interrupt routing between pass1 and pass2... 
+ *
+ * PASS1:
+ *
+ * Slot IDSEL INTA INTB INTC INTD
+ * 0 6 5 10 15 20
+ * 1 7 4 9 14 19
+ * 2 5 3 8 13 18
+ * 3 9 2 7 12 17
+ * 4 10 1 6 11 16
+ *
+ * PASS2:
+ * Slot IDSEL INTA INTB INTC INTD
+ * 0 5 1 7 12 17
+ * 1 6 2 8 13 18
+ * 2 8 3 9 14 19
+ * 3 9 4 10 15 20
+ * 4 10 5 11 16 6
+ *
+ */
+
+/*
+ * IdSel
+ * 5 32 bit PCI option slot 0
+ * 6 64 bit PCI option slot 1
+ * 7 PCI-ISA bridge
+ * 8 64 bit PCI option slot 2
+ * 9 32 bit PCI option slot 3
+ * 10 PCI-PCI bridge
+ *
+ */
+
+static int __init
+rx164_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+#if 0
+ static char irq_tab_pass1[6][5] __initdata = {
+ /*INT INTA INTB INTC INTD */
+ { 16+3, 16+3, 16+8, 16+13, 16+18}, /* IdSel 5, slot 2 */
+ { 16+5, 16+5, 16+10, 16+15, 16+20}, /* IdSel 6, slot 0 */
+ { 16+4, 16+4, 16+9, 16+14, 16+19}, /* IdSel 7, slot 1 */
+ { -1, -1, -1, -1, -1}, /* IdSel 8, PCI/ISA bridge */
+ { 16+2, 16+2, 16+7, 16+12, 16+17}, /* IdSel 9, slot 3 */
+ { 16+1, 16+1, 16+6, 16+11, 16+16}, /* IdSel 10, slot 4 */
+ };
+#else
+ static char irq_tab[6][5] __initdata = {
+ /*INT INTA INTB INTC INTD */
+ { 16+0, 16+0, 16+6, 16+11, 16+16}, /* IdSel 5, slot 0 */
+ { 16+1, 16+1, 16+7, 16+12, 16+17}, /* IdSel 6, slot 1 */
+ { -1, -1, -1, -1, -1}, /* IdSel 7, PCI/ISA bridge */
+ { 16+2, 16+2, 16+8, 16+13, 16+18}, /* IdSel 8, slot 2 */
+ { 16+3, 16+3, 16+9, 16+14, 16+19}, /* IdSel 9, slot 3 */
+ { 16+4, 16+4, 16+10, 16+15, 16+5}, /* IdSel 10, PCI-PCI */
+ };
+#endif
+ const long min_idsel = 5, max_idsel = 10, irqs_per_slot = 5;
+
+ /* JRP - Need to figure out how to distinguish pass1 from pass2,
+ and use the correct table. */
+ return COMMON_TABLE_LOOKUP;
+}
+
+
+/*
+ * The System Vector
+ */
+
+struct alpha_machine_vector rx164_mv __initmv = {
+ .vector_name = "RX164",
+ DO_EV5_MMU,
+ DO_DEFAULT_RTC,
+ DO_POLARIS_IO,
+ .machine_check = polaris_machine_check,
+ .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = DEFAULT_MEM_BASE,
+
+ .nr_irqs = 40,
+ .device_interrupt = rx164_device_interrupt,
+
+ .init_arch = polaris_init_arch,
+ .init_irq = rx164_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = common_init_pci,
+ .kill_arch = NULL,
+ .pci_map_irq = rx164_map_irq,
+ .pci_swizzle = common_swizzle,
+};
+ALIAS_MV(rx164)
diff --git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c
new file mode 100644
index 0000000..a7ff844
--- /dev/null
+++ b/arch/alpha/kernel/sys_sable.c
@@ -0,0 +1,653 @@
+/*
+ * linux/arch/alpha/kernel/sys_sable.c
+ *
+ * Copyright (C) 1995 David A Rusling
+ * Copyright (C) 1996 Jay A Estabrook
+ * Copyright (C) 1998, 1999 Richard Henderson
+ *
+ * Code supporting the Sable, Sable-Gamma, and Lynx systems.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+
+#include <asm/ptrace.h>
+#include <asm/system.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include <asm/mmu_context.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/core_t2.h>
+#include <asm/tlbflush.h>
+
+#include "proto.h"
+#include "irq_impl.h"
+#include "pci_impl.h"
+#include "machvec_impl.h"
+
+DEFINE_SPINLOCK(sable_lynx_irq_lock);
+
+typedef struct irq_swizzle_struct
+{
+ char irq_to_mask[64];
+ char mask_to_irq[64];
+
+ /* Note mask bit is true for DISABLED irqs. 
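+ (opposite sense to the RX164 and RAWHIDE masks above, where a
+ set bit means enabled).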
*/ + unsigned long shadow_mask; + + void (*update_irq_hw)(unsigned long bit, unsigned long mask); + void (*ack_irq_hw)(unsigned long bit); + +} irq_swizzle_t; + +static irq_swizzle_t *sable_lynx_irq_swizzle; + +static void sable_lynx_init_irq(int nr_irqs); + +#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SABLE) + +/***********************************************************************/ +/* + * For SABLE, which is really baroque, we manage 40 IRQ's, but the + * hardware really only supports 24, not via normal ISA PIC, + * but cascaded custom 8259's, etc. + * 0-7 (char at 536) + * 8-15 (char at 53a) + * 16-23 (char at 53c) + * + * Summary Registers (536/53a/53c): + * + * Bit Meaning Kernel IRQ + *------------------------------------------ + * 0 PCI slot 0 34 + * 1 NCR810 (builtin) 33 + * 2 TULIP (builtin) 32 + * 3 mouse 12 + * 4 PCI slot 1 35 + * 5 PCI slot 2 36 + * 6 keyboard 1 + * 7 floppy 6 + * 8 COM2 3 + * 9 parallel port 7 + *10 EISA irq 3 - + *11 EISA irq 4 - + *12 EISA irq 5 5 + *13 EISA irq 6 - + *14 EISA irq 7 - + *15 COM1 4 + *16 EISA irq 9 9 + *17 EISA irq 10 10 + *18 EISA irq 11 11 + *19 EISA irq 12 - + *20 EISA irq 13 - + *21 EISA irq 14 14 + *22 NC 15 + *23 IIC - + */ + +static void +sable_update_irq_hw(unsigned long bit, unsigned long mask) +{ + int port = 0x537; + + if (bit >= 16) { + port = 0x53d; + mask >>= 16; + } else if (bit >= 8) { + port = 0x53b; + mask >>= 8; + } + + outb(mask, port); +} + +static void +sable_ack_irq_hw(unsigned long bit) +{ + int port, val1, val2; + + if (bit >= 16) { + port = 0x53c; + val1 = 0xE0 | (bit - 16); + val2 = 0xE0 | 4; + } else if (bit >= 8) { + port = 0x53a; + val1 = 0xE0 | (bit - 8); + val2 = 0xE0 | 3; + } else { + port = 0x536; + val1 = 0xE0 | (bit - 0); + val2 = 0xE0 | 1; + } + + outb(val1, port); /* ack the slave */ + outb(val2, 0x534); /* ack the master */ +} + +static irq_swizzle_t sable_irq_swizzle = { + { + -1, 6, -1, 8, 15, 12, 7, 9, /* pseudo PIC 0-7 */ + -1, 16, 17, 18, 3, -1, 21, 22, /* pseudo PIC 8-15 */ + -1, -1, -1, -1, -1, -1, -1, -1, /* pseudo EISA 0-7 */ + -1, -1, -1, -1, -1, -1, -1, -1, /* pseudo EISA 8-15 */ + 2, 1, 0, 4, 5, -1, -1, -1, /* pseudo PCI */ + -1, -1, -1, -1, -1, -1, -1, -1, /* */ + -1, -1, -1, -1, -1, -1, -1, -1, /* */ + -1, -1, -1, -1, -1, -1, -1, -1 /* */ + }, + { + 34, 33, 32, 12, 35, 36, 1, 6, /* mask 0-7 */ + 3, 7, -1, -1, 5, -1, -1, 4, /* mask 8-15 */ + 9, 10, 11, -1, -1, 14, 15, -1, /* mask 16-23 */ + -1, -1, -1, -1, -1, -1, -1, -1, /* */ + -1, -1, -1, -1, -1, -1, -1, -1, /* */ + -1, -1, -1, -1, -1, -1, -1, -1, /* */ + -1, -1, -1, -1, -1, -1, -1, -1, /* */ + -1, -1, -1, -1, -1, -1, -1, -1 /* */ + }, + -1, + sable_update_irq_hw, + sable_ack_irq_hw +}; + +static void __init +sable_init_irq(void) +{ + outb(-1, 0x537); /* slave 0 */ + outb(-1, 0x53b); /* slave 1 */ + outb(-1, 0x53d); /* slave 2 */ + outb(0x44, 0x535); /* enable cascades in master */ + + sable_lynx_irq_swizzle = &sable_irq_swizzle; + sable_lynx_init_irq(40); +} + +/* + * PCI Fixup configuration for ALPHA SABLE (2100). + * + * The device to slot mapping looks like: + * + * Slot Device + * 0 TULIP + * 1 SCSI + * 2 PCI-EISA bridge + * 3 none + * 4 none + * 5 none + * 6 PCI on board slot 0 + * 7 PCI on board slot 1 + * 8 PCI on board slot 2 + * + * + * This two layered interrupt approach means that we allocate IRQ 16 and + * above for PCI interrupts. The IRQ relates to which bit the interrupt + * comes in on. This makes interrupt processing much easier. 
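+ *
+ * E.g. the built-in TULIP (IdSel 0) requests on summary bit 2, and
+ * mask_to_irq[2] above is 32 = 32+0, matching its irq_tab entry below;
+ * on-board slot 0 (IdSel 6) is summary bit 0 -> kernel IRQ 34 = 32+2.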
+ */ +/* + * NOTE: the IRQ assignments below are arbitrary, but need to be consistent + * with the values in the irq swizzling tables above. + */ + +static int __init +sable_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + static char irq_tab[9][5] __initdata = { + /*INT INTA INTB INTC INTD */ + { 32+0, 32+0, 32+0, 32+0, 32+0}, /* IdSel 0, TULIP */ + { 32+1, 32+1, 32+1, 32+1, 32+1}, /* IdSel 1, SCSI */ + { -1, -1, -1, -1, -1}, /* IdSel 2, SIO */ + { -1, -1, -1, -1, -1}, /* IdSel 3, none */ + { -1, -1, -1, -1, -1}, /* IdSel 4, none */ + { -1, -1, -1, -1, -1}, /* IdSel 5, none */ + { 32+2, 32+2, 32+2, 32+2, 32+2}, /* IdSel 6, slot 0 */ + { 32+3, 32+3, 32+3, 32+3, 32+3}, /* IdSel 7, slot 1 */ + { 32+4, 32+4, 32+4, 32+4, 32+4} /* IdSel 8, slot 2 */ + }; + long min_idsel = 0, max_idsel = 8, irqs_per_slot = 5; + return COMMON_TABLE_LOOKUP; +} +#endif /* defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SABLE) */ + +#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_LYNX) + +/***********************************************************************/ +/* LYNX hardware specifics + */ +/* + * For LYNX, which is also baroque, we manage 64 IRQs, via a custom IC. + * + * Bit Meaning Kernel IRQ + *------------------------------------------ + * 0 + * 1 + * 2 + * 3 mouse 12 + * 4 + * 5 + * 6 keyboard 1 + * 7 floppy 6 + * 8 COM2 3 + * 9 parallel port 7 + *10 EISA irq 3 - + *11 EISA irq 4 - + *12 EISA irq 5 5 + *13 EISA irq 6 - + *14 EISA irq 7 - + *15 COM1 4 + *16 EISA irq 9 9 + *17 EISA irq 10 10 + *18 EISA irq 11 11 + *19 EISA irq 12 - + *20 + *21 EISA irq 14 14 + *22 EISA irq 15 15 + *23 IIC - + *24 VGA (builtin) - + *25 + *26 + *27 + *28 NCR810 (builtin) 28 + *29 + *30 + *31 + *32 PCI 0 slot 4 A primary bus 32 + *33 PCI 0 slot 4 B primary bus 33 + *34 PCI 0 slot 4 C primary bus 34 + *35 PCI 0 slot 4 D primary bus + *36 PCI 0 slot 5 A primary bus + *37 PCI 0 slot 5 B primary bus + *38 PCI 0 slot 5 C primary bus + *39 PCI 0 slot 5 D primary bus + *40 PCI 0 slot 6 A primary bus + *41 PCI 0 slot 6 B primary bus + *42 PCI 0 slot 6 C primary bus + *43 PCI 0 slot 6 D primary bus + *44 PCI 0 slot 7 A primary bus + *45 PCI 0 slot 7 B primary bus + *46 PCI 0 slot 7 C primary bus + *47 PCI 0 slot 7 D primary bus + *48 PCI 0 slot 0 A secondary bus + *49 PCI 0 slot 0 B secondary bus + *50 PCI 0 slot 0 C secondary bus + *51 PCI 0 slot 0 D secondary bus + *52 PCI 0 slot 1 A secondary bus + *53 PCI 0 slot 1 B secondary bus + *54 PCI 0 slot 1 C secondary bus + *55 PCI 0 slot 1 D secondary bus + *56 PCI 0 slot 2 A secondary bus + *57 PCI 0 slot 2 B secondary bus + *58 PCI 0 slot 2 C secondary bus + *59 PCI 0 slot 2 D secondary bus + *60 PCI 0 slot 3 A secondary bus + *61 PCI 0 slot 3 B secondary bus + *62 PCI 0 slot 3 C secondary bus + *63 PCI 0 slot 3 D secondary bus + */ + +static void +lynx_update_irq_hw(unsigned long bit, unsigned long mask) +{ + /* + * Write the AIR register on the T3/T4 with the + * address of the IC mask register (offset 0x40) + */ + *(vulp)T2_AIR = 0x40; + mb(); + *(vulp)T2_AIR; /* re-read to force write */ + mb(); + *(vulp)T2_DIR = mask; + mb(); + mb(); +} + +static void +lynx_ack_irq_hw(unsigned long bit) +{ + *(vulp)T2_VAR = (u_long) bit; + mb(); + mb(); +} + +static irq_swizzle_t lynx_irq_swizzle = { + { /* irq_to_mask */ + -1, 6, -1, 8, 15, 12, 7, 9, /* pseudo PIC 0-7 */ + -1, 16, 17, 18, 3, -1, 21, 22, /* pseudo PIC 8-15 */ + -1, -1, -1, -1, -1, -1, -1, -1, /* pseudo */ + -1, -1, -1, -1, 28, -1, -1, -1, /* pseudo */ + 32, 33, 34, 35, 36, 37, 38, 39, /* mask 32-39 */ + 40, 41, 
42, 43, 44, 45, 46, 47, /* mask 40-47 */ + 48, 49, 50, 51, 52, 53, 54, 55, /* mask 48-55 */ + 56, 57, 58, 59, 60, 61, 62, 63 /* mask 56-63 */ + }, + { /* mask_to_irq */ + -1, -1, -1, 12, -1, -1, 1, 6, /* mask 0-7 */ + 3, 7, -1, -1, 5, -1, -1, 4, /* mask 8-15 */ + 9, 10, 11, -1, -1, 14, 15, -1, /* mask 16-23 */ + -1, -1, -1, -1, 28, -1, -1, -1, /* mask 24-31 */ + 32, 33, 34, 35, 36, 37, 38, 39, /* mask 32-39 */ + 40, 41, 42, 43, 44, 45, 46, 47, /* mask 40-47 */ + 48, 49, 50, 51, 52, 53, 54, 55, /* mask 48-55 */ + 56, 57, 58, 59, 60, 61, 62, 63 /* mask 56-63 */ + }, + -1, + lynx_update_irq_hw, + lynx_ack_irq_hw +}; + +static void __init +lynx_init_irq(void) +{ + sable_lynx_irq_swizzle = &lynx_irq_swizzle; + sable_lynx_init_irq(64); +} + +/* + * PCI Fixup configuration for ALPHA LYNX (2100A) + * + * The device to slot mapping looks like: + * + * Slot Device + * 0 none + * 1 none + * 2 PCI-EISA bridge + * 3 PCI-PCI bridge + * 4 NCR 810 (Demi-Lynx only) + * 5 none + * 6 PCI on board slot 4 + * 7 PCI on board slot 5 + * 8 PCI on board slot 6 + * 9 PCI on board slot 7 + * + * And behind the PPB we have: + * + * 11 PCI on board slot 0 + * 12 PCI on board slot 1 + * 13 PCI on board slot 2 + * 14 PCI on board slot 3 + */ +/* + * NOTE: the IRQ assignments below are arbitrary, but need to be consistent + * with the values in the irq swizzling tables above. + */ + +static int __init +lynx_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + static char irq_tab[19][5] __initdata = { + /*INT INTA INTB INTC INTD */ + { -1, -1, -1, -1, -1}, /* IdSel 13, PCEB */ + { -1, -1, -1, -1, -1}, /* IdSel 14, PPB */ + { 28, 28, 28, 28, 28}, /* IdSel 15, NCR demi */ + { -1, -1, -1, -1, -1}, /* IdSel 16, none */ + { 32, 32, 33, 34, 35}, /* IdSel 17, slot 4 */ + { 36, 36, 37, 38, 39}, /* IdSel 18, slot 5 */ + { 40, 40, 41, 42, 43}, /* IdSel 19, slot 6 */ + { 44, 44, 45, 46, 47}, /* IdSel 20, slot 7 */ + { -1, -1, -1, -1, -1}, /* IdSel 22, none */ + /* The following are actually behind the PPB. */ + { -1, -1, -1, -1, -1}, /* IdSel 16 none */ + { 28, 28, 28, 28, 28}, /* IdSel 17 NCR lynx */ + { -1, -1, -1, -1, -1}, /* IdSel 18 none */ + { -1, -1, -1, -1, -1}, /* IdSel 19 none */ + { -1, -1, -1, -1, -1}, /* IdSel 20 none */ + { -1, -1, -1, -1, -1}, /* IdSel 21 none */ + { 48, 48, 49, 50, 51}, /* IdSel 22 slot 0 */ + { 52, 52, 53, 54, 55}, /* IdSel 23 slot 1 */ + { 56, 56, 57, 58, 59}, /* IdSel 24 slot 2 */ + { 60, 60, 61, 62, 63} /* IdSel 25 slot 3 */ + }; + const long min_idsel = 2, max_idsel = 20, irqs_per_slot = 5; + return COMMON_TABLE_LOOKUP; +} + +static u8 __init +lynx_swizzle(struct pci_dev *dev, u8 *pinp) +{ + int slot, pin = *pinp; + + if (dev->bus->number == 0) { + slot = PCI_SLOT(dev->devfn); + } + /* Check for the built-in bridge */ + else if (PCI_SLOT(dev->bus->self->devfn) == 3) { + slot = PCI_SLOT(dev->devfn) + 11; + } + else + { + /* Must be a card-based bridge. */ + do { + if (PCI_SLOT(dev->bus->self->devfn) == 3) { + slot = PCI_SLOT(dev->devfn) + 11; + break; + } + pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)) ; + + /* Move up the chain of bridges. */ + dev = dev->bus->self; + /* Slot of the next bridge. 
*/ + slot = PCI_SLOT(dev->devfn); + } while (dev->bus->self); + } + *pinp = pin; + return slot; +} + +#endif /* defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_LYNX) */ + +/***********************************************************************/ +/* GENERIC irq routines */ + +static inline void +sable_lynx_enable_irq(unsigned int irq) +{ + unsigned long bit, mask; + + bit = sable_lynx_irq_swizzle->irq_to_mask[irq]; + spin_lock(&sable_lynx_irq_lock); + mask = sable_lynx_irq_swizzle->shadow_mask &= ~(1UL << bit); + sable_lynx_irq_swizzle->update_irq_hw(bit, mask); + spin_unlock(&sable_lynx_irq_lock); +#if 0 + printk("%s: mask 0x%lx bit 0x%x irq 0x%x\n", + __FUNCTION__, mask, bit, irq); +#endif +} + +static void +sable_lynx_disable_irq(unsigned int irq) +{ + unsigned long bit, mask; + + bit = sable_lynx_irq_swizzle->irq_to_mask[irq]; + spin_lock(&sable_lynx_irq_lock); + mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit; + sable_lynx_irq_swizzle->update_irq_hw(bit, mask); + spin_unlock(&sable_lynx_irq_lock); +#if 0 + printk("%s: mask 0x%lx bit 0x%x irq 0x%x\n", + __FUNCTION__, mask, bit, irq); +#endif +} + +static unsigned int +sable_lynx_startup_irq(unsigned int irq) +{ + sable_lynx_enable_irq(irq); + return 0; +} + +static void +sable_lynx_end_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + sable_lynx_enable_irq(irq); +} + +static void +sable_lynx_mask_and_ack_irq(unsigned int irq) +{ + unsigned long bit, mask; + + bit = sable_lynx_irq_swizzle->irq_to_mask[irq]; + spin_lock(&sable_lynx_irq_lock); + mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit; + sable_lynx_irq_swizzle->update_irq_hw(bit, mask); + sable_lynx_irq_swizzle->ack_irq_hw(bit); + spin_unlock(&sable_lynx_irq_lock); +} + +static struct hw_interrupt_type sable_lynx_irq_type = { + .typename = "SABLE/LYNX", + .startup = sable_lynx_startup_irq, + .shutdown = sable_lynx_disable_irq, + .enable = sable_lynx_enable_irq, + .disable = sable_lynx_disable_irq, + .ack = sable_lynx_mask_and_ack_irq, + .end = sable_lynx_end_irq, +}; + +static void +sable_lynx_srm_device_interrupt(unsigned long vector, struct pt_regs * regs) +{ + /* Note that the vector reported by the SRM PALcode corresponds + to the interrupt mask bits, but we have to manage via the + so-called legacy IRQs for many common devices. */ + + int bit, irq; + + bit = (vector - 0x800) >> 4; + irq = sable_lynx_irq_swizzle->mask_to_irq[bit]; +#if 0 + printk("%s: vector 0x%lx bit 0x%x irq 0x%x\n", + __FUNCTION__, vector, bit, irq); +#endif + handle_irq(irq, regs); +} + +static void __init +sable_lynx_init_irq(int nr_irqs) +{ + long i; + + for (i = 0; i < nr_irqs; ++i) { + irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; + irq_desc[i].handler = &sable_lynx_irq_type; + } + + common_init_isa_dma(); +} + +static void __init +sable_lynx_init_pci(void) +{ + common_init_pci(); +} + +/*****************************************************************/ +/* + * The System Vectors + * + * In order that T2_HAE_ADDRESS should be a constant, we play + * these games with GAMMA_BIAS. 
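+ *
+ * Each machine vector below #undefs GAMMA_BIAS and redefines it (0
+ * for plain Sable, _GAMMA_BIAS for Sable-Gamma and Lynx) just before
+ * its definition, so the T2 macros, and thus T2_HAE_ADDRESS, still
+ * fold to compile-time constants.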
+ */ + +#if defined(CONFIG_ALPHA_GENERIC) || \ + (defined(CONFIG_ALPHA_SABLE) && !defined(CONFIG_ALPHA_GAMMA)) +#undef GAMMA_BIAS +#define GAMMA_BIAS 0 +struct alpha_machine_vector sable_mv __initmv = { + .vector_name = "Sable", + DO_EV4_MMU, + DO_DEFAULT_RTC, + DO_T2_IO, + .machine_check = t2_machine_check, + .max_isa_dma_address = ALPHA_SABLE_MAX_ISA_DMA_ADDRESS, + .min_io_address = EISA_DEFAULT_IO_BASE, + .min_mem_address = T2_DEFAULT_MEM_BASE, + + .nr_irqs = 40, + .device_interrupt = sable_lynx_srm_device_interrupt, + + .init_arch = t2_init_arch, + .init_irq = sable_init_irq, + .init_rtc = common_init_rtc, + .init_pci = sable_lynx_init_pci, + .kill_arch = t2_kill_arch, + .pci_map_irq = sable_map_irq, + .pci_swizzle = common_swizzle, + + .sys = { .t2 = { + .gamma_bias = 0 + } } +}; +ALIAS_MV(sable) +#endif /* GENERIC || (SABLE && !GAMMA) */ + +#if defined(CONFIG_ALPHA_GENERIC) || \ + (defined(CONFIG_ALPHA_SABLE) && defined(CONFIG_ALPHA_GAMMA)) +#undef GAMMA_BIAS +#define GAMMA_BIAS _GAMMA_BIAS +struct alpha_machine_vector sable_gamma_mv __initmv = { + .vector_name = "Sable-Gamma", + DO_EV5_MMU, + DO_DEFAULT_RTC, + DO_T2_IO, + .machine_check = t2_machine_check, + .max_isa_dma_address = ALPHA_SABLE_MAX_ISA_DMA_ADDRESS, + .min_io_address = EISA_DEFAULT_IO_BASE, + .min_mem_address = T2_DEFAULT_MEM_BASE, + + .nr_irqs = 40, + .device_interrupt = sable_lynx_srm_device_interrupt, + + .init_arch = t2_init_arch, + .init_irq = sable_init_irq, + .init_rtc = common_init_rtc, + .init_pci = sable_lynx_init_pci, + .kill_arch = t2_kill_arch, + .pci_map_irq = sable_map_irq, + .pci_swizzle = common_swizzle, + + .sys = { .t2 = { + .gamma_bias = _GAMMA_BIAS + } } +}; +ALIAS_MV(sable_gamma) +#endif /* GENERIC || (SABLE && GAMMA) */ + +#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_LYNX) +#undef GAMMA_BIAS +#define GAMMA_BIAS _GAMMA_BIAS +struct alpha_machine_vector lynx_mv __initmv = { + .vector_name = "Lynx", + DO_EV4_MMU, + DO_DEFAULT_RTC, + DO_T2_IO, + .machine_check = t2_machine_check, + .max_isa_dma_address = ALPHA_SABLE_MAX_ISA_DMA_ADDRESS, + .min_io_address = EISA_DEFAULT_IO_BASE, + .min_mem_address = T2_DEFAULT_MEM_BASE, + + .nr_irqs = 64, + .device_interrupt = sable_lynx_srm_device_interrupt, + + .init_arch = t2_init_arch, + .init_irq = lynx_init_irq, + .init_rtc = common_init_rtc, + .init_pci = sable_lynx_init_pci, + .kill_arch = t2_kill_arch, + .pci_map_irq = lynx_map_irq, + .pci_swizzle = lynx_swizzle, + + .sys = { .t2 = { + .gamma_bias = _GAMMA_BIAS + } } +}; +ALIAS_MV(lynx) +#endif /* GENERIC || LYNX */ diff --git a/arch/alpha/kernel/sys_sio.c b/arch/alpha/kernel/sys_sio.c new file mode 100644 index 0000000..47df48a --- /dev/null +++ b/arch/alpha/kernel/sys_sio.c @@ -0,0 +1,438 @@ +/* + * linux/arch/alpha/kernel/sys_sio.c + * + * Copyright (C) 1995 David A Rusling + * Copyright (C) 1996 Jay A Estabrook + * Copyright (C) 1998, 1999 Richard Henderson + * + * Code for all boards that route the PCI interrupts through the SIO + * PCI/ISA bridge. This includes Noname (AXPpci33), Multia (UDB), + * Kenetics's Platform 2000, Avanti (AlphaStation), XL, and AlphaBook1. 
+ */ + +#include <linux/config.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/tty.h> + +#include <asm/compiler.h> +#include <asm/ptrace.h> +#include <asm/system.h> +#include <asm/dma.h> +#include <asm/irq.h> +#include <asm/mmu_context.h> +#include <asm/io.h> +#include <asm/pgtable.h> +#include <asm/core_apecs.h> +#include <asm/core_lca.h> +#include <asm/tlbflush.h> + +#include "proto.h" +#include "irq_impl.h" +#include "pci_impl.h" +#include "machvec_impl.h" + +#if defined(ALPHA_RESTORE_SRM_SETUP) +/* Save LCA configuration data as the console had it set up. */ +struct +{ + unsigned int orig_route_tab; /* for SAVE/RESTORE */ +} saved_config __attribute((common)); +#endif + + +static void __init +sio_init_irq(void) +{ + if (alpha_using_srm) + alpha_mv.device_interrupt = srm_device_interrupt; + + init_i8259a_irqs(); + common_init_isa_dma(); +} + +static inline void __init +alphabook1_init_arch(void) +{ + /* The AlphaBook1 has LCD video fixed at 800x600, + 37 rows and 100 cols. */ + screen_info.orig_y = 37; + screen_info.orig_video_cols = 100; + screen_info.orig_video_lines = 37; + + lca_init_arch(); +} + + +/* + * sio_route_tab selects irq routing in PCI/ISA bridge so that: + * PIRQ0 -> irq 15 + * PIRQ1 -> irq 9 + * PIRQ2 -> irq 10 + * PIRQ3 -> irq 11 + * + * This probably ought to be configurable via MILO. For + * example, sound boards seem to like using IRQ 9. + * + * This is NOT how we should do it. PIRQ0-X should have + * their own IRQ's, the way intel uses the IO-APIC irq's. + */ + +static void __init +sio_pci_route(void) +{ +#if defined(ALPHA_RESTORE_SRM_SETUP) + /* First, read and save the original setting. */ + pci_bus_read_config_dword(pci_isa_hose->bus, PCI_DEVFN(7, 0), 0x60, + &saved_config.orig_route_tab); + printk("%s: PIRQ original 0x%x new 0x%x\n", __FUNCTION__, + saved_config.orig_route_tab, alpha_mv.sys.sio.route_tab); +#endif + + /* Now override with desired setting. */ + pci_bus_write_config_dword(pci_isa_hose->bus, PCI_DEVFN(7, 0), 0x60, + alpha_mv.sys.sio.route_tab); +} + +static unsigned int __init +sio_collect_irq_levels(void) +{ + unsigned int level_bits = 0; + struct pci_dev *dev = NULL; + + /* Iterate through the devices, collecting IRQ levels. */ + while ((dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { + if ((dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) && + (dev->class >> 8 != PCI_CLASS_BRIDGE_PCMCIA)) + continue; + + if (dev->irq) + level_bits |= (1 << dev->irq); + } + return level_bits; +} + +static void __init +sio_fixup_irq_levels(unsigned int level_bits) +{ + unsigned int old_level_bits; + + /* + * Now, make all PCI interrupts level sensitive. Notice: + * these registers must be accessed byte-wise. inw()/outw() + * don't work. + * + * Make sure to turn off any level bits set for IRQs 9,10,11,15, + * so that the only bits getting set are for devices actually found. + * Note that we do preserve the remainder of the bits, which we hope + * will be set correctly by ARC/SRM. 
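+ * (0x71ff keeps ELCR bits 0-8 and 12-14; only the level bits for
+ * IRQs 9, 10, 11 and 15 start out cleared before the collected PCI
+ * levels are OR'd back in.)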
+ * + * Note: we at least preserve any level-set bits on AlphaBook1 + */ + old_level_bits = inb(0x4d0) | (inb(0x4d1) << 8); + + level_bits |= (old_level_bits & 0x71ff); + + outb((level_bits >> 0) & 0xff, 0x4d0); + outb((level_bits >> 8) & 0xff, 0x4d1); +} + +static inline int __init +noname_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + /* + * The Noname board has 5 PCI slots with each of the 4 + * interrupt pins routed to different pins on the PCI/ISA + * bridge (PIRQ0-PIRQ3). The table below is based on + * information available at: + * + * http://ftp.digital.com/pub/DEC/axppci/ref_interrupts.txt + * + * I have no information on the Avanti interrupt routing, but + * the routing seems to be identical to the Noname except + * that the Avanti has an additional slot whose routing I'm + * unsure of. + * + * pirq_tab[0] is a fake entry to deal with old PCI boards + * that have the interrupt pin number hardwired to 0 (meaning + * that they use the default INTA line, if they are interrupt + * driven at all). + */ + static char irq_tab[][5] __initdata = { + /*INT A B C D */ + { 3, 3, 3, 3, 3}, /* idsel 6 (53c810) */ + {-1, -1, -1, -1, -1}, /* idsel 7 (SIO: PCI/ISA bridge) */ + { 2, 2, -1, -1, -1}, /* idsel 8 (Hack: slot closest ISA) */ + {-1, -1, -1, -1, -1}, /* idsel 9 (unused) */ + {-1, -1, -1, -1, -1}, /* idsel 10 (unused) */ + { 0, 0, 2, 1, 0}, /* idsel 11 KN25_PCI_SLOT0 */ + { 1, 1, 0, 2, 1}, /* idsel 12 KN25_PCI_SLOT1 */ + { 2, 2, 1, 0, 2}, /* idsel 13 KN25_PCI_SLOT2 */ + { 0, 0, 0, 0, 0}, /* idsel 14 AS255 TULIP */ + }; + const long min_idsel = 6, max_idsel = 14, irqs_per_slot = 5; + int irq = COMMON_TABLE_LOOKUP, tmp; + tmp = __kernel_extbl(alpha_mv.sys.sio.route_tab, irq); + return irq >= 0 ? tmp : -1; +} + +static inline int __init +p2k_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + static char irq_tab[][5] __initdata = { + /*INT A B C D */ + { 0, 0, -1, -1, -1}, /* idsel 6 (53c810) */ + {-1, -1, -1, -1, -1}, /* idsel 7 (SIO: PCI/ISA bridge) */ + { 1, 1, 2, 3, 0}, /* idsel 8 (slot A) */ + { 2, 2, 3, 0, 1}, /* idsel 9 (slot B) */ + {-1, -1, -1, -1, -1}, /* idsel 10 (unused) */ + {-1, -1, -1, -1, -1}, /* idsel 11 (unused) */ + { 3, 3, -1, -1, -1}, /* idsel 12 (CMD0646) */ + }; + const long min_idsel = 6, max_idsel = 12, irqs_per_slot = 5; + int irq = COMMON_TABLE_LOOKUP, tmp; + tmp = __kernel_extbl(alpha_mv.sys.sio.route_tab, irq); + return irq >= 0 ? tmp : -1; +} + +static inline void __init +noname_init_pci(void) +{ + common_init_pci(); + sio_pci_route(); + sio_fixup_irq_levels(sio_collect_irq_levels()); + ns87312_enable_ide(0x26e); +} + +static inline void __init +alphabook1_init_pci(void) +{ + struct pci_dev *dev; + unsigned char orig, config; + + common_init_pci(); + sio_pci_route(); + + /* + * On the AlphaBook1, the PCMCIA chip (Cirrus 6729) + * is sensitive to PCI bus bursts, so we must DISABLE + * burst mode for the NCR 8xx SCSI... :-( + * + * Note that the NCR810 SCSI driver must preserve the + * setting of the bit in order for this to work. At the + * moment (2.0.29), ncr53c8xx.c does NOT do this, but + * 53c7,8xx.c DOES. 
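+ *
+ * (The bit in question is CTEST4.BDIS, mask 0x80, read and written
+ * at io_port + 0x21 below.)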
+ */ + + dev = NULL; + while ((dev = pci_find_device(PCI_VENDOR_ID_NCR, PCI_ANY_ID, dev))) { + if (dev->device == PCI_DEVICE_ID_NCR_53C810 + || dev->device == PCI_DEVICE_ID_NCR_53C815 + || dev->device == PCI_DEVICE_ID_NCR_53C820 + || dev->device == PCI_DEVICE_ID_NCR_53C825) { + unsigned long io_port; + unsigned char ctest4; + + io_port = dev->resource[0].start; + ctest4 = inb(io_port+0x21); + if (!(ctest4 & 0x80)) { + printk("AlphaBook1 NCR init: setting" + " burst disable\n"); + outb(ctest4 | 0x80, io_port+0x21); + } + } + } + + /* Do not set *ANY* level triggers for AlphaBook1. */ + sio_fixup_irq_levels(0); + + /* Make sure that register PR1 indicates 1Mb mem */ + outb(0x0f, 0x3ce); orig = inb(0x3cf); /* read PR5 */ + outb(0x0f, 0x3ce); outb(0x05, 0x3cf); /* unlock PR0-4 */ + outb(0x0b, 0x3ce); config = inb(0x3cf); /* read PR1 */ + if ((config & 0xc0) != 0xc0) { + printk("AlphaBook1 VGA init: setting 1Mb memory\n"); + config |= 0xc0; + outb(0x0b, 0x3ce); outb(config, 0x3cf); /* write PR1 */ + } + outb(0x0f, 0x3ce); outb(orig, 0x3cf); /* (re)lock PR0-4 */ +} + +void +sio_kill_arch(int mode) +{ +#if defined(ALPHA_RESTORE_SRM_SETUP) + /* Since we cannot read the PCI DMA Window CSRs, we + * cannot restore them here. + * + * However, we CAN read the PIRQ route register, so restore it + * now... + */ + pci_bus_write_config_dword(pci_isa_hose->bus, PCI_DEVFN(7, 0), 0x60, + saved_config.orig_route_tab); +#endif +} + + +/* + * The System Vectors + */ + +#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_BOOK1) +struct alpha_machine_vector alphabook1_mv __initmv = { + .vector_name = "AlphaBook1", + DO_EV4_MMU, + DO_DEFAULT_RTC, + DO_LCA_IO, + .machine_check = lca_machine_check, + .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, + .min_io_address = DEFAULT_IO_BASE, + .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE, + + .nr_irqs = 16, + .device_interrupt = isa_device_interrupt, + + .init_arch = alphabook1_init_arch, + .init_irq = sio_init_irq, + .init_rtc = common_init_rtc, + .init_pci = alphabook1_init_pci, + .kill_arch = sio_kill_arch, + .pci_map_irq = noname_map_irq, + .pci_swizzle = common_swizzle, + + .sys = { .sio = { + /* NCR810 SCSI is 14, PCMCIA controller is 15. 
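+ Decoded byte-wise (PIRQ0 is the low byte): PIRQ0 -> IRQ 10,
+ PIRQ1 -> IRQ 10, PIRQ2 -> IRQ 15, PIRQ3 -> IRQ 14.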
*/ + .route_tab = 0x0e0f0a0a, + }} +}; +ALIAS_MV(alphabook1) +#endif + +#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_AVANTI) +struct alpha_machine_vector avanti_mv __initmv = { + .vector_name = "Avanti", + DO_EV4_MMU, + DO_DEFAULT_RTC, + DO_APECS_IO, + .machine_check = apecs_machine_check, + .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, + .min_io_address = DEFAULT_IO_BASE, + .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE, + + .nr_irqs = 16, + .device_interrupt = isa_device_interrupt, + + .init_arch = apecs_init_arch, + .init_irq = sio_init_irq, + .init_rtc = common_init_rtc, + .init_pci = noname_init_pci, + .kill_arch = sio_kill_arch, + .pci_map_irq = noname_map_irq, + .pci_swizzle = common_swizzle, + + .sys = { .sio = { + .route_tab = 0x0b0a0e0f, + }} +}; +ALIAS_MV(avanti) +#endif + +#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_NONAME) +struct alpha_machine_vector noname_mv __initmv = { + .vector_name = "Noname", + DO_EV4_MMU, + DO_DEFAULT_RTC, + DO_LCA_IO, + .machine_check = lca_machine_check, + .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, + .min_io_address = DEFAULT_IO_BASE, + .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE, + + .nr_irqs = 16, + .device_interrupt = srm_device_interrupt, + + .init_arch = lca_init_arch, + .init_irq = sio_init_irq, + .init_rtc = common_init_rtc, + .init_pci = noname_init_pci, + .kill_arch = sio_kill_arch, + .pci_map_irq = noname_map_irq, + .pci_swizzle = common_swizzle, + + .sys = { .sio = { + /* For UDB, the only available PCI slot must not map to IRQ 9, + since that's the builtin MSS sound chip. That PCI slot + will map to PIRQ1 (for INTA at least), so we give it IRQ 15 + instead. + + Unfortunately we have to do this for NONAME as well, since + they are co-indicated when the platform type "Noname" is + selected... 
:-( */ + + .route_tab = 0x0b0a0f0d, + }} +}; +ALIAS_MV(noname) +#endif + +#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_P2K) +struct alpha_machine_vector p2k_mv __initmv = { + .vector_name = "Platform2000", + DO_EV4_MMU, + DO_DEFAULT_RTC, + DO_LCA_IO, + .machine_check = lca_machine_check, + .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, + .min_io_address = DEFAULT_IO_BASE, + .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE, + + .nr_irqs = 16, + .device_interrupt = srm_device_interrupt, + + .init_arch = lca_init_arch, + .init_irq = sio_init_irq, + .init_rtc = common_init_rtc, + .init_pci = noname_init_pci, + .kill_arch = sio_kill_arch, + .pci_map_irq = p2k_map_irq, + .pci_swizzle = common_swizzle, + + .sys = { .sio = { + .route_tab = 0x0b0a090f, + }} +}; +ALIAS_MV(p2k) +#endif + +#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_XL) +struct alpha_machine_vector xl_mv __initmv = { + .vector_name = "XL", + DO_EV4_MMU, + DO_DEFAULT_RTC, + DO_APECS_IO, + .machine_check = apecs_machine_check, + .max_isa_dma_address = ALPHA_XL_MAX_ISA_DMA_ADDRESS, + .min_io_address = DEFAULT_IO_BASE, + .min_mem_address = XL_DEFAULT_MEM_BASE, + + .nr_irqs = 16, + .device_interrupt = isa_device_interrupt, + + .init_arch = apecs_init_arch, + .init_irq = sio_init_irq, + .init_rtc = common_init_rtc, + .init_pci = noname_init_pci, + .kill_arch = sio_kill_arch, + .pci_map_irq = noname_map_irq, + .pci_swizzle = common_swizzle, + + .sys = { .sio = { + .route_tab = 0x0b0a090f, + }} +}; +ALIAS_MV(xl) +#endif diff --git a/arch/alpha/kernel/sys_sx164.c b/arch/alpha/kernel/sys_sx164.c new file mode 100644 index 0000000..94ad68b --- /dev/null +++ b/arch/alpha/kernel/sys_sx164.c @@ -0,0 +1,178 @@ +/* + * linux/arch/alpha/kernel/sys_sx164.c + * + * Copyright (C) 1995 David A Rusling + * Copyright (C) 1996 Jay A Estabrook + * Copyright (C) 1998, 1999, 2000 Richard Henderson + * + * Code supporting the SX164 (PCA56+PYXIS). + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/bitops.h> + +#include <asm/ptrace.h> +#include <asm/system.h> +#include <asm/dma.h> +#include <asm/irq.h> +#include <asm/mmu_context.h> +#include <asm/io.h> +#include <asm/pgtable.h> +#include <asm/core_cia.h> +#include <asm/hwrpb.h> +#include <asm/tlbflush.h> + +#include "proto.h" +#include "irq_impl.h" +#include "pci_impl.h" +#include "machvec_impl.h" + + +static void __init +sx164_init_irq(void) +{ + outb(0, DMA1_RESET_REG); + outb(0, DMA2_RESET_REG); + outb(DMA_MODE_CASCADE, DMA2_MODE_REG); + outb(0, DMA2_MASK_REG); + + if (alpha_using_srm) + alpha_mv.device_interrupt = srm_device_interrupt; + + init_i8259a_irqs(); + + /* Not interested in the bogus interrupts (0,3,4,5,40-47), + NMI (1), or HALT (2). */ + if (alpha_using_srm) + init_srm_irqs(40, 0x3f0000); + else + init_pyxis_irqs(0xff00003f0000UL); + + setup_irq(16+6, &timer_cascade_irqaction); +} + +/* + * PCI Fixup configuration. 
+ * + * Summary @ PYXIS_INT_REQ: + * Bit Meaning + * 0 RSVD + * 1 NMI + * 2 Halt/Reset switch + * 3 MBZ + * 4 RAZ + * 5 RAZ + * 6 Interval timer (RTC) + * 7 PCI-ISA Bridge + * 8 Interrupt Line A from slot 3 + * 9 Interrupt Line A from slot 2 + *10 Interrupt Line A from slot 1 + *11 Interrupt Line A from slot 0 + *12 Interrupt Line B from slot 3 + *13 Interrupt Line B from slot 2 + *14 Interrupt Line B from slot 1 + *15 Interrupt line B from slot 0 + *16 Interrupt Line C from slot 3 + *17 Interrupt Line C from slot 2 + *18 Interrupt Line C from slot 1 + *19 Interrupt Line C from slot 0 + *20 Interrupt Line D from slot 3 + *21 Interrupt Line D from slot 2 + *22 Interrupt Line D from slot 1 + *23 Interrupt Line D from slot 0 + * + * IdSel + * 5 32 bit PCI option slot 2 + * 6 64 bit PCI option slot 0 + * 7 64 bit PCI option slot 1 + * 8 Cypress I/O + * 9 32 bit PCI option slot 3 + */ + +static int __init +sx164_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + static char irq_tab[5][5] __initdata = { + /*INT INTA INTB INTC INTD */ + { 16+ 9, 16+ 9, 16+13, 16+17, 16+21}, /* IdSel 5 slot 2 J17 */ + { 16+11, 16+11, 16+15, 16+19, 16+23}, /* IdSel 6 slot 0 J19 */ + { 16+10, 16+10, 16+14, 16+18, 16+22}, /* IdSel 7 slot 1 J18 */ + { -1, -1, -1, -1, -1}, /* IdSel 8 SIO */ + { 16+ 8, 16+ 8, 16+12, 16+16, 16+20} /* IdSel 9 slot 3 J15 */ + }; + const long min_idsel = 5, max_idsel = 9, irqs_per_slot = 5; + return COMMON_TABLE_LOOKUP; +} + +static void __init +sx164_init_pci(void) +{ + cia_init_pci(); + SMC669_Init(0); +} + +static void __init +sx164_init_arch(void) +{ + /* + * OSF palcode v1.23 forgets to enable PCA56 Motion Video + * Instructions. Let's enable it. + * We have to check palcode revision because CSERVE interface + * is subject to change without notice. For example, it + * has been changed completely since v1.16 (found in MILO + * distribution). 
-ink + */ + struct percpu_struct *cpu = (struct percpu_struct*) + ((char*)hwrpb + hwrpb->processor_offset); + + if (amask(AMASK_MAX) != 0 + && alpha_using_srm + && (cpu->pal_revision & 0xffff) == 0x117) { + __asm__ __volatile__( + "lda $16,8($31)\n" + "call_pal 9\n" /* Allow PALRES insns in kernel mode */ + ".long 0x64000118\n\n" /* hw_mfpr $0,icsr */ + "ldah $16,(1<<(19-16))($31)\n" + "or $0,$16,$0\n" /* set MVE bit */ + ".long 0x74000118\n" /* hw_mtpr $0,icsr */ + "lda $16,9($31)\n" + "call_pal 9" /* Disable PALRES insns */ + : : : "$0", "$16"); + printk("PCA56 MVI set enabled\n"); + } + + pyxis_init_arch(); +} + +/* + * The System Vector + */ + +struct alpha_machine_vector sx164_mv __initmv = { + .vector_name = "SX164", + DO_EV5_MMU, + DO_DEFAULT_RTC, + DO_PYXIS_IO, + .machine_check = cia_machine_check, + .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, + .min_io_address = DEFAULT_IO_BASE, + .min_mem_address = DEFAULT_MEM_BASE, + .pci_dac_offset = PYXIS_DAC_OFFSET, + + .nr_irqs = 48, + .device_interrupt = pyxis_device_interrupt, + + .init_arch = sx164_init_arch, + .init_irq = sx164_init_irq, + .init_rtc = common_init_rtc, + .init_pci = sx164_init_pci, + .kill_arch = cia_kill_arch, + .pci_map_irq = sx164_map_irq, + .pci_swizzle = common_swizzle, +}; +ALIAS_MV(sx164) diff --git a/arch/alpha/kernel/sys_takara.c b/arch/alpha/kernel/sys_takara.c new file mode 100644 index 0000000..7955bdf --- /dev/null +++ b/arch/alpha/kernel/sys_takara.c @@ -0,0 +1,296 @@ +/* + * linux/arch/alpha/kernel/sys_takara.c + * + * Copyright (C) 1995 David A Rusling + * Copyright (C) 1996 Jay A Estabrook + * Copyright (C) 1998, 1999 Richard Henderson + * + * Code supporting the TAKARA. + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/pci.h> +#include <linux/init.h> + +#include <asm/ptrace.h> +#include <asm/system.h> +#include <asm/dma.h> +#include <asm/irq.h> +#include <asm/mmu_context.h> +#include <asm/io.h> +#include <asm/pgtable.h> +#include <asm/core_cia.h> +#include <asm/tlbflush.h> + +#include "proto.h" +#include "irq_impl.h" +#include "pci_impl.h" +#include "machvec_impl.h" + + +/* Note mask bit is true for DISABLED irqs. */ +static unsigned long cached_irq_mask[2] = { -1, -1 }; + +static inline void +takara_update_irq_hw(unsigned long irq, unsigned long mask) +{ + int regaddr; + + mask = (irq >= 64 ? 
mask << 16 : mask >> ((irq - 16) & 0x30)); + regaddr = 0x510 + (((irq - 16) >> 2) & 0x0c); + outl(mask & 0xffff0000UL, regaddr); +} + +static inline void +takara_enable_irq(unsigned int irq) +{ + unsigned long mask; + mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63))); + takara_update_irq_hw(irq, mask); +} + +static void +takara_disable_irq(unsigned int irq) +{ + unsigned long mask; + mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63)); + takara_update_irq_hw(irq, mask); +} + +static unsigned int +takara_startup_irq(unsigned int irq) +{ + takara_enable_irq(irq); + return 0; /* never anything pending */ +} + +static void +takara_end_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + takara_enable_irq(irq); +} + +static struct hw_interrupt_type takara_irq_type = { + .typename = "TAKARA", + .startup = takara_startup_irq, + .shutdown = takara_disable_irq, + .enable = takara_enable_irq, + .disable = takara_disable_irq, + .ack = takara_disable_irq, + .end = takara_end_irq, +}; + +static void +takara_device_interrupt(unsigned long vector, struct pt_regs *regs) +{ + unsigned intstatus; + + /* + * The PALcode will have passed us vectors 0x800 or 0x810, + * which are fairly arbitrary values and serve only to tell + * us whether an interrupt has come in on IRQ0 or IRQ1. If + * it's IRQ1 it's a PCI interrupt; if it's IRQ0, it's + * probably ISA, but PCI interrupts can come through IRQ0 + * as well if the interrupt controller isn't in accelerated + * mode. + * + * OTOH, the accelerator thing doesn't seem to be working + * overly well, so what we'll do instead is try directly + * examining the Master Interrupt Register to see if it's a + * PCI interrupt, and if _not_ then we'll pass it on to the + * ISA handler. + */ + + intstatus = inw(0x500) & 15; + if (intstatus) { + /* + * This is a PCI interrupt. Check each bit and + * despatch an interrupt if it's set. + */ + + if (intstatus & 8) handle_irq(16+3, regs); + if (intstatus & 4) handle_irq(16+2, regs); + if (intstatus & 2) handle_irq(16+1, regs); + if (intstatus & 1) handle_irq(16+0, regs); + } else { + isa_device_interrupt (vector, regs); + } +} + +static void +takara_srm_device_interrupt(unsigned long vector, struct pt_regs * regs) +{ + int irq = (vector - 0x800) >> 4; + handle_irq(irq, regs); +} + +static void __init +takara_init_irq(void) +{ + long i; + + init_i8259a_irqs(); + + if (alpha_using_srm) { + alpha_mv.device_interrupt = takara_srm_device_interrupt; + } else { + unsigned int ctlreg = inl(0x500); + + /* Return to non-accelerated mode. */ + ctlreg &= ~0x8000; + outl(ctlreg, 0x500); + + /* Enable the PCI interrupt register. */ + ctlreg = 0x05107c00; + outl(ctlreg, 0x500); + } + + for (i = 16; i < 128; i += 16) + takara_update_irq_hw(i, -1); + + for (i = 16; i < 128; ++i) { + irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; + irq_desc[i].handler = &takara_irq_type; + } + + common_init_isa_dma(); +} + + +/* + * The Takara has PCI devices 1, 2, and 3 configured to slots 20, + * 19, and 18 respectively, in the default configuration. They can + * also be jumpered to slots 8, 7, and 6 respectively, which is fun + * because the SIO ISA bridge can also be slot 7. However, the SIO + * doesn't explicitly generate PCI-type interrupts, so we can + * assign it whatever the hell IRQ we like and it doesn't matter. 
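+ *
+ * (Under SRM, takara_map_irq_srm below hands IdSels 17-20 the IRQ
+ * blocks 64+, 48+, 32+ and 16+; a device behind one of the built-in
+ * bridges first picks up a small IRQ from the IdSel 12-15 rows and
+ * is then offset into its parent bridge's block.)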
+ */ + +static int __init +takara_map_irq_srm(struct pci_dev *dev, u8 slot, u8 pin) +{ + static char irq_tab[15][5] __initdata = { + { 16+3, 16+3, 16+3, 16+3, 16+3}, /* slot 6 == device 3 */ + { 16+2, 16+2, 16+2, 16+2, 16+2}, /* slot 7 == device 2 */ + { 16+1, 16+1, 16+1, 16+1, 16+1}, /* slot 8 == device 1 */ + { -1, -1, -1, -1, -1}, /* slot 9 == nothing */ + { -1, -1, -1, -1, -1}, /* slot 10 == nothing */ + { -1, -1, -1, -1, -1}, /* slot 11 == nothing */ + /* These are behind the bridges. */ + { 12, 12, 13, 14, 15}, /* slot 12 == nothing */ + { 8, 8, 9, 19, 11}, /* slot 13 == nothing */ + { 4, 4, 5, 6, 7}, /* slot 14 == nothing */ + { 0, 0, 1, 2, 3}, /* slot 15 == nothing */ + { -1, -1, -1, -1, -1}, /* slot 16 == nothing */ + {64+ 0, 64+0, 64+1, 64+2, 64+3}, /* slot 17= device 4 */ + {48+ 0, 48+0, 48+1, 48+2, 48+3}, /* slot 18= device 3 */ + {32+ 0, 32+0, 32+1, 32+2, 32+3}, /* slot 19= device 2 */ + {16+ 0, 16+0, 16+1, 16+2, 16+3}, /* slot 20= device 1 */ + }; + const long min_idsel = 6, max_idsel = 20, irqs_per_slot = 5; + int irq = COMMON_TABLE_LOOKUP; + if (irq >= 0 && irq < 16) { + /* Guess that we are behind a bridge. */ + unsigned int busslot = PCI_SLOT(dev->bus->self->devfn); + irq += irq_tab[busslot-min_idsel][0]; + } + return irq; +} + +static int __init +takara_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + static char irq_tab[15][5] __initdata = { + { 16+3, 16+3, 16+3, 16+3, 16+3}, /* slot 6 == device 3 */ + { 16+2, 16+2, 16+2, 16+2, 16+2}, /* slot 7 == device 2 */ + { 16+1, 16+1, 16+1, 16+1, 16+1}, /* slot 8 == device 1 */ + { -1, -1, -1, -1, -1}, /* slot 9 == nothing */ + { -1, -1, -1, -1, -1}, /* slot 10 == nothing */ + { -1, -1, -1, -1, -1}, /* slot 11 == nothing */ + { -1, -1, -1, -1, -1}, /* slot 12 == nothing */ + { -1, -1, -1, -1, -1}, /* slot 13 == nothing */ + { -1, -1, -1, -1, -1}, /* slot 14 == nothing */ + { -1, -1, -1, -1, -1}, /* slot 15 == nothing */ + { -1, -1, -1, -1, -1}, /* slot 16 == nothing */ + { -1, -1, -1, -1, -1}, /* slot 17 == nothing */ + { 16+3, 16+3, 16+3, 16+3, 16+3}, /* slot 18 == device 3 */ + { 16+2, 16+2, 16+2, 16+2, 16+2}, /* slot 19 == device 2 */ + { 16+1, 16+1, 16+1, 16+1, 16+1}, /* slot 20 == device 1 */ + }; + const long min_idsel = 6, max_idsel = 20, irqs_per_slot = 5; + return COMMON_TABLE_LOOKUP; +} + +static u8 __init +takara_swizzle(struct pci_dev *dev, u8 *pinp) +{ + int slot = PCI_SLOT(dev->devfn); + int pin = *pinp; + unsigned int ctlreg = inl(0x500); + unsigned int busslot; + + if (!dev->bus->self) + return slot; + + busslot = PCI_SLOT(dev->bus->self->devfn); + /* Check for built-in bridges. */ + if (dev->bus->number != 0 + && busslot > 16 + && ((1<<(36-busslot)) & ctlreg)) { + if (pin == 1) + pin += (20 - busslot); + else { + printk(KERN_WARNING "takara_swizzle: can only " + "handle cards with INTA IRQ pin.\n"); + } + } else { + /* Must be a card-based bridge. 
*/ + printk(KERN_WARNING "takara_swizzle: cannot handle " + "card-bridge behind builtin bridge yet.\n"); + } + + *pinp = pin; + return slot; +} + +static void __init +takara_init_pci(void) +{ + if (alpha_using_srm) + alpha_mv.pci_map_irq = takara_map_irq_srm; + + cia_init_pci(); + ns87312_enable_ide(0x26e); +} + + +/* + * The System Vector + */ + +struct alpha_machine_vector takara_mv __initmv = { + .vector_name = "Takara", + DO_EV5_MMU, + DO_DEFAULT_RTC, + DO_CIA_IO, + .machine_check = cia_machine_check, + .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, + .min_io_address = DEFAULT_IO_BASE, + .min_mem_address = CIA_DEFAULT_MEM_BASE, + + .nr_irqs = 128, + .device_interrupt = takara_device_interrupt, + + .init_arch = cia_init_arch, + .init_irq = takara_init_irq, + .init_rtc = common_init_rtc, + .init_pci = takara_init_pci, + .kill_arch = cia_kill_arch, + .pci_map_irq = takara_map_irq, + .pci_swizzle = takara_swizzle, +}; +ALIAS_MV(takara) diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c new file mode 100644 index 0000000..5f84417 --- /dev/null +++ b/arch/alpha/kernel/sys_titan.c @@ -0,0 +1,420 @@ +/* + * linux/arch/alpha/kernel/sys_titan.c + * + * Copyright (C) 1995 David A Rusling + * Copyright (C) 1996, 1999 Jay A Estabrook + * Copyright (C) 1998, 1999 Richard Henderson + * Copyright (C) 1999, 2000 Jeff Wiedemeier + * + * Code supporting TITAN systems (EV6+TITAN), currently: + * Privateer + * Falcon + * Granite + */ + +#include <linux/config.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/bitops.h> + +#include <asm/ptrace.h> +#include <asm/system.h> +#include <asm/dma.h> +#include <asm/irq.h> +#include <asm/mmu_context.h> +#include <asm/io.h> +#include <asm/pgtable.h> +#include <asm/core_titan.h> +#include <asm/hwrpb.h> +#include <asm/tlbflush.h> + +#include "proto.h" +#include "irq_impl.h" +#include "pci_impl.h" +#include "machvec_impl.h" +#include "err_impl.h" + + +/* + * Titan generic + */ + +/* + * Titan supports up to 4 CPUs + */ +static unsigned long titan_cpu_irq_affinity[4] = { ~0UL, ~0UL, ~0UL, ~0UL }; + +/* + * Mask is set (1) if enabled + */ +static unsigned long titan_cached_irq_mask; + +/* + * Need SMP-safe access to interrupt CSRs + */ +DEFINE_SPINLOCK(titan_irq_lock); + +static void +titan_update_irq_hw(unsigned long mask) +{ + register titan_cchip *cchip = TITAN_cchip; + unsigned long isa_enable = 1UL << 55; + register int bcpu = boot_cpuid; + +#ifdef CONFIG_SMP + cpumask_t cpm = cpu_present_mask; + volatile unsigned long *dim0, *dim1, *dim2, *dim3; + unsigned long mask0, mask1, mask2, mask3, dummy; + + mask &= ~isa_enable; + mask0 = mask & titan_cpu_irq_affinity[0]; + mask1 = mask & titan_cpu_irq_affinity[1]; + mask2 = mask & titan_cpu_irq_affinity[2]; + mask3 = mask & titan_cpu_irq_affinity[3]; + + if (bcpu == 0) mask0 |= isa_enable; + else if (bcpu == 1) mask1 |= isa_enable; + else if (bcpu == 2) mask2 |= isa_enable; + else mask3 |= isa_enable; + + dim0 = &cchip->dim0.csr; + dim1 = &cchip->dim1.csr; + dim2 = &cchip->dim2.csr; + dim3 = &cchip->dim3.csr; + if (!cpu_isset(0, cpm)) dim0 = &dummy; + if (!cpu_isset(1, cpm)) dim1 = &dummy; + if (!cpu_isset(2, cpm)) dim2 = &dummy; + if (!cpu_isset(3, cpm)) dim3 = &dummy; + + *dim0 = mask0; + *dim1 = mask1; + *dim2 = mask2; + *dim3 = mask3; + mb(); + *dim0; + *dim1; + *dim2; + *dim3; +#else + volatile unsigned long *dimB; + dimB = &cchip->dim0.csr; + if (bcpu == 1) dimB = 
&cchip->dim1.csr; + else if (bcpu == 2) dimB = &cchip->dim2.csr; + else if (bcpu == 3) dimB = &cchip->dim3.csr; + + *dimB = mask | isa_enable; + mb(); + *dimB; +#endif +} + +static inline void +titan_enable_irq(unsigned int irq) +{ + spin_lock(&titan_irq_lock); + titan_cached_irq_mask |= 1UL << (irq - 16); + titan_update_irq_hw(titan_cached_irq_mask); + spin_unlock(&titan_irq_lock); +} + +static inline void +titan_disable_irq(unsigned int irq) +{ + spin_lock(&titan_irq_lock); + titan_cached_irq_mask &= ~(1UL << (irq - 16)); + titan_update_irq_hw(titan_cached_irq_mask); + spin_unlock(&titan_irq_lock); +} + +static unsigned int +titan_startup_irq(unsigned int irq) +{ + titan_enable_irq(irq); + return 0; /* never anything pending */ +} + +static void +titan_end_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + titan_enable_irq(irq); +} + +static void +titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity) +{ + int cpu; + + for (cpu = 0; cpu < 4; cpu++) { + if (cpu_isset(cpu, affinity)) + titan_cpu_irq_affinity[cpu] |= 1UL << irq; + else + titan_cpu_irq_affinity[cpu] &= ~(1UL << irq); + } + +} + +static void +titan_set_irq_affinity(unsigned int irq, cpumask_t affinity) +{ + spin_lock(&titan_irq_lock); + titan_cpu_set_irq_affinity(irq - 16, affinity); + titan_update_irq_hw(titan_cached_irq_mask); + spin_unlock(&titan_irq_lock); +} + +static void +titan_device_interrupt(unsigned long vector, struct pt_regs * regs) +{ + printk("titan_device_interrupt: NOT IMPLEMENTED YET!! \n"); +} + +static void +titan_srm_device_interrupt(unsigned long vector, struct pt_regs * regs) +{ + int irq; + + irq = (vector - 0x800) >> 4; + handle_irq(irq, regs); +} + + +static void __init +init_titan_irqs(struct hw_interrupt_type * ops, int imin, int imax) +{ + long i; + for (i = imin; i <= imax; ++i) { + irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; + irq_desc[i].handler = ops; + } +} + +static struct hw_interrupt_type titan_irq_type = { + .typename = "TITAN", + .startup = titan_startup_irq, + .shutdown = titan_disable_irq, + .enable = titan_enable_irq, + .disable = titan_disable_irq, + .ack = titan_disable_irq, + .end = titan_end_irq, + .set_affinity = titan_set_irq_affinity, +}; + +static irqreturn_t +titan_intr_nop(int irq, void *dev_id, struct pt_regs *regs) +{ + /* + * This is a NOP interrupt handler for the purposes of + * event counting -- just return. + */ + return IRQ_HANDLED; +} + +static void __init +titan_init_irq(void) +{ + if (alpha_using_srm && !alpha_mv.device_interrupt) + alpha_mv.device_interrupt = titan_srm_device_interrupt; + if (!alpha_mv.device_interrupt) + alpha_mv.device_interrupt = titan_device_interrupt; + + titan_update_irq_hw(0); + + init_titan_irqs(&titan_irq_type, 16, 63 + 16); +} + +static void __init +titan_legacy_init_irq(void) +{ + /* init the legacy dma controller */ + outb(0, DMA1_RESET_REG); + outb(0, DMA2_RESET_REG); + outb(DMA_MODE_CASCADE, DMA2_MODE_REG); + outb(0, DMA2_MASK_REG); + + /* init the legacy irq controller */ + init_i8259a_irqs(); + + /* init the titan irqs */ + titan_init_irq(); +} + +void +titan_dispatch_irqs(u64 mask, struct pt_regs *regs) +{ + unsigned long vector; + + /* + * Mask down to those interrupts which are enable on this processor + */ + mask &= titan_cpu_irq_affinity[smp_processor_id()]; + + /* + * Dispatch all requested interrupts + */ + while (mask) { + /* convert to SRM vector... 
priority is <63> -> <0> */ + __asm__("ctlz %1, %0" : "=r"(vector) : "r"(mask)); + vector = 63 - vector; + mask &= ~(1UL << vector); /* clear it out */ + vector = 0x900 + (vector << 4); /* convert to SRM vector */ + + /* dispatch it */ + alpha_mv.device_interrupt(vector, regs); + } +} + + +/* + * Titan Family + */ +static void __init +titan_late_init(void) +{ + /* + * Enable the system error interrupts. These interrupts are + * all reported to the kernel as machine checks, so the handler + * is a nop so it can be called to count the individual events. + */ + request_irq(63+16, titan_intr_nop, SA_INTERRUPT, + "CChip Error", NULL); + request_irq(62+16, titan_intr_nop, SA_INTERRUPT, + "PChip 0 H_Error", NULL); + request_irq(61+16, titan_intr_nop, SA_INTERRUPT, + "PChip 1 H_Error", NULL); + request_irq(60+16, titan_intr_nop, SA_INTERRUPT, + "PChip 0 C_Error", NULL); + request_irq(59+16, titan_intr_nop, SA_INTERRUPT, + "PChip 1 C_Error", NULL); + + /* + * Register our error handlers. + */ + titan_register_error_handlers(); + + /* + * Check if the console left us any error logs. + */ + cdl_check_console_data_log(); + +} + +static int __devinit +titan_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + u8 intline; + int irq; + + /* Get the current intline. */ + pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline); + irq = intline; + + /* Is it explicitly routed through ISA? */ + if ((irq & 0xF0) == 0xE0) + return irq; + + /* Offset by 16 to make room for ISA interrupts 0 - 15. */ + return irq + 16; +} + +static void __init +titan_init_pci(void) +{ + /* + * This isn't really the right place, but there's some init + * that needs to be done after everything is basically up. + */ + titan_late_init(); + + pci_probe_only = 1; + common_init_pci(); + SMC669_Init(0); +#ifdef CONFIG_VGA_HOSE + locate_and_init_vga(NULL); +#endif +} + + +/* + * Privateer + */ +static void __init +privateer_init_pci(void) +{ + /* + * Hook a couple of extra err interrupts that the + * common titan code won't. + */ + request_irq(53+16, titan_intr_nop, SA_INTERRUPT, + "NMI", NULL); + request_irq(50+16, titan_intr_nop, SA_INTERRUPT, + "Temperature Warning", NULL); + + /* + * Finish with the common version. + */ + return titan_init_pci(); +} + + +/* + * The System Vectors. 
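+ *
+ * A machine vector bundles every per-platform hook (MMU, RTC, I/O,
+ * IRQ and PCI setup) into one structure; generic code then calls
+ * through the active vector, e.g.
+ *
+ *	alpha_mv.device_interrupt(vector, regs);
+ *
+ * as titan_dispatch_irqs() does above.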
+ */ +struct alpha_machine_vector titan_mv __initmv = { + .vector_name = "TITAN", + DO_EV6_MMU, + DO_DEFAULT_RTC, + DO_TITAN_IO, + .machine_check = titan_machine_check, + .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, + .min_io_address = DEFAULT_IO_BASE, + .min_mem_address = DEFAULT_MEM_BASE, + .pci_dac_offset = TITAN_DAC_OFFSET, + + .nr_irqs = 80, /* 64 + 16 */ + /* device_interrupt will be filled in by titan_init_irq */ + + .agp_info = titan_agp_info, + + .init_arch = titan_init_arch, + .init_irq = titan_legacy_init_irq, + .init_rtc = common_init_rtc, + .init_pci = titan_init_pci, + + .kill_arch = titan_kill_arch, + .pci_map_irq = titan_map_irq, + .pci_swizzle = common_swizzle, +}; +ALIAS_MV(titan) + +struct alpha_machine_vector privateer_mv __initmv = { + .vector_name = "PRIVATEER", + DO_EV6_MMU, + DO_DEFAULT_RTC, + DO_TITAN_IO, + .machine_check = privateer_machine_check, + .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, + .min_io_address = DEFAULT_IO_BASE, + .min_mem_address = DEFAULT_MEM_BASE, + .pci_dac_offset = TITAN_DAC_OFFSET, + + .nr_irqs = 80, /* 64 + 16 */ + /* device_interrupt will be filled in by titan_init_irq */ + + .agp_info = titan_agp_info, + + .init_arch = titan_init_arch, + .init_irq = titan_legacy_init_irq, + .init_rtc = common_init_rtc, + .init_pci = privateer_init_pci, + + .kill_arch = titan_kill_arch, + .pci_map_irq = titan_map_irq, + .pci_swizzle = common_swizzle, +}; +/* No alpha_mv alias for privateer since we compile it + in unconditionally with titan; setup_arch knows how to cope. */ diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c new file mode 100644 index 0000000..1553f47 --- /dev/null +++ b/arch/alpha/kernel/sys_wildfire.c @@ -0,0 +1,361 @@ +/* + * linux/arch/alpha/kernel/sys_wildfire.c + * + * Wildfire support. + * + * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/bitops.h> + +#include <asm/ptrace.h> +#include <asm/system.h> +#include <asm/dma.h> +#include <asm/irq.h> +#include <asm/mmu_context.h> +#include <asm/io.h> +#include <asm/pgtable.h> +#include <asm/core_wildfire.h> +#include <asm/hwrpb.h> +#include <asm/tlbflush.h> + +#include "proto.h" +#include "irq_impl.h" +#include "pci_impl.h" +#include "machvec_impl.h" + +static unsigned long cached_irq_mask[WILDFIRE_NR_IRQS/(sizeof(long)*8)]; + +DEFINE_SPINLOCK(wildfire_irq_lock); + +static int doing_init_irq_hw = 0; + +static void +wildfire_update_irq_hw(unsigned int irq) +{ + int qbbno = (irq >> 8) & (WILDFIRE_MAX_QBB - 1); + int pcano = (irq >> 6) & (WILDFIRE_PCA_PER_QBB - 1); + wildfire_pca *pca; + volatile unsigned long * enable0; + + if (!WILDFIRE_PCA_EXISTS(qbbno, pcano)) { + if (!doing_init_irq_hw) { + printk(KERN_ERR "wildfire_update_irq_hw:" + " got irq %d for non-existent PCA %d" + " on QBB %d.\n", + irq, pcano, qbbno); + } + return; + } + + pca = WILDFIRE_pca(qbbno, pcano); + enable0 = (unsigned long *) &pca->pca_int[0].enable; /* ??? 
*/ + + *enable0 = cached_irq_mask[qbbno * WILDFIRE_PCA_PER_QBB + pcano]; + mb(); + *enable0; +} + +static void __init +wildfire_init_irq_hw(void) +{ +#if 0 + register wildfire_pca * pca = WILDFIRE_pca(0, 0); + volatile unsigned long * enable0, * enable1, * enable2, *enable3; + volatile unsigned long * target0, * target1, * target2, *target3; + + enable0 = (unsigned long *) &pca->pca_int[0].enable; + enable1 = (unsigned long *) &pca->pca_int[1].enable; + enable2 = (unsigned long *) &pca->pca_int[2].enable; + enable3 = (unsigned long *) &pca->pca_int[3].enable; + + target0 = (unsigned long *) &pca->pca_int[0].target; + target1 = (unsigned long *) &pca->pca_int[1].target; + target2 = (unsigned long *) &pca->pca_int[2].target; + target3 = (unsigned long *) &pca->pca_int[3].target; + + *enable0 = *enable1 = *enable2 = *enable3 = 0; + + *target0 = (1UL<<8) | WILDFIRE_QBB(0); + *target1 = *target2 = *target3 = 0; + + mb(); + + *enable0; *enable1; *enable2; *enable3; + *target0; *target1; *target2; *target3; + +#else + int i; + + doing_init_irq_hw = 1; + + /* Need to update only once for every possible PCA. */ + for (i = 0; i < WILDFIRE_NR_IRQS; i+=WILDFIRE_IRQ_PER_PCA) + wildfire_update_irq_hw(i); + + doing_init_irq_hw = 0; +#endif +} + +static void +wildfire_enable_irq(unsigned int irq) +{ + if (irq < 16) + i8259a_enable_irq(irq); + + spin_lock(&wildfire_irq_lock); + set_bit(irq, &cached_irq_mask); + wildfire_update_irq_hw(irq); + spin_unlock(&wildfire_irq_lock); +} + +static void +wildfire_disable_irq(unsigned int irq) +{ + if (irq < 16) + i8259a_disable_irq(irq); + + spin_lock(&wildfire_irq_lock); + clear_bit(irq, &cached_irq_mask); + wildfire_update_irq_hw(irq); + spin_unlock(&wildfire_irq_lock); +} + +static void +wildfire_mask_and_ack_irq(unsigned int irq) +{ + if (irq < 16) + i8259a_mask_and_ack_irq(irq); + + spin_lock(&wildfire_irq_lock); + clear_bit(irq, &cached_irq_mask); + wildfire_update_irq_hw(irq); + spin_unlock(&wildfire_irq_lock); +} + +static unsigned int +wildfire_startup_irq(unsigned int irq) +{ + wildfire_enable_irq(irq); + return 0; /* never anything pending */ +} + +static void +wildfire_end_irq(unsigned int irq) +{ +#if 0 + if (!irq_desc[irq].action) + printk("got irq %d\n", irq); +#endif + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + wildfire_enable_irq(irq); +} + +static struct hw_interrupt_type wildfire_irq_type = { + .typename = "WILDFIRE", + .startup = wildfire_startup_irq, + .shutdown = wildfire_disable_irq, + .enable = wildfire_enable_irq, + .disable = wildfire_disable_irq, + .ack = wildfire_mask_and_ack_irq, + .end = wildfire_end_irq, +}; + +static void __init +wildfire_init_irq_per_pca(int qbbno, int pcano) +{ + int i, irq_bias; + unsigned long io_bias; + static struct irqaction isa_enable = { + .handler = no_action, + .name = "isa_enable", + }; + + irq_bias = qbbno * (WILDFIRE_PCA_PER_QBB * WILDFIRE_IRQ_PER_PCA) + + pcano * WILDFIRE_IRQ_PER_PCA; + + /* Only need the following for first PCI bus per PCA. */ + io_bias = WILDFIRE_IO(qbbno, pcano<<1) - WILDFIRE_IO_BIAS; + +#if 0 + outb(0, DMA1_RESET_REG + io_bias); + outb(0, DMA2_RESET_REG + io_bias); + outb(DMA_MODE_CASCADE, DMA2_MODE_REG + io_bias); + outb(0, DMA2_MASK_REG + io_bias); +#endif + +#if 0 + /* ??? Not sure how to do this, yet... */ + init_i8259a_irqs(); /* ??? 
 */
+#endif
+
+	for (i = 0; i < 16; ++i) {
+		if (i == 2)
+			continue;
+		irq_desc[i+irq_bias].status = IRQ_DISABLED | IRQ_LEVEL;
+		irq_desc[i+irq_bias].handler = &wildfire_irq_type;
+	}
+
+	irq_desc[36+irq_bias].status = IRQ_DISABLED | IRQ_LEVEL;
+	irq_desc[36+irq_bias].handler = &wildfire_irq_type;
+	for (i = 40; i < 64; ++i) {
+		irq_desc[i+irq_bias].status = IRQ_DISABLED | IRQ_LEVEL;
+		irq_desc[i+irq_bias].handler = &wildfire_irq_type;
+	}
+
+	setup_irq(32+irq_bias, &isa_enable);
+}
+
+static void __init
+wildfire_init_irq(void)
+{
+	int qbbno, pcano;
+
+#if 1
+	wildfire_init_irq_hw();
+	init_i8259a_irqs();
+#endif
+
+	for (qbbno = 0; qbbno < WILDFIRE_MAX_QBB; qbbno++) {
+		if (WILDFIRE_QBB_EXISTS(qbbno)) {
+			for (pcano = 0; pcano < WILDFIRE_PCA_PER_QBB; pcano++) {
+				if (WILDFIRE_PCA_EXISTS(qbbno, pcano)) {
+					wildfire_init_irq_per_pca(qbbno, pcano);
+				}
+			}
+		}
+	}
+}
+
+static void
+wildfire_device_interrupt(unsigned long vector, struct pt_regs * regs)
+{
+	int irq;
+
+	irq = (vector - 0x800) >> 4;
+
+	/*
+	 * bits 10-8:	source QBB ID
+	 * bits 7-6:	PCA
+	 * bits 5-0:	irq in PCA
+	 */
+
+	handle_irq(irq, regs);
+	return;
+}
+
+/*
+ * PCI Fixup configuration.
+ *
+ * Summary per PCA (2 PCI or HIPPI buses):
+ *
+ * Bit      Meaning
+ * 0-15     ISA
+ *
+ *32        ISA summary
+ *33        SMI
+ *34        NMI
+ *36        builtin QLogic SCSI (or slot 0 if no IO module)
+ *40        Interrupt Line A from slot 2 PCI0
+ *41        Interrupt Line B from slot 2 PCI0
+ *42        Interrupt Line C from slot 2 PCI0
+ *43        Interrupt Line D from slot 2 PCI0
+ *44        Interrupt Line A from slot 3 PCI0
+ *45        Interrupt Line B from slot 3 PCI0
+ *46        Interrupt Line C from slot 3 PCI0
+ *47        Interrupt Line D from slot 3 PCI0
+ *
+ *48        Interrupt Line A from slot 4 PCI1
+ *49        Interrupt Line B from slot 4 PCI1
+ *50        Interrupt Line C from slot 4 PCI1
+ *51        Interrupt Line D from slot 4 PCI1
+ *52        Interrupt Line A from slot 5 PCI1
+ *53        Interrupt Line B from slot 5 PCI1
+ *54        Interrupt Line C from slot 5 PCI1
+ *55        Interrupt Line D from slot 5 PCI1
+ *56        Interrupt Line A from slot 6 PCI1
+ *57        Interrupt Line B from slot 6 PCI1
+ *58        Interrupt Line C from slot 6 PCI1
+ *59        Interrupt Line D from slot 6 PCI1
+ *60        Interrupt Line A from slot 7 PCI1
+ *61        Interrupt Line B from slot 7 PCI1
+ *62        Interrupt Line C from slot 7 PCI1
+ *63        Interrupt Line D from slot 7 PCI1
+ *
+ *
+ * IdSel
+ *   0	 Cypress Bridge I/O (ISA summary interrupt)
+ *   1	 64 bit PCI 0 option slot 1 (SCSI QLogic builtin)
+ *   2	 64 bit PCI 0 option slot 2
+ *   3	 64 bit PCI 0 option slot 3
+ *   4	 64 bit PCI 1 option slot 4
+ *   5	 64 bit PCI 1 option slot 5
+ *   6	 64 bit PCI 1 option slot 6
+ *   7	 64 bit PCI 1 option slot 7
+ */
+
+static int __init
+wildfire_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+	static char irq_tab[8][5] __initdata = {
+		/*INT    INTA   INTB   INTC   INTD */
+		{ -1,    -1,    -1,    -1,    -1}, /* IdSel 0 ISA Bridge */
+		{ 36,    36,  36+1,  36+2,  36+3}, /* IdSel 1 SCSI builtin */
+		{ 40,    40,  40+1,  40+2,  40+3}, /* IdSel 2 PCI 0 slot 2 */
+		{ 44,    44,  44+1,  44+2,  44+3}, /* IdSel 3 PCI 0 slot 3 */
+		{ 48,    48,  48+1,  48+2,  48+3}, /* IdSel 4 PCI 1 slot 4 */
+		{ 52,    52,  52+1,  52+2,  52+3}, /* IdSel 5 PCI 1 slot 5 */
+		{ 56,    56,  56+1,  56+2,  56+3}, /* IdSel 6 PCI 1 slot 6 */
+		{ 60,    60,  60+1,  60+2,  60+3}, /* IdSel 7 PCI 1 slot 7 */
+	};
+	long min_idsel = 0, max_idsel = 7, irqs_per_slot = 5;
+
+	struct pci_controller *hose = dev->sysdata;
+	int irq = COMMON_TABLE_LOOKUP;
+
+	if (irq > 0) {
+		int qbbno = hose->index >> 3;
+		int pcano = (hose->index >> 1) & 3;
+		irq += (qbbno << 8) + (pcano << 6);
+	}
+	return irq;
+}
+
+
+/*
+ * The
System Vectors + */ + +struct alpha_machine_vector wildfire_mv __initmv = { + .vector_name = "WILDFIRE", + DO_EV6_MMU, + DO_DEFAULT_RTC, + DO_WILDFIRE_IO, + .machine_check = wildfire_machine_check, + .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, + .min_io_address = DEFAULT_IO_BASE, + .min_mem_address = DEFAULT_MEM_BASE, + + .nr_irqs = WILDFIRE_NR_IRQS, + .device_interrupt = wildfire_device_interrupt, + + .init_arch = wildfire_init_arch, + .init_irq = wildfire_init_irq, + .init_rtc = common_init_rtc, + .init_pci = common_init_pci, + .kill_arch = wildfire_kill_arch, + .pci_map_irq = wildfire_map_irq, + .pci_swizzle = common_swizzle, + + .pa_to_nid = wildfire_pa_to_nid, + .cpuid_to_nid = wildfire_cpuid_to_nid, + .node_mem_start = wildfire_node_mem_start, + .node_mem_size = wildfire_node_mem_size, +}; +ALIAS_MV(wildfire) diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S new file mode 100644 index 0000000..faab8c2 --- /dev/null +++ b/arch/alpha/kernel/systbls.S @@ -0,0 +1,468 @@ +/* + * arch/alpha/kernel/systbls.S + * + * The system call table. + */ + +#include <linux/config.h> /* CONFIG_OSF4_COMPAT */ +#include <asm/unistd.h> + + .data + .align 3 + .globl sys_call_table +sys_call_table: + .quad alpha_ni_syscall /* 0 */ + .quad sys_exit + .quad sys_fork + .quad sys_read + .quad sys_write + .quad alpha_ni_syscall /* 5 */ + .quad sys_close + .quad osf_wait4 + .quad alpha_ni_syscall + .quad sys_link + .quad sys_unlink /* 10 */ + .quad alpha_ni_syscall + .quad sys_chdir + .quad sys_fchdir + .quad sys_mknod + .quad sys_chmod /* 15 */ + .quad sys_chown + .quad osf_brk + .quad alpha_ni_syscall + .quad sys_lseek + .quad sys_getxpid /* 20 */ + .quad osf_mount + .quad sys_umount + .quad sys_setuid + .quad sys_getxuid + .quad alpha_ni_syscall /* 25 */ + .quad sys_ptrace + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall /* 30 */ + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad sys_access + .quad alpha_ni_syscall + .quad alpha_ni_syscall /* 35 */ + .quad sys_sync + .quad sys_kill + .quad alpha_ni_syscall + .quad sys_setpgid + .quad alpha_ni_syscall /* 40 */ + .quad sys_dup + .quad sys_pipe + .quad osf_set_program_attributes + .quad alpha_ni_syscall + .quad sys_open /* 45 */ + .quad alpha_ni_syscall + .quad sys_getxgid + .quad osf_sigprocmask + .quad alpha_ni_syscall + .quad alpha_ni_syscall /* 50 */ + .quad sys_acct + .quad sys_sigpending + .quad alpha_ni_syscall + .quad sys_ioctl + .quad alpha_ni_syscall /* 55 */ + .quad alpha_ni_syscall + .quad sys_symlink + .quad sys_readlink + .quad sys_execve + .quad sys_umask /* 60 */ + .quad sys_chroot + .quad alpha_ni_syscall + .quad sys_getpgrp + .quad sys_getpagesize + .quad alpha_ni_syscall /* 65 */ + .quad sys_vfork + .quad sys_newstat + .quad sys_newlstat + .quad alpha_ni_syscall + .quad alpha_ni_syscall /* 70 */ + .quad osf_mmap + .quad alpha_ni_syscall + .quad sys_munmap + .quad sys_mprotect + .quad sys_madvise /* 75 */ + .quad sys_vhangup + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad sys_getgroups + /* map BSD's setpgrp to sys_setpgid for binary compatibility: */ + .quad sys_setgroups /* 80 */ + .quad alpha_ni_syscall + .quad sys_setpgid + .quad osf_setitimer + .quad alpha_ni_syscall + .quad alpha_ni_syscall /* 85 */ + .quad osf_getitimer + .quad sys_gethostname + .quad sys_sethostname + .quad sys_getdtablesize + .quad sys_dup2 /* 90 */ + .quad sys_newfstat + .quad sys_fcntl + .quad osf_select + .quad sys_poll + .quad sys_fsync /* 95 */ + .quad sys_setpriority + 
.quad sys_socket + .quad sys_connect + .quad sys_accept + .quad osf_getpriority /* 100 */ + .quad sys_send + .quad sys_recv + .quad sys_sigreturn + .quad sys_bind + .quad sys_setsockopt /* 105 */ + .quad sys_listen + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall /* 110 */ + .quad sys_sigsuspend + .quad osf_sigstack + .quad sys_recvmsg + .quad sys_sendmsg + .quad alpha_ni_syscall /* 115 */ + .quad osf_gettimeofday + .quad osf_getrusage + .quad sys_getsockopt + .quad alpha_ni_syscall +#ifdef CONFIG_OSF4_COMPAT + .quad osf_readv /* 120 */ + .quad osf_writev +#else + .quad sys_readv /* 120 */ + .quad sys_writev +#endif + .quad osf_settimeofday + .quad sys_fchown + .quad sys_fchmod + .quad sys_recvfrom /* 125 */ + .quad sys_setreuid + .quad sys_setregid + .quad sys_rename + .quad sys_truncate + .quad sys_ftruncate /* 130 */ + .quad sys_flock + .quad sys_setgid + .quad sys_sendto + .quad sys_shutdown + .quad sys_socketpair /* 135 */ + .quad sys_mkdir + .quad sys_rmdir + .quad osf_utimes + .quad alpha_ni_syscall + .quad alpha_ni_syscall /* 140 */ + .quad sys_getpeername + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad sys_getrlimit + .quad sys_setrlimit /* 145 */ + .quad alpha_ni_syscall + .quad sys_setsid + .quad sys_quotactl + .quad alpha_ni_syscall + .quad sys_getsockname /* 150 */ + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall /* 155 */ + .quad osf_sigaction + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad osf_getdirentries + .quad osf_statfs /* 160 */ + .quad osf_fstatfs + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad osf_getdomainname /* 165 */ + .quad sys_setdomainname + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall /* 170 */ + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall /* 175 */ + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall /* 180 */ + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall /* 185 */ + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall /* 190 */ + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall /* 195 */ + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + /* The OSF swapon has two extra arguments, but we ignore them. 
*/ + .quad sys_swapon + .quad sys_msgctl /* 200 */ + .quad sys_msgget + .quad sys_msgrcv + .quad sys_msgsnd + .quad sys_semctl + .quad sys_semget /* 205 */ + .quad sys_semop + .quad osf_utsname + .quad sys_lchown + .quad osf_shmat + .quad sys_shmctl /* 210 */ + .quad sys_shmdt + .quad sys_shmget + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall /* 215 */ + .quad alpha_ni_syscall + .quad sys_msync + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall /* 220 */ + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall /* 225 */ + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall /* 230 */ + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad sys_getpgid + .quad sys_getsid + .quad sys_sigaltstack /* 235 */ + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall /* 240 */ + .quad osf_sysinfo + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad osf_proplist_syscall + .quad alpha_ni_syscall /* 245 */ + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall /* 250 */ + .quad osf_usleep_thread + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad sys_sysfs + .quad alpha_ni_syscall /* 255 */ + .quad osf_getsysinfo + .quad osf_setsysinfo + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall /* 260 */ + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall /* 265 */ + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall /* 270 */ + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall /* 275 */ + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall /* 280 */ + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall /* 285 */ + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall /* 290 */ + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall /* 295 */ + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall + .quad alpha_ni_syscall +/* linux-specific system calls start at 300 */ + .quad sys_bdflush /* 300 */ + .quad sys_sethae + .quad sys_mount + .quad sys_old_adjtimex + .quad sys_swapoff + .quad sys_getdents /* 305 */ + .quad sys_ni_syscall /* 306: old create_module */ + .quad sys_init_module + .quad sys_delete_module + .quad sys_ni_syscall /* 309: old get_kernel_syms */ + .quad sys_syslog /* 310 */ + .quad sys_reboot + .quad sys_clone + .quad sys_uselib + .quad sys_mlock + .quad sys_munlock /* 315 */ + .quad sys_mlockall + .quad sys_munlockall + .quad sys_sysinfo + .quad sys_sysctl + .quad sys_ni_syscall /* 320 */ + .quad sys_oldumount + .quad sys_swapon + .quad sys_times + .quad sys_personality + .quad sys_setfsuid /* 325 */ + .quad sys_setfsgid + .quad sys_ustat + .quad sys_statfs + .quad sys_fstatfs + .quad sys_sched_setparam /* 330 */ + .quad sys_sched_getparam + .quad sys_sched_setscheduler + .quad sys_sched_getscheduler + .quad sys_sched_yield + 
.quad sys_sched_get_priority_max /* 335 */ + .quad sys_sched_get_priority_min + .quad sys_sched_rr_get_interval + .quad sys_ni_syscall /* sys_afs_syscall */ + .quad sys_newuname + .quad sys_nanosleep /* 340 */ + .quad sys_mremap + .quad sys_nfsservctl + .quad sys_setresuid + .quad sys_getresuid + .quad sys_pciconfig_read /* 345 */ + .quad sys_pciconfig_write + .quad sys_ni_syscall /* 347: old query_module */ + .quad sys_prctl + .quad sys_pread64 + .quad sys_pwrite64 /* 350 */ + .quad sys_rt_sigreturn + .quad sys_rt_sigaction + .quad sys_rt_sigprocmask + .quad sys_rt_sigpending + .quad sys_rt_sigtimedwait /* 355 */ + .quad sys_rt_sigqueueinfo + .quad sys_rt_sigsuspend + .quad sys_select + .quad sys_gettimeofday + .quad sys_settimeofday /* 360 */ + .quad sys_getitimer + .quad sys_setitimer + .quad sys_utimes + .quad sys_getrusage + .quad sys_wait4 /* 365 */ + .quad sys_adjtimex + .quad sys_getcwd + .quad sys_capget + .quad sys_capset + .quad sys_sendfile64 /* 370 */ + .quad sys_setresgid + .quad sys_getresgid + .quad sys_ni_syscall /* sys_dipc */ + .quad sys_pivot_root + .quad sys_mincore /* 375 */ + .quad sys_pciconfig_iobase + .quad sys_getdents64 + .quad sys_gettid + .quad sys_readahead + .quad sys_ni_syscall /* 380 */ + .quad sys_tkill + .quad sys_setxattr + .quad sys_lsetxattr + .quad sys_fsetxattr + .quad sys_getxattr /* 385 */ + .quad sys_lgetxattr + .quad sys_fgetxattr + .quad sys_listxattr + .quad sys_llistxattr + .quad sys_flistxattr /* 390 */ + .quad sys_removexattr + .quad sys_lremovexattr + .quad sys_fremovexattr + .quad sys_futex + .quad sys_sched_setaffinity /* 395 */ + .quad sys_sched_getaffinity + .quad sys_ni_syscall /* 397, tux */ + .quad sys_io_setup + .quad sys_io_destroy + .quad sys_io_getevents /* 400 */ + .quad sys_io_submit + .quad sys_io_cancel + .quad sys_ni_syscall /* 403, sys_alloc_hugepages */ + .quad sys_ni_syscall /* 404, sys_free_hugepages */ + .quad sys_exit_group /* 405 */ + .quad sys_lookup_dcookie + .quad sys_epoll_create + .quad sys_epoll_ctl + .quad sys_epoll_wait + .quad sys_remap_file_pages /* 410 */ + .quad sys_set_tid_address + .quad sys_restart_syscall + .quad sys_fadvise64 + .quad sys_timer_create + .quad sys_timer_settime /* 415 */ + .quad sys_timer_gettime + .quad sys_timer_getoverrun + .quad sys_timer_delete + .quad sys_clock_settime + .quad sys_clock_gettime /* 420 */ + .quad sys_clock_getres + .quad sys_clock_nanosleep + .quad sys_semtimedop + .quad sys_tgkill + .quad sys_stat64 /* 425 */ + .quad sys_lstat64 + .quad sys_fstat64 + .quad sys_ni_syscall /* sys_vserver */ + .quad sys_ni_syscall /* sys_mbind */ + .quad sys_ni_syscall /* sys_get_mempolicy */ + .quad sys_ni_syscall /* sys_set_mempolicy */ + .quad sys_mq_open + .quad sys_mq_unlink + .quad sys_mq_timedsend + .quad sys_mq_timedreceive /* 435 */ + .quad sys_mq_notify + .quad sys_mq_getsetattr + .quad sys_waitid + + .size sys_call_table, . - sys_call_table + .type sys_call_table, @object + +/* Remember to update everything, kids. */ +.ifne (. - sys_call_table) - (NR_SYSCALLS * 8) +.err +.endif diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c new file mode 100644 index 0000000..8226c5c --- /dev/null +++ b/arch/alpha/kernel/time.c @@ -0,0 +1,591 @@ +/* + * linux/arch/alpha/kernel/time.c + * + * Copyright (C) 1991, 1992, 1995, 1999, 2000 Linus Torvalds + * + * This file contains the PC-specific time handling details: + * reading the RTC at bootup, etc.. 
+ * 1994-07-02	Alan Modra
+ *	fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
+ * 1995-03-26	Markus Kuhn
+ *	fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
+ *	precision CMOS clock update
+ * 1997-09-10	Updated NTP code according to technical memorandum Jan '96
+ *		"A Kernel Model for Precision Timekeeping" by Dave Mills
+ * 1997-01-09	Adrian Sun
+ *	use interval timer if CONFIG_RTC=y
+ * 1997-10-29	John Bowman (bowman@math.ualberta.ca)
+ *	fixed tick loss calculation in timer_interrupt
+ *	(round system clock to nearest tick instead of truncating)
+ *	fixed algorithm in time_init for getting time from CMOS clock
+ * 1999-04-16	Thorsten Kranzkowski (dl8bcu@gmx.net)
+ *	fixed algorithm in do_gettimeofday() for calculating the precise time
+ *	from processor cycle counter (now taking lost_ticks into account)
+ * 2000-08-13	Jan-Benedict Glaw <jbglaw@lug-owl.de>
+ *	Fixed time_init to be aware of epochs != 1900. This prevents
+ *	booting up in 2048 for me;) Code is stolen from rtc.c.
+ * 2003-06-03	R. Scott Bailey <scott.bailey@eds.com>
+ *	Tighten sanity in time_init from 1% (10,000 PPM) to 250 PPM
+ */
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/bcd.h>
+#include <linux/profile.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/hwrpb.h>
+#include <asm/8253pit.h>
+
+#include <linux/mc146818rtc.h>
+#include <linux/time.h>
+#include <linux/timex.h>
+
+#include "proto.h"
+#include "irq_impl.h"
+
+u64 jiffies_64 = INITIAL_JIFFIES;
+
+EXPORT_SYMBOL(jiffies_64);
+
+extern unsigned long wall_jiffies;	/* kernel/timer.c */
+
+static int set_rtc_mmss(unsigned long);
+
+DEFINE_SPINLOCK(rtc_lock);
+
+#define TICK_SIZE (tick_nsec / 1000)
+
+/*
+ * Shift amount by which scaled_ticks_per_cycle is scaled.  Shifting
+ * by 48 gives us 16 bits for HZ while keeping the accuracy good even
+ * for large CPU clock rates.
+ */
+#define FIX_SHIFT	48
+
+/* lump static variables together for more efficient access: */
+static struct {
+	/* cycle counter last time it got invoked */
+	__u32 last_time;
+	/* ticks/cycle * 2^48 */
+	unsigned long scaled_ticks_per_cycle;
+	/* last time the CMOS clock got updated */
+	time_t last_rtc_update;
+	/* partial unused tick */
+	unsigned long partial_tick;
+} state;
+
+unsigned long est_cycle_freq;
+
+
+static inline __u32 rpcc(void)
+{
+	__u32 result;
+	asm volatile ("rpcc %0" : "=r"(result));
+	return result;
+}
+
+/*
+ * Scheduler clock - returns current time in nanosec units.
+ *
+ * Copied from ARM code for expediency... ;-}
+ */
+unsigned long long sched_clock(void)
+{
+	return (unsigned long long)jiffies * (1000000000 / HZ);
+}
+
+
+/*
+ * timer_interrupt() needs to keep up the real-time clock,
+ * as well as call the "do_timer()" routine every clocktick
+ */
+irqreturn_t timer_interrupt(int irq, void *dev, struct pt_regs * regs)
+{
+	unsigned long delta;
+	__u32 now;
+	long nticks;
+
+#ifndef CONFIG_SMP
+	/* Not SMP, do kernel PC profiling here.  */
+	profile_tick(CPU_PROFILING, regs);
+#endif
+
+	write_seqlock(&xtime_lock);
+
+	/*
+	 * Calculate how many ticks have passed since the last update,
+	 * including any previous partial leftover.  Save any resulting
+	 * fraction for the next pass.
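+	 *
+	 * Worked example (numbers purely illustrative): with HZ == 1024
+	 * and a 500 MHz cycle counter,
+	 *
+	 *	scaled_ticks_per_cycle == (1024UL << 48) / 500000000
+	 *
+	 * so roughly every 488281 cycles the product below accumulates
+	 * one whole tick in the bits above FIX_SHIFT, while the low 48
+	 * bits stay behind in state.partial_tick.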
+ */ + now = rpcc(); + delta = now - state.last_time; + state.last_time = now; + delta = delta * state.scaled_ticks_per_cycle + state.partial_tick; + state.partial_tick = delta & ((1UL << FIX_SHIFT) - 1); + nticks = delta >> FIX_SHIFT; + + while (nticks > 0) { + do_timer(regs); +#ifndef CONFIG_SMP + update_process_times(user_mode(regs)); +#endif + nticks--; + } + + /* + * If we have an externally synchronized Linux clock, then update + * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be + * called as close as possible to 500 ms before the new second starts. + */ + if ((time_status & STA_UNSYNC) == 0 + && xtime.tv_sec > state.last_rtc_update + 660 + && xtime.tv_nsec >= 500000 - ((unsigned) TICK_SIZE) / 2 + && xtime.tv_nsec <= 500000 + ((unsigned) TICK_SIZE) / 2) { + int tmp = set_rtc_mmss(xtime.tv_sec); + state.last_rtc_update = xtime.tv_sec - (tmp ? 600 : 0); + } + + write_sequnlock(&xtime_lock); + return IRQ_HANDLED; +} + +void +common_init_rtc(void) +{ + unsigned char x; + + /* Reset periodic interrupt frequency. */ + x = CMOS_READ(RTC_FREQ_SELECT) & 0x3f; + /* Test includes known working values on various platforms + where 0x26 is wrong; we refuse to change those. */ + if (x != 0x26 && x != 0x25 && x != 0x19 && x != 0x06) { + printk("Setting RTC_FREQ to 1024 Hz (%x)\n", x); + CMOS_WRITE(0x26, RTC_FREQ_SELECT); + } + + /* Turn on periodic interrupts. */ + x = CMOS_READ(RTC_CONTROL); + if (!(x & RTC_PIE)) { + printk("Turning on RTC interrupts.\n"); + x |= RTC_PIE; + x &= ~(RTC_AIE | RTC_UIE); + CMOS_WRITE(x, RTC_CONTROL); + } + (void) CMOS_READ(RTC_INTR_FLAGS); + + outb(0x36, 0x43); /* pit counter 0: system timer */ + outb(0x00, 0x40); + outb(0x00, 0x40); + + outb(0xb6, 0x43); /* pit counter 2: speaker */ + outb(0x31, 0x42); + outb(0x13, 0x42); + + init_rtc_irq(); +} + + +/* Validate a computed cycle counter result against the known bounds for + the given processor core. There's too much brokenness in the way of + timing hardware for any one method to work everywhere. :-( + + Return 0 if the result cannot be trusted, otherwise return the argument. */ + +static unsigned long __init +validate_cc_value(unsigned long cc) +{ + static struct bounds { + unsigned int min, max; + } cpu_hz[] __initdata = { + [EV3_CPU] = { 50000000, 200000000 }, /* guess */ + [EV4_CPU] = { 100000000, 300000000 }, + [LCA4_CPU] = { 100000000, 300000000 }, /* guess */ + [EV45_CPU] = { 200000000, 300000000 }, + [EV5_CPU] = { 250000000, 433000000 }, + [EV56_CPU] = { 333000000, 667000000 }, + [PCA56_CPU] = { 400000000, 600000000 }, /* guess */ + [PCA57_CPU] = { 500000000, 600000000 }, /* guess */ + [EV6_CPU] = { 466000000, 600000000 }, + [EV67_CPU] = { 600000000, 750000000 }, + [EV68AL_CPU] = { 750000000, 940000000 }, + [EV68CB_CPU] = { 1000000000, 1333333333 }, + /* None of the following are shipping as of 2001-11-01. */ + [EV68CX_CPU] = { 1000000000, 1700000000 }, /* guess */ + [EV69_CPU] = { 1000000000, 1700000000 }, /* guess */ + [EV7_CPU] = { 800000000, 1400000000 }, /* guess */ + [EV79_CPU] = { 1000000000, 2000000000 }, /* guess */ + }; + + /* Allow for some drift in the crystal. 10MHz is more than enough. */ + const unsigned int deviation = 10000000; + + struct percpu_struct *cpu; + unsigned int index; + + cpu = (struct percpu_struct *)((char*)hwrpb + hwrpb->processor_offset); + index = cpu->type & 0xffffffff; + + /* If index out of bounds, no way to validate. */ + if (index >= sizeof(cpu_hz)/sizeof(cpu_hz[0])) + return cc; + + /* If index contains no data, no way to validate. 
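	   E.g. (hypothetical measurement): an EV6 that measured 450000000
	   would be rejected by the range check below -- 450 MHz is under
	   the 466 MHz minimum even with the 10 MHz deviation allowance --
	   and the 0 return tells the caller not to trust the value.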
*/ + if (cpu_hz[index].max == 0) + return cc; + + if (cc < cpu_hz[index].min - deviation + || cc > cpu_hz[index].max + deviation) + return 0; + + return cc; +} + + +/* + * Calibrate CPU clock using legacy 8254 timer/counter. Stolen from + * arch/i386/time.c. + */ + +#define CALIBRATE_LATCH 0xffff +#define TIMEOUT_COUNT 0x100000 + +static unsigned long __init +calibrate_cc_with_pit(void) +{ + int cc, count = 0; + + /* Set the Gate high, disable speaker */ + outb((inb(0x61) & ~0x02) | 0x01, 0x61); + + /* + * Now let's take care of CTC channel 2 + * + * Set the Gate high, program CTC channel 2 for mode 0, + * (interrupt on terminal count mode), binary count, + * load 5 * LATCH count, (LSB and MSB) to begin countdown. + */ + outb(0xb0, 0x43); /* binary, mode 0, LSB/MSB, Ch 2 */ + outb(CALIBRATE_LATCH & 0xff, 0x42); /* LSB of count */ + outb(CALIBRATE_LATCH >> 8, 0x42); /* MSB of count */ + + cc = rpcc(); + do { + count++; + } while ((inb(0x61) & 0x20) == 0 && count < TIMEOUT_COUNT); + cc = rpcc() - cc; + + /* Error: ECTCNEVERSET or ECPUTOOFAST. */ + if (count <= 1 || count == TIMEOUT_COUNT) + return 0; + + return ((long)cc * PIT_TICK_RATE) / (CALIBRATE_LATCH + 1); +} + +/* The Linux interpretation of the CMOS clock register contents: + When the Update-In-Progress (UIP) flag goes from 1 to 0, the + RTC registers show the second which has precisely just started. + Let's hope other operating systems interpret the RTC the same way. */ + +static unsigned long __init +rpcc_after_update_in_progress(void) +{ + do { } while (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)); + do { } while (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP); + + return rpcc(); +} + +void __init +time_init(void) +{ + unsigned int year, mon, day, hour, min, sec, cc1, cc2, epoch; + unsigned long cycle_freq, tolerance; + long diff; + + /* Calibrate CPU clock -- attempt #1. */ + if (!est_cycle_freq) + est_cycle_freq = validate_cc_value(calibrate_cc_with_pit()); + + cc1 = rpcc_after_update_in_progress(); + + /* Calibrate CPU clock -- attempt #2. */ + if (!est_cycle_freq) { + cc2 = rpcc_after_update_in_progress(); + est_cycle_freq = validate_cc_value(cc2 - cc1); + cc1 = cc2; + } + + cycle_freq = hwrpb->cycle_freq; + if (est_cycle_freq) { + /* If the given value is within 250 PPM of what we calculated, + accept it. Otherwise, use what we found. */ + tolerance = cycle_freq / 4000; + diff = cycle_freq - est_cycle_freq; + if (diff < 0) + diff = -diff; + if ((unsigned long)diff > tolerance) { + cycle_freq = est_cycle_freq; + printk("HWRPB cycle frequency bogus. " + "Estimated %lu Hz\n", cycle_freq); + } else { + est_cycle_freq = 0; + } + } else if (! validate_cc_value (cycle_freq)) { + printk("HWRPB cycle frequency bogus, " + "and unable to estimate a proper value!\n"); + } + + /* From John Bowman <bowman@math.ualberta.ca>: allow the values + to settle, as the Update-In-Progress bit going low isn't good + enough on some hardware. 2ms is our guess; we haven't found + bogomips yet, but this is close on a 500Mhz box. 
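	   (Arithmetic behind the guess: if the delay loop retires about
	   one iteration per cycle -- an assumption, not a measurement --
	   then 1000000 iterations / 500000000 Hz = 2 ms.)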
*/ + __delay(1000000); + + sec = CMOS_READ(RTC_SECONDS); + min = CMOS_READ(RTC_MINUTES); + hour = CMOS_READ(RTC_HOURS); + day = CMOS_READ(RTC_DAY_OF_MONTH); + mon = CMOS_READ(RTC_MONTH); + year = CMOS_READ(RTC_YEAR); + + if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { + BCD_TO_BIN(sec); + BCD_TO_BIN(min); + BCD_TO_BIN(hour); + BCD_TO_BIN(day); + BCD_TO_BIN(mon); + BCD_TO_BIN(year); + } + + /* PC-like is standard; used for year >= 70 */ + epoch = 1900; + if (year < 20) + epoch = 2000; + else if (year >= 20 && year < 48) + /* NT epoch */ + epoch = 1980; + else if (year >= 48 && year < 70) + /* Digital UNIX epoch */ + epoch = 1952; + + printk(KERN_INFO "Using epoch = %d\n", epoch); + + if ((year += epoch) < 1970) + year += 100; + + xtime.tv_sec = mktime(year, mon, day, hour, min, sec); + xtime.tv_nsec = 0; + + wall_to_monotonic.tv_sec -= xtime.tv_sec; + wall_to_monotonic.tv_nsec = 0; + + if (HZ > (1<<16)) { + extern void __you_loose (void); + __you_loose(); + } + + state.last_time = cc1; + state.scaled_ticks_per_cycle + = ((unsigned long) HZ << FIX_SHIFT) / cycle_freq; + state.last_rtc_update = 0; + state.partial_tick = 0L; + + /* Startup the timer source. */ + alpha_mv.init_rtc(); +} + +/* + * Use the cycle counter to estimate an displacement from the last time + * tick. Unfortunately the Alpha designers made only the low 32-bits of + * the cycle counter active, so we overflow on 8.2 seconds on a 500MHz + * part. So we can't do the "find absolute time in terms of cycles" thing + * that the other ports do. + */ +void +do_gettimeofday(struct timeval *tv) +{ + unsigned long flags; + unsigned long sec, usec, lost, seq; + unsigned long delta_cycles, delta_usec, partial_tick; + + do { + seq = read_seqbegin_irqsave(&xtime_lock, flags); + + delta_cycles = rpcc() - state.last_time; + sec = xtime.tv_sec; + usec = (xtime.tv_nsec / 1000); + partial_tick = state.partial_tick; + lost = jiffies - wall_jiffies; + + } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); + +#ifdef CONFIG_SMP + /* Until and unless we figure out how to get cpu cycle counters + in sync and keep them there, we can't use the rpcc tricks. */ + delta_usec = lost * (1000000 / HZ); +#else + /* + * usec = cycles * ticks_per_cycle * 2**48 * 1e6 / (2**48 * ticks) + * = cycles * (s_t_p_c) * 1e6 / (2**48 * ticks) + * = cycles * (s_t_p_c) * 15625 / (2**42 * ticks) + * + * which, given a 600MHz cycle and a 1024Hz tick, has a + * dynamic range of about 1.7e17, which is less than the + * 1.8e19 in an unsigned long, so we are safe from overflow. + * + * Round, but with .5 up always, since .5 to even is harder + * with no clear gain. + */ + + delta_usec = (delta_cycles * state.scaled_ticks_per_cycle + + partial_tick + + (lost << FIX_SHIFT)) * 15625; + delta_usec = ((delta_usec / ((1UL << (FIX_SHIFT-6-1)) * HZ)) + 1) / 2; +#endif + + usec += delta_usec; + if (usec >= 1000000) { + sec += 1; + usec -= 1000000; + } + + tv->tv_sec = sec; + tv->tv_usec = usec; +} + +EXPORT_SYMBOL(do_gettimeofday); + +int +do_settimeofday(struct timespec *tv) +{ + time_t wtm_sec, sec = tv->tv_sec; + long wtm_nsec, nsec = tv->tv_nsec; + unsigned long delta_nsec; + + if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) + return -EINVAL; + + write_seqlock_irq(&xtime_lock); + + /* The offset that is added into time in do_gettimeofday above + must be subtracted out here to keep a coherent view of the + time. Without this, a full-tick error is possible. 
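
	   Concretely, the #else branch below re-evaluates the same
	   fixed-point expression do_gettimeofday uses for delta_usec
	   (cycles * scaled_ticks_per_cycle, plus the partial tick and
	   lost jiffies), rounds it identically, then scales by 1000 so
	   the subtraction happens in nanoseconds.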
*/ + +#ifdef CONFIG_SMP + delta_nsec = (jiffies - wall_jiffies) * (NSEC_PER_SEC / HZ); +#else + delta_nsec = rpcc() - state.last_time; + delta_nsec = (delta_nsec * state.scaled_ticks_per_cycle + + state.partial_tick + + ((jiffies - wall_jiffies) << FIX_SHIFT)) * 15625; + delta_nsec = ((delta_nsec / ((1UL << (FIX_SHIFT-6-1)) * HZ)) + 1) / 2; + delta_nsec *= 1000; +#endif + + nsec -= delta_nsec; + + wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); + wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec); + + set_normalized_timespec(&xtime, sec, nsec); + set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); + + time_adjust = 0; /* stop active adjtime() */ + time_status |= STA_UNSYNC; + time_maxerror = NTP_PHASE_LIMIT; + time_esterror = NTP_PHASE_LIMIT; + + write_sequnlock_irq(&xtime_lock); + clock_was_set(); + return 0; +} + +EXPORT_SYMBOL(do_settimeofday); + + +/* + * In order to set the CMOS clock precisely, set_rtc_mmss has to be + * called 500 ms after the second nowtime has started, because when + * nowtime is written into the registers of the CMOS clock, it will + * jump to the next second precisely 500 ms later. Check the Motorola + * MC146818A or Dallas DS12887 data sheet for details. + * + * BUG: This routine does not handle hour overflow properly; it just + * sets the minutes. Usually you won't notice until after reboot! + */ + + +static int +set_rtc_mmss(unsigned long nowtime) +{ + int retval = 0; + int real_seconds, real_minutes, cmos_minutes; + unsigned char save_control, save_freq_select; + + /* irq are locally disabled here */ + spin_lock(&rtc_lock); + /* Tell the clock it's being set */ + save_control = CMOS_READ(RTC_CONTROL); + CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); + + /* Stop and reset prescaler */ + save_freq_select = CMOS_READ(RTC_FREQ_SELECT); + CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); + + cmos_minutes = CMOS_READ(RTC_MINUTES); + if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) + BCD_TO_BIN(cmos_minutes); + + /* + * since we're only adjusting minutes and seconds, + * don't interfere with hour overflow. This avoids + * messing with unknown time zones but requires your + * RTC not to be off by more than 15 minutes + */ + real_seconds = nowtime % 60; + real_minutes = nowtime / 60; + if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1) { + /* correct for half hour time zone */ + real_minutes += 30; + } + real_minutes %= 60; + + if (abs(real_minutes - cmos_minutes) < 30) { + if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { + BIN_TO_BCD(real_seconds); + BIN_TO_BCD(real_minutes); + } + CMOS_WRITE(real_seconds,RTC_SECONDS); + CMOS_WRITE(real_minutes,RTC_MINUTES); + } else { + printk(KERN_WARNING + "set_rtc_mmss: can't update from %d to %d\n", + cmos_minutes, real_minutes); + retval = -1; + } + + /* The following flags have to be released exactly in this order, + * otherwise the DS12887 (popular MC146818A clone with integrated + * battery and quartz) will not reset the oscillator and will not + * update precisely 500 ms later. You won't find this mentioned in + * the Dallas Semiconductor data sheets, but who believes data + * sheets anyway ... 
-- Markus Kuhn + */ + CMOS_WRITE(save_control, RTC_CONTROL); + CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); + spin_unlock(&rtc_lock); + + return retval; +} diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c new file mode 100644 index 0000000..fd7bd17 --- /dev/null +++ b/arch/alpha/kernel/traps.c @@ -0,0 +1,1092 @@ +/* + * arch/alpha/kernel/traps.c + * + * (C) Copyright 1994 Linus Torvalds + */ + +/* + * This file initializes the trap entry points + */ + +#include <linux/config.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/tty.h> +#include <linux/delay.h> +#include <linux/smp_lock.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/kallsyms.h> + +#include <asm/gentrap.h> +#include <asm/uaccess.h> +#include <asm/unaligned.h> +#include <asm/sysinfo.h> +#include <asm/hwrpb.h> +#include <asm/mmu_context.h> + +#include "proto.h" + +/* Work-around for some SRMs which mishandle opDEC faults. */ + +static int opDEC_fix; + +static void __init +opDEC_check(void) +{ + __asm__ __volatile__ ( + /* Load the address of... */ + " br $16, 1f\n" + /* A stub instruction fault handler. Just add 4 to the + pc and continue. */ + " ldq $16, 8($sp)\n" + " addq $16, 4, $16\n" + " stq $16, 8($sp)\n" + " call_pal %[rti]\n" + /* Install the instruction fault handler. */ + "1: lda $17, 3\n" + " call_pal %[wrent]\n" + /* With that in place, the fault from the round-to-minf fp + insn will arrive either at the "lda 4" insn (bad) or one + past that (good). This places the correct fixup in %0. */ + " lda %[fix], 0\n" + " cvttq/svm $f31,$f31\n" + " lda %[fix], 4" + : [fix] "=r" (opDEC_fix) + : [rti] "n" (PAL_rti), [wrent] "n" (PAL_wrent) + : "$0", "$1", "$16", "$17", "$22", "$23", "$24", "$25"); + + if (opDEC_fix) + printk("opDEC fixup enabled.\n"); +} + +void +dik_show_regs(struct pt_regs *regs, unsigned long *r9_15) +{ + printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx %s\n", + regs->pc, regs->r26, regs->ps, print_tainted()); + print_symbol("pc is at %s\n", regs->pc); + print_symbol("ra is at %s\n", regs->r26 ); + printk("v0 = %016lx t0 = %016lx t1 = %016lx\n", + regs->r0, regs->r1, regs->r2); + printk("t2 = %016lx t3 = %016lx t4 = %016lx\n", + regs->r3, regs->r4, regs->r5); + printk("t5 = %016lx t6 = %016lx t7 = %016lx\n", + regs->r6, regs->r7, regs->r8); + + if (r9_15) { + printk("s0 = %016lx s1 = %016lx s2 = %016lx\n", + r9_15[9], r9_15[10], r9_15[11]); + printk("s3 = %016lx s4 = %016lx s5 = %016lx\n", + r9_15[12], r9_15[13], r9_15[14]); + printk("s6 = %016lx\n", r9_15[15]); + } + + printk("a0 = %016lx a1 = %016lx a2 = %016lx\n", + regs->r16, regs->r17, regs->r18); + printk("a3 = %016lx a4 = %016lx a5 = %016lx\n", + regs->r19, regs->r20, regs->r21); + printk("t8 = %016lx t9 = %016lx t10= %016lx\n", + regs->r22, regs->r23, regs->r24); + printk("t11= %016lx pv = %016lx at = %016lx\n", + regs->r25, regs->r27, regs->r28); + printk("gp = %016lx sp = %p\n", regs->gp, regs+1); +#if 0 +__halt(); +#endif +} + +#if 0 +static char * ireg_name[] = {"v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6", + "t7", "s0", "s1", "s2", "s3", "s4", "s5", "s6", + "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9", + "t10", "t11", "ra", "pv", "at", "gp", "sp", "zero"}; +#endif + +static void +dik_show_code(unsigned int *pc) +{ + long i; + + printk("Code:"); + for (i = -6; i < 2; i++) { + unsigned int insn; + if (__get_user(insn, (unsigned int __user *)pc + i)) + break; + printk("%c%08x%c", i ? ' ' : '<', insn, i ? 
' ' : '>'); + } + printk("\n"); +} + +static void +dik_show_trace(unsigned long *sp) +{ + long i = 0; + printk("Trace:\n"); + while (0x1ff8 & (unsigned long) sp) { + extern char _stext[], _etext[]; + unsigned long tmp = *sp; + sp++; + if (tmp < (unsigned long) &_stext) + continue; + if (tmp >= (unsigned long) &_etext) + continue; + printk("[<%lx>]", tmp); + print_symbol(" %s", tmp); + printk("\n"); + if (i > 40) { + printk(" ..."); + break; + } + } + printk("\n"); +} + +static int kstack_depth_to_print = 24; + +void show_stack(struct task_struct *task, unsigned long *sp) +{ + unsigned long *stack; + int i; + + /* + * debugging aid: "show_stack(NULL);" prints the + * back trace for this cpu. + */ + if(sp==NULL) + sp=(unsigned long*)&sp; + + stack = sp; + for(i=0; i < kstack_depth_to_print; i++) { + if (((long) stack & (THREAD_SIZE-1)) == 0) + break; + if (i && ((i % 4) == 0)) + printk("\n "); + printk("%016lx ", *stack++); + } + printk("\n"); + dik_show_trace(sp); +} + +void dump_stack(void) +{ + show_stack(NULL, NULL); +} + +EXPORT_SYMBOL(dump_stack); + +void +die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15) +{ + if (regs->ps & 8) + return; +#ifdef CONFIG_SMP + printk("CPU %d ", hard_smp_processor_id()); +#endif + printk("%s(%d): %s %ld\n", current->comm, current->pid, str, err); + dik_show_regs(regs, r9_15); + dik_show_trace((unsigned long *)(regs+1)); + dik_show_code((unsigned int *)regs->pc); + + if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) { + printk("die_if_kernel recursion detected.\n"); + local_irq_enable(); + while (1); + } + do_exit(SIGSEGV); +} + +#ifndef CONFIG_MATHEMU +static long dummy_emul(void) { return 0; } +long (*alpha_fp_emul_imprecise)(struct pt_regs *regs, unsigned long writemask) + = (void *)dummy_emul; +long (*alpha_fp_emul) (unsigned long pc) + = (void *)dummy_emul; +#else +long alpha_fp_emul_imprecise(struct pt_regs *regs, unsigned long writemask); +long alpha_fp_emul (unsigned long pc); +#endif + +asmlinkage void +do_entArith(unsigned long summary, unsigned long write_mask, + struct pt_regs *regs) +{ + long si_code = FPE_FLTINV; + siginfo_t info; + + if (summary & 1) { + /* Software-completion summary bit is set, so try to + emulate the instruction. If the processor supports + precise exceptions, we don't have to search. */ + if (!amask(AMASK_PRECISE_TRAP)) + si_code = alpha_fp_emul(regs->pc - 4); + else + si_code = alpha_fp_emul_imprecise(regs, write_mask); + if (si_code == 0) + return; + } + die_if_kernel("Arithmetic fault", regs, 0, NULL); + + info.si_signo = SIGFPE; + info.si_errno = 0; + info.si_code = si_code; + info.si_addr = (void __user *) regs->pc; + send_sig_info(SIGFPE, &info, current); +} + +asmlinkage void +do_entIF(unsigned long type, struct pt_regs *regs) +{ + siginfo_t info; + int signo, code; + + if (regs->ps == 0) { + if (type == 1) { + const unsigned int *data + = (const unsigned int *) regs->pc; + printk("Kernel bug at %s:%d\n", + (const char *)(data[1] | (long)data[2] << 32), + data[0]); + } + die_if_kernel((type == 1 ? 
"Kernel Bug" : "Instruction fault"), + regs, type, NULL); + } + + switch (type) { + case 0: /* breakpoint */ + info.si_signo = SIGTRAP; + info.si_errno = 0; + info.si_code = TRAP_BRKPT; + info.si_trapno = 0; + info.si_addr = (void __user *) regs->pc; + + if (ptrace_cancel_bpt(current)) { + regs->pc -= 4; /* make pc point to former bpt */ + } + + send_sig_info(SIGTRAP, &info, current); + return; + + case 1: /* bugcheck */ + info.si_signo = SIGTRAP; + info.si_errno = 0; + info.si_code = __SI_FAULT; + info.si_addr = (void __user *) regs->pc; + info.si_trapno = 0; + send_sig_info(SIGTRAP, &info, current); + return; + + case 2: /* gentrap */ + info.si_addr = (void __user *) regs->pc; + info.si_trapno = regs->r16; + switch ((long) regs->r16) { + case GEN_INTOVF: + signo = SIGFPE; + code = FPE_INTOVF; + break; + case GEN_INTDIV: + signo = SIGFPE; + code = FPE_INTDIV; + break; + case GEN_FLTOVF: + signo = SIGFPE; + code = FPE_FLTOVF; + break; + case GEN_FLTDIV: + signo = SIGFPE; + code = FPE_FLTDIV; + break; + case GEN_FLTUND: + signo = SIGFPE; + code = FPE_FLTUND; + break; + case GEN_FLTINV: + signo = SIGFPE; + code = FPE_FLTINV; + break; + case GEN_FLTINE: + signo = SIGFPE; + code = FPE_FLTRES; + break; + case GEN_ROPRAND: + signo = SIGFPE; + code = __SI_FAULT; + break; + + case GEN_DECOVF: + case GEN_DECDIV: + case GEN_DECINV: + case GEN_ASSERTERR: + case GEN_NULPTRERR: + case GEN_STKOVF: + case GEN_STRLENERR: + case GEN_SUBSTRERR: + case GEN_RANGERR: + case GEN_SUBRNG: + case GEN_SUBRNG1: + case GEN_SUBRNG2: + case GEN_SUBRNG3: + case GEN_SUBRNG4: + case GEN_SUBRNG5: + case GEN_SUBRNG6: + case GEN_SUBRNG7: + default: + signo = SIGTRAP; + code = __SI_FAULT; + break; + } + + info.si_signo = signo; + info.si_errno = 0; + info.si_code = code; + info.si_addr = (void __user *) regs->pc; + send_sig_info(signo, &info, current); + return; + + case 4: /* opDEC */ + if (implver() == IMPLVER_EV4) { + long si_code; + + /* The some versions of SRM do not handle + the opDEC properly - they return the PC of the + opDEC fault, not the instruction after as the + Alpha architecture requires. Here we fix it up. + We do this by intentionally causing an opDEC + fault during the boot sequence and testing if + we get the correct PC. If not, we set a flag + to correct it every time through. */ + regs->pc += opDEC_fix; + + /* EV4 does not implement anything except normal + rounding. Everything else will come here as + an illegal instruction. Emulate them. */ + si_code = alpha_fp_emul(regs->pc - 4); + if (si_code == 0) + return; + if (si_code > 0) { + info.si_signo = SIGFPE; + info.si_errno = 0; + info.si_code = si_code; + info.si_addr = (void __user *) regs->pc; + send_sig_info(SIGFPE, &info, current); + return; + } + } + break; + + case 3: /* FEN fault */ + /* Irritating users can call PAL_clrfen to disable the + FPU for the process. The kernel will then trap in + do_switch_stack and undo_switch_stack when we try + to save and restore the FP registers. + + Given that GCC by default generates code that uses the + FP registers, PAL_clrfen is not useful except for DoS + attacks. So turn the bleeding FPU back on and be done + with it. 
+/* There is an ifdef in the PALcode in MILO that enables a
+   "kernel debugging entry point" as an unprivileged call_pal.
+
+   We don't want to have anything to do with it, but unfortunately
+   several versions of MILO included in distributions have it enabled,
+   and if we don't put something on the entry point we'll oops.  */
+
+asmlinkage void
+do_entDbg(struct pt_regs *regs)
+{
+	siginfo_t info;
+
+	die_if_kernel("Instruction fault", regs, 0, NULL);
+
+	info.si_signo = SIGILL;
+	info.si_errno = 0;
+	info.si_code = ILL_ILLOPC;
+	info.si_addr = (void __user *) regs->pc;
+	force_sig_info(SIGILL, &info, current);
+}
+
+
+/*
+ * entUna has a different register layout to be reasonably simple. It
+ * needs access to all the integer registers (the kernel doesn't use
+ * fp-regs), and it needs to have them in order for simpler access.
+ *
+ * Due to the non-standard register layout (and because we don't want
+ * to handle floating-point regs), user-mode unaligned accesses are
+ * handled separately by do_entUnaUser below.
+ *
+ * Oh, btw, we don't handle the "gp" register correctly, but if we fault
+ * on a gp-register unaligned load/store, something is _very_ wrong
+ * in the kernel anyway..
+ */
+struct allregs {
+	unsigned long regs[32];
+	unsigned long ps, pc, gp, a0, a1, a2;
+};
+
+struct unaligned_stat {
+	unsigned long count, va, pc;
+} unaligned[2];
+
+
+/* Macro for exception fixup code to access integer registers.  */
+#define una_reg(r)  (regs.regs[(r) >= 16 && (r) <= 18 ? (r)+19 : (r)])
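The odd "(r)+19" in una_reg falls out of struct allregs: viewed as an array of longs, ps/pc/gp/a0/a1/a2 occupy indices 32-37 after regs[32], so r16-r18 (presumably stale in regs[] because the PALcode hands the trap arguments over in $16-$18) are fetched from the separately saved a0-a2 slots at indices 35-37. A hypothetical compile-time check of that arithmetic:

	#include <stddef.h>

	struct allregs_layout {
		unsigned long regs[32];
		unsigned long ps, pc, gp, a0, a1, a2;
	};

	/* 16+19 == 35, the array index of a0; likewise r17 -> a1 (36)
	   and r18 -> a2 (37). */
	typedef char una_reg_map_ok[
		offsetof(struct allregs_layout, a0) ==
			35 * sizeof(unsigned long) ? 1 : -1];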
+asmlinkage void
+do_entUna(void * va, unsigned long opcode, unsigned long reg,
+	  unsigned long a3, unsigned long a4, unsigned long a5,
+	  struct allregs regs)
+{
+	long error, tmp1, tmp2, tmp3, tmp4;
+	unsigned long pc = regs.pc - 4;
+	const struct exception_table_entry *fixup;
+
+	unaligned[0].count++;
+	unaligned[0].va = (unsigned long) va;
+	unaligned[0].pc = pc;
+
+	/* We don't want to use the generic get/put unaligned macros as
+	   we want to trap exceptions.  Only if we actually get an
+	   exception will we decide whether we should have caught it.  */
+
+	switch (opcode) {
+	case 0x0c: /* ldwu */
+		__asm__ __volatile__(
+		"1:	ldq_u %1,0(%3)\n"
+		"2:	ldq_u %2,1(%3)\n"
+		"	extwl %1,%3,%1\n"
+		"	extwh %2,%3,%2\n"
+		"3:\n"
+		".section __ex_table,\"a\"\n"
+		"	.long 1b - .\n"
+		"	lda %1,3b-1b(%0)\n"
+		"	.long 2b - .\n"
+		"	lda %2,3b-2b(%0)\n"
+		".previous"
+			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
+			: "r"(va), "0"(0));
+		if (error)
+			goto got_exception;
+		una_reg(reg) = tmp1|tmp2;
+		return;
+
+	case 0x28: /* ldl */
+		__asm__ __volatile__(
+		"1:	ldq_u %1,0(%3)\n"
+		"2:	ldq_u %2,3(%3)\n"
+		"	extll %1,%3,%1\n"
+		"	extlh %2,%3,%2\n"
+		"3:\n"
+		".section __ex_table,\"a\"\n"
+		"	.long 1b - .\n"
+		"	lda %1,3b-1b(%0)\n"
+		"	.long 2b - .\n"
+		"	lda %2,3b-2b(%0)\n"
+		".previous"
+			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
+			: "r"(va), "0"(0));
+		if (error)
+			goto got_exception;
+		una_reg(reg) = (int)(tmp1|tmp2);
+		return;
+
+	case 0x29: /* ldq */
+		__asm__ __volatile__(
+		"1:	ldq_u %1,0(%3)\n"
+		"2:	ldq_u %2,7(%3)\n"
+		"	extql %1,%3,%1\n"
+		"	extqh %2,%3,%2\n"
+		"3:\n"
+		".section __ex_table,\"a\"\n"
+		"	.long 1b - .\n"
+		"	lda %1,3b-1b(%0)\n"
+		"	.long 2b - .\n"
+		"	lda %2,3b-2b(%0)\n"
+		".previous"
+			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
+			: "r"(va), "0"(0));
+		if (error)
+			goto got_exception;
+		una_reg(reg) = tmp1|tmp2;
+		return;
+
+	/* Note that the store sequences do not indicate that they change
+	   memory because it _should_ be affecting nothing in this context.
+	   (Otherwise we have other, much larger, problems.)  */
+	case 0x0d: /* stw */
+		__asm__ __volatile__(
+		"1:	ldq_u %2,1(%5)\n"
+		"2:	ldq_u %1,0(%5)\n"
+		"	inswh %6,%5,%4\n"
+		"	inswl %6,%5,%3\n"
+		"	mskwh %2,%5,%2\n"
+		"	mskwl %1,%5,%1\n"
+		"	or %2,%4,%2\n"
+		"	or %1,%3,%1\n"
+		"3:	stq_u %2,1(%5)\n"
+		"4:	stq_u %1,0(%5)\n"
+		"5:\n"
+		".section __ex_table,\"a\"\n"
+		"	.long 1b - .\n"
+		"	lda %2,5b-1b(%0)\n"
+		"	.long 2b - .\n"
+		"	lda %1,5b-2b(%0)\n"
+		"	.long 3b - .\n"
+		"	lda $31,5b-3b(%0)\n"
+		"	.long 4b - .\n"
+		"	lda $31,5b-4b(%0)\n"
+		".previous"
+			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
+			  "=&r"(tmp3), "=&r"(tmp4)
+			: "r"(va), "r"(una_reg(reg)), "0"(0));
+		if (error)
+			goto got_exception;
+		return;
+
+	case 0x2c: /* stl */
+		__asm__ __volatile__(
+		"1:	ldq_u %2,3(%5)\n"
+		"2:	ldq_u %1,0(%5)\n"
+		"	inslh %6,%5,%4\n"
+		"	insll %6,%5,%3\n"
+		"	msklh %2,%5,%2\n"
+		"	mskll %1,%5,%1\n"
+		"	or %2,%4,%2\n"
+		"	or %1,%3,%1\n"
+		"3:	stq_u %2,3(%5)\n"
+		"4:	stq_u %1,0(%5)\n"
+		"5:\n"
+		".section __ex_table,\"a\"\n"
+		"	.long 1b - .\n"
+		"	lda %2,5b-1b(%0)\n"
+		"	.long 2b - .\n"
+		"	lda %1,5b-2b(%0)\n"
+		"	.long 3b - .\n"
+		"	lda $31,5b-3b(%0)\n"
+		"	.long 4b - .\n"
+		"	lda $31,5b-4b(%0)\n"
+		".previous"
+			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
+			  "=&r"(tmp3), "=&r"(tmp4)
+			: "r"(va), "r"(una_reg(reg)), "0"(0));
+		if (error)
+			goto got_exception;
+		return;
+
+	case 0x2d: /* stq */
+		__asm__ __volatile__(
+		"1:	ldq_u %2,7(%5)\n"
+		"2:	ldq_u %1,0(%5)\n"
+		"	insqh %6,%5,%4\n"
+		"	insql %6,%5,%3\n"
+		"	mskqh %2,%5,%2\n"
+		"	mskql %1,%5,%1\n"
+		"	or %2,%4,%2\n"
+		"	or %1,%3,%1\n"
+		"3:	stq_u %2,7(%5)\n"
+		"4:	stq_u %1,0(%5)\n"
+		"5:\n"
+		".section __ex_table,\"a\"\n\t"
+		"	.long 1b - .\n"
+		"	lda %2,5b-1b(%0)\n"
+		"	.long 2b - .\n"
+		"	lda %1,5b-2b(%0)\n"
+		"	.long 3b - .\n"
+		"	lda $31,5b-3b(%0)\n"
+		"	.long 4b - .\n"
+		"	lda $31,5b-4b(%0)\n"
+		".previous"
+			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
+			  "=&r"(tmp3), "=&r"(tmp4)
+			: "r"(va), "r"(una_reg(reg)), "0"(0));
+		if (error)
+			goto got_exception;
+		return;
+	}
+
+	lock_kernel();
+	printk("Bad unaligned kernel access at %016lx: %p %lx %ld\n",
+		pc, va, opcode, reg);
+	do_exit(SIGSEGV);
+
+got_exception:
+	/* Ok, we caught the exception, but we don't want it.  Is there
+	   someone to pass it along to?  */
+	if ((fixup = search_exception_tables(pc)) != 0) {
+		unsigned long newpc;
+		newpc = fixup_exception(una_reg, fixup, pc);
+
+		printk("Forwarding unaligned exception at %lx (%lx)\n",
+		       pc, newpc);
+
+		(&regs)->pc = newpc;
+		return;
+	}
+
+	/*
+	 * Yikes!  No one to forward the exception to.
+	 * Since the registers are in a weird format, dump them ourselves.
+	 */
+	lock_kernel();
+
+	printk("%s(%d): unhandled unaligned exception\n",
+	       current->comm, current->pid);
+
+	printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx\n",
+	       pc, una_reg(26), regs.ps);
+	printk("r0 = %016lx r1 = %016lx r2 = %016lx\n",
+	       una_reg(0), una_reg(1), una_reg(2));
+	printk("r3 = %016lx r4 = %016lx r5 = %016lx\n",
+	       una_reg(3), una_reg(4), una_reg(5));
+	printk("r6 = %016lx r7 = %016lx r8 = %016lx\n",
+	       una_reg(6), una_reg(7), una_reg(8));
+	printk("r9 = %016lx r10= %016lx r11= %016lx\n",
+	       una_reg(9), una_reg(10), una_reg(11));
+	printk("r12= %016lx r13= %016lx r14= %016lx\n",
+	       una_reg(12), una_reg(13), una_reg(14));
+	printk("r15= %016lx\n", una_reg(15));
+	printk("r16= %016lx r17= %016lx r18= %016lx\n",
+	       una_reg(16), una_reg(17), una_reg(18));
+	printk("r19= %016lx r20= %016lx r21= %016lx\n",
+	       una_reg(19), una_reg(20), una_reg(21));
+	printk("r22= %016lx r23= %016lx r24= %016lx\n",
+	       una_reg(22), una_reg(23), una_reg(24));
+	printk("r25= %016lx r27= %016lx r28= %016lx\n",
+	       una_reg(25), una_reg(27), una_reg(28));
+	printk("gp = %016lx sp = %p\n", regs.gp, &regs+1);
+
+	dik_show_code((unsigned int *)pc);
+	dik_show_trace((unsigned long *)(&regs+1));
+
+	if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
+		printk("die_if_kernel recursion detected.\n");
+		local_irq_enable();
+		while (1);
+	}
+	do_exit(SIGSEGV);
+}
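Each fixup above is the classic BWX-free Alpha idiom: two aligned ldq_u loads bracket the unaligned datum, then ext*l/ext*h shift and merge the halves. A portable-C analogue of the quadword case (a hypothetical helper, for illustration only; the real handler must do this in asm so the exception table can catch the loads):

	static unsigned long emulated_unaligned_ldq(const void *va)
	{
		unsigned long addr = (unsigned long)va;
		unsigned long lo = *(unsigned long *)(addr & ~7UL);        /* ldq_u 0(va) */
		unsigned long hi = *(unsigned long *)((addr + 7) & ~7UL);  /* ldq_u 7(va) */
		unsigned long shift = (addr & 7) * 8;

		if (shift == 0)		/* already aligned: extqh contributes 0 */
			return lo;
		return (lo >> shift) | (hi << (64 - shift));  /* extql | extqh */
	}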
+/*
+ * Convert an s-floating point value in memory format to the
+ * corresponding value in register format.  The exponent
+ * needs to be remapped to preserve non-finite values
+ * (infinities, not-a-numbers, denormals).
+ */
+static inline unsigned long
+s_mem_to_reg (unsigned long s_mem)
+{
+	unsigned long frac    = (s_mem >>  0) & 0x7fffff;
+	unsigned long sign    = (s_mem >> 31) & 0x1;
+	unsigned long exp_msb = (s_mem >> 30) & 0x1;
+	unsigned long exp_low = (s_mem >> 23) & 0x7f;
+	unsigned long exp;
+
+	exp = (exp_msb << 10) | exp_low;	/* common case */
+	if (exp_msb) {
+		if (exp_low == 0x7f) {
+			exp = 0x7ff;
+		}
+	} else {
+		if (exp_low == 0x00) {
+			exp = 0x000;
+		} else {
+			exp |= (0x7 << 7);
+		}
+	}
+	return (sign << 63) | (exp << 52) | (frac << 29);
+}
+
+/*
+ * Convert an s-floating point value in register format to the
+ * corresponding value in memory format.
+ */
+static inline unsigned long
+s_reg_to_mem (unsigned long s_reg)
+{
+	return ((s_reg >> 62) << 30) | ((s_reg << 5) >> 34);
+}
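A quick sanity check on the exponent remapping: the S-float 1.0f is 0x3f800000 in memory format (exp_msb 0, exp_low 0x7f), so the biased exponent widens to 0x7f | (0x7 << 7) = 0x3ff, and the register image is 0x3ff0000000000000, i.e. T-float 1.0, which s_reg_to_mem packs straight back to 0x3f800000. A sketch, assuming the two helpers above are in scope:

	#include <assert.h>

	static void s_float_round_trip(void)
	{
		unsigned long r = s_mem_to_reg(0x3f800000UL);
		assert(r == 0x3ff0000000000000UL);	/* T-float 1.0 */
		assert(s_reg_to_mem(r) == 0x3f800000UL);	/* back to S-float */
	}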
+/*
+ * Handle user-level unaligned fault.  Handling user-level unaligned
+ * faults is *extremely* slow and produces nasty messages.  A user
+ * program *should* fix unaligned faults ASAP.
+ *
+ * Notice that we have (almost) the regular kernel stack layout here,
+ * so finding the appropriate registers is a little more difficult
+ * than in the kernel case.
+ *
+ * Finally, we handle regular integer load/stores only.  In
+ * particular, load-linked/store-conditionally and floating point
+ * load/stores are not supported.  The former make no sense with
+ * unaligned faults (they are guaranteed to fail) and I don't think
+ * the latter will occur in any decent program.
+ *
+ * Sigh. We *do* have to handle some FP operations, because GCC will
+ * use them as temporary storage for integer memory to memory copies.
+ * However, we need to deal with stt/ldt and sts/lds only.
+ */
+
+#define OP_INT_MASK	( 1L << 0x28 | 1L << 0x2c   /* ldl stl */	\
+			| 1L << 0x29 | 1L << 0x2d   /* ldq stq */	\
+			| 1L << 0x0c | 1L << 0x0d   /* ldwu stw */	\
+			| 1L << 0x0a | 1L << 0x0e ) /* ldbu stb */
+
+#define OP_WRITE_MASK	( 1L << 0x26 | 1L << 0x27   /* sts stt */	\
+			| 1L << 0x2c | 1L << 0x2d   /* stl stq */	\
+			| 1L << 0x0d | 1L << 0x0e ) /* stw stb */
+
+#define R(x)	((size_t) &((struct pt_regs *)0)->x)
+
+static int unauser_reg_offsets[32] = {
+	R(r0), R(r1), R(r2), R(r3), R(r4), R(r5), R(r6), R(r7), R(r8),
+	/* r9 ... r15 are stored in front of regs.  */
+	-56, -48, -40, -32, -24, -16, -8,
+	R(r16), R(r17), R(r18),
+	R(r19), R(r20), R(r21), R(r22), R(r23), R(r24), R(r25), R(r26),
+	R(r27), R(r28), R(gp),
+	0, 0
+};
+
+#undef R
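Because Alpha major opcodes are 6 bits wide, a single 64-bit word can encode the whole "is this one of ours" set, and classifying an opcode in do_entUnaUser below costs one shift and one and. A hypothetical helper making the idiom explicit:

	/* Mirrors the "(1L << opcode) & OP_INT_MASK" test used by the
	   handler; opcodes are 0..63, so the shift cannot overflow. */
	static int is_integer_unaligned_op(unsigned long opcode)
	{
		return opcode < 64 && ((1UL << opcode) & OP_INT_MASK) != 0;
	}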
+asmlinkage void
+do_entUnaUser(void __user * va, unsigned long opcode,
+	      unsigned long reg, struct pt_regs *regs)
+{
+	static int cnt = 0;
+	static long last_time = 0;
+
+	unsigned long tmp1, tmp2, tmp3, tmp4;
+	unsigned long fake_reg, *reg_addr = &fake_reg;
+	siginfo_t info;
+	long error;
+
+	/* Check the UAC bits to decide what the user wants us to do
+	   with the unaligned access.  */
+
+	if (!test_thread_flag (TIF_UAC_NOPRINT)) {
+		if (cnt >= 5 && jiffies - last_time > 5*HZ) {
+			cnt = 0;
+		}
+		if (++cnt < 5) {
+			printk("%s(%d): unaligned trap at %016lx: %p %lx %ld\n",
+			       current->comm, current->pid,
+			       regs->pc - 4, va, opcode, reg);
+		}
+		last_time = jiffies;
+	}
+	if (test_thread_flag (TIF_UAC_SIGBUS))
+		goto give_sigbus;
+	/* Not sure why you'd want to use this, but... */
+	if (test_thread_flag (TIF_UAC_NOFIX))
+		return;
+
+	/* Don't bother reading ds in the access check since we already
+	   know that this came from the user.  Also rely on the fact that
+	   the page at TASK_SIZE is unmapped and so can't be touched anyway. */
+	if (!__access_ok((unsigned long)va, 0, USER_DS))
+		goto give_sigsegv;
+
+	++unaligned[1].count;
+	unaligned[1].va = (unsigned long)va;
+	unaligned[1].pc = regs->pc - 4;
+
+	if ((1L << opcode) & OP_INT_MASK) {
+		/* it's an integer load/store */
+		if (reg < 30) {
+			reg_addr = (unsigned long *)
+			  ((char *)regs + unauser_reg_offsets[reg]);
+		} else if (reg == 30) {
+			/* usp in PAL regs */
+			fake_reg = rdusp();
+		} else {
+			/* zero "register" */
+			fake_reg = 0;
+		}
+	}
+
+	/* We don't want to use the generic get/put unaligned macros as
+	   we want to trap exceptions.  Only if we actually get an
+	   exception will we decide whether we should have caught it.  */
+
+	switch (opcode) {
+	case 0x0c: /* ldwu */
+		__asm__ __volatile__(
+		"1:	ldq_u %1,0(%3)\n"
+		"2:	ldq_u %2,1(%3)\n"
+		"	extwl %1,%3,%1\n"
+		"	extwh %2,%3,%2\n"
+		"3:\n"
+		".section __ex_table,\"a\"\n"
+		"	.long 1b - .\n"
+		"	lda %1,3b-1b(%0)\n"
+		"	.long 2b - .\n"
+		"	lda %2,3b-2b(%0)\n"
+		".previous"
+			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
+			: "r"(va), "0"(0));
+		if (error)
+			goto give_sigsegv;
+		*reg_addr = tmp1|tmp2;
+		break;
+
+	case 0x22: /* lds */
+		__asm__ __volatile__(
+		"1:	ldq_u %1,0(%3)\n"
+		"2:	ldq_u %2,3(%3)\n"
+		"	extll %1,%3,%1\n"
+		"	extlh %2,%3,%2\n"
+		"3:\n"
+		".section __ex_table,\"a\"\n"
+		"	.long 1b - .\n"
+		"	lda %1,3b-1b(%0)\n"
+		"	.long 2b - .\n"
+		"	lda %2,3b-2b(%0)\n"
+		".previous"
+			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
+			: "r"(va), "0"(0));
+		if (error)
+			goto give_sigsegv;
+		alpha_write_fp_reg(reg, s_mem_to_reg((int)(tmp1|tmp2)));
+		return;
+
+	case 0x23: /* ldt */
+		__asm__ __volatile__(
+		"1:	ldq_u %1,0(%3)\n"
+		"2:	ldq_u %2,7(%3)\n"
+		"	extql %1,%3,%1\n"
+		"	extqh %2,%3,%2\n"
+		"3:\n"
+		".section __ex_table,\"a\"\n"
+		"	.long 1b - .\n"
+		"	lda %1,3b-1b(%0)\n"
+		"	.long 2b - .\n"
+		"	lda %2,3b-2b(%0)\n"
+		".previous"
+			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
+			: "r"(va), "0"(0));
+		if (error)
+			goto give_sigsegv;
+		alpha_write_fp_reg(reg, tmp1|tmp2);
+		return;
+
+	case 0x28: /* ldl */
+		__asm__ __volatile__(
+		"1:	ldq_u %1,0(%3)\n"
+		"2:	ldq_u %2,3(%3)\n"
+		"	extll %1,%3,%1\n"
+		"	extlh %2,%3,%2\n"
+		"3:\n"
+		".section __ex_table,\"a\"\n"
+		"	.long 1b - .\n"
+		"	lda %1,3b-1b(%0)\n"
+		"	.long 2b - .\n"
+		"	lda %2,3b-2b(%0)\n"
+		".previous"
+			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
+			: "r"(va), "0"(0));
+		if (error)
+			goto give_sigsegv;
+		*reg_addr = (int)(tmp1|tmp2);
+		break;
+
+	case 0x29: /* ldq */
+		__asm__ __volatile__(
+		"1:	ldq_u %1,0(%3)\n"
+		"2:	ldq_u %2,7(%3)\n"
+		"	extql %1,%3,%1\n"
+		"	extqh %2,%3,%2\n"
+		"3:\n"
+		".section __ex_table,\"a\"\n"
+		"	.long 1b - .\n"
+		"	lda %1,3b-1b(%0)\n"
+		"	.long 2b - .\n"
+		"	lda %2,3b-2b(%0)\n"
+		".previous"
+			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
+			: "r"(va), "0"(0));
+		if (error)
+			goto give_sigsegv;
+		*reg_addr = tmp1|tmp2;
+		break;
+
+	/* Note that the store sequences do not indicate that they change
+	   memory because it _should_ be affecting nothing in this context.
+	   (Otherwise we have other, much larger, problems.)  */
+	case 0x0d: /* stw */
+		__asm__ __volatile__(
+		"1:	ldq_u %2,1(%5)\n"
+		"2:	ldq_u %1,0(%5)\n"
+		"	inswh %6,%5,%4\n"
+		"	inswl %6,%5,%3\n"
+		"	mskwh %2,%5,%2\n"
+		"	mskwl %1,%5,%1\n"
+		"	or %2,%4,%2\n"
+		"	or %1,%3,%1\n"
+		"3:	stq_u %2,1(%5)\n"
+		"4:	stq_u %1,0(%5)\n"
+		"5:\n"
+		".section __ex_table,\"a\"\n"
+		"	.long 1b - .\n"
+		"	lda %2,5b-1b(%0)\n"
+		"	.long 2b - .\n"
+		"	lda %1,5b-2b(%0)\n"
+		"	.long 3b - .\n"
+		"	lda $31,5b-3b(%0)\n"
+		"	.long 4b - .\n"
+		"	lda $31,5b-4b(%0)\n"
+		".previous"
+			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
+			  "=&r"(tmp3), "=&r"(tmp4)
+			: "r"(va), "r"(*reg_addr), "0"(0));
+		if (error)
+			goto give_sigsegv;
+		return;
+
+	case 0x26: /* sts */
+		fake_reg = s_reg_to_mem(alpha_read_fp_reg(reg));
+		/* FALLTHRU */
+
+	case 0x2c: /* stl */
+		__asm__ __volatile__(
+		"1:	ldq_u %2,3(%5)\n"
+		"2:	ldq_u %1,0(%5)\n"
+		"	inslh %6,%5,%4\n"
+		"	insll %6,%5,%3\n"
+		"	msklh %2,%5,%2\n"
+		"	mskll %1,%5,%1\n"
+		"	or %2,%4,%2\n"
+		"	or %1,%3,%1\n"
+		"3:	stq_u %2,3(%5)\n"
+		"4:	stq_u %1,0(%5)\n"
+		"5:\n"
+		".section __ex_table,\"a\"\n"
+		"	.long 1b - .\n"
+		"	lda %2,5b-1b(%0)\n"
+		"	.long 2b - .\n"
+		"	lda %1,5b-2b(%0)\n"
+		"	.long 3b - .\n"
+		"	lda $31,5b-3b(%0)\n"
+		"	.long 4b - .\n"
+		"	lda $31,5b-4b(%0)\n"
+		".previous"
+			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
+			  "=&r"(tmp3), "=&r"(tmp4)
+			: "r"(va), "r"(*reg_addr), "0"(0));
+		if (error)
+			goto give_sigsegv;
+		return;
+
+	case 0x27: /* stt */
+		fake_reg = alpha_read_fp_reg(reg);
+		/* FALLTHRU */
+
+	case 0x2d: /* stq */
+		__asm__ __volatile__(
+		"1:	ldq_u %2,7(%5)\n"
+		"2:	ldq_u %1,0(%5)\n"
+		"	insqh %6,%5,%4\n"
+		"	insql %6,%5,%3\n"
+		"	mskqh %2,%5,%2\n"
+		"	mskql %1,%5,%1\n"
+		"	or %2,%4,%2\n"
+		"	or %1,%3,%1\n"
+		"3:	stq_u %2,7(%5)\n"
+		"4:	stq_u %1,0(%5)\n"
+		"5:\n"
+		".section __ex_table,\"a\"\n\t"
+		"	.long 1b - .\n"
+		"	lda %2,5b-1b(%0)\n"
+		"	.long 2b - .\n"
+		"	lda %1,5b-2b(%0)\n"
+		"	.long 3b - .\n"
+		"	lda $31,5b-3b(%0)\n"
+		"	.long 4b - .\n"
+		"	lda $31,5b-4b(%0)\n"
+		".previous"
+			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
+			  "=&r"(tmp3), "=&r"(tmp4)
+			: "r"(va), "r"(*reg_addr), "0"(0));
+		if (error)
+			goto give_sigsegv;
+		return;
+
+	default:
+		/* What instruction were you trying to use, exactly? */
+		goto give_sigbus;
+	}
+
+	/* Only integer loads should get here; everyone else returns early. */
+	if (reg == 30)
+		wrusp(fake_reg);
+	return;
+
+give_sigsegv:
+	regs->pc -= 4;	/* make pc point to faulting insn */
+	info.si_signo = SIGSEGV;
+	info.si_errno = 0;
+
+	/* We need to replicate some of the logic in mm/fault.c,
+	   since we don't have access to the fault code in the
+	   exception handling return path.  */
+	if (!__access_ok((unsigned long)va, 0, USER_DS))
+		info.si_code = SEGV_ACCERR;
+	else {
+		struct mm_struct *mm = current->mm;
+		down_read(&mm->mmap_sem);
+		if (find_vma(mm, (unsigned long)va))
+			info.si_code = SEGV_ACCERR;
+		else
+			info.si_code = SEGV_MAPERR;
+		up_read(&mm->mmap_sem);
+	}
+	info.si_addr = va;
+	send_sig_info(SIGSEGV, &info, current);
+	return;
+
+give_sigbus:
+	regs->pc -= 4;
+	info.si_signo = SIGBUS;
+	info.si_errno = 0;
+	info.si_code = BUS_ADRALN;
+	info.si_addr = va;
+	send_sig_info(SIGBUS, &info, current);
+	return;
+}
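For reference, a user program that lands in do_entUnaUser can look as innocuous as this (a hypothetical demo; with default UAC settings the kernel logs the rate-limited warning and transparently fixes the access up):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char buf[16];
		long v;

		memset(buf, 0x5a, sizeof buf);
		/* The cast promises 8-byte alignment that buf+1 does not
		   have, so the compiler emits a plain ldq and the CPU
		   takes the unaligned trap. */
		v = *(long *)(buf + 1);
		printf("%lx\n", v);
		return 0;
	}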
+void __init
+trap_init(void)
+{
+	/* Tell PAL-code what global pointer we want in the kernel.  */
+	register unsigned long gptr __asm__("$29");
+	wrkgp(gptr);
+
+	/* Hack for Multia (UDB) and JENSEN: some of their SRMs have
+	   a bug in the handling of the opDEC fault.  Fix it up if so.  */
+	if (implver() == IMPLVER_EV4)
+		opDEC_check();
+
+	wrent(entArith, 1);
+	wrent(entMM, 2);
+	wrent(entIF, 3);
+	wrent(entUna, 4);
+	wrent(entSys, 5);
+	wrent(entDbg, 6);
+}
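The gptr declaration in trap_init uses GCC's explicit-register variables to read whatever value the boot path left in $29, the Alpha global pointer, without any explicit asm body. The same idiom in isolation (a sketch; the function name is hypothetical):

	static unsigned long current_gp(void)
	{
		register unsigned long gp __asm__("$29");	/* Alpha GP */
		return gp;
	}

diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S
new file mode 100644
index 0000000..0922e07
--- /dev/null
+++ b/arch/alpha/kernel/vmlinux.lds.S
@@ -0,0 +1,149 @@
+#include <linux/config.h>
+#include <asm-generic/vmlinux.lds.h>
+
+OUTPUT_FORMAT("elf64-alpha")
+OUTPUT_ARCH(alpha)
+ENTRY(__start)
+PHDRS { kernel PT_LOAD ; }
+jiffies = jiffies_64;
+SECTIONS
+{
+#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
+	. = 0xfffffc0000310000;
+#else
+	. = 0xfffffc0001010000;
+#endif
+
+	_text = .;	/* Text and read-only data */
+	.text : {
+		*(.text)
+		SCHED_TEXT
+		LOCK_TEXT
+		*(.fixup)
+		*(.gnu.warning)
+	} :kernel
+	_etext = .;	/* End of text section */
+
+	. = ALIGN(16);
+	__start___ex_table = .;	/* Exception table */
+	__ex_table : { *(__ex_table) }
+	__stop___ex_table = .;
+
+	RODATA
+
+	/* Will be freed after init */
+	. = ALIGN(8192);	/* Init code and data */
+	__init_begin = .;
+	.init.text : {
+		_sinittext = .;
+		*(.init.text)
+		_einittext = .;
+	}
+	.init.data : { *(.init.data) }
+
+	. = ALIGN(16);
+	__setup_start = .;
+	.init.setup : { *(.init.setup) }
+	__setup_end = .;
+
+	. = ALIGN(8);
+	__initcall_start = .;
+	.initcall.init : {
+		*(.initcall1.init)
+		*(.initcall2.init)
+		*(.initcall3.init)
+		*(.initcall4.init)
+		*(.initcall5.init)
+		*(.initcall6.init)
+		*(.initcall7.init)
+	}
+	__initcall_end = .;
+
+	. = ALIGN(8192);
+	__initramfs_start = .;
+	.init.ramfs : { *(.init.ramfs) }
+	__initramfs_end = .;
+
+	. = ALIGN(8);
+	.con_initcall.init : {
+		__con_initcall_start = .;
+		*(.con_initcall.init)
+		__con_initcall_end = .;
+	}
+
+	. = ALIGN(8);
+	SECURITY_INIT
+
+	. = ALIGN(64);
+	__per_cpu_start = .;
+	.data.percpu : { *(.data.percpu) }
+	__per_cpu_end = .;
+
+	. = ALIGN(2*8192);
+	__init_end = .;
+	/* Freed after init ends here */
+
+	/* Note 2 page alignment above.  */
+	.data.init_thread : { *(.data.init_thread) }
+
+	. = ALIGN(8192);
+	.data.page_aligned : { *(.data.page_aligned) }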
+	. = ALIGN(64);
+	.data.cacheline_aligned : { *(.data.cacheline_aligned) }
+
+	_data = .;
+	.data : {	/* Data */
+		*(.data)
+		CONSTRUCTORS
+	}
+
+	.got : { *(.got) }
+	.sdata : { *(.sdata) }
+
+	_edata = .;	/* End of data section */
+
+	__bss_start = .;
+	.sbss : { *(.sbss) *(.scommon) }
+	.bss : { *(.bss) *(COMMON) }
+	__bss_stop = .;
+
+	_end = .;
+
+	/* Sections to be discarded */
+	/DISCARD/ : { *(.exit.text) *(.exit.data) *(.exitcall.exit) }
+
+	.mdebug 0 : { *(.mdebug) }
+	.note 0 : { *(.note) }
+	.comment 0 : { *(.comment) }
+
+	/* Stabs debugging sections */
+	.stab 0 : { *(.stab) }
+	.stabstr 0 : { *(.stabstr) }
+	.stab.excl 0 : { *(.stab.excl) }
+	.stab.exclstr 0 : { *(.stab.exclstr) }
+	.stab.index 0 : { *(.stab.index) }
+	.stab.indexstr 0 : { *(.stab.indexstr) }
+	/* DWARF 1 */
+	.debug 0 : { *(.debug) }
+	.line 0 : { *(.line) }
+	/* GNU DWARF 1 extensions */
+	.debug_srcinfo 0 : { *(.debug_srcinfo) }
+	.debug_sfnames 0 : { *(.debug_sfnames) }
+	/* DWARF 1.1 and DWARF 2 */
+	.debug_aranges 0 : { *(.debug_aranges) }
+	.debug_pubnames 0 : { *(.debug_pubnames) }
+	/* DWARF 2 */
+	.debug_info 0 : { *(.debug_info) }
+	.debug_abbrev 0 : { *(.debug_abbrev) }
+	.debug_line 0 : { *(.debug_line) }
+	.debug_frame 0 : { *(.debug_frame) }
+	.debug_str 0 : { *(.debug_str) }
+	.debug_loc 0 : { *(.debug_loc) }
+	.debug_macinfo 0 : { *(.debug_macinfo) }
+	/* SGI/MIPS DWARF 2 extensions */
+	.debug_weaknames 0 : { *(.debug_weaknames) }
+	.debug_funcnames 0 : { *(.debug_funcnames) }
+	.debug_typenames 0 : { *(.debug_typenames) }
+	.debug_varnames 0 : { *(.debug_varnames) }
+}
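The __start___ex_table/__stop___ex_table pair defined above are plain linker symbols, so C code binds to the table with externs; this is how the exception-table lookups used by the unaligned handlers in traps.c see it (a sketch of the usual idiom):

	struct exception_table_entry;	/* defined by the arch headers */

	extern struct exception_table_entry __start___ex_table[];
	extern struct exception_table_entry __stop___ex_table[];

	/* search_exception_tables() walks the half-open range
	   [__start___ex_table, __stop___ex_table) looking for an entry
	   that matches a faulting PC. */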