author | Thomas Gleixner <tglx@tglx.tec.linutronix.de> | 2005-06-26 23:20:36 +0200
committer | Thomas Gleixner <tglx@mtd.linutronix.de> | 2005-06-26 23:20:36 +0200
commit | 7ca6448dbfb398bba36eda3c01bc14b86c3675be (patch)
tree | 82d934ebf07f22a2c64c3b6d82ec24082878b43a /include
parent | f1f67a9874f1a4bba1adff6d694aa52e5f52ff1a (diff)
parent | 7d681b23d6cc14a8c026ea6756242cb522cbbcae (diff)
download | op-kernel-dev-7ca6448dbfb398bba36eda3c01bc14b86c3675be.zip | op-kernel-dev-7ca6448dbfb398bba36eda3c01bc14b86c3675be.tar.gz
Merge with rsync://fileserver/linux
Update to Linus latest
Diffstat (limited to 'include')
589 files changed, 22961 insertions, 4901 deletions
diff --git a/include/asm-alpha/agp.h b/include/asm-alpha/agp.h index c99dbbb..ef855a3 100644 --- a/include/asm-alpha/agp.h +++ b/include/asm-alpha/agp.h @@ -10,4 +10,14 @@ #define flush_agp_mappings() #define flush_agp_cache() mb() +/* Convert a physical address to an address suitable for the GART. */ +#define phys_to_gart(x) (x) +#define gart_to_phys(x) (x) + +/* GATT allocation. Returns/accepts GATT kernel virtual address. */ +#define alloc_gatt_pages(order) \ + ((char *)__get_free_pages(GFP_KERNEL, (order))) +#define free_gatt_pages(table, order) \ + free_pages((unsigned long)(table), (order)) + #endif diff --git a/include/asm-alpha/mmzone.h b/include/asm-alpha/mmzone.h index 726c150..a011ef4 100644 --- a/include/asm-alpha/mmzone.h +++ b/include/asm-alpha/mmzone.h @@ -57,7 +57,6 @@ PLAT_NODE_DATA_LOCALNR(unsigned long p, int n) * Given a kernel address, find the home node of the underlying memory. */ #define kvaddr_to_nid(kaddr) pa_to_nid(__pa(kaddr)) -#define node_mem_map(nid) (NODE_DATA(nid)->node_mem_map) #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) #define local_mapnr(kvaddr) \ @@ -108,7 +107,7 @@ PLAT_NODE_DATA_LOCALNR(unsigned long p, int n) #define pfn_to_page(pfn) \ ({ \ unsigned long kaddr = (unsigned long)__va((pfn) << PAGE_SHIFT); \ - (node_mem_map(kvaddr_to_nid(kaddr)) + local_mapnr(kaddr)); \ + (NODE_DATA(kvaddr_to_nid(kaddr))->node_mem_map + local_mapnr(kaddr)); \ }) #define page_to_pfn(page) \ diff --git a/include/asm-alpha/smp.h b/include/asm-alpha/smp.h index cbc173a..9950706 100644 --- a/include/asm-alpha/smp.h +++ b/include/asm-alpha/smp.h @@ -43,7 +43,7 @@ extern struct cpuinfo_alpha cpu_data[NR_CPUS]; #define PROC_CHANGE_PENALTY 20 #define hard_smp_processor_id() __hard_smp_processor_id() -#define smp_processor_id() (current_thread_info()->cpu) +#define raw_smp_processor_id() (current_thread_info()->cpu) extern cpumask_t cpu_present_mask; extern cpumask_t cpu_online_map; diff --git a/include/asm-arm/arch-aaec2000/aaec2000.h b/include/asm-arm/arch-aaec2000/aaec2000.h new file mode 100644 index 0000000..0e9b7e1 --- /dev/null +++ b/include/asm-arm/arch-aaec2000/aaec2000.h @@ -0,0 +1,151 @@ +/* + * linux/include/asm-arm/arch-aaec2000/aaec2000.h + * + * AAEC-2000 registers definition + * + * Copyright (c) 2005 Nicolas Bellido Y Ortega + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __ASM_ARCH_AAEC2000_H +#define __ASM_ARCH_AAEC2000_H + +#ifndef __ASM_ARCH_HARDWARE_H +#error You must include hardware.h not this file +#endif /* __ASM_ARCH_HARDWARE_H */ + +/* Interrupt controller */ +#define IRQ_BASE __REG(0x80000500) +#define IRQ_INTSR __REG(0x80000500) /* Int Status Register */ +#define IRQ_INTRSR __REG(0x80000504) /* Int Raw (unmasked) Status */ +#define IRQ_INTENS __REG(0x80000508) /* Int Enable Set */ +#define IRQ_INTENC __REG(0x8000050c) /* Int Enable Clear */ + +/* UART 1 */ +#define UART1_BASE __REG(0x80000600) +#define UART1_DR __REG(0x80000600) /* Data/FIFO Register */ +#define UART1_LCR __REG(0x80000604) /* Link Control Register */ +#define UART1_BRCR __REG(0x80000608) /* Baud Rate Control Register */ +#define UART1_CR __REG(0x8000060c) /* Control Register */ +#define UART1_SR __REG(0x80000610) /* Status Register */ +#define UART1_INT __REG(0x80000614) /* Interrupt Status Register */ +#define UART1_INTM __REG(0x80000618) /* Interrupt Mask Register */ +#define UART1_INTRES __REG(0x8000061c) /* Int Result (masked status) Register */ + +/* UART 2 */ +#define UART2_BASE __REG(0x80000700) +#define UART2_DR __REG(0x80000700) /* Data/FIFO Register */ +#define UART2_LCR __REG(0x80000704) /* Link Control Register */ +#define UART2_BRCR __REG(0x80000708) /* Baud Rate Control Register */ +#define UART2_CR __REG(0x8000070c) /* Control Register */ +#define UART2_SR __REG(0x80000710) /* Status Register */ +#define UART2_INT __REG(0x80000714) /* Interrupt Status Register */ +#define UART2_INTM __REG(0x80000718) /* Interrupt Mask Register */ +#define UART2_INTRES __REG(0x8000071c) /* Int Result (masked status) Register */ + +/* UART 3 */ +#define UART3_BASE __REG(0x80000800) +#define UART3_DR __REG(0x80000800) /* Data/FIFO Register */ +#define UART3_LCR __REG(0x80000804) /* Link Control Register */ +#define UART3_BRCR __REG(0x80000808) /* Baud Rate Control Register */ +#define UART3_CR __REG(0x8000080c) /* Control Register */ +#define UART3_SR __REG(0x80000810) /* Status Register */ +#define UART3_INT __REG(0x80000814) /* Interrupt Status Register */ +#define UART3_INTM __REG(0x80000818) /* Interrupt Mask Register */ +#define UART3_INTRES __REG(0x8000081c) /* Int Result (masked status) Register */ + +/* These are used in some places */ +#define _UART1_BASE __PREG(UART1_BASE) +#define _UART2_BASE __PREG(UART2_BASE) +#define _UART3_BASE __PREG(UART3_BASE) + +/* UART Registers Offsets */ +#define UART_DR 0x00 +#define UART_LCR 0x04 +#define UART_BRCR 0x08 +#define UART_CR 0x0c +#define UART_SR 0x10 +#define UART_INT 0x14 +#define UART_INTM 0x18 +#define UART_INTRES 0x1c + +/* UART_LCR Bitmask */ +#define UART_LCR_BRK (1 << 0) /* Send Break */ +#define UART_LCR_PEN (1 << 1) /* Parity Enable */ +#define UART_LCR_EP (1 << 2) /* Even/Odd Parity */ +#define UART_LCR_S2 (1 << 3) /* One/Two Stop bits */ +#define UART_LCR_FIFO (1 << 4) /* FIFO Enable */ +#define UART_LCR_WL5 (0 << 5) /* Word Length - 5 bits */ +#define UART_LCR_WL6 (1 << 5) /* Word Length - 6 bits */ +#define UART_LCR_WL7 (1 << 6) /* Word Length - 7 bits */ +#define UART_LCR_WL8 (1 << 7) /* Word Length - 8 bits */ + +/* UART_CR Bitmask */ +#define UART_CR_EN (1 << 0) /* UART Enable */ +#define UART_CR_SIR (1 << 1) /* IrDA SIR Enable */ +#define UART_CR_SIRLP (1 << 2) /* Low Power IrDA Enable */ +#define UART_CR_RXP (1 << 3) /* Receive Pin Polarity */ +#define UART_CR_TXP (1 << 4) /* Transmit Pin Polarity */ +#define UART_CR_MXP (1 << 5) /* Modem Pin Polarity */ +#define UART_CR_LOOP (1 << 6) /* Loopback 
Mode */ + +/* UART_SR Bitmask */ +#define UART_SR_CTS (1 << 0) /* Clear To Send Status */ +#define UART_SR_DSR (1 << 1) /* Data Set Ready Status */ +#define UART_SR_DCD (1 << 2) /* Data Carrier Detect Status */ +#define UART_SR_TxBSY (1 << 3) /* Transmitter Busy Status */ +#define UART_SR_RxFE (1 << 4) /* Receive FIFO Empty Status */ +#define UART_SR_TxFF (1 << 5) /* Transmit FIFO Full Status */ +#define UART_SR_RxFF (1 << 6) /* Receive FIFO Full Status */ +#define UART_SR_TxFE (1 << 7) /* Transmit FIFO Empty Status */ + +/* UART_INT Bitmask */ +#define UART_INT_RIS (1 << 0) /* Rx Interrupt */ +#define UART_INT_TIS (1 << 1) /* Tx Interrupt */ +#define UART_INT_MIS (1 << 2) /* Modem Interrupt */ +#define UART_INT_RTIS (1 << 3) /* Receive Timeout Interrupt */ + +/* Timer 1 */ +#define TIMER1_BASE __REG(0x80000c00) +#define TIMER1_LOAD __REG(0x80000c00) /* Timer 1 Load Register */ +#define TIMER1_VAL __REG(0x80000c04) /* Timer 1 Value Register */ +#define TIMER1_CTRL __REG(0x80000c08) /* Timer 1 Control Register */ +#define TIMER1_CLEAR __REG(0x80000c0c) /* Timer 1 Clear Register */ + +/* Timer 2 */ +#define TIMER2_BASE __REG(0x80000d00) +#define TIMER2_LOAD __REG(0x80000d00) /* Timer 2 Load Register */ +#define TIMER2_VAL __REG(0x80000d04) /* Timer 2 Value Register */ +#define TIMER2_CTRL __REG(0x80000d08) /* Timer 2 Control Register */ +#define TIMER2_CLEAR __REG(0x80000d0c) /* Timer 2 Clear Register */ + +/* Timer 3 */ +#define TIMER3_BASE __REG(0x80000e00) +#define TIMER3_LOAD __REG(0x80000e00) /* Timer 3 Load Register */ +#define TIMER3_VAL __REG(0x80000e04) /* Timer 3 Value Register */ +#define TIMER3_CTRL __REG(0x80000e08) /* Timer 3 Control Register */ +#define TIMER3_CLEAR __REG(0x80000e0c) /* Timer 3 Clear Register */ + +/* Timer Control register bits */ +#define TIMER_CTRL_ENABLE (1 << 7) /* Enable (Start° Timer */ +#define TIMER_CTRL_PERIODIC (1 << 6) /* Periodic Running Mode */ +#define TIMER_CTRL_FREE_RUNNING (0 << 6) /* Normal Running Mode */ +#define TIMER_CTRL_CLKSEL_508K (1 << 3) /* 508KHz Clock select (Timer 1, 2) */ +#define TIMER_CTRL_CLKSEL_2K (0 << 3) /* 2KHz Clock Select (Timer 1, 2)*/ + +/* Power and State Control */ +#define POWER_BASE __REG(0x80000400) +#define POWER_PWRSR __REG(0x80000400) /* Power Status Register */ +#define POWER_PWRCNT __REG(0x80000404) /* Power/Clock control */ +#define POWER_HALT __REG(0x80000408) /* Power Idle Mode */ +#define POWER_STDBY __REG(0x8000040c) /* Power Standby Mode */ +#define POWER_BLEOI __REG(0x80000410) /* Battery Low End of Interrupt */ +#define POWER_MCEOI __REG(0x80000414) /* Media Changed EoI */ +#define POWER_TEOI __REG(0x80000418) /* Tick EoI */ +#define POWER_STFCLR __REG(0x8000041c) /* NbFlg, RSTFlg, PFFlg, CLDFlg Clear */ +#define POWER_CLKSET __REG(0x80000420) /* Clock Speed Control */ + +#endif /* __ARM_ARCH_AAEC2000_H */ diff --git a/include/asm-arm/arch-aaec2000/debug-macro.S b/include/asm-arm/arch-aaec2000/debug-macro.S new file mode 100644 index 0000000..e4f1fa5 --- /dev/null +++ b/include/asm-arm/arch-aaec2000/debug-macro.S @@ -0,0 +1,36 @@ +/* linux/include/asm-arm/arch-aaec2000/debug-macro.S + * + * Debugging macro include header + * + * Copyright (c) 2005 Nicolas Bellido Y Ortega + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + + .macro addruart,rx + mrc p15, 0, \rx, c1, c0 + tst \rx, #1 @ MMU enabled? 
+ moveq \rx, #0x80000000 @ physical + movne \rx, #io_p2v(0x80000000) @ virtual + orr \rx, \rx, #0x00000800 + .endm + + .macro senduart,rd,rx + str \rd, [\rx, #0] + .endm + + .macro busyuart,rd,rx +1002: ldr \rd, [\rx, #0x10] + tst \rd, #(1 << 7) + beq 1002b + .endm + + .macro waituart,rd,rx +#if 0 +1001: ldr \rd, [\rx, #0x10] + tst \rd, #(1 << 5) + beq 1001b +#endif + .endm diff --git a/include/asm-arm/arch-aaec2000/dma.h b/include/asm-arm/arch-aaec2000/dma.h new file mode 100644 index 0000000..28c890b --- /dev/null +++ b/include/asm-arm/arch-aaec2000/dma.h @@ -0,0 +1,17 @@ +/* + * linux/include/asm-arm/arch-aaec2000/dma.h + * + * Copyright (c) 2005 Nicolas Bellido Y Ortega + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __ASM_ARCH_DMA_H +#define __ASM_ARCH_DMA_H + +#define MAX_DMA_ADDRESS 0xffffffff +#define MAX_DMA_CHANNELS 0 + +#endif diff --git a/include/asm-arm/arch-aaec2000/entry-macro.S b/include/asm-arm/arch-aaec2000/entry-macro.S new file mode 100644 index 0000000..df31313 --- /dev/null +++ b/include/asm-arm/arch-aaec2000/entry-macro.S @@ -0,0 +1,33 @@ +/* + * linux/include/asm-arm/arch-aaec2000/entry-macro.S + * + * Low-level IRQ helper for aaec-2000 based platforms + * + * Copyright (c) 2005 Nicolas Bellido Y Ortega + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + + .macro disable_fiq + .endm + + .macro get_irqnr_and_base, irqnr, irqstat, base, tmp + mov r4, #0xf8000000 + add r4, r4, #0x00000500 + mov \base, r4 + ldr \irqstat, [\base, #0] + cmp \irqstat, #0 + bne 1001f + ldr \irqnr, =NR_IRQS+1 + b 1003f +1001: mov \irqnr, #0 +1002: ands \tmp, \irqstat, #1 + mov \irqstat, \irqstat, LSR #1 + add \irqnr, \irqnr, #1 + beq 1002b + sub \irqnr, \irqnr, #1 +1003: + .endm diff --git a/include/asm-arm/arch-aaec2000/hardware.h b/include/asm-arm/arch-aaec2000/hardware.h new file mode 100644 index 0000000..4c37219e --- /dev/null +++ b/include/asm-arm/arch-aaec2000/hardware.h @@ -0,0 +1,49 @@ +/* + * linux/include/asm-arm/arch-aaec2000/hardware.h + * + * Copyright (c) 2005 Nicolas Bellido Y Ortega + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __ASM_ARCH_HARDWARE_H +#define __ASM_ARCH_HARDWARE_H + +#include <linux/config.h> + +/* The kernel is loaded at physical address 0xf8000000. + * We map the IO space a bit after + */ +#define PIO_APB_BASE 0x80000000 +#define VIO_APB_BASE 0xf8000000 +#define IO_APB_LENGTH 0x2000 +#define PIO_AHB_BASE 0x80002000 +#define VIO_AHB_BASE 0xf8002000 +#define IO_AHB_LENGTH 0x2000 + +#define VIO_BASE VIO_APB_BASE +#define PIO_BASE PIO_APB_BASE + +#define io_p2v(x) ( (x) - PIO_BASE + VIO_BASE ) +#define io_v2p(x) ( (x) + PIO_BASE - VIO_BASE ) + +#ifndef __ASSEMBLY__ + +#include <asm/types.h> + +/* FIXME: Is it needed to optimize this a la pxa ?? 
*/ +#define __REG(x) (*((volatile u32 *)io_p2v(x))) +#define __PREG(x) (io_v2p((u32)&(x))) + +#else /* __ASSEMBLY__ */ + +#define __REG(x) io_p2v(x) +#define __PREG(x) io_v2p(x) + +#endif + +#include "aaec2000.h" + +#endif /* __ASM_ARCH_HARDWARE_H */ diff --git a/include/asm-arm/arch-aaec2000/io.h b/include/asm-arm/arch-aaec2000/io.h new file mode 100644 index 0000000..c58a8d1 --- /dev/null +++ b/include/asm-arm/arch-aaec2000/io.h @@ -0,0 +1,19 @@ +/* + * linux/include/asm-arm/arch-aaec2000/io.h + * + * Copied from asm/arch/sa1100/io.h + */ +#ifndef __ASM_ARM_ARCH_IO_H +#define __ASM_ARM_ARCH_IO_H + +#define IO_SPACE_LIMIT 0xffffffff + +/* + * We don't actually have real ISA nor PCI buses, but there is so many + * drivers out there that might just work if we fake them... + */ +#define __io(a) ((void __iomem *)(a)) +#define __mem_pci(a) (a) +#define __mem_isa(a) (a) + +#endif diff --git a/include/asm-arm/arch-aaec2000/irqs.h b/include/asm-arm/arch-aaec2000/irqs.h new file mode 100644 index 0000000..de25222 --- /dev/null +++ b/include/asm-arm/arch-aaec2000/irqs.h @@ -0,0 +1,46 @@ +/* + * linux/include/asm-arm/arch-aaec2000/irqs.h + * + * Copyright (c) 2005 Nicolas Bellido Y Ortega + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __ASM_ARCH_IRQS_H +#define __ASM_ARCH_IRQS_H + + +#define INT_GPIOF0_FIQ 0 /* External GPIO Port F O Fast Interrupt Input */ +#define INT_BL_FIQ 1 /* Battery Low Fast Interrupt */ +#define INT_WE_FIQ 2 /* Watchdog Expired Fast Interrupt */ +#define INT_MV_FIQ 3 /* Media Changed Interrupt */ +#define INT_SC 4 /* Sound Codec Interrupt */ +#define INT_GPIO1 5 /* GPIO Port F Configurable Int 1 */ +#define INT_GPIO2 6 /* GPIO Port F Configurable Int 2 */ +#define INT_GPIO3 7 /* GPIO Port F Configurable Int 3 */ +#define INT_TMR1_OFL 8 /* Timer 1 Overflow Interrupt */ +#define INT_TMR2_OFL 9 /* Timer 2 Overflow Interrupt */ +#define INT_RTC_CM 10 /* RTC Compare Match Interrupt */ +#define INT_TICK 11 /* 64Hz Tick Interrupt */ +#define INT_UART1 12 /* UART1 Interrupt */ +#define INT_UART2 13 /* UART2 & Modem State Changed Interrupt */ +#define INT_LCD 14 /* LCD Interrupt */ +#define INT_SSI 15 /* SSI End of Transfer Interrupt */ +#define INT_UART3 16 /* UART3 Interrupt */ +#define INT_SCI 17 /* SCI Interrupt */ +#define INT_AAC 18 /* Advanced Audio Codec Interrupt */ +#define INT_MMC 19 /* MMC Interrupt */ +#define INT_USB 20 /* USB Interrupt */ +#define INT_DMA 21 /* DMA Interrupt */ +#define INT_TMR3_UOFL 22 /* Timer 3 Underflow Interrupt */ +#define INT_GPIO4 23 /* GPIO Port F Configurable Int 4 */ +#define INT_GPIO5 24 /* GPIO Port F Configurable Int 4 */ +#define INT_GPIO6 25 /* GPIO Port F Configurable Int 4 */ +#define INT_GPIO7 26 /* GPIO Port F Configurable Int 4 */ +#define INT_BMI 27 /* BMI Interrupt */ + +#define NR_IRQS (INT_BMI + 1) + +#endif /* __ASM_ARCH_IRQS_H */ diff --git a/include/asm-arm/arch-aaec2000/memory.h b/include/asm-arm/arch-aaec2000/memory.h new file mode 100644 index 0000000..681b6a6 --- /dev/null +++ b/include/asm-arm/arch-aaec2000/memory.h @@ -0,0 +1,73 @@ +/* + * linux/include/asm-arm/arch-aaec2000/memory.h + * + * Copyright (c) 2005 Nicolas Bellido Y Ortega + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __ASM_ARCH_MEMORY_H +#define __ASM_ARCH_MEMORY_H + +#include <linux/config.h> + +#define PHYS_OFFSET (0xf0000000UL) + +#define __virt_to_bus(x) __virt_to_phys(x) +#define __bus_to_virt(x) __phys_to_virt(x) + +#ifdef CONFIG_DISCONTIGMEM + +/* + * The nodes are the followings: + * + * node 0: 0xf000.0000 - 0xf3ff.ffff + * node 1: 0xf400.0000 - 0xf7ff.ffff + * node 2: 0xf800.0000 - 0xfbff.ffff + * node 3: 0xfc00.0000 - 0xffff.ffff + */ + +/* + * Given a kernel address, find the home node of the underlying memory. + */ +#define KVADDR_TO_NID(addr) \ + (((unsigned long)(addr) - PAGE_OFFSET) >> NODE_MAX_MEM_SHIFT) + +/* + * Given a page frame number, convert it to a node id. + */ +#define PFN_TO_NID(pfn) \ + (((pfn) - PHYS_PFN_OFFSET) >> (NODE_MAX_MEM_SHIFT - PAGE_SHIFT)) + +/* + * Given a kaddr, ADDR_TO_MAPBASE finds the owning node of the memory + * and return the mem_map of that node. + */ +#define ADDR_TO_MAPBASE(kaddr) NODE_MEM_MAP(KVADDR_TO_NID(kaddr)) + +/* + * Given a page frame number, find the owning node of the memory + * and return the mem_map of that node. + */ +#define PFN_TO_MAPBASE(pfn) NODE_MEM_MAP(PFN_TO_NID(pfn)) + +/* + * Given a kaddr, LOCAL_MEM_MAP finds the owning node of the memory + * and returns the index corresponding to the appropriate page in the + * node's mem_map. + */ +#define LOCAL_MAP_NR(addr) \ + (((unsigned long)(addr) & (NODE_MAX_MEM_SIZE - 1)) >> PAGE_SHIFT) + +#define NODE_MAX_MEM_SHIFT 26 +#define NODE_MAX_MEM_SIZE (1 << NODE_MAX_MEM_SHIFT) + +#else + +#define PFN_TO_NID(addr) (0) + +#endif /* CONFIG_DISCONTIGMEM */ + +#endif /* __ASM_ARCH_MEMORY_H */ diff --git a/include/asm-arm/arch-aaec2000/param.h b/include/asm-arm/arch-aaec2000/param.h new file mode 100644 index 0000000..139936c --- /dev/null +++ b/include/asm-arm/arch-aaec2000/param.h @@ -0,0 +1,15 @@ +/* + * linux/include/asm-arm/arch-aaec2000/param.h + * + * Copyright (c) 2005 Nicolas Bellido Y Ortega + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __ASM_ARCH_PARAM_H +#define __ASM_ARCH_PARAM_H + +#endif /* __ASM_ARCH_PARAM_H */ + diff --git a/include/asm-arm/arch-aaec2000/system.h b/include/asm-arm/arch-aaec2000/system.h new file mode 100644 index 0000000..08de97b --- /dev/null +++ b/include/asm-arm/arch-aaec2000/system.h @@ -0,0 +1,24 @@ +/* + * linux/include/asm-arm/arch-aaed2000/system.h + * + * Copyright (c) 2005 Nicolas Bellido Y Ortega + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __ASM_ARCH_SYSTEM_H +#define __ASM_ARCH_SYSTEM_H + +static inline void arch_idle(void) +{ + cpu_do_idle(); +} + +static inline void arch_reset(char mode) +{ + cpu_reset(0); +} + +#endif /* __ASM_ARCH_SYSTEM_H */ diff --git a/include/asm-arm/arch-aaec2000/timex.h b/include/asm-arm/arch-aaec2000/timex.h new file mode 100644 index 0000000..f5708b3 --- /dev/null +++ b/include/asm-arm/arch-aaec2000/timex.h @@ -0,0 +1,18 @@ +/* + * linux/include/asm-arm/arch-aaec2000/timex.h + * + * AAEC-2000 Architecture timex specification + * + * Copyright (c) 2005 Nicolas Bellido Y Ortega + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __ASM_ARCH_TIMEX_H +#define __ASM_ARCH_TIMEX_H + +#define CLOCK_TICK_RATE 508000 + +#endif /* __ASM_ARCH_TIMEX_H */ diff --git a/include/asm-arm/arch-aaec2000/uncompress.h b/include/asm-arm/arch-aaec2000/uncompress.h new file mode 100644 index 0000000..fff0c94 --- /dev/null +++ b/include/asm-arm/arch-aaec2000/uncompress.h @@ -0,0 +1,47 @@ +/* + * linux/include/asm-arm/arch-aaec2000/uncompress.h + * + * Copyright (c) 2005 Nicolas Bellido Y Ortega + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __ASM_ARCH_UNCOMPRESS_H +#define __ASM_ARCH_UNCOMPRESS_H + +#include "hardware.h" + +#define UART(x) (*(volatile unsigned long *)(serial_port + (x))) + +static void putstr( const char *s ) +{ + unsigned long serial_port; + do { + serial_port = _UART3_BASE; + if (UART(UART_CR) & UART_CR_EN) break; + serial_port = _UART1_BASE; + if (UART(UART_CR) & UART_CR_EN) break; + serial_port = _UART2_BASE; + if (UART(UART_CR) & UART_CR_EN) break; + return; + } while (0); + + for (; *s; s++) { + /* wait for space in the UART's transmitter */ + while ((UART(UART_SR) & UART_SR_TxFF)); + /* send the character out. */ + UART(UART_DR) = *s; + /* if a LF, also do CR... */ + if (*s == 10) { + while ((UART(UART_SR) & UART_SR_TxFF)); + UART(UART_DR) = 13; + } + } +} + +#define arch_decomp_setup() +#define arch_decomp_wdog() + +#endif /* __ASM_ARCH_UNCOMPRESS_H */ diff --git a/include/asm-arm/arch-aaec2000/vmalloc.h b/include/asm-arm/arch-aaec2000/vmalloc.h new file mode 100644 index 0000000..ecb991e --- /dev/null +++ b/include/asm-arm/arch-aaec2000/vmalloc.h @@ -0,0 +1,16 @@ +/* + * linux/include/asm-arm/arch-aaec2000/vmalloc.h + * + * Copyright (c) 2005 Nicolas Bellido Y Ortega + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __ASM_ARCH_VMALLOC_H +#define __ASM_ARCH_VMALLOC_H + +#define VMALLOC_END (PAGE_OFFSET + 0x10000000) + +#endif /* __ASM_ARCH_VMALLOC_H */ diff --git a/include/asm-arm/arch-integrator/platform.h b/include/asm-arm/arch-integrator/platform.h index bd364f5..96ad3d2 100644 --- a/include/asm-arm/arch-integrator/platform.h +++ b/include/asm-arm/arch-integrator/platform.h @@ -293,7 +293,11 @@ #define INTEGRATOR_DBG_SWITCH (INTEGRATOR_DBG_BASE + INTEGRATOR_DBG_SWITCH_OFFSET) +#if defined(CONFIG_ARCH_INTEGRATOR_AP) #define INTEGRATOR_GPIO_BASE 0x1B000000 /* GPIO */ +#elif defined(CONFIG_ARCH_INTEGRATOR_CP) +#define INTEGRATOR_GPIO_BASE 0xC9000000 /* GPIO */ +#endif /* ------------------------------------------------------------------------ * KMI keyboard/mouse definitions diff --git a/include/asm-arm/arch-integrator/smp.h b/include/asm-arm/arch-integrator/smp.h new file mode 100644 index 0000000..0ec7093 --- /dev/null +++ b/include/asm-arm/arch-integrator/smp.h @@ -0,0 +1,19 @@ +#ifndef ASMARM_ARCH_SMP_H +#define ASMARM_ARCH_SMP_H + +#include <linux/config.h> + +#include <asm/arch/hardware.h> +#include <asm/io.h> + +#define hard_smp_processor_id() \ + ({ \ + unsigned int cpunum; \ + __asm__("mrc p15, 0, %0, c0, c0, 5" \ + : "=r" (cpunum)); \ + cpunum &= 0x0F; \ + }) + +extern void secondary_scan_irqs(void); + +#endif diff --git a/include/asm-arm/arch-ixp2000/gpio.h b/include/asm-arm/arch-ixp2000/gpio.h index 84634af..03cbbe1 100644 --- a/include/asm-arm/arch-ixp2000/gpio.h +++ b/include/asm-arm/arch-ixp2000/gpio.h @@ -1,5 +1,5 @@ /* - * include/asm-arm/arch-ixp2000/ixp2000-gpio.h + * include/asm-arm/arch-ixp2000/gpio.h * * Copyright (C) 2002 Intel Corporation. * @@ -16,26 +16,18 @@ * Use this instead of directly setting the GPIO registers. * GPIOs may also be used as GPIOs (e.g. for emulating i2c/smb) */ -#ifndef _ASM_ARCH_IXP2000_GPIO_H_ -#define _ASM_ARCH_IXP2000_GPIO_H_ +#ifndef __ASM_ARCH_GPIO_H +#define __ASM_ARCH_GPIO_H #ifndef __ASSEMBLY__ -#define GPIO_OUT 0x0 -#define GPIO_IN 0x80 + +#define GPIO_IN 0 +#define GPIO_OUT 1 #define IXP2000_GPIO_LOW 0 #define IXP2000_GPIO_HIGH 1 -#define GPIO_NO_EDGES 0 -#define GPIO_FALLING_EDGE 1 -#define GPIO_RISING_EDGE 2 -#define GPIO_BOTH_EDGES 3 -#define GPIO_LEVEL_LOW 4 -#define GPIO_LEVEL_HIGH 8 - -extern void set_GPIO_IRQ_edge(int gpio_nr, int edge); -extern void set_GPIO_IRQ_level(int gpio_nr, int level); -extern void gpio_line_config(int line, int style); +extern void gpio_line_config(int line, int direction); static inline int gpio_line_get(int line) { @@ -45,11 +37,12 @@ static inline int gpio_line_get(int line) static inline void gpio_line_set(int line, int value) { if (value == IXP2000_GPIO_HIGH) { - ixp_reg_write(IXP2000_GPIO_POSR, BIT(line)); - } else if (value == IXP2000_GPIO_LOW) - ixp_reg_write(IXP2000_GPIO_POCR, BIT(line)); + ixp2000_reg_write(IXP2000_GPIO_POSR, 1 << line); + } else if (value == IXP2000_GPIO_LOW) { + ixp2000_reg_write(IXP2000_GPIO_POCR, 1 << line); + } } #endif /* !__ASSEMBLY__ */ -#endif /* ASM_ARCH_IXP2000_GPIO_H_ */ +#endif /* ASM_ARCH_IXP2000_GPIO_H_ */ diff --git a/include/asm-arm/arch-ixp2000/io.h b/include/asm-arm/arch-ixp2000/io.h index a8e3c2d..3241cd6 100644 --- a/include/asm-arm/arch-ixp2000/io.h +++ b/include/asm-arm/arch-ixp2000/io.h @@ -17,18 +17,23 @@ #define IO_SPACE_LIMIT 0xffffffff #define __mem_pci(a) (a) -#define ___io(p) ((void __iomem *)((p)+IXP2000_PCI_IO_VIRT_BASE)) /* - * The IXP2400 before revision B0 asserts byte lanes for PCI I/O + * The A? 
revisions of the IXP2000s assert byte lanes for PCI I/O * transactions the other way round (MEM transactions don't have this - * issue), so we need to override the standard functions. B0 and later - * have a bit that can be set to 1 to get the 'proper' behavior, but - * since that isn't available on the A? revisions we just keep doing - * things manually. + * issue), so if we want to support those models, we need to override + * the standard I/O functions. + * + * B0 and later have a bit that can be set to 1 to get the proper + * behavior for I/O transactions, which then allows us to use the + * standard I/O functions. This is what we do if the user does not + * explicitly ask for support for pre-B0. */ -#define alignb(addr) (void __iomem *)((unsigned long)addr ^ 3) -#define alignw(addr) (void __iomem *)((unsigned long)addr ^ 2) +#ifdef CONFIG_IXP2000_SUPPORT_BROKEN_PCI_IO +#define ___io(p) ((void __iomem *)((p)+IXP2000_PCI_IO_VIRT_BASE)) + +#define alignb(addr) (void __iomem *)((unsigned long)(addr) ^ 3) +#define alignw(addr) (void __iomem *)((unsigned long)(addr) ^ 2) #define outb(v,p) __raw_writeb((v),alignb(___io(p))) #define outw(v,p) __raw_writew((v),alignw(___io(p))) @@ -48,6 +53,81 @@ #define insw(p,d,l) __raw_readsw(alignw(___io(p)),d,l) #define insl(p,d,l) __raw_readsl(___io(p),d,l) +#define __is_io_address(p) ((((unsigned long)(p)) & ~(IXP2000_PCI_IO_SIZE - 1)) == IXP2000_PCI_IO_VIRT_BASE) + +#define ioread8(p) \ + ({ \ + unsigned int __v; \ + \ + if (__is_io_address(p)) { \ + __v = __raw_readb(alignb(p)); \ + } else { \ + __v = __raw_readb(p); \ + } \ + \ + __v; \ + }) \ + +#define ioread16(p) \ + ({ \ + unsigned int __v; \ + \ + if (__is_io_address(p)) { \ + __v = __raw_readw(alignw(p)); \ + } else { \ + __v = le16_to_cpu(__raw_readw(p)); \ + } \ + \ + __v; \ + }) + +#define ioread32(p) \ + ({ \ + unsigned int __v; \ + \ + if (__is_io_address(p)) { \ + __v = __raw_readl(p); \ + } else { \ + __v = le32_to_cpu(__raw_readl(p)); \ + } \ + \ + __v; \ + }) + +#define iowrite8(v,p) \ + ({ \ + if (__is_io_address(p)) { \ + __raw_writeb((v), alignb(p)); \ + } else { \ + __raw_writeb((v), p); \ + } \ + }) + +#define iowrite16(v,p) \ + ({ \ + if (__is_io_address(p)) { \ + __raw_writew((v), alignw(p)); \ + } else { \ + __raw_writew(cpu_to_le16(v), p); \ + } \ + }) + +#define iowrite32(v,p) \ + ({ \ + if (__is_io_address(p)) { \ + __raw_writel((v), p); \ + } else { \ + __raw_writel(cpu_to_le32(v), p); \ + } \ + }) + +#define ioport_map(port, nr) ___io(port) + +#define ioport_unmap(addr) +#else +#define __io(p) ((void __iomem *)((p)+IXP2000_PCI_IO_VIRT_BASE)) +#endif + #ifdef CONFIG_ARCH_IXDP2X01 /* @@ -75,8 +155,8 @@ static inline void insw(u32 ptr, void *buf, int length) * Is this cycle meant for the CS8900? */ if ((machine_is_ixdp2401() || machine_is_ixdp2801()) && - ((port >= IXDP2X01_CS8900_VIRT_BASE) && - (port <= IXDP2X01_CS8900_VIRT_END))) { + (((u32)port >= (u32)IXDP2X01_CS8900_VIRT_BASE) && + ((u32)port <= (u32)IXDP2X01_CS8900_VIRT_END))) { u8 *buf8 = (u8*)buf; register u32 tmp32; @@ -100,8 +180,8 @@ static inline void outsw(u32 ptr, void *buf, int length) * Is this cycle meant for the CS8900? */ if ((machine_is_ixdp2401() || machine_is_ixdp2801()) && - ((port >= IXDP2X01_CS8900_VIRT_BASE) && - (port <= IXDP2X01_CS8900_VIRT_END))) { + (((u32)port >= (u32)IXDP2X01_CS8900_VIRT_BASE) && + ((u32)port <= (u32)IXDP2X01_CS8900_VIRT_END))) { register u32 tmp32; u8 *buf8 = (u8*)buf; do { @@ -124,8 +204,8 @@ static inline u16 inw(u32 ptr) * Is this cycle meant for the CS8900? 
*/ if ((machine_is_ixdp2401() || machine_is_ixdp2801()) && - ((port >= IXDP2X01_CS8900_VIRT_BASE) && - (port <= IXDP2X01_CS8900_VIRT_END))) { + (((u32)port >= (u32)IXDP2X01_CS8900_VIRT_BASE) && + ((u32)port <= (u32)IXDP2X01_CS8900_VIRT_END))) { return (u16)(*port); } @@ -137,8 +217,8 @@ static inline void outw(u16 value, u32 ptr) register volatile u32 *port = (volatile u32 *)ptr; if ((machine_is_ixdp2401() || machine_is_ixdp2801()) && - ((port >= IXDP2X01_CS8900_VIRT_BASE) && - (port <= IXDP2X01_CS8900_VIRT_END))) { + (((u32)port >= (u32)IXDP2X01_CS8900_VIRT_BASE) && + ((u32)port <= (u32)IXDP2X01_CS8900_VIRT_END))) { *port = value; return; } diff --git a/include/asm-arm/arch-ixp2000/ixp2000-regs.h b/include/asm-arm/arch-ixp2000/ixp2000-regs.h index 6c56708..5eb47d4 100644 --- a/include/asm-arm/arch-ixp2000/ixp2000-regs.h +++ b/include/asm-arm/arch-ixp2000/ixp2000-regs.h @@ -241,7 +241,7 @@ #define PCI_CONTROL_BE_DEI (1 << 21) /* Big Endian Data Enable In */ #define PCI_CONTROL_BE_BEO (1 << 20) /* Big Endian Byte Enable Out */ #define PCI_CONTROL_BE_BEI (1 << 19) /* Big Endian Byte Enable In */ -#define PCI_CONTROL_PNR (1 << 17) /* PCI Not Reset bit */ +#define PCI_CONTROL_IEE (1 << 17) /* I/O cycle Endian swap Enable */ #define IXP2000_PCI_RST_REL (1 << 2) #define CFG_RST_DIR (*IXP2000_PCI_CONTROL & IXP2000_PCICNTL_PCF) @@ -363,6 +363,7 @@ #define IXP2000_MIN_REV_MASK 0x0000000F #define IXP2000_PROD_ID_MASK 0xFFFFFFFF +#define IXP2000_PRODUCT_ID GLOBAL_REG(0x00) #define IXP2000_MISC_CONTROL GLOBAL_REG(0x04) #define IXP2000_MSF_CLK_CNTRL GLOBAL_REG(0x08) #define IXP2000_RESET0 GLOBAL_REG(0x0c) diff --git a/include/asm-arm/arch-ixp2000/platform.h b/include/asm-arm/arch-ixp2000/platform.h index 901bba6..52ded51 100644 --- a/include/asm-arm/arch-ixp2000/platform.h +++ b/include/asm-arm/arch-ixp2000/platform.h @@ -138,30 +138,10 @@ struct ixp2000_flash_data { unsigned long (*bank_setup)(unsigned long); }; -/* - * GPIO helper functions - */ -#define GPIO_IN 0 -#define GPIO_OUT 1 - -extern void gpio_line_config(int line, int style); - -static inline int gpio_line_get(int line) -{ - return (((*IXP2000_GPIO_PLR) >> line) & 1); -} - -static inline void gpio_line_set(int line, int value) -{ - if (value) - ixp2000_reg_write(IXP2000_GPIO_POSR, (1 << line)); - else - ixp2000_reg_write(IXP2000_GPIO_POCR, (1 << line)); -} - struct ixp2000_i2c_pins { unsigned long sda_pin; unsigned long scl_pin; }; + #endif /* !__ASSEMBLY__ */ diff --git a/include/asm-arm/arch-ixp4xx/debug-macro.S b/include/asm-arm/arch-ixp4xx/debug-macro.S index 4499ae8..45a6c6c 100644 --- a/include/asm-arm/arch-ixp4xx/debug-macro.S +++ b/include/asm-arm/arch-ixp4xx/debug-macro.S @@ -14,6 +14,7 @@ mrc p15, 0, \rx, c1, c0 tst \rx, #1 @ MMU enabled? moveq \rx, #0xc8000000 + orrne \rx, \rx, #0x00b00000 movne \rx, #0xff000000 add \rx,\rx,#3 @ Uart regs are at off set of 3 if @ byte writes used - Big Endian. diff --git a/include/asm-arm/arch-ixp4xx/ixp4xx-regs.h b/include/asm-arm/arch-ixp4xx/ixp4xx-regs.h index 8eeb1db..004696a 100644 --- a/include/asm-arm/arch-ixp4xx/ixp4xx-regs.h +++ b/include/asm-arm/arch-ixp4xx/ixp4xx-regs.h @@ -69,6 +69,16 @@ #define IXP4XX_PERIPHERAL_BASE_VIRT (0xFFBF2000) #define IXP4XX_PERIPHERAL_REGION_SIZE (0x0000C000) +/* + * Debug UART + * + * This is basically a remap of UART1 into a region that is section + * aligned so that it * can be used with the low-level debug code. 
+ */ +#define IXP4XX_DEBUG_UART_BASE_PHYS (0xC8000000) +#define IXP4XX_DEBUG_UART_BASE_VIRT (0xffb00000) +#define IXP4XX_DEBUG_UART_REGION_SIZE (0x00001000) + #define IXP4XX_EXP_CS0_OFFSET 0x00 #define IXP4XX_EXP_CS1_OFFSET 0x04 #define IXP4XX_EXP_CS2_OFFSET 0x08 diff --git a/include/asm-arm/arch-omap/tps65010.h b/include/asm-arm/arch-omap/tps65010.h index 0f97bb2..b9aa2b3 100644 --- a/include/asm-arm/arch-omap/tps65010.h +++ b/include/asm-arm/arch-omap/tps65010.h @@ -30,6 +30,66 @@ /* * ---------------------------------------------------------------------------- + * Registers, all 8 bits + * ---------------------------------------------------------------------------- + */ + +#define TPS_CHGSTATUS 0x01 +# define TPS_CHG_USB (1 << 7) +# define TPS_CHG_AC (1 << 6) +# define TPS_CHG_THERM (1 << 5) +# define TPS_CHG_TERM (1 << 4) +# define TPS_CHG_TAPER_TMO (1 << 3) +# define TPS_CHG_CHG_TMO (1 << 2) +# define TPS_CHG_PRECHG_TMO (1 << 1) +# define TPS_CHG_TEMP_ERR (1 << 0) +#define TPS_REGSTATUS 0x02 +# define TPS_REG_ONOFF (1 << 7) +# define TPS_REG_COVER (1 << 6) +# define TPS_REG_UVLO (1 << 5) +# define TPS_REG_NO_CHG (1 << 4) /* tps65013 */ +# define TPS_REG_PG_LD02 (1 << 3) +# define TPS_REG_PG_LD01 (1 << 2) +# define TPS_REG_PG_MAIN (1 << 1) +# define TPS_REG_PG_CORE (1 << 0) +#define TPS_MASK1 0x03 +#define TPS_MASK2 0x04 +#define TPS_ACKINT1 0x05 +#define TPS_ACKINT2 0x06 +#define TPS_CHGCONFIG 0x07 +# define TPS_CHARGE_POR (1 << 7) /* 65010/65012 */ +# define TPS65013_AUA (1 << 7) /* 65011/65013 */ +# define TPS_CHARGE_RESET (1 << 6) +# define TPS_CHARGE_FAST (1 << 5) +# define TPS_CHARGE_CURRENT (3 << 3) +# define TPS_VBUS_500MA (1 << 2) +# define TPS_VBUS_CHARGING (1 << 1) +# define TPS_CHARGE_ENABLE (1 << 0) +#define TPS_LED1_ON 0x08 +#define TPS_LED1_PER 0x09 +#define TPS_LED2_ON 0x0a +#define TPS_LED2_PER 0x0b +#define TPS_VDCDC1 0x0c +# define TPS_ENABLE_LP (1 << 3) +#define TPS_VDCDC2 0x0d +#define TPS_VREGS1 0x0e +# define TPS_LDO2_ENABLE (1 << 7) +# define TPS_LDO2_OFF (1 << 6) +# define TPS_VLDO2_3_0V (3 << 4) +# define TPS_VLDO2_2_75V (2 << 4) +# define TPS_VLDO2_2_5V (1 << 4) +# define TPS_VLDO2_1_8V (0 << 4) +# define TPS_LDO1_ENABLE (1 << 3) +# define TPS_LDO1_OFF (1 << 2) +# define TPS_VLDO1_3_0V (3 << 0) +# define TPS_VLDO1_2_75V (2 << 0) +# define TPS_VLDO1_2_5V (1 << 0) +# define TPS_VLDO1_ADJ (0 << 0) +#define TPS_MASK3 0x0f +#define TPS_DEFGPIO 0x10 + +/* + * ---------------------------------------------------------------------------- * Macros used by exported functions * ---------------------------------------------------------------------------- */ @@ -71,10 +131,26 @@ extern int tps65010_set_gpio_out_value(unsigned gpio, unsigned value); */ extern int tps65010_set_led(unsigned led, unsigned mode); +/* tps65010_set_vib parameter: + * value: ON or OFF + */ +extern int tps65010_set_vib(unsigned value); + /* tps65010_set_low_pwr parameter: * mode: ON or OFF */ extern int tps65010_set_low_pwr(unsigned mode); +/* tps65010_config_vregs1 parameter: + * value to be written to VREGS1 register + * Note: The complete register is written, set all bits you need + */ +extern int tps65010_config_vregs1(unsigned value); + +/* tps65013_set_low_pwr parameter: + * mode: ON or OFF + */ +extern int tps65013_set_low_pwr(unsigned mode); + #endif /* __ASM_ARCH_TPS65010_H */ diff --git a/include/asm-arm/arch-pxa/pxa-regs.h b/include/asm-arm/arch-pxa/pxa-regs.h index 39741d3..b5e54a9e 100644 --- a/include/asm-arm/arch-pxa/pxa-regs.h +++ b/include/asm-arm/arch-pxa/pxa-regs.h @@ -1296,6 
+1296,7 @@ #define GPIO111_MMCDAT3 111 /* MMC DAT3 (PXA27x) */ #define GPIO111_MMCCS1 111 /* MMC Chip Select 1 (PXA27x) */ #define GPIO112_MMCCMD 112 /* MMC CMD (PXA27x) */ +#define GPIO113_I2S_SYSCLK 113 /* I2S System Clock (PXA27x) */ #define GPIO113_AC97_RESET_N 113 /* AC97 NRESET on (PXA27x) */ /* GPIO alternate function mode & direction */ @@ -1428,6 +1429,7 @@ #define GPIO111_MMCDAT3_MD (111 | GPIO_ALT_FN_1_OUT) #define GPIO110_MMCCS1_MD (111 | GPIO_ALT_FN_1_OUT) #define GPIO112_MMCCMD_MD (112 | GPIO_ALT_FN_1_OUT) +#define GPIO113_I2S_SYSCLK_MD (113 | GPIO_ALT_FN_1_OUT) #define GPIO113_AC97_RESET_N_MD (113 | GPIO_ALT_FN_2_OUT) #define GPIO117_I2CSCL_MD (117 | GPIO_ALT_FN_1_OUT) #define GPIO118_I2CSDA_MD (118 | GPIO_ALT_FN_1_IN) diff --git a/include/asm-arm/arch-versatile/hardware.h b/include/asm-arm/arch-versatile/hardware.h index d5fb4a2..41c1bee 100644 --- a/include/asm-arm/arch-versatile/hardware.h +++ b/include/asm-arm/arch-versatile/hardware.h @@ -25,19 +25,26 @@ #include <asm/sizes.h> #include <asm/arch/platform.h> -// FIXME = PCI settings need to be fixed!!!!! - /* - * Similar to above, but for PCI addresses (memory, IO, Config and the - * V3 chip itself). WARNING: this has to mirror definitions in platform.h + * PCI space virtual addresses */ -#define PCI_MEMORY_VADDR 0xe8000000 -#define PCI_CONFIG_VADDR 0xec000000 -#define PCI_V3_VADDR 0xed000000 -#define PCI_IO_VADDR 0xee000000 +#define VERSATILE_PCI_VIRT_BASE 0xe8000000 +#define VERSATILE_PCI_CFG_VIRT_BASE 0xe9000000 + +#if 0 +#define VERSATILE_PCI_VIRT_MEM_BASE0 0xf4000000 +#define VERSATILE_PCI_VIRT_MEM_BASE1 0xf5000000 +#define VERSATILE_PCI_VIRT_MEM_BASE2 0xf6000000 + +#define PCIO_BASE VERSATILE_PCI_VIRT_MEM_BASE0 +#define PCIMEM_BASE VERSATILE_PCI_VIRT_MEM_BASE1 +#endif + +/* CIK guesswork */ +#define PCIBIOS_MIN_IO 0x44000000 +#define PCIBIOS_MIN_MEM 0x50000000 -#define PCIO_BASE PCI_IO_VADDR -#define PCIMEM_BASE PCI_MEMORY_VADDR +#define pcibios_assign_all_busses() 1 /* macro to get at IO space when running virtually */ #define IO_ADDRESS(x) (((x) & 0x0fffffff) + (((x) >> 4) & 0x0f000000) + 0xf0000000) diff --git a/include/asm-arm/arch-versatile/io.h b/include/asm-arm/arch-versatile/io.h index dbb7158..9f895bf 100644 --- a/include/asm-arm/arch-versatile/io.h +++ b/include/asm-arm/arch-versatile/io.h @@ -20,7 +20,7 @@ #ifndef __ASM_ARM_ARCH_IO_H #define __ASM_ARM_ARCH_IO_H -#define IO_SPACE_LIMIT 0xffff +#define IO_SPACE_LIMIT 0xffffffff #define __io(a) ((void __iomem *)(a)) #define __mem_pci(a) (a) diff --git a/include/asm-arm/arch-versatile/platform.h b/include/asm-arm/arch-versatile/platform.h index 2598d1f..cbdd9fb 100644 --- a/include/asm-arm/arch-versatile/platform.h +++ b/include/asm-arm/arch-versatile/platform.h @@ -76,7 +76,7 @@ #define VERSATILE_SYS_NVFLAGSSET_OFFSET 0x38 #define VERSATILE_SYS_NVFLAGSCLR_OFFSET 0x3C #define VERSATILE_SYS_RESETCTL_OFFSET 0x40 -#define VERSATILE_SYS_PICCTL_OFFSET 0x44 +#define VERSATILE_SYS_PCICTL_OFFSET 0x44 #define VERSATILE_SYS_MCI_OFFSET 0x48 #define VERSATILE_SYS_FLASH_OFFSET 0x4C #define VERSATILE_SYS_CLCD_OFFSET 0x50 @@ -114,7 +114,7 @@ #define VERSATILE_SYS_NVFLAGSSET (VERSATILE_SYS_BASE + VERSATILE_SYS_NVFLAGSSET_OFFSET) #define VERSATILE_SYS_NVFLAGSCLR (VERSATILE_SYS_BASE + VERSATILE_SYS_NVFLAGSCLR_OFFSET) #define VERSATILE_SYS_RESETCTL (VERSATILE_SYS_BASE + VERSATILE_SYS_RESETCTL_OFFSET) -#define VERSATILE_SYS_PICCTL (VERSATILE_SYS_BASE + VERSATILE_SYS_PICCTL_OFFSET) +#define VERSATILE_SYS_PCICTL (VERSATILE_SYS_BASE + VERSATILE_SYS_PCICTL_OFFSET) #define 
VERSATILE_SYS_MCI (VERSATILE_SYS_BASE + VERSATILE_SYS_MCI_OFFSET) #define VERSATILE_SYS_FLASH (VERSATILE_SYS_BASE + VERSATILE_SYS_FLASH_OFFSET) #define VERSATILE_SYS_CLCD (VERSATILE_SYS_BASE + VERSATILE_SYS_CLCD_OFFSET) @@ -225,7 +225,20 @@ #define VERSATILE_SSMC_BASE 0x20000000 /* SSMC */ #define VERSATILE_IB2_BASE 0x24000000 /* IB2 module */ #define VERSATILE_MBX_BASE 0x40000000 /* MBX */ + +/* PCI space */ #define VERSATILE_PCI_BASE 0x41000000 /* PCI Interface */ +#define VERSATILE_PCI_CFG_BASE 0x42000000 +#define VERSATILE_PCI_MEM_BASE0 0x44000000 +#define VERSATILE_PCI_MEM_BASE1 0x50000000 +#define VERSATILE_PCI_MEM_BASE2 0x60000000 +/* Sizes of above maps */ +#define VERSATILE_PCI_BASE_SIZE 0x01000000 +#define VERSATILE_PCI_CFG_BASE_SIZE 0x02000000 +#define VERSATILE_PCI_MEM_BASE0_SIZE 0x0c000000 /* 32Mb */ +#define VERSATILE_PCI_MEM_BASE1_SIZE 0x10000000 /* 256Mb */ +#define VERSATILE_PCI_MEM_BASE2_SIZE 0x10000000 /* 256Mb */ + #define VERSATILE_SDRAM67_BASE 0x70000000 /* SDRAM banks 6 and 7 */ #define VERSATILE_LT_BASE 0x80000000 /* Logic Tile expansion */ @@ -498,11 +511,17 @@ /* * IB2 Versatile/AB expansion board definitions */ -#define VERSATILE_IB2_CAMERA_BANK 0x24000000 -#define VERSATILE_IB2_KBD_DATAREG 0x25000000 -#define VERSATILE_IB2_IER 0x26000000 /* for VICINTSOURCE27 */ -#define VERSATILE_IB2_CTRL 0x27000000 -#define VERSATILE_IB2_STAT 0x27000004 +#define VERSATILE_IB2_CAMERA_BANK VERSATILE_IB2_BASE +#define VERSATILE_IB2_KBD_DATAREG (VERSATILE_IB2_BASE + 0x01000000) + +/* VICINTSOURCE27 */ +#define VERSATILE_IB2_INT_BASE (VERSATILE_IB2_BASE + 0x02000000) +#define VERSATILE_IB2_IER (VERSATILE_IB2_INT_BASE + 0) +#define VERSATILE_IB2_ISR (VERSATILE_IB2_INT_BASE + 4) + +#define VERSATILE_IB2_CTL_BASE (VERSATILE_IB2_BASE + 0x03000000) +#define VERSATILE_IB2_CTRL (VERSATILE_IB2_CTL_BASE + 0) +#define VERSATILE_IB2_STAT (VERSATILE_IB2_CTL_BASE + 4) #endif #endif diff --git a/include/asm-arm/cacheflush.h b/include/asm-arm/cacheflush.h index 09ffeed..035cdcf 100644 --- a/include/asm-arm/cacheflush.h +++ b/include/asm-arm/cacheflush.h @@ -16,6 +16,9 @@ #include <asm/mman.h> #include <asm/glue.h> +#include <asm/shmparam.h> + +#define CACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT) /* * Cache Model diff --git a/include/asm-arm/elf.h b/include/asm-arm/elf.h index cbceacbe..a1696ba 100644 --- a/include/asm-arm/elf.h +++ b/include/asm-arm/elf.h @@ -38,9 +38,9 @@ typedef struct user_fp elf_fpregset_t; */ #define ELF_CLASS ELFCLASS32 #ifdef __ARMEB__ -#define ELF_DATA ELFDATA2MSB; +#define ELF_DATA ELFDATA2MSB #else -#define ELF_DATA ELFDATA2LSB; +#define ELF_DATA ELFDATA2LSB #endif #define ELF_ARCH EM_ARM diff --git a/include/asm-arm/io.h b/include/asm-arm/io.h index 658ffa3..cc4b5f5 100644 --- a/include/asm-arm/io.h +++ b/include/asm-arm/io.h @@ -273,6 +273,35 @@ extern void __iounmap(void __iomem *addr); #endif /* + * io{read,write}{8,16,32} macros + */ +#ifndef ioread8 +#define ioread8(p) ({ unsigned int __v = __raw_readb(p); __v; }) +#define ioread16(p) ({ unsigned int __v = le16_to_cpu(__raw_readw(p)); __v; }) +#define ioread32(p) ({ unsigned int __v = le32_to_cpu(__raw_readl(p)); __v; }) + +#define iowrite8(v,p) __raw_writeb(v, p) +#define iowrite16(v,p) __raw_writew(cpu_to_le16(v), p) +#define iowrite32(v,p) __raw_writel(cpu_to_le32(v), p) + +#define ioread8_rep(p,d,c) __raw_readsb(p,d,c) +#define ioread16_rep(p,d,c) __raw_readsw(p,d,c) +#define ioread32_rep(p,d,c) __raw_readsl(p,d,c) + +#define iowrite8_rep(p,s,c) __raw_writesb(p,s,c) +#define iowrite16_rep(p,s,c) 
__raw_writesw(p,s,c) +#define iowrite32_rep(p,s,c) __raw_writesl(p,s,c) + +extern void __iomem *ioport_map(unsigned long port, unsigned int nr); +extern void ioport_unmap(void __iomem *addr); +#endif + +struct pci_dev; + +extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen); +extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr); + +/* * can the hardware map this into one segment or not, given no other * constraints. */ diff --git a/include/asm-arm/mach/time.h b/include/asm-arm/mach/time.h index 5cf4fd6..047980a 100644 --- a/include/asm-arm/mach/time.h +++ b/include/asm-arm/mach/time.h @@ -39,8 +39,29 @@ struct sys_timer { void (*suspend)(void); void (*resume)(void); unsigned long (*offset)(void); + +#ifdef CONFIG_NO_IDLE_HZ + struct dyn_tick_timer *dyn_tick; +#endif +}; + +#ifdef CONFIG_NO_IDLE_HZ + +#define DYN_TICK_SKIPPING (1 << 2) +#define DYN_TICK_ENABLED (1 << 1) +#define DYN_TICK_SUITABLE (1 << 0) + +struct dyn_tick_timer { + unsigned int state; /* Current state */ + int (*enable)(void); /* Enables dynamic tick */ + int (*disable)(void); /* Disables dynamic tick */ + void (*reprogram)(unsigned long); /* Reprograms the timer */ + int (*handler)(int, void *, struct pt_regs *); }; +void timer_dyn_reprogram(void); +#endif + extern struct sys_timer *system_timer; extern void timer_tick(struct pt_regs *); diff --git a/include/asm-arm/pgalloc.h b/include/asm-arm/pgalloc.h index e814f81..bc18ff4 100644 --- a/include/asm-arm/pgalloc.h +++ b/include/asm-arm/pgalloc.h @@ -89,6 +89,13 @@ static inline void pte_free(struct page *pte) __free_page(pte); } +static inline void __pmd_populate(pmd_t *pmdp, unsigned long pmdval) +{ + pmdp[0] = __pmd(pmdval); + pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t)); + flush_pmd_entry(pmdp); +} + /* * Populate the pmdp entry with a pointer to the pte. This pmd is part * of the mm address space. 
@@ -99,32 +106,19 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) { unsigned long pte_ptr = (unsigned long)ptep; - unsigned long pmdval; - - BUG_ON(mm != &init_mm); /* * The pmd must be loaded with the physical * address of the PTE table */ pte_ptr -= PTRS_PER_PTE * sizeof(void *); - pmdval = __pa(pte_ptr) | _PAGE_KERNEL_TABLE; - pmdp[0] = __pmd(pmdval); - pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t)); - flush_pmd_entry(pmdp); + __pmd_populate(pmdp, __pa(pte_ptr) | _PAGE_KERNEL_TABLE); } static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep) { - unsigned long pmdval; - - BUG_ON(mm == &init_mm); - - pmdval = page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE; - pmdp[0] = __pmd(pmdval); - pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t)); - flush_pmd_entry(pmdp); + __pmd_populate(pmdp, page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE); } #endif diff --git a/include/asm-arm/signal.h b/include/asm-arm/signal.h index 46e69ae..760f6e65 100644 --- a/include/asm-arm/signal.h +++ b/include/asm-arm/signal.h @@ -114,6 +114,7 @@ typedef unsigned long sigset_t; #define SIGSTKSZ 8192 #ifdef __KERNEL__ +#define SA_TIMER 0x40000000 #define SA_IRQNOMASK 0x08000000 #endif diff --git a/include/asm-arm/smp.h b/include/asm-arm/smp.h index f21fd8f..6c6c60a 100644 --- a/include/asm-arm/smp.h +++ b/include/asm-arm/smp.h @@ -21,7 +21,7 @@ # error "<asm-arm/smp.h> included in non-SMP build" #endif -#define smp_processor_id() (current_thread_info()->cpu) +#define raw_smp_processor_id() (current_thread_info()->cpu) extern cpumask_t cpu_present_mask; #define cpu_possible_map cpu_present_mask @@ -55,4 +55,18 @@ extern void smp_cross_call(cpumask_t callmap); */ extern int boot_secondary(unsigned int cpu, struct task_struct *); +/* + * Perform platform specific initialisation of the specified CPU. + */ +extern void platform_secondary_init(unsigned int cpu); + +/* + * Initial data for bringing up a secondary CPU. + */ +struct secondary_data { + unsigned long pgdir; + void *stack; +}; +extern struct secondary_data secondary_data; + #endif /* ifndef __ASM_ARM_SMP_H */ diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h index b13a8da..3d0d286 100644 --- a/include/asm-arm/system.h +++ b/include/asm-arm/system.h @@ -104,6 +104,7 @@ extern void show_pte(struct mm_struct *mm, unsigned long addr); extern void __show_regs(struct pt_regs *); extern int cpu_architecture(void); +extern void cpu_init(void); #define set_cr(x) \ __asm__ __volatile__( \ @@ -144,34 +145,12 @@ extern unsigned int user_debug; #define set_wmb(var, value) do { var = value; wmb(); } while (0) #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t"); -#ifdef CONFIG_SMP /* - * Define our own context switch locking. This allows us to enable - * interrupts over the context switch, otherwise we end up with high - * interrupt latency. The real problem area is switch_mm() which may - * do a full cache flush. + * switch_mm() may do a full cache flush over the context switch, + * so enable interrupts over the context switch to avoid high + * latency. */ -#define prepare_arch_switch(rq,next) \ -do { \ - spin_lock(&(next)->switch_lock); \ - spin_unlock_irq(&(rq)->lock); \ -} while (0) - -#define finish_arch_switch(rq,prev) \ - spin_unlock(&(prev)->switch_lock) - -#define task_running(rq,p) \ - ((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock)) -#else -/* - * Our UP-case is more simple, but we assume knowledge of how - * spin_unlock_irq() and friends are implemented. 
This avoids - * us needlessly decrementing and incrementing the preempt count. - */ -#define prepare_arch_switch(rq,next) local_irq_enable() -#define finish_arch_switch(rq,prev) spin_unlock(&(rq)->lock) -#define task_running(rq,p) ((rq)->curr == (p)) -#endif +#define __ARCH_WANT_INTERRUPTS_ON_CTXSW /* * switch_to(prev, next) should switch from task `prev' to `next' @@ -307,7 +286,7 @@ do { \ ({ \ unsigned long flags; \ local_save_flags(flags); \ - flags & PSR_I_BIT; \ + (int)(flags & PSR_I_BIT); \ }) #ifdef CONFIG_SMP diff --git a/include/asm-arm/thread_info.h b/include/asm-arm/thread_info.h index 66c585c..8252a4c 100644 --- a/include/asm-arm/thread_info.h +++ b/include/asm-arm/thread_info.h @@ -49,7 +49,7 @@ struct cpu_context_save { */ struct thread_info { unsigned long flags; /* low level flags */ - __s32 preempt_count; /* 0 => preemptable, <0 => bug */ + int preempt_count; /* 0 => preemptable, <0 => bug */ mm_segment_t addr_limit; /* address limit */ struct task_struct *task; /* main task structure */ struct exec_domain *exec_domain; /* execution domain */ diff --git a/include/asm-arm26/elf.h b/include/asm-arm26/elf.h index 8b14947..5a47fdb 100644 --- a/include/asm-arm26/elf.h +++ b/include/asm-arm26/elf.h @@ -36,7 +36,7 @@ typedef struct { void *null; } elf_fpregset_t; * These are used to set parameters in the core dumps. */ #define ELF_CLASS ELFCLASS32 -#define ELF_DATA ELFDATA2LSB; +#define ELF_DATA ELFDATA2LSB #define ELF_ARCH EM_ARM #define USE_ELF_CORE_DUMP diff --git a/include/asm-arm26/signal.h b/include/asm-arm26/signal.h index dedb292..37ad253 100644 --- a/include/asm-arm26/signal.h +++ b/include/asm-arm26/signal.h @@ -166,9 +166,6 @@ typedef struct sigaltstack { #include <asm/sigcontext.h> #define sigmask(sig) (1UL << ((sig) - 1)) -//FIXME!!! 
-//#define HAVE_ARCH_GET_SIGNAL_TO_DELIVER - #endif diff --git a/include/asm-arm26/thread_info.h b/include/asm-arm26/thread_info.h index 50f41b5..aff3e56 100644 --- a/include/asm-arm26/thread_info.h +++ b/include/asm-arm26/thread_info.h @@ -44,7 +44,7 @@ struct cpu_context_save { */ struct thread_info { unsigned long flags; /* low level flags */ - __s32 preempt_count; /* 0 => preemptable, <0 => bug */ + int preempt_count; /* 0 => preemptable, <0 => bug */ mm_segment_t addr_limit; /* address limit */ struct task_struct *task; /* main task structure */ struct exec_domain *exec_domain; /* execution domain */ diff --git a/include/asm-cris/thread_info.h b/include/asm-cris/thread_info.h index 53193fe..5ba4b78 100644 --- a/include/asm-cris/thread_info.h +++ b/include/asm-cris/thread_info.h @@ -31,7 +31,7 @@ struct thread_info { struct exec_domain *exec_domain; /* execution domain */ unsigned long flags; /* low level flags */ __u32 cpu; /* current CPU */ - __s32 preempt_count; /* 0 => preemptable, <0 => BUG */ + int preempt_count; /* 0 => preemptable, <0 => BUG */ mm_segment_t addr_limit; /* thread address space: 0-0xBFFFFFFF for user-thead diff --git a/include/asm-frv/thread_info.h b/include/asm-frv/thread_info.h index b80a97f..c8cba78 100644 --- a/include/asm-frv/thread_info.h +++ b/include/asm-frv/thread_info.h @@ -33,7 +33,7 @@ struct thread_info { unsigned long flags; /* low level flags */ unsigned long status; /* thread-synchronous flags */ __u32 cpu; /* current CPU */ - __s32 preempt_count; /* 0 => preemptable, <0 => BUG */ + int preempt_count; /* 0 => preemptable, <0 => BUG */ mm_segment_t addr_limit; /* thread address space: 0-0xBFFFFFFF for user-thead diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h index 3b709b8..9044aeb 100644 --- a/include/asm-generic/percpu.h +++ b/include/asm-generic/percpu.h @@ -29,7 +29,7 @@ do { \ #define DEFINE_PER_CPU(type, name) \ __typeof__(type) per_cpu__##name -#define per_cpu(var, cpu) (*((void)cpu, &per_cpu__##var)) +#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var)) #define __get_cpu_var(var) per_cpu__##var #endif /* SMP */ diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 1f4ec7b..f405935 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -125,6 +125,9 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres #ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY #define page_test_and_clear_dirty(page) (0) +#define pte_maybe_dirty(pte) pte_dirty(pte) +#else +#define pte_maybe_dirty(pte) (1) #endif #ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h index ec96e8b..5d9d70c 100644 --- a/include/asm-generic/topology.h +++ b/include/asm-generic/topology.h @@ -41,8 +41,15 @@ #ifndef node_to_first_cpu #define node_to_first_cpu(node) (0) #endif +#ifndef pcibus_to_node +#define pcibus_to_node(node) (-1) +#endif + #ifndef pcibus_to_cpumask -#define pcibus_to_cpumask(bus) (cpu_online_map) +#define pcibus_to_cpumask(bus) (pcibus_to_node(bus) == -1 ? 
\ + CPU_MASK_ALL : \ + node_to_cpumask(pcibus_to_node(bus)) \ + ) #endif #endif /* _ASM_GENERIC_TOPOLOGY_H */ diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 99cef06..b3bb326 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -73,7 +73,7 @@ } #define SECURITY_INIT \ - .security_initcall.init : { \ + .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__security_initcall_start) = .; \ *(.security_initcall.init) \ VMLINUX_SYMBOL(__security_initcall_end) = .; \ diff --git a/include/asm-h8300/kmap_types.h b/include/asm-h8300/kmap_types.h index 82431ed..1ec8a34 100644 --- a/include/asm-h8300/kmap_types.h +++ b/include/asm-h8300/kmap_types.h @@ -1,5 +1,5 @@ -#ifndef _ASM_KMAP_TYPES_H -#define _ASM_KMAP_TYPES_H +#ifndef _ASM_H8300_KMAP_TYPES_H +#define _ASM_H8300_KMAP_TYPES_H enum km_type { KM_BOUNCE_READ, @@ -13,6 +13,8 @@ enum km_type { KM_PTE1, KM_IRQ0, KM_IRQ1, + KM_SOFTIRQ0, + KM_SOFTIRQ1, KM_TYPE_NR }; diff --git a/include/asm-h8300/mman.h b/include/asm-h8300/mman.h index abe0885..63f727a 100644 --- a/include/asm-h8300/mman.h +++ b/include/asm-h8300/mman.h @@ -4,6 +4,7 @@ #define PROT_READ 0x1 /* page can be read */ #define PROT_WRITE 0x2 /* page can be written */ #define PROT_EXEC 0x4 /* page can be executed */ +#define PROT_SEM 0x8 /* page may be used for atomic ops */ #define PROT_NONE 0x0 /* page can not be accessed */ #define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ #define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ @@ -19,6 +20,8 @@ #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ #define MAP_LOCKED 0x2000 /* pages are locked */ #define MAP_NORESERVE 0x4000 /* don't check for reservations */ +#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ +#define MAP_NONBLOCK 0x10000 /* do not block on IO */ #define MS_ASYNC 1 /* sync memory asynchronously */ #define MS_INVALIDATE 2 /* invalidate the caches */ diff --git a/include/asm-h8300/thread_info.h b/include/asm-h8300/thread_info.h index b07c934..bfcc755 100644 --- a/include/asm-h8300/thread_info.h +++ b/include/asm-h8300/thread_info.h @@ -23,7 +23,7 @@ struct thread_info { struct exec_domain *exec_domain; /* execution domain */ unsigned long flags; /* low level flags */ int cpu; /* cpu we're on */ - int preempt_count; /* 0 => preemptable, <0 => BUG*/ + int preempt_count; /* 0 => preemptable, <0 => BUG */ struct restart_block restart_block; }; diff --git a/include/asm-i386/agp.h b/include/asm-i386/agp.h index a917ff5..b82f5f3 100644 --- a/include/asm-i386/agp.h +++ b/include/asm-i386/agp.h @@ -21,4 +21,14 @@ int unmap_page_from_agp(struct page *page); worth it. Would need a page for it. */ #define flush_agp_cache() asm volatile("wbinvd":::"memory") +/* Convert a physical address to an address suitable for the GART. */ +#define phys_to_gart(x) (x) +#define gart_to_phys(x) (x) + +/* GATT allocation. Returns/accepts GATT kernel virtual address. 
*/ +#define alloc_gatt_pages(order) \ + ((char *)__get_free_pages(GFP_KERNEL, (order))) +#define free_gatt_pages(table, order) \ + free_pages((unsigned long)(table), (order)) + #endif diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h index a5810cf..6a1b188 100644 --- a/include/asm-i386/apic.h +++ b/include/asm-i386/apic.h @@ -5,6 +5,7 @@ #include <linux/pm.h> #include <asm/fixmap.h> #include <asm/apicdef.h> +#include <asm/processor.h> #include <asm/system.h> #define Dprintk(x...) @@ -16,8 +17,20 @@ #define APIC_VERBOSE 1 #define APIC_DEBUG 2 +extern int enable_local_apic; extern int apic_verbosity; +static inline void lapic_disable(void) +{ + enable_local_apic = -1; + clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability); +} + +static inline void lapic_enable(void) +{ + enable_local_apic = 1; +} + /* * Define the default level of output to be very little * This can be turned up by using apic=verbose for more @@ -87,7 +100,7 @@ extern void (*wait_timer_tick)(void); extern int get_maxlvt(void); extern void clear_local_APIC(void); extern void connect_bsp_APIC (void); -extern void disconnect_bsp_APIC (void); +extern void disconnect_bsp_APIC (int virt_wire_setup); extern void disable_local_APIC (void); extern void lapic_shutdown (void); extern int verify_local_APIC (void); diff --git a/include/asm-i386/apicdef.h b/include/asm-i386/apicdef.h index c689554..0fed5e3 100644 --- a/include/asm-i386/apicdef.h +++ b/include/asm-i386/apicdef.h @@ -86,11 +86,12 @@ #define APIC_LVT_REMOTE_IRR (1<<14) #define APIC_INPUT_POLARITY (1<<13) #define APIC_SEND_PENDING (1<<12) +#define APIC_MODE_MASK 0x700 #define GET_APIC_DELIVERY_MODE(x) (((x)>>8)&0x7) #define SET_APIC_DELIVERY_MODE(x,y) (((x)&~0x700)|((y)<<8)) #define APIC_MODE_FIXED 0x0 #define APIC_MODE_NMI 0x4 -#define APIC_MODE_EXINT 0x7 +#define APIC_MODE_EXTINT 0x7 #define APIC_LVT1 0x360 #define APIC_LVTERR 0x370 #define APIC_TMICT 0x380 diff --git a/include/asm-i386/checksum.h b/include/asm-i386/checksum.h index 6413420..f949e44 100644 --- a/include/asm-i386/checksum.h +++ b/include/asm-i386/checksum.h @@ -3,6 +3,8 @@ #include <linux/in6.h> +#include <asm/uaccess.h> + /* * computes the checksum of a memory block at buff, length len, * and adds in "sum" (32-bit) diff --git a/include/asm-i386/cpu.h b/include/asm-i386/cpu.h index 002740b..e7252c2 100644 --- a/include/asm-i386/cpu.h +++ b/include/asm-i386/cpu.h @@ -5,6 +5,7 @@ #include <linux/cpu.h> #include <linux/topology.h> #include <linux/nodemask.h> +#include <linux/percpu.h> #include <asm/node.h> @@ -16,4 +17,5 @@ extern int arch_register_cpu(int num); extern void arch_unregister_cpu(int); #endif +DECLARE_PER_CPU(int, cpu_state); #endif /* _ASM_I386_CPU_H_ */ diff --git a/include/asm-i386/genapic.h b/include/asm-i386/genapic.h index fc813b2..b3783a3 100644 --- a/include/asm-i386/genapic.h +++ b/include/asm-i386/genapic.h @@ -78,7 +78,6 @@ struct genapic { .int_delivery_mode = INT_DELIVERY_MODE, \ .int_dest_mode = INT_DEST_MODE, \ .no_balance_irq = NO_BALANCE_IRQ, \ - .no_ioapic_check = NO_IOAPIC_CHECK, \ .ESR_DISABLE = esr_disable, \ .apic_destination_logical = APIC_DEST_LOGICAL, \ APICFUNC(apic_id_registered), \ diff --git a/include/asm-i386/highmem.h b/include/asm-i386/highmem.h index 1df42bf..0fd3313 100644 --- a/include/asm-i386/highmem.h +++ b/include/asm-i386/highmem.h @@ -70,6 +70,7 @@ void *kmap(struct page *page); void kunmap(struct page *page); void *kmap_atomic(struct page *page, enum km_type type); void kunmap_atomic(void *kvaddr, enum km_type type); +void 
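The apicdef.h hunk above adds APIC_MODE_MASK and renames APIC_MODE_EXINT to APIC_MODE_EXTINT; the delivery mode sits in bits 8-10 of an LVT entry. A standalone check of the accessor macros on a made-up register value (the LVT value is hypothetical, chosen only for illustration):

        #include <stdio.h>

        #define APIC_MODE_MASK              0x700
        #define GET_APIC_DELIVERY_MODE(x)   (((x) >> 8) & 0x7)
        #define SET_APIC_DELIVERY_MODE(x,y) (((x) & ~0x700) | ((y) << 8))
        #define APIC_MODE_FIXED             0x0
        #define APIC_MODE_EXTINT            0x7

        int main(void)
        {
                unsigned int lvt = 0x00010022;          /* hypothetical LVT value */

                printf("mode = %x\n", GET_APIC_DELIVERY_MODE(lvt));    /* 0: fixed */
                lvt = SET_APIC_DELIVERY_MODE(lvt, APIC_MODE_EXTINT);
                printf("lvt  = %08x, mode = %x\n",
                       lvt, GET_APIC_DELIVERY_MODE(lvt));              /* 00010722, 7 */
                return 0;
        }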
*kmap_atomic_pfn(unsigned long pfn, enum km_type type); struct page *kmap_atomic_to_page(void *ptr); #define flush_cache_kmaps() do { } while (0) diff --git a/include/asm-i386/irq.h b/include/asm-i386/irq.h index 05b9e61..270f198 100644 --- a/include/asm-i386/irq.h +++ b/include/asm-i386/irq.h @@ -29,13 +29,19 @@ extern void release_vm86_irqs(struct task_struct *); #ifdef CONFIG_4KSTACKS extern void irq_ctx_init(int cpu); + extern void irq_ctx_exit(int cpu); # define __ARCH_HAS_DO_SOFTIRQ #else # define irq_ctx_init(cpu) do { } while (0) +# define irq_ctx_exit(cpu) do { } while (0) #endif #ifdef CONFIG_IRQBALANCE extern int irqbalance_disable(char *str); #endif +#ifdef CONFIG_HOTPLUG_CPU +extern void fixup_irqs(cpumask_t map); +#endif + #endif /* _ASM_IRQ_H */ diff --git a/include/asm-i386/kdebug.h b/include/asm-i386/kdebug.h index de6498b..b3f8d5f 100644 --- a/include/asm-i386/kdebug.h +++ b/include/asm-i386/kdebug.h @@ -18,7 +18,7 @@ struct die_args { }; /* Note - you should never unregister because that can race with NMIs. - If you really want to do it first unregister - then synchronize_kernel - then free. + If you really want to do it first unregister - then synchronize_sched - then free. */ int register_die_notifier(struct notifier_block *nb); extern struct notifier_block *i386die_chain; diff --git a/include/asm-i386/kexec.h b/include/asm-i386/kexec.h new file mode 100644 index 0000000..6ed2a03 --- /dev/null +++ b/include/asm-i386/kexec.h @@ -0,0 +1,33 @@ +#ifndef _I386_KEXEC_H +#define _I386_KEXEC_H + +#include <asm/fixmap.h> + +/* + * KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return. + * I.e. Maximum page that is mapped directly into kernel memory, + * and kmap is not required. + * + * Someone correct me if FIXADDR_START - PAGEOFFSET is not the correct + * calculation for the amount of memory directly mappable into the + * kernel memory space. 
+ */ + +/* Maximum physical address we can use pages from */ +#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) +/* Maximum address we can reach in physical address mode */ +#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) +/* Maximum address we can use for the control code buffer */ +#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE + +#define KEXEC_CONTROL_CODE_SIZE 4096 + +/* The native architecture */ +#define KEXEC_ARCH KEXEC_ARCH_386 + +#define MAX_NOTE_BYTES 1024 +typedef u32 note_buf_t[MAX_NOTE_BYTES/4]; + +extern note_buf_t crash_notes[]; + +#endif /* _I386_KEXEC_H */ diff --git a/include/asm-i386/kprobes.h b/include/asm-i386/kprobes.h index 4092f68..8b6d3a9 100644 --- a/include/asm-i386/kprobes.h +++ b/include/asm-i386/kprobes.h @@ -39,6 +39,9 @@ typedef u8 kprobe_opcode_t; : (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) #define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry +#define ARCH_SUPPORTS_KRETPROBES + +void kretprobe_trampoline(void); /* Architecture specific copy of original instruction*/ struct arch_specific_insn { diff --git a/include/asm-i386/linkage.h b/include/asm-i386/linkage.h index af3d857..f4a6eba 100644 --- a/include/asm-i386/linkage.h +++ b/include/asm-i386/linkage.h @@ -5,9 +5,7 @@ #define FASTCALL(x) x __attribute__((regparm(3))) #define fastcall __attribute__((regparm(3))) -#ifdef CONFIG_REGPARM -# define prevent_tail_call(ret) __asm__ ("" : "=r" (ret) : "0" (ret)) -#endif +#define prevent_tail_call(ret) __asm__ ("" : "=r" (ret) : "0" (ret)) #ifdef CONFIG_X86_ALIGNMENT_16 #define __ALIGN .align 16,0x90 diff --git a/include/asm-i386/mach-bigsmp/mach_apic.h b/include/asm-i386/mach-bigsmp/mach_apic.h index 2339868..ba936d4 100644 --- a/include/asm-i386/mach-bigsmp/mach_apic.h +++ b/include/asm-i386/mach-bigsmp/mach_apic.h @@ -14,8 +14,6 @@ #define NO_BALANCE_IRQ (1) #define esr_disable (1) -#define NO_IOAPIC_CHECK (0) - static inline int apic_id_registered(void) { return (1); diff --git a/include/asm-i386/mach-default/mach_apic.h b/include/asm-i386/mach-default/mach_apic.h index 627f1cd0..3ef6292 100644 --- a/include/asm-i386/mach-default/mach_apic.h +++ b/include/asm-i386/mach-default/mach_apic.h @@ -19,8 +19,6 @@ static inline cpumask_t target_cpus(void) #define NO_BALANCE_IRQ (0) #define esr_disable (0) -#define NO_IOAPIC_CHECK (0) - #define INT_DELIVERY_MODE dest_LowestPrio #define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */ diff --git a/include/asm-i386/mach-default/mach_ipi.h b/include/asm-i386/mach-default/mach_ipi.h index 6f2b17a..cc756a6 100644 --- a/include/asm-i386/mach-default/mach_ipi.h +++ b/include/asm-i386/mach-default/mach_ipi.h @@ -4,11 +4,34 @@ void send_IPI_mask_bitmask(cpumask_t mask, int vector); void __send_IPI_shortcut(unsigned int shortcut, int vector); +extern int no_broadcast; + static inline void send_IPI_mask(cpumask_t mask, int vector) { send_IPI_mask_bitmask(mask, vector); } +static inline void __local_send_IPI_allbutself(int vector) +{ + if (no_broadcast) { + cpumask_t mask = cpu_online_map; + int this_cpu = get_cpu(); + + cpu_clear(this_cpu, mask); + send_IPI_mask(mask, vector); + put_cpu(); + } else + __send_IPI_shortcut(APIC_DEST_ALLBUT, vector); +} + +static inline void __local_send_IPI_all(int vector) +{ + if (no_broadcast) + send_IPI_mask(cpu_online_map, vector); + else + __send_IPI_shortcut(APIC_DEST_ALLINC, vector); +} + static inline void send_IPI_allbutself(int vector) { /* @@ -18,13 +41,13 @@ static inline void send_IPI_allbutself(int vector) if (!(num_online_cpus() > 1)) return; - 
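The mach-default/mach_ipi.h change above avoids the APIC broadcast shortcuts when no_broadcast is set, sending to an explicit mask of online CPUs minus the sender instead. A standalone sketch of that decision, with a plain bitmask standing in for cpumask_t (all names below are illustrative):

        #include <stdio.h>

        static unsigned long cpu_online_map = 0x0ful;   /* CPUs 0-3 online */
        static int no_broadcast = 1;
        static int this_cpu = 2;

        static void send_to_mask(unsigned long mask, int vector)
        {
                printf("vector %d -> mask %02lx\n", vector, mask);
        }

        static void send_shortcut_allbut(int vector)
        {
                printf("vector %d -> ALLBUT shortcut\n", vector);
        }

        /* same shape as __local_send_IPI_allbutself(): mask out the sender
         * when hardware broadcast is not wanted, else use the shortcut */
        static void send_allbutself(int vector)
        {
                if (no_broadcast)
                        send_to_mask(cpu_online_map & ~(1ul << this_cpu), vector);
                else
                        send_shortcut_allbut(vector);
        }

        int main(void)
        {
                send_allbutself(251);   /* prints: vector 251 -> mask 0b */
                return 0;
        }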
__send_IPI_shortcut(APIC_DEST_ALLBUT, vector); + __local_send_IPI_allbutself(vector); return; } static inline void send_IPI_all(int vector) { - __send_IPI_shortcut(APIC_DEST_ALLINC, vector); + __local_send_IPI_all(vector); } #endif /* __ASM_MACH_IPI_H */ diff --git a/include/asm-i386/mach-es7000/mach_apic.h b/include/asm-i386/mach-es7000/mach_apic.h index ceab2c4..b5f3f0d 100644 --- a/include/asm-i386/mach-es7000/mach_apic.h +++ b/include/asm-i386/mach-es7000/mach_apic.h @@ -38,8 +38,6 @@ static inline cpumask_t target_cpus(void) #define WAKE_SECONDARY_VIA_INIT #endif -#define NO_IOAPIC_CHECK (1) - static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) { return 0; diff --git a/include/asm-i386/mach-generic/mach_apic.h b/include/asm-i386/mach-generic/mach_apic.h index ab36d02..b13767a 100644 --- a/include/asm-i386/mach-generic/mach_apic.h +++ b/include/asm-i386/mach-generic/mach_apic.h @@ -5,7 +5,6 @@ #define esr_disable (genapic->ESR_DISABLE) #define NO_BALANCE_IRQ (genapic->no_balance_irq) -#define NO_IOAPIC_CHECK (genapic->no_ioapic_check) #define INT_DELIVERY_MODE (genapic->int_delivery_mode) #define INT_DEST_MODE (genapic->int_dest_mode) #undef APIC_DEST_LOGICAL diff --git a/include/asm-i386/mach-numaq/mach_apic.h b/include/asm-i386/mach-numaq/mach_apic.h index e1a0449..9d15809 100644 --- a/include/asm-i386/mach-numaq/mach_apic.h +++ b/include/asm-i386/mach-numaq/mach_apic.h @@ -17,8 +17,6 @@ static inline cpumask_t target_cpus(void) #define NO_BALANCE_IRQ (1) #define esr_disable (1) -#define NO_IOAPIC_CHECK (0) - #define INT_DELIVERY_MODE dest_LowestPrio #define INT_DEST_MODE 0 /* physical delivery on LOCAL quad */ diff --git a/include/asm-i386/mach-numaq/mach_ipi.h b/include/asm-i386/mach-numaq/mach_ipi.h index 1b46fd3..c604448 100644 --- a/include/asm-i386/mach-numaq/mach_ipi.h +++ b/include/asm-i386/mach-numaq/mach_ipi.h @@ -1,7 +1,7 @@ #ifndef __ASM_MACH_IPI_H #define __ASM_MACH_IPI_H -inline void send_IPI_mask_sequence(cpumask_t, int vector); +void send_IPI_mask_sequence(cpumask_t, int vector); static inline void send_IPI_mask(cpumask_t mask, int vector) { diff --git a/include/asm-i386/mach-summit/mach_apic.h b/include/asm-i386/mach-summit/mach_apic.h index 74e9cbc..3d6d129 100644 --- a/include/asm-i386/mach-summit/mach_apic.h +++ b/include/asm-i386/mach-summit/mach_apic.h @@ -7,8 +7,6 @@ #define esr_disable (1) #define NO_BALANCE_IRQ (0) -#define NO_IOAPIC_CHECK (1) /* Don't check I/O APIC ID for xAPIC */ - /* In clustered mode, the high nibble of APIC ID is a cluster number. * The low nibble is a 4-bit bitmap. 
*/ #define XAPIC_DEST_CPUS_SHIFT 4 diff --git a/include/asm-i386/mach-visws/mach_apic.h b/include/asm-i386/mach-visws/mach_apic.h index 4e6cdfb..de438c71 100644 --- a/include/asm-i386/mach-visws/mach_apic.h +++ b/include/asm-i386/mach-visws/mach_apic.h @@ -9,8 +9,6 @@ #define no_balance_irq (0) #define esr_disable (0) -#define NO_IOAPIC_CHECK (0) - #define INT_DELIVERY_MODE dest_LowestPrio #define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */ diff --git a/include/asm-i386/mmzone.h b/include/asm-i386/mmzone.h index 13830ae..33ce5d3 100644 --- a/include/asm-i386/mmzone.h +++ b/include/asm-i386/mmzone.h @@ -8,7 +8,9 @@ #include <asm/smp.h> -#ifdef CONFIG_DISCONTIGMEM +#if CONFIG_NUMA +extern struct pglist_data *node_data[]; +#define NODE_DATA(nid) (node_data[nid]) #ifdef CONFIG_NUMA #ifdef CONFIG_X86_NUMAQ @@ -21,8 +23,28 @@ #define get_zholes_size(n) (0) #endif /* CONFIG_NUMA */ -extern struct pglist_data *node_data[]; -#define NODE_DATA(nid) (node_data[nid]) +extern int get_memcfg_numa_flat(void ); +/* + * This allows any one NUMA architecture to be compiled + * for, and still fall back to the flat function if it + * fails. + */ +static inline void get_memcfg_numa(void) +{ +#ifdef CONFIG_X86_NUMAQ + if (get_memcfg_numaq()) + return; +#elif CONFIG_ACPI_SRAT + if (get_memcfg_from_srat()) + return; +#endif + + get_memcfg_numa_flat(); +} + +#endif /* CONFIG_NUMA */ + +#ifdef CONFIG_DISCONTIGMEM /* * generic node memory support, the following assumptions apply: @@ -48,26 +70,6 @@ static inline int pfn_to_nid(unsigned long pfn) #endif } -/* - * Following are macros that are specific to this numa platform. - */ -#define reserve_bootmem(addr, size) \ - reserve_bootmem_node(NODE_DATA(0), (addr), (size)) -#define alloc_bootmem(x) \ - __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) -#define alloc_bootmem_low(x) \ - __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, 0) -#define alloc_bootmem_pages(x) \ - __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) -#define alloc_bootmem_low_pages(x) \ - __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0) -#define alloc_bootmem_node(ignore, x) \ - __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) -#define alloc_bootmem_pages_node(ignore, x) \ - __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) -#define alloc_bootmem_low_pages_node(ignore, x) \ - __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0) - #define node_localnr(pfn, nid) ((pfn) - node_data[nid]->node_start_pfn) /* @@ -79,7 +81,6 @@ static inline int pfn_to_nid(unsigned long pfn) */ #define kvaddr_to_nid(kaddr) pfn_to_nid(__pa(kaddr) >> PAGE_SHIFT) -#define node_mem_map(nid) (NODE_DATA(nid)->node_mem_map) #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) #define node_end_pfn(nid) \ ({ \ @@ -100,7 +101,7 @@ static inline int pfn_to_nid(unsigned long pfn) ({ \ unsigned long __pfn = pfn; \ int __node = pfn_to_nid(__pfn); \ - &node_mem_map(__node)[node_localnr(__pfn,__node)]; \ + &NODE_DATA(__node)->node_mem_map[node_localnr(__pfn,__node)]; \ }) #define page_to_pfn(pg) \ @@ -122,26 +123,34 @@ static inline int pfn_valid(int pfn) return (pfn < node_end_pfn(nid)); return 0; } -#endif +#endif /* CONFIG_X86_NUMAQ */ + +#endif /* CONFIG_DISCONTIGMEM */ + +#ifdef CONFIG_NEED_MULTIPLE_NODES -extern int get_memcfg_numa_flat(void ); /* - * This allows any one NUMA architecture to be compiled - * for, and still fall back to the flat function if it - * fails. 
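The i386 mmzone.h rework above keeps the pattern where one NUMA detection method is compiled in and get_memcfg_numa() falls back to the flat layout if it fails. A standalone sketch of that "try the specific probe, fall back to flat" shape (the probe functions are stand-ins, not the kernel ones):

        #include <stdio.h>

        /* stand-ins: return nonzero on success, in the spirit of
         * get_memcfg_numaq()/get_memcfg_from_srat() */
        static int probe_platform_numa(void) { return 0; }   /* pretend it fails */
        static int build_flat_memcfg(void)
        {
                printf("using flat memory map\n");
                return 1;
        }

        static void get_memcfg(void)
        {
                if (probe_platform_numa())
                        return;
                build_flat_memcfg();    /* always-available fallback */
        }

        int main(void)
        {
                get_memcfg();
                return 0;
        }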
+ * Following are macros that are specific to this numa platform. */ -static inline void get_memcfg_numa(void) -{ -#ifdef CONFIG_X86_NUMAQ - if (get_memcfg_numaq()) - return; -#elif CONFIG_ACPI_SRAT - if (get_memcfg_from_srat()) - return; -#endif +#define reserve_bootmem(addr, size) \ + reserve_bootmem_node(NODE_DATA(0), (addr), (size)) +#define alloc_bootmem(x) \ + __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) +#define alloc_bootmem_low(x) \ + __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, 0) +#define alloc_bootmem_pages(x) \ + __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) +#define alloc_bootmem_low_pages(x) \ + __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0) +#define alloc_bootmem_node(ignore, x) \ + __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) +#define alloc_bootmem_pages_node(ignore, x) \ + __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) +#define alloc_bootmem_low_pages_node(ignore, x) \ + __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0) - get_memcfg_numa_flat(); -} +#endif /* CONFIG_NEED_MULTIPLE_NODES */ + +extern int early_pfn_to_nid(unsigned long pfn); -#endif /* CONFIG_DISCONTIGMEM */ #endif /* _ASM_MMZONE_H_ */ diff --git a/include/asm-i386/page.h b/include/asm-i386/page.h index ed13969..8d93f73 100644 --- a/include/asm-i386/page.h +++ b/include/asm-i386/page.h @@ -68,6 +68,7 @@ typedef struct { unsigned long pgprot; } pgprot_t; #define HPAGE_MASK (~(HPAGE_SIZE - 1)) #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA +#define ARCH_HAS_HUGETLB_CLEAN_STALE_PGTABLE #endif #define pgd_val(x) ((x).pgd) @@ -119,13 +120,18 @@ static __inline__ int get_order(unsigned long size) extern int sysctl_legacy_va_layout; +extern int page_is_ram(unsigned long pagenr); + #endif /* __ASSEMBLY__ */ #ifdef __ASSEMBLY__ #define __PAGE_OFFSET (0xC0000000) +#define __PHYSICAL_START CONFIG_PHYSICAL_START #else #define __PAGE_OFFSET (0xC0000000UL) +#define __PHYSICAL_START ((unsigned long)CONFIG_PHYSICAL_START) #endif +#define __KERNEL_START (__PAGE_OFFSET + __PHYSICAL_START) #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) @@ -134,11 +140,11 @@ extern int sysctl_legacy_va_layout; #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET) #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) -#ifndef CONFIG_DISCONTIGMEM +#ifdef CONFIG_FLATMEM #define pfn_to_page(pfn) (mem_map + (pfn)) #define page_to_pfn(page) ((unsigned long)((page) - mem_map)) #define pfn_valid(pfn) ((pfn) < max_mapnr) -#endif /* !CONFIG_DISCONTIGMEM */ +#endif /* CONFIG_FLATMEM */ #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) diff --git a/include/asm-i386/param.h b/include/asm-i386/param.h index b644052..fa02e67 100644 --- a/include/asm-i386/param.h +++ b/include/asm-i386/param.h @@ -1,8 +1,10 @@ +#include <linux/config.h> + #ifndef _ASMi386_PARAM_H #define _ASMi386_PARAM_H #ifdef __KERNEL__ -# define HZ 1000 /* Internal kernel timer frequency */ +# define HZ CONFIG_HZ /* Internal kernel timer frequency */ # define USER_HZ 100 /* .. 
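The page.h hunk above introduces __PHYSICAL_START and defines __KERNEL_START as __PAGE_OFFSET + __PHYSICAL_START, alongside the existing __pa()/__va() linear offset. A standalone walk-through of the arithmetic, assuming a CONFIG_PHYSICAL_START of 0x100000 purely for illustration:

        #include <stdio.h>

        #define PAGE_SHIFT        12
        #define __PAGE_OFFSET     0xC0000000UL
        #define __PHYSICAL_START  0x100000UL    /* assumed CONFIG_PHYSICAL_START */
        #define __KERNEL_START    (__PAGE_OFFSET + __PHYSICAL_START)

        #define __pa(x)           ((unsigned long)(x) - __PAGE_OFFSET)
        #define __va(x)           ((void *)((unsigned long)(x) + __PAGE_OFFSET))
        #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)

        int main(void)
        {
                printf("kernel starts at %#lx\n", __KERNEL_START);         /* 0xc0100000 */
                printf("__pa(0xc0100000) = %#lx\n", __pa(0xC0100000UL));   /* 0x100000   */
                printf("pfn 0x100 -> %p\n", pfn_to_kaddr(0x100UL));        /* 0xc0100000 */
                return 0;
        }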
some user interfaces are in "ticks" */ # define CLOCKS_PER_SEC (USER_HZ) /* like times() */ #endif diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h index 8d60c2b..77c6497f 100644 --- a/include/asm-i386/pgtable.h +++ b/include/asm-i386/pgtable.h @@ -236,6 +236,7 @@ static inline pte_t pte_mkexec(pte_t pte) { (pte).pte_low |= _PAGE_USER; return static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; } static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; } static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; } +static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PRESENT | _PAGE_PSE; return pte; } #ifdef CONFIG_X86_PAE # include <asm/pgtable-3level.h> @@ -275,7 +276,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, */ #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) -#define mk_pte_huge(entry) ((entry).pte_low |= _PAGE_PRESENT | _PAGE_PSE) static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { @@ -398,9 +398,9 @@ extern void noexec_setup(const char *str); #endif /* !__ASSEMBLY__ */ -#ifndef CONFIG_DISCONTIGMEM +#ifdef CONFIG_FLATMEM #define kern_addr_valid(addr) (1) -#endif /* !CONFIG_DISCONTIGMEM */ +#endif /* CONFIG_FLATMEM */ #define io_remap_page_range(vma, vaddr, paddr, size, prot) \ remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot) diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h index 359bb01..6f0f93d 100644 --- a/include/asm-i386/processor.h +++ b/include/asm-i386/processor.h @@ -501,12 +501,16 @@ static inline void load_esp0(struct tss_struct *tss, struct thread_struct *threa } while (0) /* - * This special macro can be used to load a debugging register + * These special macros can be used to get or set a debugging register */ -#define loaddebug(thread,register) \ - __asm__("movl %0,%%db" #register \ - : /* no output */ \ - :"r" ((thread)->debugreg[register])) +#define get_debugreg(var, register) \ + __asm__("movl %%db" #register ", %0" \ + :"=r" (var)) +#define set_debugreg(value, register) \ + __asm__("movl %0,%%db" #register \ + : /* no output */ \ + :"r" (value)) + /* Forward declaration, a strange C thing */ struct task_struct; @@ -687,5 +691,7 @@ extern void select_idle_routine(const struct cpuinfo_x86 *c); #define cache_line_size() (boot_cpu_data.x86_cache_alignment) extern unsigned long boot_option_idle_override; +extern void enable_sep_cpu(void); +extern int sysenter_setup(void); #endif /* __ASM_I386_PROCESSOR_H */ diff --git a/include/asm-i386/ptrace.h b/include/asm-i386/ptrace.h index 8618914..eef9f93 100644 --- a/include/asm-i386/ptrace.h +++ b/include/asm-i386/ptrace.h @@ -57,7 +57,8 @@ struct pt_regs { #ifdef __KERNEL__ struct task_struct; extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code); -#define user_mode(regs) ((VM_MASK & (regs)->eflags) || (3 & (regs)->xcs)) +#define user_mode(regs) (3 & (regs)->xcs) +#define user_mode_vm(regs) ((VM_MASK & (regs)->eflags) || user_mode(regs)) #define instruction_pointer(regs) ((regs)->eip) #if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER) extern unsigned long profile_pc(struct pt_regs *regs); diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h index e03a206..edad9b4 100644 --- a/include/asm-i386/smp.h +++ b/include/asm-i386/smp.h @@ -42,16 +42,23 @@ extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs); extern void 
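The ptrace.h change above splits the old test in two: user_mode() now looks only at the CS privilege bits, while user_mode_vm() additionally treats vm86 mode as user space via the EFLAGS VM bit. A standalone illustration (VM_MASK below is the x86 EFLAGS.VM bit, 1 << 17; the register values are made up):

        #include <stdio.h>

        #define VM_MASK (1ul << 17)     /* EFLAGS.VM */

        struct regs { unsigned long eflags, xcs; };

        #define user_mode(r)    (3 & (r)->xcs)
        #define user_mode_vm(r) ((VM_MASK & (r)->eflags) || user_mode(r))

        int main(void)
        {
                struct regs kern = { 0x00000046, 0x60 };     /* ring 0       */
                struct regs user = { 0x00000202, 0x73 };     /* ring 3       */
                struct regs vm86 = { 0x00020202, 0x0000 };   /* VM flag set  */

                printf("kern: %d %d\n", !!user_mode(&kern), !!user_mode_vm(&kern)); /* 0 0 */
                printf("user: %d %d\n", !!user_mode(&user), !!user_mode_vm(&user)); /* 1 1 */
                printf("vm86: %d %d\n", !!user_mode(&vm86), !!user_mode_vm(&vm86)); /* 0 1 */
                return 0;
        }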
smp_invalidate_rcv(void); /* Process an NMI */ extern void (*mtrr_hook) (void); extern void zap_low_mappings (void); +extern void lock_ipi_call_lock(void); +extern void unlock_ipi_call_lock(void); #define MAX_APICID 256 extern u8 x86_cpu_to_apicid[]; +#ifdef CONFIG_HOTPLUG_CPU +extern void cpu_exit_clear(void); +extern void cpu_uninit(void); +#endif + /* * This function is needed by all SMP systems. It must _always_ be valid * from the initial startup. We map APIC_BASE very early in page_setup(), * so this is correct in the x86 case. */ -#define __smp_processor_id() (current_thread_info()->cpu) +#define raw_smp_processor_id() (current_thread_info()->cpu) extern cpumask_t cpu_callout_map; extern cpumask_t cpu_callin_map; @@ -83,6 +90,9 @@ static __inline int logical_smp_processor_id(void) } #endif + +extern int __cpu_disable(void); +extern void __cpu_die(unsigned int cpu); #endif /* !__ASSEMBLY__ */ #define NO_PROC_ID 0xFF /* No processor magic marker */ diff --git a/include/asm-i386/sparsemem.h b/include/asm-i386/sparsemem.h new file mode 100644 index 0000000..cfeed99 --- /dev/null +++ b/include/asm-i386/sparsemem.h @@ -0,0 +1,31 @@ +#ifndef _I386_SPARSEMEM_H +#define _I386_SPARSEMEM_H +#ifdef CONFIG_SPARSEMEM + +/* + * generic non-linear memory support: + * + * 1) we will not split memory into more chunks than will fit into the + * flags field of the struct page + */ + +/* + * SECTION_SIZE_BITS 2^N: how big each section will be + * MAX_PHYSADDR_BITS 2^N: how much physical address space we have + * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space + */ +#ifdef CONFIG_X86_PAE +#define SECTION_SIZE_BITS 30 +#define MAX_PHYSADDR_BITS 36 +#define MAX_PHYSMEM_BITS 36 +#else +#define SECTION_SIZE_BITS 26 +#define MAX_PHYSADDR_BITS 32 +#define MAX_PHYSMEM_BITS 32 +#endif + +/* XXX: FIXME -- wli */ +#define kern_addr_valid(kaddr) (0) + +#endif /* CONFIG_SPARSEMEM */ +#endif /* _I386_SPARSEMEM_H */ diff --git a/include/asm-i386/string.h b/include/asm-i386/string.h index 6a78ac5..02c8f5d 100644 --- a/include/asm-i386/string.h +++ b/include/asm-i386/string.h @@ -116,7 +116,8 @@ __asm__ __volatile__( "orb $1,%%al\n" "3:" :"=a" (__res), "=&S" (d0), "=&D" (d1) - :"1" (cs),"2" (ct)); + :"1" (cs),"2" (ct) + :"memory"); return __res; } @@ -138,8 +139,9 @@ __asm__ __volatile__( "3:\tsbbl %%eax,%%eax\n\t" "orb $1,%%al\n" "4:" - :"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2) - :"1" (cs),"2" (ct),"3" (count)); + :"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2) + :"1" (cs),"2" (ct),"3" (count) + :"memory"); return __res; } @@ -158,7 +160,9 @@ __asm__ __volatile__( "movl $1,%1\n" "2:\tmovl %1,%0\n\t" "decl %0" - :"=a" (__res), "=&S" (d0) : "1" (s),"0" (c)); + :"=a" (__res), "=&S" (d0) + :"1" (s),"0" (c) + :"memory"); return __res; } @@ -175,7 +179,9 @@ __asm__ __volatile__( "leal -1(%%esi),%0\n" "2:\ttestb %%al,%%al\n\t" "jne 1b" - :"=g" (__res), "=&S" (d0), "=&a" (d1) :"0" (0),"1" (s),"2" (c)); + :"=g" (__res), "=&S" (d0), "=&a" (d1) + :"0" (0),"1" (s),"2" (c) + :"memory"); return __res; } @@ -189,7 +195,9 @@ __asm__ __volatile__( "scasb\n\t" "notl %0\n\t" "decl %0" - :"=c" (__res), "=&D" (d0) :"1" (s),"a" (0), "0" (0xffffffffu)); + :"=c" (__res), "=&D" (d0) + :"1" (s),"a" (0), "0" (0xffffffffu) + :"memory"); return __res; } @@ -333,7 +341,9 @@ __asm__ __volatile__( "je 1f\n\t" "movl $1,%0\n" "1:\tdecl %0" - :"=D" (__res), "=&c" (d0) : "a" (c),"0" (cs),"1" (count)); + :"=D" (__res), "=&c" (d0) + :"a" (c),"0" (cs),"1" (count) + :"memory"); return __res; } @@ -369,7 +379,7 @@ __asm__ 
__volatile__( "je 2f\n\t" "stosb\n" "2:" - : "=&c" (d0), "=&D" (d1) + :"=&c" (d0), "=&D" (d1) :"a" (c), "q" (count), "0" (count/4), "1" ((long) s) :"memory"); return (s); @@ -392,7 +402,8 @@ __asm__ __volatile__( "jne 1b\n" "3:\tsubl %2,%0" :"=a" (__res), "=&d" (d0) - :"c" (s),"1" (count)); + :"c" (s),"1" (count) + :"memory"); return __res; } /* end of additional stuff */ @@ -473,7 +484,8 @@ static inline void * memscan(void * addr, int c, size_t size) "dec %%edi\n" "1:" : "=D" (addr), "=c" (size) - : "0" (addr), "1" (size), "a" (c)); + : "0" (addr), "1" (size), "a" (c) + : "memory"); return addr; } diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h index 2cd5727..95add81 100644 --- a/include/asm-i386/thread_info.h +++ b/include/asm-i386/thread_info.h @@ -31,7 +31,7 @@ struct thread_info { unsigned long flags; /* low level flags */ unsigned long status; /* thread-synchronous flags */ __u32 cpu; /* current CPU */ - __s32 preempt_count; /* 0 => preemptable, <0 => BUG */ + int preempt_count; /* 0 => preemptable, <0 => BUG */ mm_segment_t addr_limit; /* thread address space: diff --git a/include/asm-i386/timer.h b/include/asm-i386/timer.h index 40c54f6..dcf1e07 100644 --- a/include/asm-i386/timer.h +++ b/include/asm-i386/timer.h @@ -22,6 +22,7 @@ struct timer_opts { unsigned long (*get_offset)(void); unsigned long long (*monotonic_clock)(void); void (*delay)(unsigned long); + unsigned long (*read_timer)(void); }; struct init_timer_opts { @@ -52,7 +53,9 @@ extern struct init_timer_opts timer_cyclone_init; #endif extern unsigned long calibrate_tsc(void); +extern unsigned long read_timer_tsc(void); extern void init_cpu_khz(void); +extern int recalibrate_cpu_khz(void); #ifdef CONFIG_HPET_TIMER extern struct init_timer_opts timer_hpet_init; extern unsigned long calibrate_tsc_hpet(unsigned long *tsc_hpet_quotient_ptr); diff --git a/include/asm-i386/timex.h b/include/asm-i386/timex.h index b41e484..292b5a6 100644 --- a/include/asm-i386/timex.h +++ b/include/asm-i386/timex.h @@ -47,6 +47,9 @@ static inline cycles_t get_cycles (void) return ret; } -extern unsigned long cpu_khz; +extern unsigned int cpu_khz; + +extern int read_current_timer(unsigned long *timer_value); +#define ARCH_HAS_READ_CURRENT_TIMER 1 #endif diff --git a/include/asm-i386/topology.h b/include/asm-i386/topology.h index 98f9e68..2461b73 100644 --- a/include/asm-i386/topology.h +++ b/include/asm-i386/topology.h @@ -60,12 +60,8 @@ static inline int node_to_first_cpu(int node) return first_cpu(mask); } -/* Returns the number of the node containing PCI bus number 'busnr' */ -static inline cpumask_t __pcibus_to_cpumask(int busnr) -{ - return node_to_cpumask(mp_bus_id_to_node[busnr]); -} -#define pcibus_to_cpumask(bus) __pcibus_to_cpumask(bus->number) +#define pcibus_to_node(bus) mp_bus_id_to_node[(bus)->number] +#define pcibus_to_cpumask(bus) node_to_cpumask(pcibus_to_node(bus)) /* sched_domains SD_NODE_INIT for NUMAQ machines */ #define SD_NODE_INIT (struct sched_domain) { \ @@ -78,11 +74,14 @@ static inline cpumask_t __pcibus_to_cpumask(int busnr) .imbalance_pct = 125, \ .cache_hot_time = (10*1000000), \ .cache_nice_tries = 1, \ + .busy_idx = 3, \ + .idle_idx = 1, \ + .newidle_idx = 2, \ + .wake_idx = 1, \ .per_cpu_gain = 100, \ .flags = SD_LOAD_BALANCE \ | SD_BALANCE_EXEC \ - | SD_BALANCE_NEWIDLE \ - | SD_WAKE_IDLE \ + | SD_BALANCE_FORK \ | SD_WAKE_BALANCE, \ .last_balance = jiffies, \ .balance_interval = 1, \ diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h index 61bcc1b..176413f 100644 --- 
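The string.h hunks above add a "memory" clobber to the inline-assembly string helpers, telling GCC that the asm may read or write memory not named in the operand list, so values cached in registers cannot be reused across it. The smallest form of the same idea is a pure compiler barrier; this is a sketch of the general mechanism, not of the patched routines:

        #include <stdio.h>

        /* an empty asm with a "memory" clobber: emits no instructions, but
         * forces the compiler to assume any memory may have changed, so
         * cached values are written back before it and reloaded after it */
        static inline void compiler_barrier(void)
        {
                __asm__ __volatile__("" : : : "memory");
        }

        int main(void)
        {
                int x = 1;

                compiler_barrier();
                printf("%d\n", x);
                return 0;
        }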
a/include/asm-i386/unistd.h +++ b/include/asm-i386/unistd.h @@ -256,7 +256,7 @@ #define __NR_io_submit 248 #define __NR_io_cancel 249 #define __NR_fadvise64 250 - +#define __NR_set_zone_reclaim 251 #define __NR_exit_group 252 #define __NR_lookup_dcookie 253 #define __NR_epoll_create 254 diff --git a/include/asm-ia64/agp.h b/include/asm-ia64/agp.h index d1316f1..4e517f0 100644 --- a/include/asm-ia64/agp.h +++ b/include/asm-ia64/agp.h @@ -18,4 +18,14 @@ #define flush_agp_mappings() /* nothing */ #define flush_agp_cache() mb() +/* Convert a physical address to an address suitable for the GART. */ +#define phys_to_gart(x) (x) +#define gart_to_phys(x) (x) + +/* GATT allocation. Returns/accepts GATT kernel virtual address. */ +#define alloc_gatt_pages(order) \ + ((char *)__get_free_pages(GFP_KERNEL, (order))) +#define free_gatt_pages(table, order) \ + free_pages((unsigned long)(table), (order)) + #endif /* _ASM_IA64_AGP_H */ diff --git a/include/asm-ia64/break.h b/include/asm-ia64/break.h index 97c7b2d..8167828 100644 --- a/include/asm-ia64/break.h +++ b/include/asm-ia64/break.h @@ -12,6 +12,8 @@ * OS-specific debug break numbers: */ #define __IA64_BREAK_KDB 0x80100 +#define __IA64_BREAK_KPROBE 0x80200 +#define __IA64_BREAK_JPROBE 0x80300 /* * OS-specific break numbers: diff --git a/include/asm-ia64/compat.h b/include/asm-ia64/compat.h index cc0ff0a..0c05e5b 100644 --- a/include/asm-ia64/compat.h +++ b/include/asm-ia64/compat.h @@ -27,6 +27,7 @@ typedef u16 compat_ipc_pid_t; typedef s32 compat_daddr_t; typedef u32 compat_caddr_t; typedef __kernel_fsid_t compat_fsid_t; +typedef s32 compat_timer_t; typedef s32 compat_int_t; typedef s32 compat_long_t; diff --git a/include/asm-ia64/fcntl.h b/include/asm-ia64/fcntl.h index d193981..c9f8d83 100644 --- a/include/asm-ia64/fcntl.h +++ b/include/asm-ia64/fcntl.h @@ -81,4 +81,6 @@ struct flock { #define F_LINUX_SPECIFIC_BASE 1024 +#define force_o_largefile() ( ! (current->personality & PER_LINUX32) ) + #endif /* _ASM_IA64_FCNTL_H */ diff --git a/include/asm-ia64/kdebug.h b/include/asm-ia64/kdebug.h new file mode 100644 index 0000000..4d376e1 --- /dev/null +++ b/include/asm-ia64/kdebug.h @@ -0,0 +1,61 @@ +#ifndef _IA64_KDEBUG_H +#define _IA64_KDEBUG_H 1 +/* + * include/asm-ia64/kdebug.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + * Copyright (C) Intel Corporation, 2005 + * + * 2005-Apr Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy + * <anil.s.keshavamurthy@intel.com> adopted from + * include/asm-x86_64/kdebug.h + */ +#include <linux/notifier.h> + +struct pt_regs; + +struct die_args { + struct pt_regs *regs; + const char *str; + long err; + int trapnr; + int signr; +}; + +int register_die_notifier(struct notifier_block *nb); +extern struct notifier_block *ia64die_chain; + +enum die_val { + DIE_BREAK = 1, + DIE_SS, + DIE_PAGE_FAULT, +}; + +static inline int notify_die(enum die_val val, char *str, struct pt_regs *regs, + long err, int trap, int sig) +{ + struct die_args args = { + .regs = regs, + .str = str, + .err = err, + .trapnr = trap, + .signr = sig + }; + + return notifier_call_chain(&ia64die_chain, val, &args); +} + +#endif diff --git a/include/asm-ia64/kprobes.h b/include/asm-ia64/kprobes.h new file mode 100644 index 0000000..7b70003 --- /dev/null +++ b/include/asm-ia64/kprobes.h @@ -0,0 +1,116 @@ +#ifndef _ASM_KPROBES_H +#define _ASM_KPROBES_H +/* + * Kernel Probes (KProbes) + * include/asm-ia64/kprobes.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
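The new ia64 kdebug.h above mirrors the x86_64 die-notifier scheme: notify_die() packs the fault context into a struct die_args and walks a notifier chain. A hedged sketch of how a client such as kprobes might hook it, using only the types declared in this header plus the standard 2.6 notifier_block callback signature (the handler name and the err test are illustrative):

        #include <linux/notifier.h>
        #include <asm/kdebug.h>

        /* illustrative handler: claim DIE_BREAK events, ignore the rest */
        static int my_die_handler(struct notifier_block *self,
                                  unsigned long val, void *data)
        {
                struct die_args *args = data;

                if (val == DIE_BREAK && args->err == 0)
                        return NOTIFY_STOP;     /* handled, stop the chain */
                return NOTIFY_DONE;             /* not ours */
        }

        static struct notifier_block my_die_nb = {
                .notifier_call = my_die_handler,
        };

        /* somewhere in init code: register_die_notifier(&my_die_nb); */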
+ * + * Copyright (C) IBM Corporation, 2002, 2004 + * Copyright (C) Intel Corporation, 2005 + * + * 2005-Apr Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy + * <anil.s.keshavamurthy@intel.com> adapted from i386 + */ +#include <linux/types.h> +#include <linux/ptrace.h> +#include <asm/break.h> + +#define BREAK_INST (long)(__IA64_BREAK_KPROBE << 6) + +typedef union cmp_inst { + struct { + unsigned long long qp : 6; + unsigned long long p1 : 6; + unsigned long long c : 1; + unsigned long long r2 : 7; + unsigned long long r3 : 7; + unsigned long long p2 : 6; + unsigned long long ta : 1; + unsigned long long x2 : 2; + unsigned long long tb : 1; + unsigned long long opcode : 4; + unsigned long long reserved : 23; + }f; + unsigned long long l; +} cmp_inst_t; + +struct kprobe; + +typedef struct _bundle { + struct { + unsigned long long template : 5; + unsigned long long slot0 : 41; + unsigned long long slot1_p0 : 64-46; + } quad0; + struct { + unsigned long long slot1_p1 : 41 - (64-46); + unsigned long long slot2 : 41; + } quad1; +} __attribute__((__aligned__(16))) bundle_t; + +#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry + +#define SLOT0_OPCODE_SHIFT (37) +#define SLOT1_p1_OPCODE_SHIFT (37 - (64-46)) +#define SLOT2_OPCODE_SHIFT (37) + +#define INDIRECT_CALL_OPCODE (1) +#define IP_RELATIVE_CALL_OPCODE (5) +#define IP_RELATIVE_BRANCH_OPCODE (4) +#define IP_RELATIVE_PREDICT_OPCODE (7) +#define LONG_BRANCH_OPCODE (0xC) +#define LONG_CALL_OPCODE (0xD) + +typedef struct kprobe_opcode { + bundle_t bundle; +} kprobe_opcode_t; + +struct fnptr { + unsigned long ip; + unsigned long gp; +}; + +/* Architecture specific copy of original instruction*/ +struct arch_specific_insn { + /* copy of the instruction to be emulated */ + kprobe_opcode_t insn; + #define INST_FLAG_FIX_RELATIVE_IP_ADDR 1 + #define INST_FLAG_FIX_BRANCH_REG 2 + unsigned long inst_flag; + unsigned short target_br_reg; +}; + +/* ia64 does not need this */ +static inline void jprobe_return(void) +{ +} + +/* ia64 does not need this */ +static inline void arch_copy_kprobe(struct kprobe *p) +{ +} + +#ifdef CONFIG_KPROBES +extern int kprobe_exceptions_notify(struct notifier_block *self, + unsigned long val, void *data); +#else /* !CONFIG_KPROBES */ +static inline int kprobe_exceptions_notify(struct notifier_block *self, + unsigned long val, void *data) +{ + return 0; +} +#endif +#endif /* _ASM_KPROBES_H */ diff --git a/include/asm-ia64/mmzone.h b/include/asm-ia64/mmzone.h index 9491dac..d32f51e 100644 --- a/include/asm-ia64/mmzone.h +++ b/include/asm-ia64/mmzone.h @@ -17,6 +17,20 @@ #ifdef CONFIG_DISCONTIGMEM +static inline int pfn_to_nid(unsigned long pfn) +{ +#ifdef CONFIG_NUMA + extern int paddr_to_nid(unsigned long); + int nid = paddr_to_nid(pfn << PAGE_SHIFT); + if (nid < 0) + return 0; + else + return nid; +#else + return 0; +#endif +} + #ifdef CONFIG_IA64_DIG /* DIG systems are small */ # define MAX_PHYSNODE_ID 8 # define NR_NODE_MEMBLKS (MAX_NUMNODES * 8) diff --git a/include/asm-ia64/param.h b/include/asm-ia64/param.h index 6c6b679..5e1e0d2 100644 --- a/include/asm-ia64/param.h +++ b/include/asm-ia64/param.h @@ -27,7 +27,7 @@ */ # define HZ 32 # else -# define HZ 1024 +# define HZ CONFIG_HZ # endif # define USER_HZ HZ # define CLOCKS_PER_SEC HZ /* frequency at which times() counts */ diff --git a/include/asm-ia64/percpu.h b/include/asm-ia64/percpu.h index 1e87f19..2b14dee 100644 --- a/include/asm-ia64/percpu.h +++ b/include/asm-ia64/percpu.h @@ -50,7 +50,7 @@ extern void *per_cpu_init(void); #else /* ! 
SMP */ -#define per_cpu(var, cpu) (*((void)cpu, &per_cpu__##var)) +#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var)) #define __get_cpu_var(var) per_cpu__##var #define per_cpu_init() (__phys_per_cpu_start) diff --git a/include/asm-ia64/perfmon.h b/include/asm-ia64/perfmon.h index ed5416c..7f3333d 100644 --- a/include/asm-ia64/perfmon.h +++ b/include/asm-ia64/perfmon.h @@ -177,6 +177,10 @@ typedef union { extern long perfmonctl(int fd, int cmd, void *arg, int narg); +typedef struct { + void (*handler)(int irq, void *arg, struct pt_regs *regs); +} pfm_intr_handler_desc_t; + extern void pfm_save_regs (struct task_struct *); extern void pfm_load_regs (struct task_struct *); @@ -187,6 +191,10 @@ extern void pfm_syst_wide_update_task(struct task_struct *, unsigned long info, extern void pfm_inherit(struct task_struct *task, struct pt_regs *regs); extern void pfm_init_percpu(void); extern void pfm_handle_work(void); +extern int pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *h); +extern int pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *h); + + /* * Reset PMD register flags diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h index ea121a0..48586e0 100644 --- a/include/asm-ia64/pgtable.h +++ b/include/asm-ia64/pgtable.h @@ -8,7 +8,7 @@ * This hopefully works with any (fixed) IA-64 page-size, as defined * in <asm/page.h>. * - * Copyright (C) 1998-2004 Hewlett-Packard Co + * Copyright (C) 1998-2005 Hewlett-Packard Co * David Mosberger-Tang <davidm@hpl.hp.com> */ @@ -283,6 +283,7 @@ ia64_phys_addr_valid (unsigned long addr) #define pte_mkyoung(pte) (__pte(pte_val(pte) | _PAGE_A)) #define pte_mkclean(pte) (__pte(pte_val(pte) & ~_PAGE_D)) #define pte_mkdirty(pte) (__pte(pte_val(pte) | _PAGE_D)) +#define pte_mkhuge(pte) (__pte(pte_val(pte) | _PAGE_P)) /* * Macro to a page protection value as "uncacheable". Note that "protection" is really a @@ -551,7 +552,11 @@ do { \ /* These tell get_user_pages() that the first gate page is accessible from user-level. */ #define FIXADDR_USER_START GATE_ADDR -#define FIXADDR_USER_END (GATE_ADDR + 2*PERCPU_PAGE_SIZE) +#ifdef HAVE_BUGGY_SEGREL +# define FIXADDR_USER_END (GATE_ADDR + 2*PAGE_SIZE) +#else +# define FIXADDR_USER_END (GATE_ADDR + 2*PERCPU_PAGE_SIZE) +#endif #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h index 9e1ba8b..91bbd1f 100644 --- a/include/asm-ia64/processor.h +++ b/include/asm-ia64/processor.h @@ -403,7 +403,10 @@ extern void ia64_setreg_unknown_kr (void); * task_struct at this point. */ -/* Return TRUE if task T owns the fph partition of the CPU we're running on. */ +/* + * Return TRUE if task T owns the fph partition of the CPU we're running on. + * Must be called from code that has preemption disabled. + */ #define ia64_is_local_fpu_owner(t) \ ({ \ struct task_struct *__ia64_islfo_task = (t); \ @@ -411,7 +414,10 @@ extern void ia64_setreg_unknown_kr (void); && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER)); \ }) -/* Mark task T as owning the fph partition of the CPU we're running on. */ +/* + * Mark task T as owning the fph partition of the CPU we're running on. + * Must be called from code that has preemption disabled. 
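The perfmon.h hunk above exports a way for another subsystem to take over the PMU interrupt by installing an alternate handler descriptor. A hedged usage sketch built only from the declarations shown; the handler body and its name are placeholders, not kernel code:

        #include <asm/perfmon.h>

        /* placeholder handler matching the pfm_intr_handler_desc_t callback */
        static void my_pmu_handler(int irq, void *arg, struct pt_regs *regs)
        {
                /* consume the PMU interrupt here */
        }

        static pfm_intr_handler_desc_t my_pmu_desc = {
                .handler = my_pmu_handler,
        };

        /* install on setup, remove on teardown:
         *   pfm_install_alt_pmu_interrupt(&my_pmu_desc);
         *   ...
         *   pfm_remove_alt_pmu_interrupt(&my_pmu_desc);
         */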
+ */ #define ia64_set_local_fpu_owner(t) do { \ struct task_struct *__ia64_slfo_task = (t); \ __ia64_slfo_task->thread.last_fph_cpu = smp_processor_id(); \ diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h index 3ba1a06..a391435 100644 --- a/include/asm-ia64/smp.h +++ b/include/asm-ia64/smp.h @@ -46,7 +46,7 @@ ia64_get_lid (void) #define SMP_IRQ_REDIRECTION (1 << 0) #define SMP_IPI_REDIRECTION (1 << 1) -#define smp_processor_id() (current_thread_info()->cpu) +#define raw_smp_processor_id() (current_thread_info()->cpu) extern struct smp_boot_data { int cpu_count; diff --git a/include/asm-ia64/sn/mspec.h b/include/asm-ia64/sn/mspec.h new file mode 100644 index 0000000..dbe13c6 --- /dev/null +++ b/include/asm-ia64/sn/mspec.h @@ -0,0 +1,59 @@ +/* + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2001-2004 Silicon Graphics, Inc. All rights reserved. + */ + +#ifndef _ASM_IA64_SN_MSPEC_H +#define _ASM_IA64_SN_MSPEC_H + +#define FETCHOP_VAR_SIZE 64 /* 64 byte per fetchop variable */ + +#define FETCHOP_LOAD 0 +#define FETCHOP_INCREMENT 8 +#define FETCHOP_DECREMENT 16 +#define FETCHOP_CLEAR 24 + +#define FETCHOP_STORE 0 +#define FETCHOP_AND 24 +#define FETCHOP_OR 32 + +#define FETCHOP_CLEAR_CACHE 56 + +#define FETCHOP_LOAD_OP(addr, op) ( \ + *(volatile long *)((char*) (addr) + (op))) + +#define FETCHOP_STORE_OP(addr, op, x) ( \ + *(volatile long *)((char*) (addr) + (op)) = (long) (x)) + +#ifdef __KERNEL__ + +/* + * Each Atomic Memory Operation (AMO formerly known as fetchop) + * variable is 64 bytes long. The first 8 bytes are used. The + * remaining 56 bytes are unaddressable due to the operation taking + * that portion of the address. + * + * NOTE: The AMO_t _MUST_ be placed in either the first or second half + * of the cache line. The cache line _MUST NOT_ be used for anything + * other than additional AMO_t entries. This is because there are two + * addresses which reference the same physical cache line. One will + * be a cached entry with the memory type bits all set. This address + * may be loaded into processor cache. The AMO_t will be referenced + * uncached via the memory special memory type. If any portion of the + * cached cache-line is modified, when that line is flushed, it will + * overwrite the uncached value in physical memory and lead to + * inconsistency. 
+ */ +typedef struct { + u64 variable; + u64 unused[7]; +} AMO_t; + + +#endif /* __KERNEL__ */ + +#endif /* _ASM_IA64_SN_MSPEC_H */ diff --git a/include/asm-ia64/sn/sn_sal.h b/include/asm-ia64/sn/sn_sal.h index 56d74ca..eb0395a 100644 --- a/include/asm-ia64/sn/sn_sal.h +++ b/include/asm-ia64/sn/sn_sal.h @@ -115,6 +115,13 @@ #define SAL_IROUTER_INTR_XMIT SAL_CONSOLE_INTR_XMIT #define SAL_IROUTER_INTR_RECV SAL_CONSOLE_INTR_RECV +/* + * Error Handling Features + */ +#define SAL_ERR_FEAT_MCA_SLV_TO_OS_INIT_SLV 0x1 +#define SAL_ERR_FEAT_LOG_SBES 0x2 +#define SAL_ERR_FEAT_MFR_OVERRIDE 0x4 +#define SAL_ERR_FEAT_SBE_THRESHOLD 0xffff0000 /* * SAL Error Codes @@ -342,6 +349,25 @@ ia64_sn_plat_cpei_handler(void) } /* + * Set Error Handling Features + */ +static inline u64 +ia64_sn_plat_set_error_handling_features(void) +{ + struct ia64_sal_retval ret_stuff; + + ret_stuff.status = 0; + ret_stuff.v0 = 0; + ret_stuff.v1 = 0; + ret_stuff.v2 = 0; + SAL_CALL_REENTRANT(ret_stuff, SN_SAL_SET_ERROR_HANDLING_FEATURES, + (SAL_ERR_FEAT_MCA_SLV_TO_OS_INIT_SLV | SAL_ERR_FEAT_LOG_SBES), + 0, 0, 0, 0, 0, 0); + + return ret_stuff.status; +} + +/* * Checks for console input. */ static inline u64 @@ -472,7 +498,7 @@ static inline u64 ia64_sn_pod_mode(void) { struct ia64_sal_retval isrv; - SAL_CALL(isrv, SN_SAL_POD_MODE, 0, 0, 0, 0, 0, 0, 0); + SAL_CALL_REENTRANT(isrv, SN_SAL_POD_MODE, 0, 0, 0, 0, 0, 0, 0); if (isrv.status) return 0; return isrv.v0; diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h index 6f516e7..cd2cf76 100644 --- a/include/asm-ia64/system.h +++ b/include/asm-ia64/system.h @@ -183,8 +183,6 @@ do { \ #ifdef __KERNEL__ -#define prepare_to_switch() do { } while(0) - #ifdef CONFIG_IA32_SUPPORT # define IS_IA32_PROCESS(regs) (ia64_psr(regs)->is != 0) #else @@ -274,13 +272,7 @@ extern void ia64_load_extra (struct task_struct *task); * of that CPU which will not be released, because there we wait for the * tasklist_lock to become available. 
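The sn/mspec.h fetchop interface above encodes the atomic operation in the low address bits: a load or store at the AMO variable's address plus the operation offset makes the hardware perform that operation on the variable. A hedged sketch using only the macros shown (obtaining an AMO_t mapped in fetchop/uncached space is outside this sketch):

        #include <asm/sn/mspec.h>

        /* 'amo' must point at an AMO_t living in fetchop (uncached) space */
        static long amo_sketch(AMO_t *amo)
        {
                long v;

                FETCHOP_STORE_OP(amo, FETCHOP_STORE, 0);        /* plain store of 0 */
                v = FETCHOP_LOAD_OP(amo, FETCHOP_INCREMENT);    /* load that also
                                                                   atomically increments */
                return v;
        }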
*/ -#define prepare_arch_switch(rq, next) \ -do { \ - spin_lock(&(next)->switch_lock); \ - spin_unlock(&(rq)->lock); \ -} while (0) -#define finish_arch_switch(rq, prev) spin_unlock_irq(&(prev)->switch_lock) -#define task_running(rq, p) ((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock)) +#define __ARCH_WANT_UNLOCKED_CTXSW #define ia64_platform_is(x) (strcmp(x, platform_name) == 0) diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h index 8d5b7e7..7dc8951 100644 --- a/include/asm-ia64/thread_info.h +++ b/include/asm-ia64/thread_info.h @@ -25,7 +25,7 @@ struct thread_info { __u32 flags; /* thread_info flags (see TIF_*) */ __u32 cpu; /* current CPU */ mm_segment_t addr_limit; /* user-level address space limit */ - __s32 preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */ + int preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */ struct restart_block restart_block; struct { int signo; diff --git a/include/asm-ia64/topology.h b/include/asm-ia64/topology.h index 21cf351..4e64c2a 100644 --- a/include/asm-ia64/topology.h +++ b/include/asm-ia64/topology.h @@ -42,25 +42,54 @@ void build_cpu_to_node_map(void); +#define SD_CPU_INIT (struct sched_domain) { \ + .span = CPU_MASK_NONE, \ + .parent = NULL, \ + .groups = NULL, \ + .min_interval = 1, \ + .max_interval = 4, \ + .busy_factor = 64, \ + .imbalance_pct = 125, \ + .cache_hot_time = (10*1000000), \ + .per_cpu_gain = 100, \ + .cache_nice_tries = 2, \ + .busy_idx = 2, \ + .idle_idx = 1, \ + .newidle_idx = 2, \ + .wake_idx = 1, \ + .forkexec_idx = 1, \ + .flags = SD_LOAD_BALANCE \ + | SD_BALANCE_NEWIDLE \ + | SD_BALANCE_EXEC \ + | SD_WAKE_AFFINE, \ + .last_balance = jiffies, \ + .balance_interval = 1, \ + .nr_balance_failed = 0, \ +} + /* sched_domains SD_NODE_INIT for IA64 NUMA machines */ #define SD_NODE_INIT (struct sched_domain) { \ .span = CPU_MASK_NONE, \ .parent = NULL, \ .groups = NULL, \ - .min_interval = 80, \ - .max_interval = 320, \ - .busy_factor = 320, \ + .min_interval = 8, \ + .max_interval = 8*(min(num_online_cpus(), 32)), \ + .busy_factor = 64, \ .imbalance_pct = 125, \ .cache_hot_time = (10*1000000), \ - .cache_nice_tries = 1, \ + .cache_nice_tries = 2, \ + .busy_idx = 3, \ + .idle_idx = 2, \ + .newidle_idx = 0, /* unused */ \ + .wake_idx = 1, \ + .forkexec_idx = 1, \ .per_cpu_gain = 100, \ .flags = SD_LOAD_BALANCE \ | SD_BALANCE_EXEC \ - | SD_BALANCE_NEWIDLE \ - | SD_WAKE_IDLE \ + | SD_BALANCE_FORK \ | SD_WAKE_BALANCE, \ .last_balance = jiffies, \ - .balance_interval = 1, \ + .balance_interval = 64, \ .nr_balance_failed = 0, \ } @@ -69,17 +98,21 @@ void build_cpu_to_node_map(void); .span = CPU_MASK_NONE, \ .parent = NULL, \ .groups = NULL, \ - .min_interval = 80, \ - .max_interval = 320, \ - .busy_factor = 320, \ - .imbalance_pct = 125, \ + .min_interval = 64, \ + .max_interval = 64*num_online_cpus(), \ + .busy_factor = 128, \ + .imbalance_pct = 133, \ .cache_hot_time = (10*1000000), \ .cache_nice_tries = 1, \ + .busy_idx = 3, \ + .idle_idx = 3, \ + .newidle_idx = 0, /* unused */ \ + .wake_idx = 0, /* unused */ \ + .forkexec_idx = 0, /* unused */ \ .per_cpu_gain = 100, \ - .flags = SD_LOAD_BALANCE \ - | SD_BALANCE_EXEC, \ + .flags = SD_LOAD_BALANCE, \ .last_balance = jiffies, \ - .balance_interval = 100*(63+num_online_cpus())/64, \ + .balance_interval = 64, \ .nr_balance_failed = 0, \ } diff --git a/include/asm-ia64/uncached.h b/include/asm-ia64/uncached.h new file mode 100644 index 0000000..b82d923 --- /dev/null +++ b/include/asm-ia64/uncached.h @@ -0,0 +1,12 
@@ +/* + * Copyright (C) 2001-2005 Silicon Graphics, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + * + * Prototypes for the uncached page allocator + */ + +extern unsigned long uncached_alloc_page(int nid); +extern void uncached_free_page(unsigned long); diff --git a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h index 33e26c5..f7f43ec 100644 --- a/include/asm-ia64/unistd.h +++ b/include/asm-ia64/unistd.h @@ -263,6 +263,7 @@ #define __NR_add_key 1271 #define __NR_request_key 1272 #define __NR_keyctl 1273 +#define __NR_set_zone_reclaim 1276 #ifdef __KERNEL__ diff --git a/include/asm-m32r/div64.h b/include/asm-m32r/div64.h index 417a51b..6cd978c 100644 --- a/include/asm-m32r/div64.h +++ b/include/asm-m32r/div64.h @@ -1,38 +1 @@ -#ifndef _ASM_M32R_DIV64 -#define _ASM_M32R_DIV64 - -/* $Id$ */ - -/* unsigned long long division. - * Input: - * unsigned long long n - * unsigned long base - * Output: - * n = n / base; - * return value = n % base; - */ -#define do_div(n, base) \ -({ \ - unsigned long _res, _high, _mid, _low; \ - \ - _low = (n) & 0xffffffffUL; \ - _high = (n) >> 32; \ - if (_high) { \ - _mid = (_high % (unsigned long)(base)) << 16; \ - _high = _high / (unsigned long)(base); \ - _mid += _low >> 16; \ - _low &= 0x0000ffffUL; \ - _low += (_mid % (unsigned long)(base)) << 16; \ - _mid = _mid / (unsigned long)(base); \ - _res = _low % (unsigned long)(base); \ - _low = _low / (unsigned long)(base); \ - n = _low + ((long long)_mid << 16) + \ - ((long long)_high << 32); \ - } else { \ - _res = _low % (unsigned long)(base); \ - n = (_low / (unsigned long)(base)); \ - } \ - _res; \ -}) - -#endif /* _ASM_M32R_DIV64 */ +#include <asm-generic/div64.h> diff --git a/include/asm-m32r/ide.h b/include/asm-m32r/ide.h index be64f24..194393b 100644 --- a/include/asm-m32r/ide.h +++ b/include/asm-m32r/ide.h @@ -35,7 +35,7 @@ static __inline__ int ide_default_irq(unsigned long base) { switch (base) { -#if defined(CONFIG_PLAT_M32700UT) || defined(CONFIG_PLAT_MAPPI2) +#if defined(CONFIG_PLAT_M32700UT) || defined(CONFIG_PLAT_MAPPI2) || defined(CONFIG_PLAT_MAPPI3) case 0x1f0: return PLD_IRQ_CFIREQ; default: return 0; diff --git a/include/asm-m32r/m32102.h b/include/asm-m32r/m32102.h index b560340..cb98101 100644 --- a/include/asm-m32r/m32102.h +++ b/include/asm-m32r/m32102.h @@ -175,6 +175,7 @@ #define M32R_ICU_CR5_PORTL (0x210+M32R_ICU_OFFSET) /* INT4 */ #define M32R_ICU_CR6_PORTL (0x214+M32R_ICU_OFFSET) /* INT5 */ #define M32R_ICU_CR7_PORTL (0x218+M32R_ICU_OFFSET) /* INT6 */ +#define M32R_ICU_CR8_PORTL (0x219+M32R_ICU_OFFSET) /* INT7 */ #define M32R_ICU_CR16_PORTL (0x23C+M32R_ICU_OFFSET) /* MFT0 */ #define M32R_ICU_CR17_PORTL (0x240+M32R_ICU_OFFSET) /* MFT1 */ #define M32R_ICU_CR18_PORTL (0x244+M32R_ICU_OFFSET) /* MFT2 */ diff --git a/include/asm-m32r/m32102peri.h b/include/asm-m32r/m32102peri.h deleted file mode 100644 index 3c12955..0000000 --- a/include/asm-m32r/m32102peri.h +++ /dev/null @@ -1,468 +0,0 @@ -/* $Id$ - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
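The m32r div64.h above is collapsed into the asm-generic version; either way, the contract of do_div(n, base) is unchanged: n is replaced by the 64-bit quotient and the macro evaluates to the 32-bit remainder. A standalone restatement of that contract (this shows the semantic shape only, not the generic header's actual implementation, which special-cases 32-bit divisors):

        #include <stdio.h>

        /* same contract as do_div(): divide 64-bit n by 32-bit base in
         * place, yield the remainder as the expression's value */
        #define DO_DIV_SKETCH(n, base) ({               \
                unsigned int __rem = (n) % (base);      \
                (n) /= (base);                          \
                __rem;                                  \
        })

        int main(void)
        {
                unsigned long long n = 10000000001ULL;
                unsigned int rem = DO_DIV_SKETCH(n, 3);

                printf("q=%llu r=%u\n", n, rem);        /* q=3333333333 r=2 */
                return 0;
        }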
- * - * Copyright (C) 2000,2001 by Hiroyuki Kondo - */ - -#ifndef __ASSEMBLY__ - -typedef void V; -typedef char B; -typedef short S; -typedef int W; -typedef long L; -typedef float F; -typedef double D; -typedef unsigned char UB; -typedef unsigned short US; -typedef unsigned int UW; -typedef unsigned long UL; -typedef const unsigned int CUW; - -/********************************* - -M32102 ICU - -*********************************/ -#define ICUISTS (UW *)0xa0EFF004 -#define ICUIREQ0 (UW *)0xa0EFF008 -#define ICUIREQ1 (UW *)0xa0EFF00C - -#define ICUSBICR (UW *)0xa0EFF018 -#define ICUIMASK (UW *)0xa0EFF01C - -#define ICUCR1 (UW *)0xa0EFF200 /* INT0 */ -#define ICUCR2 (UW *)0xa0EFF204 /* INT1 */ -#define ICUCR3 (UW *)0xa0EFF208 /* INT2 */ -#define ICUCR4 (UW *)0xa0EFF20C /* INT3 */ -#define ICUCR5 (UW *)0xa0EFF210 /* INT4 */ -#define ICUCR6 (UW *)0xa0EFF214 /* INT5 */ -#define ICUCR7 (UW *)0xa0EFF218 /* INT6 */ - -#define ICUCR16 (UW *)0xa0EFF23C /* MFT0 */ -#define ICUCR17 (UW *)0xa0EFF240 /* MFT1 */ -#define ICUCR18 (UW *)0xa0EFF244 /* MFT2 */ -#define ICUCR19 (UW *)0xa0EFF248 /* MFT3 */ -#define ICUCR20 (UW *)0xa0EFF24C /* MFT4 */ -#define ICUCR21 (UW *)0xa0EFF250 /* MFT5 */ - -#define ICUCR32 (UW *)0xa0EFF27C /* DMA0 */ -#define ICUCR33 (UW *)0xa0EFF280 /* DMA1 */ - -#define ICUCR48 (UW *)0xa0EFF2BC /* SIO0R */ -#define ICUCR49 (UW *)0xa0EFF2C0 /* SIO0S */ -#define ICUCR50 (UW *)0xa0EFF2C4 /* SIO1R */ -#define ICUCR51 (UW *)0xa0EFF2C8 /* SIO1S */ -#define ICUCR52 (UW *)0xa0EFF2CC /* SIO2R */ -#define ICUCR53 (UW *)0xa0EFF2D0 /* SIO2S */ -#define ICUCR54 (UW *)0xa0EFF2D4 /* SIO3R */ -#define ICUCR55 (UW *)0xa0EFF2D8 /* SIO3S */ -#define ICUCR56 (UW *)0xa0EFF2DC /* SIO4R */ -#define ICUCR57 (UW *)0xa0EFF2E0 /* SIO4S */ - -/********************************* - -M32102 MFT - -*********************************/ -#define MFTCR (US *)0xa0EFC002 -#define MFTRPR (UB *)0xa0EFC006 - -#define MFT0MOD (US *)0xa0EFC102 -#define MFT0BOS (US *)0xa0EFC106 -#define MFT0CUT (US *)0xa0EFC10A -#define MFT0RLD (US *)0xa0EFC10E -#define MFT0CRLD (US *)0xa0EFC112 - -#define MFT1MOD (US *)0xa0EFC202 -#define MFT1BOS (US *)0xa0EFC206 -#define MFT1CUT (US *)0xa0EFC20A -#define MFT1RLD (US *)0xa0EFC20E -#define MFT1CRLD (US *)0xa0EFC212 - -#define MFT2MOD (US *)0xa0EFC302 -#define MFT2BOS (US *)0xa0EFC306 -#define MFT2CUT (US *)0xa0EFC30A -#define MFT2RLD (US *)0xa0EFC30E -#define MFT2CRLD (US *)0xa0EFC312 - -#define MFT3MOD (US *)0xa0EFC402 -#define MFT3CUT (US *)0xa0EFC40A -#define MFT3RLD (US *)0xa0EFC40E -#define MFT3CRLD (US *)0xa0EFC412 - -#define MFT4MOD (US *)0xa0EFC502 -#define MFT4CUT (US *)0xa0EFC50A -#define MFT4RLD (US *)0xa0EFC50E -#define MFT4CRLD (US *)0xa0EFC512 - -#define MFT5MOD (US *)0xa0EFC602 -#define MFT5CUT (US *)0xa0EFC60A -#define MFT5RLD (US *)0xa0EFC60E -#define MFT5CRLD (US *)0xa0EFC612 - -/********************************* - -M32102 SIO - -*********************************/ - -#define SIO0CR (volatile int *)0xa0efd000 -#define SIO0MOD0 (volatile int *)0xa0efd004 -#define SIO0MOD1 (volatile int *)0xa0efd008 -#define SIO0STS (volatile int *)0xa0efd00c -#define SIO0IMASK (volatile int *)0xa0efd010 -#define SIO0BAUR (volatile int *)0xa0efd014 -#define SIO0RBAUR (volatile int *)0xa0efd018 -#define SIO0TXB (volatile int *)0xa0efd01c -#define SIO0RXB (volatile int *)0xa0efd020 - -#define SIO1CR (volatile int *)0xa0efd100 -#define SIO1MOD0 (volatile int *)0xa0efd104 -#define SIO1MOD1 (volatile int *)0xa0efd108 -#define SIO1STS (volatile int *)0xa0efd10c -#define SIO1IMASK (volatile int 
*)0xa0efd110 -#define SIO1BAUR (volatile int *)0xa0efd114 -#define SIO1RBAUR (volatile int *)0xa0efd118 -#define SIO1TXB (volatile int *)0xa0efd11c -#define SIO1RXB (volatile int *)0xa0efd120 -/********************************* - -M32102 PORT - -*********************************/ -#define PIEN (UB *)0xa0EF1003 /* input enable */ - -#define P0DATA (UB *)0xa0EF1020 /* data */ -#define P1DATA (UB *)0xa0EF1021 -#define P2DATA (UB *)0xa0EF1022 -#define P3DATA (UB *)0xa0EF1023 -#define P4DATA (UB *)0xa0EF1024 -#define P5DATA (UB *)0xa0EF1025 -#define P6DATA (UB *)0xa0EF1026 -#define P7DATA (UB *)0xa0EF1027 - -#define P0DIR (UB *)0xa0EF1040 /* direction */ -#define P1DIR (UB *)0xa0EF1041 -#define P2DIR (UB *)0xa0EF1042 -#define P3DIR (UB *)0xa0EF1043 -#define P4DIR (UB *)0xa0EF1044 -#define P5DIR (UB *)0xa0EF1045 -#define P6DIR (UB *)0xa0EF1046 -#define P7DIR (UB *)0xa0EF1047 - -#define P0MOD (US *)0xa0EF1060 /* mode control */ -#define P1MOD (US *)0xa0EF1062 -#define P2MOD (US *)0xa0EF1064 -#define P3MOD (US *)0xa0EF1066 -#define P4MOD (US *)0xa0EF1068 -#define P5MOD (US *)0xa0EF106A -#define P6MOD (US *)0xa0EF106C -#define P7MOD (US *)0xa0EF106E - -#define P0ODCR (UB *)0xa0EF1080 /* open-drain control */ -#define P1ODCR (UB *)0xa0EF1081 -#define P2ODCR (UB *)0xa0EF1082 -#define P3ODCR (UB *)0xa0EF1083 -#define P4ODCR (UB *)0xa0EF1084 -#define P5ODCR (UB *)0xa0EF1085 -#define P6ODCR (UB *)0xa0EF1086 -#define P7ODCR (UB *)0xa0EF1087 - -/********************************* - -M32102 Cache - -********************************/ - -#define MCCR (US *)0xFFFFFFFE - - -#else /* __ASSEMBLY__ */ - -;; -;; PIO 0x80ef1000 -;; - -#define PIEN 0xa0ef1000 - -#define P0DATA 0xa0ef1020 -#define P1DATA 0xa0ef1021 -#define P2DATA 0xa0ef1022 -#define P3DATA 0xa0ef1023 -#define P4DATA 0xa0ef1024 -#define P5DATA 0xa0ef1025 -#define P6DATA 0xa0ef1026 -#define P7DATA 0xa0ef1027 - -#define P0DIR 0xa0ef1040 -#define P1DIR 0xa0ef1041 -#define P2DIR 0xa0ef1042 -#define P3DIR 0xa0ef1043 -#define P4DIR 0xa0ef1044 -#define P5DIR 0xa0ef1045 -#define P6DIR 0xa0ef1046 -#define P7DIR 0xa0ef1047 - -#define P0MOD 0xa0ef1060 -#define P1MOD 0xa0ef1062 -#define P2MOD 0xa0ef1064 -#define P3MOD 0xa0ef1066 -#define P4MOD 0xa0ef1068 -#define P5MOD 0xa0ef106a -#define P6MOD 0xa0ef106c -#define P7MOD 0xa0ef106e -; -#define P0ODCR 0xa0ef1080 -#define P1ODCR 0xa0ef1081 -#define P2ODCR 0xa0ef1082 -#define P3ODCR 0xa0ef1083 -#define P4ODCR 0xa0ef1084 -#define P5ODCR 0xa0ef1085 -#define P6ODCR 0xa0ef1086 -#define P7ODCR 0xa0ef1087 - -;; -;; WDT 0xa0ef2000 -;; - -#define WDTCR 0xa0ef2000 - - -;; -;; CLK 0xa0ef4000 -;; - -#define CPUCLKCR 0xa0ef4000 -#define CLKMOD 0xa0ef4004 -#define PLLCR 0xa0ef4008 - - -;; -;; BSEL 0xa0ef5000 -;; - -#define BSEL0CR 0xa0ef5000 -#define BSEL1CR 0xa0ef5004 -#define BSEL2CR 0xa0ef5008 -#define BSEL3CR 0xa0ef500c -#define BSEL4CR 0xa0ef5010 -#define BSEL5CR 0xa0ef5014 - - -;; -;; SDRAMC 0xa0ef6000 -;; - -#define SDRF0 0xa0ef6000 -#define SDRF1 0xa0ef6004 -#define SDIR0 0xa0ef6008 -#define SDIR1 0xa0ef600c -#define SDBR 0xa0ef6010 - -;; CH0 -#define SD0ADR 0xa0ef6020 -#define SD0SZ 0xa0ef6022 -#define SD0ER 0xa0ef6024 -#define SD0TR 0xa0ef6028 -#define SD0MOD 0xa0ef602c - -;; CH1 -#define SD1ADR 0xa0ef6040 -#define SD1SZ 0xa0ef6042 -#define SD1ER 0xa0ef6044 -#define SD1TR 0xa0ef6048 -#define SD1MOD 0xa0ef604c - - -;; -;; DMAC 0xa0ef8000 -;; - -#define DMAEN 0xa0ef8000 -#define DMAISTS 0xa0ef8004 -#define DMAEDET 0xa0ef8008 -#define DMAASTS 0xa0ef800c - -;; CH0 -#define DMA0CR0 0xa0ef8100 -#define DMA0CR1 0xa0ef8104 
-#define DMA0CSA 0xa0ef8108 -#define DMA0RSA 0xa0ef810c -#define DMA0CDA 0xa0ef8110 -#define DMA0RDA 0xa0ef8114 -#define DMA0CBCUT 0xa0ef8118 -#define DMA0RBCUT 0xa0ef811c - -;; CH1 -#define DMA1CR0 0xa0ef8200 -#define DMA1CR1 0xa0ef8204 -#define DMA1CSA 0xa0ef8208 -#define DMA1RSA 0xa0ef820c -#define DMA1CDA 0xa0ef8210 -#define DMA1RDA 0xa0ef8214 -#define DMA1CBCUT 0xa0ef8218 -#define DMA1RBCUT 0xa0ef821c - - -;; -;; MFT 0xa0efc000 -;; - -#define MFTCR 0xa0efc000 -#define MFTRPR 0xa0efc004 - -;; CH0 -#define MFT0MOD 0xa0efc100 -#define MFT0BOS 0xa0efc104 -#define MFT0CUT 0xa0efc108 -#define MFT0RLD 0xa0efc10c -#define MFT0CMPRLD 0xa0efc110 - -;; CH1 -#define MFT1MOD 0xa0efc200 -#define MFT1BOS 0xa0efc204 -#define MFT1CUT 0xa0efc208 -#define MFT1RLD 0xa0efc20c -#define MFT1CMPRLD 0xa0efc210 - -;; CH2 -#define MFT2MOD 0xa0efc300 -#define MFT2BOS 0xa0efc304 -#define MFT2CUT 0xa0efc308 -#define MFT2RLD 0xa0efc30c -#define MFT2CMPRLD 0xa0efc310 - -;; CH3 -#define MFT3MOD 0xa0efc400 -#define MFT3BOS 0xa0efc404 -#define MFT3CUT 0xa0efc408 -#define MFT3RLD 0xa0efc40c -#define MFT3CMPRLD 0xa0efc410 - -;; CH4 -#define MFT4MOD 0xa0efc500 -#define MFT4BOS 0xa0efc504 -#define MFT4CUT 0xa0efc508 -#define MFT4RLD 0xa0efc50c -#define MFT4CMPRLD 0xa0efc510 - -;; CH5 -#define MFT5MOD 0xa0efc600 -#define MFT5BOS 0xa0efc604 -#define MFT5CUT 0xa0efc608 -#define MFT5RLD 0xa0efc60c -#define MFT5CMPRLD 0xa0efc610 - - -;; -;; SIO 0xa0efd000 -;; - -;; CH0 -#define SIO0CR 0xa0efd000 -#define SIO0MOD0 0xa0efd004 -#define SIO0MOD1 0xa0efd008 -#define SIO0STS 0xa0efd00c -#define SIO0IMASK 0xa0efd010 -#define SIO0BAUR 0xa0efd014 -#define SIO0RBAUR 0xa0efd018 -#define SIO0TXB 0xa0efd01c -#define SIO0RXB 0xa0efd020 - -;; CH1 -#define SIO1CR 0xa0efd100 -#define SIO1MOD0 0xa0efd104 -#define SIO1MOD1 0xa0efd108 -#define SIO1STS 0xa0efd10c -#define SIO1IMASK 0xa0efd110 -#define SIO1BAUR 0xa0efd114 -#define SIO1RBAUR 0xa0efd118 -#define SIO1TXB 0xa0efd11c -#define SIO1RXB 0xa0efd120 - -;; CH2 -#define SIO2CR 0xa0efd200 -#define SIO2MOD0 0xa0efd204 -#define SIO2MOD1 0xa0efd208 -#define SIO2STS 0xa0efd20c -#define SIO2IMASK 0xa0efd210 -#define SIO2BAUR 0xa0efd214 -#define SIO2RBAUR 0xa0efd218 -#define SIO2TXB 0xa0efd21c -#define SIO2RXB 0xa0efd220 - -;; CH3 -#define SIO3CR 0xa0efd300 -#define SIO3MOD0 0xa0efd304 -#define SIO3MOD1 0xa0efd308 -#define SIO3STS 0xa0efd30c -#define SIO3IMASK 0xa0efd310 -#define SIO3BAUR 0xa0efd314 -#define SIO3RBAUR 0xa0efd318 -#define SIO3TXB 0xa0efd31c -#define SIO3RXB 0xa0efd320 - -;; CH4 -#define SIO4CR 0xa0efd400 -#define SIO4MOD0 0xa0efd404 -#define SIO4MOD1 0xa0efd408 -#define SIO4STS 0xa0efd40c -#define SIO4IMASK 0xa0efd410 -#define SIO4BAUR 0xa0efd414 -#define SIO4RBAUR 0xa0efd418 -#define SIO4TXB 0xa0efd41c -#define SIO4RXB 0xa0efd420 - - -;; -;; ICU 0xa0eff000 -;; - -#define ICUISTS 0xa0eff004 -#define ICUIREQ0 0xa0eff008 -#define ICUIREQ1 0xa0eff00c - -#define ICUSBICR 0xa0eff018 -#define ICUIMASK 0xa0eff01c - -#define ICUCR1 0xa0eff200 -#define ICUCR2 0xa0eff204 -#define ICUCR3 0xa0eff208 -#define ICUCR4 0xa0eff20c -#define ICUCR5 0xa0eff210 -#define ICUCR6 0xa0eff214 -#define ICUCR7 0xa0eff218 - -#define ICUCR16 0xa0eff23c -#define ICUCR17 0xa0eff240 -#define ICUCR18 0xa0eff244 -#define ICUCR19 0xa0eff248 -#define ICUCR20 0xa0eff24c -#define ICUCR21 0xa0eff250 - -#define ICUCR32 0xa0eff27c -#define ICUCR33 0xa0eff280 - -#define ICUCR48 0xa0eff2bc -#define ICUCR49 0xa0eff2c0 -#define ICUCR50 0xa0eff2c4 -#define ICUCR51 0xa0eff2c8 -#define ICUCR52 0xa0eff2cc -#define ICUCR53 0xa0eff2d0 
-#define ICUCR54 0xa0eff2d4 -#define ICUCR55 0xa0eff2d8 -#define ICUCR56 0xa0eff2dc -#define ICUCR57 0xa0eff2e0 - -;; -;; CACHE -;; - -#define MCCR 0xfffffffc - - -#endif /* __ASSEMBLY__ */ diff --git a/include/asm-m32r/m32r.h b/include/asm-m32r/m32r.h index f116649..ec142be 100644 --- a/include/asm-m32r/m32r.h +++ b/include/asm-m32r/m32r.h @@ -16,7 +16,6 @@ || defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_M32102) \ || defined(CONFIG_CHIP_OPSP) #include <asm/m32102.h> -#include <asm/m32102peri.h> #endif /* Platform type */ @@ -36,6 +35,10 @@ #include <asm/mappi2/mappi2_pld.h> #endif /* CONFIG_PLAT_MAPPI2 */ +#if defined(CONFIG_PLAT_MAPPI3) +#include <asm/mappi3/mappi3_pld.h> +#endif /* CONFIG_PLAT_MAPPI3 */ + #if defined(CONFIG_PLAT_USRV) #include <asm/m32700ut/m32700ut_pld.h> #endif diff --git a/include/asm-m32r/mappi3/mappi3_pld.h b/include/asm-m32r/mappi3/mappi3_pld.h new file mode 100644 index 0000000..3f1551f --- /dev/null +++ b/include/asm-m32r/mappi3/mappi3_pld.h @@ -0,0 +1,143 @@ +/* + * include/asm/mappi3/mappi3_pld.h + * + * Definitions for Extended IO Logic on MAPPI3 board. + * based on m32700ut_pld.h + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file "COPYING" in the main directory of + * this archive for more details. + * + */ + +#ifndef _MAPPI3_PLD_H +#define _MAPPI3_PLD_H + +#ifndef __ASSEMBLY__ +/* FIXME: + * Some C functions use non-cache address, so can't define non-cache address. + */ +#define PLD_BASE (0x1c000000 /* + NONCACHE_OFFSET */) +#define __reg8 (volatile unsigned char *) +#define __reg16 (volatile unsigned short *) +#define __reg32 (volatile unsigned int *) +#else +#define PLD_BASE (0x1c000000 + NONCACHE_OFFSET) +#define __reg8 +#define __reg16 +#define __reg32 +#endif /* __ASSEMBLY__ */ + +/* CFC */ +#define PLD_CFRSTCR __reg16(PLD_BASE + 0x0000) +#define PLD_CFSTS __reg16(PLD_BASE + 0x0002) +#define PLD_CFIMASK __reg16(PLD_BASE + 0x0004) +#define PLD_CFBUFCR __reg16(PLD_BASE + 0x0006) +#define PLD_CFCR0 __reg16(PLD_BASE + 0x000a) +#define PLD_CFCR1 __reg16(PLD_BASE + 0x000c) + +/* MMC */ +#define PLD_MMCCR __reg16(PLD_BASE + 0x4000) +#define PLD_MMCMOD __reg16(PLD_BASE + 0x4002) +#define PLD_MMCSTS __reg16(PLD_BASE + 0x4006) +#define PLD_MMCBAUR __reg16(PLD_BASE + 0x400a) +#define PLD_MMCCMDBCUT __reg16(PLD_BASE + 0x400c) +#define PLD_MMCCDTBCUT __reg16(PLD_BASE + 0x400e) +#define PLD_MMCDET __reg16(PLD_BASE + 0x4010) +#define PLD_MMCWP __reg16(PLD_BASE + 0x4012) +#define PLD_MMCWDATA __reg16(PLD_BASE + 0x5000) +#define PLD_MMCRDATA __reg16(PLD_BASE + 0x6000) +#define PLD_MMCCMDDATA __reg16(PLD_BASE + 0x7000) +#define PLD_MMCRSPDATA __reg16(PLD_BASE + 0x7006) + +/* Power Control of MMC and CF */ +#define PLD_CPCR __reg16(PLD_BASE + 0x14000) + + +/*==== ICU ====*/ +#define M32R_IRQ_PC104 (5) /* INT4(PC/104) */ +#define M32R_IRQ_I2C (28) /* I2C-BUS */ +#define PLD_IRQ_CFIREQ (6) /* INT5 CFC Card Interrupt */ +#define PLD_IRQ_CFC_INSERT (7) /* INT6 CFC Card Insert */ +#define PLD_IRQ_CFC_EJECT (8) /* INT7 CFC Card Eject */ +#define PLD_IRQ_MMCCARD (43) /* MMC Card Insert */ +#define PLD_IRQ_MMCIRQ (44) /* MMC Transfer Done */ + + +#if 0 +/* LED Control + * + * 1: DIP swich side + * 2: Reset switch side + */ +#define PLD_IOLEDCR __reg16(PLD_BASE + 0x14002) +#define PLD_IOLED_1_ON 0x001 +#define PLD_IOLED_1_OFF 0x000 +#define PLD_IOLED_2_ON 0x002 +#define PLD_IOLED_2_OFF 0x000 + +/* DIP Switch + * 0: Write-protect of Flash Memory (0:protected, 1:non-protected) + * 1: - + * 2: - + * 3: - + */ +#define 
PLD_IOSWSTS __reg16(PLD_BASE + 0x14004) +#define PLD_IOSWSTS_IOSW2 0x0200 +#define PLD_IOSWSTS_IOSW1 0x0100 +#define PLD_IOSWSTS_IOWP0 0x0001 + +#endif + +/* CRC */ +#define PLD_CRC7DATA __reg16(PLD_BASE + 0x18000) +#define PLD_CRC7INDATA __reg16(PLD_BASE + 0x18002) +#define PLD_CRC16DATA __reg16(PLD_BASE + 0x18004) +#define PLD_CRC16INDATA __reg16(PLD_BASE + 0x18006) +#define PLD_CRC16ADATA __reg16(PLD_BASE + 0x18008) +#define PLD_CRC16AINDATA __reg16(PLD_BASE + 0x1800a) + + +#if 0 +/* RTC */ +#define PLD_RTCCR __reg16(PLD_BASE + 0x1c000) +#define PLD_RTCBAUR __reg16(PLD_BASE + 0x1c002) +#define PLD_RTCWRDATA __reg16(PLD_BASE + 0x1c004) +#define PLD_RTCRDDATA __reg16(PLD_BASE + 0x1c006) +#define PLD_RTCRSTODT __reg16(PLD_BASE + 0x1c008) + +/* SIO0 */ +#define PLD_ESIO0CR __reg16(PLD_BASE + 0x20000) +#define PLD_ESIO0CR_TXEN 0x0001 +#define PLD_ESIO0CR_RXEN 0x0002 +#define PLD_ESIO0MOD0 __reg16(PLD_BASE + 0x20002) +#define PLD_ESIO0MOD0_CTSS 0x0040 +#define PLD_ESIO0MOD0_RTSS 0x0080 +#define PLD_ESIO0MOD1 __reg16(PLD_BASE + 0x20004) +#define PLD_ESIO0MOD1_LMFS 0x0010 +#define PLD_ESIO0STS __reg16(PLD_BASE + 0x20006) +#define PLD_ESIO0STS_TEMP 0x0001 +#define PLD_ESIO0STS_TXCP 0x0002 +#define PLD_ESIO0STS_RXCP 0x0004 +#define PLD_ESIO0STS_TXSC 0x0100 +#define PLD_ESIO0STS_RXSC 0x0200 +#define PLD_ESIO0STS_TXREADY (PLD_ESIO0STS_TXCP | PLD_ESIO0STS_TEMP) +#define PLD_ESIO0INTCR __reg16(PLD_BASE + 0x20008) +#define PLD_ESIO0INTCR_TXIEN 0x0002 +#define PLD_ESIO0INTCR_RXCEN 0x0004 +#define PLD_ESIO0BAUR __reg16(PLD_BASE + 0x2000a) +#define PLD_ESIO0TXB __reg16(PLD_BASE + 0x2000c) +#define PLD_ESIO0RXB __reg16(PLD_BASE + 0x2000e) + +/* SIM Card */ +#define PLD_SCCR __reg16(PLD_BASE + 0x38000) +#define PLD_SCMOD __reg16(PLD_BASE + 0x38004) +#define PLD_SCSTS __reg16(PLD_BASE + 0x38006) +#define PLD_SCINTCR __reg16(PLD_BASE + 0x38008) +#define PLD_SCBAUR __reg16(PLD_BASE + 0x3800a) +#define PLD_SCTXB __reg16(PLD_BASE + 0x3800c) +#define PLD_SCRXB __reg16(PLD_BASE + 0x3800e) + +#endif + +#endif /* _MAPPI3_PLD.H */ diff --git a/include/asm-m32r/mmzone.h b/include/asm-m32r/mmzone.h index ebf0228..d58878e 100644 --- a/include/asm-m32r/mmzone.h +++ b/include/asm-m32r/mmzone.h @@ -14,7 +14,6 @@ extern struct pglist_data *node_data[]; #define NODE_DATA(nid) (node_data[nid]) #define node_localnr(pfn, nid) ((pfn) - NODE_DATA(nid)->node_start_pfn) -#define node_mem_map(nid) (NODE_DATA(nid)->node_mem_map) #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) #define node_end_pfn(nid) \ ({ \ @@ -32,7 +31,7 @@ extern struct pglist_data *node_data[]; ({ \ unsigned long __pfn = pfn; \ int __node = pfn_to_nid(__pfn); \ - &node_mem_map(__node)[node_localnr(__pfn,__node)]; \ + &NODE_DATA(__node)->node_mem_map[node_localnr(__pfn,__node)]; \ }) #define page_to_pfn(pg) \ diff --git a/include/asm-m32r/smp.h b/include/asm-m32r/smp.h index 8cd4d0d..b9a20cd 100644 --- a/include/asm-m32r/smp.h +++ b/include/asm-m32r/smp.h @@ -66,7 +66,7 @@ extern volatile int cpu_2_physid[NR_CPUS]; #define physid_to_cpu(physid) physid_2_cpu[physid] #define cpu_to_physid(cpu_id) cpu_2_physid[cpu_id] -#define smp_processor_id() (current_thread_info()->cpu) +#define raw_smp_processor_id() (current_thread_info()->cpu) extern cpumask_t cpu_callout_map; #define cpu_possible_map cpu_callout_map diff --git a/include/asm-m32r/thread_info.h b/include/asm-m32r/thread_info.h index 9f3a0fc..7a6be77 100644 --- a/include/asm-m32r/thread_info.h +++ b/include/asm-m32r/thread_info.h @@ -28,7 +28,7 @@ struct thread_info { unsigned long flags; /* low 
level flags */ unsigned long status; /* thread-synchronous flags */ __u32 cpu; /* current CPU */ - __s32 preempt_count; /* 0 => preemptable, <0 => BUG */ + int preempt_count; /* 0 => preemptable, <0 => BUG */ mm_segment_t addr_limit; /* thread address space: 0-0xBFFFFFFF for user-thread diff --git a/include/asm-m32r/topology.h b/include/asm-m32r/topology.h index 299a89d..d607eb3 100644 --- a/include/asm-m32r/topology.h +++ b/include/asm-m32r/topology.h @@ -1,48 +1,6 @@ -/* - * linux/include/asm-generic/topology.h - * - * Written by: Matthew Dobson, IBM Corporation - * - * Copyright (C) 2002, IBM Corp. - * - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or - * NON INFRINGEMENT. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * - * Send feedback to <colpatch@us.ibm.com> - */ #ifndef _ASM_M32R_TOPOLOGY_H #define _ASM_M32R_TOPOLOGY_H -/* Other architectures wishing to use this simple topology API should fill - in the below functions as appropriate in their own <asm/topology.h> file. */ - -#define cpu_to_node(cpu) (0) - -#ifndef parent_node -#define parent_node(node) (0) -#endif -#ifndef node_to_cpumask -#define node_to_cpumask(node) (cpu_online_map) -#endif -#ifndef node_to_first_cpu -#define node_to_first_cpu(node) (0) -#endif -#ifndef pcibus_to_cpumask -#define pcibus_to_cpumask(bus) (cpu_online_map) -#endif +#include <asm-generic/topology.h> #endif /* _ASM_M32R_TOPOLOGY_H */ diff --git a/include/asm-m68k/thread_info.h b/include/asm-m68k/thread_info.h index 5f58939..2aed24f 100644 --- a/include/asm-m68k/thread_info.h +++ b/include/asm-m68k/thread_info.h @@ -8,7 +8,7 @@ struct thread_info { struct task_struct *task; /* main task structure */ struct exec_domain *exec_domain; /* execution domain */ - __s32 preempt_count; /* 0 => preemptable, <0 => BUG */ + int preempt_count; /* 0 => preemptable, <0 => BUG */ __u32 cpu; /* should always be 0 on m68k */ struct restart_block restart_block; diff --git a/include/asm-m68knommu/thread_info.h b/include/asm-m68knommu/thread_info.h index c8153b7..7b9a3fa 100644 --- a/include/asm-m68knommu/thread_info.h +++ b/include/asm-m68knommu/thread_info.h @@ -36,7 +36,7 @@ struct thread_info { struct exec_domain *exec_domain; /* execution domain */ unsigned long flags; /* low level flags */ int cpu; /* cpu we're on */ - int preempt_count; /* 0 => preemptable, <0 => BUG*/ + int preempt_count; /* 0 => preemptable, <0 => BUG */ struct restart_block restart_block; }; diff --git a/include/asm-mips/compat.h b/include/asm-mips/compat.h index dce9207..d78002a 100644 --- a/include/asm-mips/compat.h +++ b/include/asm-mips/compat.h @@ -29,6 +29,7 @@ typedef s32 compat_caddr_t; typedef struct { s32 val[2]; } compat_fsid_t; +typedef s32 compat_timer_t; typedef s32 compat_int_t; typedef s32 compat_long_t; diff --git a/include/asm-mips/mmzone.h b/include/asm-mips/mmzone.h index 29ee13b..d721143 100644 --- a/include/asm-mips/mmzone.h +++ 
b/include/asm-mips/mmzone.h @@ -8,6 +8,8 @@ #include <asm/page.h> #include <mmzone.h> +#ifdef CONFIG_DISCONTIGMEM + #define kvaddr_to_nid(kvaddr) pa_to_nid(__pa(kvaddr)) #define pfn_to_nid(pfn) pa_to_nid((pfn) << PAGE_SHIFT) @@ -36,4 +38,6 @@ /* XXX: FIXME -- wli */ #define kern_addr_valid(addr) (0) +#endif /* CONFIG_DISCONTIGMEM */ + #endif /* _ASM_MMZONE_H_ */ diff --git a/include/asm-mips/page.h b/include/asm-mips/page.h index d1bf824..5cae35c 100644 --- a/include/asm-mips/page.h +++ b/include/asm-mips/page.h @@ -127,7 +127,7 @@ static __inline__ int get_order(unsigned long size) #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) -#ifndef CONFIG_DISCONTIGMEM +#ifndef CONFIG_NEED_MULTIPLE_NODES #define pfn_to_page(pfn) (mem_map + (pfn)) #define page_to_pfn(page) ((unsigned long)((page) - mem_map)) #define pfn_valid(pfn) ((pfn) < max_mapnr) diff --git a/include/asm-mips/pgtable.h b/include/asm-mips/pgtable.h index 8788432..e76ccd6 100644 --- a/include/asm-mips/pgtable.h +++ b/include/asm-mips/pgtable.h @@ -350,7 +350,7 @@ static inline void update_mmu_cache(struct vm_area_struct *vma, __update_cache(vma, address, pte); } -#ifndef CONFIG_DISCONTIGMEM +#ifndef CONFIG_NEED_MULTIPLE_NODES #define kern_addr_valid(addr) (1) #endif diff --git a/include/asm-mips/smp.h b/include/asm-mips/smp.h index 8ba370e..5618f1e 100644 --- a/include/asm-mips/smp.h +++ b/include/asm-mips/smp.h @@ -21,7 +21,7 @@ #include <linux/cpumask.h> #include <asm/atomic.h> -#define smp_processor_id() (current_thread_info()->cpu) +#define raw_smp_processor_id() (current_thread_info()->cpu) /* Map from cpu id to sequential logical cpu number. This will only not be idempotent when cpus failed to come on-line. */ diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h index 888fd89..169f3d4 100644 --- a/include/asm-mips/system.h +++ b/include/asm-mips/system.h @@ -422,16 +422,10 @@ extern void __die_if_kernel(const char *, struct pt_regs *, const char *file, extern int stop_a_enabled; /* - * Taken from include/asm-ia64/system.h; prevents deadlock on SMP + * See include/asm-ia64/system.h; prevents deadlock on SMP * systems. */ -#define prepare_arch_switch(rq, next) \ -do { \ - spin_lock(&(next)->switch_lock); \ - spin_unlock(&(rq)->lock); \ -} while (0) -#define finish_arch_switch(rq, prev) spin_unlock_irq(&(prev)->switch_lock) -#define task_running(rq, p) ((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock)) +#define __ARCH_WANT_UNLOCKED_CTXSW #define arch_align_stack(x) (x) diff --git a/include/asm-mips/thread_info.h b/include/asm-mips/thread_info.h index 7689003..42fcd6f 100644 --- a/include/asm-mips/thread_info.h +++ b/include/asm-mips/thread_info.h @@ -27,7 +27,7 @@ struct thread_info { struct exec_domain *exec_domain; /* execution domain */ unsigned long flags; /* low level flags */ __u32 cpu; /* current CPU */ - __s32 preempt_count; /* 0 => preemptable, <0 => BUG */ + int preempt_count; /* 0 => preemptable, <0 => BUG */ mm_segment_t addr_limit; /* thread address space: 0-0xBFFFFFFF for user-thead diff --git a/include/asm-mips/vr41xx/giu.h b/include/asm-mips/vr41xx/giu.h new file mode 100644 index 0000000..8590885 --- /dev/null +++ b/include/asm-mips/vr41xx/giu.h @@ -0,0 +1,69 @@ +/* + * Include file for NEC VR4100 series General-purpose I/O Unit. 
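Several hunks in this merge (m32r, mips, parisc, ppc) rename the per-architecture CPU-number macro from smp_processor_id() to raw_smp_processor_id(). The generic smp_processor_id() then layers on top of the raw value; roughly, and simplified from the generic linux/smp.h of this era:

/* Sketch of the generic wrapper the renamed macros feed into.  With
 * CONFIG_DEBUG_PREEMPT the checked helper is used; otherwise the raw,
 * unchecked per-arch value is returned directly. */
#ifdef CONFIG_DEBUG_PREEMPT
  extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id()	debug_smp_processor_id()
#else
# define smp_processor_id()	raw_smp_processor_id()
#endif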
+ * + * Copyright (C) 2005 Yoichi Yuasa <yuasa@hh.iij4u.or.jp> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef __NEC_VR41XX_GIU_H +#define __NEC_VR41XX_GIU_H + +typedef enum { + IRQ_TRIGGER_LEVEL, + IRQ_TRIGGER_EDGE, + IRQ_TRIGGER_EDGE_FALLING, + IRQ_TRIGGER_EDGE_RISING, +} irq_trigger_t; + +typedef enum { + IRQ_SIGNAL_THROUGH, + IRQ_SIGNAL_HOLD, +} irq_signal_t; + +extern void vr41xx_set_irq_trigger(unsigned int pin, irq_trigger_t trigger, irq_signal_t signal); + +typedef enum { + IRQ_LEVEL_LOW, + IRQ_LEVEL_HIGH, +} irq_level_t; + +extern void vr41xx_set_irq_level(unsigned int pin, irq_level_t level); + +typedef enum { + GPIO_DATA_LOW, + GPIO_DATA_HIGH, + GPIO_DATA_INVAL, +} gpio_data_t; + +extern gpio_data_t vr41xx_gpio_get_pin(unsigned int pin); +extern int vr41xx_gpio_set_pin(unsigned int pin, gpio_data_t data); + +typedef enum { + GPIO_INPUT, + GPIO_OUTPUT, + GPIO_OUTPUT_DISABLE, +} gpio_direction_t; + +extern int vr41xx_gpio_set_direction(unsigned int pin, gpio_direction_t dir); + +typedef enum { + GPIO_PULL_DOWN, + GPIO_PULL_UP, + GPIO_PULL_DISABLE, +} gpio_pull_t; + +extern int vr41xx_gpio_pullupdown(unsigned int pin, gpio_pull_t pull); + +#endif /* __NEC_VR41XX_GIU_H */ diff --git a/include/asm-mips/vr41xx/vr41xx.h b/include/asm-mips/vr41xx/vr41xx.h index ad0d1ea..7d41e44 100644 --- a/include/asm-mips/vr41xx/vr41xx.h +++ b/include/asm-mips/vr41xx/vr41xx.h @@ -126,7 +126,6 @@ extern void vr41xx_mask_clock(vr41xx_clock_t clock); #define GIU_IRQ_BASE 40 #define GIU_IRQ(x) (GIU_IRQ_BASE + (x)) /* IRQ 40-71 */ #define GIU_IRQ_LAST GIU_IRQ(31) -#define GIU_IRQ_TO_PIN(x) ((x) - GIU_IRQ_BASE) /* Pin 0-31 */ extern int vr41xx_set_intassign(unsigned int irq, unsigned char intassign); extern int vr41xx_cascade_irq(unsigned int irq, int (*get_irq_number)(int irq)); @@ -197,38 +196,4 @@ extern void vr41xx_disable_csiint(uint16_t mask); extern void vr41xx_enable_bcuint(void); extern void vr41xx_disable_bcuint(void); -/* - * General-Purpose I/O Unit - */ -enum { - TRIGGER_LEVEL, - TRIGGER_EDGE, - TRIGGER_EDGE_FALLING, - TRIGGER_EDGE_RISING -}; - -enum { - SIGNAL_THROUGH, - SIGNAL_HOLD -}; - -extern void vr41xx_set_irq_trigger(int pin, int trigger, int hold); - -enum { - LEVEL_LOW, - LEVEL_HIGH -}; - -extern void vr41xx_set_irq_level(int pin, int level); - -enum { - PIO_INPUT, - PIO_OUTPUT -}; - -enum { - DATA_LOW, - DATA_HIGH -}; - #endif /* __NEC_VR41XX_H */ diff --git a/include/asm-parisc/compat.h b/include/asm-parisc/compat.h index ca0eac6..7630d1a 100644 --- a/include/asm-parisc/compat.h +++ b/include/asm-parisc/compat.h @@ -24,7 +24,7 @@ typedef u16 compat_nlink_t; typedef u16 compat_ipc_pid_t; typedef s32 compat_daddr_t; typedef u32 compat_caddr_t; -typedef u32 compat_timer_t; +typedef s32 compat_timer_t; typedef s32 compat_int_t; typedef s32 compat_long_t; diff --git a/include/asm-parisc/mmzone.h 
b/include/asm-parisc/mmzone.h index 928bf50..595d3dc 100644 --- a/include/asm-parisc/mmzone.h +++ b/include/asm-parisc/mmzone.h @@ -19,7 +19,6 @@ extern struct node_map_data node_data[]; */ #define kvaddr_to_nid(kaddr) pfn_to_nid(__pa(kaddr) >> PAGE_SHIFT) -#define node_mem_map(nid) (NODE_DATA(nid)->node_mem_map) #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) #define node_end_pfn(nid) \ ({ \ @@ -38,7 +37,7 @@ extern struct node_map_data node_data[]; ({ \ unsigned long __pfn = (pfn); \ int __node = pfn_to_nid(__pfn); \ - &node_mem_map(__node)[node_localnr(__pfn,__node)]; \ + &NODE_DATA(__node)->node_mem_map[node_localnr(__pfn,__node)]; \ }) #define page_to_pfn(pg) \ diff --git a/include/asm-parisc/smp.h b/include/asm-parisc/smp.h index fde77ac..9413f67 100644 --- a/include/asm-parisc/smp.h +++ b/include/asm-parisc/smp.h @@ -51,7 +51,7 @@ extern void smp_send_reschedule(int cpu); extern unsigned long cpu_present_mask; -#define smp_processor_id() (current_thread_info()->cpu) +#define raw_smp_processor_id() (current_thread_info()->cpu) #endif /* CONFIG_SMP */ diff --git a/include/asm-parisc/thread_info.h b/include/asm-parisc/thread_info.h index fe9b7f8..57bbb76 100644 --- a/include/asm-parisc/thread_info.h +++ b/include/asm-parisc/thread_info.h @@ -12,7 +12,7 @@ struct thread_info { unsigned long flags; /* thread_info flags (see TIF_*) */ mm_segment_t addr_limit; /* user-level address space limit */ __u32 cpu; /* current CPU */ - __s32 preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */ + int preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */ struct restart_block restart_block; }; diff --git a/include/asm-ppc/agp.h b/include/asm-ppc/agp.h index be27cfa8..ca9e423 100644 --- a/include/asm-ppc/agp.h +++ b/include/asm-ppc/agp.h @@ -10,4 +10,14 @@ #define flush_agp_mappings() #define flush_agp_cache() mb() +/* Convert a physical address to an address suitable for the GART. */ +#define phys_to_gart(x) (x) +#define gart_to_phys(x) (x) + +/* GATT allocation. Returns/accepts GATT kernel virtual address. 
*/ +#define alloc_gatt_pages(order) \ + ((char *)__get_free_pages(GFP_KERNEL, (order))) +#define free_gatt_pages(table, order) \ + free_pages((unsigned long)(table), (order)) + #endif diff --git a/include/asm-ppc/cpm2.h b/include/asm-ppc/cpm2.h index 42fd106..c5883db 100644 --- a/include/asm-ppc/cpm2.h +++ b/include/asm-ppc/cpm2.h @@ -1039,6 +1039,52 @@ typedef struct im_idma { #define CMXSCR_TS4CS_CLK7 0x00000006 /* SCC4 Tx Clock Source is CLK7 */ #define CMXSCR_TS4CS_CLK8 0x00000007 /* SCC4 Tx Clock Source is CLK8 */ +/*----------------------------------------------------------------------- + * SIUMCR - SIU Module Configuration Register 4-31 + */ +#define SIUMCR_BBD 0x80000000 /* Bus Busy Disable */ +#define SIUMCR_ESE 0x40000000 /* External Snoop Enable */ +#define SIUMCR_PBSE 0x20000000 /* Parity Byte Select Enable */ +#define SIUMCR_CDIS 0x10000000 /* Core Disable */ +#define SIUMCR_DPPC00 0x00000000 /* Data Parity Pins Configuration*/ +#define SIUMCR_DPPC01 0x04000000 /* - " - */ +#define SIUMCR_DPPC10 0x08000000 /* - " - */ +#define SIUMCR_DPPC11 0x0c000000 /* - " - */ +#define SIUMCR_L2CPC00 0x00000000 /* L2 Cache Pins Configuration */ +#define SIUMCR_L2CPC01 0x01000000 /* - " - */ +#define SIUMCR_L2CPC10 0x02000000 /* - " - */ +#define SIUMCR_L2CPC11 0x03000000 /* - " - */ +#define SIUMCR_LBPC00 0x00000000 /* Local Bus Pins Configuration */ +#define SIUMCR_LBPC01 0x00400000 /* - " - */ +#define SIUMCR_LBPC10 0x00800000 /* - " - */ +#define SIUMCR_LBPC11 0x00c00000 /* - " - */ +#define SIUMCR_APPC00 0x00000000 /* Address Parity Pins Configuration*/ +#define SIUMCR_APPC01 0x00100000 /* - " - */ +#define SIUMCR_APPC10 0x00200000 /* - " - */ +#define SIUMCR_APPC11 0x00300000 /* - " - */ +#define SIUMCR_CS10PC00 0x00000000 /* CS10 Pin Configuration */ +#define SIUMCR_CS10PC01 0x00040000 /* - " - */ +#define SIUMCR_CS10PC10 0x00080000 /* - " - */ +#define SIUMCR_CS10PC11 0x000c0000 /* - " - */ +#define SIUMCR_BCTLC00 0x00000000 /* Buffer Control Configuration */ +#define SIUMCR_BCTLC01 0x00010000 /* - " - */ +#define SIUMCR_BCTLC10 0x00020000 /* - " - */ +#define SIUMCR_BCTLC11 0x00030000 /* - " - */ +#define SIUMCR_MMR00 0x00000000 /* Mask Masters Requests */ +#define SIUMCR_MMR01 0x00004000 /* - " - */ +#define SIUMCR_MMR10 0x00008000 /* - " - */ +#define SIUMCR_MMR11 0x0000c000 /* - " - */ +#define SIUMCR_LPBSE 0x00002000 /* LocalBus Parity Byte Select Enable*/ + +/*----------------------------------------------------------------------- + * SCCR - System Clock Control Register 9-8 +*/ +#define SCCR_PCI_MODE 0x00000100 /* PCI Mode */ +#define SCCR_PCI_MODCK 0x00000080 /* Value of PCI_MODCK pin */ +#define SCCR_PCIDF_MSK 0x00000078 /* PCI division factor */ +#define SCCR_PCIDF_SHIFT 3 + + #endif /* __CPM2__ */ #endif /* __KERNEL__ */ diff --git a/include/asm-ppc/fsl_ocp.h b/include/asm-ppc/fsl_ocp.h deleted file mode 100644 index 050fbba..0000000 --- a/include/asm-ppc/fsl_ocp.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - * include/asm-ppc/fsl_ocp.h - * - * Definitions for the on-chip peripherals on Freescale PPC processors - * - * Maintainer: Kumar Gala (kumar.gala@freescale.com) - * - * Copyright 2004 Freescale Semiconductor, Inc - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
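The agp.h additions in this merge (phys_to_gart, gart_to_phys, alloc_gatt_pages, free_gatt_pages) give the generic AGP code a uniform way to allocate the GATT and convert its address for the bridge. A hedged sketch of how a backend could use them follows; error handling is trimmed, the function name is invented for the example, and virt_to_phys and -ENOMEM are assumed to come from asm/io.h and linux/errno.h:

/* Illustrative GATT setup using the helpers added above; not the
 * actual agpgart code path. */
static int example_setup_gatt(unsigned int order)
{
	char *table = alloc_gatt_pages(order);
	unsigned long gart_addr;

	if (table == NULL)
		return -ENOMEM;

	gart_addr = phys_to_gart(virt_to_phys(table));
	/* ... program gart_addr into the bridge's GATT base register ... */

	free_gatt_pages(table, order);
	return 0;
}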
- */ - -#ifdef __KERNEL__ -#ifndef __ASM_FS_OCP_H__ -#define __ASM_FS_OCP_H__ - -/* A table of information for supporting the Gianfar Ethernet Controller - * This helps identify which enet controller we are dealing with, - * and what type of enet controller it is - */ -struct ocp_gfar_data { - uint interruptTransmit; - uint interruptError; - uint interruptReceive; - uint interruptPHY; - uint flags; - uint phyid; - uint phyregidx; - unsigned char mac_addr[6]; -}; - -/* Flags in the flags field */ -#define GFAR_HAS_COALESCE 0x20 -#define GFAR_HAS_RMON 0x10 -#define GFAR_HAS_MULTI_INTR 0x08 -#define GFAR_FIRM_SET_MACADDR 0x04 -#define GFAR_HAS_PHY_INTR 0x02 /* if not set use a timer */ -#define GFAR_HAS_GIGABIT 0x01 - -/* Data structure for I2C support. Just contains a couple flags - * to distinguish various I2C implementations*/ -struct ocp_fs_i2c_data { - uint flags; -}; - -/* Flags for I2C */ -#define FS_I2C_SEPARATE_DFSRR 0x02 -#define FS_I2C_CLOCK_5200 0x01 - -#endif /* __ASM_FS_OCP_H__ */ -#endif /* __KERNEL__ */ diff --git a/include/asm-ppc/irq.h b/include/asm-ppc/irq.h index 06b86be..a9b3332 100644 --- a/include/asm-ppc/irq.h +++ b/include/asm-ppc/irq.h @@ -176,7 +176,7 @@ static __inline__ int irq_canonicalize(int irq) */ #include <asm/mpc85xx.h> -/* The MPC8560 openpic has 32 internal interrupts and 12 external +/* The MPC8548 openpic has 48 internal interrupts and 12 external * interrupts. * * We are "flattening" the interrupt vectors of the cascaded CPM @@ -184,7 +184,7 @@ static __inline__ int irq_canonicalize(int irq) * single integer. */ #define NR_CPM_INTS 64 -#define NR_EPIC_INTS 44 +#define NR_EPIC_INTS 60 #ifndef NR_8259_INTS #define NR_8259_INTS 0 #endif @@ -223,9 +223,15 @@ static __inline__ int irq_canonicalize(int irq) #define MPC85xx_IRQ_RIO_RX (12 + MPC85xx_OPENPIC_IRQ_OFFSET) #define MPC85xx_IRQ_TSEC1_TX (13 + MPC85xx_OPENPIC_IRQ_OFFSET) #define MPC85xx_IRQ_TSEC1_RX (14 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_TSEC3_TX (15 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_TSEC3_RX (16 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_TSEC3_ERROR (17 + MPC85xx_OPENPIC_IRQ_OFFSET) #define MPC85xx_IRQ_TSEC1_ERROR (18 + MPC85xx_OPENPIC_IRQ_OFFSET) #define MPC85xx_IRQ_TSEC2_TX (19 + MPC85xx_OPENPIC_IRQ_OFFSET) #define MPC85xx_IRQ_TSEC2_RX (20 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_TSEC4_TX (21 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_TSEC4_RX (22 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_TSEC4_ERROR (23 + MPC85xx_OPENPIC_IRQ_OFFSET) #define MPC85xx_IRQ_TSEC2_ERROR (24 + MPC85xx_OPENPIC_IRQ_OFFSET) #define MPC85xx_IRQ_FEC (25 + MPC85xx_OPENPIC_IRQ_OFFSET) #define MPC85xx_IRQ_DUART (26 + MPC85xx_OPENPIC_IRQ_OFFSET) @@ -235,18 +241,18 @@ static __inline__ int irq_canonicalize(int irq) #define MPC85xx_IRQ_CPM (30 + MPC85xx_OPENPIC_IRQ_OFFSET) /* The 12 external interrupt lines */ -#define MPC85xx_IRQ_EXT0 (32 + MPC85xx_OPENPIC_IRQ_OFFSET) -#define MPC85xx_IRQ_EXT1 (33 + MPC85xx_OPENPIC_IRQ_OFFSET) -#define MPC85xx_IRQ_EXT2 (34 + MPC85xx_OPENPIC_IRQ_OFFSET) -#define MPC85xx_IRQ_EXT3 (35 + MPC85xx_OPENPIC_IRQ_OFFSET) -#define MPC85xx_IRQ_EXT4 (36 + MPC85xx_OPENPIC_IRQ_OFFSET) -#define MPC85xx_IRQ_EXT5 (37 + MPC85xx_OPENPIC_IRQ_OFFSET) -#define MPC85xx_IRQ_EXT6 (38 + MPC85xx_OPENPIC_IRQ_OFFSET) -#define MPC85xx_IRQ_EXT7 (39 + MPC85xx_OPENPIC_IRQ_OFFSET) -#define MPC85xx_IRQ_EXT8 (40 + MPC85xx_OPENPIC_IRQ_OFFSET) -#define MPC85xx_IRQ_EXT9 (41 + MPC85xx_OPENPIC_IRQ_OFFSET) -#define MPC85xx_IRQ_EXT10 (42 + MPC85xx_OPENPIC_IRQ_OFFSET) 
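The asm-ppc/irq.h hunk here grows the OpenPIC internal-interrupt space from 32 to 48 sources (to cover the MPC8548's extra eTSECs), so the 12 external lines move up from vectors 32..43 to 48..59, as the renumbered defines that follow show. Each vector is still spelled out individually; an equivalent formulation, shown only to make the arithmetic explicit (this macro is not part of the patch):

/* Illustrative equivalent of the renumbered external-interrupt defines:
 * external line n now lives at OpenPIC vector 48 + n. */
#define MPC85xx_IRQ_EXT_EXAMPLE(n)	(48 + (n) + MPC85xx_OPENPIC_IRQ_OFFSET)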
-#define MPC85xx_IRQ_EXT11 (43 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_EXT0 (48 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_EXT1 (49 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_EXT2 (50 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_EXT3 (51 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_EXT4 (52 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_EXT5 (53 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_EXT6 (54 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_EXT7 (55 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_EXT8 (56 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_EXT9 (57 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_EXT10 (58 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_EXT11 (59 + MPC85xx_OPENPIC_IRQ_OFFSET) /* CPM related interrupts */ #define SIU_INT_ERROR ((uint)0x00+CPM_IRQ_OFFSET) diff --git a/include/asm-ppc/kexec.h b/include/asm-ppc/kexec.h new file mode 100644 index 0000000..7319131 --- /dev/null +++ b/include/asm-ppc/kexec.h @@ -0,0 +1,38 @@ +#ifndef _PPC_KEXEC_H +#define _PPC_KEXEC_H + +#ifdef CONFIG_KEXEC + +/* + * KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return. + * I.e. Maximum page that is mapped directly into kernel memory, + * and kmap is not required. + * + * Someone correct me if FIXADDR_START - PAGEOFFSET is not the correct + * calculation for the amount of memory directly mappable into the + * kernel memory space. + */ + +/* Maximum physical address we can use pages from */ +#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) +/* Maximum address we can reach in physical address mode */ +#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) +/* Maximum address we can use for the control code buffer */ +#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE + +#define KEXEC_CONTROL_CODE_SIZE 4096 + +/* The native architecture */ +#define KEXEC_ARCH KEXEC_ARCH_PPC + +#ifndef __ASSEMBLY__ + +struct kimage; + +extern void machine_kexec_simple(struct kimage *image); + +#endif /* __ASSEMBLY__ */ + +#endif /* CONFIG_KEXEC */ + +#endif /* _PPC_KEXEC_H */ diff --git a/include/asm-ppc/m8260_pci.h b/include/asm-ppc/m8260_pci.h index 163a6b9..bf9e05d 100644 --- a/include/asm-ppc/m8260_pci.h +++ b/include/asm-ppc/m8260_pci.h @@ -19,6 +19,7 @@ * Define the vendor/device ID for the MPC8265. */ #define PCI_DEVICE_ID_MPC8265 ((0x18C0 << 16) | PCI_VENDOR_ID_MOTOROLA) +#define PCI_DEVICE_ID_MPC8272 ((0x18C1 << 16) | PCI_VENDOR_ID_MOTOROLA) #define M8265_PCIBR0 0x101ac #define M8265_PCIBR1 0x101b0 diff --git a/include/asm-ppc/machdep.h b/include/asm-ppc/machdep.h index b78d408..1d4ab70 100644 --- a/include/asm-ppc/machdep.h +++ b/include/asm-ppc/machdep.h @@ -4,6 +4,7 @@ #include <linux/config.h> #include <linux/init.h> +#include <linux/kexec.h> #include <asm/setup.h> #include <asm/page.h> @@ -114,6 +115,36 @@ struct machdep_calls { /* functions for dealing with other cpus */ struct smp_ops_t *smp_ops; #endif /* CONFIG_SMP */ + +#ifdef CONFIG_KEXEC + /* Called to shutdown machine specific hardware not already controlled + * by other drivers. + * XXX Should we move this one out of kexec scope? + */ + void (*machine_shutdown)(void); + + /* Called to do the minimal shutdown needed to run a kexec'd kernel + * to run successfully. + * XXX Should we move this one out of kexec scope? + */ + void (*machine_crash_shutdown)(void); + + /* Called to do what every setup is needed on image and the + * reboot code buffer. Returns 0 on success. + * Provide your own (maybe dummy) implementation if your platform + * claims to support kexec. 
+ */ + int (*machine_kexec_prepare)(struct kimage *image); + + /* Called to handle any machine specific cleanup on image */ + void (*machine_kexec_cleanup)(struct kimage *image); + + /* Called to perform the _real_ kexec. + * Do NOT allocate memory or fail here. We are past the point of + * no return. + */ + void (*machine_kexec)(struct kimage *image); +#endif /* CONFIG_KEXEC */ }; extern struct machdep_calls ppc_md; diff --git a/include/asm-ppc/mmu.h b/include/asm-ppc/mmu.h index d465aee..9205db4 100644 --- a/include/asm-ppc/mmu.h +++ b/include/asm-ppc/mmu.h @@ -405,7 +405,7 @@ typedef struct _P601_BAT { #define MAS0_TLBSEL(x) ((x << 28) & 0x30000000) #define MAS0_ESEL(x) ((x << 16) & 0x0FFF0000) -#define MAS0_NV 0x00000FFF +#define MAS0_NV(x) ((x) & 0x00000FFF) #define MAS1_VALID 0x80000000 #define MAS1_IPROT 0x40000000 diff --git a/include/asm-ppc/mmu_context.h b/include/asm-ppc/mmu_context.h index 9222fa6..ccabbce 100644 --- a/include/asm-ppc/mmu_context.h +++ b/include/asm-ppc/mmu_context.h @@ -63,7 +63,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) #define LAST_CONTEXT 255 #define FIRST_CONTEXT 1 -#elif defined(CONFIG_E500) +#elif defined(CONFIG_E200) || defined(CONFIG_E500) #define NO_CONTEXT 256 #define LAST_CONTEXT 255 #define FIRST_CONTEXT 1 diff --git a/include/asm-ppc/mpc10x.h b/include/asm-ppc/mpc10x.h index d8e7e2d..f5196a4 100644 --- a/include/asm-ppc/mpc10x.h +++ b/include/asm-ppc/mpc10x.h @@ -159,6 +159,12 @@ extern unsigned long ioremap_base; #define MPC10X_MAPA_EUMB_BASE (ioremap_base - MPC10X_EUMB_SIZE) #define MPC10X_MAPB_EUMB_BASE MPC10X_MAPA_EUMB_BASE +enum ppc_sys_devices { + MPC10X_IIC1, + MPC10X_DMA0, + MPC10X_DMA1, + MPC10X_DUART, +}; int mpc10x_bridge_init(struct pci_controller *hose, uint current_map, diff --git a/include/asm-ppc/mpc8260.h b/include/asm-ppc/mpc8260.h index d820894..89eb8a2 100644 --- a/include/asm-ppc/mpc8260.h +++ b/include/asm-ppc/mpc8260.h @@ -41,7 +41,7 @@ #endif #ifdef CONFIG_PCI_8260 -#include <syslib/m8260_pci.h> +#include <syslib/m82xx_pci.h> #endif /* Make sure the memory translation stuff is there if PCI not used. 
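The machdep_calls additions earlier in this hunk list give each ppc platform a set of kexec entry points, and asm-ppc/kexec.h declares machine_kexec_simple() as a ready-made final-stage handler. A hedged sketch of how a board port might wire these up; the board-specific names are invented for the example, and only ppc_md, struct kimage and machine_kexec_simple() come from the headers shown in this diff:

#ifdef CONFIG_KEXEC
/* Illustrative board glue: accept every image and reuse the generic
 * final-stage handler declared in asm-ppc/kexec.h. */
static int myboard_kexec_prepare(struct kimage *image)
{
	return 0;		/* nothing board-specific to verify */
}

static void __init myboard_init_kexec(void)
{
	ppc_md.machine_kexec_prepare = myboard_kexec_prepare;
	ppc_md.machine_kexec = machine_kexec_simple;
}
#endif /* CONFIG_KEXEC */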
diff --git a/include/asm-ppc/mpc85xx.h b/include/asm-ppc/mpc85xx.h index 22713e3..516984e 100644 --- a/include/asm-ppc/mpc85xx.h +++ b/include/asm-ppc/mpc85xx.h @@ -25,7 +25,7 @@ #ifdef CONFIG_MPC8540_ADS #include <platforms/85xx/mpc8540_ads.h> #endif -#ifdef CONFIG_MPC8555_CDS +#if defined(CONFIG_MPC8555_CDS) || defined(CONFIG_MPC8548_CDS) #include <platforms/85xx/mpc8555_cds.h> #endif #ifdef CONFIG_MPC8560_ADS @@ -74,7 +74,7 @@ extern unsigned char __res[]; #define MPC85xx_GUTS_OFFSET (0xe0000) #define MPC85xx_GUTS_SIZE (0x01000) #define MPC85xx_IIC1_OFFSET (0x03000) -#define MPC85xx_IIC1_SIZE (0x01000) +#define MPC85xx_IIC1_SIZE (0x00100) #define MPC85xx_OPENPIC_OFFSET (0x40000) #define MPC85xx_OPENPIC_SIZE (0x40000) #define MPC85xx_PCI1_OFFSET (0x08000) @@ -127,8 +127,64 @@ enum ppc_sys_devices { MPC85xx_CPM_MCC2, MPC85xx_CPM_SMC1, MPC85xx_CPM_SMC2, + MPC85xx_eTSEC1, + MPC85xx_eTSEC2, + MPC85xx_eTSEC3, + MPC85xx_eTSEC4, + MPC85xx_IIC2, }; +/* Internal interrupts are all Level Sensitive, and Positive Polarity */ +#define MPC85XX_INTERNAL_IRQ_SENSES \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 0 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 1 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 2 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 3 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 4 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 5 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 6 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 7 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 8 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 9 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 10 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 11 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 12 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 13 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 14 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 15 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 16 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 17 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 18 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 19 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 20 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 21 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 22 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 23 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 24 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 25 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 26 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 27 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 28 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 29 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 30 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 31 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 32 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 33 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 34 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 35 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 36 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 37 */ \ + (IRQ_SENSE_LEVEL | 
IRQ_POLARITY_POSITIVE), /* Internal 38 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 39 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 40 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 41 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 42 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 43 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 44 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 45 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 46 */ \ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE) /* Internal 47 */ + #endif /* CONFIG_85xx */ #endif /* __ASM_MPC85xx_H__ */ #endif /* __KERNEL__ */ diff --git a/include/asm-ppc/ocp.h b/include/asm-ppc/ocp.h index b98db3c..983116f 100644 --- a/include/asm-ppc/ocp.h +++ b/include/asm-ppc/ocp.h @@ -189,7 +189,7 @@ extern void ocp_for_each_device(void(*callback)(struct ocp_device *, void *arg), /* Sysfs support */ #define OCP_SYSFS_ADDTL(type, format, name, field) \ static ssize_t \ -show_##name##_##field(struct device *dev, char *buf) \ +show_##name##_##field(struct device *dev, struct device_attribute *attr, char *buf) \ { \ struct ocp_device *odev = to_ocp_dev(dev); \ type *add = odev->def->additions; \ @@ -202,10 +202,6 @@ static DEVICE_ATTR(name##_##field, S_IRUGO, show_##name##_##field, NULL); #include <asm/ibm_ocp.h> #endif -#ifdef CONFIG_FSL_OCP -#include <asm/fsl_ocp.h> -#endif - #endif /* CONFIG_PPC_OCP */ #endif /* __OCP_H__ */ #endif /* __KERNEL__ */ diff --git a/include/asm-ppc/pgtable.h b/include/asm-ppc/pgtable.h index a38606d..4d4b20c 100644 --- a/include/asm-ppc/pgtable.h +++ b/include/asm-ppc/pgtable.h @@ -267,8 +267,6 @@ extern unsigned long ioremap_bot, ioremap_base; #define _PMD_PRESENT_MASK (PAGE_MASK) #define _PMD_BAD (~PAGE_MASK) -#define NUM_TLBCAMS (16) - #elif defined(CONFIG_8xx) /* Definitions for 8xx embedded chips. */ #define _PAGE_PRESENT 0x0001 /* Page is valid */ diff --git a/include/asm-ppc/ppc_asm.h b/include/asm-ppc/ppc_asm.h index 13fa8e7..f76221d 100644 --- a/include/asm-ppc/ppc_asm.h +++ b/include/asm-ppc/ppc_asm.h @@ -174,6 +174,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601) #define CLR_TOP32(r) #endif /* CONFIG_PPC64BRIDGE */ +#define RFCI .long 0x4c000066 /* rfci instruction */ +#define RFDI .long 0x4c00004e /* rfdi instruction */ #define RFMCI .long 0x4c00004c /* rfmci instruction */ #ifdef CONFIG_IBM405_ERR77 diff --git a/include/asm-ppc/ppc_sys.h b/include/asm-ppc/ppc_sys.h index 24b991c..8ea6245 100644 --- a/include/asm-ppc/ppc_sys.h +++ b/include/asm-ppc/ppc_sys.h @@ -27,6 +27,8 @@ #include <asm/mpc85xx.h> #elif defined(CONFIG_PPC_MPC52xx) #include <asm/mpc52xx.h> +#elif defined(CONFIG_MPC10X_BRIDGE) +#include <asm/mpc10x.h> #else #error "need definition of ppc_sys_devices" #endif diff --git a/include/asm-ppc/reg.h b/include/asm-ppc/reg.h index c418aab..88b4222 100644 --- a/include/asm-ppc/reg.h +++ b/include/asm-ppc/reg.h @@ -160,6 +160,7 @@ #define HID0_ICFI (1<<11) /* Instr. Cache Flash Invalidate */ #define HID0_DCI (1<<10) /* Data Cache Invalidate */ #define HID0_SPD (1<<9) /* Speculative disable */ +#define HID0_DAPUEN (1<<8) /* Debug APU enable */ #define HID0_SGE (1<<7) /* Store Gathering Enable */ #define HID0_SIED (1<<7) /* Serial Instr. 
Execution [Disable] */ #define HID0_DFCA (1<<6) /* Data Cache Flush Assist */ diff --git a/include/asm-ppc/reg_booke.h b/include/asm-ppc/reg_booke.h index 45c5e6f..00ad9c7 100644 --- a/include/asm-ppc/reg_booke.h +++ b/include/asm-ppc/reg_booke.h @@ -165,6 +165,8 @@ do { \ #define SPRN_MCSRR1 0x23B /* Machine Check Save and Restore Register 1 */ #define SPRN_MCSR 0x23C /* Machine Check Status Register */ #define SPRN_MCAR 0x23D /* Machine Check Address Register */ +#define SPRN_DSRR0 0x23E /* Debug Save and Restore Register 0 */ +#define SPRN_DSRR1 0x23F /* Debug Save and Restore Register 1 */ #define SPRN_MAS0 0x270 /* MMU Assist Register 0 */ #define SPRN_MAS1 0x271 /* MMU Assist Register 1 */ #define SPRN_MAS2 0x272 /* MMU Assist Register 2 */ @@ -264,6 +266,17 @@ do { \ #define MCSR_BUS_IPERR 0x00000002UL /* Instruction parity Error */ #define MCSR_BUS_RPERR 0x00000001UL /* Read parity Error */ #endif +#ifdef CONFIG_E200 +#define MCSR_MCP 0x80000000UL /* Machine Check Input Pin */ +#define MCSR_CP_PERR 0x20000000UL /* Cache Push Parity Error */ +#define MCSR_CPERR 0x10000000UL /* Cache Parity Error */ +#define MCSR_EXCP_ERR 0x08000000UL /* ISI, ITLB, or Bus Error on 1st insn + fetch for an exception handler */ +#define MCSR_BUS_IRERR 0x00000010UL /* Read Bus Error on instruction fetch*/ +#define MCSR_BUS_DRERR 0x00000008UL /* Read Bus Error on data load */ +#define MCSR_BUS_WRERR 0x00000004UL /* Write Bus Error on buffered + store or cache line push */ +#endif /* Bit definitions for the DBSR. */ /* @@ -311,6 +324,7 @@ do { \ #define ESR_ST 0x00800000 /* Store Operation */ #define ESR_DLK 0x00200000 /* Data Cache Locking */ #define ESR_ILK 0x00100000 /* Instr. Cache Locking */ +#define ESR_PUO 0x00040000 /* Unimplemented Operation exception */ #define ESR_BO 0x00020000 /* Byte Ordering */ /* Bit definitions related to the DBCR0. */ @@ -387,10 +401,12 @@ do { \ #define ICCR_CACHE 1 /* Cacheable */ /* Bit definitions for L1CSR0. */ +#define L1CSR0_CLFC 0x00000100 /* Cache Lock Bits Flash Clear */ #define L1CSR0_DCFI 0x00000002 /* Data Cache Flash Invalidate */ +#define L1CSR0_CFI 0x00000002 /* Cache Flash Invalidate */ #define L1CSR0_DCE 0x00000001 /* Data Cache Enable */ -/* Bit definitions for L1CSR0. */ +/* Bit definitions for L1CSR1. 
*/ #define L1CSR1_ICLFR 0x00000100 /* Instr Cache Lock Bits Flash Reset */ #define L1CSR1_ICFI 0x00000002 /* Instr Cache Flash Invalidate */ #define L1CSR1_ICE 0x00000001 /* Instr Cache Enable */ diff --git a/include/asm-ppc/seccomp.h b/include/asm-ppc/seccomp.h new file mode 100644 index 0000000..666c4da --- /dev/null +++ b/include/asm-ppc/seccomp.h @@ -0,0 +1,10 @@ +#ifndef _ASM_SECCOMP_H + +#include <linux/unistd.h> + +#define __NR_seccomp_read __NR_read +#define __NR_seccomp_write __NR_write +#define __NR_seccomp_exit __NR_exit +#define __NR_seccomp_sigreturn __NR_rt_sigreturn + +#endif /* _ASM_SECCOMP_H */ diff --git a/include/asm-ppc/sigcontext.h b/include/asm-ppc/sigcontext.h index f82dccc..b7a417e 100644 --- a/include/asm-ppc/sigcontext.h +++ b/include/asm-ppc/sigcontext.h @@ -2,7 +2,7 @@ #define _ASM_PPC_SIGCONTEXT_H #include <asm/ptrace.h> - +#include <linux/compiler.h> struct sigcontext { unsigned long _unused[4]; diff --git a/include/asm-ppc/smp.h b/include/asm-ppc/smp.h index ebfb614..17530c2 100644 --- a/include/asm-ppc/smp.h +++ b/include/asm-ppc/smp.h @@ -44,7 +44,7 @@ extern void smp_message_recv(int, struct pt_regs *); #define NO_PROC_ID 0xFF /* No processor magic marker */ #define PROC_CHANGE_PENALTY 20 -#define smp_processor_id() (current_thread_info()->cpu) +#define raw_smp_processor_id() (current_thread_info()->cpu) extern int __cpu_up(unsigned int cpu); diff --git a/include/asm-ppc/thread_info.h b/include/asm-ppc/thread_info.h index f7f0152..27903db 100644 --- a/include/asm-ppc/thread_info.h +++ b/include/asm-ppc/thread_info.h @@ -20,7 +20,8 @@ struct thread_info { unsigned long flags; /* low level flags */ unsigned long local_flags; /* non-racy flags */ int cpu; /* cpu we're on */ - int preempt_count; + int preempt_count; /* 0 => preemptable, + <0 => BUG */ struct restart_block restart_block; }; @@ -77,12 +78,19 @@ static inline struct thread_info *current_thread_info(void) #define TIF_POLLING_NRFLAG 4 /* true if poll_idle() is polling TIF_NEED_RESCHED */ #define TIF_MEMDIE 5 +#define TIF_SYSCALL_AUDIT 6 /* syscall auditing active */ +#define TIF_SECCOMP 7 /* secure computing */ + /* as above, but as bit values */ #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) +#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) +#define _TIF_SECCOMP (1<<TIF_SECCOMP) + +#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP) /* * Non racy (local) flags bit numbers diff --git a/include/asm-ppc64/agp.h b/include/asm-ppc64/agp.h index be27cfa8..ca9e423 100644 --- a/include/asm-ppc64/agp.h +++ b/include/asm-ppc64/agp.h @@ -10,4 +10,14 @@ #define flush_agp_mappings() #define flush_agp_cache() mb() +/* Convert a physical address to an address suitable for the GART. */ +#define phys_to_gart(x) (x) +#define gart_to_phys(x) (x) + +/* GATT allocation. Returns/accepts GATT kernel virtual address. 
*/ +#define alloc_gatt_pages(order) \ + ((char *)__get_free_pages(GFP_KERNEL, (order))) +#define free_gatt_pages(table, order) \ + free_pages((unsigned long)(table), (order)) + #endif diff --git a/include/asm-ppc64/compat.h b/include/asm-ppc64/compat.h index 09c28d2..12414f5 100644 --- a/include/asm-ppc64/compat.h +++ b/include/asm-ppc64/compat.h @@ -26,6 +26,7 @@ typedef s32 compat_daddr_t; typedef u32 compat_caddr_t; typedef __kernel_fsid_t compat_fsid_t; typedef s32 compat_key_t; +typedef s32 compat_timer_t; typedef s32 compat_int_t; typedef s32 compat_long_t; diff --git a/include/asm-ppc64/dma.h b/include/asm-ppc64/dma.h index d693b80..dfd1f69 100644 --- a/include/asm-ppc64/dma.h +++ b/include/asm-ppc64/dma.h @@ -27,6 +27,8 @@ /* Doesn't really apply... */ #define MAX_DMA_ADDRESS (~0UL) +#if !defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI) + #define dma_outb outb #define dma_inb inb @@ -323,4 +325,5 @@ extern int isa_dma_bridge_buggy; #else #define isa_dma_bridge_buggy (0) #endif +#endif /* !defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI) */ #endif /* _ASM_DMA_H */ diff --git a/include/asm-ppc64/elf.h b/include/asm-ppc64/elf.h index 6c42d61..085eedb 100644 --- a/include/asm-ppc64/elf.h +++ b/include/asm-ppc64/elf.h @@ -221,9 +221,7 @@ do { \ set_thread_flag(TIF_ABI_PENDING); \ else \ clear_thread_flag(TIF_ABI_PENDING); \ - if (ibcs2) \ - set_personality(PER_SVR4); \ - else if (current->personality != PER_LINUX32) \ + if (personality(current->personality) != PER_LINUX32) \ set_personality(PER_LINUX); \ } while (0) diff --git a/include/asm-ppc64/iSeries/HvCall.h b/include/asm-ppc64/iSeries/HvCall.h index d9a2e74..c3f1947 100644 --- a/include/asm-ppc64/iSeries/HvCall.h +++ b/include/asm-ppc64/iSeries/HvCall.h @@ -1,84 +1,36 @@ /* * HvCall.h * Copyright (C) 2001 Mike Corrigan IBM Corporation - * + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. - * + * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * + * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -//=========================================================================== -// -// This file contains the "hypervisor call" interface which is used to -// drive the hypervisor from the OS. -// -//=========================================================================== +/* + * This file contains the "hypervisor call" interface which is used to + * drive the hypervisor from the OS. 
+ */ #ifndef _HVCALL_H #define _HVCALL_H -//------------------------------------------------------------------- -// Standard Includes -//------------------------------------------------------------------- #include <asm/iSeries/HvCallSc.h> #include <asm/iSeries/HvTypes.h> #include <asm/paca.h> -/* -enum HvCall_ReturnCode -{ - HvCall_Good = 0, - HvCall_Partial = 1, - HvCall_NotOwned = 2, - HvCall_NotFreed = 3, - HvCall_UnspecifiedError = 4 -}; - -enum HvCall_TypeOfSIT -{ - HvCall_ReduceOnly = 0, - HvCall_Unconditional = 1 -}; - -enum HvCall_TypeOfYield -{ - HvCall_YieldTimed = 0, // Yield until specified time - HvCall_YieldToActive = 1, // Yield until all active procs have run - HvCall_YieldToProc = 2 // Yield until the specified processor has run -}; - -enum HvCall_InterruptMasks -{ - HvCall_MaskIPI = 0x00000001, - HvCall_MaskLpEvent = 0x00000002, - HvCall_MaskLpProd = 0x00000004, - HvCall_MaskTimeout = 0x00000008 -}; - -enum HvCall_VaryOffChunkRc -{ - HvCall_VaryOffSucceeded = 0, - HvCall_VaryOffWithdrawn = 1, - HvCall_ChunkInLoadArea = 2, - HvCall_ChunkInHPT = 3, - HvCall_ChunkNotAccessible = 4, - HvCall_ChunkInUse = 5 -}; -*/ - /* Type of yield for HvCallBaseYieldProcessor */ -#define HvCall_YieldTimed 0 // Yield until specified time (tb) -#define HvCall_YieldToActive 1 // Yield until all active procs have run -#define HvCall_YieldToProc 2 // Yield until the specified processor has run +#define HvCall_YieldTimed 0 /* Yield until specified time (tb) */ +#define HvCall_YieldToActive 1 /* Yield until all active procs have run */ +#define HvCall_YieldToProc 2 /* Yield until the specified processor has run */ /* interrupt masks for setEnabledInterrupts */ #define HvCall_MaskIPI 0x00000001 @@ -86,7 +38,7 @@ enum HvCall_VaryOffChunkRc #define HvCall_MaskLpProd 0x00000004 #define HvCall_MaskTimeout 0x00000008 -/* Log buffer formats */ +/* Log buffer formats */ #define HvCall_LogBuffer_ASCII 0 #define HvCall_LogBuffer_EBCDIC 1 @@ -95,7 +47,7 @@ enum HvCall_VaryOffChunkRc #define HvCallBaseGetHwPatch HvCallBase + 2 #define HvCallBaseReIplSpAttn HvCallBase + 3 #define HvCallBaseSetASR HvCallBase + 4 -#define HvCallBaseSetASRAndRfi HvCallBase + 5 +#define HvCallBaseSetASRAndRfi HvCallBase + 5 #define HvCallBaseSetIMR HvCallBase + 6 #define HvCallBaseSendIPI HvCallBase + 7 #define HvCallBaseTerminateMachine HvCallBase + 8 @@ -115,91 +67,47 @@ enum HvCall_VaryOffChunkRc #define HvCallBaseGetLogBufferCodePage HvCallBase + 22 #define HvCallBaseGetLogBufferFormat HvCallBase + 23 #define HvCallBaseGetLogBufferLength HvCallBase + 24 -#define HvCallBaseReadLogBuffer HvCallBase + 25 +#define HvCallBaseReadLogBuffer HvCallBase + 25 #define HvCallBaseSetLogBufferFormatAndCodePage HvCallBase + 26 -#define HvCallBaseWriteLogBuffer HvCallBase + 27 +#define HvCallBaseWriteLogBuffer HvCallBase + 27 #define HvCallBaseRouter28 HvCallBase + 28 #define HvCallBaseRouter29 HvCallBase + 29 #define HvCallBaseRouter30 HvCallBase + 30 -#define HvCallBaseSetDebugBus HvCallBase + 31 +#define HvCallBaseSetDebugBus HvCallBase + 31 -#define HvCallCcSetDABR HvCallCc + 7 +#define HvCallCcSetDABR HvCallCc + 7 -//===================================================================================== -static inline void HvCall_setVirtualDecr(void) +static inline void HvCall_setVirtualDecr(void) { - /* Ignore any error return codes - most likely means that the target value for the - * LP has been increased and this vary off would bring us below the new target. 
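The HvCall.h cleanup in this hunk keeps only the wrappers that are actually used and reflows them to kernel style; each still boils down to an HvCallN() primitive taking one of the call numbers defined above. An illustrative caller of the wrappers kept just below; the function name is made up for the example, and the exact meaning of the yield parameter (taken here to be a processor index) is an assumption based on the comments in this header:

/* Sketch: enable IPI and LP-event interrupts, then yield until the
 * given processor has run, using HvCall_setEnabledInterrupts() and
 * HvCall_yieldProcessor() from the cleaned-up wrappers below. */
static inline void example_yield_to(u64 target_proc)
{
	HvCall_setEnabledInterrupts(HvCall_MaskIPI | HvCall_MaskLpEvent);
	HvCall_yieldProcessor(HvCall_YieldToProc, target_proc);
}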
*/ + /* + * Ignore any error return codes - most likely means that the + * target value for the LP has been increased and this vary off + * would bring us below the new target. + */ HvCall0(HvCallBaseSetVirtualDecr); } -//===================================================================== -static inline void HvCall_yieldProcessor(unsigned typeOfYield, u64 yieldParm) -{ - HvCall2( HvCallBaseYieldProcessor, typeOfYield, yieldParm ); -} -//===================================================================== -static inline void HvCall_setEnabledInterrupts(u64 enabledInterrupts) -{ - HvCall1(HvCallBaseSetEnabledInterrupts,enabledInterrupts); -} - -//===================================================================== -static inline void HvCall_clearLogBuffer(HvLpIndex lpindex) -{ - HvCall1(HvCallBaseClearLogBuffer,lpindex); -} - -//===================================================================== -static inline u32 HvCall_getLogBufferCodePage(HvLpIndex lpindex) -{ - u32 retVal = HvCall1(HvCallBaseGetLogBufferCodePage,lpindex); - return retVal; -} - -//===================================================================== -static inline int HvCall_getLogBufferFormat(HvLpIndex lpindex) -{ - int retVal = HvCall1(HvCallBaseGetLogBufferFormat,lpindex); - return retVal; -} - -//===================================================================== -static inline u32 HvCall_getLogBufferLength(HvLpIndex lpindex) -{ - u32 retVal = HvCall1(HvCallBaseGetLogBufferLength,lpindex); - return retVal; -} -//===================================================================== -static inline void HvCall_setLogBufferFormatAndCodepage(int format, u32 codePage) +static inline void HvCall_yieldProcessor(unsigned typeOfYield, u64 yieldParm) { - HvCall2(HvCallBaseSetLogBufferFormatAndCodePage,format, codePage); + HvCall2(HvCallBaseYieldProcessor, typeOfYield, yieldParm); } -//===================================================================== -int HvCall_readLogBuffer(HvLpIndex lpindex, void *buffer, u64 bufLen); -void HvCall_writeLogBuffer(const void *buffer, u64 bufLen); - -//===================================================================== -static inline void HvCall_sendIPI(struct paca_struct * targetPaca) +static inline void HvCall_setEnabledInterrupts(u64 enabledInterrupts) { - HvCall1( HvCallBaseSendIPI, targetPaca->paca_index ); + HvCall1(HvCallBaseSetEnabledInterrupts, enabledInterrupts); } -//===================================================================== -static inline void HvCall_terminateMachineSrc(void) +static inline void HvCall_setLogBufferFormatAndCodepage(int format, + u32 codePage) { - HvCall0( HvCallBaseTerminateMachineSrc ); + HvCall2(HvCallBaseSetLogBufferFormatAndCodePage, format, codePage); } -static inline void HvCall_setDABR(unsigned long val) -{ - HvCall1(HvCallCcSetDABR, val); -} +extern void HvCall_writeLogBuffer(const void *buffer, u64 bufLen); -static inline void HvCall_setDebugBus(unsigned long val) +static inline void HvCall_sendIPI(struct paca_struct *targetPaca) { - HvCall1(HvCallBaseSetDebugBus, val); + HvCall1(HvCallBaseSendIPI, targetPaca->paca_index); } #endif /* _HVCALL_H */ diff --git a/include/asm-ppc64/iSeries/HvCallCfg.h b/include/asm-ppc64/iSeries/HvCallCfg.h deleted file mode 100644 index 9f40f16..0000000 --- a/include/asm-ppc64/iSeries/HvCallCfg.h +++ /dev/null @@ -1,213 +0,0 @@ -/* - * HvCallCfg.h - * Copyright (C) 2001 Mike Corrigan IBM Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the 
GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -//===================================================================================== -// -// This file contains the "hypervisor call" interface which is used to -// drive the hypervisor from the OS. -// -//===================================================================================== -#ifndef _HVCALLCFG_H -#define _HVCALLCFG_H - -//------------------------------------------------------------------- -// Standard Includes -//------------------------------------------------------------------- -#include <asm/iSeries/HvCallSc.h> -#include <asm/iSeries/HvTypes.h> - -//------------------------------------------------------------------------------------- -// Constants -//------------------------------------------------------------------------------------- - -enum HvCallCfg_ReqQual -{ - HvCallCfg_Cur = 0, - HvCallCfg_Init = 1, - HvCallCfg_Max = 2, - HvCallCfg_Min = 3 -}; - -#define HvCallCfgGetLps HvCallCfg + 0 -#define HvCallCfgGetActiveLpMap HvCallCfg + 1 -#define HvCallCfgGetLpVrmIndex HvCallCfg + 2 -#define HvCallCfgGetLpMinSupportedPlicVrmIndex HvCallCfg + 3 -#define HvCallCfgGetLpMinCompatablePlicVrmIndex HvCallCfg + 4 -#define HvCallCfgGetLpVrmName HvCallCfg + 5 -#define HvCallCfgGetSystemPhysicalProcessors HvCallCfg + 6 -#define HvCallCfgGetPhysicalProcessors HvCallCfg + 7 -#define HvCallCfgGetSystemMsChunks HvCallCfg + 8 -#define HvCallCfgGetMsChunks HvCallCfg + 9 -#define HvCallCfgGetInteractivePercentage HvCallCfg + 10 -#define HvCallCfgIsBusDedicated HvCallCfg + 11 -#define HvCallCfgGetBusOwner HvCallCfg + 12 -#define HvCallCfgGetBusAllocation HvCallCfg + 13 -#define HvCallCfgGetBusUnitOwner HvCallCfg + 14 -#define HvCallCfgGetBusUnitAllocation HvCallCfg + 15 -#define HvCallCfgGetVirtualBusPool HvCallCfg + 16 -#define HvCallCfgGetBusUnitInterruptProc HvCallCfg + 17 -#define HvCallCfgGetConfiguredBusUnitsForIntProc HvCallCfg + 18 -#define HvCallCfgGetRioSanBusPool HvCallCfg + 19 -#define HvCallCfgGetSharedPoolIndex HvCallCfg + 20 -#define HvCallCfgGetSharedProcUnits HvCallCfg + 21 -#define HvCallCfgGetNumProcsInSharedPool HvCallCfg + 22 -#define HvCallCfgRouter23 HvCallCfg + 23 -#define HvCallCfgRouter24 HvCallCfg + 24 -#define HvCallCfgRouter25 HvCallCfg + 25 -#define HvCallCfgRouter26 HvCallCfg + 26 -#define HvCallCfgRouter27 HvCallCfg + 27 -#define HvCallCfgGetMinRuntimeMsChunks HvCallCfg + 28 -#define HvCallCfgSetMinRuntimeMsChunks HvCallCfg + 29 -#define HvCallCfgGetVirtualLanIndexMap HvCallCfg + 30 -#define HvCallCfgGetLpExecutionMode HvCallCfg + 31 -#define HvCallCfgGetHostingLpIndex HvCallCfg + 32 - -//==================================================================== -static inline HvLpIndex HvCallCfg_getLps(void) -{ - HvLpIndex retVal = HvCall0(HvCallCfgGetLps); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - return retVal; -} -//==================================================================== -static inline int HvCallCfg_isBusDedicated(u64 
busIndex) -{ - int retVal = HvCall1(HvCallCfgIsBusDedicated,busIndex); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - return retVal; -} -//==================================================================== -static inline HvLpIndex HvCallCfg_getBusOwner(u64 busIndex) -{ - HvLpIndex retVal = HvCall1(HvCallCfgGetBusOwner,busIndex); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - return retVal; -} -//==================================================================== -static inline HvLpIndexMap HvCallCfg_getBusAllocation(u64 busIndex) -{ - HvLpIndexMap retVal = HvCall1(HvCallCfgGetBusAllocation,busIndex); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - return retVal; -} -//==================================================================== -static inline HvLpIndexMap HvCallCfg_getActiveLpMap(void) -{ - HvLpIndexMap retVal = HvCall0(HvCallCfgGetActiveLpMap); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - return retVal; -} -//==================================================================== -static inline HvLpVirtualLanIndexMap HvCallCfg_getVirtualLanIndexMap(HvLpIndex lp) -{ - // This is a new function in V5R1 so calls to this on older - // hypervisors will return -1 - u64 retVal = HvCall1(HvCallCfgGetVirtualLanIndexMap, lp); - if(retVal == -1) - retVal = 0; - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - return retVal; -} -//=================================================================== -static inline u64 HvCallCfg_getSystemMsChunks(void) -{ - u64 retVal = HvCall0(HvCallCfgGetSystemMsChunks); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - return retVal; -} -//=================================================================== -static inline u64 HvCallCfg_getMsChunks(HvLpIndex lp,enum HvCallCfg_ReqQual qual) -{ - u64 retVal = HvCall2(HvCallCfgGetMsChunks,lp,qual); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - return retVal; -} -//=================================================================== -static inline u64 HvCallCfg_getMinRuntimeMsChunks(HvLpIndex lp) -{ - // NOTE: This function was added in v5r1 so older hypervisors will return a -1 value - u64 retVal = HvCall1(HvCallCfgGetMinRuntimeMsChunks,lp); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - return retVal; -} -//=================================================================== -static inline u64 HvCallCfg_setMinRuntimeMsChunks(u64 chunks) -{ - u64 retVal = HvCall1(HvCallCfgSetMinRuntimeMsChunks,chunks); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - return retVal; -} -//=================================================================== -static inline u64 HvCallCfg_getSystemPhysicalProcessors(void) -{ - u64 retVal = HvCall0(HvCallCfgGetSystemPhysicalProcessors); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - return retVal; -} -//=================================================================== -static inline u64 HvCallCfg_getPhysicalProcessors(HvLpIndex lp,enum HvCallCfg_ReqQual qual) -{ - u64 retVal = HvCall2(HvCallCfgGetPhysicalProcessors,lp,qual); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - return retVal; -} -//=================================================================== -static inline u64 HvCallCfg_getConfiguredBusUnitsForInterruptProc(HvLpIndex lp, - u16 hvLogicalProcIndex) -{ - u64 retVal = HvCall2(HvCallCfgGetConfiguredBusUnitsForIntProc,lp,hvLogicalProcIndex); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - return retVal; - -} -//================================================================== -static inline HvLpSharedPoolIndex 
HvCallCfg_getSharedPoolIndex(HvLpIndex lp) -{ - HvLpSharedPoolIndex retVal = - HvCall1(HvCallCfgGetSharedPoolIndex,lp); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - return retVal; - -} -//================================================================== -static inline u64 HvCallCfg_getSharedProcUnits(HvLpIndex lp,enum HvCallCfg_ReqQual qual) -{ - u64 retVal = HvCall2(HvCallCfgGetSharedProcUnits,lp,qual); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - return retVal; - -} -//================================================================== -static inline u64 HvCallCfg_getNumProcsInSharedPool(HvLpSharedPoolIndex sPI) -{ - u16 retVal = HvCall1(HvCallCfgGetNumProcsInSharedPool,sPI); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - return retVal; - -} -//================================================================== -static inline HvLpIndex HvCallCfg_getHostingLpIndex(HvLpIndex lp) -{ - u64 retVal = HvCall1(HvCallCfgGetHostingLpIndex,lp); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - return retVal; - -} - -#endif /* _HVCALLCFG_H */ diff --git a/include/asm-ppc64/iSeries/HvCallEvent.h b/include/asm-ppc64/iSeries/HvCallEvent.h index 191ddce..5d9a327 100644 --- a/include/asm-ppc64/iSeries/HvCallEvent.h +++ b/include/asm-ppc64/iSeries/HvCallEvent.h @@ -1,32 +1,28 @@ /* * HvCallEvent.h * Copyright (C) 2001 Mike Corrigan IBM Corporation - * + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. - * + * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * + * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - /* - * This file contains the "hypervisor call" interface which is used to - * drive the hypervisor from the OS. + * This file contains the "hypervisor call" interface which is used to + * drive the hypervisor from the OS. 
*/ #ifndef _HVCALLEVENT_H #define _HVCALLEVENT_H -/* - * Standard Includes - */ #include <asm/iSeries/HvCallSc.h> #include <asm/iSeries/HvTypes.h> #include <asm/abs_addr.h> @@ -71,7 +67,7 @@ typedef u64 HvLpDma_Rc; #define HvCallEventCloseLpEventPath HvCallEvent + 2 #define HvCallEventDmaBufList HvCallEvent + 3 #define HvCallEventDmaSingle HvCallEvent + 4 -#define HvCallEventDmaToSp HvCallEvent + 5 +#define HvCallEventDmaToSp HvCallEvent + 5 #define HvCallEventGetOverflowLpEvents HvCallEvent + 6 #define HvCallEventGetSourceLpInstanceId HvCallEvent + 7 #define HvCallEventGetTargetLpInstanceId HvCallEvent + 8 @@ -85,14 +81,12 @@ typedef u64 HvLpDma_Rc; static inline void HvCallEvent_getOverflowLpEvents(u8 queueIndex) { - HvCall1(HvCallEventGetOverflowLpEvents,queueIndex); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); + HvCall1(HvCallEventGetOverflowLpEvents, queueIndex); } static inline void HvCallEvent_setInterLpQueueIndex(u8 queueIndex) { - HvCall1(HvCallEventSetInterLpQueueIndex,queueIndex); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); + HvCall1(HvCallEventSetInterLpQueueIndex, queueIndex); } static inline void HvCallEvent_setLpEventStack(u8 queueIndex, @@ -103,7 +97,6 @@ static inline void HvCallEvent_setLpEventStack(u8 queueIndex, abs_addr = virt_to_abs(eventStackAddr); HvCall3(HvCallEventSetLpEventStack, queueIndex, abs_addr, eventStackSize); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); } static inline void HvCallEvent_setLpEventQueueInterruptProc(u8 queueIndex, @@ -111,22 +104,18 @@ static inline void HvCallEvent_setLpEventQueueInterruptProc(u8 queueIndex, { HvCall2(HvCallEventSetLpEventQueueInterruptProc, queueIndex, lpLogicalProcIndex); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); } static inline HvLpEvent_Rc HvCallEvent_signalLpEvent(struct HvLpEvent *event) { u64 abs_addr; - HvLpEvent_Rc retVal; #ifdef DEBUG_SENDEVENT printk("HvCallEvent_signalLpEvent: *event = %016lx\n ", (unsigned long)event); #endif abs_addr = virt_to_abs(event); - retVal = (HvLpEvent_Rc)HvCall1(HvCallEventSignalLpEvent, abs_addr); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - return retVal; + return HvCall1(HvCallEventSignalLpEvent, abs_addr); } static inline HvLpEvent_Rc HvCallEvent_signalLpEventFast(HvLpIndex targetLp, @@ -136,9 +125,7 @@ static inline HvLpEvent_Rc HvCallEvent_signalLpEventFast(HvLpIndex targetLp, u64 eventData1, u64 eventData2, u64 eventData3, u64 eventData4, u64 eventData5) { - HvLpEvent_Rc retVal; - - // Pack the misc bits into a single Dword to pass to PLIC + /* Pack the misc bits into a single Dword to pass to PLIC */ union { struct HvCallEvent_PackedParms parms; u64 dword; @@ -152,67 +139,49 @@ static inline HvLpEvent_Rc HvCallEvent_signalLpEventFast(HvLpIndex targetLp, packed.parms.xSourceInstId = sourceInstanceId; packed.parms.xTargetInstId = targetInstanceId; - retVal = (HvLpEvent_Rc)HvCall7(HvCallEventSignalLpEventParms, - packed.dword, correlationToken, eventData1,eventData2, - eventData3,eventData4, eventData5); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - return retVal; + return HvCall7(HvCallEventSignalLpEventParms, packed.dword, + correlationToken, eventData1, eventData2, + eventData3, eventData4, eventData5); } static inline HvLpEvent_Rc HvCallEvent_ackLpEvent(struct HvLpEvent *event) { u64 abs_addr; - HvLpEvent_Rc retVal; abs_addr = virt_to_abs(event); - retVal = (HvLpEvent_Rc)HvCall1(HvCallEventAckLpEvent, abs_addr); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - return retVal; + return HvCall1(HvCallEventAckLpEvent, abs_addr); } static inline 
HvLpEvent_Rc HvCallEvent_cancelLpEvent(struct HvLpEvent *event) { u64 abs_addr; - HvLpEvent_Rc retVal; abs_addr = virt_to_abs(event); - retVal = (HvLpEvent_Rc)HvCall1(HvCallEventCancelLpEvent, abs_addr); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - return retVal; + return HvCall1(HvCallEventCancelLpEvent, abs_addr); } static inline HvLpInstanceId HvCallEvent_getSourceLpInstanceId( HvLpIndex targetLp, HvLpEvent_Type type) { - HvLpInstanceId retVal; - - retVal = HvCall2(HvCallEventGetSourceLpInstanceId, targetLp, type); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - return retVal; + return HvCall2(HvCallEventGetSourceLpInstanceId, targetLp, type); } static inline HvLpInstanceId HvCallEvent_getTargetLpInstanceId( HvLpIndex targetLp, HvLpEvent_Type type) { - HvLpInstanceId retVal; - - retVal = HvCall2(HvCallEventGetTargetLpInstanceId, targetLp, type); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - return retVal; + return HvCall2(HvCallEventGetTargetLpInstanceId, targetLp, type); } static inline void HvCallEvent_openLpEventPath(HvLpIndex targetLp, HvLpEvent_Type type) { HvCall2(HvCallEventOpenLpEventPath, targetLp, type); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); } static inline void HvCallEvent_closeLpEventPath(HvLpIndex targetLp, HvLpEvent_Type type) { HvCall2(HvCallEventCloseLpEventPath, targetLp, type); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); } static inline HvLpDma_Rc HvCallEvent_dmaBufList(HvLpEvent_Type type, @@ -224,8 +193,7 @@ static inline HvLpDma_Rc HvCallEvent_dmaBufList(HvLpEvent_Type type, /* Do these need to be converted to absolute addresses? */ u64 localBufList, u64 remoteBufList, u32 transferLength) { - HvLpDma_Rc retVal; - // Pack the misc bits into a single Dword to pass to PLIC + /* Pack the misc bits into a single Dword to pass to PLIC */ union { struct HvCallEvent_PackedDmaParms parms; u64 dword; @@ -241,11 +209,8 @@ static inline HvLpDma_Rc HvCallEvent_dmaBufList(HvLpEvent_Type type, packed.parms.xLocalInstId = localInstanceId; packed.parms.xRemoteInstId = remoteInstanceId; - retVal = (HvLpDma_Rc)HvCall4(HvCallEventDmaBufList, - packed.dword, localBufList, remoteBufList, - transferLength); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - return retVal; + return HvCall4(HvCallEventDmaBufList, packed.dword, localBufList, + remoteBufList, transferLength); } static inline HvLpDma_Rc HvCallEvent_dmaSingle(HvLpEvent_Type type, @@ -256,8 +221,7 @@ static inline HvLpDma_Rc HvCallEvent_dmaSingle(HvLpEvent_Type type, HvLpDma_AddressType remoteAddressType, u64 localAddrOrTce, u64 remoteAddrOrTce, u32 transferLength) { - HvLpDma_Rc retVal; - // Pack the misc bits into a single Dword to pass to PLIC + /* Pack the misc bits into a single Dword to pass to PLIC */ union { struct HvCallEvent_PackedDmaParms parms; u64 dword; @@ -273,25 +237,17 @@ static inline HvLpDma_Rc HvCallEvent_dmaSingle(HvLpEvent_Type type, packed.parms.xLocalInstId = localInstanceId; packed.parms.xRemoteInstId = remoteInstanceId; - retVal = (HvLpDma_Rc)HvCall4(HvCallEventDmaSingle, - packed.dword, localAddrOrTce, remoteAddrOrTce, - transferLength); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - return retVal; + return (HvLpDma_Rc)HvCall4(HvCallEventDmaSingle, packed.dword, + localAddrOrTce, remoteAddrOrTce, transferLength); } -static inline HvLpDma_Rc HvCallEvent_dmaToSp(void* local, u32 remote, +static inline HvLpDma_Rc HvCallEvent_dmaToSp(void *local, u32 remote, u32 length, HvLpDma_Direction dir) { u64 abs_addr; - HvLpDma_Rc retVal; abs_addr = virt_to_abs(local); - retVal = 
(HvLpDma_Rc)HvCall4(HvCallEventDmaToSp, abs_addr, remote, - length, dir); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - return retVal; + return HvCall4(HvCallEventDmaToSp, abs_addr, remote, length, dir); } - #endif /* _HVCALLEVENT_H */ diff --git a/include/asm-ppc64/iSeries/HvCallHpt.h b/include/asm-ppc64/iSeries/HvCallHpt.h index da76987..66f3822 100644 --- a/include/asm-ppc64/iSeries/HvCallHpt.h +++ b/include/asm-ppc64/iSeries/HvCallHpt.h @@ -1,17 +1,17 @@ /* * HvCallHpt.h * Copyright (C) 2001 Mike Corrigan IBM Corporation - * + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. - * + * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * + * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA @@ -19,21 +19,15 @@ #ifndef _HVCALLHPT_H #define _HVCALLHPT_H -//============================================================================ -// -// This file contains the "hypervisor call" interface which is used to -// drive the hypervisor from the OS. -// -//============================================================================ +/* + * This file contains the "hypervisor call" interface which is used to + * drive the hypervisor from the OS. + */ #include <asm/iSeries/HvCallSc.h> #include <asm/iSeries/HvTypes.h> #include <asm/mmu.h> -//----------------------------------------------------------------------------- -// Constants -//----------------------------------------------------------------------------- - #define HvCallHptGetHptAddress HvCallHpt + 0 #define HvCallHptGetHptPages HvCallHpt + 1 #define HvCallHptSetPp HvCallHpt + 5 @@ -47,81 +41,63 @@ #define HvCallHptInvalidateSetSwBitsGet HvCallHpt + 18 -//============================================================================ -static inline u64 HvCallHpt_getHptAddress(void) +static inline u64 HvCallHpt_getHptAddress(void) { - u64 retval = HvCall0(HvCallHptGetHptAddress); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - return retval; + return HvCall0(HvCallHptGetHptAddress); } -//============================================================================ -static inline u64 HvCallHpt_getHptPages(void) -{ - u64 retval = HvCall0(HvCallHptGetHptPages); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - return retval; + +static inline u64 HvCallHpt_getHptPages(void) +{ + return HvCall0(HvCallHptGetHptPages); } -//============================================================================= -static inline void HvCallHpt_setPp(u32 hpteIndex, u8 value) + +static inline void HvCallHpt_setPp(u32 hpteIndex, u8 value) { - HvCall2( HvCallHptSetPp, hpteIndex, value ); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); + HvCall2(HvCallHptSetPp, hpteIndex, value); } -//============================================================================= -static inline void HvCallHpt_setSwBits(u32 hpteIndex, u8 bitson, u8 bitsoff ) + +static inline void HvCallHpt_setSwBits(u32 hpteIndex, u8 bitson, u8 bitsoff) { - HvCall3( HvCallHptSetSwBits, hpteIndex, bitson, bitsoff ); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); 
+ HvCall3(HvCallHptSetSwBits, hpteIndex, bitson, bitsoff); } -//============================================================================= -static inline void HvCallHpt_invalidateNoSyncICache(u32 hpteIndex) - + +static inline void HvCallHpt_invalidateNoSyncICache(u32 hpteIndex) { - HvCall1( HvCallHptInvalidateNoSyncICache, hpteIndex ); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); + HvCall1(HvCallHptInvalidateNoSyncICache, hpteIndex); } -//============================================================================= -static inline u64 HvCallHpt_invalidateSetSwBitsGet(u32 hpteIndex, u8 bitson, u8 bitsoff ) - + +static inline u64 HvCallHpt_invalidateSetSwBitsGet(u32 hpteIndex, u8 bitson, + u8 bitsoff) { u64 compressedStatus; - compressedStatus = HvCall4( HvCallHptInvalidateSetSwBitsGet, hpteIndex, bitson, bitsoff, 1 ); - HvCall1( HvCallHptInvalidateNoSyncICache, hpteIndex ); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); + + compressedStatus = HvCall4(HvCallHptInvalidateSetSwBitsGet, + hpteIndex, bitson, bitsoff, 1); + HvCall1(HvCallHptInvalidateNoSyncICache, hpteIndex); return compressedStatus; } -//============================================================================= -static inline u64 HvCallHpt_findValid( HPTE *hpte, u64 vpn ) + +static inline u64 HvCallHpt_findValid(HPTE *hpte, u64 vpn) { - u64 retIndex = HvCall3Ret16( HvCallHptFindValid, hpte, vpn, 0, 0 ); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - return retIndex; + return HvCall3Ret16(HvCallHptFindValid, hpte, vpn, 0, 0); } -//============================================================================= -static inline u64 HvCallHpt_findNextValid( HPTE *hpte, u32 hpteIndex, u8 bitson, u8 bitsoff ) + +static inline u64 HvCallHpt_findNextValid(HPTE *hpte, u32 hpteIndex, + u8 bitson, u8 bitsoff) { - u64 retIndex = HvCall3Ret16( HvCallHptFindNextValid, hpte, hpteIndex, bitson, bitsoff ); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - return retIndex; + return HvCall3Ret16(HvCallHptFindNextValid, hpte, hpteIndex, + bitson, bitsoff); } -//============================================================================= -static inline void HvCallHpt_get( HPTE *hpte, u32 hpteIndex ) + +static inline void HvCallHpt_get(HPTE *hpte, u32 hpteIndex) { - HvCall2Ret16( HvCallHptGet, hpte, hpteIndex, 0 ); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); + HvCall2Ret16(HvCallHptGet, hpte, hpteIndex, 0); } -//============================================================================ -static inline void HvCallHpt_addValidate( u32 hpteIndex, - u32 hBit, - HPTE *hpte ) - + +static inline void HvCallHpt_addValidate(u32 hpteIndex, u32 hBit, HPTE *hpte) { - HvCall4( HvCallHptAddValidate, hpteIndex, - hBit, (*((u64 *)hpte)), (*(((u64 *)hpte)+1)) ); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); + HvCall4(HvCallHptAddValidate, hpteIndex, hBit, (*((u64 *)hpte)), + (*(((u64 *)hpte)+1))); } - -//============================================================================= - #endif /* _HVCALLHPT_H */ diff --git a/include/asm-ppc64/iSeries/HvCallPci.h b/include/asm-ppc64/iSeries/HvCallPci.h index 6887b61..c8d675c 100644 --- a/include/asm-ppc64/iSeries/HvCallPci.h +++ b/include/asm-ppc64/iSeries/HvCallPci.h @@ -1,26 +1,26 @@ -/************************************************************************/ -/* Provides the Hypervisor PCI calls for iSeries Linux Parition. 
*/ -/* Copyright (C) 2001 <Wayne G Holm> <IBM Corporation> */ -/* */ -/* This program is free software; you can redistribute it and/or modify */ -/* it under the terms of the GNU General Public License as published by */ -/* the Free Software Foundation; either version 2 of the License, or */ -/* (at your option) any later version. */ -/* */ -/* This program is distributed in the hope that it will be useful, */ -/* but WITHOUT ANY WARRANTY; without even the implied warranty of */ -/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ -/* GNU General Public License for more details. */ -/* */ -/* You should have received a copy of the GNU General Public License */ -/* along with this program; if not, write to the: */ -/* Free Software Foundation, Inc., */ -/* 59 Temple Place, Suite 330, */ -/* Boston, MA 02111-1307 USA */ -/************************************************************************/ -/* Change Activity: */ -/* Created, Jan 9, 2001 */ -/************************************************************************/ +/* + * Provides the Hypervisor PCI calls for iSeries Linux Parition. + * Copyright (C) 2001 <Wayne G Holm> <IBM Corporation> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the: + * Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, + * Boston, MA 02111-1307 USA + * + * Change Activity: + * Created, Jan 9, 2001 + */ #ifndef _HVCALLPCI_H #define _HVCALLPCI_H @@ -34,8 +34,8 @@ */ struct HvCallPci_DsaAddr { u16 busNumber; /* PHB index? */ - u8 subBusNumber; /* PCI bus number? */ - u8 deviceId; /* device and function? */ + u8 subBusNumber; /* PCI bus number? */ + u8 deviceId; /* device and function? 
*/ u8 barNumber; u8 reserved[3]; }; @@ -52,34 +52,37 @@ struct HvCallPci_LoadReturn { enum HvCallPci_DeviceType { HvCallPci_NodeDevice = 1, - HvCallPci_SpDevice = 2, - HvCallPci_IopDevice = 3, - HvCallPci_BridgeDevice = 4, - HvCallPci_MultiFunctionDevice = 5, - HvCallPci_IoaDevice = 6 + HvCallPci_SpDevice = 2, + HvCallPci_IopDevice = 3, + HvCallPci_BridgeDevice = 4, + HvCallPci_MultiFunctionDevice = 5, + HvCallPci_IoaDevice = 6 }; struct HvCallPci_DeviceInfo { - u32 deviceType; // See DeviceType enum for values + u32 deviceType; /* See DeviceType enum for values */ }; - + struct HvCallPci_BusUnitInfo { - u32 sizeReturned; // length of data returned - u32 deviceType; // see DeviceType enum for values + u32 sizeReturned; /* length of data returned */ + u32 deviceType; /* see DeviceType enum for values */ }; struct HvCallPci_BridgeInfo { - struct HvCallPci_BusUnitInfo busUnitInfo; // Generic bus unit info - u8 subBusNumber; // Bus number of secondary bus - u8 maxAgents; // Max idsels on secondary bus - u8 maxSubBusNumber; // Max Sub Bus - u8 logicalSlotNumber; // Logical Slot Number for IOA + struct HvCallPci_BusUnitInfo busUnitInfo; /* Generic bus unit info */ + u8 subBusNumber; /* Bus number of secondary bus */ + u8 maxAgents; /* Max idsels on secondary bus */ + u8 maxSubBusNumber; /* Max Sub Bus */ + u8 logicalSlotNumber; /* Logical Slot Number for IOA */ }; - -// Maximum BusUnitInfo buffer size. Provided for clients so they can allocate -// a buffer big enough for any type of bus unit. Increase as needed. + +/* + * Maximum BusUnitInfo buffer size. Provided for clients so + * they can allocate a buffer big enough for any type of bus + * unit. Increase as needed. + */ enum {HvCallPci_MaxBusUnitInfoSize = 128}; struct HvCallPci_BarParms { @@ -89,12 +92,12 @@ struct HvCallPci_BarParms { u64 protectStart; u64 protectEnd; u64 relocationOffset; - u64 pciAddress; + u64 pciAddress; u64 reserved[3]; -}; +}; enum HvCallPci_VpdType { - HvCallPci_BusVpd = 1, + HvCallPci_BusVpd = 1, HvCallPci_BusAdapterVpd = 2 }; @@ -123,15 +126,13 @@ enum HvCallPci_VpdType { #define HvCallPciUnmaskInterrupts HvCallPci + 49 #define HvCallPciGetBusUnitInfo HvCallPci + 50 -//============================================================================ static inline u64 HvCallPci_configLoad8(u16 busNumber, u8 subBusNumber, - u8 deviceId, u32 offset, - u8 *value) + u8 deviceId, u32 offset, u8 *value) { struct HvCallPci_DsaAddr dsa; struct HvCallPci_LoadReturn retVal; - *((u64*)&dsa) = 0; + *((u64*)&dsa) = 0; dsa.busNumber = busNumber; dsa.subBusNumber = subBusNumber; @@ -139,21 +140,18 @@ static inline u64 HvCallPci_configLoad8(u16 busNumber, u8 subBusNumber, HvCall3Ret16(HvCallPciConfigLoad8, &retVal, *(u64 *)&dsa, offset, 0); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - *value = retVal.value; return retVal.rc; } -//============================================================================ + static inline u64 HvCallPci_configLoad16(u16 busNumber, u8 subBusNumber, - u8 deviceId, u32 offset, - u16 *value) + u8 deviceId, u32 offset, u16 *value) { struct HvCallPci_DsaAddr dsa; struct HvCallPci_LoadReturn retVal; - *((u64*)&dsa) = 0; + *((u64*)&dsa) = 0; dsa.busNumber = busNumber; dsa.subBusNumber = subBusNumber; @@ -161,21 +159,18 @@ static inline u64 HvCallPci_configLoad16(u16 busNumber, u8 subBusNumber, HvCall3Ret16(HvCallPciConfigLoad16, &retVal, *(u64 *)&dsa, offset, 0); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - *value = retVal.value; return retVal.rc; } 
-//============================================================================ -static inline u64 HvCallPci_configLoad32(u16 busNumber, u8 subBusNumber, - u8 deviceId, u32 offset, - u32 *value) + +static inline u64 HvCallPci_configLoad32(u16 busNumber, u8 subBusNumber, + u8 deviceId, u32 offset, u32 *value) { struct HvCallPci_DsaAddr dsa; struct HvCallPci_LoadReturn retVal; - *((u64*)&dsa) = 0; + *((u64*)&dsa) = 0; dsa.busNumber = busNumber; dsa.subBusNumber = subBusNumber; @@ -183,84 +178,61 @@ static inline u64 HvCallPci_configLoad32(u16 busNumber, u8 subBusNumber, HvCall3Ret16(HvCallPciConfigLoad32, &retVal, *(u64 *)&dsa, offset, 0); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - *value = retVal.value; return retVal.rc; } -//============================================================================ -static inline u64 HvCallPci_configStore8(u16 busNumber, u8 subBusNumber, - u8 deviceId, u32 offset, - u8 value) + +static inline u64 HvCallPci_configStore8(u16 busNumber, u8 subBusNumber, + u8 deviceId, u32 offset, u8 value) { struct HvCallPci_DsaAddr dsa; - u64 retVal; - *((u64*)&dsa) = 0; + *((u64*)&dsa) = 0; dsa.busNumber = busNumber; dsa.subBusNumber = subBusNumber; dsa.deviceId = deviceId; - retVal = HvCall4(HvCallPciConfigStore8, *(u64 *)&dsa, offset, value, 0); - - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - - return retVal; + return HvCall4(HvCallPciConfigStore8, *(u64 *)&dsa, offset, value, 0); } -//============================================================================ -static inline u64 HvCallPci_configStore16(u16 busNumber, u8 subBusNumber, - u8 deviceId, u32 offset, - u16 value) + +static inline u64 HvCallPci_configStore16(u16 busNumber, u8 subBusNumber, + u8 deviceId, u32 offset, u16 value) { struct HvCallPci_DsaAddr dsa; - u64 retVal; - *((u64*)&dsa) = 0; + *((u64*)&dsa) = 0; dsa.busNumber = busNumber; dsa.subBusNumber = subBusNumber; dsa.deviceId = deviceId; - retVal = HvCall4(HvCallPciConfigStore16, *(u64 *)&dsa, offset, value, 0); - - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - - return retVal; + return HvCall4(HvCallPciConfigStore16, *(u64 *)&dsa, offset, value, 0); } -//============================================================================ -static inline u64 HvCallPci_configStore32(u16 busNumber, u8 subBusNumber, - u8 deviceId, u32 offset, - u32 value) + +static inline u64 HvCallPci_configStore32(u16 busNumber, u8 subBusNumber, + u8 deviceId, u32 offset, u32 value) { struct HvCallPci_DsaAddr dsa; - u64 retVal; - *((u64*)&dsa) = 0; + *((u64*)&dsa) = 0; dsa.busNumber = busNumber; dsa.subBusNumber = subBusNumber; dsa.deviceId = deviceId; - retVal = HvCall4(HvCallPciConfigStore32, *(u64 *)&dsa, offset, value, 0); - - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - - return retVal; + return HvCall4(HvCallPciConfigStore32, *(u64 *)&dsa, offset, value, 0); } -//============================================================================ -static inline u64 HvCallPci_barLoad8(u16 busNumberParm, - u8 subBusParm, - u8 deviceIdParm, - u8 barNumberParm, - u64 offsetParm, - u8* valueParm) + +static inline u64 HvCallPci_barLoad8(u16 busNumberParm, u8 subBusParm, + u8 deviceIdParm, u8 barNumberParm, u64 offsetParm, + u8 *valueParm) { struct HvCallPci_DsaAddr dsa; struct HvCallPci_LoadReturn retVal; - *((u64*)&dsa) = 0; + *((u64*)&dsa) = 0; dsa.busNumber = busNumberParm; dsa.subBusNumber = subBusParm; @@ -269,24 +241,19 @@ static inline u64 HvCallPci_barLoad8(u16 busNumberParm, HvCall3Ret16(HvCallPciBarLoad8, &retVal, *(u64 *)&dsa, offsetParm, 0); - // 
getPaca()->adjustHmtForNoOfSpinLocksHeld(); - *valueParm = retVal.value; return retVal.rc; } -//============================================================================ -static inline u64 HvCallPci_barLoad16(u16 busNumberParm, - u8 subBusParm, - u8 deviceIdParm, - u8 barNumberParm, - u64 offsetParm, - u16* valueParm) + +static inline u64 HvCallPci_barLoad16(u16 busNumberParm, u8 subBusParm, + u8 deviceIdParm, u8 barNumberParm, u64 offsetParm, + u16 *valueParm) { struct HvCallPci_DsaAddr dsa; struct HvCallPci_LoadReturn retVal; - *((u64*)&dsa) = 0; + *((u64*)&dsa) = 0; dsa.busNumber = busNumberParm; dsa.subBusNumber = subBusParm; @@ -295,24 +262,19 @@ static inline u64 HvCallPci_barLoad16(u16 busNumberParm, HvCall3Ret16(HvCallPciBarLoad16, &retVal, *(u64 *)&dsa, offsetParm, 0); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - *valueParm = retVal.value; return retVal.rc; } -//============================================================================ -static inline u64 HvCallPci_barLoad32(u16 busNumberParm, - u8 subBusParm, - u8 deviceIdParm, - u8 barNumberParm, - u64 offsetParm, - u32* valueParm) + +static inline u64 HvCallPci_barLoad32(u16 busNumberParm, u8 subBusParm, + u8 deviceIdParm, u8 barNumberParm, u64 offsetParm, + u32 *valueParm) { struct HvCallPci_DsaAddr dsa; struct HvCallPci_LoadReturn retVal; - *((u64*)&dsa) = 0; + *((u64*)&dsa) = 0; dsa.busNumber = busNumberParm; dsa.subBusNumber = subBusParm; @@ -321,24 +283,19 @@ static inline u64 HvCallPci_barLoad32(u16 busNumberParm, HvCall3Ret16(HvCallPciBarLoad32, &retVal, *(u64 *)&dsa, offsetParm, 0); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - *valueParm = retVal.value; return retVal.rc; } -//============================================================================ -static inline u64 HvCallPci_barLoad64(u16 busNumberParm, - u8 subBusParm, - u8 deviceIdParm, - u8 barNumberParm, - u64 offsetParm, - u64* valueParm) + +static inline u64 HvCallPci_barLoad64(u16 busNumberParm, u8 subBusParm, + u8 deviceIdParm, u8 barNumberParm, u64 offsetParm, + u64 *valueParm) { struct HvCallPci_DsaAddr dsa; struct HvCallPci_LoadReturn retVal; - *((u64*)&dsa) = 0; + *((u64*)&dsa) = 0; dsa.busNumber = busNumberParm; dsa.subBusNumber = subBusParm; @@ -347,112 +304,81 @@ static inline u64 HvCallPci_barLoad64(u16 busNumberParm, HvCall3Ret16(HvCallPciBarLoad64, &retVal, *(u64 *)&dsa, offsetParm, 0); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - *valueParm = retVal.value; return retVal.rc; } -//============================================================================ -static inline u64 HvCallPci_barStore8(u16 busNumberParm, - u8 subBusParm, - u8 deviceIdParm, - u8 barNumberParm, - u64 offsetParm, - u8 valueParm) + +static inline u64 HvCallPci_barStore8(u16 busNumberParm, u8 subBusParm, + u8 deviceIdParm, u8 barNumberParm, u64 offsetParm, + u8 valueParm) { struct HvCallPci_DsaAddr dsa; - u64 retVal; *((u64*)&dsa) = 0; - + dsa.busNumber = busNumberParm; dsa.subBusNumber = subBusParm; dsa.deviceId = deviceIdParm; dsa.barNumber = barNumberParm; - retVal = HvCall4(HvCallPciBarStore8, *(u64 *)&dsa, offsetParm, valueParm, 0); - - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - - return retVal; + return HvCall4(HvCallPciBarStore8, *(u64 *)&dsa, offsetParm, + valueParm, 0); } -//============================================================================ -static inline u64 HvCallPci_barStore16(u16 busNumberParm, - u8 subBusParm, - u8 deviceIdParm, - u8 barNumberParm, - u64 offsetParm, - u16 valueParm) + +static inline u64 
HvCallPci_barStore16(u16 busNumberParm, u8 subBusParm, + u8 deviceIdParm, u8 barNumberParm, u64 offsetParm, + u16 valueParm) { struct HvCallPci_DsaAddr dsa; - u64 retVal; *((u64*)&dsa) = 0; - + dsa.busNumber = busNumberParm; dsa.subBusNumber = subBusParm; dsa.deviceId = deviceIdParm; dsa.barNumber = barNumberParm; - retVal = HvCall4(HvCallPciBarStore16, *(u64 *)&dsa, offsetParm, valueParm, 0); - - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - - return retVal; + return HvCall4(HvCallPciBarStore16, *(u64 *)&dsa, offsetParm, + valueParm, 0); } -//============================================================================ -static inline u64 HvCallPci_barStore32(u16 busNumberParm, - u8 subBusParm, - u8 deviceIdParm, - u8 barNumberParm, - u64 offsetParm, - u32 valueParm) + +static inline u64 HvCallPci_barStore32(u16 busNumberParm, u8 subBusParm, + u8 deviceIdParm, u8 barNumberParm, u64 offsetParm, + u32 valueParm) { struct HvCallPci_DsaAddr dsa; - u64 retVal; *((u64*)&dsa) = 0; - + dsa.busNumber = busNumberParm; dsa.subBusNumber = subBusParm; dsa.deviceId = deviceIdParm; dsa.barNumber = barNumberParm; - retVal = HvCall4(HvCallPciBarStore32, *(u64 *)&dsa, offsetParm, valueParm, 0); - - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - - return retVal; + return HvCall4(HvCallPciBarStore32, *(u64 *)&dsa, offsetParm, + valueParm, 0); } -//============================================================================ -static inline u64 HvCallPci_barStore64(u16 busNumberParm, - u8 subBusParm, - u8 deviceIdParm, - u8 barNumberParm, - u64 offsetParm, - u64 valueParm) + +static inline u64 HvCallPci_barStore64(u16 busNumberParm, u8 subBusParm, + u8 deviceIdParm, u8 barNumberParm, u64 offsetParm, + u64 valueParm) { struct HvCallPci_DsaAddr dsa; - u64 retVal; *((u64*)&dsa) = 0; - + dsa.busNumber = busNumberParm; dsa.subBusNumber = subBusParm; dsa.deviceId = deviceIdParm; dsa.barNumber = barNumberParm; - retVal = HvCall4(HvCallPciBarStore64, *(u64 *)&dsa, offsetParm, valueParm, 0); - - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - - return retVal; + return HvCall4(HvCallPciBarStore64, *(u64 *)&dsa, offsetParm, + valueParm, 0); } -//============================================================================ -static inline u64 HvCallPci_eoi(u16 busNumberParm, - u8 subBusParm, - u8 deviceIdParm) + +static inline u64 HvCallPci_eoi(u16 busNumberParm, u8 subBusParm, + u8 deviceIdParm) { struct HvCallPci_DsaAddr dsa; struct HvCallPci_LoadReturn retVal; @@ -465,20 +391,13 @@ static inline u64 HvCallPci_eoi(u16 busNumberParm, HvCall1Ret16(HvCallPciEoi, &retVal, *(u64*)&dsa); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - return retVal.rc; } -//============================================================================ -static inline u64 HvCallPci_getBarParms(u16 busNumberParm, - u8 subBusParm, - u8 deviceIdParm, - u8 barNumberParm, - u64 parms, - u32 sizeofParms) + +static inline u64 HvCallPci_getBarParms(u16 busNumberParm, u8 subBusParm, + u8 deviceIdParm, u8 barNumberParm, u64 parms, u32 sizeofParms) { struct HvCallPci_DsaAddr dsa; - u64 retVal; *((u64*)&dsa) = 0; @@ -487,62 +406,41 @@ static inline u64 HvCallPci_getBarParms(u16 busNumberParm, dsa.deviceId = deviceIdParm; dsa.barNumber = barNumberParm; - retVal = HvCall3(HvCallPciGetBarParms, *(u64*)&dsa, parms, sizeofParms); - - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - - return retVal; + return HvCall3(HvCallPciGetBarParms, *(u64*)&dsa, parms, sizeofParms); } -//============================================================================ -static 
inline u64 HvCallPci_maskFisr(u16 busNumberParm, - u8 subBusParm, - u8 deviceIdParm, - u64 fisrMask) + +static inline u64 HvCallPci_maskFisr(u16 busNumberParm, u8 subBusParm, + u8 deviceIdParm, u64 fisrMask) { struct HvCallPci_DsaAddr dsa; - u64 retVal; - *((u64*)&dsa) = 0; + *((u64*)&dsa) = 0; dsa.busNumber = busNumberParm; dsa.subBusNumber = subBusParm; dsa.deviceId = deviceIdParm; - retVal = HvCall2(HvCallPciMaskFisr, *(u64*)&dsa, fisrMask); - - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - - return retVal; + return HvCall2(HvCallPciMaskFisr, *(u64*)&dsa, fisrMask); } -//============================================================================ -static inline u64 HvCallPci_unmaskFisr(u16 busNumberParm, - u8 subBusParm, - u8 deviceIdParm, - u64 fisrMask) + +static inline u64 HvCallPci_unmaskFisr(u16 busNumberParm, u8 subBusParm, + u8 deviceIdParm, u64 fisrMask) { struct HvCallPci_DsaAddr dsa; - u64 retVal; - *((u64*)&dsa) = 0; + *((u64*)&dsa) = 0; dsa.busNumber = busNumberParm; dsa.subBusNumber = subBusParm; dsa.deviceId = deviceIdParm; - retVal = HvCall2(HvCallPciUnmaskFisr, *(u64*)&dsa, fisrMask); - - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - - return retVal; + return HvCall2(HvCallPciUnmaskFisr, *(u64*)&dsa, fisrMask); } -//============================================================================ -static inline u64 HvCallPci_setSlotReset(u16 busNumberParm, - u8 subBusParm, - u8 deviceIdParm, - u64 onNotOff) + +static inline u64 HvCallPci_setSlotReset(u16 busNumberParm, u8 subBusParm, + u8 deviceIdParm, u64 onNotOff) { struct HvCallPci_DsaAddr dsa; - u64 retVal; *((u64*)&dsa) = 0; @@ -550,21 +448,13 @@ static inline u64 HvCallPci_setSlotReset(u16 busNumberParm, dsa.subBusNumber = subBusParm; dsa.deviceId = deviceIdParm; - retVal = HvCall2(HvCallPciSetSlotReset, *(u64*)&dsa, onNotOff); - - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - - return retVal; + return HvCall2(HvCallPciSetSlotReset, *(u64*)&dsa, onNotOff); } -//============================================================================ -static inline u64 HvCallPci_getDeviceInfo(u16 busNumberParm, - u8 subBusParm, - u8 deviceNumberParm, - u64 parms, - u32 sizeofParms) + +static inline u64 HvCallPci_getDeviceInfo(u16 busNumberParm, u8 subBusParm, + u8 deviceNumberParm, u64 parms, u32 sizeofParms) { struct HvCallPci_DsaAddr dsa; - u64 retVal; *((u64*)&dsa) = 0; @@ -572,102 +462,72 @@ static inline u64 HvCallPci_getDeviceInfo(u16 busNumberParm, dsa.subBusNumber = subBusParm; dsa.deviceId = deviceNumberParm << 4; - retVal = HvCall3(HvCallPciGetDeviceInfo, *(u64*)&dsa, parms, sizeofParms); - - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - - return retVal; + return HvCall3(HvCallPciGetDeviceInfo, *(u64*)&dsa, parms, sizeofParms); } -//============================================================================ -static inline u64 HvCallPci_maskInterrupts(u16 busNumberParm, - u8 subBusParm, - u8 deviceIdParm, - u64 interruptMask) + +static inline u64 HvCallPci_maskInterrupts(u16 busNumberParm, u8 subBusParm, + u8 deviceIdParm, u64 interruptMask) { struct HvCallPci_DsaAddr dsa; - u64 retVal; - *((u64*)&dsa) = 0; + *((u64*)&dsa) = 0; dsa.busNumber = busNumberParm; dsa.subBusNumber = subBusParm; dsa.deviceId = deviceIdParm; - retVal = HvCall2(HvCallPciMaskInterrupts, *(u64*)&dsa, interruptMask); - - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - - return retVal; + return HvCall2(HvCallPciMaskInterrupts, *(u64*)&dsa, interruptMask); } -//============================================================================ 
-static inline u64 HvCallPci_unmaskInterrupts(u16 busNumberParm, - u8 subBusParm, - u8 deviceIdParm, - u64 interruptMask) + +static inline u64 HvCallPci_unmaskInterrupts(u16 busNumberParm, u8 subBusParm, + u8 deviceIdParm, u64 interruptMask) { struct HvCallPci_DsaAddr dsa; - u64 retVal; - *((u64*)&dsa) = 0; + *((u64*)&dsa) = 0; dsa.busNumber = busNumberParm; dsa.subBusNumber = subBusParm; dsa.deviceId = deviceIdParm; - retVal = HvCall2(HvCallPciUnmaskInterrupts, *(u64*)&dsa, interruptMask); - - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - - return retVal; + return HvCall2(HvCallPciUnmaskInterrupts, *(u64*)&dsa, interruptMask); } -//============================================================================ -static inline u64 HvCallPci_getBusUnitInfo(u16 busNumberParm, - u8 subBusParm, - u8 deviceIdParm, - u64 parms, - u32 sizeofParms) +static inline u64 HvCallPci_getBusUnitInfo(u16 busNumberParm, u8 subBusParm, + u8 deviceIdParm, u64 parms, u32 sizeofParms) { struct HvCallPci_DsaAddr dsa; - u64 retVal; - *((u64*)&dsa) = 0; + *((u64*)&dsa) = 0; dsa.busNumber = busNumberParm; dsa.subBusNumber = subBusParm; dsa.deviceId = deviceIdParm; - retVal = HvCall3(HvCallPciGetBusUnitInfo, *(u64*)&dsa, parms, sizeofParms); - - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - - return retVal; + return HvCall3(HvCallPciGetBusUnitInfo, *(u64*)&dsa, parms, + sizeofParms); } -//============================================================================ -static inline int HvCallPci_getBusVpd(u16 busNumParm, u64 destParm, u16 sizeParm) +static inline int HvCallPci_getBusVpd(u16 busNumParm, u64 destParm, + u16 sizeParm) { - int xRetSize; - u64 xRc = HvCall4(HvCallPciGetCardVpd, busNumParm, destParm, sizeParm, HvCallPci_BusVpd); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); + u64 xRc = HvCall4(HvCallPciGetCardVpd, busNumParm, destParm, + sizeParm, HvCallPci_BusVpd); if (xRc == -1) - xRetSize = -1; + return -1; else - xRetSize = xRc & 0xFFFF; - return xRetSize; + return xRc & 0xFFFF; } -//============================================================================ -static inline int HvCallPci_getBusAdapterVpd(u16 busNumParm, u64 destParm, u16 sizeParm) +static inline int HvCallPci_getBusAdapterVpd(u16 busNumParm, u64 destParm, + u16 sizeParm) { - int xRetSize; - u64 xRc = HvCall4(HvCallPciGetCardVpd, busNumParm, destParm, sizeParm, HvCallPci_BusAdapterVpd); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); + u64 xRc = HvCall4(HvCallPciGetCardVpd, busNumParm, destParm, + sizeParm, HvCallPci_BusAdapterVpd); if (xRc == -1) - xRetSize = -1; + return -1; else - xRetSize = xRc & 0xFFFF; - return xRetSize; + return xRc & 0xFFFF; } -//============================================================================ + #endif /* _HVCALLPCI_H */ diff --git a/include/asm-ppc64/iSeries/HvCallSc.h b/include/asm-ppc64/iSeries/HvCallSc.h index eea2584..a62cef3 100644 --- a/include/asm-ppc64/iSeries/HvCallSc.h +++ b/include/asm-ppc64/iSeries/HvCallSc.h @@ -1,17 +1,17 @@ /* * HvCallSc.h * Copyright (C) 2001 Mike Corrigan IBM Corporation - * + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. - * + * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the * GNU General Public License for more details. - * + * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA @@ -19,7 +19,7 @@ #ifndef _HVCALLSC_H #define _HVCALLSC_H -#include <asm/iSeries/HvTypes.h> +#include <linux/types.h> #define HvCallBase 0x8000000000000000ul #define HvCallCc 0x8001000000000000ul @@ -30,22 +30,22 @@ #define HvCallSm 0x8007000000000000ul #define HvCallXm 0x8009000000000000ul -u64 HvCall0( u64 ); -u64 HvCall1( u64, u64 ); -u64 HvCall2( u64, u64, u64 ); -u64 HvCall3( u64, u64, u64, u64 ); -u64 HvCall4( u64, u64, u64, u64, u64 ); -u64 HvCall5( u64, u64, u64, u64, u64, u64 ); -u64 HvCall6( u64, u64, u64, u64, u64, u64, u64 ); -u64 HvCall7( u64, u64, u64, u64, u64, u64, u64, u64 ); +extern u64 HvCall0(u64); +extern u64 HvCall1(u64, u64); +extern u64 HvCall2(u64, u64, u64); +extern u64 HvCall3(u64, u64, u64, u64); +extern u64 HvCall4(u64, u64, u64, u64, u64); +extern u64 HvCall5(u64, u64, u64, u64, u64, u64); +extern u64 HvCall6(u64, u64, u64, u64, u64, u64, u64); +extern u64 HvCall7(u64, u64, u64, u64, u64, u64, u64, u64); -u64 HvCall0Ret16( u64, void * ); -u64 HvCall1Ret16( u64, void *, u64 ); -u64 HvCall2Ret16( u64, void *, u64, u64 ); -u64 HvCall3Ret16( u64, void *, u64, u64, u64 ); -u64 HvCall4Ret16( u64, void *, u64, u64, u64, u64 ); -u64 HvCall5Ret16( u64, void *, u64, u64, u64, u64, u64 ); -u64 HvCall6Ret16( u64, void *, u64, u64, u64, u64, u64, u64 ); -u64 HvCall7Ret16( u64, void *, u64, u64 ,u64 ,u64 ,u64 ,u64 ,u64 ); +extern u64 HvCall0Ret16(u64, void *); +extern u64 HvCall1Ret16(u64, void *, u64); +extern u64 HvCall2Ret16(u64, void *, u64, u64); +extern u64 HvCall3Ret16(u64, void *, u64, u64, u64); +extern u64 HvCall4Ret16(u64, void *, u64, u64, u64, u64); +extern u64 HvCall5Ret16(u64, void *, u64, u64, u64, u64, u64); +extern u64 HvCall6Ret16(u64, void *, u64, u64, u64, u64, u64, u64); +extern u64 HvCall7Ret16(u64, void *, u64, u64 ,u64 ,u64 ,u64 ,u64 ,u64); #endif /* _HVCALLSC_H */ diff --git a/include/asm-ppc64/iSeries/HvCallSm.h b/include/asm-ppc64/iSeries/HvCallSm.h index 9050c94..8a3dbb0 100644 --- a/include/asm-ppc64/iSeries/HvCallSm.h +++ b/include/asm-ppc64/iSeries/HvCallSm.h @@ -1,17 +1,17 @@ /* * HvCallSm.h * Copyright (C) 2001 Mike Corrigan IBM Corporation - * + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. - * + * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * + * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA @@ -19,34 +19,20 @@ #ifndef _HVCALLSM_H #define _HVCALLSM_H -//============================================================================ -// -// This file contains the "hypervisor call" interface which is used to -// drive the hypervisor from the OS. 
-// -//============================================================================ +/* + * This file contains the "hypervisor call" interface which is used to + * drive the hypervisor from the OS. + */ -//------------------------------------------------------------------- -// Standard Includes -//------------------------------------------------------------------- #include <asm/iSeries/HvCallSc.h> #include <asm/iSeries/HvTypes.h> -//----------------------------------------------------------------------------- -// Constants -//----------------------------------------------------------------------------- - #define HvCallSmGet64BitsOfAccessMap HvCallSm + 11 - -//============================================================================ -static inline u64 HvCallSm_get64BitsOfAccessMap( - HvLpIndex lpIndex, u64 indexIntoBitMap ) +static inline u64 HvCallSm_get64BitsOfAccessMap(HvLpIndex lpIndex, + u64 indexIntoBitMap) { - u64 retval = HvCall2(HvCallSmGet64BitsOfAccessMap, lpIndex, - indexIntoBitMap ); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - return retval; + return HvCall2(HvCallSmGet64BitsOfAccessMap, lpIndex, indexIntoBitMap); } -//============================================================================ + #endif /* _HVCALLSM_H */ diff --git a/include/asm-ppc64/iSeries/HvCallXm.h b/include/asm-ppc64/iSeries/HvCallXm.h index bfb898f..8b9ba60 100644 --- a/include/asm-ppc64/iSeries/HvCallXm.h +++ b/include/asm-ppc64/iSeries/HvCallXm.h @@ -1,30 +1,13 @@ -//============================================================================ -// Header File Id -// Name______________: HvCallXm.H -// -// Description_______: -// -// This file contains the "hypervisor call" interface which is used to -// drive the hypervisor from SLIC. -// -//============================================================================ +/* + * This file contains the "hypervisor call" interface which is used to + * drive the hypervisor from SLIC. 
+ */ #ifndef _HVCALLXM_H #define _HVCALLXM_H -//------------------------------------------------------------------- -// Forward declarations -//------------------------------------------------------------------- - -//------------------------------------------------------------------- -// Standard Includes -//------------------------------------------------------------------- #include <asm/iSeries/HvCallSc.h> #include <asm/iSeries/HvTypes.h> -//----------------------------------------------------------------------------- -// Constants -//----------------------------------------------------------------------------- - #define HvCallXmGetTceTableParms HvCallXm + 0 #define HvCallXmTestBus HvCallXm + 1 #define HvCallXmConnectBusUnit HvCallXm + 2 @@ -33,63 +16,63 @@ #define HvCallXmSetTce HvCallXm + 11 #define HvCallXmSetTces HvCallXm + 13 +/* + * Structure passed to HvCallXm_getTceTableParms + */ +struct iommu_table_cb { + unsigned long itc_busno; /* Bus number for this tce table */ + unsigned long itc_start; /* Will be NULL for secondary */ + unsigned long itc_totalsize; /* Size (in pages) of whole table */ + unsigned long itc_offset; /* Index into real tce table of the + start of our section */ + unsigned long itc_size; /* Size (in pages) of our section */ + unsigned long itc_index; /* Index of this tce table */ + unsigned short itc_maxtables; /* Max num of tables for partition */ + unsigned char itc_virtbus; /* Flag to indicate virtual bus */ + unsigned char itc_slotno; /* IOA Tce Slot Index */ + unsigned char itc_rsvd[4]; +}; - -//============================================================================ -static inline void HvCallXm_getTceTableParms(u64 cb) +static inline void HvCallXm_getTceTableParms(u64 cb) { HvCall1(HvCallXmGetTceTableParms, cb); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); } -//============================================================================ -static inline u64 HvCallXm_setTce(u64 tceTableToken, u64 tceOffset, u64 tce) -{ - u64 retval = HvCall3(HvCallXmSetTce, tceTableToken, tceOffset, tce ); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - return retval; -} -//============================================================================ -static inline u64 HvCallXm_setTces(u64 tceTableToken, u64 tceOffset, u64 numTces, u64 tce1, u64 tce2, u64 tce3, u64 tce4) -{ - u64 retval = HvCall7(HvCallXmSetTces, tceTableToken, tceOffset, numTces, - tce1, tce2, tce3, tce4 ); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - return retval; + +static inline u64 HvCallXm_setTce(u64 tceTableToken, u64 tceOffset, u64 tce) +{ + return HvCall3(HvCallXmSetTce, tceTableToken, tceOffset, tce); } -//============================================================================= -static inline u64 HvCallXm_testBus(u16 busNumber) + +static inline u64 HvCallXm_setTces(u64 tceTableToken, u64 tceOffset, + u64 numTces, u64 tce1, u64 tce2, u64 tce3, u64 tce4) { - u64 retVal = HvCall1(HvCallXmTestBus, busNumber); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - return retVal; + return HvCall7(HvCallXmSetTces, tceTableToken, tceOffset, numTces, + tce1, tce2, tce3, tce4); } -//===================================================================================== -static inline u64 HvCallXm_testBusUnit(u16 busNumber, u8 subBusNumber, u8 deviceId) + +static inline u64 HvCallXm_testBus(u16 busNumber) { - u64 busUnitNumber = (subBusNumber << 8) | deviceId; - u64 retVal = HvCall2(HvCallXmTestBusUnit, busNumber, busUnitNumber); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - return 
retVal; + return HvCall1(HvCallXmTestBus, busNumber); } -//===================================================================================== -static inline u64 HvCallXm_connectBusUnit(u16 busNumber, u8 subBusNumber, u8 deviceId, - u64 interruptToken) + +static inline u64 HvCallXm_testBusUnit(u16 busNumber, u8 subBusNumber, + u8 deviceId) { - u64 busUnitNumber = (subBusNumber << 8) | deviceId; - u64 queueIndex = 0; // HvLpConfig::mapDsaToQueueIndex(HvLpDSA(busNumber, xBoard, xCard)); + return HvCall2(HvCallXmTestBusUnit, busNumber, + (subBusNumber << 8) | deviceId); +} - u64 retVal = HvCall5(HvCallXmConnectBusUnit, busNumber, busUnitNumber, - interruptToken, 0, queueIndex); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - return retVal; +static inline u64 HvCallXm_connectBusUnit(u16 busNumber, u8 subBusNumber, + u8 deviceId, u64 interruptToken) +{ + return HvCall5(HvCallXmConnectBusUnit, busNumber, + (subBusNumber << 8) | deviceId, interruptToken, 0, + 0 /* HvLpConfig::mapDsaToQueueIndex(HvLpDSA(busNumber, xBoard, xCard)) */); } -//===================================================================================== -static inline u64 HvCallXm_loadTod(void) + +static inline u64 HvCallXm_loadTod(void) { - u64 retVal = HvCall0(HvCallXmLoadTod); - // getPaca()->adjustHmtForNoOfSpinLocksHeld(); - return retVal; + return HvCall0(HvCallXmLoadTod); } -//===================================================================================== #endif /* _HVCALLXM_H */ diff --git a/include/asm-ppc64/iSeries/HvLpConfig.h b/include/asm-ppc64/iSeries/HvLpConfig.h index bdbd70f..f1cf1e7 100644 --- a/include/asm-ppc64/iSeries/HvLpConfig.h +++ b/include/asm-ppc64/iSeries/HvLpConfig.h @@ -1,17 +1,17 @@ /* * HvLpConfig.h * Copyright (C) 2001 Mike Corrigan IBM Corporation - * + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. - * + * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * + * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA @@ -19,262 +19,120 @@ #ifndef _HVLPCONFIG_H #define _HVLPCONFIG_H -//=========================================================================== -// -// This file contains the interface to the LPAR configuration data -// to determine which resources should be allocated to each partition. -// -//=========================================================================== +/* + * This file contains the interface to the LPAR configuration data + * to determine which resources should be allocated to each partition. 
+ */ -#include <asm/iSeries/HvCallCfg.h> +#include <asm/iSeries/HvCallSc.h> #include <asm/iSeries/HvTypes.h> #include <asm/iSeries/ItLpNaca.h> -#include <asm/iSeries/LparData.h> -//------------------------------------------------------------------- -// Constants -//------------------------------------------------------------------- +enum { + HvCallCfg_Cur = 0, + HvCallCfg_Init = 1, + HvCallCfg_Max = 2, + HvCallCfg_Min = 3 +}; + +#define HvCallCfgGetSystemPhysicalProcessors HvCallCfg + 6 +#define HvCallCfgGetPhysicalProcessors HvCallCfg + 7 +#define HvCallCfgGetMsChunks HvCallCfg + 9 +#define HvCallCfgGetSharedPoolIndex HvCallCfg + 20 +#define HvCallCfgGetSharedProcUnits HvCallCfg + 21 +#define HvCallCfgGetNumProcsInSharedPool HvCallCfg + 22 +#define HvCallCfgGetVirtualLanIndexMap HvCallCfg + 30 +#define HvCallCfgGetHostingLpIndex HvCallCfg + 32 extern HvLpIndex HvLpConfig_getLpIndex_outline(void); -//=================================================================== static inline HvLpIndex HvLpConfig_getLpIndex(void) { return itLpNaca.xLpIndex; } -//=================================================================== + static inline HvLpIndex HvLpConfig_getPrimaryLpIndex(void) { return itLpNaca.xPrimaryLpIndex; } -//================================================================= -static inline HvLpIndex HvLpConfig_getLps(void) -{ - return HvCallCfg_getLps(); -} -//================================================================= -static inline HvLpIndexMap HvLpConfig_getActiveLpMap(void) -{ - return HvCallCfg_getActiveLpMap(); -} -//================================================================= -static inline u64 HvLpConfig_getSystemMsMegs(void) -{ - return HvCallCfg_getSystemMsChunks() / HVCHUNKSPERMEG; -} -//================================================================= -static inline u64 HvLpConfig_getSystemMsChunks(void) -{ - return HvCallCfg_getSystemMsChunks(); -} -//================================================================= -static inline u64 HvLpConfig_getSystemMsPages(void) -{ - return HvCallCfg_getSystemMsChunks() * HVPAGESPERCHUNK; -} -//================================================================ -static inline u64 HvLpConfig_getMsMegs(void) -{ - return HvCallCfg_getMsChunks(HvLpConfig_getLpIndex(),HvCallCfg_Cur) / HVCHUNKSPERMEG; -} -//================================================================ -static inline u64 HvLpConfig_getMsChunks(void) -{ - return HvCallCfg_getMsChunks(HvLpConfig_getLpIndex(),HvCallCfg_Cur); -} -//================================================================ -static inline u64 HvLpConfig_getMsPages(void) -{ - return HvCallCfg_getMsChunks(HvLpConfig_getLpIndex(),HvCallCfg_Cur) * HVPAGESPERCHUNK; -} -//================================================================ -static inline u64 HvLpConfig_getMinMsMegs(void) -{ - return HvCallCfg_getMsChunks(HvLpConfig_getLpIndex(),HvCallCfg_Min) / HVCHUNKSPERMEG; -} -//================================================================ -static inline u64 HvLpConfig_getMinMsChunks(void) -{ - return HvCallCfg_getMsChunks(HvLpConfig_getLpIndex(),HvCallCfg_Min); -} -//================================================================ -static inline u64 HvLpConfig_getMinMsPages(void) -{ - return HvCallCfg_getMsChunks(HvLpConfig_getLpIndex(),HvCallCfg_Min) * HVPAGESPERCHUNK; -} -//================================================================ -static inline u64 HvLpConfig_getMinRuntimeMsMegs(void) -{ - return HvCallCfg_getMinRuntimeMsChunks(HvLpConfig_getLpIndex()) / HVCHUNKSPERMEG; -} 
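The HvCallCfg_* selectors and call numbers defined above combine directly with the HvCall* primitives from HvCallSc.h; each of the wrapper functions being dropped here reduces to one such call. As a sketch only (the name example_getMinMsChunks is hypothetical; this is not part of the patch), the removed HvLpConfig_getMinMsChunks() maps onto the new interface roughly as:

/*
 * Illustrative sketch, not part of the patch: the removed
 * HvLpConfig_getMinMsChunks() re-expressed as a direct hypervisor
 * call, assuming the call takes the partition index plus one of the
 * HvCallCfg_* limit selectors defined above.
 */
static inline u64 example_getMinMsChunks(void)
{
	return HvCall2(HvCallCfgGetMsChunks, HvLpConfig_getLpIndex(),
		       HvCallCfg_Min);
}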
-//=============================================================== -static inline u64 HvLpConfig_getMinRuntimeMsChunks(void) -{ - return HvCallCfg_getMinRuntimeMsChunks(HvLpConfig_getLpIndex()); -} -//=============================================================== -static inline u64 HvLpConfig_getMinRuntimeMsPages(void) -{ - return HvCallCfg_getMinRuntimeMsChunks(HvLpConfig_getLpIndex()) * HVPAGESPERCHUNK; -} -//=============================================================== -static inline u64 HvLpConfig_getMaxMsMegs(void) -{ - return HvCallCfg_getMsChunks(HvLpConfig_getLpIndex(),HvCallCfg_Max) / HVCHUNKSPERMEG; -} -//=============================================================== -static inline u64 HvLpConfig_getMaxMsChunks(void) -{ - return HvCallCfg_getMsChunks(HvLpConfig_getLpIndex(),HvCallCfg_Max); -} -//=============================================================== -static inline u64 HvLpConfig_getMaxMsPages(void) -{ - return HvCallCfg_getMsChunks(HvLpConfig_getLpIndex(),HvCallCfg_Max) * HVPAGESPERCHUNK; -} -//=============================================================== -static inline u64 HvLpConfig_getInitMsMegs(void) -{ - return HvCallCfg_getMsChunks(HvLpConfig_getLpIndex(),HvCallCfg_Init) / HVCHUNKSPERMEG; -} -//=============================================================== -static inline u64 HvLpConfig_getInitMsChunks(void) -{ - return HvCallCfg_getMsChunks(HvLpConfig_getLpIndex(),HvCallCfg_Init); -} -//=============================================================== -static inline u64 HvLpConfig_getInitMsPages(void) -{ return HvCallCfg_getMsChunks(HvLpConfig_getLpIndex(),HvCallCfg_Init) * HVPAGESPERCHUNK; -} -//=============================================================== -static inline u64 HvLpConfig_getSystemPhysicalProcessors(void) -{ - return HvCallCfg_getSystemPhysicalProcessors(); -} -//=============================================================== -static inline u64 HvLpConfig_getSystemLogicalProcessors(void) -{ - return HvCallCfg_getSystemPhysicalProcessors() * (/*getPaca()->getSecondaryThreadCount() +*/ 1); -} -//=============================================================== -static inline u64 HvLpConfig_getNumProcsInSharedPool(HvLpSharedPoolIndex sPI) -{ - return HvCallCfg_getNumProcsInSharedPool(sPI); -} -//=============================================================== -static inline u64 HvLpConfig_getPhysicalProcessors(void) -{ - return HvCallCfg_getPhysicalProcessors(HvLpConfig_getLpIndex(),HvCallCfg_Cur); -} -//=============================================================== -static inline u64 HvLpConfig_getLogicalProcessors(void) -{ - return HvCallCfg_getPhysicalProcessors(HvLpConfig_getLpIndex(),HvCallCfg_Cur) * (/*getPaca()->getSecondaryThreadCount() +*/ 1); -} -//=============================================================== -static inline HvLpSharedPoolIndex HvLpConfig_getSharedPoolIndex(void) -{ - return HvCallCfg_getSharedPoolIndex(HvLpConfig_getLpIndex()); -} -//=============================================================== -static inline u64 HvLpConfig_getSharedProcUnits(void) -{ - return HvCallCfg_getSharedProcUnits(HvLpConfig_getLpIndex(),HvCallCfg_Cur); -} -//=============================================================== -static inline u64 HvLpConfig_getMinSharedProcUnits(void) -{ - return HvCallCfg_getSharedProcUnits(HvLpConfig_getLpIndex(),HvCallCfg_Min); -} -//=============================================================== -static inline u64 HvLpConfig_getMaxSharedProcUnits(void) -{ - return 
HvCallCfg_getSharedProcUnits(HvLpConfig_getLpIndex(),HvCallCfg_Max); -} -//=============================================================== -static inline u64 HvLpConfig_getMinPhysicalProcessors(void) -{ - return HvCallCfg_getPhysicalProcessors(HvLpConfig_getLpIndex(),HvCallCfg_Min); -} -//=============================================================== -static inline u64 HvLpConfig_getMinLogicalProcessors(void) -{ - return HvCallCfg_getPhysicalProcessors(HvLpConfig_getLpIndex(),HvCallCfg_Min) * (/*getPaca()->getSecondaryThreadCount() +*/ 1); -} -//=============================================================== -static inline u64 HvLpConfig_getMaxPhysicalProcessors(void) -{ - return HvCallCfg_getPhysicalProcessors(HvLpConfig_getLpIndex(),HvCallCfg_Max); -} -//=============================================================== -static inline u64 HvLpConfig_getMaxLogicalProcessors(void) -{ - return HvCallCfg_getPhysicalProcessors(HvLpConfig_getLpIndex(),HvCallCfg_Max) * (/*getPaca()->getSecondaryThreadCount() +*/ 1); -} -//=============================================================== -static inline u64 HvLpConfig_getInitPhysicalProcessors(void) + +static inline u64 HvLpConfig_getMsChunks(void) { - return HvCallCfg_getPhysicalProcessors(HvLpConfig_getLpIndex(),HvCallCfg_Init); + return HvCall2(HvCallCfgGetMsChunks, HvLpConfig_getLpIndex(), + HvCallCfg_Cur); } -//=============================================================== -static inline u64 HvLpConfig_getInitLogicalProcessors(void) + +static inline u64 HvLpConfig_getSystemPhysicalProcessors(void) { - return HvCallCfg_getPhysicalProcessors(HvLpConfig_getLpIndex(),HvCallCfg_Init) * (/*getPaca()->getSecondaryThreadCount() +*/ 1); + return HvCall0(HvCallCfgGetSystemPhysicalProcessors); } -//================================================================ -static inline HvLpVirtualLanIndexMap HvLpConfig_getVirtualLanIndexMap(void) + +static inline u64 HvLpConfig_getNumProcsInSharedPool(HvLpSharedPoolIndex sPI) { - return HvCallCfg_getVirtualLanIndexMap(HvLpConfig_getLpIndex_outline()); + return (u16)HvCall1(HvCallCfgGetNumProcsInSharedPool, sPI); } -//=============================================================== -static inline HvLpVirtualLanIndexMap HvLpConfig_getVirtualLanIndexMapForLp(HvLpIndex lp) + +static inline u64 HvLpConfig_getPhysicalProcessors(void) { - return HvCallCfg_getVirtualLanIndexMap(lp); + return HvCall2(HvCallCfgGetPhysicalProcessors, HvLpConfig_getLpIndex(), + HvCallCfg_Cur); } -//================================================================ -static inline HvLpIndex HvLpConfig_getBusOwner(HvBusNumber busNumber) + +static inline HvLpSharedPoolIndex HvLpConfig_getSharedPoolIndex(void) { - return HvCallCfg_getBusOwner(busNumber); + return HvCall1(HvCallCfgGetSharedPoolIndex, HvLpConfig_getLpIndex()); } -//=============================================================== -static inline int HvLpConfig_isBusDedicated(HvBusNumber busNumber) + +static inline u64 HvLpConfig_getSharedProcUnits(void) { - return HvCallCfg_isBusDedicated(busNumber); + return HvCall2(HvCallCfgGetSharedProcUnits, HvLpConfig_getLpIndex(), + HvCallCfg_Cur); } -//================================================================ -static inline HvLpIndexMap HvLpConfig_getBusAllocation(HvBusNumber busNumber) + +static inline u64 HvLpConfig_getMaxSharedProcUnits(void) { - return HvCallCfg_getBusAllocation(busNumber); + return HvCall2(HvCallCfgGetSharedProcUnits, HvLpConfig_getLpIndex(), + HvCallCfg_Max); } 
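The shared-processor helpers retained above pair HvCallCfgGetSharedProcUnits with the current and maximum selectors. A short usage sketch (example_shared_proc_headroom is a hypothetical caller, not part of the patch):

/*
 * Usage sketch, not part of the patch: compare this partition's
 * current shared processor unit allocation with its maximum, using
 * the helpers defined above.
 */
static inline u64 example_shared_proc_headroom(void)
{
	u64 cur = HvLpConfig_getSharedProcUnits();
	u64 max = HvLpConfig_getMaxSharedProcUnits();

	return (max > cur) ? (max - cur) : 0;
}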
-//================================================================ -// returns the absolute real address of the load area -static inline u64 HvLpConfig_getLoadAddress(void) + +static inline u64 HvLpConfig_getMaxPhysicalProcessors(void) { - return itLpNaca.xLoadAreaAddr & 0x7fffffffffffffff; + return HvCall2(HvCallCfgGetPhysicalProcessors, HvLpConfig_getLpIndex(), + HvCallCfg_Max); } -//================================================================ -static inline u64 HvLpConfig_getLoadPages(void) -{ - return itLpNaca.xLoadAreaChunks * HVPAGESPERCHUNK; + +static inline HvLpVirtualLanIndexMap HvLpConfig_getVirtualLanIndexMapForLp( + HvLpIndex lp) +{ + /* + * This is a new function in V5R1 so calls to this on older + * hypervisors will return -1 + */ + u64 retVal = HvCall1(HvCallCfgGetVirtualLanIndexMap, lp); + if (retVal == -1) + retVal = 0; + return retVal; } -//================================================================ -static inline int HvLpConfig_isBusOwnedByThisLp(HvBusNumber busNumber) + +static inline HvLpVirtualLanIndexMap HvLpConfig_getVirtualLanIndexMap(void) { - HvLpIndex busOwner = HvLpConfig_getBusOwner(busNumber); - return (busOwner == HvLpConfig_getLpIndex()); + return HvLpConfig_getVirtualLanIndexMapForLp( + HvLpConfig_getLpIndex_outline()); } -//================================================================ -static inline int HvLpConfig_doLpsCommunicateOnVirtualLan(HvLpIndex lp1, HvLpIndex lp2) + +static inline int HvLpConfig_doLpsCommunicateOnVirtualLan(HvLpIndex lp1, + HvLpIndex lp2) { - HvLpVirtualLanIndexMap virtualLanIndexMap1 = HvCallCfg_getVirtualLanIndexMap( lp1 ); - HvLpVirtualLanIndexMap virtualLanIndexMap2 = HvCallCfg_getVirtualLanIndexMap( lp2 ); + HvLpVirtualLanIndexMap virtualLanIndexMap1 = + HvLpConfig_getVirtualLanIndexMapForLp(lp1); + HvLpVirtualLanIndexMap virtualLanIndexMap2 = + HvLpConfig_getVirtualLanIndexMapForLp(lp2); return ((virtualLanIndexMap1 & virtualLanIndexMap2) != 0); } -//================================================================ -static inline HvLpIndex HvLpConfig_getHostingLpIndex(HvLpIndex lp) + +static inline HvLpIndex HvLpConfig_getHostingLpIndex(HvLpIndex lp) { - return HvCallCfg_getHostingLpIndex(lp); + return HvCall1(HvCallCfgGetHostingLpIndex, lp); } -//================================================================ #endif /* _HVLPCONFIG_H */ diff --git a/include/asm-ppc64/iSeries/HvLpEvent.h b/include/asm-ppc64/iSeries/HvLpEvent.h index 30936e43..865000d 100644 --- a/include/asm-ppc64/iSeries/HvLpEvent.h +++ b/include/asm-ppc64/iSeries/HvLpEvent.h @@ -1,27 +1,24 @@ /* * HvLpEvent.h * Copyright (C) 2001 Mike Corrigan IBM Corporation - * + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. - * + * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * + * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -//====================================================================== -// -// This file contains the class for HV events in the system. 
-// -//===================================================================== +/* This file contains the class for HV events in the system. */ + #ifndef _HVLPEVENT_H #define _HVLPEVENT_H @@ -30,69 +27,70 @@ #include <asm/iSeries/HvTypes.h> #include <asm/iSeries/HvCallEvent.h> -//===================================================================== -// -// HvLpEvent is the structure for Lp Event messages passed between -// partitions through PLIC. -// -//===================================================================== - -struct HvEventFlags -{ - u8 xValid:1; // Indicates a valid request x00-x00 - u8 xRsvd1:4; // Reserved ... - u8 xAckType:1; // Immediate or deferred ... - u8 xAckInd:1; // Indicates if ACK required ... - u8 xFunction:1; // Interrupt or Acknowledge ... +/* + * HvLpEvent is the structure for Lp Event messages passed between + * partitions through PLIC. + */ + +struct HvEventFlags { + u8 xValid:1; /* Indicates a valid request x00-x00 */ + u8 xRsvd1:4; /* Reserved ... */ + u8 xAckType:1; /* Immediate or deferred ... */ + u8 xAckInd:1; /* Indicates if ACK required ... */ + u8 xFunction:1; /* Interrupt or Acknowledge ... */ }; -struct HvLpEvent -{ - struct HvEventFlags xFlags; // Event flags x00-x00 - u8 xType; // Type of message x01-x01 - u16 xSubtype; // Subtype for event x02-x03 - u8 xSourceLp; // Source LP x04-x04 - u8 xTargetLp; // Target LP x05-x05 - u8 xSizeMinus1; // Size of Derived class - 1 x06-x06 - u8 xRc; // RC for Ack flows x07-x07 - u16 xSourceInstanceId; // Source sides instance id x08-x09 - u16 xTargetInstanceId; // Target sides instance id x0A-x0B +struct HvLpEvent { + struct HvEventFlags xFlags; /* Event flags x00-x00 */ + u8 xType; /* Type of message x01-x01 */ + u16 xSubtype; /* Subtype for event x02-x03 */ + u8 xSourceLp; /* Source LP x04-x04 */ + u8 xTargetLp; /* Target LP x05-x05 */ + u8 xSizeMinus1; /* Size of Derived class - 1 x06-x06 */ + u8 xRc; /* RC for Ack flows x07-x07 */ + u16 xSourceInstanceId; /* Source sides instance id x08-x09 */ + u16 xTargetInstanceId; /* Target sides instance id x0A-x0B */ union { - u32 xSubtypeData; // Data usable by the subtype x0C-x0F - u16 xSubtypeDataShort[2]; // Data as 2 shorts - u8 xSubtypeDataChar[4]; // Data as 4 chars + u32 xSubtypeData; /* Data usable by the subtype x0C-x0F */ + u16 xSubtypeDataShort[2]; /* Data as 2 shorts */ + u8 xSubtypeDataChar[4]; /* Data as 4 chars */ } x; - u64 xCorrelationToken; // Unique value for source/type x10-x17 + u64 xCorrelationToken; /* Unique value for source/type x10-x17 */ }; -// Lp Event handler function typedef void (*LpEventHandler)(struct HvLpEvent *, struct pt_regs *); -// Register a handler for an event type -// returns 0 on success -extern int HvLpEvent_registerHandler( HvLpEvent_Type eventType, LpEventHandler hdlr); - -// Unregister a handler for an event type -// This call will sleep until the handler being removed is guaranteed to -// be no longer executing on any CPU. Do not call with locks held. -// -// returns 0 on success -// Unregister will fail if there are any paths open for the type -extern int HvLpEvent_unregisterHandler( HvLpEvent_Type eventType ); +/* Register a handler for an event type - returns 0 on success */ +extern int HvLpEvent_registerHandler(HvLpEvent_Type eventType, + LpEventHandler hdlr); -// Open an Lp Event Path for an event type -// returns 0 on success -// openPath will fail if there is no handler registered for the event type. 
-// The lpIndex specified is the partition index for the target partition -// (for VirtualIo, VirtualLan and SessionMgr) other types specify zero) -extern int HvLpEvent_openPath( HvLpEvent_Type eventType, HvLpIndex lpIndex ); +/* + * Unregister a handler for an event type + * + * This call will sleep until the handler being removed is guaranteed to + * be no longer executing on any CPU. Do not call with locks held. + * + * returns 0 on success + * Unregister will fail if there are any paths open for the type + */ +extern int HvLpEvent_unregisterHandler(HvLpEvent_Type eventType); +/* + * Open an Lp Event Path for an event type + * returns 0 on success + * openPath will fail if there is no handler registered for the event type. + * The lpIndex specified is the partition index for the target partition + * (for VirtualIo, VirtualLan and SessionMgr) other types specify zero) + */ +extern int HvLpEvent_openPath(HvLpEvent_Type eventType, HvLpIndex lpIndex); -// Close an Lp Event Path for a type and partition -// returns 0 on sucess -extern int HvLpEvent_closePath( HvLpEvent_Type eventType, HvLpIndex lpIndex ); +/* + * Close an Lp Event Path for a type and partition + * returns 0 on sucess + */ +extern int HvLpEvent_closePath(HvLpEvent_Type eventType, HvLpIndex lpIndex); #define HvLpEvent_Type_Hypervisor 0 #define HvLpEvent_Type_MachineFac 1 @@ -141,4 +139,4 @@ extern int HvLpEvent_closePath( HvLpEvent_Type eventType, HvLpIndex lpIndex ); #define HvLpDma_Rc_InvalidAddress 4 #define HvLpDma_Rc_InvalidLength 5 -#endif // _HVLPEVENT_H +#endif /* _HVLPEVENT_H */ diff --git a/include/asm-ppc64/iSeries/HvReleaseData.h b/include/asm-ppc64/iSeries/HvReleaseData.h index 183e5e7..01a1f13 100644 --- a/include/asm-ppc64/iSeries/HvReleaseData.h +++ b/include/asm-ppc64/iSeries/HvReleaseData.h @@ -1,17 +1,17 @@ /* * HvReleaseData.h * Copyright (C) 2001 Mike Corrigan IBM Corporation - * + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. - * + * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * + * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA @@ -19,47 +19,45 @@ #ifndef _HVRELEASEDATA_H #define _HVRELEASEDATA_H -//============================================================================= -// -// This control block contains the critical information about the -// release so that it can be changed in the future (ie, the virtual -// address of the OS's NACA). -// +/* + * This control block contains the critical information about the + * release so that it can be changed in the future (ie, the virtual + * address of the OS's NACA). + */ #include <asm/types.h> #include <asm/naca.h> -//============================================================================= -// -// When we IPL a secondary partition, we will check if if the -// secondary xMinPlicVrmIndex > the primary xVrmIndex. -// If it is then this tells PLIC that this secondary is not -// supported running on this "old" of a level of PLIC. 
-// -// Likewise, we will compare the primary xMinSlicVrmIndex to -// the secondary xVrmIndex. -// If the primary xMinSlicVrmDelta > secondary xVrmDelta then we -// know that this PLIC does not support running an OS "that old". -// -//============================================================================= +/* + * When we IPL a secondary partition, we will check if if the + * secondary xMinPlicVrmIndex > the primary xVrmIndex. + * If it is then this tells PLIC that this secondary is not + * supported running on this "old" of a level of PLIC. + * + * Likewise, we will compare the primary xMinSlicVrmIndex to + * the secondary xVrmIndex. + * If the primary xMinSlicVrmDelta > secondary xVrmDelta then we + * know that this PLIC does not support running an OS "that old". + */ -struct HvReleaseData -{ - u32 xDesc; // Descriptor "HvRD" ebcdic x00-x03 - u16 xSize; // Size of this control block x04-x05 - u16 xVpdAreasPtrOffset; // Offset in NACA of ItVpdAreas x06-x07 - struct naca_struct * xSlicNacaAddr; // Virt addr of SLIC NACA x08-x0F - u32 xMsNucDataOffset; // Offset of Linux Mapping Data x10-x13 - u32 xRsvd1; // Reserved x14-x17 - u16 xTagsMode:1; // 0 == tags active, 1 == tags inactive - u16 xAddressSize:1; // 0 == 64-bit, 1 == 32-bit - u16 xNoSharedProcs:1; // 0 == shared procs, 1 == no shared - u16 xNoHMT:1; // 0 == allow HMT, 1 == no HMT - u16 xRsvd2:12; // Reserved x18-x19 - u16 xVrmIndex; // VRM Index of OS image x1A-x1B - u16 xMinSupportedPlicVrmIndex;// Min PLIC level (soft) x1C-x1D - u16 xMinCompatablePlicVrmIndex;// Min PLIC levelP (hard) x1E-x1F - char xVrmName[12]; // Displayable name x20-x2B - char xRsvd3[20]; // Reserved x2C-x3F +struct HvReleaseData { + u32 xDesc; /* Descriptor "HvRD" ebcdic x00-x03 */ + u16 xSize; /* Size of this control block x04-x05 */ + u16 xVpdAreasPtrOffset; /* Offset in NACA of ItVpdAreas x06-x07 */ + struct naca_struct *xSlicNacaAddr; /* Virt addr of SLIC NACA x08-x0F */ + u32 xMsNucDataOffset; /* Offset of Linux Mapping Data x10-x13 */ + u32 xRsvd1; /* Reserved x14-x17 */ + u16 xTagsMode:1; /* 0 == tags active, 1 == tags inactive */ + u16 xAddressSize:1; /* 0 == 64-bit, 1 == 32-bit */ + u16 xNoSharedProcs:1; /* 0 == shared procs, 1 == no shared */ + u16 xNoHMT:1; /* 0 == allow HMT, 1 == no HMT */ + u16 xRsvd2:12; /* Reserved x18-x19 */ + u16 xVrmIndex; /* VRM Index of OS image x1A-x1B */ + u16 xMinSupportedPlicVrmIndex; /* Min PLIC level (soft) x1C-x1D */ + u16 xMinCompatablePlicVrmIndex; /* Min PLIC levelP (hard) x1E-x1F */ + char xVrmName[12]; /* Displayable name x20-x2B */ + char xRsvd3[20]; /* Reserved x2C-x3F */ }; +extern struct HvReleaseData hvReleaseData; + #endif /* _HVRELEASEDATA_H */ diff --git a/include/asm-ppc64/iSeries/HvTypes.h b/include/asm-ppc64/iSeries/HvTypes.h index 3ec49c1..b1ef2b4 100644 --- a/include/asm-ppc64/iSeries/HvTypes.h +++ b/include/asm-ppc64/iSeries/HvTypes.h @@ -1,17 +1,17 @@ /* * HvTypes.h * Copyright (C) 2001 Mike Corrigan IBM Corporation - * + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. - * + * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
- * + * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA @@ -19,74 +19,60 @@ #ifndef _HVTYPES_H #define _HVTYPES_H -//=========================================================================== -// Header File Id -// Name______________: HvTypes.H -// -// Description_______: -// -// General typedefs for the hypervisor. -// -// Declared Class(es): -// -//=========================================================================== +/* + * General typedefs for the hypervisor. + */ #include <asm/types.h> -//------------------------------------------------------------------- -// Typedefs -//------------------------------------------------------------------- typedef u8 HvLpIndex; typedef u16 HvLpInstanceId; -typedef u64 HvLpTOD; -typedef u64 HvLpSystemSerialNum; -typedef u8 HvLpDeviceSerialNum[12]; -typedef u16 HvLpSanHwSet; -typedef u16 HvLpBus; -typedef u16 HvLpBoard; -typedef u16 HvLpCard; -typedef u8 HvLpDeviceType[4]; -typedef u8 HvLpDeviceModel[3]; -typedef u64 HvIoToken; -typedef u8 HvLpName[8]; +typedef u64 HvLpTOD; +typedef u64 HvLpSystemSerialNum; +typedef u8 HvLpDeviceSerialNum[12]; +typedef u16 HvLpSanHwSet; +typedef u16 HvLpBus; +typedef u16 HvLpBoard; +typedef u16 HvLpCard; +typedef u8 HvLpDeviceType[4]; +typedef u8 HvLpDeviceModel[3]; +typedef u64 HvIoToken; +typedef u8 HvLpName[8]; typedef u32 HvIoId; typedef u64 HvRealMemoryIndex; -typedef u32 HvLpIndexMap; // Must hold HvMaxArchitectedLps bits!!! +typedef u32 HvLpIndexMap; /* Must hold HVMAXARCHITECTEDLPS bits!!! */ typedef u16 HvLpVrmIndex; typedef u32 HvXmGenerationId; -typedef u8 HvLpBusPool; -typedef u8 HvLpSharedPoolIndex; +typedef u8 HvLpBusPool; +typedef u8 HvLpSharedPoolIndex; typedef u16 HvLpSharedProcUnitsX100; typedef u8 HvLpVirtualLanIndex; -typedef u16 HvLpVirtualLanIndexMap; // Must hold HvMaxArchitectedVirtualLans bits!!! -typedef u16 HvBusNumber; // Hypervisor Bus Number -typedef u8 HvSubBusNumber; // Hypervisor SubBus Number -typedef u8 HvAgentId; // Hypervisor DevFn +typedef u16 HvLpVirtualLanIndexMap; /* Must hold HVMAXARCHITECTEDVIRTUALLANS bits!!! 
*/ +typedef u16 HvBusNumber; /* Hypervisor Bus Number */ +typedef u8 HvSubBusNumber; /* Hypervisor SubBus Number */ +typedef u8 HvAgentId; /* Hypervisor DevFn */ + +#define HVMAXARCHITECTEDLPS 32 +#define HVMAXARCHITECTEDVIRTUALLANS 16 +#define HVMAXARCHITECTEDVIRTUALDISKS 32 +#define HVMAXARCHITECTEDVIRTUALCDROMS 8 +#define HVMAXARCHITECTEDVIRTUALTAPES 8 +#define HVCHUNKSIZE (256 * 1024) +#define HVPAGESIZE (4 * 1024) +#define HVLPMINMEGSPRIMARY 256 +#define HVLPMINMEGSSECONDARY 64 +#define HVCHUNKSPERMEG 4 +#define HVPAGESPERMEG 256 +#define HVPAGESPERCHUNK 64 -#define HVMAXARCHITECTEDLPS 32 -#define HVMAXARCHITECTEDVIRTUALLANS 16 -#define HVMAXARCHITECTEDVIRTUALDISKS 32 -#define HVMAXARCHITECTEDVIRTUALCDROMS 8 -#define HVMAXARCHITECTEDVIRTUALTAPES 8 -#define HVCHUNKSIZE 256 * 1024 -#define HVPAGESIZE 4 * 1024 -#define HVLPMINMEGSPRIMARY 256 -#define HVLPMINMEGSSECONDARY 64 -#define HVCHUNKSPERMEG 4 -#define HVPAGESPERMEG 256 -#define HVPAGESPERCHUNK 64 - -#define HvMaxArchitectedLps ((HvLpIndex)HVMAXARCHITECTEDLPS) -#define HvMaxArchitectedVirtualLans ((HvLpVirtualLanIndex)16) #define HvLpIndexInvalid ((HvLpIndex)0xff) -//-------------------------------------------------------------------- -// Enums for the sub-components under PLIC -// Used in HvCall and HvPrimaryCall -//-------------------------------------------------------------------- -enum HvCallCompIds -{ +/* + * Enums for the sub-components under PLIC + * Used in HvCall and HvPrimaryCall + */ +enum { HvCallCompId = 0, HvCallCpuCtlsCompId = 1, HvCallCfgCompId = 2, @@ -97,18 +83,18 @@ enum HvCallCompIds HvCallSmCompId = 7, HvCallSpdCompId = 8, HvCallXmCompId = 9, - HvCallRioCompId = 10, + HvCallRioCompId = 10, HvCallRsvd3CompId = 11, HvCallRsvd2CompId = 12, HvCallRsvd1CompId = 13, HvCallMaxCompId = 14, - HvPrimaryCallCompId = 0, + HvPrimaryCallCompId = 0, HvPrimaryCallCfgCompId = 1, - HvPrimaryCallPciCompId = 2, + HvPrimaryCallPciCompId = 2, HvPrimaryCallSmCompId = 3, HvPrimaryCallSpdCompId = 4, HvPrimaryCallXmCompId = 5, - HvPrimaryCallRioCompId = 6, + HvPrimaryCallRioCompId = 6, HvPrimaryCallRsvd7CompId = 7, HvPrimaryCallRsvd6CompId = 8, HvPrimaryCallRsvd5CompId = 9, @@ -116,7 +102,7 @@ enum HvCallCompIds HvPrimaryCallRsvd3CompId = 11, HvPrimaryCallRsvd2CompId = 12, HvPrimaryCallRsvd1CompId = 13, - HvPrimaryCallMaxCompId = HvCallMaxCompId + HvPrimaryCallMaxCompId = HvCallMaxCompId }; struct HvLpBufferList { diff --git a/include/asm-ppc64/iSeries/IoHriMainStore.h b/include/asm-ppc64/iSeries/IoHriMainStore.h index ff00e86..45ed3ea 100644 --- a/include/asm-ppc64/iSeries/IoHriMainStore.h +++ b/include/asm-ppc64/iSeries/IoHriMainStore.h @@ -1,17 +1,17 @@ /* * IoHriMainStore.h * Copyright (C) 2001 Mike Corrigan IBM Corporation - * + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. - * + * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
- * + * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA @@ -21,7 +21,7 @@ #define _IOHRIMAINSTORE_H /* Main Store Vpd for Condor,iStar,sStar */ -struct IoHriMainStoreSegment4 { +struct IoHriMainStoreSegment4 { u8 msArea0Exists:1; u8 msArea1Exists:1; u8 msArea2Exists:1; @@ -51,7 +51,7 @@ struct IoHriMainStoreSegment4 { u8 msArea1HasRiserVpd:1; u8 msArea2HasRiserVpd:1; u8 msArea3HasRiserVpd:1; - u8 reserved5:4; + u8 reserved5:4; u8 reserved6; u16 reserved7; @@ -82,8 +82,8 @@ struct IoHriMainStoreVpdFruData { }; struct IoHriMainStoreAdrRangeBlock { - void * blockStart __attribute((packed)); - void * blockEnd __attribute((packed)); + void *blockStart __attribute((packed)); + void *blockEnd __attribute((packed)); u32 blockProcChipId __attribute((packed)); }; @@ -102,7 +102,7 @@ struct IoHriMainStoreArea4 { u32 procNodeId __attribute((packed)); u32 numAdrRangeBlocks __attribute((packed)); - struct IoHriMainStoreAdrRangeBlock xAdrRangeBlock[MaxAreaAdrRangeBlocks] __attribute((packed)); + struct IoHriMainStoreAdrRangeBlock xAdrRangeBlock[MaxAreaAdrRangeBlocks] __attribute((packed)); struct IoHriMainStoreChipInfo1 chipInfo0 __attribute((packed)); struct IoHriMainStoreChipInfo1 chipInfo1 __attribute((packed)); @@ -113,17 +113,17 @@ struct IoHriMainStoreArea4 { struct IoHriMainStoreChipInfo1 chipInfo6 __attribute((packed)); struct IoHriMainStoreChipInfo1 chipInfo7 __attribute((packed)); - void * msRamAreaArray __attribute((packed)); + void *msRamAreaArray __attribute((packed)); u32 msRamAreaArrayNumEntries __attribute((packed)); u32 msRamAreaArrayEntrySize __attribute((packed)); u32 numaDimmExists __attribute((packed)); u32 numaDimmFunctional __attribute((packed)); - void * numaDimmArray __attribute((packed)); + void *numaDimmArray __attribute((packed)); u32 numaDimmArrayNumEntries __attribute((packed)); u32 numaDimmArrayEntrySize __attribute((packed)); - struct IoHriMainStoreVpdIdData idData __attribute((packed)); + struct IoHriMainStoreVpdIdData idData __attribute((packed)); u64 powerData __attribute((packed)); u64 cardAssemblyPartNum __attribute((packed)); @@ -143,7 +143,7 @@ struct IoHriMainStoreArea4 { }; -struct IoHriMainStoreSegment5 { +struct IoHriMainStoreSegment5 { u16 reserved1; u8 reserved2; u8 msVpdFormat; @@ -151,17 +151,16 @@ struct IoHriMainStoreSegment5 { u32 totalMainStore; u64 maxConfiguredMsAdr; - struct IoHriMainStoreArea4* msAreaArray; + struct IoHriMainStoreArea4 *msAreaArray; u32 msAreaArrayNumEntries; u32 msAreaArrayEntrySize; - u32 msAreaExists; + u32 msAreaExists; u32 msAreaFunctional; u64 reserved3; }; +extern u64 xMsVpd[]; - -#endif // _IOHRIMAINSTORE_H - +#endif /* _IOHRIMAINSTORE_H */ diff --git a/include/asm-ppc64/iSeries/IoHriProcessorVpd.h b/include/asm-ppc64/iSeries/IoHriProcessorVpd.h index 9654338..73b73d8 100644 --- a/include/asm-ppc64/iSeries/IoHriProcessorVpd.h +++ b/include/asm-ppc64/iSeries/IoHriProcessorVpd.h @@ -1,17 +1,17 @@ /* * IoHriProcessorVpd.h * Copyright (C) 2001 Mike Corrigan IBM Corporation - * + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
- * + * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * + * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA @@ -19,16 +19,12 @@ #ifndef _IOHRIPROCESSORVPD_H #define _IOHRIPROCESSORVPD_H -//=================================================================== -// -// This struct maps Processor Vpd that is DMAd to SLIC by CSP -// - #include <asm/types.h> -struct IoHriProcessorVpd -{ - +/* + * This struct maps Processor Vpd that is DMAd to SLIC by CSP + */ +struct IoHriProcessorVpd { u8 xFormat; // VPD format indicator x00-x00 u8 xProcStatus:8; // Processor State x01-x01 u8 xSecondaryThreadCount; // Secondary thread cnt x02-x02 @@ -40,12 +36,12 @@ struct IoHriProcessorVpd u16 xRsvd2; // Reserved x06-x07 u32 xHwNodeId; // Hardware node id x08-x0B u32 xHwProcId; // Hardware processor id x0C-x0F - + u32 xTypeNum; // Card Type/CCIN number x10-x13 u32 xModelNum; // Model/Feature number x14-x17 u64 xSerialNum; // Serial number x18-x1F - char xPartNum[12]; // Book Part or FPU number x20-x2B - char xMfgID[4]; // Manufacturing ID x2C-x2F + char xPartNum[12]; // Book Part or FPU number x20-x2B + char xMfgID[4]; // Manufacturing ID x2C-x2F u32 xProcFreq; // Processor Frequency x30-x33 u32 xTimeBaseFreq; // Time Base Frequency x34-x37 @@ -71,7 +67,7 @@ struct IoHriProcessorVpd u32 xDataL3CacheSizeKB; // L3 data cache size in KB x80-x83 u32 xDataL3CacheLineSize; // L3 data cache block size x84-x87 u64 xRsvd6; // Reserved x88-x8F - + u64 xFruLabel; // Card Location Label x90-x97 u8 xSlotsOnCard; // Slots on card (0=no slots) x98-x98 u8 xPartLocFlag; // Location flag (0-pluggable 1-imbedded) x99-x99 @@ -79,10 +75,12 @@ struct IoHriProcessorVpd u8 xSmartCardPortNo; // Smart card port number x9C-x9C u8 xRsvd7; // Reserved x9D-x9D u16 xFrameIdAndRackUnit; // Frame ID and rack unit adr x9E-x9F - + u8 xRsvd8[24]; // Reserved xA0-xB7 - char xProcSrc[72]; // CSP format SRC xB8-xFF + char xProcSrc[72]; // CSP format SRC xB8-xFF }; +extern struct IoHriProcessorVpd xIoHriProcessorVpd[]; + #endif /* _IOHRIPROCESSORVPD_H */ diff --git a/include/asm-ppc64/iSeries/ItExtVpdPanel.h b/include/asm-ppc64/iSeries/ItExtVpdPanel.h index dee6b12..4c546a8 100644 --- a/include/asm-ppc64/iSeries/ItExtVpdPanel.h +++ b/include/asm-ppc64/iSeries/ItExtVpdPanel.h @@ -1,17 +1,17 @@ /* * ItExtVpdPanel.h * Copyright (C) 2002 Dave Boutcher IBM Corporation - * + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. - * + * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
- * + * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA @@ -20,39 +20,33 @@ #define _ITEXTVPDPANEL_H /* - * - * This struct maps the panel information + * This struct maps the panel information * * Warning: * This data must match the architecture for the panel information - * */ - -/*------------------------------------------------------------------- - * Standard Includes - *------------------------------------------------------------------- -*/ #include <asm/types.h> -struct ItExtVpdPanel -{ - // Definition of the Extended Vpd On Panel Data Area - char systemSerial[8]; - char mfgID[4]; - char reserved1[24]; - char machineType[4]; - char systemID[6]; - char somUniqueCnt[4]; - char serialNumberCount; - char reserved2[7]; - u16 bbu3; - u16 bbu2; - u16 bbu1; - char xLocationLabel[8]; - u8 xRsvd1[6]; - u16 xFrameId; - u8 xRsvd2[48]; +struct ItExtVpdPanel { + /* Definition of the Extended Vpd On Panel Data Area */ + char systemSerial[8]; + char mfgID[4]; + char reserved1[24]; + char machineType[4]; + char systemID[6]; + char somUniqueCnt[4]; + char serialNumberCount; + char reserved2[7]; + u16 bbu3; + u16 bbu2; + u16 bbu1; + char xLocationLabel[8]; + u8 xRsvd1[6]; + u16 xFrameId; + u8 xRsvd2[48]; }; -#endif /* _ITEXTVPDPANEL_H */ +extern struct ItExtVpdPanel xItExtVpdPanel; + +#endif /* _ITEXTVPDPANEL_H */ diff --git a/include/asm-ppc64/iSeries/ItIplParmsReal.h b/include/asm-ppc64/iSeries/ItIplParmsReal.h index 4d8b430..ae3417d 100644 --- a/include/asm-ppc64/iSeries/ItIplParmsReal.h +++ b/include/asm-ppc64/iSeries/ItIplParmsReal.h @@ -1,17 +1,17 @@ /* * ItIplParmsReal.h * Copyright (C) 2001 Mike Corrigan IBM Corporation - * + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. - * + * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * + * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA @@ -19,58 +19,53 @@ #ifndef _ITIPLPARMSREAL_H #define _ITIPLPARMSREAL_H -//============================================================================== -// -// This struct maps the IPL Parameters DMA'd from the SP. -// -// Warning: -// This data must map in exactly 64 bytes and match the architecture for -// the IPL parms -// -//============================================================================= - +/* + * This struct maps the IPL Parameters DMA'd from the SP. + * + * Warning: + * This data must map in exactly 64 bytes and match the architecture for + * the IPL parms + */ -//------------------------------------------------------------------- -// Standard Includes -//------------------------------------------------------------------- #include <asm/types.h> -struct ItIplParmsReal -{ - u8 xFormat; // Defines format of IplParms x00-x00 - u8 xRsvd01:6; // Reserved x01-x01 - u8 xAlternateSearch:1; // Alternate search indicator ... - u8 xUaSupplied:1; // UA Supplied on programmed IPL ... 
- u8 xLsUaFormat; // Format byte for UA x02-x02 - u8 xRsvd02; // Reserved x03-x03 - u32 xLsUa; // LS UA x04-x07 - u32 xUnusedLsLid; // First OS LID to load x08-x0B - u16 xLsBusNumber; // LS Bus Number x0C-x0D - u8 xLsCardAdr; // LS Card Address x0E-x0E - u8 xLsBoardAdr; // LS Board Address x0F-x0F - u32 xRsvd03; // Reserved x10-x13 - u8 xSpcnPresent:1; // SPCN present x14-x14 - u8 xCpmPresent:1; // CPM present ... - u8 xRsvd04:6; // Reserved ... - u8 xRsvd05:4; // Reserved x15-x15 - u8 xKeyLock:4; // Keylock setting ... - u8 xRsvd06:6; // Reserved x16-x16 - u8 xIplMode:2; // Ipl mode (A|B|C|D) ... - u8 xHwIplType; // Fast v slow v slow EC HW IPL x17-x17 - u16 xCpmEnabledIpl:1; // CPM in effect when IPL initiated x18-x19 - u16 xPowerOnResetIpl:1; // Indicate POR condition ... - u16 xMainStorePreserved:1; // Main Storage is preserved ... - u16 xRsvd07:13; // Reserved ... - u16 xIplSource:16; // Ipl source x1A-x1B - u8 xIplReason:8; // Reason for this IPL x1C-x1C - u8 xRsvd08; // Reserved x1D-x1D - u16 xRsvd09; // Reserved x1E-x1F - u16 xSysBoxType; // System Box Type x20-x21 - u16 xSysProcType; // System Processor Type x22-x23 - u32 xRsvd10; // Reserved x24-x27 - u64 xRsvd11; // Reserved x28-x2F - u64 xRsvd12; // Reserved x30-x37 - u64 xRsvd13; // Reserved x38-x3F +struct ItIplParmsReal { + u8 xFormat; // Defines format of IplParms x00-x00 + u8 xRsvd01:6; // Reserved x01-x01 + u8 xAlternateSearch:1; // Alternate search indicator ... + u8 xUaSupplied:1; // UA Supplied on programmed IPL... + u8 xLsUaFormat; // Format byte for UA x02-x02 + u8 xRsvd02; // Reserved x03-x03 + u32 xLsUa; // LS UA x04-x07 + u32 xUnusedLsLid; // First OS LID to load x08-x0B + u16 xLsBusNumber; // LS Bus Number x0C-x0D + u8 xLsCardAdr; // LS Card Address x0E-x0E + u8 xLsBoardAdr; // LS Board Address x0F-x0F + u32 xRsvd03; // Reserved x10-x13 + u8 xSpcnPresent:1; // SPCN present x14-x14 + u8 xCpmPresent:1; // CPM present ... + u8 xRsvd04:6; // Reserved ... + u8 xRsvd05:4; // Reserved x15-x15 + u8 xKeyLock:4; // Keylock setting ... + u8 xRsvd06:6; // Reserved x16-x16 + u8 xIplMode:2; // Ipl mode (A|B|C|D) ... + u8 xHwIplType; // Fast v slow v slow EC HW IPL x17-x17 + u16 xCpmEnabledIpl:1; // CPM in effect when IPL initiatedx18-x19 + u16 xPowerOnResetIpl:1; // Indicate POR condition ... + u16 xMainStorePreserved:1; // Main Storage is preserved ... + u16 xRsvd07:13; // Reserved ... + u16 xIplSource:16; // Ipl source x1A-x1B + u8 xIplReason:8; // Reason for this IPL x1C-x1C + u8 xRsvd08; // Reserved x1D-x1D + u16 xRsvd09; // Reserved x1E-x1F + u16 xSysBoxType; // System Box Type x20-x21 + u16 xSysProcType; // System Processor Type x22-x23 + u32 xRsvd10; // Reserved x24-x27 + u64 xRsvd11; // Reserved x28-x2F + u64 xRsvd12; // Reserved x30-x37 + u64 xRsvd13; // Reserved x38-x3F }; +extern struct ItIplParmsReal xItIplParmsReal; + #endif /* _ITIPLPARMSREAL_H */ diff --git a/include/asm-ppc64/iSeries/ItLpNaca.h b/include/asm-ppc64/iSeries/ItLpNaca.h index 5baffdd..225d017 100644 --- a/include/asm-ppc64/iSeries/ItLpNaca.h +++ b/include/asm-ppc64/iSeries/ItLpNaca.h @@ -1,17 +1,17 @@ /* * ItLpNaca.h * Copyright (C) 2001 Mike Corrigan IBM Corporation - * + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
- * + * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * + * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA @@ -19,18 +19,15 @@ #ifndef _ITLPNACA_H #define _ITLPNACA_H -//============================================================================= -// -// This control block contains the data that is shared between the -// hypervisor (PLIC) and the OS. -// -//============================================================================= +#include <linux/types.h> -struct ItLpNaca -{ -//============================================================================= +/* + * This control block contains the data that is shared between the + * hypervisor (PLIC) and the OS. + */ + +struct ItLpNaca { // CACHE_LINE_1 0x0000 - 0x007F Contains read-only data -//============================================================================= u32 xDesc; // Eye catcher x00-x03 u16 xSize; // Size of this class x04-x05 u16 xIntHdlrOffset; // Offset to IntHdlr array x06-x07 @@ -59,30 +56,25 @@ struct ItLpNaca u64 xLoadAreaAddr; // ER address of load area x28-x2F u32 xLoadAreaChunks; // Chunks for the load area x30-x33 u32 xPaseSysCallCRMask; // Mask used to test CR before x34-x37 - // doing an ASR switch on PASE - // system call. - u64 xSlicSegmentTablePtr; // Pointer to Slic seg table. x38-x3f - u8 xRsvd1_4[64]; // x40-x7F - -//============================================================================= + // doing an ASR switch on PASE + // system call. + u64 xSlicSegmentTablePtr; // Pointer to Slic seg table. x38-x3f + u8 xRsvd1_4[64]; // x40-x7F + // CACHE_LINE_2 0x0080 - 0x00FF Contains local read-write data -//============================================================================= u8 xRsvd2_0[128]; // Reserved x00-x7F -//============================================================================= // CACHE_LINE_3-6 0x0100 - 0x02FF Contains LP Queue indicators -// NB: Padding required to keep xInterrruptHdlr at x300 which is required +// NB: Padding required to keep xInterrruptHdlr at x300 which is required // for v4r4 PLIC. 
-//============================================================================= u8 xOldLpQueue[128]; // LP Queue needed for v4r4 100-17F u8 xRsvd3_0[384]; // Reserved 180-2FF -//============================================================================= + // CACHE_LINE_7-8 0x0300 - 0x03FF Contains the address of the OS interrupt // handlers -//============================================================================= u64 xInterruptHdlr[32]; // Interrupt handlers 300-x3FF }; -//============================================================================= +extern struct ItLpNaca itLpNaca; #endif /* _ITLPNACA_H */ diff --git a/include/asm-ppc64/iSeries/ItLpQueue.h b/include/asm-ppc64/iSeries/ItLpQueue.h index 4f4dde2..393299e 100644 --- a/include/asm-ppc64/iSeries/ItLpQueue.h +++ b/include/asm-ppc64/iSeries/ItLpQueue.h @@ -1,17 +1,17 @@ /* * ItLpQueue.h * Copyright (C) 2001 Mike Corrigan IBM Corporation - * + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. - * + * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * + * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA @@ -19,54 +19,54 @@ #ifndef _ITLPQUEUE_H #define _ITLPQUEUE_H -//============================================================================= -// -// This control block defines the simple LP queue structure that is -// shared between the hypervisor (PLIC) and the OS in order to send -// events to an LP. -// +/* + * This control block defines the simple LP queue structure that is + * shared between the hypervisor (PLIC) and the OS in order to send + * events to an LP. + */ #include <asm/types.h> #include <asm/ptrace.h> struct HvLpEvent; -#define ITMaxLpQueues 8 +#define ITMaxLpQueues 8 #define NotUsed 0 // Queue will not be used by PLIC #define DedicatedIo 1 // Queue dedicated to IO processor specified #define DedicatedLp 2 // Queue dedicated to LP specified #define Shared 3 // Queue shared for both IO and LP -#define LpEventStackSize 4096 -#define LpEventMaxSize 256 -#define LpEventAlign 64 +#define LpEventStackSize 4096 +#define LpEventMaxSize 256 +#define LpEventAlign 64 -struct ItLpQueue -{ -// -// The xSlicCurEventPtr is the pointer to the next event stack entry that will -// become valid. The OS must peek at this entry to determine if it is valid. -// PLIC will set the valid indicator as the very last store into that entry. -// -// When the OS has completed processing of the event then it will mark the event -// as invalid so that PLIC knows it can store into that event location again. -// -// If the event stack fills and there are overflow events, then PLIC will set -// the xPlicOverflowIntPending flag in which case the OS will have to fetch the -// additional LP events once they have drained the event stack. -// -// The first 16-bytes are known by both the OS and PLIC. The remainder of the -// cache line is for use by the OS. 
-// -//============================================================================= +struct ItLpQueue { +/* + * The xSlicCurEventPtr is the pointer to the next event stack entry + * that will become valid. The OS must peek at this entry to determine + * if it is valid. PLIC will set the valid indicator as the very last + * store into that entry. + * + * When the OS has completed processing of the event then it will mark + * the event as invalid so that PLIC knows it can store into that event + * location again. + * + * If the event stack fills and there are overflow events, then PLIC + * will set the xPlicOverflowIntPending flag in which case the OS will + * have to fetch the additional LP events once they have drained the + * event stack. + * + * The first 16-bytes are known by both the OS and PLIC. The remainder + * of the cache line is for use by the OS. + */ u8 xPlicOverflowIntPending;// 0x00 Overflow events are pending u8 xPlicStatus; // 0x01 DedicatedIo or DedicatedLp or NotUsed u16 xSlicLogicalProcIndex; // 0x02 Logical Proc Index for correlation u8 xPlicRsvd[12]; // 0x04 - char* xSlicCurEventPtr; // 0x10 - char* xSlicLastValidEventPtr; // 0x18 - char* xSlicEventStackPtr; // 0x20 + char *xSlicCurEventPtr; // 0x10 + char *xSlicLastValidEventPtr; // 0x18 + char *xSlicEventStackPtr; // 0x20 u8 xIndex; // 0x28 unique sequential index. u8 xSlicRsvd[3]; // 0x29-2b u32 xInUseWord; // 0x2C @@ -76,17 +76,9 @@ struct ItLpQueue extern struct ItLpQueue xItLpQueue; -extern struct HvLpEvent * ItLpQueue_getNextLpEvent( struct ItLpQueue * ); -extern int ItLpQueue_isLpIntPending( struct ItLpQueue * ); -extern unsigned ItLpQueue_process( struct ItLpQueue *, struct pt_regs * ); -extern void ItLpQueue_clearValid( struct HvLpEvent * ); - -static __inline__ void process_iSeries_events( void ) -{ - __asm__ __volatile__ ( - " li 0,0x5555 \n\ - sc" - : : : "r0", "r3" ); -} +extern struct HvLpEvent *ItLpQueue_getNextLpEvent(struct ItLpQueue *); +extern int ItLpQueue_isLpIntPending(struct ItLpQueue *); +extern unsigned ItLpQueue_process(struct ItLpQueue *, struct pt_regs *); +extern void ItLpQueue_clearValid(struct HvLpEvent *); #endif /* _ITLPQUEUE_H */ diff --git a/include/asm-ppc64/iSeries/ItLpRegSave.h b/include/asm-ppc64/iSeries/ItLpRegSave.h index dafc4c8..1b3087e 100644 --- a/include/asm-ppc64/iSeries/ItLpRegSave.h +++ b/include/asm-ppc64/iSeries/ItLpRegSave.h @@ -1,17 +1,17 @@ /* * ItLpRegSave.h * Copyright (C) 2001 Mike Corrigan IBM Corporation - * + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. - * + * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
- * + * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA @@ -19,33 +19,30 @@ #ifndef _ITLPREGSAVE_H #define _ITLPREGSAVE_H -//===================================================================================== -// -// This control block contains the data that is shared between PLIC -// and the OS -// -// +/* + * This control block contains the data that is shared between PLIC + * and the OS + */ -struct ItLpRegSave -{ +struct ItLpRegSave { u32 xDesc; // Eye catcher "LpRS" ebcdic 000-003 u16 xSize; // Size of this class 004-005 u8 xInUse; // Area is live 006-007 - u8 xRsvd1[9]; // Reserved 007-00F + u8 xRsvd1[9]; // Reserved 007-00F - u8 xFixedRegSave[352]; // Fixed Register Save Area 010-16F + u8 xFixedRegSave[352]; // Fixed Register Save Area 010-16F u32 xCTRL; // Control Register 170-173 - u32 xDEC; // Decrementer 174-177 + u32 xDEC; // Decrementer 174-177 u32 xFPSCR; // FP Status and Control Reg 178-17B u32 xPVR; // Processor Version Number 17C-17F - + u64 xMMCR0; // Monitor Mode Control Reg 0 180-187 u32 xPMC1; // Perf Monitor Counter 1 188-18B u32 xPMC2; // Perf Monitor Counter 2 18C-18F u32 xPMC3; // Perf Monitor Counter 3 190-193 u32 xPMC4; // Perf Monitor Counter 4 194-197 u32 xPIR; // Processor ID Reg 198-19B - + u32 xMMCR1; // Monitor Mode Control Reg 1 19C-19F u32 xMMCRA; // Monitor Mode Control Reg A 1A0-1A3 u32 xPMC5; // Perf Monitor Counter 5 1A4-1A7 @@ -57,17 +54,17 @@ struct ItLpRegSave u32 xRsvd; // Reserved 1BC-1BF u64 xACCR; // Address Compare Control Reg 1C0-1C7 - u64 xIMR; // Instruction Match Register 1C8-1CF - u64 xSDR1; // Storage Description Reg 1 1D0-1D7 + u64 xIMR; // Instruction Match Register 1C8-1CF + u64 xSDR1; // Storage Description Reg 1 1D0-1D7 u64 xSPRG0; // Special Purpose Reg General0 1D8-1DF u64 xSPRG1; // Special Purpose Reg General1 1E0-1E7 u64 xSPRG2; // Special Purpose Reg General2 1E8-1EF u64 xSPRG3; // Special Purpose Reg General3 1F0-1F7 u64 xTB; // Time Base Register 1F8-1FF - + u64 xFPR[32]; // Floating Point Registers 200-2FF - u64 xMSR; // Machine State Register 300-307 + u64 xMSR; // Machine State Register 300-307 u64 xNIA; // Next Instruction Address 308-30F u64 xDABR; // Data Address Breakpoint Reg 310-317 @@ -76,8 +73,8 @@ struct ItLpRegSave u64 xHID0; // HW Implementation Dependent0 320-327 u64 xHID4; // HW Implementation Dependent4 328-32F - u64 xSCOMd; // SCON Data Reg (SPRG4) 330-337 - u64 xSCOMc; // SCON Command Reg (SPRG5) 338-33F + u64 xSCOMd; // SCON Data Reg (SPRG4) 330-337 + u64 xSCOMc; // SCON Command Reg (SPRG5) 338-33F u64 xSDAR; // Sample Data Address Register 340-347 u64 xSIAR; // Sample Inst Address Register 348-34F diff --git a/include/asm-ppc64/iSeries/ItSpCommArea.h b/include/asm-ppc64/iSeries/ItSpCommArea.h index f1b56f9..5535f82 100644 --- a/include/asm-ppc64/iSeries/ItSpCommArea.h +++ b/include/asm-ppc64/iSeries/ItSpCommArea.h @@ -1,29 +1,27 @@ /* * ItSpCommArea.h * Copyright (C) 2001 Mike Corrigan IBM Corporation - * + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. - * + * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the * GNU General Public License for more details. - * + * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - #ifndef _ITSPCOMMAREA_H #define _ITSPCOMMAREA_H -struct SpCommArea -{ +struct SpCommArea { u32 xDesc; // Descriptor (only in new formats) 000-003 u8 xFormat; // Format (only in new formats) 004-004 u8 xRsvd1[11]; // Reserved 005-00F diff --git a/include/asm-ppc64/iSeries/ItVpdAreas.h b/include/asm-ppc64/iSeries/ItVpdAreas.h index d120439..71b3ad2 100644 --- a/include/asm-ppc64/iSeries/ItVpdAreas.h +++ b/include/asm-ppc64/iSeries/ItVpdAreas.h @@ -1,17 +1,17 @@ /* * ItVpdAreas.h * Copyright (C) 2001 Mike Corrigan IBM Corporation - * + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. - * + * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * + * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA @@ -19,78 +19,71 @@ #ifndef _ITVPDAREAS_H #define _ITVPDAREAS_H -//===================================================================================== -// -// This file defines the address and length of all of the VPD area passed to -// the OS from PLIC (most of which start from the SP). -// +/* + * This file defines the address and length of all of the VPD area passed to + * the OS from PLIC (most of which start from the SP). + */ #include <asm/types.h> -// VPD Entry index is carved in stone - cannot be changed (easily). -#define ItVpdCecVpd 0 -#define ItVpdDynamicSpace 1 -#define ItVpdExtVpd 2 -#define ItVpdExtVpdOnPanel 3 -#define ItVpdFirstPaca 4 -#define ItVpdIoVpd 5 -#define ItVpdIplParms 6 -#define ItVpdMsVpd 7 -#define ItVpdPanelVpd 8 -#define ItVpdLpNaca 9 -#define ItVpdBackplaneAndMaybeClockCardVpd 10 -#define ItVpdRecoveryLogBuffer 11 -#define ItVpdSpCommArea 12 -#define ItVpdSpLogBuffer 13 -#define ItVpdSpLogBufferSave 14 -#define ItVpdSpCardVpd 15 -#define ItVpdFirstProcVpd 16 -#define ItVpdApModelVpd 17 -#define ItVpdClockCardVpd 18 -#define ItVpdBusExtCardVpd 19 -#define ItVpdProcCapacityVpd 20 -#define ItVpdInteractiveCapacityVpd 21 -#define ItVpdFirstSlotLabel 22 -#define ItVpdFirstLpQueue 23 -#define ItVpdFirstL3CacheVpd 24 -#define ItVpdFirstProcFruVpd 25 - -#define ItVpdMaxEntries 26 +/* VPD Entry index is carved in stone - cannot be changed (easily). 
*/ +#define ItVpdCecVpd 0 +#define ItVpdDynamicSpace 1 +#define ItVpdExtVpd 2 +#define ItVpdExtVpdOnPanel 3 +#define ItVpdFirstPaca 4 +#define ItVpdIoVpd 5 +#define ItVpdIplParms 6 +#define ItVpdMsVpd 7 +#define ItVpdPanelVpd 8 +#define ItVpdLpNaca 9 +#define ItVpdBackplaneAndMaybeClockCardVpd 10 +#define ItVpdRecoveryLogBuffer 11 +#define ItVpdSpCommArea 12 +#define ItVpdSpLogBuffer 13 +#define ItVpdSpLogBufferSave 14 +#define ItVpdSpCardVpd 15 +#define ItVpdFirstProcVpd 16 +#define ItVpdApModelVpd 17 +#define ItVpdClockCardVpd 18 +#define ItVpdBusExtCardVpd 19 +#define ItVpdProcCapacityVpd 20 +#define ItVpdInteractiveCapacityVpd 21 +#define ItVpdFirstSlotLabel 22 +#define ItVpdFirstLpQueue 23 +#define ItVpdFirstL3CacheVpd 24 +#define ItVpdFirstProcFruVpd 25 +#define ItVpdMaxEntries 26 -#define ItDmaMaxEntries 10 +#define ItDmaMaxEntries 10 -#define ItVpdAreasMaxSlotLabels 192 +#define ItVpdAreasMaxSlotLabels 192 -struct SlicVpdAdrs { - u32 pad1; - void * vpdAddr; +struct ItVpdAreas { + u32 xSlicDesc; // Descriptor 000-003 + u16 xSlicSize; // Size of this control block 004-005 + u16 xPlicAdjustVpdLens:1; // Flag to indicate new interface006-007 + u16 xRsvd1:15; // Reserved bits ... + u16 xSlicVpdEntries; // Number of VPD entries 008-009 + u16 xSlicDmaEntries; // Number of DMA entries 00A-00B + u16 xSlicMaxLogicalProcs; // Maximum logical processors 00C-00D + u16 xSlicMaxPhysicalProcs; // Maximum physical processors 00E-00F + u16 xSlicDmaToksOffset; // Offset into this of array 010-011 + u16 xSlicVpdAdrsOffset; // Offset into this of array 012-013 + u16 xSlicDmaLensOffset; // Offset into this of array 014-015 + u16 xSlicVpdLensOffset; // Offset into this of array 016-017 + u16 xSlicMaxSlotLabels; // Maximum number of slot labels018-019 + u16 xSlicMaxLpQueues; // Maximum number of LP Queues 01A-01B + u8 xRsvd2[4]; // Reserved 01C-01F + u64 xRsvd3[12]; // Reserved 020-07F + u32 xPlicDmaLens[ItDmaMaxEntries];// Array of DMA lengths 080-0A7 + u32 xPlicDmaToks[ItDmaMaxEntries];// Array of DMA tokens 0A8-0CF + u32 xSlicVpdLens[ItVpdMaxEntries];// Array of VPD lengths 0D0-12F + void *xSlicVpdAdrs[ItVpdMaxEntries];// Array of VPD buffers 130-1EF }; - -struct ItVpdAreas -{ - u32 xSlicDesc; // Descriptor 000-003 - u16 xSlicSize; // Size of this control block 004-005 - u16 xPlicAdjustVpdLens:1; // Flag to indicate new interface 006-007 - u16 xRsvd1:15; // Reserved bits ... 
- u16 xSlicVpdEntries; // Number of VPD entries 008-009 - u16 xSlicDmaEntries; // Number of DMA entries 00A-00B - u16 xSlicMaxLogicalProcs; // Maximum logical processors 00C-00D - u16 xSlicMaxPhysicalProcs; // Maximum physical processors 00E-00F - u16 xSlicDmaToksOffset; // Offset into this of array 010-011 - u16 xSlicVpdAdrsOffset; // Offset into this of array 012-013 - u16 xSlicDmaLensOffset; // Offset into this of array 014-015 - u16 xSlicVpdLensOffset; // Offset into this of array 016-017 - u16 xSlicMaxSlotLabels; // Maximum number of slot labels 018-019 - u16 xSlicMaxLpQueues; // Maximum number of LP Queues 01A-01B - u8 xRsvd2[4]; // Reserved 01C-01F - u64 xRsvd3[12]; // Reserved 020-07F - u32 xPlicDmaLens[ItDmaMaxEntries];// Array of DMA lengths 080-0A7 - u32 xPlicDmaToks[ItDmaMaxEntries];// Array of DMA tokens 0A8-0CF - u32 xSlicVpdLens[ItVpdMaxEntries];// Array of VPD lengths 0D0-12F - void * xSlicVpdAdrs[ItVpdMaxEntries];// Array of VPD buffers 130-1EF -}; +extern struct ItVpdAreas itVpdAreas; #endif /* _ITVPDAREAS_H */ diff --git a/include/asm-ppc64/iSeries/LparData.h b/include/asm-ppc64/iSeries/LparData.h deleted file mode 100644 index e54f3b6..0000000 --- a/include/asm-ppc64/iSeries/LparData.h +++ /dev/null @@ -1,49 +0,0 @@ -/* - * LparData.h - * Copyright (C) 2001 Mike Corrigan IBM Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef _LPARDATA_H -#define _LPARDATA_H - -#include <asm/types.h> -#include <asm/page.h> -#include <asm/abs_addr.h> - -#include <asm/iSeries/ItLpNaca.h> -#include <asm/iSeries/ItLpRegSave.h> -#include <asm/iSeries/HvReleaseData.h> -#include <asm/iSeries/LparMap.h> -#include <asm/iSeries/ItVpdAreas.h> -#include <asm/iSeries/ItIplParmsReal.h> -#include <asm/iSeries/ItExtVpdPanel.h> -#include <asm/iSeries/ItLpQueue.h> -#include <asm/iSeries/IoHriProcessorVpd.h> - -extern struct LparMap xLparMap; -extern struct HvReleaseData hvReleaseData; -extern struct ItLpNaca itLpNaca; -extern struct ItIplParmsReal xItIplParmsReal; -extern struct ItExtVpdPanel xItExtVpdPanel; -extern struct IoHriProcessorVpd xIoHriProcessorVpd[]; -extern struct ItLpQueue xItLpQueue; -extern struct ItVpdAreas itVpdAreas; -extern u64 xMsVpd[]; -extern struct msChunks msChunks; - - -#endif /* _LPARDATA_H */ diff --git a/include/asm-ppc64/iSeries/LparMap.h b/include/asm-ppc64/iSeries/LparMap.h index 075205b..038e5df 100644 --- a/include/asm-ppc64/iSeries/LparMap.h +++ b/include/asm-ppc64/iSeries/LparMap.h @@ -1,17 +1,17 @@ /* * LparMap.h * Copyright (C) 2001 Mike Corrigan IBM Corporation - * + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
- * + * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * + * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA @@ -21,13 +21,14 @@ #include <asm/types.h> -/* The iSeries hypervisor will set up mapping for one or more +/* + * The iSeries hypervisor will set up mapping for one or more * ESID/VSID pairs (in SLB/segment registers) and will set up * mappings of one or more ranges of pages to VAs. * We will have the hypervisor set up the ESID->VSID mapping * for the four kernel segments (C-F). With shared processors, * the hypervisor will clear all segment registers and reload - * these four whenever the processor is switched from one + * these four whenever the processor is switched from one * partition to another. */ @@ -38,30 +39,31 @@ * need to be located within the load area (if the total partition size * is 64 MB), but cannot be mapped. Typically, this should specify * to map half (32 MB) of the load area. - * - * The hypervisor will set up page table entries for the number of + * + * The hypervisor will set up page table entries for the number of * pages specified. * * In 32-bit mode, the hypervisor will load all four of the - * segment registers (identified by the low-order four bits of the + * segment registers (identified by the low-order four bits of the * Esid field. In 64-bit mode, the hypervisor will load one SLB * entry to map the Esid to the Vsid. */ -// Hypervisor initially maps 32MB of the load area -#define HvPagesToMap 8192 +/* Hypervisor initially maps 32MB of the load area */ +#define HvPagesToMap 8192 -struct LparMap -{ - u64 xNumberEsids; // Number of ESID/VSID pairs (1) - u64 xNumberRanges; // Number of VA ranges to map (1) - u64 xSegmentTableOffs; // Page number within load area of seg table (0) - u64 xRsvd[5]; // Reserved (0) - u64 xKernelEsid; // Esid used to map kernel load (0x0C00000000) - u64 xKernelVsid; // Vsid used to map kernel load (0x0C00000000) - u64 xPages; // Number of pages to be mapped (8192) - u64 xOffset; // Offset from start of load area (0) - u64 xVPN; // Virtual Page Number (0x000C000000000000) +struct LparMap { + u64 xNumberEsids; // Number of ESID/VSID pairs (1) + u64 xNumberRanges; // Number of VA ranges to map (1) + u64 xSegmentTableOffs; // Page number within load area of seg table (0) + u64 xRsvd[5]; + u64 xKernelEsid; // Esid used to map kernel load (0x0C00000000) + u64 xKernelVsid; // Vsid used to map kernel load (0x0C00000000) + u64 xPages; // Number of pages to be mapped (8192) + u64 xOffset; // Offset from start of load area (0) + u64 xVPN; // Virtual Page Number (0x000C000000000000) }; +extern struct LparMap xLparMap; + #endif /* _LPARMAP_H */ diff --git a/include/asm-ppc64/iSeries/XmPciLpEvent.h b/include/asm-ppc64/iSeries/XmPciLpEvent.h deleted file mode 100644 index a3d27f1..0000000 --- a/include/asm-ppc64/iSeries/XmPciLpEvent.h +++ /dev/null @@ -1,18 +0,0 @@ - -#ifndef __XMPCILPEVENT_H__ -#define __XMPCILPEVENT_H__ - - -#ifdef __cplusplus -extern "C" { -#endif - -int XmPciLpEvent_init(void); -void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq); - - -#ifdef __cplusplus -} -#endif - -#endif /* __XMPCILPEVENT_H__ */ diff --git a/include/asm-ppc64/iSeries/iSeries_io.h 
b/include/asm-ppc64/iSeries/iSeries_io.h index f52b759..9f79413 100644 --- a/include/asm-ppc64/iSeries/iSeries_io.h +++ b/include/asm-ppc64/iSeries/iSeries_io.h @@ -5,32 +5,33 @@ #ifdef CONFIG_PPC_ISERIES #include <linux/types.h> -/************************************************************************/ -/* File iSeries_io.h created by Allan Trautman on Thu Dec 28 2000. */ -/************************************************************************/ -/* Remaps the io.h for the iSeries Io */ -/* Copyright (C) 20yy Allan H Trautman, IBM Corporation */ -/* */ -/* This program is free software; you can redistribute it and/or modify */ -/* it under the terms of the GNU General Public License as published by */ -/* the Free Software Foundation; either version 2 of the License, or */ -/* (at your option) any later version. */ -/* */ -/* This program is distributed in the hope that it will be useful, */ -/* but WITHOUT ANY WARRANTY; without even the implied warranty of */ -/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ -/* GNU General Public License for more details. */ -/* */ -/* You should have received a copy of the GNU General Public License */ -/* along with this program; if not, write to the: */ -/* Free Software Foundation, Inc., */ -/* 59 Temple Place, Suite 330, */ -/* Boston, MA 02111-1307 USA */ -/************************************************************************/ -/* Change Activity: */ -/* Created December 28, 2000 */ -/* End Change Activity */ -/************************************************************************/ +/* + * File iSeries_io.h created by Allan Trautman on Thu Dec 28 2000. + * + * Remaps the io.h for the iSeries Io + * Copyright (C) 2000 Allan H Trautman, IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the: + * Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, + * Boston, MA 02111-1307 USA + * + * Change Activity: + * Created December 28, 2000 + * End Change Activity + */ + extern u8 iSeries_Read_Byte(const volatile void __iomem * IoAddress); extern u16 iSeries_Read_Word(const volatile void __iomem * IoAddress); extern u32 iSeries_Read_Long(const volatile void __iomem * IoAddress); @@ -39,8 +40,10 @@ extern void iSeries_Write_Word(u16 IoData, volatile void __iomem * IoAddress); extern void iSeries_Write_Long(u32 IoData, volatile void __iomem * IoAddress); extern void iSeries_memset_io(volatile void __iomem *dest, char x, size_t n); -extern void iSeries_memcpy_toio(volatile void __iomem *dest, void *source, size_t n); -extern void iSeries_memcpy_fromio(void *dest, const volatile void __iomem *source, size_t n); +extern void iSeries_memcpy_toio(volatile void __iomem *dest, void *source, + size_t n); +extern void iSeries_memcpy_fromio(void *dest, + const volatile void __iomem *source, size_t n); #endif /* CONFIG_PPC_ISERIES */ #endif /* _ISERIES_IO_H */ diff --git a/include/asm-ppc64/iSeries/iSeries_irq.h b/include/asm-ppc64/iSeries/iSeries_irq.h index ff8dded..6c9767a 100644 --- a/include/asm-ppc64/iSeries/iSeries_irq.h +++ b/include/asm-ppc64/iSeries/iSeries_irq.h @@ -1,19 +1,8 @@ #ifndef __ISERIES_IRQ_H__ #define __ISERIES_IRQ_H__ -#ifdef __cplusplus -extern "C" { -#endif - -void iSeries_init_IRQ(void); -int iSeries_allocate_IRQ(HvBusNumber, HvSubBusNumber, HvAgentId); -int iSeries_assign_IRQ(int, HvBusNumber, HvSubBusNumber, HvAgentId); -void iSeries_activate_IRQs(void); - -int XmPciLpEvent_init(void); - -#ifdef __cplusplus -} -#endif +extern void iSeries_init_IRQ(void); +extern int iSeries_allocate_IRQ(HvBusNumber, HvSubBusNumber, HvAgentId); +extern void iSeries_activate_IRQs(void); #endif /* __ISERIES_IRQ_H__ */ diff --git a/include/asm-ppc64/iSeries/iSeries_pci.h b/include/asm-ppc64/iSeries/iSeries_pci.h index 5769cff..575f611 100644 --- a/include/asm-ppc64/iSeries/iSeries_pci.h +++ b/include/asm-ppc64/iSeries/iSeries_pci.h @@ -1,112 +1,88 @@ #ifndef _ISERIES_64_PCI_H #define _ISERIES_64_PCI_H -/************************************************************************/ -/* File iSeries_pci.h created by Allan Trautman on Tue Feb 20, 2001. */ -/************************************************************************/ -/* Define some useful macros for the iSeries pci routines. */ -/* Copyright (C) 2001 Allan H Trautman, IBM Corporation */ -/* */ -/* This program is free software; you can redistribute it and/or modify */ -/* it under the terms of the GNU General Public License as published by */ -/* the Free Software Foundation; either version 2 of the License, or */ -/* (at your option) any later version. */ -/* */ -/* This program is distributed in the hope that it will be useful, */ -/* but WITHOUT ANY WARRANTY; without even the implied warranty of */ -/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ -/* GNU General Public License for more details. 
*/ -/* */ -/* You should have received a copy of the GNU General Public License */ -/* along with this program; if not, write to the: */ -/* Free Software Foundation, Inc., */ -/* 59 Temple Place, Suite 330, */ -/* Boston, MA 02111-1307 USA */ -/************************************************************************/ -/* Change Activity: */ -/* Created Feb 20, 2001 */ -/* Added device reset, March 22, 2001 */ -/* Ported to ppc64, May 25, 2001 */ -/* End Change Activity */ -/************************************************************************/ +/* + * File iSeries_pci.h created by Allan Trautman on Tue Feb 20, 2001. + * + * Define some useful macros for the iSeries pci routines. + * Copyright (C) 2001 Allan H Trautman, IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the: + * Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, + * Boston, MA 02111-1307 USA + * + * Change Activity: + * Created Feb 20, 2001 + * Added device reset, March 22, 2001 + * Ported to ppc64, May 25, 2001 + * End Change Activity + */ #include <asm/iSeries/HvCallPci.h> #include <asm/abs_addr.h> -struct pci_dev; /* For Forward Reference */ +struct pci_dev; /* For Forward Reference */ struct iSeries_Device_Node; -/************************************************************************/ -/* Gets iSeries Bus, SubBus, DevFn using iSeries_Device_Node structure */ -/************************************************************************/ +/* + * Gets iSeries Bus, SubBus, DevFn using iSeries_Device_Node structure + */ #define ISERIES_BUS(DevPtr) DevPtr->DsaAddr.Dsa.busNumber #define ISERIES_SUBBUS(DevPtr) DevPtr->DsaAddr.Dsa.subBusNumber #define ISERIES_DEVICE(DevPtr) DevPtr->DsaAddr.Dsa.deviceId #define ISERIES_DSA(DevPtr) DevPtr->DsaAddr.DsaAddr -#define ISERIES_DEVFUN(DevPtr) DevPtr->DevFn -#define ISERIES_DEVNODE(PciDev) ((struct iSeries_Device_Node*)PciDev->sysdata) +#define ISERIES_DEVNODE(PciDev) ((struct iSeries_Device_Node *)PciDev->sysdata) #define EADsMaxAgents 7 -/************************************************************************/ -/* Decodes Linux DevFn to iSeries DevFn, bridge device, or function. */ -/* For Linux, see PCI_SLOT and PCI_FUNC in include/linux/pci.h */ -/************************************************************************/ - -#define ISERIES_PCI_AGENTID(idsel,func) ((idsel & 0x0F) << 4) | (func & 0x07) -#define ISERIES_ENCODE_DEVICE(agentid) ((0x10) | ((agentid&0x20)>>2) | (agentid&07)) - -#define ISERIES_GET_DEVICE_FROM_SUBBUS(subbus) ((subbus >> 5) & 0x7) -#define ISERIES_GET_FUNCTION_FROM_SUBBUS(subbus) ((subbus >> 2) & 0x7) - /* - * N.B. the ISERIES_DECODE_* macros are not used anywhere, and I think - * the 0x71 (at least) must be wrong - 0x78 maybe? -- paulus. + * Decodes Linux DevFn to iSeries DevFn, bridge device, or function. 
+ * For Linux, see PCI_SLOT and PCI_FUNC in include/linux/pci.h */ -#define ISERIES_DECODE_DEVFN(linuxdevfn) (((linuxdevfn & 0x71) << 1) | (linuxdevfn & 0x07)) -#define ISERIES_DECODE_DEVICE(linuxdevfn) (((linuxdevfn & 0x38) >> 3) |(((linuxdevfn & 0x40) >> 2) + 0x10)) -#define ISERIES_DECODE_FUNCTION(linuxdevfn) (linuxdevfn & 0x07) -/************************************************************************/ -/* Converts Virtual Address to Real Address for Hypervisor calls */ -/************************************************************************/ +#define ISERIES_PCI_AGENTID(idsel, func) \ + (((idsel & 0x0F) << 4) | (func & 0x07)) +#define ISERIES_ENCODE_DEVICE(agentid) \ + ((0x10) | ((agentid & 0x20) >> 2) | (agentid & 0x07)) -#define ISERIES_HV_ADDR(virtaddr) (0x8000000000000000 | virt_to_abs(virtaddr)) +#define ISERIES_GET_DEVICE_FROM_SUBBUS(subbus) ((subbus >> 5) & 0x7) +#define ISERIES_GET_FUNCTION_FROM_SUBBUS(subbus) ((subbus >> 2) & 0x7) -/************************************************************************/ -/* iSeries Device Information */ -/************************************************************************/ +/* + * Converts Virtual Address to Real Address for Hypervisor calls + */ +#define ISERIES_HV_ADDR(virtaddr) \ + (0x8000000000000000 | virt_to_abs(virtaddr)) +/* + * iSeries Device Information + */ struct iSeries_Device_Node { struct list_head Device_List; - struct pci_dev* PciDev; /* Pointer to pci_dev structure*/ - union HvDsaMap DsaAddr; /* Direct Select Address */ - /* busNumber,subBusNumber, */ - /* deviceId, barNumber */ - HvAgentId AgentId; /* Hypervisor DevFn */ - int DevFn; /* Linux devfn */ - int BarOffset; - int Irq; /* Assigned IRQ */ - int ReturnCode; /* Return Code Holder */ - int IoRetry; /* Current Retry Count */ - int Flags; /* Possible flags(disable/bist)*/ - u16 Vendor; /* Vendor ID */ - u8 LogicalSlot; /* Hv Slot Index for Tces */ - struct iommu_table* iommu_table;/* Device TCE Table */ - u8 PhbId; /* Phb Card is on. */ - u16 Board; /* Board Number */ - u8 FrameId; /* iSeries spcn Frame Id */ - char CardLocation[4];/* Char format of planar vpd */ - char Location[20]; /* Frame 1, Card C10 */ + struct pci_dev *PciDev; + union HvDsaMap DsaAddr; /* Direct Select Address */ + /* busNumber, subBusNumber, */ + /* deviceId, barNumber */ + int DevFn; /* Linux devfn */ + int Irq; /* Assigned IRQ */ + int Flags; /* Possible flags(disable/bist)*/ + u8 LogicalSlot; /* Hv Slot Index for Tces */ + struct iommu_table *iommu_table;/* Device TCE Table */ }; -/************************************************************************/ -/* Functions */ -/************************************************************************/ - -extern int iSeries_Device_Information(struct pci_dev*,char*, int); -extern void iSeries_Get_Location_Code(struct iSeries_Device_Node*); -extern int iSeries_Device_ToggleReset(struct pci_dev* PciDev, int AssertTime, int DelayTime); +extern void iSeries_Device_Information(struct pci_dev*, int); #endif /* _ISERIES_64_PCI_H */ diff --git a/include/asm-ppc64/iSeries/iSeries_proc.h b/include/asm-ppc64/iSeries/iSeries_proc.h deleted file mode 100644 index adb6dc14..0000000 --- a/include/asm-ppc64/iSeries/iSeries_proc.h +++ /dev/null @@ -1,24 +0,0 @@ -/* - * iSeries_proc.h - * Copyright (C) 2001 Kyle A. 
Lucke IBM Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ -#ifndef _ISERIES_PROC_H -#define _ISERIES_PROC_H - -extern void iSeries_proc_early_init(void); - -#endif /* _iSeries_PROC_H */ diff --git a/include/asm-ppc64/iSeries/mf.h b/include/asm-ppc64/iSeries/mf.h index 2e59a8e..7e6a0d9 100644 --- a/include/asm-ppc64/iSeries/mf.h +++ b/include/asm-ppc64/iSeries/mf.h @@ -9,17 +9,16 @@ * all partitions in the iSeries. It also provides miscellaneous low-level * machine facility type operations. * - * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. - * + * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * + * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA @@ -52,6 +51,7 @@ extern void mf_clear_src(void); extern void mf_init(void); extern int mf_get_rtc(struct rtc_time *tm); +extern int mf_get_boot_rtc(struct rtc_time *tm); extern int mf_set_rtc(struct rtc_time *tm); #endif /* _ASM_PPC64_ISERIES_MF_H */ diff --git a/include/asm-ppc64/iSeries/vio.h b/include/asm-ppc64/iSeries/vio.h index 3e5766a..6c05e625 100644 --- a/include/asm-ppc64/iSeries/vio.h +++ b/include/asm-ppc64/iSeries/vio.h @@ -8,32 +8,32 @@ * Colin Devilbiss <devilbis@us.ibm.com> * * (C) Copyright 2000 IBM Corporation - * + * * This header file is used by the iSeries virtual I/O device * drivers. It defines the interfaces to the common functions * (implemented in drivers/char/viopath.h) as well as defining - * common functions and structures. Currently (at the time I + * common functions and structures. Currently (at the time I * wrote this comment) the iSeries virtual I/O device drivers - * that use this are - * drivers/block/viodasd.c + * that use this are + * drivers/block/viodasd.c * drivers/char/viocons.c * drivers/char/viotape.c * drivers/cdrom/viocd.c * * The iSeries virtual ethernet support (veth.c) uses a whole * different set of functions. - * + * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) anyu later version. 
* * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of + * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. + * General Public License for more details. * - * You should have received a copy of the GNU General Public License + * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * @@ -44,39 +44,41 @@ #include <asm/iSeries/HvTypes.h> #include <asm/iSeries/HvLpEvent.h> -/* iSeries virtual I/O events use the subtype field in +/* + * iSeries virtual I/O events use the subtype field in * HvLpEvent to figure out what kind of vio event is coming * in. We use a table to route these, and this defines * the maximum number of distinct subtypes */ #define VIO_MAX_SUBTYPES 8 -/* Each subtype can register a handler to process their events. +/* + * Each subtype can register a handler to process their events. * The handler must have this interface. */ typedef void (vio_event_handler_t) (struct HvLpEvent * event); -int viopath_open(HvLpIndex remoteLp, int subtype, int numReq); -int viopath_close(HvLpIndex remoteLp, int subtype, int numReq); -int vio_setHandler(int subtype, vio_event_handler_t * beh); -int vio_clearHandler(int subtype); -int viopath_isactive(HvLpIndex lp); -HvLpInstanceId viopath_sourceinst(HvLpIndex lp); -HvLpInstanceId viopath_targetinst(HvLpIndex lp); -void vio_set_hostlp(void); -void *vio_get_event_buffer(int subtype); -void vio_free_event_buffer(int subtype, void *buffer); +extern int viopath_open(HvLpIndex remoteLp, int subtype, int numReq); +extern int viopath_close(HvLpIndex remoteLp, int subtype, int numReq); +extern int vio_setHandler(int subtype, vio_event_handler_t * beh); +extern int vio_clearHandler(int subtype); +extern int viopath_isactive(HvLpIndex lp); +extern HvLpInstanceId viopath_sourceinst(HvLpIndex lp); +extern HvLpInstanceId viopath_targetinst(HvLpIndex lp); +extern void vio_set_hostlp(void); +extern void *vio_get_event_buffer(int subtype); +extern void vio_free_event_buffer(int subtype, void *buffer); extern HvLpIndex viopath_hostLp; extern HvLpIndex viopath_ourLp; -#define VIOCHAR_MAX_DATA 200 +#define VIOCHAR_MAX_DATA 200 -#define VIOMAJOR_SUBTYPE_MASK 0xff00 -#define VIOMINOR_SUBTYPE_MASK 0x00ff -#define VIOMAJOR_SUBTYPE_SHIFT 8 +#define VIOMAJOR_SUBTYPE_MASK 0xff00 +#define VIOMINOR_SUBTYPE_MASK 0x00ff +#define VIOMAJOR_SUBTYPE_SHIFT 8 -#define VIOVERSION 0x0101 +#define VIOVERSION 0x0101 /* * This is the general structure for VIO errors; each module should have @@ -89,8 +91,8 @@ struct vio_error_entry { int errno; const char *msg; }; -const struct vio_error_entry *vio_lookup_rc(const struct vio_error_entry - *local_table, u16 rc); +extern const struct vio_error_entry *vio_lookup_rc( + const struct vio_error_entry *local_table, u16 rc); enum viosubtypes { viomajorsubtype_monitor = 0x0100, @@ -102,7 +104,6 @@ enum viosubtypes { viomajorsubtype_scsi = 0x0700 }; - enum vioconfigsubtype { vioconfigget = 0x0001, }; diff --git a/include/asm-ppc64/imalloc.h b/include/asm-ppc64/imalloc.h index 3a45e91..e46ff68 100644 --- a/include/asm-ppc64/imalloc.h +++ b/include/asm-ppc64/imalloc.h @@ -4,9 +4,9 @@ /* * Define the address range of the imalloc VM area. 
*/ -#define PHBS_IO_BASE IOREGIONBASE -#define IMALLOC_BASE (IOREGIONBASE + 0x80000000ul) /* Reserve 2 gigs for PHBs */ -#define IMALLOC_END (IOREGIONBASE + EADDR_MASK) +#define PHBS_IO_BASE VMALLOC_END +#define IMALLOC_BASE (PHBS_IO_BASE + 0x80000000ul) /* Reserve 2 gigs for PHBs */ +#define IMALLOC_END (VMALLOC_START + EADDR_MASK) /* imalloc region types */ @@ -18,7 +18,9 @@ extern struct vm_struct * im_get_free_area(unsigned long size); extern struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size, - int region_type); -unsigned long im_free(void *addr); + int region_type); +extern void im_free(void *addr); + +extern unsigned long ioremap_bot; #endif /* _PPC64_IMALLOC_H */ diff --git a/include/asm-ppc64/iommu.h b/include/asm-ppc64/iommu.h index bd53ca4..729de5c 100644 --- a/include/asm-ppc64/iommu.h +++ b/include/asm-ppc64/iommu.h @@ -82,24 +82,6 @@ struct iommu_table { unsigned long *it_map; /* A simple allocation bitmap for now */ }; -#ifdef CONFIG_PPC_ISERIES -struct iommu_table_cb { - unsigned long itc_busno; /* Bus number for this tce table */ - unsigned long itc_start; /* Will be NULL for secondary */ - unsigned long itc_totalsize; /* Size (in pages) of whole table */ - unsigned long itc_offset; /* Index into real tce table of the - start of our section */ - unsigned long itc_size; /* Size (in pages) of our section */ - unsigned long itc_index; /* Index of this tce table */ - unsigned short itc_maxtables; /* Max num of tables for partition */ - unsigned char itc_virtbus; /* Flag to indicate virtual bus */ - unsigned char itc_slotno; /* IOA Tce Slot Index */ - unsigned char itc_rsvd[4]; -}; - -extern struct iommu_table vio_tce_table; /* Tce table for virtual bus */ -#endif /* CONFIG_PPC_ISERIES */ - struct scatterlist; #ifdef CONFIG_PPC_MULTIPLATFORM @@ -122,9 +104,6 @@ extern void iommu_devnode_init_pSeries(struct device_node *dn); #ifdef CONFIG_PPC_ISERIES -/* Walks all buses and creates iommu tables */ -extern void iommu_setup_iSeries(void); - /* Initializes tables for bio buses */ extern void __init iommu_vio_init(void); @@ -158,8 +137,12 @@ extern void iommu_init_early_pSeries(void); extern void iommu_init_early_iSeries(void); extern void iommu_init_early_u3(void); +#ifdef CONFIG_PCI extern void pci_iommu_init(void); extern void pci_direct_iommu_init(void); +#else +static inline void pci_iommu_init(void) { } +#endif extern void alloc_u3_dart_table(void); diff --git a/include/asm-ppc64/kdebug.h b/include/asm-ppc64/kdebug.h index 4886342..d383d16 100644 --- a/include/asm-ppc64/kdebug.h +++ b/include/asm-ppc64/kdebug.h @@ -17,7 +17,7 @@ struct die_args { /* Note - you should never unregister because that can race with NMIs. - If you really want to do it first unregister - then synchronize_kernel - + If you really want to do it first unregister - then synchronize_sched - then free. */ int register_die_notifier(struct notifier_block *nb); diff --git a/include/asm-ppc64/kexec.h b/include/asm-ppc64/kexec.h new file mode 100644 index 0000000..511908a --- /dev/null +++ b/include/asm-ppc64/kexec.h @@ -0,0 +1,41 @@ +#ifndef _PPC64_KEXEC_H +#define _PPC64_KEXEC_H + +/* + * KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return. + * I.e. Maximum page that is mapped directly into kernel memory, + * and kmap is not required. 
+ */ + +/* Maximum physical address we can use pages from */ +/* XXX: since we copy virt we can use any page we allocate */ +#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) + +/* Maximum address we can reach in physical address mode */ +/* XXX: I want to allow initrd in highmem. otherwise set to rmo on lpar */ +#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) + +/* Maximum address we can use for the control code buffer */ +/* XXX: unused today, ppc32 uses TASK_SIZE, probably left over from use_mm */ +#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL) + +/* XXX: today we don't use this at all, althogh we have a static stack */ +#define KEXEC_CONTROL_CODE_SIZE 4096 + +/* The native architecture */ +#define KEXEC_ARCH KEXEC_ARCH_PPC64 + +#define MAX_NOTE_BYTES 1024 + +#ifndef __ASSEMBLY__ + +typedef u32 note_buf_t[MAX_NOTE_BYTES/4]; + +extern note_buf_t crash_notes[]; + +extern void kexec_smp_wait(void); /* get and clear naca physid, wait for + master to copy new code to 0 */ + +#endif /* __ASSEMBLY__ */ +#endif /* _PPC_KEXEC_H */ + diff --git a/include/asm-ppc64/machdep.h b/include/asm-ppc64/machdep.h index 5d3cd9d..9cdad3e 100644 --- a/include/asm-ppc64/machdep.h +++ b/include/asm-ppc64/machdep.h @@ -76,6 +76,7 @@ struct machdep_calls { void (*tce_flush)(struct iommu_table *tbl); void (*iommu_dev_setup)(struct pci_dev *dev); void (*iommu_bus_setup)(struct pci_bus *bus); + void (*irq_bus_setup)(struct pci_bus *bus); int (*probe)(int platform); void (*setup_arch)(void); @@ -85,6 +86,7 @@ struct machdep_calls { void (*init_IRQ)(void); int (*get_irq)(struct pt_regs *); + void (*cpu_irq_down)(void); /* PCI stuff */ void (*pcibios_fixup)(void); diff --git a/include/asm-ppc64/mmu.h b/include/asm-ppc64/mmu.h index c78282a..f373de5 100644 --- a/include/asm-ppc64/mmu.h +++ b/include/asm-ppc64/mmu.h @@ -47,9 +47,10 @@ #define SLB_VSID_KS ASM_CONST(0x0000000000000800) #define SLB_VSID_KP ASM_CONST(0x0000000000000400) #define SLB_VSID_N ASM_CONST(0x0000000000000200) /* no-execute */ -#define SLB_VSID_L ASM_CONST(0x0000000000000100) /* largepage 16M */ +#define SLB_VSID_L ASM_CONST(0x0000000000000100) /* largepage */ #define SLB_VSID_C ASM_CONST(0x0000000000000080) /* class */ - +#define SLB_VSID_LS ASM_CONST(0x0000000000000070) /* size of largepage */ + #define SLB_VSID_KERNEL (SLB_VSID_KP|SLB_VSID_C) #define SLB_VSID_USER (SLB_VSID_KP|SLB_VSID_KS) @@ -180,6 +181,28 @@ static inline void tlbiel(unsigned long va) asm volatile("ptesync": : :"memory"); } +static inline unsigned long slot2va(unsigned long avpn, unsigned long large, + unsigned long secondary, unsigned long slot) +{ + unsigned long va; + + va = avpn << 23; + + if (!large) { + unsigned long vpi, pteg; + + pteg = slot / HPTES_PER_GROUP; + if (secondary) + pteg = ~pteg; + + vpi = ((va >> 28) ^ pteg) & htab_hash_mask; + + va |= vpi << PAGE_SHIFT; + } + + return va; +} + /* * Handle a fault by adding an HPTE. If the address can't be determined * to be valid via Linux page tables, return 1. If handled return 0 diff --git a/include/asm-ppc64/mmzone.h b/include/asm-ppc64/mmzone.h index 0619a41..ed473f4 100644 --- a/include/asm-ppc64/mmzone.h +++ b/include/asm-ppc64/mmzone.h @@ -10,9 +10,20 @@ #include <linux/config.h> #include <asm/smp.h> -#ifdef CONFIG_DISCONTIGMEM +/* generic non-linear memory support: + * + * 1) we will not split memory into more chunks than will fit into the + * flags field of the struct page + */ + + +#ifdef CONFIG_NEED_MULTIPLE_NODES extern struct pglist_data *node_data[]; +/* + * Return a pointer to the node data for node n. 
+ */ +#define NODE_DATA(nid) (node_data[nid]) /* * Following are specific to this numa platform. @@ -47,36 +58,32 @@ static inline int pa_to_nid(unsigned long pa) return nid; } -#define pfn_to_nid(pfn) pa_to_nid((pfn) << PAGE_SHIFT) - -/* - * Return a pointer to the node data for node n. - */ -#define NODE_DATA(nid) (node_data[nid]) - #define node_localnr(pfn, nid) ((pfn) - NODE_DATA(nid)->node_start_pfn) /* * Following are macros that each numa implmentation must define. */ -/* - * Given a kernel address, find the home node of the underlying memory. - */ -#define kvaddr_to_nid(kaddr) pa_to_nid(__pa(kaddr)) - -#define node_mem_map(nid) (NODE_DATA(nid)->node_mem_map) #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) #define node_end_pfn(nid) (NODE_DATA(nid)->node_end_pfn) #define local_mapnr(kvaddr) \ ( (__pa(kvaddr) >> PAGE_SHIFT) - node_start_pfn(kvaddr_to_nid(kvaddr)) +#ifdef CONFIG_DISCONTIGMEM + +/* + * Given a kernel address, find the home node of the underlying memory. + */ +#define kvaddr_to_nid(kaddr) pa_to_nid(__pa(kaddr)) + +#define pfn_to_nid(pfn) pa_to_nid((unsigned long)(pfn) << PAGE_SHIFT) + /* Written this way to avoid evaluating arguments twice */ #define discontigmem_pfn_to_page(pfn) \ ({ \ unsigned long __tmp = pfn; \ - (node_mem_map(pfn_to_nid(__tmp)) + \ + (NODE_DATA(pfn_to_nid(__tmp))->node_mem_map + \ node_localnr(__tmp, pfn_to_nid(__tmp))); \ }) @@ -91,4 +98,11 @@ static inline int pa_to_nid(unsigned long pa) #define discontigmem_pfn_valid(pfn) ((pfn) < num_physpages) #endif /* CONFIG_DISCONTIGMEM */ + +#endif /* CONFIG_NEED_MULTIPLE_NODES */ + +#ifdef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID +#define early_pfn_to_nid(pfn) pa_to_nid(((unsigned long)pfn) << PAGE_SHIFT) +#endif + #endif /* _ASM_MMZONE_H_ */ diff --git a/include/asm-ppc64/nvram.h b/include/asm-ppc64/nvram.h index 4e6dd37..dfaa215 100644 --- a/include/asm-ppc64/nvram.h +++ b/include/asm-ppc64/nvram.h @@ -70,6 +70,7 @@ extern struct nvram_partition *nvram_find_partition(int sig, const char *name); extern int pSeries_nvram_init(void); extern int pmac_nvram_init(void); +extern int bpa_nvram_init(void); /* PowerMac specific nvram stuffs */ diff --git a/include/asm-ppc64/paca.h b/include/asm-ppc64/paca.h index 1a0223b..ae76cae 100644 --- a/include/asm-ppc64/paca.h +++ b/include/asm-ppc64/paca.h @@ -20,13 +20,13 @@ #include <asm/types.h> #include <asm/lppaca.h> #include <asm/iSeries/ItLpRegSave.h> +#include <asm/iSeries/ItLpQueue.h> #include <asm/mmu.h> register struct paca_struct *local_paca asm("r13"); #define get_paca() local_paca struct task_struct; -struct ItLpQueue; /* * Defines the layout of the paca. 
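
For illustration only (not part of the merged patch): the slot2va() helper added to include/asm-ppc64/mmu.h above inverts the hashed-page-table lookup, recovering a virtual address from an HPTE's abbreviated VPN, its hash group (primary or secondary) and its slot number. The standalone C sketch below mirrors that arithmetic in userspace; HPTES_PER_GROUP, PAGE_SHIFT and htab_hash_mask are assumed example values chosen to make it runnable, not values taken from the patch.

/* Userspace sketch of the slot2va() arithmetic shown in the mmu.h hunk.
 * Constants below are assumptions for the example, not kernel values. */
#include <stdio.h>

#define HPTES_PER_GROUP 8              /* assumed: 8 HPTEs per hash group */
#define PAGE_SHIFT      12             /* assumed: 4K pages */
static unsigned long htab_hash_mask = 0xfffff;  /* assumed hash mask */

static unsigned long slot2va(unsigned long avpn, unsigned long large,
                             unsigned long secondary, unsigned long slot)
{
        unsigned long va = avpn << 23;          /* abbreviated VPN -> upper VA bits */

        if (!large) {
                unsigned long vpi, pteg;

                pteg = slot / HPTES_PER_GROUP;  /* which hash group the slot is in */
                if (secondary)
                        pteg = ~pteg;           /* undo the secondary-hash inversion */

                /* xor the group index back against the high VA bits to
                 * recover the page index bits folded into the hash */
                vpi = ((va >> 28) ^ pteg) & htab_hash_mask;
                va |= vpi << PAGE_SHIFT;
        }
        return va;
}

int main(void)
{
        /* arbitrary example: primary group, 4K page, slot 42 */
        printf("va = 0x%lx\n", slot2va(0x123456UL, 0, 0, 42));
        return 0;
}

Built with any standard C compiler this just prints the reconstructed address for the example inputs; in the kernel the same computation is only meaningful for entries read back out of the hashed page table, where avpn, the group and the slot are known.
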
diff --git a/include/asm-ppc64/page.h b/include/asm-ppc64/page.h index bcd2178..a5893a3 100644 --- a/include/asm-ppc64/page.h +++ b/include/asm-ppc64/page.h @@ -202,9 +202,7 @@ extern u64 ppc64_pft_size; /* Log 2 of page table size */ #define PAGE_OFFSET ASM_CONST(0xC000000000000000) #define KERNELBASE PAGE_OFFSET #define VMALLOCBASE ASM_CONST(0xD000000000000000) -#define IOREGIONBASE ASM_CONST(0xE000000000000000) -#define IO_REGION_ID (IOREGIONBASE >> REGION_SHIFT) #define VMALLOC_REGION_ID (VMALLOCBASE >> REGION_SHIFT) #define KERNEL_REGION_ID (KERNELBASE >> REGION_SHIFT) #define USER_REGION_ID (0UL) @@ -219,7 +217,8 @@ extern u64 ppc64_pft_size; /* Log 2 of page table size */ #define page_to_pfn(page) discontigmem_page_to_pfn(page) #define pfn_to_page(pfn) discontigmem_pfn_to_page(pfn) #define pfn_valid(pfn) discontigmem_pfn_valid(pfn) -#else +#endif +#ifdef CONFIG_FLATMEM #define pfn_to_page(pfn) (mem_map + (pfn)) #define page_to_pfn(page) ((unsigned long)((page) - mem_map)) #define pfn_valid(pfn) ((pfn) < max_mapnr) diff --git a/include/asm-ppc64/pgtable.h b/include/asm-ppc64/pgtable.h index 264c4f7..46cf61c 100644 --- a/include/asm-ppc64/pgtable.h +++ b/include/asm-ppc64/pgtable.h @@ -53,7 +53,8 @@ * Define the address range of the vmalloc VM area. */ #define VMALLOC_START (0xD000000000000000ul) -#define VMALLOC_END (VMALLOC_START + EADDR_MASK) +#define VMALLOC_SIZE (0x10000000000UL) +#define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE) /* * Bits in a linux-style PTE. These match the bits in the @@ -239,9 +240,6 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) /* This now only contains the vmalloc pages */ #define pgd_offset_k(address) pgd_offset(&init_mm, address) -/* to find an entry in the ioremap page-table-directory */ -#define pgd_offset_i(address) (ioremap_pgd + pgd_index(address)) - /* * The following only work if pte_present() is true. * Undefined behaviour if not.. 
@@ -459,15 +457,12 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr, #define __HAVE_ARCH_PTE_SAME #define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0) -extern unsigned long ioremap_bot, ioremap_base; - #define pmd_ERROR(e) \ printk("%s:%d: bad pmd %08x.\n", __FILE__, __LINE__, pmd_val(e)) #define pgd_ERROR(e) \ printk("%s:%d: bad pgd %08x.\n", __FILE__, __LINE__, pgd_val(e)) extern pgd_t swapper_pg_dir[]; -extern pgd_t ioremap_dir[]; extern void paging_init(void); diff --git a/include/asm-ppc64/ppc32.h b/include/asm-ppc64/ppc32.h index 1d04048..6b44a8c 100644 --- a/include/asm-ppc64/ppc32.h +++ b/include/asm-ppc64/ppc32.h @@ -32,7 +32,7 @@ typedef struct compat_siginfo { /* POSIX.1b timers */ struct { - timer_t _tid; /* timer id */ + compat_timer_t _tid; /* timer id */ int _overrun; /* overrun count */ compat_sigval_t _sigval; /* same as below */ int _sys_private; /* not to be passed to user */ diff --git a/include/asm-ppc64/processor.h b/include/asm-ppc64/processor.h index 0035efe..af28aa5 100644 --- a/include/asm-ppc64/processor.h +++ b/include/asm-ppc64/processor.h @@ -120,128 +120,36 @@ /* Special Purpose Registers (SPRNs)*/ -#define SPRN_CDBCR 0x3D7 /* Cache Debug Control Register */ #define SPRN_CTR 0x009 /* Count Register */ #define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */ -#define SPRN_DAC1 0x3F6 /* Data Address Compare 1 */ -#define SPRN_DAC2 0x3F7 /* Data Address Compare 2 */ +#define DABR_TRANSLATION (1UL << 2) #define SPRN_DAR 0x013 /* Data Address Register */ -#define SPRN_DBCR 0x3F2 /* Debug Control Regsiter */ -#define DBCR_EDM 0x80000000 -#define DBCR_IDM 0x40000000 -#define DBCR_RST(x) (((x) & 0x3) << 28) -#define DBCR_RST_NONE 0 -#define DBCR_RST_CORE 1 -#define DBCR_RST_CHIP 2 -#define DBCR_RST_SYSTEM 3 -#define DBCR_IC 0x08000000 /* Instruction Completion Debug Evnt */ -#define DBCR_BT 0x04000000 /* Branch Taken Debug Event */ -#define DBCR_EDE 0x02000000 /* Exception Debug Event */ -#define DBCR_TDE 0x01000000 /* TRAP Debug Event */ -#define DBCR_FER 0x00F80000 /* First Events Remaining Mask */ -#define DBCR_FT 0x00040000 /* Freeze Timers on Debug Event */ -#define DBCR_IA1 0x00020000 /* Instr. Addr. Compare 1 Enable */ -#define DBCR_IA2 0x00010000 /* Instr. Addr. Compare 2 Enable */ -#define DBCR_D1R 0x00008000 /* Data Addr. Compare 1 Read Enable */ -#define DBCR_D1W 0x00004000 /* Data Addr. Compare 1 Write Enable */ -#define DBCR_D1S(x) (((x) & 0x3) << 12) /* Data Adrr. Compare 1 Size */ -#define DAC_BYTE 0 -#define DAC_HALF 1 -#define DAC_WORD 2 -#define DAC_QUAD 3 -#define DBCR_D2R 0x00000800 /* Data Addr. Compare 2 Read Enable */ -#define DBCR_D2W 0x00000400 /* Data Addr. Compare 2 Write Enable */ -#define DBCR_D2S(x) (((x) & 0x3) << 8) /* Data Addr. Compare 2 Size */ -#define DBCR_SBT 0x00000040 /* Second Branch Taken Debug Event */ -#define DBCR_SED 0x00000020 /* Second Exception Debug Event */ -#define DBCR_STD 0x00000010 /* Second Trap Debug Event */ -#define DBCR_SIA 0x00000008 /* Second IAC Enable */ -#define DBCR_SDA 0x00000004 /* Second DAC Enable */ -#define DBCR_JOI 0x00000002 /* JTAG Serial Outbound Int. Enable */ -#define DBCR_JII 0x00000001 /* JTAG Serial Inbound Int. 
Enable */ -#define SPRN_DBCR0 0x3F2 /* Debug Control Register 0 */ -#define SPRN_DBCR1 0x3BD /* Debug Control Register 1 */ -#define SPRN_DBSR 0x3F0 /* Debug Status Register */ -#define SPRN_DCCR 0x3FA /* Data Cache Cacheability Register */ -#define DCCR_NOCACHE 0 /* Noncacheable */ -#define DCCR_CACHE 1 /* Cacheable */ -#define SPRN_DCMP 0x3D1 /* Data TLB Compare Register */ -#define SPRN_DCWR 0x3BA /* Data Cache Write-thru Register */ -#define DCWR_COPY 0 /* Copy-back */ -#define DCWR_WRITE 1 /* Write-through */ -#define SPRN_DEAR 0x3D5 /* Data Error Address Register */ #define SPRN_DEC 0x016 /* Decrement Register */ -#define SPRN_DMISS 0x3D0 /* Data TLB Miss Register */ #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */ #define DSISR_NOHPTE 0x40000000 /* no translation found */ #define DSISR_PROTFAULT 0x08000000 /* protection fault */ #define DSISR_ISSTORE 0x02000000 /* access was a store */ #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */ #define DSISR_NOSEGMENT 0x00200000 /* STAB/SLB miss */ -#define SPRN_EAR 0x11A /* External Address Register */ -#define SPRN_ESR 0x3D4 /* Exception Syndrome Register */ -#define ESR_IMCP 0x80000000 /* Instr. Machine Check - Protection */ -#define ESR_IMCN 0x40000000 /* Instr. Machine Check - Non-config */ -#define ESR_IMCB 0x20000000 /* Instr. Machine Check - Bus error */ -#define ESR_IMCT 0x10000000 /* Instr. Machine Check - Timeout */ -#define ESR_PIL 0x08000000 /* Program Exception - Illegal */ -#define ESR_PPR 0x04000000 /* Program Exception - Priveleged */ -#define ESR_PTR 0x02000000 /* Program Exception - Trap */ -#define ESR_DST 0x00800000 /* Storage Exception - Data miss */ -#define ESR_DIZ 0x00400000 /* Storage Exception - Zone fault */ -#define SPRN_EVPR 0x3D6 /* Exception Vector Prefix Register */ -#define SPRN_HASH1 0x3D2 /* Primary Hash Address Register */ -#define SPRN_HASH2 0x3D3 /* Secondary Hash Address Resgister */ #define SPRN_HID0 0x3F0 /* Hardware Implementation Register 0 */ -#define HID0_EMCP (1<<31) /* Enable Machine Check pin */ -#define HID0_EBA (1<<29) /* Enable Bus Address Parity */ -#define HID0_EBD (1<<28) /* Enable Bus Data Parity */ -#define HID0_SBCLK (1<<27) -#define HID0_EICE (1<<26) -#define HID0_ECLK (1<<25) -#define HID0_PAR (1<<24) -#define HID0_DOZE (1<<23) -#define HID0_NAP (1<<22) -#define HID0_SLEEP (1<<21) -#define HID0_DPM (1<<20) -#define HID0_ICE (1<<15) /* Instruction Cache Enable */ -#define HID0_DCE (1<<14) /* Data Cache Enable */ -#define HID0_ILOCK (1<<13) /* Instruction Cache Lock */ -#define HID0_DLOCK (1<<12) /* Data Cache Lock */ -#define HID0_ICFI (1<<11) /* Instr. Cache Flash Invalidate */ -#define HID0_DCI (1<<10) /* Data Cache Invalidate */ -#define HID0_SPD (1<<9) /* Speculative disable */ -#define HID0_SGE (1<<7) /* Store Gathering Enable */ -#define HID0_SIED (1<<7) /* Serial Instr. 
Execution [Disable] */ -#define HID0_BTIC (1<<5) /* Branch Target Instruction Cache Enable */ -#define HID0_ABE (1<<3) /* Address Broadcast Enable */ -#define HID0_BHTE (1<<2) /* Branch History Table Enable */ -#define HID0_BTCD (1<<1) /* Branch target cache disable */ #define SPRN_MSRDORM 0x3F1 /* Hardware Implementation Register 1 */ #define SPRN_HID1 0x3F1 /* Hardware Implementation Register 1 */ #define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */ #define SPRN_NIADORM 0x3F3 /* Hardware Implementation Register 2 */ #define SPRN_HID4 0x3F4 /* 970 HID4 */ #define SPRN_HID5 0x3F6 /* 970 HID5 */ -#define SPRN_TSC 0x3FD /* Thread switch control */ -#define SPRN_TST 0x3FC /* Thread switch timeout */ -#define SPRN_IAC1 0x3F4 /* Instruction Address Compare 1 */ -#define SPRN_IAC2 0x3F5 /* Instruction Address Compare 2 */ -#define SPRN_ICCR 0x3FB /* Instruction Cache Cacheability Register */ -#define ICCR_NOCACHE 0 /* Noncacheable */ -#define ICCR_CACHE 1 /* Cacheable */ -#define SPRN_ICDBDR 0x3D3 /* Instruction Cache Debug Data Register */ -#define SPRN_ICMP 0x3D5 /* Instruction TLB Compare Register */ -#define SPRN_ICTC 0x3FB /* Instruction Cache Throttling Control Reg */ -#define SPRN_IMISS 0x3D4 /* Instruction TLB Miss Register */ -#define SPRN_IMMR 0x27E /* Internal Memory Map Register */ +#define SPRN_HID6 0x3F9 /* BE HID 6 */ +#define HID6_LB (0x0F<<12) /* Concurrent Large Page Modes */ +#define HID6_DLP (1<<20) /* Disable all large page modes (4K only) */ +#define SPRN_TSCR 0x399 /* Thread switch control on BE */ +#define SPRN_TTR 0x39A /* Thread switch timeout on BE */ +#define TSCR_DEC_ENABLE 0x200000 /* Decrementer Interrupt */ +#define TSCR_EE_ENABLE 0x100000 /* External Interrupt */ +#define TSCR_EE_BOOST 0x080000 /* External Interrupt Boost */ +#define SPRN_TSC 0x3FD /* Thread switch control on others */ +#define SPRN_TST 0x3FC /* Thread switch timeout on others */ #define SPRN_L2CR 0x3F9 /* Level 2 Cache Control Regsiter */ #define SPRN_LR 0x008 /* Link Register */ -#define SPRN_PBL1 0x3FC /* Protection Bound Lower 1 */ -#define SPRN_PBL2 0x3FE /* Protection Bound Lower 2 */ -#define SPRN_PBU1 0x3FD /* Protection Bound Upper 1 */ -#define SPRN_PBU2 0x3FF /* Protection Bound Upper 2 */ -#define SPRN_PID 0x3B1 /* Process ID */ #define SPRN_PIR 0x3FF /* Processor Identification Register */ #define SPRN_PIT 0x3DB /* Programmable Interval Timer */ #define SPRN_PURR 0x135 /* Processor Utilization of Resources Register */ @@ -249,9 +157,6 @@ #define SPRN_RPA 0x3D6 /* Required Physical Address Register */ #define SPRN_SDA 0x3BF /* Sampled Data Address Register */ #define SPRN_SDR1 0x019 /* MMU Hash Base Register */ -#define SPRN_SGR 0x3B9 /* Storage Guarded Register */ -#define SGR_NORMAL 0 -#define SGR_GUARDED 1 #define SPRN_SIA 0x3BB /* Sampled Instruction Address Register */ #define SPRN_SPRG0 0x110 /* Special Purpose Register General 0 */ #define SPRN_SPRG1 0x111 /* Special Purpose Register General 1 */ @@ -264,50 +169,12 @@ #define SPRN_TBWL 0x11C /* Time Base Lower Register (super, W/O) */ #define SPRN_TBWU 0x11D /* Time Base Write Upper Register (super, W/O) */ #define SPRN_HIOR 0x137 /* 970 Hypervisor interrupt offset */ -#define SPRN_TCR 0x3DA /* Timer Control Register */ -#define TCR_WP(x) (((x)&0x3)<<30) /* WDT Period */ -#define WP_2_17 0 /* 2^17 clocks */ -#define WP_2_21 1 /* 2^21 clocks */ -#define WP_2_25 2 /* 2^25 clocks */ -#define WP_2_29 3 /* 2^29 clocks */ -#define TCR_WRC(x) (((x)&0x3)<<28) /* WDT Reset Control */ -#define WRC_NONE 0 /* No reset will 
occur */ -#define WRC_CORE 1 /* Core reset will occur */ -#define WRC_CHIP 2 /* Chip reset will occur */ -#define WRC_SYSTEM 3 /* System reset will occur */ -#define TCR_WIE 0x08000000 /* WDT Interrupt Enable */ -#define TCR_PIE 0x04000000 /* PIT Interrupt Enable */ -#define TCR_FP(x) (((x)&0x3)<<24) /* FIT Period */ -#define FP_2_9 0 /* 2^9 clocks */ -#define FP_2_13 1 /* 2^13 clocks */ -#define FP_2_17 2 /* 2^17 clocks */ -#define FP_2_21 3 /* 2^21 clocks */ -#define TCR_FIE 0x00800000 /* FIT Interrupt Enable */ -#define TCR_ARE 0x00400000 /* Auto Reload Enable */ -#define SPRN_THRM1 0x3FC /* Thermal Management Register 1 */ -#define THRM1_TIN (1<<0) -#define THRM1_TIV (1<<1) -#define THRM1_THRES (0x7f<<2) -#define THRM1_TID (1<<29) -#define THRM1_TIE (1<<30) -#define THRM1_V (1<<31) -#define SPRN_THRM2 0x3FD /* Thermal Management Register 2 */ -#define SPRN_THRM3 0x3FE /* Thermal Management Register 3 */ -#define THRM3_E (1<<31) -#define SPRN_TSR 0x3D8 /* Timer Status Register */ -#define TSR_ENW 0x80000000 /* Enable Next Watchdog */ -#define TSR_WIS 0x40000000 /* WDT Interrupt Status */ -#define TSR_WRS(x) (((x)&0x3)<<28) /* WDT Reset Status */ -#define WRS_NONE 0 /* No WDT reset occurred */ -#define WRS_CORE 1 /* WDT forced core reset */ -#define WRS_CHIP 2 /* WDT forced chip reset */ -#define WRS_SYSTEM 3 /* WDT forced system reset */ -#define TSR_PIS 0x08000000 /* PIT Interrupt Status */ -#define TSR_FIS 0x04000000 /* FIT Interrupt Status */ #define SPRN_USIA 0x3AB /* User Sampled Instruction Address Register */ #define SPRN_XER 0x001 /* Fixed Point Exception Register */ -#define SPRN_ZPR 0x3B0 /* Zone Protection Register */ #define SPRN_VRSAVE 0x100 /* Vector save */ +#define SPRN_CTRLF 0x088 +#define SPRN_CTRLT 0x098 +#define CTRL_RUNLATCH 0x1 /* Performance monitor SPRs */ #define SPRN_SIAR 780 @@ -352,28 +219,19 @@ #define CTR SPRN_CTR /* Counter Register */ #define DAR SPRN_DAR /* Data Address Register */ #define DABR SPRN_DABR /* Data Address Breakpoint Register */ -#define DCMP SPRN_DCMP /* Data TLB Compare Register */ #define DEC SPRN_DEC /* Decrement Register */ -#define DMISS SPRN_DMISS /* Data TLB Miss Register */ #define DSISR SPRN_DSISR /* Data Storage Interrupt Status Register */ -#define EAR SPRN_EAR /* External Address Register */ -#define HASH1 SPRN_HASH1 /* Primary Hash Address Register */ -#define HASH2 SPRN_HASH2 /* Secondary Hash Address Register */ #define HID0 SPRN_HID0 /* Hardware Implementation Register 0 */ #define MSRDORM SPRN_MSRDORM /* MSR Dormant Register */ #define NIADORM SPRN_NIADORM /* NIA Dormant Register */ #define TSC SPRN_TSC /* Thread switch control */ #define TST SPRN_TST /* Thread switch timeout */ #define IABR SPRN_IABR /* Instruction Address Breakpoint Register */ -#define ICMP SPRN_ICMP /* Instruction TLB Compare Register */ -#define IMISS SPRN_IMISS /* Instruction TLB Miss Register */ -#define IMMR SPRN_IMMR /* PPC 860/821 Internal Memory Map Register */ #define L2CR SPRN_L2CR /* PPC 750 L2 control register */ #define __LR SPRN_LR #define PVR SPRN_PVR /* Processor Version */ #define PIR SPRN_PIR /* Processor ID */ #define PURR SPRN_PURR /* Processor Utilization of Resource Register */ -//#define RPA SPRN_RPA /* Required Physical Address Register */ #define SDR1 SPRN_SDR1 /* MMU hash base register */ #define SPR0 SPRN_SPRG0 /* Supervisor Private Registers */ #define SPR1 SPRN_SPRG1 @@ -389,10 +247,6 @@ #define TBRU SPRN_TBRU /* Time Base Read Upper Register */ #define TBWL SPRN_TBWL /* Time Base Write Lower Register */ #define TBWU 
SPRN_TBWU /* Time Base Write Upper Register */ -#define ICTC 1019 -#define THRM1 SPRN_THRM1 /* Thermal Management Register 1 */ -#define THRM2 SPRN_THRM2 /* Thermal Management Register 2 */ -#define THRM3 SPRN_THRM3 /* Thermal Management Register 3 */ #define XER SPRN_XER /* Processor Version Register (PVR) field extraction */ @@ -413,6 +267,7 @@ #define PV_970FX 0x003C #define PV_630 0x0040 #define PV_630p 0x0041 +#define PV_BE 0x0070 /* Platforms supported by PPC64 */ #define PLATFORM_PSERIES 0x0100 @@ -421,6 +276,7 @@ #define PLATFORM_LPAR 0x0001 #define PLATFORM_POWERMAC 0x0400 #define PLATFORM_MAPLE 0x0500 +#define PLATFORM_BPA 0x1000 /* Compatibility with drivers coming from PPC32 world */ #define _machine (systemcfg->platform) @@ -432,16 +288,11 @@ #define IC_INVALID 0 #define IC_OPEN_PIC 1 #define IC_PPC_XIC 2 +#define IC_BPA_IIC 3 #define XGLUE(a,b) a##b #define GLUE(a,b) XGLUE(a,b) -/* iSeries CTRL register (for runlatch) */ - -#define CTRLT 0x098 -#define CTRLF 0x088 -#define RUNLATCH 0x0001 - #ifdef __ASSEMBLY__ #define _GLOBAL(name) \ @@ -590,16 +441,6 @@ struct thread_struct { } /* - * Note: the vm_start and vm_end fields here should *not* - * be in kernel space. (Could vm_end == vm_start perhaps?) - */ -#define IOREMAP_MMAP { &ioremap_mm, 0, 0x1000, NULL, \ - PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, \ - 1, NULL, NULL } - -extern struct mm_struct ioremap_mm; - -/* * Return saved PC of a blocked thread. For now, this is the "user" PC */ #define thread_saved_pc(tsk) \ @@ -656,6 +497,24 @@ static inline void prefetchw(const void *x) #define HAVE_ARCH_PICK_MMAP_LAYOUT +static inline void ppc64_runlatch_on(void) +{ + unsigned long ctrl; + + ctrl = mfspr(SPRN_CTRLF); + ctrl |= CTRL_RUNLATCH; + mtspr(SPRN_CTRLT, ctrl); +} + +static inline void ppc64_runlatch_off(void) +{ + unsigned long ctrl; + + ctrl = mfspr(SPRN_CTRLF); + ctrl &= ~CTRL_RUNLATCH; + mtspr(SPRN_CTRLT, ctrl); +} + #endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ diff --git a/include/asm-ppc64/prom.h b/include/asm-ppc64/prom.h index 2440a2c..04b1a84 100644 --- a/include/asm-ppc64/prom.h +++ b/include/asm-ppc64/prom.h @@ -147,9 +147,7 @@ struct device_node { struct device_node *sibling; struct device_node *next; /* next device of same type */ struct device_node *allnext; /* next in list of all nodes */ - struct proc_dir_entry *pde; /* this node's proc directory */ - struct proc_dir_entry *name_link; /* name symlink */ - struct proc_dir_entry *addr_link; /* addr symlink */ + struct proc_dir_entry *pde; /* this node's proc directory */ struct kref kref; unsigned long _flags; }; @@ -174,15 +172,6 @@ static inline void set_node_proc_entry(struct device_node *dn, struct proc_dir_e dn->pde = de; } -static void inline set_node_name_link(struct device_node *dn, struct proc_dir_entry *de) -{ - dn->name_link = de; -} - -static void inline set_node_addr_link(struct device_node *dn, struct proc_dir_entry *de) -{ - dn->addr_link = de; -} /* OBSOLETE: Old stlye node lookup */ extern struct device_node *find_devices(const char *name); diff --git a/include/asm-ppc64/rtas.h b/include/asm-ppc64/rtas.h index a8ab0e9..e7d1b52 100644 --- a/include/asm-ppc64/rtas.h +++ b/include/asm-ppc64/rtas.h @@ -186,8 +186,14 @@ extern int rtas_get_sensor(int sensor, int index, int *state); extern int rtas_get_power_level(int powerdomain, int *level); extern int rtas_set_power_level(int powerdomain, int level, int *setlevel); extern int rtas_set_indicator(int indicator, int index, int new_value); +extern void rtas_progress(char *s, unsigned short 
hex); extern void rtas_initialize(void); +struct rtc_time; +extern void rtas_get_boot_time(struct rtc_time *rtc_time); +extern void rtas_get_rtc_time(struct rtc_time *rtc_time); +extern int rtas_set_rtc_time(struct rtc_time *rtc_time); + /* Given an RTAS status code of 9900..9905 compute the hinted delay */ unsigned int rtas_extended_busy_delay_time(int status); static inline int rtas_is_extended_busy(int status) diff --git a/include/asm-ppc64/smp.h b/include/asm-ppc64/smp.h index c8646fa..d86f742 100644 --- a/include/asm-ppc64/smp.h +++ b/include/asm-ppc64/smp.h @@ -45,7 +45,7 @@ void generic_cpu_die(unsigned int cpu); void generic_mach_cpu_die(void); #endif -#define __smp_processor_id() (get_paca()->paca_index) +#define raw_smp_processor_id() (get_paca()->paca_index) #define hard_smp_processor_id() (get_paca()->hw_cpu_id) extern cpumask_t cpu_sibling_map[NR_CPUS]; @@ -85,6 +85,14 @@ extern void smp_generic_take_timebase(void); extern struct smp_ops_t *smp_ops; +#ifdef CONFIG_PPC_PSERIES +void vpa_init(int cpu); +#else +static inline void vpa_init(int cpu) +{ +} +#endif /* CONFIG_PPC_PSERIES */ + #endif /* __ASSEMBLY__ */ #endif /* !(_PPC64_SMP_H) */ diff --git a/include/asm-ppc64/sparsemem.h b/include/asm-ppc64/sparsemem.h new file mode 100644 index 0000000..c5bd47e --- /dev/null +++ b/include/asm-ppc64/sparsemem.h @@ -0,0 +1,16 @@ +#ifndef _ASM_PPC64_SPARSEMEM_H +#define _ASM_PPC64_SPARSEMEM_H 1 + +#ifdef CONFIG_SPARSEMEM +/* + * SECTION_SIZE_BITS 2^N: how big each section will be + * MAX_PHYSADDR_BITS 2^N: how much physical address space we have + * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space + */ +#define SECTION_SIZE_BITS 24 +#define MAX_PHYSADDR_BITS 38 +#define MAX_PHYSMEM_BITS 36 + +#endif /* CONFIG_SPARSEMEM */ + +#endif /* _ASM_PPC64_SPARSEMEM_H */ diff --git a/include/asm-ppc64/thread_info.h b/include/asm-ppc64/thread_info.h index 037b5e0..0494df6 100644 --- a/include/asm-ppc64/thread_info.h +++ b/include/asm-ppc64/thread_info.h @@ -24,7 +24,7 @@ struct thread_info { struct task_struct *task; /* main task structure */ struct exec_domain *exec_domain; /* execution domain */ int cpu; /* cpu we're on */ - int preempt_count; + int preempt_count; /* 0 => preemptable, <0 => BUG */ struct restart_block restart_block; /* set by force_successful_syscall_return */ unsigned char syscall_noerror; @@ -96,7 +96,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_POLLING_NRFLAG 4 /* true if poll_idle() is polling TIF_NEED_RESCHED */ #define TIF_32BIT 5 /* 32 bit binary */ -#define TIF_RUN_LIGHT 6 /* iSeries run light */ +/* #define SPARE 6 */ #define TIF_ABI_PENDING 7 /* 32/64 bit switch needed */ #define TIF_SYSCALL_AUDIT 8 /* syscall auditing active */ #define TIF_SINGLESTEP 9 /* singlestepping active */ @@ -110,7 +110,7 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) #define _TIF_32BIT (1<<TIF_32BIT) -#define _TIF_RUN_LIGHT (1<<TIF_RUN_LIGHT) +/* #define _SPARE (1<<SPARE) */ #define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING) #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) #define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP) diff --git a/include/asm-ppc64/time.h b/include/asm-ppc64/time.h index 8d6e376..c6c762c 100644 --- a/include/asm-ppc64/time.h +++ b/include/asm-ppc64/time.h @@ -34,6 +34,15 @@ struct rtc_time; extern void to_tm(int tim, struct rtc_time * tm); extern time_t last_rtc_update; +void generic_calibrate_decr(void); +void 
setup_default_decr(void); + +/* Some sane defaults: 125 MHz timebase, 1GHz processor */ +extern unsigned long ppc_proc_freq; +#define DEFAULT_PROC_FREQ (DEFAULT_TB_FREQ * 8) +extern unsigned long ppc_tb_freq; +#define DEFAULT_TB_FREQ 125000000UL + /* * By putting all of this stuff into a single struct we * reduce the number of cache lines touched by do_gettimeofday. diff --git a/include/asm-ppc64/topology.h b/include/asm-ppc64/topology.h index d58d9dd..fcdcfd2 100644 --- a/include/asm-ppc64/topology.h +++ b/include/asm-ppc64/topology.h @@ -59,10 +59,8 @@ static inline int node_to_first_cpu(int node) .nr_balance_failed = 0, \ } -#else /* !CONFIG_NUMA */ +#endif /* CONFIG_NUMA */ #include <asm-generic/topology.h> -#endif /* CONFIG_NUMA */ - #endif /* _ASM_PPC64_TOPOLOGY_H */ diff --git a/include/asm-ppc64/xics.h b/include/asm-ppc64/xics.h index fdec5e7..0c45e14 100644 --- a/include/asm-ppc64/xics.h +++ b/include/asm-ppc64/xics.h @@ -17,6 +17,7 @@ void xics_init_IRQ(void); int xics_get_irq(struct pt_regs *); void xics_setup_cpu(void); +void xics_teardown_cpu(void); void xics_cause_IPI(int cpu); void xics_request_IPIs(void); void xics_migrate_irqs_away(void); diff --git a/include/asm-s390/cpcmd.h b/include/asm-s390/cpcmd.h index 1d33c5da..1fcf65b 100644 --- a/include/asm-s390/cpcmd.h +++ b/include/asm-s390/cpcmd.h @@ -11,14 +11,28 @@ #define __CPCMD__ /* + * the lowlevel function for cpcmd * the caller of __cpcmd has to ensure that the response buffer is below 2 GB */ -extern void __cpcmd(char *cmd, char *response, int rlen); +extern int __cpcmd(const char *cmd, char *response, int rlen, int *response_code); #ifndef __s390x__ #define cpcmd __cpcmd #else -extern void cpcmd(char *cmd, char *response, int rlen); +/* + * cpcmd is the in-kernel interface for issuing CP commands + * + * cmd: null-terminated command string, max 240 characters + * response: response buffer for VM's textual response + * rlen: size of the response buffer, cpcmd will not exceed this size + * but will cap the output, if its too large. Everything that + * did not fit into the buffer will be silently dropped + * response_code: return pointer for VM's error code + * return value: the size of the response. 
The caller can check if the buffer + * was large enough by comparing the return value and rlen + * NOTE: If the response buffer is not below 2 GB, cpcmd can sleep + */ +extern int cpcmd(const char *cmd, char *response, int rlen, int *response_code); #endif /*__s390x__*/ #endif diff --git a/include/asm-s390/debug.h b/include/asm-s390/debug.h index 6bbcdea..92360d9 100644 --- a/include/asm-s390/debug.h +++ b/include/asm-s390/debug.h @@ -9,6 +9,8 @@ #ifndef DEBUG_H #define DEBUG_H +#include <linux/config.h> +#include <linux/fs.h> #include <linux/string.h> /* Note: @@ -31,19 +33,18 @@ struct __debug_entry{ } __attribute__((packed)); -#define __DEBUG_FEATURE_VERSION 1 /* version of debug feature */ +#define __DEBUG_FEATURE_VERSION 2 /* version of debug feature */ #ifdef __KERNEL__ #include <linux/spinlock.h> #include <linux/kernel.h> #include <linux/time.h> -#include <linux/proc_fs.h> #define DEBUG_MAX_LEVEL 6 /* debug levels range from 0 to 6 */ #define DEBUG_OFF_LEVEL -1 /* level where debug is switched off */ #define DEBUG_FLUSH_ALL -1 /* parameter to flush all areas */ #define DEBUG_MAX_VIEWS 10 /* max number of views in proc fs */ -#define DEBUG_MAX_PROCF_LEN 64 /* max length for a proc file name */ +#define DEBUG_MAX_NAME_LEN 64 /* max length for a debugfs file name */ #define DEBUG_DEFAULT_LEVEL 3 /* initial debug level */ #define DEBUG_DIR_ROOT "s390dbf" /* name of debug root directory in proc fs */ @@ -64,16 +65,17 @@ typedef struct debug_info { spinlock_t lock; int level; int nr_areas; - int page_order; + int pages_per_area; int buf_size; int entry_size; - debug_entry_t** areas; + debug_entry_t*** areas; int active_area; - int *active_entry; - struct proc_dir_entry* proc_root_entry; - struct proc_dir_entry* proc_entries[DEBUG_MAX_VIEWS]; + int *active_pages; + int *active_entries; + struct dentry* debugfs_root_entry; + struct dentry* debugfs_entries[DEBUG_MAX_VIEWS]; struct debug_view* views[DEBUG_MAX_VIEWS]; - char name[DEBUG_MAX_PROCF_LEN]; + char name[DEBUG_MAX_NAME_LEN]; } debug_info_t; typedef int (debug_header_proc_t) (debug_info_t* id, @@ -98,7 +100,7 @@ int debug_dflt_header_fn(debug_info_t* id, struct debug_view* view, int area, debug_entry_t* entry, char* out_buf); struct debug_view { - char name[DEBUG_MAX_PROCF_LEN]; + char name[DEBUG_MAX_NAME_LEN]; debug_prolog_proc_t* prolog_proc; debug_header_proc_t* header_proc; debug_format_proc_t* format_proc; @@ -120,7 +122,7 @@ debug_entry_t* debug_exception_common(debug_info_t* id, int level, /* Debug Feature API: */ -debug_info_t* debug_register(char* name, int pages_index, int nr_areas, +debug_info_t* debug_register(char* name, int pages, int nr_areas, int buf_size); void debug_unregister(debug_info_t* id); @@ -132,7 +134,8 @@ void debug_stop_all(void); extern inline debug_entry_t* debug_event(debug_info_t* id, int level, void* data, int length) { - if ((!id) || (level > id->level)) return NULL; + if ((!id) || (level > id->level) || (id->pages_per_area == 0)) + return NULL; return debug_event_common(id,level,data,length); } @@ -140,7 +143,8 @@ extern inline debug_entry_t* debug_int_event(debug_info_t* id, int level, unsigned int tag) { unsigned int t=tag; - if ((!id) || (level > id->level)) return NULL; + if ((!id) || (level > id->level) || (id->pages_per_area == 0)) + return NULL; return debug_event_common(id,level,&t,sizeof(unsigned int)); } @@ -148,14 +152,16 @@ extern inline debug_entry_t * debug_long_event (debug_info_t* id, int level, unsigned long tag) { unsigned long t=tag; - if ((!id) || (level > id->level)) return NULL; 
+ if ((!id) || (level > id->level) || (id->pages_per_area == 0)) + return NULL; return debug_event_common(id,level,&t,sizeof(unsigned long)); } extern inline debug_entry_t* debug_text_event(debug_info_t* id, int level, const char* txt) { - if ((!id) || (level > id->level)) return NULL; + if ((!id) || (level > id->level) || (id->pages_per_area == 0)) + return NULL; return debug_event_common(id,level,txt,strlen(txt)); } @@ -167,7 +173,8 @@ debug_sprintf_event(debug_info_t* id,int level,char *string,...) extern inline debug_entry_t* debug_exception(debug_info_t* id, int level, void* data, int length) { - if ((!id) || (level > id->level)) return NULL; + if ((!id) || (level > id->level) || (id->pages_per_area == 0)) + return NULL; return debug_exception_common(id,level,data,length); } @@ -175,7 +182,8 @@ extern inline debug_entry_t* debug_int_exception(debug_info_t* id, int level, unsigned int tag) { unsigned int t=tag; - if ((!id) || (level > id->level)) return NULL; + if ((!id) || (level > id->level) || (id->pages_per_area == 0)) + return NULL; return debug_exception_common(id,level,&t,sizeof(unsigned int)); } @@ -183,14 +191,16 @@ extern inline debug_entry_t * debug_long_exception (debug_info_t* id, int level, unsigned long tag) { unsigned long t=tag; - if ((!id) || (level > id->level)) return NULL; + if ((!id) || (level > id->level) || (id->pages_per_area == 0)) + return NULL; return debug_exception_common(id,level,&t,sizeof(unsigned long)); } extern inline debug_entry_t* debug_text_exception(debug_info_t* id, int level, const char* txt) { - if ((!id) || (level > id->level)) return NULL; + if ((!id) || (level > id->level) || (id->pages_per_area == 0)) + return NULL; return debug_exception_common(id,level,txt,strlen(txt)); } diff --git a/include/asm-s390/kexec.h b/include/asm-s390/kexec.h new file mode 100644 index 0000000..54cf7d9 --- /dev/null +++ b/include/asm-s390/kexec.h @@ -0,0 +1,42 @@ +/* + * include/asm-s390/kexec.h + * + * (C) Copyright IBM Corp. 2005 + * + * Author(s): Rolf Adelsberger <adelsberger@de.ibm.com> + * + */ + +#ifndef _S390_KEXEC_H +#define _S390_KEXEC_H + +#include <asm/page.h> +#include <asm/processor.h> +/* + * KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return. + * I.e. Maximum page that is mapped directly into kernel memory, + * and kmap is not required. 
+ */ + +/* Maximum physical address we can use pages from */ +#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) + +/* Maximum address we can reach in physical address mode */ +#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) + +/* Maximum address we can use for the control pages */ +/* Not more than 2GB */ +#define KEXEC_CONTROL_MEMORY_LIMIT (1<<31) + +/* Allocate one page for the pdp and the second for the code */ +#define KEXEC_CONTROL_CODE_SIZE 4096 + +/* The native architecture */ +#define KEXEC_ARCH KEXEC_ARCH_S390 + +#define MAX_NOTE_BYTES 1024 +typedef u32 note_buf_t[MAX_NOTE_BYTES/4]; + +extern note_buf_t crash_notes[]; + +#endif /*_S390_KEXEC_H */ diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h index df5172f..76b5b19 100644 --- a/include/asm-s390/lowcore.h +++ b/include/asm-s390/lowcore.h @@ -109,10 +109,14 @@ #ifndef __s390x__ #define __LC_PFAULT_INTPARM 0x080 +#define __LC_CPU_TIMER_SAVE_AREA 0x0D8 #define __LC_AREGS_SAVE_AREA 0x120 +#define __LC_GPREGS_SAVE_AREA 0x180 #define __LC_CREGS_SAVE_AREA 0x1C0 #else /* __s390x__ */ #define __LC_PFAULT_INTPARM 0x11B8 +#define __LC_GPREGS_SAVE_AREA 0x1280 +#define __LC_CPU_TIMER_SAVE_AREA 0x1328 #define __LC_AREGS_SAVE_AREA 0x1340 #define __LC_CREGS_SAVE_AREA 0x1380 #endif /* __s390x__ */ @@ -167,7 +171,8 @@ struct _lowcore __u16 subchannel_nr; /* 0x0ba */ __u32 io_int_parm; /* 0x0bc */ __u32 io_int_word; /* 0x0c0 */ - __u8 pad3[0xD8-0xC4]; /* 0x0c4 */ + __u8 pad3[0xD4-0xC4]; /* 0x0c4 */ + __u32 extended_save_area_addr; /* 0x0d4 */ __u32 cpu_timer_save_area[2]; /* 0x0d8 */ __u32 clock_comp_save_area[2]; /* 0x0e0 */ __u32 mcck_interruption_code[2]; /* 0x0e8 */ diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h index fb46e90..8bd14de 100644 --- a/include/asm-s390/processor.h +++ b/include/asm-s390/processor.h @@ -207,6 +207,18 @@ unsigned long get_wchan(struct task_struct *p); #endif /* __s390x__ */ /* + * Set PSW to specified value. + */ +static inline void __load_psw(psw_t psw) +{ +#ifndef __s390x__ + asm volatile ("lpsw 0(%0)" : : "a" (&psw), "m" (psw) : "cc" ); +#else + asm volatile ("lpswe 0(%0)" : : "a" (&psw), "m" (psw) : "cc" ); +#endif +} + +/* * Set PSW mask to specified value, while leaving the * PSW addr pointing to the next instruction. 
*/ @@ -214,8 +226,8 @@ unsigned long get_wchan(struct task_struct *p); static inline void __load_psw_mask (unsigned long mask) { unsigned long addr; - psw_t psw; + psw.mask = mask; #ifndef __s390x__ @@ -241,30 +253,8 @@ static inline void __load_psw_mask (unsigned long mask) */ static inline void enabled_wait(void) { - unsigned long reg; - psw_t wait_psw; - - wait_psw.mask = PSW_BASE_BITS | PSW_MASK_IO | PSW_MASK_EXT | - PSW_MASK_MCHECK | PSW_MASK_WAIT | PSW_DEFAULT_KEY; -#ifndef __s390x__ - asm volatile ( - " basr %0,0\n" - "0: la %0,1f-0b(%0)\n" - " st %0,4(%1)\n" - " oi 4(%1),0x80\n" - " lpsw 0(%1)\n" - "1:" - : "=&a" (reg) : "a" (&wait_psw), "m" (wait_psw) - : "memory", "cc" ); -#else /* __s390x__ */ - asm volatile ( - " larl %0,0f\n" - " stg %0,8(%1)\n" - " lpswe 0(%1)\n" - "0:" - : "=&a" (reg) : "a" (&wait_psw), "m" (wait_psw) - : "memory", "cc" ); -#endif /* __s390x__ */ + __load_psw_mask(PSW_BASE_BITS | PSW_MASK_IO | PSW_MASK_EXT | + PSW_MASK_MCHECK | PSW_MASK_WAIT | PSW_DEFAULT_KEY); } /* @@ -273,13 +263,11 @@ static inline void enabled_wait(void) static inline void disabled_wait(unsigned long code) { - char psw_buffer[2*sizeof(psw_t)]; unsigned long ctl_buf; - psw_t *dw_psw = (psw_t *)(((unsigned long) &psw_buffer+sizeof(psw_t)-1) - & -sizeof(psw_t)); + psw_t dw_psw; - dw_psw->mask = PSW_BASE_BITS | PSW_MASK_WAIT; - dw_psw->addr = code; + dw_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT; + dw_psw.addr = code; /* * Store status and then load disabled wait psw, * the processor is dead afterwards @@ -301,7 +289,7 @@ static inline void disabled_wait(unsigned long code) " oi 0x1c0,0x10\n" /* fake protection bit */ " lpsw 0(%1)" : "=m" (ctl_buf) - : "a" (dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc" ); + : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc" ); #else /* __s390x__ */ asm volatile (" stctg 0,0,0(%2)\n" " ni 4(%2),0xef\n" /* switch off protection */ @@ -333,7 +321,7 @@ static inline void disabled_wait(unsigned long code) " oi 0x384(1),0x10\n" /* fake protection bit */ " lpswe 0(%1)" : "=m" (ctl_buf) - : "a" (dw_psw), "a" (&ctl_buf), + : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0", "1"); #endif /* __s390x__ */ } diff --git a/include/asm-s390/ptrace.h b/include/asm-s390/ptrace.h index 4eff8f2..fc7c96e 100644 --- a/include/asm-s390/ptrace.h +++ b/include/asm-s390/ptrace.h @@ -276,7 +276,7 @@ typedef struct #endif /* __s390x__ */ #define PSW_KERNEL_BITS (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY | \ - PSW_DEFAULT_KEY) + PSW_MASK_MCHECK | PSW_DEFAULT_KEY) #define PSW_USER_BITS (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME | \ PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | \ PSW_MASK_PSTATE | PSW_DEFAULT_KEY) diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h index 9473786..dd50e57 100644 --- a/include/asm-s390/smp.h +++ b/include/asm-s390/smp.h @@ -47,7 +47,7 @@ extern int smp_call_function_on(void (*func) (void *info), void *info, #define PROC_CHANGE_PENALTY 20 /* Schedule penalty */ -#define smp_processor_id() (S390_lowcore.cpu_data.cpu_nr) +#define raw_smp_processor_id() (S390_lowcore.cpu_data.cpu_nr) extern int smp_get_cpu(cpumask_t cpu_map); extern void smp_put_cpu(int cpu); diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h index 81514d7..b4a9f05 100644 --- a/include/asm-s390/system.h +++ b/include/asm-s390/system.h @@ -16,6 +16,7 @@ #include <asm/types.h> #include <asm/ptrace.h> #include <asm/setup.h> +#include <asm/processor.h> #ifdef __KERNEL__ @@ -103,29 +104,18 @@ static inline void restore_access_regs(unsigned int *acrs) prev = 
__switch_to(prev,next); \ } while (0) -#define prepare_arch_switch(rq, next) do { } while(0) -#define task_running(rq, p) ((rq)->curr == (p)) - #ifdef CONFIG_VIRT_CPU_ACCOUNTING extern void account_user_vtime(struct task_struct *); extern void account_system_vtime(struct task_struct *); - -#define finish_arch_switch(rq, prev) do { \ - set_fs(current->thread.mm_segment); \ - spin_unlock(&(rq)->lock); \ - account_system_vtime(prev); \ - local_irq_enable(); \ -} while (0) - #else +#define account_system_vtime(prev) do { } while (0) +#endif #define finish_arch_switch(rq, prev) do { \ set_fs(current->thread.mm_segment); \ - spin_unlock_irq(&(rq)->lock); \ + account_system_vtime(prev); \ } while (0) -#endif - #define nop() __asm__ __volatile__ ("nop") #define xchg(ptr,x) \ @@ -331,9 +321,6 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) #ifdef __s390x__ -#define __load_psw(psw) \ - __asm__ __volatile__("lpswe 0(%0)" : : "a" (&psw), "m" (psw) : "cc" ); - #define __ctl_load(array, low, high) ({ \ typedef struct { char _[sizeof(array)]; } addrtype; \ __asm__ __volatile__ ( \ @@ -390,9 +377,6 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) #else /* __s390x__ */ -#define __load_psw(psw) \ - __asm__ __volatile__("lpsw 0(%0)" : : "a" (&psw) : "cc" ); - #define __ctl_load(array, low, high) ({ \ typedef struct { char _[sizeof(array)]; } addrtype; \ __asm__ __volatile__ ( \ @@ -451,6 +435,20 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) /* For spinlocks etc */ #define local_irq_save(x) ((x) = local_irq_disable()) +/* + * Use to set psw mask except for the first byte which + * won't be changed by this function. + */ +static inline void +__set_psw_mask(unsigned long mask) +{ + local_save_flags(mask); + __load_psw_mask(mask); +} + +#define local_mcck_enable() __set_psw_mask(PSW_KERNEL_BITS) +#define local_mcck_disable() __set_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK) + #ifdef CONFIG_SMP extern void smp_ctl_set_bit(int cr, int bit); diff --git a/include/asm-s390/thread_info.h b/include/asm-s390/thread_info.h index aade85c..6c18a3f 100644 --- a/include/asm-s390/thread_info.h +++ b/include/asm-s390/thread_info.h @@ -50,7 +50,7 @@ struct thread_info { struct exec_domain *exec_domain; /* execution domain */ unsigned long flags; /* low level flags */ unsigned int cpu; /* current CPU */ - unsigned int preempt_count; /* 0 => preemptable */ + int preempt_count; /* 0 => preemptable, <0 => BUG */ struct restart_block restart_block; }; @@ -96,6 +96,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_RESTART_SVC 4 /* restart svc with new svc number */ #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ #define TIF_SINGLE_STEP 6 /* deliver sigtrap on return to user */ +#define TIF_MCCK_PENDING 7 /* machine check handling is pending */ #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ @@ -109,6 +110,7 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_RESTART_SVC (1<<TIF_RESTART_SVC) #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) #define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP) +#define _TIF_MCCK_PENDING (1<<TIF_MCCK_PENDING) #define _TIF_USEDFPU (1<<TIF_USEDFPU) #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) #define _TIF_31BIT (1<<TIF_31BIT) diff --git a/include/asm-s390/unistd.h b/include/asm-s390/unistd.h index f1a204f..363db45 100644 --- 
a/include/asm-s390/unistd.h +++ b/include/asm-s390/unistd.h @@ -269,7 +269,7 @@ #define __NR_mq_timedreceive 274 #define __NR_mq_notify 275 #define __NR_mq_getsetattr 276 -/* Number 277 is reserved for new sys_kexec_load */ +#define __NR_kexec_load 277 #define __NR_add_key 278 #define __NR_request_key 279 #define __NR_keyctl 280 diff --git a/include/asm-s390/user.h b/include/asm-s390/user.h index c64f8c1..1dc74ba 100644 --- a/include/asm-s390/user.h +++ b/include/asm-s390/user.h @@ -10,7 +10,7 @@ #define _S390_USER_H #include <asm/page.h> -#include <linux/ptrace.h> +#include <asm/ptrace.h> /* Core file format: The core file is written in such a way that gdb can understand it and provide useful information to the user (under linux we use the 'trad-core' bfd). There are quite a number of diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h index 4c6d129..180467b 100644 --- a/include/asm-sh/page.h +++ b/include/asm-sh/page.h @@ -31,6 +31,7 @@ #define HPAGE_SIZE (1UL << HPAGE_SHIFT) #define HPAGE_MASK (~(HPAGE_SIZE-1)) #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT-PAGE_SHIFT) +#define ARCH_HAS_SETCLEAR_HUGE_PTE #endif #ifdef __KERNEL__ diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h index cd847a4..ecb9095 100644 --- a/include/asm-sh/pgtable.h +++ b/include/asm-sh/pgtable.h @@ -196,6 +196,7 @@ static inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _ static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; } static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; } static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; } +static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; } /* * Macro and implementation to make a page protection as uncachable. 
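A minimal sketch of how generic hugetlb code can consume the pte_mkhuge() accessor that the sh/sh64 (and later sparc64/x86_64) hunks introduce alongside ARCH_HAS_SETCLEAR_HUGE_PTE. The function below is illustrative only: mk_pte(), pte_mkdirty() and pte_mkwrite() are the existing pte helpers, while the function name and surrounding context are hypothetical rather than code from this merge.

#include <linux/mm.h>
#include <asm/pgtable.h>

static pte_t make_huge_pte_sketch(struct vm_area_struct *vma,
				  struct page *page)
{
	pte_t entry;

	/* Start from a normal mapping of the page with the VMA's protection. */
	entry = mk_pte(page, vma->vm_page_prot);
	/* Map the huge page read-write and pre-dirtied. */
	entry = pte_mkwrite(pte_mkdirty(entry));
	/* Set the architecture's huge-page bit(s) via the new helper. */
	entry = pte_mkhuge(entry);
	return entry;
}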
diff --git a/include/asm-sh/smp.h b/include/asm-sh/smp.h index 38b5446..f19a8b3 100644 --- a/include/asm-sh/smp.h +++ b/include/asm-sh/smp.h @@ -25,7 +25,7 @@ extern cpumask_t cpu_possible_map; #define cpu_online(cpu) cpu_isset(cpu, cpu_online_map) -#define smp_processor_id() (current_thread_info()->cpu) +#define raw_smp_processor_id() (current_thread_info()->cpu) /* I've no idea what the real meaning of this is */ #define PROC_CHANGE_PENALTY 20 diff --git a/include/asm-sh/thread_info.h b/include/asm-sh/thread_info.h index d82f883..46080ce 100644 --- a/include/asm-sh/thread_info.h +++ b/include/asm-sh/thread_info.h @@ -20,14 +20,14 @@ struct thread_info { struct exec_domain *exec_domain; /* execution domain */ __u32 flags; /* low level flags */ __u32 cpu; - __s32 preempt_count; /* 0 => preemptable, <0 => BUG */ + int preempt_count; /* 0 => preemptable, <0 => BUG */ struct restart_block restart_block; __u8 supervisor_stack[0]; }; #endif -#define PREEMPT_ACTIVE 0x4000000 +#define PREEMPT_ACTIVE 0x10000000 /* * macros/functions for gaining access to the thread information structure diff --git a/include/asm-sh64/page.h b/include/asm-sh64/page.h index e1f7f5a..d6167f1 100644 --- a/include/asm-sh64/page.h +++ b/include/asm-sh64/page.h @@ -41,6 +41,7 @@ #define HPAGE_SIZE (1UL << HPAGE_SHIFT) #define HPAGE_MASK (~(HPAGE_SIZE-1)) #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT-PAGE_SHIFT) +#define ARCH_HAS_SETCLEAR_HUGE_PTE #endif #ifdef __KERNEL__ diff --git a/include/asm-sh64/pgtable.h b/include/asm-sh64/pgtable.h index 525e152..78ac6be 100644 --- a/include/asm-sh64/pgtable.h +++ b/include/asm-sh64/pgtable.h @@ -430,6 +430,8 @@ extern inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | extern inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_EXECUTE)); return pte; } extern inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; } extern inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; } +extern inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; } + /* * Conversion functions: convert a page and protection to a page entry. diff --git a/include/asm-sh64/thread_info.h b/include/asm-sh64/thread_info.h index e65f394..10f024c 100644 --- a/include/asm-sh64/thread_info.h +++ b/include/asm-sh64/thread_info.h @@ -22,7 +22,7 @@ struct thread_info { struct exec_domain *exec_domain; /* execution domain */ unsigned long flags; /* low level flags */ /* Put the 4 32-bit fields together to make asm offsetting easier. 
*/ - __s32 preempt_count; /* 0 => preemptable, <0 => BUG */ + int preempt_count; /* 0 => preemptable, <0 => BUG */ __u16 cpu; mm_segment_t addr_limit; @@ -73,7 +73,7 @@ static inline struct thread_info *current_thread_info(void) #define THREAD_SIZE 8192 -#define PREEMPT_ACTIVE 0x4000000 +#define PREEMPT_ACTIVE 0x10000000 /* thread information flags */ #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ diff --git a/include/asm-sparc/smp.h b/include/asm-sparc/smp.h index f986c0d..4f96d83 100644 --- a/include/asm-sparc/smp.h +++ b/include/asm-sparc/smp.h @@ -148,7 +148,7 @@ extern __inline__ int hard_smp_processor_id(void) } #endif -#define smp_processor_id() (current_thread_info()->cpu) +#define raw_smp_processor_id() (current_thread_info()->cpu) #define prof_multiplier(__cpu) cpu_data(__cpu).multiplier #define prof_counter(__cpu) cpu_data(__cpu).counter diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h index 80cf20c..898562e 100644 --- a/include/asm-sparc/system.h +++ b/include/asm-sparc/system.h @@ -101,7 +101,7 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr, * SWITCH_ENTER and SWITH_DO_LAZY_FPU do not work yet (e.g. SMP does not work) * XXX WTF is the above comment? Found in late teen 2.4.x. */ -#define prepare_arch_switch(rq, next) do { \ +#define prepare_arch_switch(next) do { \ __asm__ __volatile__( \ ".globl\tflush_patch_switch\nflush_patch_switch:\n\t" \ "save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \ @@ -109,8 +109,6 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr, "save %sp, -0x40, %sp\n\t" \ "restore; restore; restore; restore; restore; restore; restore"); \ } while(0) -#define finish_arch_switch(rq, next) spin_unlock_irq(&(rq)->lock) -#define task_running(rq, p) ((rq)->curr == (p)) /* Much care has gone into this code, do not touch it. * diff --git a/include/asm-sparc/thread_info.h b/include/asm-sparc/thread_info.h index 104f03c..ff6ccb3 100644 --- a/include/asm-sparc/thread_info.h +++ b/include/asm-sparc/thread_info.h @@ -30,9 +30,9 @@ struct thread_info { struct task_struct *task; /* main task structure */ struct exec_domain *exec_domain; /* execution domain */ unsigned long flags; /* low level flags */ - int cpu; /* cpu we're on */ - int preempt_count; + int preempt_count; /* 0 => preemptable, + <0 => BUG */ int softirq_count; int hardirq_count; diff --git a/include/asm-sparc/uaccess.h b/include/asm-sparc/uaccess.h index f461144..0a780e8 100644 --- a/include/asm-sparc/uaccess.h +++ b/include/asm-sparc/uaccess.h @@ -41,10 +41,11 @@ * No one can read/write anything from userland in the kernel space by setting * large size and address near to PAGE_OFFSET - a fault will break his intentions. 
*/ -#define __user_ok(addr,size) ((addr) < STACK_TOP) +#define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; }) #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) #define __access_ok(addr,size) (__user_ok((addr) & get_fs().seg,(size))) -#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size)) +#define access_ok(type, addr, size) \ + ({ (void)(type); __access_ok((unsigned long)(addr), size); }) /* this function will go away soon - use access_ok() instead */ static inline int __deprecated verify_area(int type, const void __user * addr, unsigned long size) diff --git a/include/asm-sparc64/agp.h b/include/asm-sparc64/agp.h index ba05bdf..58f8cb6 100644 --- a/include/asm-sparc64/agp.h +++ b/include/asm-sparc64/agp.h @@ -8,4 +8,14 @@ #define flush_agp_mappings() #define flush_agp_cache() mb() +/* Convert a physical address to an address suitable for the GART. */ +#define phys_to_gart(x) (x) +#define gart_to_phys(x) (x) + +/* GATT allocation. Returns/accepts GATT kernel virtual address. */ +#define alloc_gatt_pages(order) \ + ((char *)__get_free_pages(GFP_KERNEL, (order))) +#define free_gatt_pages(table, order) \ + free_pages((unsigned long)(table), (order)) + #endif diff --git a/include/asm-sparc64/compat.h b/include/asm-sparc64/compat.h index 22f5805..b59122d 100644 --- a/include/asm-sparc64/compat.h +++ b/include/asm-sparc64/compat.h @@ -25,6 +25,7 @@ typedef s32 compat_daddr_t; typedef u32 compat_caddr_t; typedef __kernel_fsid_t compat_fsid_t; typedef s32 compat_key_t; +typedef s32 compat_timer_t; typedef s32 compat_int_t; typedef s32 compat_long_t; diff --git a/include/asm-sparc64/iommu.h b/include/asm-sparc64/iommu.h index 5fd16e4..0de7a3d 100644 --- a/include/asm-sparc64/iommu.h +++ b/include/asm-sparc64/iommu.h @@ -16,4 +16,6 @@ #define IOPTE_CACHE 0x0000000000000010UL /* Cached (in UPA E-cache) */ #define IOPTE_WRITE 0x0000000000000002UL /* Writeable */ +#define IOMMU_NUM_CTXS 4096 + #endif /* !(_SPARC_IOMMU_H) */ diff --git a/include/asm-sparc64/kdebug.h b/include/asm-sparc64/kdebug.h index f70d3da..6321f5a 100644 --- a/include/asm-sparc64/kdebug.h +++ b/include/asm-sparc64/kdebug.h @@ -16,7 +16,7 @@ struct die_args { }; /* Note - you should never unregister because that can race with NMIs. - * If you really want to do it first unregister - then synchronize_kernel + * If you really want to do it first unregister - then synchronize_sched * - then free. */ int register_die_notifier(struct notifier_block *nb); diff --git a/include/asm-sparc64/page.h b/include/asm-sparc64/page.h index 219ea04..b87dbbd 100644 --- a/include/asm-sparc64/page.h +++ b/include/asm-sparc64/page.h @@ -95,6 +95,8 @@ typedef unsigned long pgprot_t; #define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT) #define HPAGE_MASK (~(HPAGE_SIZE - 1UL)) #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) +#define ARCH_HAS_SETCLEAR_HUGE_PTE +#define ARCH_HAS_HUGETLB_PREFAULT_HOOK #endif #define TASK_UNMAPPED_BASE (test_thread_flag(TIF_32BIT) ? \ diff --git a/include/asm-sparc64/pbm.h b/include/asm-sparc64/pbm.h index 9299963..4c15610a 100644 --- a/include/asm-sparc64/pbm.h +++ b/include/asm-sparc64/pbm.h @@ -15,6 +15,7 @@ #include <asm/io.h> #include <asm/page.h> #include <asm/oplib.h> +#include <asm/iommu.h> /* The abstraction used here is that there are PCI controllers, * each with one (Sabre) or two (PSYCHO/SCHIZO) PCI bus modules @@ -40,9 +41,6 @@ struct pci_iommu { */ spinlock_t lock; - /* Context allocator. */ - unsigned int iommu_cur_ctx; - /* IOMMU page table, a linear array of ioptes. 
*/ iopte_t *page_table; /* The page table itself. */ int page_table_sz_bits; /* log2 of ow many pages does it map? */ @@ -87,6 +85,10 @@ struct pci_iommu { u16 flush; } alloc_info[PBM_NCLUSTERS]; + /* CTX allocation. */ + unsigned long ctx_lowest_free; + unsigned long ctx_bitmap[IOMMU_NUM_CTXS / (sizeof(unsigned long) * 8)]; + /* Here a PCI controller driver describes the areas of * PCI memory space where DMA to/from physical memory * are addressed. Drivers interrogate the PCI layer diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h index ae2cd5b..1ae00c5 100644 --- a/include/asm-sparc64/pgtable.h +++ b/include/asm-sparc64/pgtable.h @@ -286,6 +286,7 @@ static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot) #define pte_mkyoung(pte) (__pte(pte_val(pte) | _PAGE_ACCESSED | _PAGE_R)) #define pte_mkwrite(pte) (__pte(pte_val(pte) | _PAGE_WRITE)) #define pte_mkdirty(pte) (__pte(pte_val(pte) | _PAGE_MODIFIED | _PAGE_W)) +#define pte_mkhuge(pte) (__pte(pte_val(pte) | _PAGE_SZHUGE)) /* to find an entry in a page-table-directory. */ #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) diff --git a/include/asm-sparc64/processor.h b/include/asm-sparc64/processor.h index bc1445b..d0bee24 100644 --- a/include/asm-sparc64/processor.h +++ b/include/asm-sparc64/processor.h @@ -192,6 +192,40 @@ extern unsigned long get_wchan(struct task_struct *task); #define cpu_relax() barrier() +/* Prefetch support. This is tuned for UltraSPARC-III and later. + * UltraSPARC-I will treat these as nops, and UltraSPARC-II has + * a shallower prefetch queue than later chips. + */ +#define ARCH_HAS_PREFETCH +#define ARCH_HAS_PREFETCHW +#define ARCH_HAS_SPINLOCK_PREFETCH + +static inline void prefetch(const void *x) +{ + /* We do not use the read prefetch mnemonic because that + * prefetches into the prefetch-cache which only is accessible + * by floating point operations in UltraSPARC-III and later. + * By contrast, "#one_write" prefetches into the L2 cache + * in shared state. + */ + __asm__ __volatile__("prefetch [%0], #one_write" + : /* no outputs */ + : "r" (x)); +} + +static inline void prefetchw(const void *x) +{ + /* The most optimal prefetch to use for writes is + * "#n_writes". This brings the cacheline into the + * L2 cache in "owned" state. + */ + __asm__ __volatile__("prefetch [%0], #n_writes" + : /* no outputs */ + : "r" (x)); +} + +#define spin_lock_prefetch(x) prefetchw(x) + #endif /* !(__ASSEMBLY__) */ #endif /* !(__ASM_SPARC64_PROCESSOR_H) */ diff --git a/include/asm-sparc64/smp.h b/include/asm-sparc64/smp.h index 5e3e06d..110a2de 100644 --- a/include/asm-sparc64/smp.h +++ b/include/asm-sparc64/smp.h @@ -64,7 +64,7 @@ static __inline__ int hard_smp_processor_id(void) } } -#define smp_processor_id() (current_thread_info()->cpu) +#define raw_smp_processor_id() (current_thread_info()->cpu) #endif /* !(__ASSEMBLY__) */ diff --git a/include/asm-sparc64/spitfire.h b/include/asm-sparc64/spitfire.h index ad78ce6..9d7613e 100644 --- a/include/asm-sparc64/spitfire.h +++ b/include/asm-sparc64/spitfire.h @@ -48,6 +48,9 @@ enum ultra_tlb_layout { extern enum ultra_tlb_layout tlb_type; +extern int cheetah_pcache_forced_on; +extern void cheetah_enable_pcache(void); + #define sparc64_highest_locked_tlbent() \ (tlb_type == spitfire ? 
\ SPITFIRE_HIGHEST_LOCKED_TLBENT : \ diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h index fd12ca3..f9be2c5 100644 --- a/include/asm-sparc64/system.h +++ b/include/asm-sparc64/system.h @@ -139,19 +139,13 @@ extern void __flushw_user(void); #define flush_user_windows flushw_user #define flush_register_windows flushw_all -#define prepare_arch_switch(rq, next) \ -do { spin_lock(&(next)->switch_lock); \ - spin_unlock(&(rq)->lock); \ +/* Don't hold the runqueue lock over context switch */ +#define __ARCH_WANT_UNLOCKED_CTXSW +#define prepare_arch_switch(next) \ +do { \ flushw_all(); \ } while (0) -#define finish_arch_switch(rq, prev) \ -do { spin_unlock_irq(&(prev)->switch_lock); \ -} while (0) - -#define task_running(rq, p) \ - ((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock)) - /* See what happens when you design the chip correctly? * * We tell gcc we clobber all non-fixed-usage registers except diff --git a/include/asm-sparc64/termios.h b/include/asm-sparc64/termios.h index 8effce0..9777a9c 100644 --- a/include/asm-sparc64/termios.h +++ b/include/asm-sparc64/termios.h @@ -100,16 +100,17 @@ struct winsize { #define user_termio_to_kernel_termios(termios, termio) \ ({ \ unsigned short tmp; \ - get_user(tmp, &(termio)->c_iflag); \ + int err; \ + err = get_user(tmp, &(termio)->c_iflag); \ (termios)->c_iflag = (0xffff0000 & ((termios)->c_iflag)) | tmp; \ - get_user(tmp, &(termio)->c_oflag); \ + err |= get_user(tmp, &(termio)->c_oflag); \ (termios)->c_oflag = (0xffff0000 & ((termios)->c_oflag)) | tmp; \ - get_user(tmp, &(termio)->c_cflag); \ + err |= get_user(tmp, &(termio)->c_cflag); \ (termios)->c_cflag = (0xffff0000 & ((termios)->c_cflag)) | tmp; \ - get_user(tmp, &(termio)->c_lflag); \ + err |= get_user(tmp, &(termio)->c_lflag); \ (termios)->c_lflag = (0xffff0000 & ((termios)->c_lflag)) | tmp; \ - copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \ - 0; \ + err |= copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \ + err; \ }) /* @@ -119,53 +120,56 @@ struct winsize { */ #define kernel_termios_to_user_termio(termio, termios) \ ({ \ - put_user((termios)->c_iflag, &(termio)->c_iflag); \ - put_user((termios)->c_oflag, &(termio)->c_oflag); \ - put_user((termios)->c_cflag, &(termio)->c_cflag); \ - put_user((termios)->c_lflag, &(termio)->c_lflag); \ - put_user((termios)->c_line, &(termio)->c_line); \ - copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ + int err; \ + err = put_user((termios)->c_iflag, &(termio)->c_iflag); \ + err |= put_user((termios)->c_oflag, &(termio)->c_oflag); \ + err |= put_user((termios)->c_cflag, &(termio)->c_cflag); \ + err |= put_user((termios)->c_lflag, &(termio)->c_lflag); \ + err |= put_user((termios)->c_line, &(termio)->c_line); \ + err |= copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ if (!((termios)->c_lflag & ICANON)) { \ - put_user((termios)->c_cc[VMIN], &(termio)->c_cc[_VMIN]); \ - put_user((termios)->c_cc[VTIME], &(termio)->c_cc[_VTIME]); \ + err |= put_user((termios)->c_cc[VMIN], &(termio)->c_cc[_VMIN]); \ + err |= put_user((termios)->c_cc[VTIME], &(termio)->c_cc[_VTIME]); \ } \ - 0; \ + err; \ }) #define user_termios_to_kernel_termios(k, u) \ ({ \ - get_user((k)->c_iflag, &(u)->c_iflag); \ - get_user((k)->c_oflag, &(u)->c_oflag); \ - get_user((k)->c_cflag, &(u)->c_cflag); \ - get_user((k)->c_lflag, &(u)->c_lflag); \ - get_user((k)->c_line, &(u)->c_line); \ - copy_from_user((k)->c_cc, (u)->c_cc, NCCS); \ + int err; \ + err = get_user((k)->c_iflag, &(u)->c_iflag); \ + err |= get_user((k)->c_oflag, &(u)->c_oflag); \ 
+ err |= get_user((k)->c_cflag, &(u)->c_cflag); \ + err |= get_user((k)->c_lflag, &(u)->c_lflag); \ + err |= get_user((k)->c_line, &(u)->c_line); \ + err |= copy_from_user((k)->c_cc, (u)->c_cc, NCCS); \ if((k)->c_lflag & ICANON) { \ - get_user((k)->c_cc[VEOF], &(u)->c_cc[VEOF]); \ - get_user((k)->c_cc[VEOL], &(u)->c_cc[VEOL]); \ + err |= get_user((k)->c_cc[VEOF], &(u)->c_cc[VEOF]); \ + err |= get_user((k)->c_cc[VEOL], &(u)->c_cc[VEOL]); \ } else { \ - get_user((k)->c_cc[VMIN], &(u)->c_cc[_VMIN]); \ - get_user((k)->c_cc[VTIME], &(u)->c_cc[_VTIME]); \ + err |= get_user((k)->c_cc[VMIN], &(u)->c_cc[_VMIN]); \ + err |= get_user((k)->c_cc[VTIME], &(u)->c_cc[_VTIME]); \ } \ - 0; \ + err; \ }) #define kernel_termios_to_user_termios(u, k) \ ({ \ - put_user((k)->c_iflag, &(u)->c_iflag); \ - put_user((k)->c_oflag, &(u)->c_oflag); \ - put_user((k)->c_cflag, &(u)->c_cflag); \ - put_user((k)->c_lflag, &(u)->c_lflag); \ - put_user((k)->c_line, &(u)->c_line); \ - copy_to_user((u)->c_cc, (k)->c_cc, NCCS); \ + int err; \ + err = put_user((k)->c_iflag, &(u)->c_iflag); \ + err |= put_user((k)->c_oflag, &(u)->c_oflag); \ + err |= put_user((k)->c_cflag, &(u)->c_cflag); \ + err |= put_user((k)->c_lflag, &(u)->c_lflag); \ + err |= put_user((k)->c_line, &(u)->c_line); \ + err |= copy_to_user((u)->c_cc, (k)->c_cc, NCCS); \ if(!((k)->c_lflag & ICANON)) { \ - put_user((k)->c_cc[VMIN], &(u)->c_cc[_VMIN]); \ - put_user((k)->c_cc[VTIME], &(u)->c_cc[_VTIME]); \ + err |= put_user((k)->c_cc[VMIN], &(u)->c_cc[_VMIN]); \ + err |= put_user((k)->c_cc[VTIME], &(u)->c_cc[_VTIME]); \ } else { \ - put_user((k)->c_cc[VEOF], &(u)->c_cc[VEOF]); \ - put_user((k)->c_cc[VEOL], &(u)->c_cc[VEOL]); \ + err |= put_user((k)->c_cc[VEOF], &(u)->c_cc[VEOF]); \ + err |= put_user((k)->c_cc[VEOL], &(u)->c_cc[VEOL]); \ } \ - 0; \ + err; \ }) #endif /* __KERNEL__ */ diff --git a/include/asm-sparc64/thread_info.h b/include/asm-sparc64/thread_info.h index 517caab..0cd6529 100644 --- a/include/asm-sparc64/thread_info.h +++ b/include/asm-sparc64/thread_info.h @@ -46,7 +46,7 @@ struct thread_info { unsigned long fault_address; struct pt_regs *kregs; struct exec_domain *exec_domain; - int preempt_count; + int preempt_count; /* 0 => preemptable, <0 => BUG */ int __pad; unsigned long *utraps; diff --git a/include/asm-um/page.h b/include/asm-um/page.h index 504ea8e..5afee8a 100644 --- a/include/asm-um/page.h +++ b/include/asm-um/page.h @@ -98,7 +98,13 @@ extern unsigned long uml_physmem; extern unsigned long to_phys(void *virt); extern void *to_virt(unsigned long phys); -#define __pa(virt) to_phys((void *) virt) + +/* Cast to unsigned long before casting to void * to avoid a warning from + * mmap_kmem about cutting a long long down to a void *. Not sure that + * casting is the right thing, but 32-bit UML can't have 64-bit virtual + * addresses + */ +#define __pa(virt) to_phys((void *) (unsigned long) virt) #define __va(phys) to_virt((unsigned long) phys) #define page_to_pfn(page) ((page) - mem_map) diff --git a/include/asm-um/pgtable.h b/include/asm-um/pgtable.h index 510e513..a880409 100644 --- a/include/asm-um/pgtable.h +++ b/include/asm-um/pgtable.h @@ -114,17 +114,9 @@ extern unsigned long end_iomem; extern unsigned long pg0[1024]; /* - * BAD_PAGETABLE is used when we need a bogus page-table, while - * BAD_PAGE is used for a bogus page. - * * ZERO_PAGE is a global shared page that is always zero: used * for zero-mapped memory areas etc.. 
*/ -extern pte_t __bad_page(void); -extern pte_t * __bad_pagetable(void); - -#define BAD_PAGETABLE __bad_pagetable() -#define BAD_PAGE __bad_page() #define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page) diff --git a/include/asm-um/ptrace-i386.h b/include/asm-um/ptrace-i386.h index 04222f3..fe882b9 100644 --- a/include/asm-um/ptrace-i386.h +++ b/include/asm-um/ptrace-i386.h @@ -32,6 +32,10 @@ #define PT_REGS_SYSCALL_RET(r) PT_REGS_EAX(r) #define PT_FIX_EXEC_STACK(sp) do ; while(0) +/* Cope with a conditional i386 definition. */ +#undef profile_pc +#define profile_pc(regs) PT_REGS_IP(regs) + #define user_mode(r) UPT_IS_USER(&(r)->regs) #endif diff --git a/include/asm-um/smp.h b/include/asm-um/smp.h index 4412d5d..d879eba 100644 --- a/include/asm-um/smp.h +++ b/include/asm-um/smp.h @@ -8,7 +8,8 @@ #include "asm/current.h" #include "linux/cpumask.h" -#define smp_processor_id() (current_thread->cpu) +#define raw_smp_processor_id() (current_thread->cpu) + #define cpu_logical_map(n) (n) #define cpu_number_map(n) (n) #define PROC_CHANGE_PENALTY 15 /* Pick a number, any number */ diff --git a/include/asm-um/thread_info.h b/include/asm-um/thread_info.h index a10ea15..97267f0 100644 --- a/include/asm-um/thread_info.h +++ b/include/asm-um/thread_info.h @@ -17,7 +17,7 @@ struct thread_info { struct exec_domain *exec_domain; /* execution domain */ unsigned long flags; /* low level flags */ __u32 cpu; /* current CPU */ - __s32 preempt_count; /* 0 => preemptable, + int preempt_count; /* 0 => preemptable, <0 => BUG */ mm_segment_t addr_limit; /* thread address space: 0-0xBFFFFFFF for user @@ -41,18 +41,17 @@ struct thread_info { #define init_thread_info (init_thread_union.thread_info) #define init_stack (init_thread_union.stack) +#define THREAD_SIZE ((1 << CONFIG_KERNEL_STACK_ORDER) * PAGE_SIZE) /* how to get the thread information struct from C */ static inline struct thread_info *current_thread_info(void) { struct thread_info *ti; - unsigned long mask = PAGE_SIZE * - (1 << CONFIG_KERNEL_STACK_ORDER) - 1; - ti = (struct thread_info *) (((unsigned long) &ti) & ~mask); + unsigned long mask = THREAD_SIZE - 1; + ti = (struct thread_info *) (((unsigned long) &ti) & ~mask); return ti; } /* thread information allocation */ -#define THREAD_SIZE ((1 << CONFIG_KERNEL_STACK_ORDER) * PAGE_SIZE) #define alloc_thread_info(tsk) \ ((struct thread_info *) kmalloc(THREAD_SIZE, GFP_KERNEL)) #define free_thread_info(ti) kfree(ti) @@ -62,7 +61,7 @@ static inline struct thread_info *current_thread_info(void) #endif -#define PREEMPT_ACTIVE 0x4000000 +#define PREEMPT_ACTIVE 0x10000000 #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ #define TIF_SIGPENDING 1 /* signal pending */ diff --git a/include/asm-v850/thread_info.h b/include/asm-v850/thread_info.h index e2ef445..e4cfad9 100644 --- a/include/asm-v850/thread_info.h +++ b/include/asm-v850/thread_info.h @@ -30,7 +30,8 @@ struct thread_info { struct exec_domain *exec_domain; /* execution domain */ unsigned long flags; /* low level flags */ int cpu; /* cpu we're on */ - int preempt_count; + int preempt_count; /* 0 => preemptable, + <0 => BUG */ struct restart_block restart_block; }; diff --git a/include/asm-x86_64/a.out.h b/include/asm-x86_64/a.out.h index 5952914..7255cde 100644 --- a/include/asm-x86_64/a.out.h +++ b/include/asm-x86_64/a.out.h @@ -21,7 +21,7 @@ struct exec #ifdef __KERNEL__ #include <linux/thread_info.h> -#define STACK_TOP (test_thread_flag(TIF_IA32) ? 
IA32_PAGE_OFFSET : TASK_SIZE) +#define STACK_TOP TASK_SIZE #endif #endif /* __A_OUT_GNU_H__ */ diff --git a/include/asm-x86_64/agp.h b/include/asm-x86_64/agp.h index 0bb9019..06c52ee 100644 --- a/include/asm-x86_64/agp.h +++ b/include/asm-x86_64/agp.h @@ -19,4 +19,14 @@ int unmap_page_from_agp(struct page *page); worth it. Would need a page for it. */ #define flush_agp_cache() asm volatile("wbinvd":::"memory") +/* Convert a physical address to an address suitable for the GART. */ +#define phys_to_gart(x) (x) +#define gart_to_phys(x) (x) + +/* GATT allocation. Returns/accepts GATT kernel virtual address. */ +#define alloc_gatt_pages(order) \ + ((char *)__get_free_pages(GFP_KERNEL, (order))) +#define free_gatt_pages(table, order) \ + free_pages((unsigned long)(table), (order)) + #endif diff --git a/include/asm-x86_64/apic.h b/include/asm-x86_64/apic.h index e4b1017..16ec82e 100644 --- a/include/asm-x86_64/apic.h +++ b/include/asm-x86_64/apic.h @@ -77,7 +77,7 @@ static inline void ack_APIC_irq(void) extern int get_maxlvt (void); extern void clear_local_APIC (void); extern void connect_bsp_APIC (void); -extern void disconnect_bsp_APIC (void); +extern void disconnect_bsp_APIC (int virt_wire_setup); extern void disable_local_APIC (void); extern int verify_local_APIC (void); extern void cache_APIC_registers (void); diff --git a/include/asm-x86_64/apicdef.h b/include/asm-x86_64/apicdef.h index bfebdb6..9388062 100644 --- a/include/asm-x86_64/apicdef.h +++ b/include/asm-x86_64/apicdef.h @@ -94,7 +94,7 @@ #define SET_APIC_DELIVERY_MODE(x,y) (((x)&~0x700)|((y)<<8)) #define APIC_MODE_FIXED 0x0 #define APIC_MODE_NMI 0x4 -#define APIC_MODE_EXINT 0x7 +#define APIC_MODE_EXTINT 0x7 #define APIC_LVT1 0x360 #define APIC_LVTERR 0x370 #define APIC_TMICT 0x380 diff --git a/include/asm-x86_64/bitops.h b/include/asm-x86_64/bitops.h index 5dd7727..a31bb99 100644 --- a/include/asm-x86_64/bitops.h +++ b/include/asm-x86_64/bitops.h @@ -411,8 +411,6 @@ static __inline__ int ffs(int x) /* find last set bit */ #define fls(x) generic_fls(x) -#define ARCH_HAS_ATOMIC_UNSIGNED 1 - #endif /* __KERNEL__ */ #endif /* _X86_64_BITOPS_H */ diff --git a/include/asm-x86_64/bug.h b/include/asm-x86_64/bug.h index bdbf66e..3d2a666 100644 --- a/include/asm-x86_64/bug.h +++ b/include/asm-x86_64/bug.h @@ -21,6 +21,8 @@ struct bug_frame { asm volatile("ud2 ; .quad %c1 ; .short %c0" :: \ "i"(__LINE__), "i" (__stringify(__FILE__))) void out_of_line_bug(void); +#else +static inline void out_of_line_bug(void) { } #endif #include <asm-generic/bug.h> diff --git a/include/asm-x86_64/ia32.h b/include/asm-x86_64/ia32.h index c0a7717..6efa00f 100644 --- a/include/asm-x86_64/ia32.h +++ b/include/asm-x86_64/ia32.h @@ -94,7 +94,7 @@ typedef struct compat_siginfo{ /* POSIX.1b timers */ struct { - int _tid; /* timer id */ + compat_timer_t _tid; /* timer id */ int _overrun; /* overrun count */ compat_sigval_t _sigval; /* same as below */ int _sys_private; /* not to be passed to user */ diff --git a/include/asm-x86_64/io.h b/include/asm-x86_64/io.h index 9420270..37fc3f1 100644 --- a/include/asm-x86_64/io.h +++ b/include/asm-x86_64/io.h @@ -124,12 +124,7 @@ extern inline void * phys_to_virt(unsigned long address) /* * Change "struct page" to physical address. 
*/ -#ifdef CONFIG_DISCONTIGMEM -#include <asm/mmzone.h> #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) -#else -#define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT) -#endif #include <asm-generic/iomap.h> diff --git a/include/asm-x86_64/irq.h b/include/asm-x86_64/irq.h index 3af50b3..eb3b7aa 100644 --- a/include/asm-x86_64/irq.h +++ b/include/asm-x86_64/irq.h @@ -52,4 +52,9 @@ struct irqaction; struct pt_regs; int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *); +#ifdef CONFIG_HOTPLUG_CPU +#include <linux/cpumask.h> +extern void fixup_irqs(cpumask_t map); +#endif + #endif /* _ASM_IRQ_H */ diff --git a/include/asm-x86_64/kdebug.h b/include/asm-x86_64/kdebug.h index 6277f75..b903419 100644 --- a/include/asm-x86_64/kdebug.h +++ b/include/asm-x86_64/kdebug.h @@ -14,7 +14,7 @@ struct die_args { }; /* Note - you should never unregister because that can race with NMIs. - If you really want to do it first unregister - then synchronize_kernel - then free. + If you really want to do it first unregister - then synchronize_sched - then free. */ int register_die_notifier(struct notifier_block *nb); extern struct notifier_block *die_chain; diff --git a/include/asm-x86_64/kexec.h b/include/asm-x86_64/kexec.h new file mode 100644 index 0000000..42d2ff1 --- /dev/null +++ b/include/asm-x86_64/kexec.h @@ -0,0 +1,33 @@ +#ifndef _X86_64_KEXEC_H +#define _X86_64_KEXEC_H + +#include <asm/page.h> +#include <asm/proto.h> + +/* + * KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return. + * I.e. Maximum page that is mapped directly into kernel memory, + * and kmap is not required. + * + * So far x86_64 is limited to 40 physical address bits. + */ + +/* Maximum physical address we can use pages from */ +#define KEXEC_SOURCE_MEMORY_LIMIT (0xFFFFFFFFFFUL) +/* Maximum address we can reach in physical address mode */ +#define KEXEC_DESTINATION_MEMORY_LIMIT (0xFFFFFFFFFFUL) +/* Maximum address we can use for the control pages */ +#define KEXEC_CONTROL_MEMORY_LIMIT (0xFFFFFFFFFFUL) + +/* Allocate one page for the pdp and the second for the code */ +#define KEXEC_CONTROL_CODE_SIZE (4096UL + 4096UL) + +/* The native architecture */ +#define KEXEC_ARCH KEXEC_ARCH_X86_64 + +#define MAX_NOTE_BYTES 1024 +typedef u32 note_buf_t[MAX_NOTE_BYTES/4]; + +extern note_buf_t crash_notes[]; + +#endif /* _X86_64_KEXEC_H */ diff --git a/include/asm-x86_64/kprobes.h b/include/asm-x86_64/kprobes.h index bfea52d..6d6d883 100644 --- a/include/asm-x86_64/kprobes.h +++ b/include/asm-x86_64/kprobes.h @@ -38,6 +38,9 @@ typedef u8 kprobe_opcode_t; : (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) #define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry +#define ARCH_SUPPORTS_KRETPROBES + +void kretprobe_trampoline(void); /* Architecture specific copy of original instruction*/ struct arch_specific_insn { diff --git a/include/asm-x86_64/mmzone.h b/include/asm-x86_64/mmzone.h index d95b7c2..7684137 100644 --- a/include/asm-x86_64/mmzone.h +++ b/include/asm-x86_64/mmzone.h @@ -6,7 +6,7 @@ #include <linux/config.h> -#ifdef CONFIG_DISCONTIGMEM +#ifdef CONFIG_NUMA #define VIRTUAL_BUG_ON(x) @@ -30,27 +30,23 @@ static inline __attribute__((pure)) int phys_to_nid(unsigned long addr) return nid; } -#define pfn_to_nid(pfn) phys_to_nid((unsigned long)(pfn) << PAGE_SHIFT) - -#define kvaddr_to_nid(kaddr) phys_to_nid(__pa(kaddr)) #define NODE_DATA(nid) (node_data[nid]) -#define node_mem_map(nid) (NODE_DATA(nid)->node_mem_map) - -#define node_mem_map(nid) (NODE_DATA(nid)->node_mem_map) 
#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) #define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \ NODE_DATA(nid)->node_spanned_pages) -#define local_mapnr(kvaddr) \ - ( (__pa(kvaddr) >> PAGE_SHIFT) - node_start_pfn(kvaddr_to_nid(kvaddr)) ) +#ifdef CONFIG_DISCONTIGMEM + +#define pfn_to_nid(pfn) phys_to_nid((unsigned long)(pfn) << PAGE_SHIFT) +#define kvaddr_to_nid(kaddr) phys_to_nid(__pa(kaddr)) /* AK: this currently doesn't deal with invalid addresses. We'll see if the 2.5 kernel doesn't pass them (2.4 used to). */ #define pfn_to_page(pfn) ({ \ int nid = phys_to_nid(((unsigned long)(pfn)) << PAGE_SHIFT); \ - ((pfn) - node_start_pfn(nid)) + node_mem_map(nid); \ + ((pfn) - node_start_pfn(nid)) + NODE_DATA(nid)->node_mem_map; \ }) #define page_to_pfn(page) \ @@ -60,4 +56,8 @@ static inline __attribute__((pure)) int phys_to_nid(unsigned long addr) ({ u8 nid__ = pfn_to_nid(pfn); \ nid__ != 0xff && (pfn) >= node_start_pfn(nid__) && (pfn) <= node_end_pfn(nid__); })) #endif + +#define local_mapnr(kvaddr) \ + ( (__pa(kvaddr) >> PAGE_SHIFT) - node_start_pfn(kvaddr_to_nid(kvaddr)) ) +#endif #endif diff --git a/include/asm-x86_64/msr.h b/include/asm-x86_64/msr.h index 513e52c..bc70023 100644 --- a/include/asm-x86_64/msr.h +++ b/include/asm-x86_64/msr.h @@ -57,11 +57,6 @@ (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \ } while(0) -#define rdpmc(counter,low,high) \ - __asm__ __volatile__("rdpmc" \ - : "=a" (low), "=d" (high) \ - : "c" (counter)) - #define write_tsc(val1,val2) wrmsr(0x10, val1, val2) #define rdpmc(counter,low,high) \ diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h index f430480..4313187 100644 --- a/include/asm-x86_64/page.h +++ b/include/asm-x86_64/page.h @@ -28,6 +28,7 @@ #define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) #define HPAGE_MASK (~(HPAGE_SIZE - 1)) #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) +#define ARCH_HAS_HUGETLB_CLEAN_STALE_PGTABLE #ifdef __KERNEL__ #ifndef __ASSEMBLY__ @@ -63,12 +64,14 @@ typedef struct { unsigned long pgprot; } pgprot_t; #define __pgd(x) ((pgd_t) { (x) } ) #define __pgprot(x) ((pgprot_t) { (x) } ) -#define __START_KERNEL 0xffffffff80100000UL +#define __PHYSICAL_START ((unsigned long)CONFIG_PHYSICAL_START) +#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START) #define __START_KERNEL_map 0xffffffff80000000UL #define __PAGE_OFFSET 0xffff810000000000UL #else -#define __START_KERNEL 0xffffffff80100000 +#define __PHYSICAL_START CONFIG_PHYSICAL_START +#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START) #define __START_KERNEL_map 0xffffffff80000000 #define __PAGE_OFFSET 0xffff810000000000 #endif /* !__ASSEMBLY__ */ @@ -118,7 +121,9 @@ extern __inline__ int get_order(unsigned long size) __pa(v); }) #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) -#ifndef CONFIG_DISCONTIGMEM +#define __boot_va(x) __va(x) +#define __boot_pa(x) __pa(x) +#ifdef CONFIG_FLATMEM #define pfn_to_page(pfn) (mem_map + (pfn)) #define page_to_pfn(page) ((unsigned long)((page) - mem_map)) #define pfn_valid(pfn) ((pfn) < max_mapnr) diff --git a/include/asm-x86_64/param.h b/include/asm-x86_64/param.h index b707f05..40b1193 100644 --- a/include/asm-x86_64/param.h +++ b/include/asm-x86_64/param.h @@ -1,9 +1,11 @@ +#include <linux/config.h> + #ifndef _ASMx86_64_PARAM_H #define _ASMx86_64_PARAM_H #ifdef __KERNEL__ -# define HZ 1000 /* Internal kernel timer frequency */ -# define USER_HZ 100 /* .. 
some user interfaces are in "ticks */ +# define HZ CONFIG_HZ /* Internal kernel timer frequency */ +# define USER_HZ 100 /* .. some user interfaces are in "ticks */ #define CLOCKS_PER_SEC (USER_HZ) /* like times() */ #endif diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h index 415d73f..9c71855 100644 --- a/include/asm-x86_64/percpu.h +++ b/include/asm-x86_64/percpu.h @@ -39,7 +39,7 @@ extern void setup_per_cpu_areas(void); #define DEFINE_PER_CPU(type, name) \ __typeof__(type) per_cpu__##name -#define per_cpu(var, cpu) (*((void)cpu, &per_cpu__##var)) +#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var)) #define __get_cpu_var(var) per_cpu__##var #endif /* SMP */ diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h index db2a0ef..4eec176 100644 --- a/include/asm-x86_64/pgtable.h +++ b/include/asm-x86_64/pgtable.h @@ -253,6 +253,7 @@ extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; extern inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; } static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } +#define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT) extern inline pte_t pte_rdprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; } extern inline pte_t pte_exprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; } extern inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; } @@ -263,6 +264,7 @@ extern inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _ extern inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; } extern inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; } extern inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; } +extern inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | __LARGE_PTE)); return pte; } struct vm_area_struct; @@ -290,7 +292,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, */ #define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) -#define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT) static inline int pmd_large(pmd_t pte) { return (pmd_val(pte) & __LARGE_PTE) == __LARGE_PTE; } diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h index d641b19..106f666 100644 --- a/include/asm-x86_64/processor.h +++ b/include/asm-x86_64/processor.h @@ -160,16 +160,17 @@ static inline void clear_in_cr4 (unsigned long mask) /* * User space process size. 47bits minus one guard page. */ -#define TASK_SIZE (0x800000000000UL - 4096) +#define TASK_SIZE64 (0x800000000000UL - 4096) /* This decides where the kernel will search for a free chunk of vm * space during mmap's. */ #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? 0xc0000000 : 0xFFFFe000) -#define TASK_UNMAPPED_32 PAGE_ALIGN(IA32_PAGE_OFFSET/3) -#define TASK_UNMAPPED_64 PAGE_ALIGN(TASK_SIZE/3) -#define TASK_UNMAPPED_BASE \ - (test_thread_flag(TIF_IA32) ? TASK_UNMAPPED_32 : TASK_UNMAPPED_64) + +#define TASK_SIZE (test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE64) +#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? IA32_PAGE_OFFSET : TASK_SIZE64) + +#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE/3) /* * Size of io_bitmap. 
@@ -279,6 +280,14 @@ struct thread_struct { set_fs(USER_DS); \ } while(0) +#define get_debugreg(var, register) \ + __asm__("movq %%db" #register ", %0" \ + :"=r" (var)) +#define set_debugreg(value, register) \ + __asm__("movq %0,%%db" #register \ + : /* no output */ \ + :"r" (value)) + struct task_struct; struct mm_struct; diff --git a/include/asm-x86_64/ptrace.h b/include/asm-x86_64/ptrace.h index 5bbc8d3..ca6f15f 100644 --- a/include/asm-x86_64/ptrace.h +++ b/include/asm-x86_64/ptrace.h @@ -82,6 +82,7 @@ struct pt_regs { #if defined(__KERNEL__) && !defined(__ASSEMBLY__) #define user_mode(regs) (!!((regs)->cs & 3)) +#define user_mode_vm(regs) user_mode(regs) #define instruction_pointer(regs) ((regs)->rip) extern unsigned long profile_pc(struct pt_regs *regs); void signal_fault(struct pt_regs *regs, void __user *frame, char *where); diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h index 96844fe..aeb1b73 100644 --- a/include/asm-x86_64/smp.h +++ b/include/asm-x86_64/smp.h @@ -43,6 +43,8 @@ extern cpumask_t cpu_callout_map; extern void smp_alloc_memory(void); extern volatile unsigned long smp_invalidate_needed; extern int pic_mode; +extern void lock_ipi_call_lock(void); +extern void unlock_ipi_call_lock(void); extern int smp_num_siblings; extern void smp_flush_tlb(void); extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs); @@ -68,7 +70,7 @@ static inline int num_booting_cpus(void) return cpus_weight(cpu_callout_map); } -#define __smp_processor_id() read_pda(cpunumber) +#define raw_smp_processor_id() read_pda(cpunumber) extern __inline int hard_smp_processor_id(void) { @@ -77,6 +79,8 @@ extern __inline int hard_smp_processor_id(void) } extern int safe_smp_processor_id(void); +extern int __cpu_disable(void); +extern void __cpu_die(unsigned int cpu); #endif /* !ASSEMBLY */ diff --git a/include/asm-x86_64/sparsemem.h b/include/asm-x86_64/sparsemem.h new file mode 100644 index 0000000..dabb167 --- /dev/null +++ b/include/asm-x86_64/sparsemem.h @@ -0,0 +1,26 @@ +#ifndef _ASM_X86_64_SPARSEMEM_H +#define _ASM_X86_64_SPARSEMEM_H 1 + +#ifdef CONFIG_SPARSEMEM + +/* + * generic non-linear memory support: + * + * 1) we will not split memory into more chunks than will fit into the flags + * field of the struct page + * + * SECTION_SIZE_BITS 2^n: size of each section + * MAX_PHYSADDR_BITS 2^n: max size of physical address space + * MAX_PHYSMEM_BITS 2^n: how much memory we can have in that space + * + */ + +#define SECTION_SIZE_BITS 27 /* matt - 128 is convenient right now */ +#define MAX_PHYSADDR_BITS 40 +#define MAX_PHYSMEM_BITS 40 + +extern int early_pfn_to_nid(unsigned long pfn); + +#endif /* CONFIG_SPARSEMEM */ + +#endif /* _ASM_X86_64_SPARSEMEM_H */ diff --git a/include/asm-x86_64/suspend.h b/include/asm-x86_64/suspend.h index ec74580..bb9f405 100644 --- a/include/asm-x86_64/suspend.h +++ b/include/asm-x86_64/suspend.h @@ -16,7 +16,7 @@ arch_prepare_suspend(void) struct saved_context { u16 ds, es, fs, gs, ss; unsigned long gs_base, gs_kernel_base, fs_base; - unsigned long cr0, cr2, cr3, cr4; + unsigned long cr0, cr2, cr3, cr4, cr8; u16 gdt_pad; u16 gdt_limit; unsigned long gdt_base; diff --git a/include/asm-x86_64/thread_info.h b/include/asm-x86_64/thread_info.h index f4b3b24..08eb6e4 100644 --- a/include/asm-x86_64/thread_info.h +++ b/include/asm-x86_64/thread_info.h @@ -29,7 +29,7 @@ struct thread_info { __u32 flags; /* low level flags */ __u32 status; /* thread synchronous flags */ __u32 cpu; /* current CPU */ - int preempt_count; + int preempt_count; /* 0 => 
preemptable, <0 => BUG */ mm_segment_t addr_limit; struct restart_block restart_block; diff --git a/include/asm-x86_64/timex.h b/include/asm-x86_64/timex.h index 34f31a1..24ecf6a 100644 --- a/include/asm-x86_64/timex.h +++ b/include/asm-x86_64/timex.h @@ -26,6 +26,9 @@ static inline cycles_t get_cycles (void) extern unsigned int cpu_khz; +extern int read_current_timer(unsigned long *timer_value); +#define ARCH_HAS_READ_CURRENT_TIMER 1 + extern struct vxtime_data vxtime; #endif diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h index 67f24e0..c1bc3fa 100644 --- a/include/asm-x86_64/topology.h +++ b/include/asm-x86_64/topology.h @@ -3,7 +3,7 @@ #include <linux/config.h> -#ifdef CONFIG_DISCONTIGMEM +#ifdef CONFIG_NUMA #include <asm/mpspec.h> #include <asm/bitops.h> @@ -13,8 +13,8 @@ extern cpumask_t cpu_online_map; extern unsigned char cpu_to_node[]; +extern unsigned char pci_bus_to_node[]; extern cpumask_t node_to_cpumask[]; -extern cpumask_t pci_bus_to_cpumask[]; #ifdef CONFIG_ACPI_NUMA extern int __node_distance(int, int); @@ -26,18 +26,9 @@ extern int __node_distance(int, int); #define parent_node(node) (node) #define node_to_first_cpu(node) (__ffs(node_to_cpumask[node])) #define node_to_cpumask(node) (node_to_cpumask[node]) +#define pcibus_to_node(bus) pci_bus_to_node[(bus)->number] +#define pcibus_to_cpumask(bus) node_to_cpumask(pcibus_to_node(bus)); -static inline cpumask_t __pcibus_to_cpumask(int bus) -{ - cpumask_t busmask = pci_bus_to_cpumask[bus]; - cpumask_t online = cpu_online_map; - cpumask_t res; - cpus_and(res, busmask, online); - return res; -} -#define pcibus_to_cpumask(bus) __pcibus_to_cpumask(bus->number) - -#ifdef CONFIG_NUMA /* sched_domains SD_NODE_INIT for x86_64 machines */ #define SD_NODE_INIT (struct sched_domain) { \ .span = CPU_MASK_NONE, \ @@ -48,18 +39,21 @@ static inline cpumask_t __pcibus_to_cpumask(int bus) .busy_factor = 32, \ .imbalance_pct = 125, \ .cache_hot_time = (10*1000000), \ - .cache_nice_tries = 1, \ + .cache_nice_tries = 2, \ + .busy_idx = 3, \ + .idle_idx = 2, \ + .newidle_idx = 0, \ + .wake_idx = 1, \ + .forkexec_idx = 1, \ .per_cpu_gain = 100, \ .flags = SD_LOAD_BALANCE \ - | SD_BALANCE_NEWIDLE \ + | SD_BALANCE_FORK \ | SD_BALANCE_EXEC \ - | SD_WAKE_IDLE \ | SD_WAKE_BALANCE, \ .last_balance = jiffies, \ .balance_interval = 1, \ .nr_balance_failed = 0, \ } -#endif #endif diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h index 3c9af6f..d767adc 100644 --- a/include/asm-x86_64/unistd.h +++ b/include/asm-x86_64/unistd.h @@ -552,7 +552,7 @@ __SYSCALL(__NR_mq_notify, sys_mq_notify) #define __NR_mq_getsetattr 245 __SYSCALL(__NR_mq_getsetattr, sys_mq_getsetattr) #define __NR_kexec_load 246 -__SYSCALL(__NR_kexec_load, sys_ni_syscall) +__SYSCALL(__NR_kexec_load, sys_kexec_load) #define __NR_waitid 247 __SYSCALL(__NR_waitid, sys_waitid) #define __NR_add_key 248 diff --git a/include/asm-xtensa/a.out.h b/include/asm-xtensa/a.out.h new file mode 100644 index 0000000..3be701d --- /dev/null +++ b/include/asm-xtensa/a.out.h @@ -0,0 +1,33 @@ +/* + * include/asm-xtensa/addrspace.h + * + * Dummy a.out file. Xtensa does not support the a.out format, but the kernel + * seems to depend on it. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. 
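The get_debugreg()/set_debugreg() macros added in the x86-64 processor.h hunk above build their asm template by stringizing the register number with the preprocessor's # operator. A user-space sketch of just that trick (reading %db7 itself needs ring 0, so this only prints the template that would be generated):

#include <stdio.h>

/* same stringize-and-concatenate pattern as get_debugreg() above */
#define DEBUGREG_TEMPLATE(register) "movq %%db" #register ", %0"

int main(void)
{
	puts(DEBUGREG_TEMPLATE(7));	/* prints: movq %%db7, %0 */
	return 0;
}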
+ */ + +#ifndef _XTENSA_A_OUT_H +#define _XTENSA_A_OUT_H + +/* Note: the kernel needs the a.out definitions, even if only ELF is used. */ + +#define STACK_TOP TASK_SIZE + +struct exec +{ + unsigned long a_info; + unsigned a_text; + unsigned a_data; + unsigned a_bss; + unsigned a_syms; + unsigned a_entry; + unsigned a_trsize; + unsigned a_drsize; +}; + +#endif /* _XTENSA_A_OUT_H */ diff --git a/include/asm-xtensa/atomic.h b/include/asm-xtensa/atomic.h new file mode 100644 index 0000000..d72bcb3 --- /dev/null +++ b/include/asm-xtensa/atomic.h @@ -0,0 +1,272 @@ +/* + * include/asm-xtensa/atomic.h + * + * Atomic operations that C can't guarantee us. Useful for resource counting.. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_ATOMIC_H +#define _XTENSA_ATOMIC_H + +#include <linux/config.h> +#include <linux/stringify.h> + +typedef struct { volatile int counter; } atomic_t; + +#ifdef __KERNEL__ +#include <asm/processor.h> +#include <asm/system.h> + +#define ATOMIC_INIT(i) ( (atomic_t) { (i) } ) + +/* + * This Xtensa implementation assumes that the right mechanism + * for exclusion is for locking interrupts to level 1. + * + * Locking interrupts looks like this: + * + * rsil a15, 1 + * <code> + * wsr a15, PS + * rsync + * + * Note that a15 is used here because the register allocation + * done by the compiler is not guaranteed and a window overflow + * may not occur between the rsil and wsr instructions. By using + * a15 in the rsil, the machine is guaranteed to be in a state + * where no register reference will cause an overflow. + */ + +/** + * atomic_read - read atomic variable + * @v: pointer of type atomic_t + * + * Atomically reads the value of @v. + */ +#define atomic_read(v) ((v)->counter) + +/** + * atomic_set - set atomic variable + * @v: pointer of type atomic_t + * @i: required value + * + * Atomically sets the value of @v to @i. + */ +#define atomic_set(v,i) ((v)->counter = (i)) + +/** + * atomic_add - add integer to atomic variable + * @i: integer value to add + * @v: pointer of type atomic_t + * + * Atomically adds @i to @v. + */ +extern __inline__ void atomic_add(int i, atomic_t * v) +{ + unsigned int vval; + + __asm__ __volatile__( + "rsil a15, "__stringify(LOCKLEVEL)"\n\t" + "l32i %0, %2, 0 \n\t" + "add %0, %0, %1 \n\t" + "s32i %0, %2, 0 \n\t" + "wsr a15, "__stringify(PS)" \n\t" + "rsync \n" + : "=&a" (vval) + : "a" (i), "a" (v) + : "a15", "memory" + ); +} + +/** + * atomic_sub - subtract the atomic variable + * @i: integer value to subtract + * @v: pointer of type atomic_t + * + * Atomically subtracts @i from @v. + */ +extern __inline__ void atomic_sub(int i, atomic_t *v) +{ + unsigned int vval; + + __asm__ __volatile__( + "rsil a15, "__stringify(LOCKLEVEL)"\n\t" + "l32i %0, %2, 0 \n\t" + "sub %0, %0, %1 \n\t" + "s32i %0, %2, 0 \n\t" + "wsr a15, "__stringify(PS)" \n\t" + "rsync \n" + : "=&a" (vval) + : "a" (i), "a" (v) + : "a15", "memory" + ); +} + +/* + * We use atomic_{add|sub}_return to define other functions. 
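The comment above explains that these Xtensa atomics are built by raising the interrupt level around an ordinary load/modify/store, using a15 so that no window overflow can occur inside the critical section. As a reference model only (C11 <stdatomic.h>, obviously not available to this kernel), the contract that atomic_add() above and the *_return variants below must provide looks like this:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int v = 5;

/* the l32i/add/s32i sequence must appear indivisible; C11 states the
 * same requirement portably */
static int model_atomic_add_return(int i, atomic_int *p)
{
	return atomic_fetch_add(p, i) + i;	/* fetch_add returns the old value */
}

int main(void)
{
	printf("%d\n", model_atomic_add_return(3, &v));	/* prints 8 */
	return 0;
}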
+ */ + +extern __inline__ int atomic_add_return(int i, atomic_t * v) +{ + unsigned int vval; + + __asm__ __volatile__( + "rsil a15,"__stringify(LOCKLEVEL)"\n\t" + "l32i %0, %2, 0 \n\t" + "add %0, %0, %1 \n\t" + "s32i %0, %2, 0 \n\t" + "wsr a15, "__stringify(PS)" \n\t" + "rsync \n" + : "=&a" (vval) + : "a" (i), "a" (v) + : "a15", "memory" + ); + + return vval; +} + +extern __inline__ int atomic_sub_return(int i, atomic_t * v) +{ + unsigned int vval; + + __asm__ __volatile__( + "rsil a15,"__stringify(LOCKLEVEL)"\n\t" + "l32i %0, %2, 0 \n\t" + "sub %0, %0, %1 \n\t" + "s32i %0, %2, 0 \n\t" + "wsr a15, "__stringify(PS)" \n\t" + "rsync \n" + : "=&a" (vval) + : "a" (i), "a" (v) + : "a15", "memory" + ); + + return vval; +} + +/** + * atomic_sub_and_test - subtract value from variable and test result + * @i: integer value to subtract + * @v: pointer of type atomic_t + * + * Atomically subtracts @i from @v and returns + * true if the result is zero, or false for all + * other cases. + */ +#define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0) + +/** + * atomic_inc - increment atomic variable + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1. + */ +#define atomic_inc(v) atomic_add(1,(v)) + +/** + * atomic_inc - increment atomic variable + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1. + */ +#define atomic_inc_return(v) atomic_add_return(1,(v)) + +/** + * atomic_dec - decrement atomic variable + * @v: pointer of type atomic_t + * + * Atomically decrements @v by 1. + */ +#define atomic_dec(v) atomic_sub(1,(v)) + +/** + * atomic_dec_return - decrement atomic variable + * @v: pointer of type atomic_t + * + * Atomically decrements @v by 1. + */ +#define atomic_dec_return(v) atomic_sub_return(1,(v)) + +/** + * atomic_dec_and_test - decrement and test + * @v: pointer of type atomic_t + * + * Atomically decrements @v by 1 and + * returns true if the result is 0, or false for all other + * cases. + */ +#define atomic_dec_and_test(v) (atomic_sub_return(1,(v)) == 0) + +/** + * atomic_inc_and_test - increment and test + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. + */ +#define atomic_inc_and_test(v) (atomic_add_return(1,(v)) == 0) + +/** + * atomic_add_negative - add and test if negative + * @v: pointer of type atomic_t + * @i: integer value to add + * + * Atomically adds @i to @v and returns true + * if the result is negative, or false when + * result is greater than or equal to zero. 
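A hypothetical usage sketch (again plain C11, not kernel code) of why atomic_dec_and_test() above returns a boolean: when several contexts drop references concurrently, exactly one of them observes the count reaching zero and is therefore the one allowed to free the object.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int refcount;
	/* payload would follow */
};

static void obj_put(struct obj *o)
{
	/* models atomic_dec_and_test(): true only for the caller that
	 * drops the last reference */
	if (atomic_fetch_sub(&o->refcount, 1) == 1) {
		puts("last reference gone, freeing");
		free(o);
	}
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (!o)
		return 1;
	atomic_init(&o->refcount, 2);
	obj_put(o);		/* 2 -> 1, nothing happens   */
	obj_put(o);		/* 1 -> 0, object gets freed */
	return 0;
}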
+ */ +#define atomic_add_negative(i,v) (atomic_add_return((i),(v)) < 0) + + +extern __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v) +{ + unsigned int all_f = -1; + unsigned int vval; + + __asm__ __volatile__( + "rsil a15,"__stringify(LOCKLEVEL)"\n\t" + "l32i %0, %2, 0 \n\t" + "xor %1, %4, %3 \n\t" + "and %0, %0, %4 \n\t" + "s32i %0, %2, 0 \n\t" + "wsr a15, "__stringify(PS)" \n\t" + "rsync \n" + : "=&a" (vval), "=a" (mask) + : "a" (v), "a" (all_f), "1" (mask) + : "a15", "memory" + ); +} + +extern __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v) +{ + unsigned int vval; + + __asm__ __volatile__( + "rsil a15,"__stringify(LOCKLEVEL)"\n\t" + "l32i %0, %2, 0 \n\t" + "or %0, %0, %1 \n\t" + "s32i %0, %2, 0 \n\t" + "wsr a15, "__stringify(PS)" \n\t" + "rsync \n" + : "=&a" (vval) + : "a" (mask), "a" (v) + : "a15", "memory" + ); +} + +/* Atomic operations are already serializing */ +#define smp_mb__before_atomic_dec() barrier() +#define smp_mb__after_atomic_dec() barrier() +#define smp_mb__before_atomic_inc() barrier() +#define smp_mb__after_atomic_inc() barrier() + +#endif /* __KERNEL__ */ + +#endif /* _XTENSA_ATOMIC_H */ + diff --git a/include/asm-xtensa/bitops.h b/include/asm-xtensa/bitops.h new file mode 100644 index 0000000..d395ef2 --- /dev/null +++ b/include/asm-xtensa/bitops.h @@ -0,0 +1,446 @@ +/* + * include/asm-xtensa/bitops.h + * + * Atomic operations that C can't guarantee us.Useful for resource counting etc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_BITOPS_H +#define _XTENSA_BITOPS_H + +#ifdef __KERNEL__ + +#include <asm/processor.h> +#include <asm/byteorder.h> +#include <asm/system.h> + +#ifdef CONFIG_SMP +# error SMP not supported on this architecture +#endif + +static __inline__ void set_bit(int nr, volatile void * addr) +{ + unsigned long mask = 1 << (nr & 0x1f); + unsigned long *a = ((unsigned long *)addr) + (nr >> 5); + unsigned long flags; + + local_irq_save(flags); + *a |= mask; + local_irq_restore(flags); +} + +static __inline__ void __set_bit(int nr, volatile unsigned long * addr) +{ + unsigned long mask = 1 << (nr & 0x1f); + unsigned long *a = ((unsigned long *)addr) + (nr >> 5); + + *a |= mask; +} + +static __inline__ void clear_bit(int nr, volatile void * addr) +{ + unsigned long mask = 1 << (nr & 0x1f); + unsigned long *a = ((unsigned long *)addr) + (nr >> 5); + unsigned long flags; + + local_irq_save(flags); + *a &= ~mask; + local_irq_restore(flags); +} + +static __inline__ void __clear_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = 1 << (nr & 0x1f); + unsigned long *a = ((unsigned long *)addr) + (nr >> 5); + + *a &= ~mask; +} + +/* + * clear_bit() doesn't provide any barrier for the compiler. 
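All of the bit operations above share one decomposition: nr >> 5 selects the 32-bit word and 1 << (nr & 0x1f) is the mask within it. A stand-alone check of that arithmetic (uint32_t is used here because the xtensa code assumes 32-bit unsigned long):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t bitmap[4] = { 0 };		/* 128 bits */
	int nr = 70;

	uint32_t mask = 1u << (nr & 0x1f);	/* bit within its word */
	int word = nr >> 5;			/* which 32-bit word   */

	bitmap[word] |= mask;			/* what __set_bit(70, bitmap) does */
	printf("word %d, mask 0x%08x\n", word, (unsigned)mask);	/* word 2, mask 0x00000040 */
	return 0;
}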
+ */ + +#define smp_mb__before_clear_bit() barrier() +#define smp_mb__after_clear_bit() barrier() + +static __inline__ void change_bit(int nr, volatile void * addr) +{ + unsigned long mask = 1 << (nr & 0x1f); + unsigned long *a = ((unsigned long *)addr) + (nr >> 5); + unsigned long flags; + + local_irq_save(flags); + *a ^= mask; + local_irq_restore(flags); +} + +static __inline__ void __change_bit(int nr, volatile void * addr) +{ + unsigned long mask = 1 << (nr & 0x1f); + unsigned long *a = ((unsigned long *)addr) + (nr >> 5); + + *a ^= mask; +} + +static __inline__ int test_and_set_bit(int nr, volatile void * addr) +{ + unsigned long retval; + unsigned long mask = 1 << (nr & 0x1f); + unsigned long *a = ((unsigned long *)addr) + (nr >> 5); + unsigned long flags; + + local_irq_save(flags); + retval = (mask & *a) != 0; + *a |= mask; + local_irq_restore(flags); + + return retval; +} + +static __inline__ int __test_and_set_bit(int nr, volatile void * addr) +{ + unsigned long retval; + unsigned long mask = 1 << (nr & 0x1f); + unsigned long *a = ((unsigned long *)addr) + (nr >> 5); + + retval = (mask & *a) != 0; + *a |= mask; + + return retval; +} + +static __inline__ int test_and_clear_bit(int nr, volatile void * addr) +{ + unsigned long retval; + unsigned long mask = 1 << (nr & 0x1f); + unsigned long *a = ((unsigned long *)addr) + (nr >> 5); + unsigned long flags; + + local_irq_save(flags); + retval = (mask & *a) != 0; + *a &= ~mask; + local_irq_restore(flags); + + return retval; +} + +static __inline__ int __test_and_clear_bit(int nr, volatile void * addr) +{ + unsigned long mask = 1 << (nr & 0x1f); + unsigned long *a = ((unsigned long *)addr) + (nr >> 5); + unsigned long old = *a; + + *a = old & ~mask; + return (old & mask) != 0; +} + +static __inline__ int test_and_change_bit(int nr, volatile void * addr) +{ + unsigned long retval; + unsigned long mask = 1 << (nr & 0x1f); + unsigned long *a = ((unsigned long *)addr) + (nr >> 5); + unsigned long flags; + + local_irq_save(flags); + + retval = (mask & *a) != 0; + *a ^= mask; + local_irq_restore(flags); + + return retval; +} + +/* + * non-atomic version; can be reordered + */ + +static __inline__ int __test_and_change_bit(int nr, volatile void *addr) +{ + unsigned long mask = 1 << (nr & 0x1f); + unsigned long *a = ((unsigned long *)addr) + (nr >> 5); + unsigned long old = *a; + + *a = old ^ mask; + return (old & mask) != 0; +} + +static __inline__ int test_bit(int nr, const volatile void *addr) +{ + return 1UL & (((const volatile unsigned int *)addr)[nr>>5] >> (nr&31)); +} + +#if XCHAL_HAVE_NSAU + +static __inline__ int __cntlz (unsigned long x) +{ + int lz; + asm ("nsau %0, %1" : "=r" (lz) : "r" (x)); + return 31 - lz; +} + +#else + +static __inline__ int __cntlz (unsigned long x) +{ + unsigned long sum, x1, x2, x4, x8, x16; + x1 = x & 0xAAAAAAAA; + x2 = x & 0xCCCCCCCC; + x4 = x & 0xF0F0F0F0; + x8 = x & 0xFF00FF00; + x16 = x & 0xFFFF0000; + sum = x2 ? 2 : 0; + sum += (x16 != 0) * 16; + sum += (x8 != 0) * 8; + sum += (x4 != 0) * 4; + sum += (x1 != 0); + + return sum; +} + +#endif + +/* + * ffz: Find first zero in word. Undefined if no zero exists. + * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). + */ + +static __inline__ int ffz(unsigned long x) +{ + if ((x = ~x) == 0) + return 32; + return __cntlz(x & -x); +} + +/* + * __ffs: Find first bit set in word. Return 0 for bit 0 + */ + +static __inline__ int __ffs(unsigned long x) +{ + return __cntlz(x & -x); +} + +/* + * ffs: Find first bit set in word. 
This is defined the same way as + * the libc and compiler builtin ffs routines, therefore + * differs in spirit from the above ffz (man ffs). + */ + +static __inline__ int ffs(unsigned long x) +{ + return __cntlz(x & -x) + 1; +} + +/* + * fls: Find last (most-significant) bit set in word. + * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. + */ + +static __inline__ int fls (unsigned int x) +{ + return __cntlz(x); +} + +static __inline__ int +find_next_bit(const unsigned long *addr, int size, int offset) +{ + const unsigned long *p = addr + (offset >> 5); + unsigned long result = offset & ~31UL; + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset &= 31UL; + if (offset) { + tmp = *p++; + tmp &= ~0UL << offset; + if (size < 32) + goto found_first; + if (tmp) + goto found_middle; + size -= 32; + result += 32; + } + while (size >= 32) { + if ((tmp = *p++) != 0) + goto found_middle; + result += 32; + size -= 32; + } + if (!size) + return result; + tmp = *p; + +found_first: + tmp &= ~0UL >> (32 - size); + if (tmp == 0UL) /* Are any bits set? */ + return result + size; /* Nope. */ +found_middle: + return result + __ffs(tmp); +} + +/** + * find_first_bit - find the first set bit in a memory region + * @addr: The address to start the search at + * @size: The maximum size to search + * + * Returns the bit-number of the first set bit, not the number of the byte + * containing a bit. + */ + +#define find_first_bit(addr, size) \ + find_next_bit((addr), (size), 0) + +static __inline__ int +find_next_zero_bit(const unsigned long *addr, int size, int offset) +{ + const unsigned long *p = addr + (offset >> 5); + unsigned long result = offset & ~31UL; + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset &= 31UL; + if (offset) { + tmp = *p++; + tmp |= ~0UL >> (32-offset); + if (size < 32) + goto found_first; + if (~tmp) + goto found_middle; + size -= 32; + result += 32; + } + while (size & ~31UL) { + if (~(tmp = *p++)) + goto found_middle; + result += 32; + size -= 32; + } + if (!size) + return result; + tmp = *p; + +found_first: + tmp |= ~0UL << size; +found_middle: + return result + ffz(tmp); +} + +#define find_first_zero_bit(addr, size) \ + find_next_zero_bit((addr), (size), 0) + +#ifdef __XTENSA_EL__ +# define ext2_set_bit(nr,addr) __test_and_set_bit((nr), (addr)) +# define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit((nr),(addr)) +# define ext2_clear_bit(nr,addr) __test_and_clear_bit((nr), (addr)) +# define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr),(addr)) +# define ext2_test_bit(nr,addr) test_bit((nr), (addr)) +# define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr),(size)) +# define ext2_find_next_zero_bit(addr, size, offset) \ + find_next_zero_bit((addr), (size), (offset)) +#elif defined(__XTENSA_EB__) +# define ext2_set_bit(nr,addr) __test_and_set_bit((nr) ^ 0x18, (addr)) +# define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit((nr) ^ 0x18, (addr)) +# define ext2_clear_bit(nr,addr) __test_and_clear_bit((nr) ^ 18, (addr)) +# define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr)^0x18,(addr)) +# define ext2_test_bit(nr,addr) test_bit((nr) ^ 0x18, (addr)) +# define ext2_find_first_zero_bit(addr, size) \ + ext2_find_next_zero_bit((addr), (size), 0) + +static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset) +{ + unsigned long *p = ((unsigned long *) addr) + (offset >> 5); + unsigned long result = offset & ~31UL; + unsigned 
long tmp; + + if (offset >= size) + return size; + size -= result; + offset &= 31UL; + if(offset) { + /* We hold the little endian value in tmp, but then the + * shift is illegal. So we could keep a big endian value + * in tmp, like this: + * + * tmp = __swab32(*(p++)); + * tmp |= ~0UL >> (32-offset); + * + * but this would decrease preformance, so we change the + * shift: + */ + tmp = *(p++); + tmp |= __swab32(~0UL >> (32-offset)); + if(size < 32) + goto found_first; + if(~tmp) + goto found_middle; + size -= 32; + result += 32; + } + while(size & ~31UL) { + if(~(tmp = *(p++))) + goto found_middle; + result += 32; + size -= 32; + } + if(!size) + return result; + tmp = *p; + +found_first: + /* tmp is little endian, so we would have to swab the shift, + * see above. But then we have to swab tmp below for ffz, so + * we might as well do this here. + */ + return result + ffz(__swab32(tmp) | (~0UL << size)); +found_middle: + return result + ffz(__swab32(tmp)); +} + +#else +# error processor byte order undefined! +#endif + + +#define hweight32(x) generic_hweight32(x) +#define hweight16(x) generic_hweight16(x) +#define hweight8(x) generic_hweight8(x) + +/* + * Find the first bit set in a 140-bit bitmap. + * The first 100 bits are unlikely to be set. + */ + +static inline int sched_find_first_bit(const unsigned long *b) +{ + if (unlikely(b[0])) + return __ffs(b[0]); + if (unlikely(b[1])) + return __ffs(b[1]) + 32; + if (unlikely(b[2])) + return __ffs(b[2]) + 64; + if (b[3]) + return __ffs(b[3]) + 96; + return __ffs(b[4]) + 128; +} + + +/* Bitmap functions for the minix filesystem. */ + +#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr) +#define minix_set_bit(nr,addr) set_bit(nr,addr) +#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr) +#define minix_test_bit(nr,addr) test_bit(nr,addr) +#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) + +#endif /* __KERNEL__ */ + +#endif /* _XTENSA_BITOPS_H */ diff --git a/include/asm-xtensa/bootparam.h b/include/asm-xtensa/bootparam.h new file mode 100644 index 0000000..9983f2c --- /dev/null +++ b/include/asm-xtensa/bootparam.h @@ -0,0 +1,61 @@ +/* + * include/asm-xtensa/bootparam.h + * + * Definition of the Linux/Xtensa boot parameter structure + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + * + * (Concept borrowed from the 68K port) + */ + +#ifndef _XTENSA_BOOTPARAM_H +#define _XTENSA_BOOTPARAM_H + +#define BP_VERSION 0x0001 + +#define BP_TAG_COMMAND_LINE 0x1001 /* command line (0-terminated string)*/ +#define BP_TAG_INITRD 0x1002 /* ramdisk addr and size (bp_meminfo) */ +#define BP_TAG_MEMORY 0x1003 /* memory addr and size (bp_meminfo) */ +#define BP_TAG_SERIAL_BAUSRATE 0x1004 /* baud rate of current console. 
*/ +#define BP_TAG_SERIAL_PORT 0x1005 /* serial device of current console */ + +#define BP_TAG_FIRST 0x7B0B /* first tag with a version number */ +#define BP_TAG_LAST 0x7E0B /* last tag */ + +#ifndef __ASSEMBLY__ + +/* All records are aligned to 4 bytes */ + +typedef struct bp_tag { + unsigned short id; /* tag id */ + unsigned short size; /* size of this record excluding the structure*/ + unsigned long data[0]; /* data */ +} bp_tag_t; + +typedef struct meminfo { + unsigned long type; + unsigned long start; + unsigned long end; +} meminfo_t; + +#define SYSMEM_BANKS_MAX 5 + +#define MEMORY_TYPE_CONVENTIONAL 0x1000 +#define MEMORY_TYPE_NONE 0x2000 + +typedef struct sysmem_info { + int nr_banks; + meminfo_t bank[SYSMEM_BANKS_MAX]; +} sysmem_info_t; + +extern sysmem_info_t sysmem; + +#endif +#endif + + + diff --git a/include/asm-xtensa/bug.h b/include/asm-xtensa/bug.h new file mode 100644 index 0000000..5670365 --- /dev/null +++ b/include/asm-xtensa/bug.h @@ -0,0 +1,41 @@ +/* + * include/asm-xtensa/bug.h + * + * Macros to cause a 'bug' message. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_BUG_H +#define _XTENSA_BUG_H + +#include <linux/stringify.h> + +#define ILL __asm__ __volatile__ (".byte 0,0,0\n") + +#ifdef CONFIG_KALLSYMS +# define BUG() do { \ + printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \ + ILL; \ +} while (0) +#else +# define BUG() do { \ + printk("kernel BUG!\n"); \ + ILL; \ +} while (0) +#endif + +#define BUG_ON(condition) do { if (unlikely((condition)!=0)) BUG(); } while(0) +#define PAGE_BUG(page) do { BUG(); } while (0) +#define WARN_ON(condition) do { \ + if (unlikely((condition)!=0)) { \ + printk ("Warning in %s at %s:%d\n", __FUNCTION__, __FILE__, __LINE__); \ + dump_stack(); \ + } \ +} while (0) + +#endif /* _XTENSA_BUG_H */ diff --git a/include/asm-xtensa/bugs.h b/include/asm-xtensa/bugs.h new file mode 100644 index 0000000..c422853 --- /dev/null +++ b/include/asm-xtensa/bugs.h @@ -0,0 +1,22 @@ +/* + * include/asm-xtensa/bugs.h + * + * This is included by init/main.c to check for architecture-dependent bugs. + * + * Xtensa processors don't have any bugs. :) + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file "COPYING" in the main directory of + * this archive for more details. + */ + +#ifndef _XTENSA_BUGS_H +#define _XTENSA_BUGS_H + +#include <asm/processor.h> + +static void __init check_bugs(void) +{ +} + +#endif /* _XTENSA_BUGS_H */ diff --git a/include/asm-xtensa/byteorder.h b/include/asm-xtensa/byteorder.h new file mode 100644 index 0000000..0b15525 --- /dev/null +++ b/include/asm-xtensa/byteorder.h @@ -0,0 +1,82 @@ +/* + * include/asm-xtensa/byteorder.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. 
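sched_find_first_bit() in the bitops header above scans the 140-bit priority bitmap one 32-bit word at a time, relying on the first 100 bits usually being clear, as its comment notes. The same logic in stand-alone form, with GCC's __builtin_ctz standing in for __ffs():

#include <stdio.h>

static int my_ffs(unsigned int x)	/* __ffs() for a non-zero 32-bit word */
{
	return __builtin_ctz(x);
}

static int sched_find_first_bit(const unsigned int b[5])
{
	if (b[0]) return my_ffs(b[0]);
	if (b[1]) return my_ffs(b[1]) + 32;
	if (b[2]) return my_ffs(b[2]) + 64;
	if (b[3]) return my_ffs(b[3]) + 96;
	return my_ffs(b[4]) + 128;
}

int main(void)
{
	unsigned int bitmap[5] = { 0, 0, 0, 0x10, 0 };	/* bit 100 set */
	printf("%d\n", sched_find_first_bit(bitmap));	/* prints 100 */
	return 0;
}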
+ */ + +#ifndef _XTENSA_BYTEORDER_H +#define _XTENSA_BYTEORDER_H + +#include <asm/processor.h> +#include <asm/types.h> + +static __inline__ __const__ __u32 ___arch__swab32(__u32 x) +{ + __u32 res; + /* instruction sequence from Xtensa ISA release 2/2000 */ + __asm__("ssai 8 \n\t" + "srli %0, %1, 16 \n\t" + "src %0, %0, %1 \n\t" + "src %0, %0, %0 \n\t" + "src %0, %1, %0 \n" + : "=&a" (res) + : "a" (x) + ); + return res; +} + +static __inline__ __const__ __u16 ___arch__swab16(__u16 x) +{ + /* Given that 'short' values are signed (i.e., can be negative), + * we cannot assume that the upper 16-bits of the register are + * zero. We are careful to mask values after shifting. + */ + + /* There exists an anomaly between xt-gcc and xt-xcc. xt-gcc + * inserts an extui instruction after putting this function inline + * to ensure that it uses only the least-significant 16 bits of + * the result. xt-xcc doesn't use an extui, but assumes the + * __asm__ macro follows convention that the upper 16 bits of an + * 'unsigned short' result are still zero. This macro doesn't + * follow convention; indeed, it leaves garbage in the upport 16 + * bits of the register. + + * Declaring the temporary variables 'res' and 'tmp' to be 32-bit + * types while the return type of the function is a 16-bit type + * forces both compilers to insert exactly one extui instruction + * (or equivalent) to mask off the upper 16 bits. */ + + __u32 res; + __u32 tmp; + + __asm__("extui %1, %2, 8, 8\n\t" + "slli %0, %2, 8 \n\t" + "or %0, %0, %1 \n" + : "=&a" (res), "=&a" (tmp) + : "a" (x) + ); + + return res; +} + +#define __arch__swab32(x) ___arch__swab32(x) +#define __arch__swab16(x) ___arch__swab16(x) + +#if !defined(__STRICT_ANSI__) || defined(__KERNEL__) +# define __BYTEORDER_HAS_U64__ +# define __SWAB_64_THRU_32__ +#endif + +#ifdef __XTENSA_EL__ +# include <linux/byteorder/little_endian.h> +#elif defined(__XTENSA_EB__) +# include <linux/byteorder/big_endian.h> +#else +# error processor byte order undefined! +#endif + +#endif /* __ASM_XTENSA_BYTEORDER_H */ diff --git a/include/asm-xtensa/cache.h b/include/asm-xtensa/cache.h new file mode 100644 index 0000000..5aae3f1 --- /dev/null +++ b/include/asm-xtensa/cache.h @@ -0,0 +1,32 @@ +/* + * include/asm-xtensa/cacheflush.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * 2 of the License, or (at your option) any later version. + * + * (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_CACHE_H +#define _XTENSA_CACHE_H + +#include <xtensa/config/core.h> + +#if XCHAL_ICACHE_SIZE > 0 +# if (XCHAL_ICACHE_SIZE % (XCHAL_ICACHE_LINESIZE*XCHAL_ICACHE_WAYS*4)) != 0 +# error cache configuration outside expected/supported range! +# endif +#endif + +#if XCHAL_DCACHE_SIZE > 0 +# if (XCHAL_DCACHE_SIZE % (XCHAL_DCACHE_LINESIZE*XCHAL_DCACHE_WAYS*4)) != 0 +# error cache configuration outside expected/supported range! +# endif +#endif + +#define L1_CACHE_SHIFT XCHAL_CACHE_LINEWIDTH_MAX +#define L1_CACHE_BYTES XCHAL_CACHE_LINESIZE_MAX + +#endif /* _XTENSA_CACHE_H */ diff --git a/include/asm-xtensa/cacheflush.h b/include/asm-xtensa/cacheflush.h new file mode 100644 index 0000000..44a36e0 --- /dev/null +++ b/include/asm-xtensa/cacheflush.h @@ -0,0 +1,122 @@ +/* + * include/asm-xtensa/cacheflush.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
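A portable reference (plain C, no asm) for what ___arch__swab32() and ___arch__swab16() above must compute, convenient for checking the ssai/src and extui/slli sequences against known values:

#include <stdio.h>
#include <stdint.h>

static uint32_t swab32(uint32_t x)
{
	return ((x & 0x000000ffu) << 24) | ((x & 0x0000ff00u) << 8) |
	       ((x & 0x00ff0000u) >> 8)  | ((x & 0xff000000u) >> 24);
}

static uint16_t swab16(uint16_t x)
{
	return (uint16_t)((x >> 8) | (x << 8));
}

int main(void)
{
	/* expect 78563412 and cdab */
	printf("%08x %04x\n", (unsigned)swab32(0x12345678u),
	       (unsigned)swab16(0xabcd));
	return 0;
}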
+ * + * (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_CACHEFLUSH_H +#define _XTENSA_CACHEFLUSH_H + +#ifdef __KERNEL__ + +#include <linux/mm.h> +#include <asm/processor.h> +#include <asm/page.h> + +/* + * flush and invalidate data cache, invalidate instruction cache: + * + * __flush_invalidate_cache_all() + * __flush_invalidate_cache_range(from,sze) + * + * invalidate data or instruction cache: + * + * __invalidate_icache_all() + * __invalidate_icache_page(adr) + * __invalidate_dcache_page(adr) + * __invalidate_icache_range(from,size) + * __invalidate_dcache_range(from,size) + * + * flush data cache: + * + * __flush_dcache_page(adr) + * + * flush and invalidate data cache: + * + * __flush_invalidate_dcache_all() + * __flush_invalidate_dcache_page(adr) + * __flush_invalidate_dcache_range(from,size) + */ + +extern void __flush_invalidate_cache_all(void); +extern void __flush_invalidate_cache_range(unsigned long, unsigned long); +extern void __flush_invalidate_dcache_all(void); +extern void __invalidate_icache_all(void); + +extern void __invalidate_dcache_page(unsigned long); +extern void __invalidate_icache_page(unsigned long); +extern void __invalidate_icache_range(unsigned long, unsigned long); +extern void __invalidate_dcache_range(unsigned long, unsigned long); + +#if XCHAL_DCACHE_IS_WRITEBACK +extern void __flush_dcache_page(unsigned long); +extern void __flush_invalidate_dcache_page(unsigned long); +extern void __flush_invalidate_dcache_range(unsigned long, unsigned long); +#else +# define __flush_dcache_page(p) do { } while(0) +# define __flush_invalidate_dcache_page(p) do { } while(0) +# define __flush_invalidate_dcache_range(p,s) do { } while(0) +#endif + +/* + * We have physically tagged caches - nothing to do here - + * unless we have cache aliasing. + * + * Pages can get remapped. Because this might change the 'color' of that page, + * we have to flush the cache before the PTE is changed. 
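The aliasing condition described above (DCACHE_WAY_SIZE > PAGE_SIZE with a writeback cache) means two virtual mappings of the same physical page can land on different cache "colors", so the old mapping has to be flushed before its PTE changes. A sketch of the color computation with assumed example sizes (not taken from any real Xtensa configuration):

#include <stdio.h>

#define PAGE_SIZE	4096u
#define DCACHE_WAY_SIZE	8192u	/* assumed: e.g. a 16 KB, 2-way data cache */

static unsigned int cache_color(unsigned long vaddr)
{
	return (unsigned int)((vaddr & (DCACHE_WAY_SIZE - 1)) / PAGE_SIZE);
}

int main(void)
{
	/* two hypothetical mappings of one physical page: different
	 * colors, so they index different cache lines */
	printf("%u %u\n", cache_color(0x10001000ul), cache_color(0x10002000ul));
	return 0;
}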
+ * (see also Documentation/cachetlb.txt) + */ + +#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK + +#define flush_cache_all() __flush_invalidate_cache_all(); +#define flush_cache_mm(mm) __flush_invalidate_cache_all(); + +#define flush_cache_vmap(start,end) __flush_invalidate_cache_all(); +#define flush_cache_vunmap(start,end) __flush_invalidate_cache_all(); + +extern void flush_dcache_page(struct page*); + +extern void flush_cache_range(struct vm_area_struct*, ulong, ulong); +extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned long); + +#else + +#define flush_cache_all() do { } while (0) +#define flush_cache_mm(mm) do { } while (0) + +#define flush_cache_vmap(start,end) do { } while (0) +#define flush_cache_vunmap(start,end) do { } while (0) + +#define flush_dcache_page(page) do { } while (0) + +#define flush_cache_page(vma,addr,pfn) do { } while (0) +#define flush_cache_range(vma,start,end) do { } while (0) + +#endif + +#define flush_icache_range(start,end) \ + __invalidate_icache_range(start,(end)-(start)) + +/* This is not required, see Documentation/cachetlb.txt */ + +#define flush_icache_page(vma,page) do { } while(0) + +#define flush_dcache_mmap_lock(mapping) do { } while (0) +#define flush_dcache_mmap_unlock(mapping) do { } while (0) + + +#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ + memcpy(dst, src, len) + +#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ + memcpy(dst, src, len) + +#endif /* __KERNEL__ */ + +#endif /* _XTENSA_CACHEFLUSH_H */ + diff --git a/include/asm-xtensa/checksum.h b/include/asm-xtensa/checksum.h new file mode 100644 index 0000000..1a00fad --- /dev/null +++ b/include/asm-xtensa/checksum.h @@ -0,0 +1,264 @@ +/* + * include/asm-xtensa/checksum.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_CHECKSUM_H +#define _XTENSA_CHECKSUM_H + +#include <linux/config.h> +#include <linux/in6.h> +#include <xtensa/config/core.h> + +/* + * computes the checksum of a memory block at buff, length len, + * and adds in "sum" (32-bit) + * + * returns a 32-bit number suitable for feeding into itself + * or csum_tcpudp_magic + * + * this function must be called with even lengths, except + * for the last fragment, which may be odd + * + * it's best to have buff aligned on a 32-bit boundary + */ +asmlinkage unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum); + +/* + * the same as csum_partial, but copies from src while it + * checksums, and handles user-space pointer exceptions correctly, when needed. + * + * here even more important to align src and dst on a 32-bit (or even + * better 64-bit) boundary + */ + +asmlinkage unsigned int csum_partial_copy_generic( const char *src, char *dst, int len, int sum, + int *src_err_ptr, int *dst_err_ptr); + +/* + * Note: when you get a NULL pointer exception here this means someone + * passed in an incorrect kernel address to one of these functions. + * + * If you use these functions directly please don't forget the + * verify_area(). 
+ */ +extern __inline__ +unsigned int csum_partial_copy_nocheck ( const char *src, char *dst, + int len, int sum) +{ + return csum_partial_copy_generic ( src, dst, len, sum, NULL, NULL); +} + +extern __inline__ +unsigned int csum_partial_copy_from_user ( const char *src, char *dst, + int len, int sum, int *err_ptr) +{ + return csum_partial_copy_generic ( src, dst, len, sum, err_ptr, NULL); +} + +/* + * These are the old (and unsafe) way of doing checksums, a warning message will be + * printed if they are used and an exeption occurs. + * + * these functions should go away after some time. + */ + +#define csum_partial_copy_fromuser csum_partial_copy +unsigned int csum_partial_copy( const char *src, char *dst, int len, int sum); + +/* + * Fold a partial checksum + */ + +static __inline__ unsigned int csum_fold(unsigned int sum) +{ + unsigned int __dummy; + __asm__("extui %1, %0, 16, 16\n\t" + "extui %0 ,%0, 0, 16\n\t" + "add %0, %0, %1\n\t" + "slli %1, %0, 16\n\t" + "add %0, %0, %1\n\t" + "extui %0, %0, 16, 16\n\t" + "neg %0, %0\n\t" + "addi %0, %0, -1\n\t" + "extui %0, %0, 0, 16\n\t" + : "=r" (sum), "=&r" (__dummy) + : "0" (sum)); + return sum; +} + +/* + * This is a version of ip_compute_csum() optimized for IP headers, + * which always checksum on 4 octet boundaries. + */ +static __inline__ unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl) +{ + unsigned int sum, tmp, endaddr; + + __asm__ __volatile__( + "sub %0, %0, %0\n\t" +#if XCHAL_HAVE_LOOPS + "loopgtz %2, 2f\n\t" +#else + "beqz %2, 2f\n\t" + "slli %4, %2, 2\n\t" + "add %4, %4, %1\n\t" + "0:\t" +#endif + "l32i %3, %1, 0\n\t" + "add %0, %0, %3\n\t" + "bgeu %0, %3, 1f\n\t" + "addi %0, %0, 1\n\t" + "1:\t" + "addi %1, %1, 4\n\t" +#if !XCHAL_HAVE_LOOPS + "blt %1, %4, 0b\n\t" +#endif + "2:\t" + /* Since the input registers which are loaded with iph and ihl + are modified, we must also specify them as outputs, or gcc + will assume they contain their original values. */ + : "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (tmp), "=&r" (endaddr) + : "1" (iph), "2" (ihl)); + + return csum_fold(sum); +} + +static __inline__ unsigned long csum_tcpudp_nofold(unsigned long saddr, + unsigned long daddr, + unsigned short len, + unsigned short proto, + unsigned int sum) +{ + +#ifdef __XTENSA_EL__ + unsigned long len_proto = (ntohs(len)<<16)+proto*256; +#elif defined(__XTENSA_EB__) + unsigned long len_proto = (proto<<16)+len; +#else +# error processor byte order undefined! 
+#endif + __asm__("add %0, %0, %1\n\t" + "bgeu %0, %1, 1f\n\t" + "addi %0, %0, 1\n\t" + "1:\t" + "add %0, %0, %2\n\t" + "bgeu %0, %2, 1f\n\t" + "addi %0, %0, 1\n\t" + "1:\t" + "add %0, %0, %3\n\t" + "bgeu %0, %3, 1f\n\t" + "addi %0, %0, 1\n\t" + "1:\t" + : "=r" (sum), "=r" (len_proto) + : "r" (daddr), "r" (saddr), "1" (len_proto), "0" (sum)); + return sum; +} + +/* + * computes the checksum of the TCP/UDP pseudo-header + * returns a 16-bit checksum, already complemented + */ +static __inline__ unsigned short int csum_tcpudp_magic(unsigned long saddr, + unsigned long daddr, + unsigned short len, + unsigned short proto, + unsigned int sum) +{ + return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); +} + +/* + * this routine is used for miscellaneous IP-like checksums, mainly + * in icmp.c + */ + +static __inline__ unsigned short ip_compute_csum(unsigned char * buff, int len) +{ + return csum_fold (csum_partial(buff, len, 0)); +} + +#define _HAVE_ARCH_IPV6_CSUM +static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr, + struct in6_addr *daddr, + __u32 len, + unsigned short proto, + unsigned int sum) +{ + unsigned int __dummy; + __asm__("l32i %1, %2, 0\n\t" + "add %0, %0, %1\n\t" + "bgeu %0, %1, 1f\n\t" + "addi %0, %0, 1\n\t" + "1:\t" + "l32i %1, %2, 4\n\t" + "add %0, %0, %1\n\t" + "bgeu %0, %1, 1f\n\t" + "addi %0, %0, 1\n\t" + "1:\t" + "l32i %1, %2, 8\n\t" + "add %0, %0, %1\n\t" + "bgeu %0, %1, 1f\n\t" + "addi %0, %0, 1\n\t" + "1:\t" + "l32i %1, %2, 12\n\t" + "add %0, %0, %1\n\t" + "bgeu %0, %1, 1f\n\t" + "addi %0, %0, 1\n\t" + "1:\t" + "l32i %1, %3, 0\n\t" + "add %0, %0, %1\n\t" + "bgeu %0, %1, 1f\n\t" + "addi %0, %0, 1\n\t" + "1:\t" + "l32i %1, %3, 4\n\t" + "add %0, %0, %1\n\t" + "bgeu %0, %1, 1f\n\t" + "addi %0, %0, 1\n\t" + "1:\t" + "l32i %1, %3, 8\n\t" + "add %0, %0, %1\n\t" + "bgeu %0, %1, 1f\n\t" + "addi %0, %0, 1\n\t" + "1:\t" + "l32i %1, %3, 12\n\t" + "add %0, %0, %1\n\t" + "bgeu %0, %1, 1f\n\t" + "addi %0, %0, 1\n\t" + "1:\t" + "add %0, %0, %4\n\t" + "bgeu %0, %4, 1f\n\t" + "addi %0, %0, 1\n\t" + "1:\t" + "add %0, %0, %5\n\t" + "bgeu %0, %5, 1f\n\t" + "addi %0, %0, 1\n\t" + "1:\t" + : "=r" (sum), "=&r" (__dummy) + : "r" (saddr), "r" (daddr), + "r" (htonl(len)), "r" (htonl(proto)), "0" (sum)); + + return csum_fold(sum); +} + +/* + * Copy and checksum to user + */ +#define HAVE_CSUM_COPY_USER +static __inline__ unsigned int csum_and_copy_to_user (const char *src, char *dst, + int len, int sum, int *err_ptr) +{ + if (access_ok(VERIFY_WRITE, dst, len)) + return csum_partial_copy_generic(src, dst, len, sum, NULL, err_ptr); + + if (len) + *err_ptr = -EFAULT; + + return -1; /* invalid checksum */ +} +#endif diff --git a/include/asm-xtensa/coprocessor.h b/include/asm-xtensa/coprocessor.h new file mode 100644 index 0000000..a91b96d --- /dev/null +++ b/include/asm-xtensa/coprocessor.h @@ -0,0 +1,70 @@ +/* + * include/asm-xtensa/cpextra.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2003 - 2005 Tensilica Inc. 
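A small portable model (plain C, no asm) of the ones'-complement folding behind csum_fold() and ip_fast_csum() above; the add/bgeu/addi pairs in the asm perform the same end-around-carry addition. The check below uses the usual Internet-checksum property that folding a sum together with its own checksum yields zero:

#include <stdio.h>
#include <stdint.h>

/* fold a 32-bit partial sum to 16 bits and complement, as csum_fold() does */
static uint16_t fold(uint32_t sum)
{
	sum = (sum & 0xffffu) + (sum >> 16);
	sum = (sum & 0xffffu) + (sum >> 16);	/* at most one more carry */
	return (uint16_t)~sum;
}

int main(void)
{
	uint32_t partial = 0x0001f203u;		/* arbitrary partial sum */
	uint16_t csum = fold(partial);

	/* expect "0dfb 0000": sum plus its own checksum folds to zero */
	printf("%04x %04x\n", csum, fold(partial + csum));
	return 0;
}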
+ */ + +#ifndef _XTENSA_COPROCESSOR_H +#define _XTENSA_COPROCESSOR_H + +#include <xtensa/config/core.h> + +#define XTOFS(last_start,last_size,align) \ + ((last_start+last_size+align-1) & -align) + +#define XTENSA_CP_EXTRA_OFFSET 0 +#define XTENSA_CP_EXTRA_ALIGN XCHAL_EXTRA_SA_ALIGN + +#define XTENSA_CPE_CP0_OFFSET \ + XTOFS(XTENSA_CP_EXTRA_OFFSET, XCHAL_EXTRA_SA_SIZE, XCHAL_CP0_SA_ALIGN) +#define XTENSA_CPE_CP1_OFFSET \ + XTOFS(XTENSA_CPE_CP0_OFFSET, XCHAL_CP0_SA_SIZE, XCHAL_CP1_SA_ALIGN) +#define XTENSA_CPE_CP2_OFFSET \ + XTOFS(XTENSA_CPE_CP1_OFFSET, XCHAL_CP1_SA_SIZE, XCHAL_CP2_SA_ALIGN) +#define XTENSA_CPE_CP3_OFFSET \ + XTOFS(XTENSA_CPE_CP2_OFFSET, XCHAL_CP2_SA_SIZE, XCHAL_CP3_SA_ALIGN) +#define XTENSA_CPE_CP4_OFFSET \ + XTOFS(XTENSA_CPE_CP3_OFFSET, XCHAL_CP3_SA_SIZE, XCHAL_CP4_SA_ALIGN) +#define XTENSA_CPE_CP5_OFFSET \ + XTOFS(XTENSA_CPE_CP4_OFFSET, XCHAL_CP4_SA_SIZE, XCHAL_CP5_SA_ALIGN) +#define XTENSA_CPE_CP6_OFFSET \ + XTOFS(XTENSA_CPE_CP5_OFFSET, XCHAL_CP5_SA_SIZE, XCHAL_CP6_SA_ALIGN) +#define XTENSA_CPE_CP7_OFFSET \ + XTOFS(XTENSA_CPE_CP6_OFFSET, XCHAL_CP6_SA_SIZE, XCHAL_CP7_SA_ALIGN) +#define XTENSA_CP_EXTRA_SIZE \ + XTOFS(XTENSA_CPE_CP7_OFFSET, XCHAL_CP7_SA_SIZE, 16) + +#if XCHAL_CP_NUM > 0 +# ifndef __ASSEMBLY__ +/* + * Tasks that own contents of (last user) each coprocessor. + * Entries are 0 for not-owned or non-existent coprocessors. + * Note: The size of this structure is fixed to 8 bytes in entry.S + */ +typedef struct { + struct task_struct *owner; /* owner */ + int offset; /* offset in cpextra space. */ +} coprocessor_info_t; +# else +# define COPROCESSOR_INFO_OWNER 0 +# define COPROCESSOR_INFO_OFFSET 4 +# define COPROCESSOR_INFO_SIZE 8 +# endif +#endif + + +#ifndef __ASSEMBLY__ +# if XCHAL_CP_NUM > 0 +struct task_struct; +extern void release_coprocessors (struct task_struct*); +extern void save_coprocessor_registers(void*, int); +# else +# define release_coprocessors(task) +# endif +#endif + +#endif /* _XTENSA_COPROCESSOR_H */ diff --git a/include/asm-xtensa/cpumask.h b/include/asm-xtensa/cpumask.h new file mode 100644 index 0000000..ebeede3 --- /dev/null +++ b/include/asm-xtensa/cpumask.h @@ -0,0 +1,16 @@ +/* + * include/asm-xtensa/cpumask.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_CPUMASK_H +#define _XTENSA_CPUMASK_H + +#include <asm-generic/cpumask.h> + +#endif /* _XTENSA_CPUMASK_H */ diff --git a/include/asm-xtensa/cputime.h b/include/asm-xtensa/cputime.h new file mode 100644 index 0000000..a7fb864 --- /dev/null +++ b/include/asm-xtensa/cputime.h @@ -0,0 +1,6 @@ +#ifndef _XTENSA_CPUTIME_H +#define _XTENSA_CPUTIME_H + +#include <asm-generic/cputime.h> + +#endif /* _XTENSA_CPUTIME_H */ diff --git a/include/asm-xtensa/current.h b/include/asm-xtensa/current.h new file mode 100644 index 0000000..8d1eb5d --- /dev/null +++ b/include/asm-xtensa/current.h @@ -0,0 +1,38 @@ +/* + * include/asm-xtensa/current.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. 
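XTOFS() above lays out the coprocessor save areas by rounding each running offset up to the next area's alignment; (offset + align - 1) & -align is the usual round-up to a power-of-two boundary. A quick stand-alone check:

#include <stdio.h>

#define XTOFS(last_start, last_size, align) \
	(((last_start) + (last_size) + (align) - 1) & -(align))

int main(void)
{
	/* previous save area started at 0 and was 36 bytes long; the
	 * next one wants 16-byte alignment, so it goes at offset 48 */
	printf("%d\n", XTOFS(0, 36, 16));	/* prints 48 */
	return 0;
}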
+ */ + +#ifndef _XTENSA_CURRENT_H +#define _XTENSA_CURRENT_H + +#ifndef __ASSEMBLY__ + +#include <linux/thread_info.h> + +struct task_struct; + +static inline struct task_struct *get_current(void) +{ + return current_thread_info()->task; +} + +#define current get_current() + +#else + +#define CURRENT_SHIFT 13 + +#define GET_CURRENT(reg,sp) \ + GET_THREAD_INFO(reg,sp); \ + l32i reg, reg, TI_TASK \ + +#endif + + +#endif /* XTENSA_CURRENT_H */ diff --git a/include/asm-xtensa/delay.h b/include/asm-xtensa/delay.h new file mode 100644 index 0000000..6359c55 --- /dev/null +++ b/include/asm-xtensa/delay.h @@ -0,0 +1,50 @@ +/* + * include/asm-xtensa/delay.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + * + */ + +#ifndef _XTENSA_DELAY_H +#define _XTENSA_DELAY_H + +#include <linux/config.h> +#include <asm/processor.h> +#include <asm/param.h> + +extern unsigned long loops_per_jiffy; + +extern __inline__ void __delay(unsigned long loops) +{ + /* 2 cycles per loop. */ + __asm__ __volatile__ ("1: addi %0, %0, -2; bgeui %0, 1, 1b" + : "=r" (loops) : "0" (loops)); +} + +static __inline__ u32 xtensa_get_ccount(void) +{ + u32 ccount; + asm volatile ("rsr %0, 234; # CCOUNT\n" : "=r" (ccount)); + return ccount; +} + +/* For SMP/NUMA systems, change boot_cpu_data to something like + * local_cpu_data->... where local_cpu_data points to the current + * cpu. */ + +static __inline__ void udelay (unsigned long usecs) +{ + unsigned long start = xtensa_get_ccount(); + unsigned long cycles = usecs * (loops_per_jiffy / (1000000UL / HZ)); + + /* Note: all variables are unsigned (can wrap around)! */ + while (((unsigned long)xtensa_get_ccount()) - start < cycles) + ; +} + +#endif + diff --git a/include/asm-xtensa/div64.h b/include/asm-xtensa/div64.h new file mode 100644 index 0000000..c4a1057 --- /dev/null +++ b/include/asm-xtensa/div64.h @@ -0,0 +1,19 @@ +/* + * include/asm-xtensa/div64.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_DIV64_H +#define _XTENSA_DIV64_H + +#define do_div(n,base) ({ \ + int __res = n % ((unsigned int) base); \ + n /= (unsigned int) base; \ + __res; }) + +#endif diff --git a/include/asm-xtensa/dma-mapping.h b/include/asm-xtensa/dma-mapping.h new file mode 100644 index 0000000..e86a206 --- /dev/null +++ b/include/asm-xtensa/dma-mapping.h @@ -0,0 +1,182 @@ +/* + * include/asm-xtensa/dma_mapping.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2003 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_DMA_MAPPING_H +#define _XTENSA_DMA_MAPPING_H + +#include <asm/scatterlist.h> +#include <asm/cache.h> +#include <asm/io.h> +#include <linux/mm.h> + +/* + * DMA-consistent mapping functions. 
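The udelay() comment above notes that everything is unsigned, so the cycle counter may wrap while waiting. The loop's termination test stays correct because "now - start" is computed modulo 2^32; a two-line check with values straddling the overflow:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t start = 0xfffffff0u;	/* CCOUNT just before it wraps */
	uint32_t now   = 0x00000010u;	/* CCOUNT after wrapping       */

	printf("%u\n", now - start);	/* prints 32: elapsed cycles   */
	return 0;
}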
+ */ + +extern void *consistent_alloc(int, size_t, dma_addr_t, unsigned long); +extern void consistent_free(void*, size_t, dma_addr_t); +extern void consistent_sync(void*, size_t, int); + +#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) +#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) + +void *dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, int flag); + +void dma_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle); + +static inline dma_addr_t +dma_map_single(struct device *dev, void *ptr, size_t size, + enum dma_data_direction direction) +{ + BUG_ON(direction == DMA_NONE); + consistent_sync(ptr, size, direction); + return virt_to_phys(ptr); +} + +static inline void +dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, + enum dma_data_direction direction) +{ + BUG_ON(direction == DMA_NONE); +} + +static inline int +dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction direction) +{ + int i; + + BUG_ON(direction == DMA_NONE); + + for (i = 0; i < nents; i++, sg++ ) { + BUG_ON(!sg->page); + + sg->dma_address = page_to_phys(sg->page) + sg->offset; + consistent_sync(page_address(sg->page) + sg->offset, + sg->length, direction); + } + + return nents; +} + +static inline dma_addr_t +dma_map_page(struct device *dev, struct page *page, unsigned long offset, + size_t size, enum dma_data_direction direction) +{ + BUG_ON(direction == DMA_NONE); + return (dma_addr_t)(page_to_pfn(page)) * PAGE_SIZE + offset; +} + +static inline void +dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, + enum dma_data_direction direction) +{ + BUG_ON(direction == DMA_NONE); +} + + +static inline void +dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, + enum dma_data_direction direction) +{ + BUG_ON(direction == DMA_NONE); +} + +static inline void +dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, + enum dma_data_direction direction) +{ + consistent_sync((void *)bus_to_virt(dma_handle), size, direction); +} + +static inline void +dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, + enum dma_data_direction direction) +{ + consistent_sync((void *)bus_to_virt(dma_handle), size, direction); +} + +static inline void +dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, + unsigned long offset, size_t size, + enum dma_data_direction direction) +{ + + consistent_sync((void *)bus_to_virt(dma_handle)+offset,size,direction); +} + +static inline void +dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, + unsigned long offset, size_t size, + enum dma_data_direction direction) +{ + + consistent_sync((void *)bus_to_virt(dma_handle)+offset,size,direction); +} +static inline void +dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, + enum dma_data_direction dir) +{ + int i; + for (i = 0; i < nelems; i++, sg++) + consistent_sync(page_address(sg->page) + sg->offset, + sg->length, dir); +} + +static inline void +dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, + enum dma_data_direction dir) +{ + int i; + for (i = 0; i < nelems; i++, sg++) + consistent_sync(page_address(sg->page) + sg->offset, + sg->length, dir); +} +static inline int +dma_mapping_error(dma_addr_t dma_addr) +{ + return 0; +} + +static inline int +dma_supported(struct device *dev, u64 mask) +{ + return 1; +} + 
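A toy user-space model (stubs only, not code from this tree) of what the xtensa dma_map_single() above does for a streaming mapping: push the CPU's view of the buffer out to memory with consistent_sync(), then hand the device the physical address from virt_to_phys(). Both helpers are stubbed here just to show the ordering.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

enum dma_data_direction { DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2 };

static void consistent_sync(void *ptr, size_t size, enum dma_data_direction dir)
{
	printf("sync %zu bytes at %p (dir %d)\n", size, ptr, (int)dir);
}

static uintptr_t virt_to_phys(void *ptr)
{
	return (uintptr_t)ptr;		/* identity map stands in for the real thing */
}

static uintptr_t toy_dma_map_single(void *ptr, size_t size,
				    enum dma_data_direction dir)
{
	consistent_sync(ptr, size, dir);	/* make the data visible to the device */
	return virt_to_phys(ptr);		/* bus address the device will use     */
}

int main(void)
{
	static char buf[64];

	printf("bus address: %#lx\n",
	       (unsigned long)toy_dma_map_single(buf, sizeof(buf), DMA_TO_DEVICE));
	return 0;
}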
+static inline int +dma_set_mask(struct device *dev, u64 mask) +{ + if(!dev->dma_mask || !dma_supported(dev, mask)) + return -EIO; + + *dev->dma_mask = mask; + + return 0; +} + +static inline int +dma_get_cache_alignment(void) +{ + return L1_CACHE_BYTES; +} + +#define dma_is_consistent(d) (1) + +static inline void +dma_cache_sync(void *vaddr, size_t size, + enum dma_data_direction direction) +{ + consistent_sync(vaddr, size, direction); +} + +#endif /* _XTENSA_DMA_MAPPING_H */ diff --git a/include/asm-xtensa/dma.h b/include/asm-xtensa/dma.h new file mode 100644 index 0000000..1c22b02 --- /dev/null +++ b/include/asm-xtensa/dma.h @@ -0,0 +1,61 @@ +/* + * include/asm-xtensa/dma.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2003 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_DMA_H +#define _XTENSA_DMA_H + +#include <linux/config.h> +#include <asm/io.h> /* need byte IO */ +#include <xtensa/config/core.h> + +/* + * This is only to be defined if we have PC-like DMA. + * By default this is not true on an Xtensa processor, + * however on boards with a PCI bus, such functionality + * might be emulated externally. + * + * NOTE: there still exists driver code that assumes + * this is defined, eg. drivers/sound/soundcard.c (as of 2.4). + */ +#define MAX_DMA_CHANNELS 8 + +/* + * The maximum virtual address to which DMA transfers + * can be performed on this platform. + * + * NOTE: This is board (platform) specific, not processor-specific! + * + * NOTE: This assumes DMA transfers can only be performed on + * the section of physical memory contiguously mapped in virtual + * space for the kernel. For the Xtensa architecture, this + * means the maximum possible size of this DMA area is + * the size of the statically mapped kernel segment + * (XCHAL_KSEG_{CACHED,BYPASS}_SIZE), ie. 128 MB. + * + * NOTE: When the entire KSEG area is DMA capable, we substract + * one from the max address so that the virt_to_phys() macro + * works correctly on the address (otherwise the address + * enters another area, and virt_to_phys() may not return + * the value desired). + */ +#define MAX_DMA_ADDRESS (PAGE_OFFSET + XCHAL_KSEG_CACHED_SIZE - 1) + +/* Reserve and release a DMA channel */ +extern int request_dma(unsigned int dmanr, const char * device_id); +extern void free_dma(unsigned int dmanr); + +#ifdef CONFIG_PCI +extern int isa_dma_bridge_buggy; +#else +#define isa_dma_bridge_buggy (0) +#endif + + +#endif diff --git a/include/asm-xtensa/elf.h b/include/asm-xtensa/elf.h new file mode 100644 index 0000000..64f1f53 --- /dev/null +++ b/include/asm-xtensa/elf.h @@ -0,0 +1,222 @@ +/* + * include/asm-xtensa/elf.h + * + * ELF register definitions + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_ELF_H +#define _XTENSA_ELF_H + +#include <asm/ptrace.h> +#include <asm/coprocessor.h> +#include <xtensa/config/core.h> + +/* Xtensa processor ELF architecture-magic number */ + +#define EM_XTENSA 94 +#define EM_XTENSA_OLD 0xABC7 + +/* ELF register definitions. This is needed for core dump support. 
*/ + +/* + * elf_gregset_t contains the application-level state in the following order: + * Processor info: config_version, cpuxy + * Processor state: pc, ps, exccause, excvaddr, wb, ws, + * lbeg, lend, lcount, sar + * GP regs: ar0 - arXX + */ + +typedef unsigned long elf_greg_t; + +typedef struct { + elf_greg_t xchal_config_id0; + elf_greg_t xchal_config_id1; + elf_greg_t cpux; + elf_greg_t cpuy; + elf_greg_t pc; + elf_greg_t ps; + elf_greg_t exccause; + elf_greg_t excvaddr; + elf_greg_t windowbase; + elf_greg_t windowstart; + elf_greg_t lbeg; + elf_greg_t lend; + elf_greg_t lcount; + elf_greg_t sar; + elf_greg_t syscall; + elf_greg_t ar[XCHAL_NUM_AREGS]; +} xtensa_gregset_t; + +#define ELF_NGREG (sizeof(xtensa_gregset_t) / sizeof(elf_greg_t)) + +typedef elf_greg_t elf_gregset_t[ELF_NGREG]; + +/* + * Compute the size of the coprocessor and extra state layout (register info) + * table (in bytes). + * This is actually the maximum size of the table, as opposed to the size, + * which is available from the _xtensa_reginfo_table_size global variable. + * + * (See also arch/xtensa/kernel/coprocessor.S) + * + */ + +#ifndef XCHAL_EXTRA_SA_CONTENTS_LIBDB_NUM +# define XTENSA_CPE_LTABLE_SIZE 0 +#else +# define XTENSA_CPE_SEGMENT(num) (num ? (1+num) : 0) +# define XTENSA_CPE_LTABLE_ENTRIES \ + ( XTENSA_CPE_SEGMENT(XCHAL_EXTRA_SA_CONTENTS_LIBDB_NUM) \ + + XTENSA_CPE_SEGMENT(XCHAL_CP0_SA_CONTENTS_LIBDB_NUM) \ + + XTENSA_CPE_SEGMENT(XCHAL_CP1_SA_CONTENTS_LIBDB_NUM) \ + + XTENSA_CPE_SEGMENT(XCHAL_CP2_SA_CONTENTS_LIBDB_NUM) \ + + XTENSA_CPE_SEGMENT(XCHAL_CP3_SA_CONTENTS_LIBDB_NUM) \ + + XTENSA_CPE_SEGMENT(XCHAL_CP4_SA_CONTENTS_LIBDB_NUM) \ + + XTENSA_CPE_SEGMENT(XCHAL_CP5_SA_CONTENTS_LIBDB_NUM) \ + + XTENSA_CPE_SEGMENT(XCHAL_CP6_SA_CONTENTS_LIBDB_NUM) \ + + XTENSA_CPE_SEGMENT(XCHAL_CP7_SA_CONTENTS_LIBDB_NUM) \ + + 1 /* final entry */ \ + ) +# define XTENSA_CPE_LTABLE_SIZE (XTENSA_CPE_LTABLE_ENTRIES * 8) +#endif + + +/* + * Instantiations of the elf_fpregset_t type contain, in most + * architectures, the floating point (FPU) register set. + * For Xtensa, this type is extended to contain all custom state, + * ie. coprocessor and "extra" (non-coprocessor) state (including, + * for example, TIE-defined states and register files; as well + * as other optional processor state). + * This includes FPU state if a floating-point coprocessor happens + * to have been configured within the Xtensa processor. + * + * TOTAL_FPREGS_SIZE is the required size (without rounding) + * of elf_fpregset_t. It provides space for the following: + * + * a) 32-bit mask of active coprocessors for this task (similar + * to CPENABLE in single-threaded Xtensa processor systems) + * + * b) table describing the layout of custom states (ie. of + * individual registers, etc) within the save areas + * + * c) save areas for each coprocessor and for non-coprocessor + * ("extra") state + * + * Note that save areas may require up to 16-byte alignment when + * accessed by save/restore sequences. We do not need to ensure + * such alignment in an elf_fpregset_t structure because custom + * state is not directly loaded/stored into it; rather, save area + * contents are copied to elf_fpregset_t from the active save areas + * (see 'struct task_struct' definition in processor.h for that) + * using memcpy(). But we do allow space for such alignment, + * to allow optimizations of layout and copying. 
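ELF_NGREG above is simply the gregset structure measured in elf_greg_t units. A stand-alone illustration with an assumed XCHAL_NUM_AREGS of 64 (the value is configuration dependent and not given in this header):

#include <stdio.h>

typedef unsigned long elf_greg_t;
#define XCHAL_NUM_AREGS 64		/* assumed example configuration */

typedef struct {
	elf_greg_t xchal_config_id0, xchal_config_id1;
	elf_greg_t cpux, cpuy;
	elf_greg_t pc, ps, exccause, excvaddr;
	elf_greg_t windowbase, windowstart;
	elf_greg_t lbeg, lend, lcount, sar, syscall;
	elf_greg_t ar[XCHAL_NUM_AREGS];
} xtensa_gregset_t;

#define ELF_NGREG (sizeof(xtensa_gregset_t) / sizeof(elf_greg_t))

int main(void)
{
	printf("ELF_NGREG = %zu\n", ELF_NGREG);		/* 15 + 64 = 79 */
	return 0;
}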
+ */ + +#define TOTAL_FPREGS_SIZE \ + (4 + XTENSA_CPE_LTABLE_SIZE + XTENSA_CP_EXTRA_SIZE) +#define ELF_NFPREG \ + ((TOTAL_FPREGS_SIZE + sizeof(elf_fpreg_t) - 1) / sizeof(elf_fpreg_t)) + +typedef unsigned int elf_fpreg_t; +typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; + +#define ELF_CORE_COPY_REGS(_eregs, _pregs) \ + xtensa_elf_core_copy_regs (&_eregs, _pregs); + +extern void xtensa_elf_core_copy_regs (xtensa_gregset_t *, struct pt_regs *); + +/* + * This is used to ensure we don't load something for the wrong architecture. + */ + +#define elf_check_arch(x) ( ( (x)->e_machine == EM_XTENSA ) || \ + ( (x)->e_machine == EM_XTENSA_OLD ) ) + +/* + * These are used to set parameters in the core dumps. + */ + +#ifdef __XTENSA_EL__ +# define ELF_DATA ELFDATA2LSB +#elif defined(__XTENSA_EB__) +# define ELF_DATA ELFDATA2MSB +#else +# error processor byte order undefined! +#endif + +#define ELF_CLASS ELFCLASS32 +#define ELF_ARCH EM_XTENSA + +#define USE_ELF_CORE_DUMP +#define ELF_EXEC_PAGESIZE PAGE_SIZE + +/* + * This is the location that an ET_DYN program is loaded if exec'ed. Typical + * use of this is to invoke "./ld.so someprog" to test out a new version of + * the loader. We need to make sure that it is out of the way of the program + * that it will "exec", and that there is sufficient room for the brk. + */ + +#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) + +/* + * This yields a mask that user programs can use to figure out what + * instruction set this CPU supports. This could be done in user space, + * but it's not easy, and we've already done it here. + */ + +#define ELF_HWCAP (0) + +/* + * This yields a string that ld.so will use to load implementation + * specific libraries for optimization. This is more specific in + * intent than poking at uname or /proc/cpuinfo. + * For the moment, we have only optimizations for the Intel generations, + * but that could change... + */ + +#define ELF_PLATFORM (NULL) + +/* + * The Xtensa processor ABI says that when the program starts, a2 + * contains a pointer to a function which might be registered using + * `atexit'. This provides a mean for the dynamic linker to call + * DT_FINI functions for shared libraries that have been loaded before + * the code runs. + * + * A value of 0 tells we have no such handler. + * + * We might as well make sure everything else is cleared too (except + * for the stack pointer in a1), just to make things more + * deterministic. Also, clearing a0 terminates debugger backtraces. 
+ */ + +#define ELF_PLAT_INIT(_r, load_addr) \ + do { _r->areg[0]=0; /*_r->areg[1]=0;*/ _r->areg[2]=0; _r->areg[3]=0; \ + _r->areg[4]=0; _r->areg[5]=0; _r->areg[6]=0; _r->areg[7]=0; \ + _r->areg[8]=0; _r->areg[9]=0; _r->areg[10]=0; _r->areg[11]=0; \ + _r->areg[12]=0; _r->areg[13]=0; _r->areg[14]=0; _r->areg[15]=0; \ + } while (0) + +#ifdef __KERNEL__ + +#define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX_32BIT) + +extern void do_copy_regs (xtensa_gregset_t*, struct pt_regs*, + struct task_struct*); +extern void do_restore_regs (xtensa_gregset_t*, struct pt_regs*, + struct task_struct*); +extern void do_save_fpregs (elf_fpregset_t*, struct pt_regs*, + struct task_struct*); +extern int do_restore_fpregs (elf_fpregset_t*, struct pt_regs*, + struct task_struct*); + +#endif /* __KERNEL__ */ +#endif /* _XTENSA_ELF_H */ diff --git a/include/asm-xtensa/errno.h b/include/asm-xtensa/errno.h new file mode 100644 index 0000000..ced5194 --- /dev/null +++ b/include/asm-xtensa/errno.h @@ -0,0 +1,142 @@ +/* + * include/asm-xtensa/errno.h + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file "COPYING" in the main directory of + * this archive for more details. + * + * Copyright (C) 2002 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_ERRNO_H +#define _XTENSA_ERRNO_H + +#define EPERM 1 /* Operation not permitted */ +#define ENOENT 2 /* No such file or directory */ +#define ESRCH 3 /* No such process */ +#define EINTR 4 /* Interrupted system call */ +#define EIO 5 /* I/O error */ +#define ENXIO 6 /* No such device or address */ +#define E2BIG 7 /* Arg list too long */ +#define ENOEXEC 8 /* Exec format error */ +#define EBADF 9 /* Bad file number */ +#define ECHILD 10 /* No child processes */ +#define EAGAIN 11 /* Try again */ +#define ENOMEM 12 /* Out of memory */ +#define EACCES 13 /* Permission denied */ +#define EFAULT 14 /* Bad address */ +#define ENOTBLK 15 /* Block device required */ +#define EBUSY 16 /* Device or resource busy */ +#define EEXIST 17 /* File exists */ +#define EXDEV 18 /* Cross-device link */ +#define ENODEV 19 /* No such device */ +#define ENOTDIR 20 /* Not a directory */ +#define EISDIR 21 /* Is a directory */ +#define EINVAL 22 /* Invalid argument */ +#define ENFILE 23 /* File table overflow */ +#define EMFILE 24 /* Too many open files */ +#define ENOTTY 25 /* Not a typewriter */ +#define ETXTBSY 26 /* Text file busy */ +#define EFBIG 27 /* File too large */ +#define ENOSPC 28 /* No space left on device */ +#define ESPIPE 29 /* Illegal seek */ +#define EROFS 30 /* Read-only file system */ +#define EMLINK 31 /* Too many links */ +#define EPIPE 32 /* Broken pipe */ +#define EDOM 33 /* Math argument out of domain of func */ +#define ERANGE 34 /* Math result not representable */ +#define EDEADLK 35 /* Resource deadlock would occur */ +#define ENAMETOOLONG 36 /* File name too long */ +#define ENOLCK 37 /* No record locks available */ +#define ENOSYS 38 /* Function not implemented */ +#define ENOTEMPTY 39 /* Directory not empty */ +#define ELOOP 40 /* Too many symbolic links encountered */ +#define EWOULDBLOCK EAGAIN /* Operation would block */ +#define ENOMSG 42 /* No message of desired type */ +#define EIDRM 43 /* Identifier removed */ +#define ECHRNG 44 /* Channel number out of range */ +#define EL2NSYNC 45 /* Level 2 not synchronized */ +#define EL3HLT 46 /* Level 3 halted */ +#define EL3RST 47 /* Level 3 reset */ +#define ELNRNG 48 /* Link number out of range */ +#define EUNATCH 49 /* Protocol driver not attached */ 
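
These E* codes are what a failing system call reports: kernel code returns the negative value, and the C library stores the positive code in errno before handing -1 back to the caller. A minimal user-space illustration, using ENOENT from the list above:

/* Host-side sketch: a failing open() surfaces one of the codes above
 * through errno. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	int fd = open("/no/such/file", O_RDONLY);

	if (fd < 0)
		printf("open failed: errno=%d (%s)\n", errno, strerror(errno));
	/* Expected: errno == ENOENT (2), "No such file or directory". */
	return 0;
}
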
+#define ENOCSI 50 /* No CSI structure available */ +#define EL2HLT 51 /* Level 2 halted */ +#define EBADE 52 /* Invalid exchange */ +#define EBADR 53 /* Invalid request descriptor */ +#define EXFULL 54 /* Exchange full */ +#define ENOANO 55 /* No anode */ +#define EBADRQC 56 /* Invalid request code */ +#define EBADSLT 57 /* Invalid slot */ + +#define EDEADLOCK EDEADLK + +#define EBFONT 59 /* Bad font file format */ +#define ENOSTR 60 /* Device not a stream */ +#define ENODATA 61 /* No data available */ +#define ETIME 62 /* Timer expired */ +#define ENOSR 63 /* Out of streams resources */ +#define ENONET 64 /* Machine is not on the network */ +#define ENOPKG 65 /* Package not installed */ +#define EREMOTE 66 /* Object is remote */ +#define ENOLINK 67 /* Link has been severed */ +#define EADV 68 /* Advertise error */ +#define ESRMNT 69 /* Srmount error */ +#define ECOMM 70 /* Communication error on send */ +#define EPROTO 71 /* Protocol error */ +#define EMULTIHOP 72 /* Multihop attempted */ +#define EDOTDOT 73 /* RFS specific error */ +#define EBADMSG 74 /* Not a data message */ +#define EOVERFLOW 75 /* Value too large for defined data type */ +#define ENOTUNIQ 76 /* Name not unique on network */ +#define EBADFD 77 /* File descriptor in bad state */ +#define EREMCHG 78 /* Remote address changed */ +#define ELIBACC 79 /* Can not access a needed shared library */ +#define ELIBBAD 80 /* Accessing a corrupted shared library */ +#define ELIBSCN 81 /* .lib section in a.out corrupted */ +#define ELIBMAX 82 /* Attempting to link in too many shared libraries */ +#define ELIBEXEC 83 /* Cannot exec a shared library directly */ +#define EILSEQ 84 /* Illegal byte sequence */ +#define ERESTART 85 /* Interrupted system call should be restarted */ +#define ESTRPIPE 86 /* Streams pipe error */ +#define EUSERS 87 /* Too many users */ +#define ENOTSOCK 88 /* Socket operation on non-socket */ +#define EDESTADDRREQ 89 /* Destination address required */ +#define EMSGSIZE 90 /* Message too long */ +#define EPROTOTYPE 91 /* Protocol wrong type for socket */ +#define ENOPROTOOPT 92 /* Protocol not available */ +#define EPROTONOSUPPORT 93 /* Protocol not supported */ +#define ESOCKTNOSUPPORT 94 /* Socket type not supported */ +#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */ +#define EPFNOSUPPORT 96 /* Protocol family not supported */ +#define EAFNOSUPPORT 97 /* Address family not supported by protocol */ +#define EADDRINUSE 98 /* Address already in use */ +#define EADDRNOTAVAIL 99 /* Cannot assign requested address */ +#define ENETDOWN 100 /* Network is down */ +#define ENETUNREACH 101 /* Network is unreachable */ +#define ENETRESET 102 /* Network dropped connection because of reset */ +#define ECONNABORTED 103 /* Software caused connection abort */ +#define ECONNRESET 104 /* Connection reset by peer */ +#define ENOBUFS 105 /* No buffer space available */ +#define EISCONN 106 /* Transport endpoint is already connected */ +#define ENOTCONN 107 /* Transport endpoint is not connected */ +#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */ +#define ETOOMANYREFS 109 /* Too many references: cannot splice */ +#define ETIMEDOUT 110 /* Connection timed out */ +#define ECONNREFUSED 111 /* Connection refused */ +#define EHOSTDOWN 112 /* Host is down */ +#define EHOSTUNREACH 113 /* No route to host */ +#define EALREADY 114 /* Operation already in progress */ +#define EINPROGRESS 115 /* Operation now in progress */ +#define ESTALE 116 /* Stale NFS file handle */ +#define EUCLEAN 
117 /* Structure needs cleaning */ +#define ENOTNAM 118 /* Not a XENIX named type file */ +#define ENAVAIL 119 /* No XENIX semaphores available */ +#define EISNAM 120 /* Is a named type file */ +#define EREMOTEIO 121 /* Remote I/O error */ +#define EDQUOT 122 /* Quota exceeded */ + +#define ENOMEDIUM 123 /* No medium found */ +#define EMEDIUMTYPE 124 /* Wrong medium type */ + +#endif /* _XTENSA_ERRNO_H */ diff --git a/include/asm-xtensa/fcntl.h b/include/asm-xtensa/fcntl.h new file mode 100644 index 0000000..48876bb7 --- /dev/null +++ b/include/asm-xtensa/fcntl.h @@ -0,0 +1,101 @@ +/* + * include/asm-xtensa/fcntl.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1995, 1996, 1997, 1998 by Ralf Baechle + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_FCNTL_H +#define _XTENSA_FCNTL_H + +/* open/fcntl - O_SYNC is only implemented on blocks devices and on files + located on an ext2 file system */ +#define O_ACCMODE 0x0003 +#define O_RDONLY 0x0000 +#define O_WRONLY 0x0001 +#define O_RDWR 0x0002 +#define O_APPEND 0x0008 +#define O_SYNC 0x0010 +#define O_NONBLOCK 0x0080 +#define O_CREAT 0x0100 /* not fcntl */ +#define O_TRUNC 0x0200 /* not fcntl */ +#define O_EXCL 0x0400 /* not fcntl */ +#define O_NOCTTY 0x0800 /* not fcntl */ +#define FASYNC 0x1000 /* fcntl, for BSD compatibility */ +#define O_LARGEFILE 0x2000 /* allow large file opens - currently ignored */ +#define O_DIRECT 0x8000 /* direct disk access hint - currently ignored*/ +#define O_DIRECTORY 0x10000 /* must be a directory */ +#define O_NOFOLLOW 0x20000 /* don't follow links */ +#define O_NOATIME 0x100000 + +#define O_NDELAY O_NONBLOCK + +#define F_DUPFD 0 /* dup */ +#define F_GETFD 1 /* get close_on_exec */ +#define F_SETFD 2 /* set/clear close_on_exec */ +#define F_GETFL 3 /* get file->f_flags */ +#define F_SETFL 4 /* set file->f_flags */ +#define F_GETLK 14 +#define F_GETLK64 15 +#define F_SETLK 6 +#define F_SETLKW 7 +#define F_SETLK64 16 +#define F_SETLKW64 17 + +#define F_SETOWN 24 /* for sockets. */ +#define F_GETOWN 23 /* for sockets. */ +#define F_SETSIG 10 /* for sockets. */ +#define F_GETSIG 11 /* for sockets. */ + +/* for F_[GET|SET]FL */ +#define FD_CLOEXEC 1 /* actually anything with low bit set goes */ + +/* for posix fcntl() and lockf() */ +#define F_RDLCK 0 +#define F_WRLCK 1 +#define F_UNLCK 2 + +/* for old implementation of bsd flock () */ +#define F_EXLCK 4 /* or 3 */ +#define F_SHLCK 8 /* or 4 */ + +/* for leases */ +#define F_INPROGRESS 16 + +/* operations for bsd flock(), also used by the kernel implementation */ +#define LOCK_SH 1 /* shared lock */ +#define LOCK_EX 2 /* exclusive lock */ +#define LOCK_NB 4 /* or'd with one of the above to prevent + blocking */ +#define LOCK_UN 8 /* remove lock */ + +#define LOCK_MAND 32 /* This is a mandatory flock ... 
*/ +#define LOCK_READ 64 /* which allows concurrent read operations */ +#define LOCK_WRITE 128 /* which allows concurrent write operations */ +#define LOCK_RW 192 /* which allows concurrent read & write ops */ + +typedef struct flock { + short l_type; + short l_whence; + __kernel_off_t l_start; + __kernel_off_t l_len; + long l_sysid; + __kernel_pid_t l_pid; + long pad[4]; +} flock_t; + +struct flock64 { + short l_type; + short l_whence; + __kernel_off_t l_start; + __kernel_off_t l_len; + pid_t l_pid; +}; + +#define F_LINUX_SPECIFIC_BASE 1024 + +#endif /* _XTENSA_FCNTL_H */ diff --git a/include/asm-xtensa/fixmap.h b/include/asm-xtensa/fixmap.h new file mode 100644 index 0000000..4423b8a --- /dev/null +++ b/include/asm-xtensa/fixmap.h @@ -0,0 +1,252 @@ +/* + * include/asm-xtensa/fixmap.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_FIXMAP_H +#define _XTENSA_FIXMAP_H + +#include <asm/processor.h> + +#ifdef CONFIG_MMU + +/* + * Here we define all the compile-time virtual addresses. + */ + +#if XCHAL_SEG_MAPPABLE_VADDR != 0 +# error "Current port requires virtual user space starting at 0" +#endif +#if XCHAL_SEG_MAPPABLE_SIZE < 0x80000000 +# error "Current port requires at least 0x8000000 bytes for user space" +#endif + +/* Verify instruction/data ram/rom and xlmi don't overlay vmalloc space. */ + +#define __IN_VMALLOC(addr) \ + (((addr) >= VMALLOC_START) && ((addr) < VMALLOC_END)) +#define __SPAN_VMALLOC(start,end) \ + (((start) < VMALLOC_START) && ((end) >= VMALLOC_END)) +#define INSIDE_VMALLOC(start,end) \ + (__IN_VMALLOC((start)) || __IN_VMALLOC(end) || __SPAN_VMALLOC((start),(end))) + +#if XCHAL_NUM_INSTROM +# if XCHAL_NUM_INSTROM == 1 +# if INSIDE_VMALLOC(XCHAL_INSTROM0_VADDR,XCHAL_INSTROM0_VADDR+XCHAL_INSTROM0_SIZE) +# error vmalloc range conflicts with instrom0 +# endif +# endif +# if XCHAL_NUM_INSTROM == 2 +# if INSIDE_VMALLOC(XCHAL_INSTROM1_VADDR,XCHAL_INSTROM1_VADDR+XCHAL_INSTROM1_SIZE) +# error vmalloc range conflicts with instrom1 +# endif +# endif +#endif + +#if XCHAL_NUM_INSTRAM +# if XCHAL_NUM_INSTRAM == 1 +# if INSIDE_VMALLOC(XCHAL_INSTRAM0_VADDR,XCHAL_INSTRAM0_VADDR+XCHAL_INSTRAM0_SIZE) +# error vmalloc range conflicts with instram0 +# endif +# endif +# if XCHAL_NUM_INSTRAM == 2 +# if INSIDE_VMALLOC(XCHAL_INSTRAM1_VADDR,XCHAL_INSTRAM1_VADDR+XCHAL_INSTRAM1_SIZE) +# error vmalloc range conflicts with instram1 +# endif +# endif +#endif + +#if XCHAL_NUM_DATAROM +# if XCHAL_NUM_DATAROM == 1 +# if INSIDE_VMALLOC(XCHAL_DATAROM0_VADDR,XCHAL_DATAROM0_VADDR+XCHAL_DATAROM0_SIZE) +# error vmalloc range conflicts with datarom0 +# endif +# endif +# if XCHAL_NUM_DATAROM == 2 +# if INSIDE_VMALLOC(XCHAL_DATAROM1_VADDR,XCHAL_DATAROM1_VADDR+XCHAL_DATAROM1_SIZE) +# error vmalloc range conflicts with datarom1 +# endif +# endif +#endif + +#if XCHAL_NUM_DATARAM +# if XCHAL_NUM_DATARAM == 1 +# if INSIDE_VMALLOC(XCHAL_DATARAM0_VADDR,XCHAL_DATARAM0_VADDR+XCHAL_DATARAM0_SIZE) +# error vmalloc range conflicts with dataram0 +# endif +# endif +# if XCHAL_NUM_DATARAM == 2 +# if INSIDE_VMALLOC(XCHAL_DATARAM1_VADDR,XCHAL_DATARAM1_VADDR+XCHAL_DATARAM1_SIZE) +# error vmalloc range conflicts with dataram1 +# endif +# endif +#endif + +#if XCHAL_NUM_XLMI +# if XCHAL_NUM_XLMI == 1 +# if INSIDE_VMALLOC(XCHAL_XLMI0_VADDR,XCHAL_XLMI0_VADDR+XCHAL_XLMI0_SIZE) +# error vmalloc range conflicts with xlmi0 +# endif +# 
endif +# if XCHAL_NUM_XLMI == 2 +# if INSIDE_VMALLOC(XCHAL_XLMI1_VADDR,XCHAL_XLMI1_VADDR+XCHAL_XLMI1_SIZE) +# error vmalloc range conflicts with xlmi1 +# endif +# endif +#endif + +#if (XCHAL_NUM_INSTROM > 2) || \ + (XCHAL_NUM_INSTRAM > 2) || \ + (XCHAL_NUM_DATARAM > 2) || \ + (XCHAL_NUM_DATAROM > 2) || \ + (XCHAL_NUM_XLMI > 2) +# error Insufficient checks on vmalloc above for more than 2 devices +#endif + +/* + * USER_VM_SIZE does not necessarily equal TASK_SIZE. We bumped + * TASK_SIZE down to 0x4000000 to simplify the handling of windowed + * call instructions (currently limited to a range of 1 GByte). User + * tasks may very well reclaim the VM space from 0x40000000 to + * 0x7fffffff in the future, so we do not want the kernel becoming + * accustomed to having any of its stuff (e.g., page tables) in this + * region. This VM region is no-man's land for now. + */ + +#define USER_VM_START XCHAL_SEG_MAPPABLE_VADDR +#define USER_VM_SIZE 0x80000000 + +/* Size of page table: */ + +#define PGTABLE_SIZE_BITS (32 - XCHAL_MMU_MIN_PTE_PAGE_SIZE + 2) +#define PGTABLE_SIZE (1L << PGTABLE_SIZE_BITS) + +/* All kernel-mappable space: */ + +#define KERNEL_ALLMAP_START (USER_VM_START + USER_VM_SIZE) +#define KERNEL_ALLMAP_SIZE (XCHAL_SEG_MAPPABLE_SIZE - KERNEL_ALLMAP_START) + +/* Carve out page table at start of kernel-mappable area: */ + +#if KERNEL_ALLMAP_SIZE < PGTABLE_SIZE +#error "Gimme some space for page table!" +#endif +#define PGTABLE_START KERNEL_ALLMAP_START + +/* Remaining kernel-mappable space: */ + +#define KERNEL_MAPPED_START (KERNEL_ALLMAP_START + PGTABLE_SIZE) +#define KERNEL_MAPPED_SIZE (KERNEL_ALLMAP_SIZE - PGTABLE_SIZE) + +#if KERNEL_MAPPED_SIZE < 0x01000000 /* 16 MB is arbitrary for now */ +# error "Shouldn't the kernel have at least *some* mappable space?" +#endif + +#define MAX_LOW_MEMORY XCHAL_KSEG_CACHED_SIZE + +#endif + +/* + * Some constants used elsewhere, but perhaps only in Xtensa header + * files, so maybe we can get rid of some and access compile-time HAL + * directly... + * + * Note: We assume that system RAM is located at the very start of the + * kernel segments !! + */ +#define KERNEL_VM_LOW XCHAL_KSEG_CACHED_VADDR +#define KERNEL_VM_HIGH XCHAL_KSEG_BYPASS_VADDR +#define KERNEL_SPACE XCHAL_KSEG_CACHED_VADDR + +/* + * Returns the physical/virtual addresses of the kernel space + * (works with the cached kernel segment only, which is the + * one normally used for kernel operation). + */ + +/* PHYSICAL BYPASS CACHED + * + * bypass vaddr bypass paddr * cached vaddr + * cached vaddr cached paddr bypass vaddr * + * bypass paddr * bypass vaddr cached vaddr + * cached paddr * bypass vaddr cached vaddr + * other * * * + */ + +#define PHYSADDR(a) \ +(((unsigned)(a) >= XCHAL_KSEG_BYPASS_VADDR \ + && (unsigned)(a) < XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_BYPASS_SIZE) ? \ + (unsigned)(a) - XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_BYPASS_PADDR : \ + ((unsigned)(a) >= XCHAL_KSEG_CACHED_VADDR \ + && (unsigned)(a) < XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_CACHED_SIZE) ? \ + (unsigned)(a) - XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_CACHED_PADDR : \ + (unsigned)(a)) + +#define BYPASS_ADDR(a) \ +(((unsigned)(a) >= XCHAL_KSEG_BYPASS_PADDR \ + && (unsigned)(a) < XCHAL_KSEG_BYPASS_PADDR + XCHAL_KSEG_BYPASS_SIZE) ? \ + (unsigned)(a) - XCHAL_KSEG_BYPASS_PADDR + XCHAL_KSEG_BYPASS_VADDR : \ + ((unsigned)(a) >= XCHAL_KSEG_CACHED_PADDR \ + && (unsigned)(a) < XCHAL_KSEG_CACHED_PADDR + XCHAL_KSEG_CACHED_SIZE) ? 
\ + (unsigned)(a) - XCHAL_KSEG_CACHED_PADDR + XCHAL_KSEG_BYPASS_VADDR : \ + ((unsigned)(a) >= XCHAL_KSEG_CACHED_VADDR \ + && (unsigned)(a) < XCHAL_KSEG_CACHED_VADDR+XCHAL_KSEG_CACHED_SIZE)? \ + (unsigned)(a) - XCHAL_KSEG_CACHED_VADDR+XCHAL_KSEG_BYPASS_VADDR: \ + (unsigned)(a)) + +#define CACHED_ADDR(a) \ +(((unsigned)(a) >= XCHAL_KSEG_BYPASS_PADDR \ + && (unsigned)(a) < XCHAL_KSEG_BYPASS_PADDR + XCHAL_KSEG_BYPASS_SIZE) ? \ + (unsigned)(a) - XCHAL_KSEG_BYPASS_PADDR + XCHAL_KSEG_CACHED_VADDR : \ + ((unsigned)(a) >= XCHAL_KSEG_CACHED_PADDR \ + && (unsigned)(a) < XCHAL_KSEG_CACHED_PADDR + XCHAL_KSEG_CACHED_SIZE) ? \ + (unsigned)(a) - XCHAL_KSEG_CACHED_PADDR + XCHAL_KSEG_CACHED_VADDR : \ + ((unsigned)(a) >= XCHAL_KSEG_BYPASS_VADDR \ + && (unsigned)(a) < XCHAL_KSEG_BYPASS_VADDR+XCHAL_KSEG_BYPASS_SIZE) ? \ + (unsigned)(a) - XCHAL_KSEG_BYPASS_VADDR+XCHAL_KSEG_CACHED_VADDR : \ + (unsigned)(a)) + +#define PHYSADDR_IO(a) \ +(((unsigned)(a) >= XCHAL_KIO_BYPASS_VADDR \ + && (unsigned)(a) < XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_BYPASS_SIZE) ? \ + (unsigned)(a) - XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_BYPASS_PADDR : \ + ((unsigned)(a) >= XCHAL_KIO_CACHED_VADDR \ + && (unsigned)(a) < XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_CACHED_SIZE) ? \ + (unsigned)(a) - XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_CACHED_PADDR : \ + (unsigned)(a)) + +#define BYPASS_ADDR_IO(a) \ +(((unsigned)(a) >= XCHAL_KIO_BYPASS_PADDR \ + && (unsigned)(a) < XCHAL_KIO_BYPASS_PADDR + XCHAL_KIO_BYPASS_SIZE) ? \ + (unsigned)(a) - XCHAL_KIO_BYPASS_PADDR + XCHAL_KIO_BYPASS_VADDR : \ + ((unsigned)(a) >= XCHAL_KIO_CACHED_PADDR \ + && (unsigned)(a) < XCHAL_KIO_CACHED_PADDR + XCHAL_KIO_CACHED_SIZE) ? \ + (unsigned)(a) - XCHAL_KIO_CACHED_PADDR + XCHAL_KIO_BYPASS_VADDR : \ + ((unsigned)(a) >= XCHAL_KIO_CACHED_VADDR \ + && (unsigned)(a) < XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_CACHED_SIZE) ? \ + (unsigned)(a) - XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_BYPASS_VADDR : \ + (unsigned)(a)) + +#define CACHED_ADDR_IO(a) \ +(((unsigned)(a) >= XCHAL_KIO_BYPASS_PADDR \ + && (unsigned)(a) < XCHAL_KIO_BYPASS_PADDR + XCHAL_KIO_BYPASS_SIZE) ? \ + (unsigned)(a) - XCHAL_KIO_BYPASS_PADDR + XCHAL_KIO_CACHED_VADDR : \ + ((unsigned)(a) >= XCHAL_KIO_CACHED_PADDR \ + && (unsigned)(a) < XCHAL_KIO_CACHED_PADDR + XCHAL_KIO_CACHED_SIZE) ? \ + (unsigned)(a) - XCHAL_KIO_CACHED_PADDR + XCHAL_KIO_CACHED_VADDR : \ + ((unsigned)(a) >= XCHAL_KIO_BYPASS_VADDR \ + && (unsigned)(a) < XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_BYPASS_SIZE) ? \ + (unsigned)(a) - XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_CACHED_VADDR : \ + (unsigned)(a)) + +#endif /* _XTENSA_ADDRSPACE_H */ + + + + + diff --git a/include/asm-xtensa/hardirq.h b/include/asm-xtensa/hardirq.h new file mode 100644 index 0000000..e07c76c --- /dev/null +++ b/include/asm-xtensa/hardirq.h @@ -0,0 +1,28 @@ +/* + * include/asm-xtensa/hardirq.h + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file "COPYING" in the main directory of + * this archive for more details. + * + * Copyright (C) 2002 - 2005 Tensilica Inc. 
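
Returning to the PHYSADDR() and CACHED_ADDR() macros in the fixmap.h hunk above: they are straight-line window arithmetic, shown here as a host-side sketch. The KSEG addresses and size are assumed example values (a 128 MB cached window at 0xD0000000 and a bypass window at 0xD8000000, both mapping physical 0), and only the two cases that virt_to_phys() and phys_to_virt() rely on are kept.

/* Host-side sketch of the kernel-segment translations in fixmap.h above,
 * with assumed example window addresses.  Simplified to the cached- and
 * bypass-window cases. */
#include <stdio.h>

#define KSEG_CACHED_VADDR 0xD0000000u   /* assumed */
#define KSEG_BYPASS_VADDR 0xD8000000u   /* assumed */
#define KSEG_PADDR        0x00000000u   /* assumed */
#define KSEG_SIZE         0x08000000u   /* assumed: 128 MB */

static unsigned int physaddr(unsigned int a)
{
	if (a >= KSEG_BYPASS_VADDR && a < KSEG_BYPASS_VADDR + KSEG_SIZE)
		return a - KSEG_BYPASS_VADDR + KSEG_PADDR;
	if (a >= KSEG_CACHED_VADDR && a < KSEG_CACHED_VADDR + KSEG_SIZE)
		return a - KSEG_CACHED_VADDR + KSEG_PADDR;
	return a;               /* anything else passes through unchanged */
}

static unsigned int cached_addr(unsigned int pa)
{
	if (pa >= KSEG_PADDR && pa < KSEG_PADDR + KSEG_SIZE)
		return pa - KSEG_PADDR + KSEG_CACHED_VADDR;
	return pa;
}

int main(void)
{
	unsigned int va = 0xD0100000u;                /* cached kernel vaddr */

	printf("PHYSADDR(0x%08x)    = 0x%08x\n", va, physaddr(va));
	printf("CACHED_ADDR(0x%08x) = 0x%08x\n", physaddr(va),
	       cached_addr(physaddr(va)));
	return 0;
}
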
+ */ + +#ifndef _XTENSA_HARDIRQ_H +#define _XTENSA_HARDIRQ_H + +#include <linux/config.h> +#include <linux/cache.h> +#include <asm/irq.h> + +/* headers.S is sensitive to the offsets of these fields */ +typedef struct { + unsigned int __softirq_pending; + unsigned int __syscall_count; + struct task_struct * __ksoftirqd_task; /* waitqueue is too large */ + unsigned int __nmi_count; /* arch dependent */ +} ____cacheline_aligned irq_cpustat_t; + +#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ + +#endif /* _XTENSA_HARDIRQ_H */ diff --git a/include/asm-xtensa/hdreg.h b/include/asm-xtensa/hdreg.h new file mode 100644 index 0000000..64b8060 --- /dev/null +++ b/include/asm-xtensa/hdreg.h @@ -0,0 +1,17 @@ +/* + * include/asm-xtensa/hdreg.h + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file "COPYING" in the main directory of + * this archive for more details. + * + * Copyright (C) 2002 - 2005 Tensilica Inc. + * Copyright (C) 1994-1996 Linus Torvalds & authors + */ + +#ifndef _XTENSA_HDREG_H +#define _XTENSA_HDREG_H + +typedef unsigned int ide_ioreg_t; + +#endif diff --git a/include/asm-xtensa/highmem.h b/include/asm-xtensa/highmem.h new file mode 100644 index 0000000..0a046ca --- /dev/null +++ b/include/asm-xtensa/highmem.h @@ -0,0 +1,17 @@ +/* + * include/asm-xtensa/highmem.h + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file "COPYING" in the main directory of + * this archive for more details. + * + * Copyright (C) 2003 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_HIGHMEM_H +#define _XTENSA_HIGHMEM_H + +extern void flush_cache_kmaps(void); + +#endif + diff --git a/include/asm-xtensa/hw_irq.h b/include/asm-xtensa/hw_irq.h new file mode 100644 index 0000000..ccf4362 --- /dev/null +++ b/include/asm-xtensa/hw_irq.h @@ -0,0 +1,18 @@ +/* + * include/asm-xtensa/hw_irq.h + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file "COPYING" in the main directory of + * this archive for more details. + * + * Copyright (C) 2002 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_HW_IRQ_H +#define _XTENSA_HW_IRQ_H + +static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) +{ +} + +#endif diff --git a/include/asm-xtensa/ide.h b/include/asm-xtensa/ide.h new file mode 100644 index 0000000..b523cd4 --- /dev/null +++ b/include/asm-xtensa/ide.h @@ -0,0 +1,36 @@ +/* + * include/asm-xtensa/ide.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994 - 1996 Linus Torvalds & authors + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_IDE_H +#define _XTENSA_IDE_H + +#ifdef __KERNEL__ + +#include <linux/config.h> + +#ifndef MAX_HWIFS +# define MAX_HWIFS 1 +#endif + +static __inline__ int ide_default_irq(unsigned long base) +{ + /* Unsupported! */ + return 0; +} + +static __inline__ unsigned long ide_default_io_base(int index) +{ + /* Unsupported! */ + return 0; +} + +#endif /* __KERNEL__ */ +#endif /* _XTENSA_IDE_H */ diff --git a/include/asm-xtensa/io.h b/include/asm-xtensa/io.h new file mode 100644 index 0000000..2c471c4 --- /dev/null +++ b/include/asm-xtensa/io.h @@ -0,0 +1,197 @@ +/* + * linux/include/asm-xtensa/io.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. 
See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_IO_H +#define _XTENSA_IO_H + +#ifdef __KERNEL__ +#include <linux/config.h> +#include <asm/byteorder.h> + +#include <linux/types.h> +#include <asm/fixmap.h> + +#define _IO_BASE 0 + + +/* + * swap functions to change byte order from little-endian to big-endian and + * vice versa. + */ + +static inline unsigned short _swapw (unsigned short v) +{ + return (v << 8) | (v >> 8); +} + +static inline unsigned int _swapl (unsigned int v) +{ + return (v << 24) | ((v & 0xff00) << 8) | ((v >> 8) & 0xff00) | (v >> 24); +} + +/* + * Change virtual addresses to physical addresses and vv. + * These are trivial on the 1:1 Linux/Xtensa mapping + */ + +extern inline unsigned long virt_to_phys(volatile void * address) +{ + return PHYSADDR((unsigned long)address); +} + +extern inline void * phys_to_virt(unsigned long address) +{ + return (void*) CACHED_ADDR(address); +} + +/* + * IO bus memory addresses are also 1:1 with the physical address + */ + +extern inline unsigned long virt_to_bus(volatile void * address) +{ + return PHYSADDR((unsigned long)address); +} + +extern inline void * bus_to_virt (unsigned long address) +{ + return (void *) CACHED_ADDR(address); +} + +/* + * Change "struct page" to physical address. + */ + +extern inline void *ioremap(unsigned long offset, unsigned long size) +{ + return (void *) CACHED_ADDR_IO(offset); +} + +extern inline void *ioremap_nocache(unsigned long offset, unsigned long size) +{ + return (void *) BYPASS_ADDR_IO(offset); +} + +extern inline void iounmap(void *addr) +{ +} + +/* + * Generic I/O + */ + +#define readb(addr) \ + ({ unsigned char __v = (*(volatile unsigned char *)(addr)); __v; }) +#define readw(addr) \ + ({ unsigned short __v = (*(volatile unsigned short *)(addr)); __v; }) +#define readl(addr) \ + ({ unsigned int __v = (*(volatile unsigned int *)(addr)); __v; }) +#define writeb(b, addr) (void)((*(volatile unsigned char *)(addr)) = (b)) +#define writew(b, addr) (void)((*(volatile unsigned short *)(addr)) = (b)) +#define writel(b, addr) (void)((*(volatile unsigned int *)(addr)) = (b)) + +static inline __u8 __raw_readb(const volatile void __iomem *addr) +{ + return *(__force volatile __u8 *)(addr); +} +static inline __u16 __raw_readw(const volatile void __iomem *addr) +{ + return *(__force volatile __u16 *)(addr); +} +static inline __u32 __raw_readl(const volatile void __iomem *addr) +{ + return *(__force volatile __u32 *)(addr); +} +static inline void __raw_writeb(__u8 b, volatile void __iomem *addr) +{ + *(__force volatile __u8 *)(addr) = b; +} +static inline void __raw_writew(__u16 b, volatile void __iomem *addr) +{ + *(__force volatile __u16 *)(addr) = b; +} +static inline void __raw_writel(__u32 b, volatile void __iomem *addr) +{ + *(__force volatile __u32 *)(addr) = b; +} + + + + +/* These are the definitions for the x86 IO instructions + * inb/inw/inl/outb/outw/outl, the "string" versions + * insb/insw/insl/outsb/outsw/outsl, and the "pausing" versions + * inb_p/inw_p/... + * The macros don't do byte-swapping. 
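
For the accessors that do need a swap (in_le16() and in_le32() on a big-endian core), the _swapw() and _swapl() helpers near the top of this header are plain shift-and-mask code; a small host program is enough to check them.

/* Quick host-side check of the _swapw()/_swapl() helpers defined above:
 * they simply reverse byte order. */
#include <stdio.h>

static unsigned short _swapw(unsigned short v)
{
	return (v << 8) | (v >> 8);
}

static unsigned int _swapl(unsigned int v)
{
	return (v << 24) | ((v & 0xff00) << 8) | ((v >> 8) & 0xff00) | (v >> 24);
}

int main(void)
{
	printf("_swapw(0x1234)     = 0x%04x\n", _swapw(0x1234));        /* 0x3412 */
	printf("_swapl(0x12345678) = 0x%08x\n", _swapl(0x12345678));    /* 0x78563412 */
	return 0;
}
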
+ */ + +#define inb(port) readb((u8 *)((port)+_IO_BASE)) +#define outb(val, port) writeb((val),(u8 *)((unsigned long)(port)+_IO_BASE)) +#define inw(port) readw((u16 *)((port)+_IO_BASE)) +#define outw(val, port) writew((val),(u16 *)((unsigned long)(port)+_IO_BASE)) +#define inl(port) readl((u32 *)((port)+_IO_BASE)) +#define outl(val, port) writel((val),(u32 *)((unsigned long)(port))) + +#define inb_p(port) inb((port)) +#define outb_p(val, port) outb((val), (port)) +#define inw_p(port) inw((port)) +#define outw_p(val, port) outw((val), (port)) +#define inl_p(port) inl((port)) +#define outl_p(val, port) outl((val), (port)) + +extern void insb (unsigned long port, void *dst, unsigned long count); +extern void insw (unsigned long port, void *dst, unsigned long count); +extern void insl (unsigned long port, void *dst, unsigned long count); +extern void outsb (unsigned long port, const void *src, unsigned long count); +extern void outsw (unsigned long port, const void *src, unsigned long count); +extern void outsl (unsigned long port, const void *src, unsigned long count); + +#define IO_SPACE_LIMIT ~0 + +#define memset_io(a,b,c) memset((void *)(a),(b),(c)) +#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c)) +#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c)) + +/* At this point the Xtensa doesn't provide byte swap instructions */ + +#ifdef __XTENSA_EB__ +# define in_8(addr) (*(u8*)(addr)) +# define in_le16(addr) _swapw(*(u16*)(addr)) +# define in_le32(addr) _swapl(*(u32*)(addr)) +# define out_8(b, addr) *(u8*)(addr) = (b) +# define out_le16(b, addr) *(u16*)(addr) = _swapw(b) +# define out_le32(b, addr) *(u32*)(addr) = _swapl(b) +#elif defined(__XTENSA_EL__) +# define in_8(addr) (*(u8*)(addr)) +# define in_le16(addr) (*(u16*)(addr)) +# define in_le32(addr) (*(u32*)(addr)) +# define out_8(b, addr) *(u8*)(addr) = (b) +# define out_le16(b, addr) *(u16*)(addr) = (b) +# define out_le32(b, addr) *(u32*)(addr) = (b) +#else +# error processor byte order undefined! +#endif + + +/* + * * Convert a physical pointer to a virtual kernel pointer for /dev/mem + * * access + * */ +#define xlate_dev_mem_ptr(p) __va(p) + +/* + * * Convert a virtual cached pointer to an uncached pointer + * */ +#define xlate_dev_kmem_ptr(p) p + + +#endif /* __KERNEL__ */ + +#endif /* _XTENSA_IO_H */ diff --git a/include/asm-xtensa/ioctl.h b/include/asm-xtensa/ioctl.h new file mode 100644 index 0000000..856c605 --- /dev/null +++ b/include/asm-xtensa/ioctl.h @@ -0,0 +1,83 @@ +/* + * include/asm-xtensa/ioctl.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2003 - 2005 Tensilica Inc. + * + * Derived from "include/asm-i386/ioctl.h" + */ + +#ifndef _XTENSA_IOCTL_H +#define _XTENSA_IOCTL_H + + +/* ioctl command encoding: 32 bits total, command in lower 16 bits, + * size of the parameter structure in the lower 14 bits of the + * upper 16 bits. + * Encoding the size of the parameter structure in the ioctl request + * is useful for catching programs compiled with old versions + * and to avoid overwriting user space outside the user buffer area. + * The highest 2 bits are reserved for indicating the ``access mode''. + * NOTE: This limits the max parameter size to 16kB -1 ! + */ + +/* + * The following is for compatibility across the various Linux + * platforms. The i386 ioctl numbering scheme doesn't really enforce + * a type field. 
De facto, however, the top 8 bits of the lower 16 + * bits are indeed used as a type field, so we might just as well make + * this explicit here. Please be sure to use the decoding macros + * below from now on. + */ +#define _IOC_NRBITS 8 +#define _IOC_TYPEBITS 8 +#define _IOC_SIZEBITS 14 +#define _IOC_DIRBITS 2 + +#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1) +#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1) +#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1) +#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1) + +#define _IOC_NRSHIFT 0 +#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS) +#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS) +#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS) + +/* + * Direction bits. + */ +#define _IOC_NONE 0U +#define _IOC_WRITE 1U +#define _IOC_READ 2U + +#define _IOC(dir,type,nr,size) \ + (((dir) << _IOC_DIRSHIFT) | \ + ((type) << _IOC_TYPESHIFT) | \ + ((nr) << _IOC_NRSHIFT) | \ + ((size) << _IOC_SIZESHIFT)) + +/* used to create numbers */ +#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0) +#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size)) +#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size)) +#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size)) + +/* used to decode ioctl numbers.. */ +#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK) +#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK) +#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK) +#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK) + +/* ...and for the drivers/sound files... */ + +#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT) +#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT) +#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT) +#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT) +#define IOCSIZE_SHIFT (_IOC_SIZESHIFT) + +#endif diff --git a/include/asm-xtensa/ioctls.h b/include/asm-xtensa/ioctls.h new file mode 100644 index 0000000..10c4434 --- /dev/null +++ b/include/asm-xtensa/ioctls.h @@ -0,0 +1,112 @@ +/* + * include/asm-xtensa/ioctl.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2003 - 2005 Tensilica Inc. 
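
Before the individual requests below, a worked example of the _IOC() encoding defined in the ioctl.h hunk above may help: the sketch rebuilds FIONBIO (_IOW('f', 126, int), as defined a few lines further down) and decodes it again. The field masks are written out as literals equal to the corresponding _IOC_*MASK definitions.

/* Worked example of the _IOC() encoding above: build the FIONBIO request
 * and decode it again. */
#include <stdio.h>

#define _IOC_NRBITS    8
#define _IOC_TYPEBITS  8
#define _IOC_SIZEBITS 14

#define _IOC_NRSHIFT    0
#define _IOC_TYPESHIFT  (_IOC_NRSHIFT + _IOC_NRBITS)
#define _IOC_SIZESHIFT  (_IOC_TYPESHIFT + _IOC_TYPEBITS)
#define _IOC_DIRSHIFT   (_IOC_SIZESHIFT + _IOC_SIZEBITS)

#define _IOC_NONE  0U
#define _IOC_WRITE 1U
#define _IOC_READ  2U

#define _IOC(dir, type, nr, size) \
	(((dir) << _IOC_DIRSHIFT) | ((type) << _IOC_TYPESHIFT) | \
	 ((nr) << _IOC_NRSHIFT) | ((size) << _IOC_SIZESHIFT))
#define _IOW(type, nr, size) _IOC(_IOC_WRITE, (type), (nr), sizeof(size))

#define _IOC_DIR(nr)  (((nr) >> _IOC_DIRSHIFT) & 3)
#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & 0xff)
#define _IOC_NR(nr)   (((nr) >> _IOC_NRSHIFT) & 0xff)
#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & 0x3fff)

int main(void)
{
	unsigned int fionbio = _IOW('f', 126, int);

	printf("FIONBIO = 0x%08x\n", fionbio);   /* expect 0x4004667e */
	printf("dir=%u type='%c' nr=%u size=%u\n",
	       _IOC_DIR(fionbio), _IOC_TYPE(fionbio),
	       _IOC_NR(fionbio), _IOC_SIZE(fionbio));
	return 0;
}
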
+ * + * Derived from "include/asm-i386/ioctls.h" + */ + +#ifndef _XTENSA_IOCTLS_H +#define _XTENSA_IOCTLS_H + +#include <asm/ioctl.h> + +#define FIOCLEX _IO('f', 1) +#define FIONCLEX _IO('f', 2) +#define FIOASYNC _IOW('f', 125, int) +#define FIONBIO _IOW('f', 126, int) +#define FIONREAD _IOR('f', 127, int) +#define TIOCINQ FIONREAD +#define FIOQSIZE _IOR('f', 128, loff_t) + +#define TCGETS 0x5401 +#define TCSETS 0x5402 +#define TCSETSW 0x5403 +#define TCSETSF 0x5404 + +#define TCGETA _IOR('t', 23, struct termio) +#define TCSETA _IOW('t', 24, struct termio) +#define TCSETAW _IOW('t', 25, struct termio) +#define TCSETAF _IOW('t', 28, struct termio) + +#define TCSBRK _IO('t', 29) +#define TCXONC _IO('t', 30) +#define TCFLSH _IO('t', 31) + +#define TIOCSWINSZ _IOW('t', 103, struct winsize) +#define TIOCGWINSZ _IOR('t', 104, struct winsize) +#define TIOCSTART _IO('t', 110) /* start output, like ^Q */ +#define TIOCSTOP _IO('t', 111) /* stop output, like ^S */ +#define TIOCOUTQ _IOR('t', 115, int) /* output queue size */ + +#define TIOCSPGRP _IOW('t', 118, int) +#define TIOCGPGRP _IOR('t', 119, int) + +#define TIOCEXCL _IO('T', 12) +#define TIOCNXCL _IO('T', 13) +#define TIOCSCTTY _IO('T', 14) + +#define TIOCSTI _IOW('T', 18, char) +#define TIOCMGET _IOR('T', 21, unsigned int) +#define TIOCMBIS _IOW('T', 22, unsigned int) +#define TIOCMBIC _IOW('T', 23, unsigned int) +#define TIOCMSET _IOW('T', 24, unsigned int) +# define TIOCM_LE 0x001 +# define TIOCM_DTR 0x002 +# define TIOCM_RTS 0x004 +# define TIOCM_ST 0x008 +# define TIOCM_SR 0x010 +# define TIOCM_CTS 0x020 +# define TIOCM_CAR 0x040 +# define TIOCM_RNG 0x080 +# define TIOCM_DSR 0x100 +# define TIOCM_CD TIOCM_CAR +# define TIOCM_RI TIOCM_RNG + +#define TIOCGSOFTCAR _IOR('T', 25, unsigned int) +#define TIOCSSOFTCAR _IOW('T', 26, unsigned int) +#define TIOCLINUX _IOW('T', 28, char) +#define TIOCCONS _IO('T', 29) +#define TIOCGSERIAL _IOR('T', 30, struct serial_struct) +#define TIOCSSERIAL _IOW('T', 31, struct serial_struct) +#define TIOCPKT _IOW('T', 32, int) +# define TIOCPKT_DATA 0 +# define TIOCPKT_FLUSHREAD 1 +# define TIOCPKT_FLUSHWRITE 2 +# define TIOCPKT_STOP 4 +# define TIOCPKT_START 8 +# define TIOCPKT_NOSTOP 16 +# define TIOCPKT_DOSTOP 32 + + +#define TIOCNOTTY _IO('T', 34) +#define TIOCSETD _IOW('T', 35, int) +#define TIOCGETD _IOR('T', 36, int) +#define TCSBRKP _IOW('T', 37, int) /* Needed for POSIX tcsendbreak()*/ +#define TIOCTTYGSTRUCT _IOR('T', 38, struct tty_struct) /* For debugging only*/ +#define TIOCSBRK _IO('T', 39) /* BSD compatibility */ +#define TIOCCBRK _IO('T', 40) /* BSD compatibility */ +#define TIOCGSID _IOR('T', 41, pid_t) /* Return the session ID of FD*/ +#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ +#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ + +#define TIOCSERCONFIG _IO('T', 83) +#define TIOCSERGWILD _IOR('T', 84, int) +#define TIOCSERSWILD _IOW('T', 85, int) +#define TIOCGLCKTRMIOS 0x5456 +#define TIOCSLCKTRMIOS 0x5457 +#define TIOCSERGSTRUCT 0x5458 /* For debugging only */ +#define TIOCSERGETLSR _IOR('T', 89, unsigned int) /* Get line status reg. 
*/ + /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ +# define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ +#define TIOCSERGETMULTI _IOR('T', 90, struct serial_multiport_struct) /* Get multiport config */ +#define TIOCSERSETMULTI _IOW('T', 91, struct serial_multiport_struct) /* Set multiport config */ + +#define TIOCMIWAIT _IO('T', 92) /* wait for a change on serial input line(s) */ +#define TIOCGICOUNT _IOR('T', 93, struct async_icount) /* read serial port inline interrupt counts */ + +#endif /* _XTENSA_IOCTLS_H */ diff --git a/include/asm-xtensa/ipc.h b/include/asm-xtensa/ipc.h new file mode 100644 index 0000000..d37bdb4d4c --- /dev/null +++ b/include/asm-xtensa/ipc.h @@ -0,0 +1,34 @@ +/* + * include/asm-xtensa/ipc.h + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file "COPYING" in the main directory of + * this archive for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_IPC_H +#define _XTENSA_IPC_H + +struct ipc_kludge { + struct msgbuf __user *msgp; + long msgtyp; +}; + +#define SEMOP 1 +#define SEMGET 2 +#define SEMCTL 3 +#define SEMTIMEDOP 4 +#define MSGSND 11 +#define MSGRCV 12 +#define MSGGET 13 +#define MSGCTL 14 +#define SHMAT 21 +#define SHMDT 22 +#define SHMGET 23 +#define SHMCTL 24 + +#define IPCCALL(version,op) ((version)<<16 | (op)) + +#endif /* _XTENSA_IPC_H */ diff --git a/include/asm-xtensa/ipcbuf.h b/include/asm-xtensa/ipcbuf.h new file mode 100644 index 0000000..c33aa6a --- /dev/null +++ b/include/asm-xtensa/ipcbuf.h @@ -0,0 +1,37 @@ +/* + * include/asm-xtensa/ipcbuf.h + * + * The ipc64_perm structure for the Xtensa architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_IPCBUF_H +#define _XTENSA_IPCBUF_H + +/* + * Pad space is left for: + * - 32-bit mode_t and seq + * - 2 miscellaneous 32-bit values + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file "COPYING" in the main directory of + * this archive for more details. + */ + +struct ipc64_perm +{ + __kernel_key_t key; + __kernel_uid32_t uid; + __kernel_gid32_t gid; + __kernel_uid32_t cuid; + __kernel_gid32_t cgid; + __kernel_mode_t mode; + unsigned long seq; + unsigned long __unused1; + unsigned long __unused2; +}; + +#endif /* _XTENSA_IPCBUF_H */ diff --git a/include/asm-xtensa/irq.h b/include/asm-xtensa/irq.h new file mode 100644 index 0000000..d984e95 --- /dev/null +++ b/include/asm-xtensa/irq.h @@ -0,0 +1,37 @@ +/* + * include/asm-xtensa/irq.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. 
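
As a usage sketch for the tty requests in the ioctls.h hunk above, the classic TIOCGWINSZ query asks the controlling terminal for its window size. The example below just uses the host's <sys/ioctl.h> definitions, whatever numeric value they carry.

/* User-space sketch: issue TIOCGWINSZ (defined above for Xtensa, and by
 * the host's headers here) against stdin. */
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct winsize ws;

	if (ioctl(STDIN_FILENO, TIOCGWINSZ, &ws) == 0)
		printf("terminal: %d rows x %d cols\n", ws.ws_row, ws.ws_col);
	else
		perror("TIOCGWINSZ");
	return 0;
}
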
+ */ + +#ifndef _XTENSA_IRQ_H +#define _XTENSA_IRQ_H + +#include <linux/config.h> +#include <asm/platform/hardware.h> + +#include <xtensa/config/core.h> + +#ifndef PLATFORM_NR_IRQS +# define PLATFORM_NR_IRQS 0 +#endif +#define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS +#define NR_IRQS (XTENSA_NR_IRQS + PLATFORM_NR_IRQS) + +static __inline__ int irq_canonicalize(int irq) +{ + return (irq); +} + +struct irqaction; +#if 0 // FIXME +extern void disable_irq_nosync(unsigned int); +extern void disable_irq(unsigned int); +extern void enable_irq(unsigned int); +#endif + +#endif /* _XTENSA_IRQ_H */ diff --git a/include/asm-xtensa/kmap_types.h b/include/asm-xtensa/kmap_types.h new file mode 100644 index 0000000..9e822d2 --- /dev/null +++ b/include/asm-xtensa/kmap_types.h @@ -0,0 +1,31 @@ +/* + * include/asm-xtensa/kmap_types.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_KMAP_TYPES_H +#define _XTENSA_KMAP_TYPES_H + +enum km_type { + KM_BOUNCE_READ, + KM_SKB_SUNRPC_DATA, + KM_SKB_DATA_SOFTIRQ, + KM_USER0, + KM_USER1, + KM_BIO_SRC_IRQ, + KM_BIO_DST_IRQ, + KM_PTE0, + KM_PTE1, + KM_IRQ0, + KM_IRQ1, + KM_SOFTIRQ0, + KM_SOFTIRQ1, + KM_TYPE_NR +}; + +#endif /* _XTENSA_KMAP_TYPES_H */ diff --git a/include/asm-xtensa/linkage.h b/include/asm-xtensa/linkage.h new file mode 100644 index 0000000..bf2128a --- /dev/null +++ b/include/asm-xtensa/linkage.h @@ -0,0 +1,16 @@ +/* + * include/asm-xtensa/linkage.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_LINKAGE_H +#define _XTENSA_LINKAGE_H + +/* Nothing to do here ... */ + +#endif /* _XTENSA_LINKAGE_H */ diff --git a/include/asm-xtensa/local.h b/include/asm-xtensa/local.h new file mode 100644 index 0000000..48723e5 --- /dev/null +++ b/include/asm-xtensa/local.h @@ -0,0 +1,16 @@ +/* + * include/asm-xtensa/local.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_LOCAL_H +#define _XTENSA_LOCAL_H + +#include <asm-generic/local.h> + +#endif /* _XTENSA_LOCAL_H */ diff --git a/include/asm-xtensa/mman.h b/include/asm-xtensa/mman.h new file mode 100644 index 0000000..9a95a45 --- /dev/null +++ b/include/asm-xtensa/mman.h @@ -0,0 +1,80 @@ +/* + * include/asm-xtensa/mman.h + * + * Xtensa Processor memory-manager definitions + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1995 by Ralf Baechle + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_MMAN_H +#define _XTENSA_MMAN_H + +/* + * Protections are chosen from these bits, OR'd together. The + * implementation does not necessarily support PROT_EXEC or PROT_WRITE + * without PROT_READ. The only guarantees are that no writing will be + * allowed without PROT_WRITE and no access will be allowed for PROT_NONE. 
+ */ + +#define PROT_NONE 0x0 /* page can not be accessed */ +#define PROT_READ 0x1 /* page can be read */ +#define PROT_WRITE 0x2 /* page can be written */ +#define PROT_EXEC 0x4 /* page can be executed */ + +#define PROT_SEM 0x10 /* page may be used for atomic ops */ +#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ +#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end fo growsup vma */ + +/* + * Flags for mmap + */ +#define MAP_SHARED 0x001 /* Share changes */ +#define MAP_PRIVATE 0x002 /* Changes are private */ +#define MAP_TYPE 0x00f /* Mask for type of mapping */ +#define MAP_FIXED 0x010 /* Interpret addr exactly */ + +/* not used by linux, but here to make sure we don't clash with ABI defines */ +#define MAP_RENAME 0x020 /* Assign page to file */ +#define MAP_AUTOGROW 0x040 /* File may grow by writing */ +#define MAP_LOCAL 0x080 /* Copy on fork/sproc */ +#define MAP_AUTORSRV 0x100 /* Logical swap reserved on demand */ + +/* These are linux-specific */ +#define MAP_NORESERVE 0x0400 /* don't check for reservations */ +#define MAP_ANONYMOUS 0x0800 /* don't use a file */ +#define MAP_GROWSDOWN 0x1000 /* stack-like segment */ +#define MAP_DENYWRITE 0x2000 /* ETXTBSY */ +#define MAP_EXECUTABLE 0x4000 /* mark it as an executable */ +#define MAP_LOCKED 0x8000 /* pages are locked */ +#define MAP_POPULATE 0x10000 /* populate (prefault) pagetables */ +#define MAP_NONBLOCK 0x20000 /* do not block on IO */ + +/* + * Flags for msync + */ +#define MS_ASYNC 0x0001 /* sync memory asynchronously */ +#define MS_INVALIDATE 0x0002 /* invalidate mappings & caches */ +#define MS_SYNC 0x0004 /* synchronous memory sync */ + +/* + * Flags for mlockall + */ +#define MCL_CURRENT 1 /* lock all current mappings */ +#define MCL_FUTURE 2 /* lock all future mappings */ + +#define MADV_NORMAL 0x0 /* default page-in behavior */ +#define MADV_RANDOM 0x1 /* page-in minimum required */ +#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ +#define MADV_WILLNEED 0x3 /* pre-fault pages */ +#define MADV_DONTNEED 0x4 /* discard these pages */ + +/* compatibility flags */ +#define MAP_ANON MAP_ANONYMOUS +#define MAP_FILE 0 + +#endif /* _XTENSA_MMAN_H */ diff --git a/include/asm-xtensa/mmu.h b/include/asm-xtensa/mmu.h new file mode 100644 index 0000000..44c5bb0 --- /dev/null +++ b/include/asm-xtensa/mmu.h @@ -0,0 +1,17 @@ +/* + * include/asm-xtensa/mmu.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_MMU_H +#define _XTENSA_MMU_H + +/* Default "unsigned long" context */ +typedef unsigned long mm_context_t; + +#endif /* _XTENSA_MMU_H */ diff --git a/include/asm-xtensa/mmu_context.h b/include/asm-xtensa/mmu_context.h new file mode 100644 index 0000000..1b08015 --- /dev/null +++ b/include/asm-xtensa/mmu_context.h @@ -0,0 +1,330 @@ +/* + * include/asm-xtensa/mmu_context.h + * + * Switch an MMU context. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. 
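
Back to the PROT_* and MAP_* flags in the mman.h hunk above: they are the numbers behind ordinary mmap() and mprotect() calls. A minimal user-space use, relying on the host's <sys/mman.h> for the constants:

/* Minimal use of the PROT_ / MAP_ flags defined above: map an anonymous
 * read-write region, then drop write permission. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;                      /* one page on most configs (assumed) */
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	strcpy(p, "hello");                     /* writable for now */

	if (mprotect(p, len, PROT_READ) == 0)   /* now read-only */
		printf("mapped page says: %s\n", p);

	munmap(p, len);
	return 0;
}
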
+ */ + +#ifndef _XTENSA_MMU_CONTEXT_H +#define _XTENSA_MMU_CONTEXT_H + +#include <linux/config.h> +#include <linux/stringify.h> + +#include <asm/pgtable.h> +#include <asm/mmu_context.h> +#include <asm/cacheflush.h> +#include <asm/tlbflush.h> + +/* + * Linux was ported to Xtensa assuming all auto-refill ways in set 0 + * had the same properties (a very likely assumption). Multiple sets + * of auto-refill ways will still work properly, but not as optimally + * as the Xtensa designer may have assumed. + * + * We make this case a hard #error, killing the kernel build, to alert + * the developer to this condition (which is more likely an error). + * You super-duper clever developers can change it to a warning or + * remove it altogether if you think you know what you're doing. :) + */ + +#if (XCHAL_HAVE_TLBS != 1) +# error "Linux must have an MMU!" +#endif + +#if ((XCHAL_ITLB_ARF_WAYS == 0) || (XCHAL_DTLB_ARF_WAYS == 0)) +# error "MMU must have auto-refill ways" +#endif + +#if ((XCHAL_ITLB_ARF_SETS != 1) || (XCHAL_DTLB_ARF_SETS != 1)) +# error Linux may not use all auto-refill ways as efficiently as you think +#endif + +#if (XCHAL_MMU_MAX_PTE_PAGE_SIZE != XCHAL_MMU_MIN_PTE_PAGE_SIZE) +# error Only one page size allowed! +#endif + +extern unsigned long asid_cache; +extern pgd_t *current_pgd; + +/* + * Define the number of entries per auto-refill way in set 0 of both I and D + * TLBs. We deal only with set 0 here (an assumption further explained in + * assertions.h). Also, define the total number of ARF entries in both TLBs. + */ + +#define ITLB_ENTRIES_PER_ARF_WAY (XCHAL_ITLB_SET(XCHAL_ITLB_ARF_SET0,ENTRIES)) +#define DTLB_ENTRIES_PER_ARF_WAY (XCHAL_DTLB_SET(XCHAL_DTLB_ARF_SET0,ENTRIES)) + +#define ITLB_ENTRIES \ + (ITLB_ENTRIES_PER_ARF_WAY * (XCHAL_ITLB_SET(XCHAL_ITLB_ARF_SET0,WAYS))) +#define DTLB_ENTRIES \ + (DTLB_ENTRIES_PER_ARF_WAY * (XCHAL_DTLB_SET(XCHAL_DTLB_ARF_SET0,WAYS))) + + +/* + * SMALLEST_NTLB_ENTRIES is the smaller of ITLB_ENTRIES and DTLB_ENTRIES. + * In practice, they are probably equal. This macro simplifies function + * flush_tlb_range(). + */ + +#if (DTLB_ENTRIES < ITLB_ENTRIES) +# define SMALLEST_NTLB_ENTRIES DTLB_ENTRIES +#else +# define SMALLEST_NTLB_ENTRIES ITLB_ENTRIES +#endif + + +/* + * asid_cache tracks only the ASID[USER_RING] field of the RASID special + * register, which is the current user-task asid allocation value. + * mm->context has the same meaning. When it comes time to write the + * asid_cache or mm->context values to the RASID special register, we first + * shift the value left by 8, then insert the value. + * ASID[0] always contains the kernel's asid value, and we reserve three + * other asid values that we never assign to user tasks. + */ + +#define ASID_INC 0x1 +#define ASID_MASK ((1 << XCHAL_MMU_ASID_BITS) - 1) + +/* + * XCHAL_MMU_ASID_INVALID is a configurable Xtensa processor constant + * indicating invalid address space. XCHAL_MMU_ASID_KERNEL is a configurable + * Xtensa processor constant indicating the kernel address space. They can + * be arbitrary values. + * + * We identify three more unique, reserved ASID values to use in the unused + * ring positions. No other user process will be assigned these reserved + * ASID values. 
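
As a small illustration of the bookkeeping described above (the header's own worked derivation of the reserved values continues just below), the sketch splits a task's mm->context into its generation bits and its hardware ASID, and packs the latter into a RASID value the way the USER_RING == 1 variant of ASID_INSERT() does. The 8-bit ASID width, kernel ASID 1 and reserved ASIDs 2 and 3 are assumed example values matching the scenario spelled out in the comment.

/* Host-side sketch of the ASID bookkeeping above, with assumed values:
 * 8-bit hardware ASIDs, kernel ASID 1, reserved ASIDs 2 and 3, user
 * ring 1.  mm->context keeps a generation count in the upper bits and
 * the hardware ASID in the low byte; only the low byte goes into RASID. */
#include <stdio.h>

#define ASID_BITS       8                        /* assumed XCHAL_MMU_ASID_BITS */
#define ASID_MASK       ((1u << ASID_BITS) - 1)
#define ASID_KERNEL     1u                       /* assumed */
#define ASID_RESERVED_1 2u
#define ASID_RESERVED_2 3u

/* USER_RING == 1 variant of ASID_INSERT(): user ASID goes into byte 1. */
#define ASID_INSERT(x) ((ASID_RESERVED_1 << 24) | (ASID_RESERVED_2 << 16) | \
			(((x) & ASID_MASK) << 8) | ASID_KERNEL)

int main(void)
{
	unsigned long context = 0x00000347;   /* some task's mm->context */
	unsigned long hw_asid = context & ASID_MASK;

	printf("generation = 0x%lx, hw asid = 0x%02lx\n",
	       context & ~(unsigned long)ASID_MASK, hw_asid);
	printf("RASID value = 0x%08lx\n", (unsigned long)ASID_INSERT(hw_asid));
	return 0;
}
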
+ * + * For example, given that + * + * XCHAL_MMU_ASID_INVALID == 0 + * XCHAL_MMU_ASID_KERNEL == 1 + * + * the following maze of #if statements would generate + * + * ASID_RESERVED_1 == 2 + * ASID_RESERVED_2 == 3 + * ASID_RESERVED_3 == 4 + * ASID_FIRST_NONRESERVED == 5 + */ + +#if (XCHAL_MMU_ASID_INVALID != XCHAL_MMU_ASID_KERNEL + 1) +# define ASID_RESERVED_1 ((XCHAL_MMU_ASID_KERNEL + 1) & ASID_MASK) +#else +# define ASID_RESERVED_1 ((XCHAL_MMU_ASID_KERNEL + 2) & ASID_MASK) +#endif + +#if (XCHAL_MMU_ASID_INVALID != ASID_RESERVED_1 + 1) +# define ASID_RESERVED_2 ((ASID_RESERVED_1 + 1) & ASID_MASK) +#else +# define ASID_RESERVED_2 ((ASID_RESERVED_1 + 2) & ASID_MASK) +#endif + +#if (XCHAL_MMU_ASID_INVALID != ASID_RESERVED_2 + 1) +# define ASID_RESERVED_3 ((ASID_RESERVED_2 + 1) & ASID_MASK) +#else +# define ASID_RESERVED_3 ((ASID_RESERVED_2 + 2) & ASID_MASK) +#endif + +#if (XCHAL_MMU_ASID_INVALID != ASID_RESERVED_3 + 1) +# define ASID_FIRST_NONRESERVED ((ASID_RESERVED_3 + 1) & ASID_MASK) +#else +# define ASID_FIRST_NONRESERVED ((ASID_RESERVED_3 + 2) & ASID_MASK) +#endif + +#define ASID_ALL_RESERVED ( ((ASID_RESERVED_1) << 24) + \ + ((ASID_RESERVED_2) << 16) + \ + ((ASID_RESERVED_3) << 8) + \ + ((XCHAL_MMU_ASID_KERNEL)) ) + + +/* + * NO_CONTEXT is the invalid ASID value that we don't ever assign to + * any user or kernel context. NO_CONTEXT is a better mnemonic than + * XCHAL_MMU_ASID_INVALID, so we use it in code instead. + */ + +#define NO_CONTEXT XCHAL_MMU_ASID_INVALID + +#if (KERNEL_RING != 0) +# error The KERNEL_RING really should be zero. +#endif + +#if (USER_RING >= XCHAL_MMU_RINGS) +# error USER_RING cannot be greater than the highest numbered ring. +#endif + +#if (USER_RING == KERNEL_RING) +# error The user and kernel rings really should not be equal. +#endif + +#if (USER_RING == 1) +#define ASID_INSERT(x) ( ((ASID_RESERVED_1) << 24) + \ + ((ASID_RESERVED_2) << 16) + \ + (((x) & (ASID_MASK)) << 8) + \ + ((XCHAL_MMU_ASID_KERNEL)) ) + +#elif (USER_RING == 2) +#define ASID_INSERT(x) ( ((ASID_RESERVED_1) << 24) + \ + (((x) & (ASID_MASK)) << 16) + \ + ((ASID_RESERVED_2) << 8) + \ + ((XCHAL_MMU_ASID_KERNEL)) ) + +#elif (USER_RING == 3) +#define ASID_INSERT(x) ( (((x) & (ASID_MASK)) << 24) + \ + ((ASID_RESERVED_1) << 16) + \ + ((ASID_RESERVED_2) << 8) + \ + ((XCHAL_MMU_ASID_KERNEL)) ) + +#else +#error Goofy value for USER_RING + +#endif /* USER_RING == 1 */ + + +/* + * All unused by hardware upper bits will be considered + * as a software asid extension. + */ + +#define ASID_VERSION_MASK ((unsigned long)~(ASID_MASK|(ASID_MASK-1))) +#define ASID_FIRST_VERSION \ + ((unsigned long)(~ASID_VERSION_MASK) + 1 + ASID_FIRST_NONRESERVED) + +extern inline void set_rasid_register (unsigned long val) +{ + __asm__ __volatile__ (" wsr %0, "__stringify(RASID)"\n\t" + " isync\n" : : "a" (val)); +} + +extern inline unsigned long get_rasid_register (void) +{ + unsigned long tmp; + __asm__ __volatile__ (" rsr %0, "__stringify(RASID)"\n\t" : "=a" (tmp)); + return tmp; +} + + +#if ((XCHAL_MMU_ASID_INVALID == 0) && (XCHAL_MMU_ASID_KERNEL == 1)) + +extern inline void +get_new_mmu_context(struct mm_struct *mm, unsigned long asid) +{ + extern void flush_tlb_all(void); + if (! 
((asid += ASID_INC) & ASID_MASK) ) { + flush_tlb_all(); /* start new asid cycle */ + if (!asid) /* fix version if needed */ + asid = ASID_FIRST_VERSION - ASID_FIRST_NONRESERVED; + asid += ASID_FIRST_NONRESERVED; + } + mm->context = asid_cache = asid; +} + +#else +#warning ASID_{INVALID,KERNEL} values impose non-optimal get_new_mmu_context implementation + +/* XCHAL_MMU_ASID_INVALID == 0 and XCHAL_MMU_ASID_KERNEL ==1 are + really the best, but if you insist... */ + +extern inline int validate_asid (unsigned long asid) +{ + switch (asid) { + case XCHAL_MMU_ASID_INVALID: + case XCHAL_MMU_ASID_KERNEL: + case ASID_RESERVED_1: + case ASID_RESERVED_2: + case ASID_RESERVED_3: + return 0; /* can't use these values as ASIDs */ + } + return 1; /* valid */ +} + +extern inline void +get_new_mmu_context(struct mm_struct *mm, unsigned long asid) +{ + extern void flush_tlb_all(void); + while (1) { + asid += ASID_INC; + if ( ! (asid & ASID_MASK) ) { + flush_tlb_all(); /* start new asid cycle */ + if (!asid) /* fix version if needed */ + asid = ASID_FIRST_VERSION - ASID_FIRST_NONRESERVED; + asid += ASID_FIRST_NONRESERVED; + break; /* no need to validate here */ + } + if (validate_asid (asid & ASID_MASK)) + break; + } + mm->context = asid_cache = asid; +} + +#endif + + +/* + * Initialize the context related info for a new mm_struct + * instance. + */ + +extern inline int +init_new_context(struct task_struct *tsk, struct mm_struct *mm) +{ + mm->context = NO_CONTEXT; + return 0; +} + +extern inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk) +{ + unsigned long asid = asid_cache; + + /* Check if our ASID is of an older version and thus invalid */ + + if ((next->context ^ asid) & ASID_VERSION_MASK) + get_new_mmu_context(next, asid); + + set_rasid_register (ASID_INSERT(next->context)); + invalidate_page_directory(); +} + +#define deactivate_mm(tsk, mm) do { } while(0) + +/* + * Destroy context related info for an mm_struct that is about + * to be put to rest. + */ +extern inline void destroy_context(struct mm_struct *mm) +{ + /* Nothing to do. */ +} + +/* + * After we have set current->mm to a new value, this activates + * the context for the new mm so we see the new mappings. + */ +extern inline void +activate_mm(struct mm_struct *prev, struct mm_struct *next) +{ + /* Unconditionally get a new ASID. */ + + get_new_mmu_context(next, asid_cache); + set_rasid_register (ASID_INSERT(next->context)); + invalidate_page_directory(); +} + + +static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) +{ + /* Nothing to do. */ + +} + +#endif /* _XTENSA_MMU_CONTEXT_H */ diff --git a/include/asm-xtensa/module.h b/include/asm-xtensa/module.h new file mode 100644 index 0000000..ffb25bf --- /dev/null +++ b/include/asm-xtensa/module.h @@ -0,0 +1,25 @@ +/* + * include/asm-xtensa/module.h + * + * This file contains the module code specific to the Xtensa architecture. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_MODULE_H +#define _XTENSA_MODULE_H + +struct mod_arch_specific +{ + /* Module support is not completely implemented. 
*/ +}; + +#define Elf_Shdr Elf32_Shdr +#define Elf_Sym Elf32_Sym +#define Elf_Ehdr Elf32_Ehdr + +#endif /* _XTENSA_MODULE_H */ diff --git a/include/asm-xtensa/msgbuf.h b/include/asm-xtensa/msgbuf.h new file mode 100644 index 0000000..693c967 --- /dev/null +++ b/include/asm-xtensa/msgbuf.h @@ -0,0 +1,48 @@ +/* + * include/asm-xtensa/msgbuf.h + * + * The msqid64_ds structure for the Xtensa architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. + * + * Pad space is left for: + * - 64-bit time_t to solve y2038 problem + * - 2 miscellaneous 32-bit values + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file "COPYING" in the main directory of + * this archive for more details. + */ + +#ifndef _XTENSA_MSGBUF_H +#define _XTENSA_MSGBUF_H + +struct msqid64_ds { + struct ipc64_perm msg_perm; +#ifdef __XTENSA_EB__ + unsigned int __unused1; + __kernel_time_t msg_stime; /* last msgsnd time */ + unsigned int __unused2; + __kernel_time_t msg_rtime; /* last msgrcv time */ + unsigned int __unused3; + __kernel_time_t msg_ctime; /* last change time */ +#elif defined(__XTENSA_EL__) + __kernel_time_t msg_stime; /* last msgsnd time */ + unsigned int __unused1; + __kernel_time_t msg_rtime; /* last msgrcv time */ + unsigned int __unused2; + __kernel_time_t msg_ctime; /* last change time */ + unsigned int __unused3; +#else +# error processor byte order undefined! +#endif + unsigned long msg_cbytes; /* current number of bytes on queue */ + unsigned long msg_qnum; /* number of messages in queue */ + unsigned long msg_qbytes; /* max number of bytes on queue */ + __kernel_pid_t msg_lspid; /* pid of last msgsnd */ + __kernel_pid_t msg_lrpid; /* last receive pid */ + unsigned long __unused4; + unsigned long __unused5; +}; + +#endif /* _XTENSA_MSGBUF_H */ diff --git a/include/asm-xtensa/namei.h b/include/asm-xtensa/namei.h new file mode 100644 index 0000000..3fdff03 --- /dev/null +++ b/include/asm-xtensa/namei.h @@ -0,0 +1,26 @@ +/* + * include/asm-xtensa/namei.h + * + * Included from linux/fs/namei.c + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_NAMEI_H +#define _XTENSA_NAMEI_H + +#ifdef __KERNEL__ + +/* This dummy routine maybe changed to something useful + * for /usr/gnemul/ emulation stuff. + * Look at asm-sparc/namei.h for details. + */ + +#define __emul_prefix() NULL + +#endif /* __KERNEL__ */ +#endif /* _XTENSA_NAMEI_H */ diff --git a/include/asm-xtensa/page.h b/include/asm-xtensa/page.h new file mode 100644 index 0000000..b495e5b --- /dev/null +++ b/include/asm-xtensa/page.h @@ -0,0 +1,133 @@ +/* + * linux/include/asm-xtensa/page.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version2 as + * published by the Free Software Foundation. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. 
+ */ + +#ifndef _XTENSA_PAGE_H +#define _XTENSA_PAGE_H + +#ifdef __KERNEL__ + +#include <asm/processor.h> +#include <linux/config.h> + +/* + * PAGE_SHIFT determines the page size + * PAGE_ALIGN(x) aligns the pointer to the (next) page boundary + */ + +#define PAGE_SHIFT XCHAL_MMU_MIN_PTE_PAGE_SIZE +#define PAGE_SIZE (1 << PAGE_SHIFT) +#define PAGE_MASK (~(PAGE_SIZE-1)) +#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE - 1) & PAGE_MASK) + +#define DCACHE_WAY_SIZE (XCHAL_DCACHE_SIZE / XCHAL_DCACHE_WAYS) +#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR + +#ifdef __ASSEMBLY__ + +#define __pgprot(x) (x) + +#else + +/* + * These are used to make use of C type-checking.. + */ + +typedef struct { unsigned long pte; } pte_t; /* page table entry */ +typedef struct { unsigned long pgd; } pgd_t; /* PGD table entry */ +typedef struct { unsigned long pgprot; } pgprot_t; + +#define pte_val(x) ((x).pte) +#define pgd_val(x) ((x).pgd) +#define pgprot_val(x) ((x).pgprot) + +#define __pte(x) ((pte_t) { (x) } ) +#define __pgd(x) ((pgd_t) { (x) } ) +#define __pgprot(x) ((pgprot_t) { (x) } ) + +/* + * Pure 2^n version of get_order + */ + +extern __inline__ int get_order(unsigned long size) +{ + int order; +#ifndef XCHAL_HAVE_NSU + unsigned long x1, x2, x4, x8, x16; + + size = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; + x1 = size & 0xAAAAAAAA; + x2 = size & 0xCCCCCCCC; + x4 = size & 0xF0F0F0F0; + x8 = size & 0xFF00FF00; + x16 = size & 0xFFFF0000; + order = x2 ? 2 : 0; + order += (x16 != 0) * 16; + order += (x8 != 0) * 8; + order += (x4 != 0) * 4; + order += (x1 != 0); + + return order; +#else + size = (size - 1) >> PAGE_SHIFT; + asm ("nsau %0, %1" : "=r" (order) : "r" (size)); + return 32 - order; +#endif +} + + +struct page; +extern void clear_page(void *page); +extern void copy_page(void *to, void *from); + +/* + * If we have cache aliasing and writeback caches, we might have to do + * some extra work + */ + +#if (DCACHE_WAY_SIZE > PAGE_SIZE) +void clear_user_page(void *addr, unsigned long vaddr, struct page* page); +void copy_user_page(void *to,void* from,unsigned long vaddr,struct page* page); +#else +# define clear_user_page(page,vaddr,pg) clear_page(page) +# define copy_user_page(to, from, vaddr, pg) copy_page(to, from) +#endif + +/* + * This handles the memory map. We handle pages at + * XCHAL_KSEG_CACHED_VADDR for kernels with 32 bit address space. + * These macros are for conversion of kernel address, not user + * addresses. 
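[Editor's note: the get_order() helper earlier in this file returns an allocation order, i.e. it rounds a byte count up to whole pages and then up to the next power of two. The stand-alone sketch below illustrates that intended behaviour; it assumes 4 kB pages for the demo instead of the XCHAL_* configuration macros and follows the semantics of the XCHAL_HAVE_NSU branch. Illustration only, not part of this patch.]

#include <stdio.h>

#define DEMO_PAGE_SHIFT 12                    /* assumed 4 kB pages */
#define DEMO_PAGE_SIZE  (1UL << DEMO_PAGE_SHIFT)

/* Smallest n such that (1 << n) pages cover 'size' bytes. */
static int demo_get_order(unsigned long size)
{
        int order = 0;

        size = (size + DEMO_PAGE_SIZE - 1) >> DEMO_PAGE_SHIFT; /* whole pages */
        size -= 1;
        while (size) {                                          /* ceil(log2) */
                order++;
                size >>= 1;
        }
        return order;
}

int main(void)
{
        printf("%d %d %d\n",
               demo_get_order(DEMO_PAGE_SIZE),       /* 0 */
               demo_get_order(2 * DEMO_PAGE_SIZE),   /* 1 */
               demo_get_order(5 * DEMO_PAGE_SIZE));  /* 3: rounded up to 8 pages */
        return 0;
}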
+ */ + +#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET) +#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET)) +#define pfn_valid(pfn) ((unsigned long)pfn < max_mapnr) +#ifndef CONFIG_DISCONTIGMEM +# define pfn_to_page(pfn) (mem_map + (pfn)) +# define page_to_pfn(page) ((unsigned long)((page) - mem_map)) +#else +# error CONFIG_DISCONTIGMEM not supported +#endif + +#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) +#define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT) +#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) +#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) + +#define WANT_PAGE_VIRTUAL + + +#endif /* __ASSEMBLY__ */ + +#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + +#endif /* __KERNEL__ */ +#endif /* _XTENSA_PAGE_H */ diff --git a/include/asm-xtensa/page.h.n b/include/asm-xtensa/page.h.n new file mode 100644 index 0000000..546cc66 --- /dev/null +++ b/include/asm-xtensa/page.h.n @@ -0,0 +1,135 @@ +/* + * linux/include/asm-xtensa/page.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version2 as + * published by the Free Software Foundation. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_PAGE_H +#define _XTENSA_PAGE_H + +#ifdef __KERNEL__ + +#include <asm/processor.h> +#include <linux/config.h> + +/* + * PAGE_SHIFT determines the page size + * PAGE_ALIGN(x) aligns the pointer to the (next) page boundary + */ +#define PAGE_SHIFT XCHAL_MMU_MIN_PTE_PAGE_SIZE +#define PAGE_SIZE (1 << PAGE_SHIFT) +#define PAGE_MASK (~(PAGE_SIZE-1)) +#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE - 1) & PAGE_MASK) + +#define DCACHE_WAY_SIZE (XCHAL_DCACHE_SIZE / XCHAL_DCACHE_WAYS) +#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR + +#ifdef __ASSEMBLY__ + +#define __pgprot(x) (x) + +#else + + +/* + * These are used to make use of C type-checking.. + */ +typedef struct { unsigned long pte; } pte_t; /* page table entry */ +typedef struct { unsigned long pmd; } pmd_t; /* PMD table entry */ +typedef struct { unsigned long pgd; } pgd_t; /* PGD table entry */ +typedef struct { unsigned long pgprot; } pgprot_t; + +#define pte_val(x) ((x).pte) +#define pmd_val(x) ((x).pmd) +#define pgd_val(x) ((x).pgd) +#define pgprot_val(x) ((x).pgprot) + +#define __pte(x) ((pte_t) { (x) } ) +#define __pmd(x) ((pmd_t) { (x) } ) +#define __pgd(x) ((pgd_t) { (x) } ) +#define __pgprot(x) ((pgprot_t) { (x) } ) + +/* + * Pure 2^n version of get_order + */ +extern __inline__ int get_order(unsigned long size) +{ + int order; +#ifndef XCHAL_HAVE_NSU + unsigned long x1, x2, x4, x8, x16; + + size = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; + x1 = size & 0xAAAAAAAA; + x2 = size & 0xCCCCCCCC; + x4 = size & 0xF0F0F0F0; + x8 = size & 0xFF00FF00; + x16 = size & 0xFFFF0000; + order = x2 ? 
2 : 0; + order += (x16 != 0) * 16; + order += (x8 != 0) * 8; + order += (x4 != 0) * 4; + order += (x1 != 0); + + return order; +#else + size = (size - 1) >> PAGE_SHIFT; + asm ("nsau %0, %1" : "=r" (order) : "r" (size)); + return 32 - order; +#endif +} + + +struct page; +extern void clear_page(void *page); +extern void copy_page(void *to, void *from); + +/* + * If we have cache aliasing and writeback caches, we might have to do + * some extra work + */ + +#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK +void clear_user_page(void *addr, unsigned long vaddr, struct page* page); +void copy_user_page(void *to, void* from, unsigned long vaddr, struct page* page); +#else +# define clear_user_page(page,vaddr,pg) clear_page(page) +# define copy_user_page(to, from, vaddr, pg) copy_page(to, from) +#endif + + +/* + * This handles the memory map. We handle pages at + * XCHAL_KSEG_CACHED_VADDR for kernels with 32 bit address space. + * These macros are for conversion of kernel address, not user + * addresses. + */ + +#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET) +#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET)) +#define pfn_valid(pfn) ((unsigned long)pfn < max_mapnr) +#ifndef CONFIG_DISCONTIGMEM +# define pfn_to_page(pfn) (mem_map + (pfn)) +# define page_to_pfn(page) ((unsigned long)((page) - mem_map)) +#else +# error CONFIG_DISCONTIGMEM not supported +#endif + +#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) +#define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT) +#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) +#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) + +#define WANT_PAGE_VIRTUAL + + +#endif /* __ASSEMBLY__ */ + +#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + +#endif /* __KERNEL__ */ +#endif /* _XTENSA_PAGE_H */ diff --git a/include/asm-xtensa/param.h b/include/asm-xtensa/param.h new file mode 100644 index 0000000..c0eec82 --- /dev/null +++ b/include/asm-xtensa/param.h @@ -0,0 +1,34 @@ +/* + * include/asm-xtensa/param.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_PARAM_H +#define _XTENSA_PARAM_H + +#include <xtensa/config/core.h> + +#ifdef __KERNEL__ +# define HZ 100 /* internal timer frequency */ +# define USER_HZ 100 /* for user interfaces in "ticks" */ +# define CLOCKS_PER_SEC (USER_HZ) /* frequnzy at which times() counts */ +#endif + +#define EXEC_PAGESIZE (1 << XCHAL_MMU_MIN_PTE_PAGE_SIZE) + +#ifndef NGROUPS +#define NGROUPS 32 +#endif + +#ifndef NOGROUP +#define NOGROUP (-1) +#endif + +#define MAXHOSTNAMELEN 64 /* max length of hostname */ + +#endif /* _XTENSA_PARAM_H */ diff --git a/include/asm-xtensa/pci-bridge.h b/include/asm-xtensa/pci-bridge.h new file mode 100644 index 0000000..00fcbd7 --- /dev/null +++ b/include/asm-xtensa/pci-bridge.h @@ -0,0 +1,88 @@ +/* + * include/asm-xtensa/pci-bridge.h + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file "COPYING" in the main directory of + * this archive for more details. + * + * Copyright (C) 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_PCI_BRIDGE_H +#define _XTENSA_PCI_BRIDGE_H + +#ifdef __KERNEL__ + +struct device_node; +struct pci_controller; + +/* + * pciauto_bus_scan() enumerates the pci space. 
+ */ + +extern int pciauto_bus_scan(struct pci_controller *, int); + +struct pci_space { + unsigned long start; + unsigned long end; + unsigned long base; +}; + +/* + * Structure of a PCI controller (host bridge) + */ + +struct pci_controller { + int index; /* used for pci_controller_num */ + struct pci_controller *next; + struct pci_bus *bus; + void *arch_data; + + int first_busno; + int last_busno; + + struct pci_ops *ops; + volatile unsigned int *cfg_addr; + volatile unsigned char *cfg_data; + + /* Currently, we limit ourselves to 1 IO range and 3 mem + * ranges since the common pci_bus structure can't handle more + */ + struct resource io_resource; + struct resource mem_resources[3]; + int mem_resource_count; + + /* Host bridge I/O and Memory space + * Used for BAR placement algorithms + */ + struct pci_space io_space; + struct pci_space mem_space; + + /* Return the interrupt number fo a device. */ + int (*map_irq)(struct pci_dev*, u8, u8); + +}; + +static inline void pcibios_init_resource(struct resource *res, + unsigned long start, unsigned long end, int flags, char *name) +{ + res->start = start; + res->end = end; + res->flags = flags; + res->name = name; + res->parent = NULL; + res->sibling = NULL; + res->child = NULL; +} + + +/* These are used for config access before all the PCI probing has been done. */ +int early_read_config_byte(struct pci_controller*, int, int, int, u8*); +int early_read_config_word(struct pci_controller*, int, int, int, u16*); +int early_read_config_dword(struct pci_controller*, int, int, int, u32*); +int early_write_config_byte(struct pci_controller*, int, int, int, u8); +int early_write_config_word(struct pci_controller*, int, int, int, u16); +int early_write_config_dword(struct pci_controller*, int, int, int, u32); + +#endif /* __KERNEL__ */ +#endif /* _XTENSA_PCI_BRIDGE_H */ diff --git a/include/asm-xtensa/pci.h b/include/asm-xtensa/pci.h new file mode 100644 index 0000000..6817742 --- /dev/null +++ b/include/asm-xtensa/pci.h @@ -0,0 +1,89 @@ +/* + * linux/include/asm-xtensa/pci.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_PCI_H +#define _XTENSA_PCI_H + +#ifdef __KERNEL__ + +/* Can be used to override the logic in pci_scan_bus for skipping + * already-configured bus numbers - to be used for buggy BIOSes + * or architectures with incomplete PCI setup by the loader + */ + +#define pcibios_assign_all_busses() 0 + +extern struct pci_controller* pcibios_alloc_controller(void); + +extern inline void pcibios_set_master(struct pci_dev *dev) +{ + /* No special bus mastering setup handling */ +} + +extern inline void pcibios_penalize_isa_irq(int irq) +{ + /* We don't do dynamic PCI IRQ allocation */ +} + +/* Assume some values. (We should revise them, if necessary) */ + +#define PCIBIOS_MIN_IO 0x2000 +#define PCIBIOS_MIN_MEM 0x10000000 + +/* Dynamic DMA mapping stuff. + * Xtensa has everything mapped statically like x86. + */ + +#include <linux/types.h> +#include <linux/slab.h> +#include <asm/scatterlist.h> +#include <linux/string.h> +#include <asm/io.h> + +struct pci_dev; + +/* The PCI address space does equal the physical memory address space. + * The networking and block device layers use this boolean for bounce buffer + * decisions. 
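[Editor's note: a hedged sketch of how a board file might fill in the pci_controller described above, using only pcibios_alloc_controller(), pcibios_init_resource() and pciauto_bus_scan() from these headers. The base addresses, resource names, my_map_irq() and my_pcibios_init() are invented for illustration; IORESOURCE_IO/IORESOURCE_MEM come from <linux/ioport.h>. Not part of this patch.]

#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <asm/pci-bridge.h>

static int my_map_irq(struct pci_dev *dev, u8 slot, u8 pin)  /* hypothetical */
{
        return 16 + slot;              /* made-up fixed interrupt routing */
}

void __init my_pcibios_init(void)      /* hypothetical platform hook */
{
        struct pci_controller *hose = pcibios_alloc_controller();

        if (!hose)
                return;

        hose->first_busno = 0;
        hose->last_busno  = 0xff;
        hose->map_irq     = my_map_irq;

        /* One I/O window and one memory window; addresses are examples. */
        pcibios_init_resource(&hose->io_resource, 0x00000000, 0x0000ffff,
                              IORESOURCE_IO, "PCI host bridge I/O");
        pcibios_init_resource(&hose->mem_resources[0], 0xd0000000, 0xdfffffff,
                              IORESOURCE_MEM, "PCI host bridge MEM");
        hose->mem_resource_count = 1;

        /* Enumerate the bus and record the highest bus number found. */
        hose->last_busno = pciauto_bus_scan(hose, hose->first_busno);
}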
+ */
+
+#define PCI_DMA_BUS_IS_PHYS (1)
+
+/* pci_unmap_{page,single} is a no-op, so */
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
+#define pci_unmap_addr(PTR, ADDR_NAME) (0)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
+#define pci_unmap_len(PTR, LEN_NAME) (0)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
+
+/* We cannot access memory above 4GB */
+#define pci_dac_dma_supported(pci_dev, mask) (0)
+
+/* Map a range of PCI memory or I/O space for a device into user space */
+int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
+		enum pci_mmap_state mmap_state, int write_combine);
+
+/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
+#define HAVE_PCI_MMAP 1
+
+static inline void pcibios_add_platform_entries(struct pci_dev *dev)
+{
+}
+
+#endif /* __KERNEL__ */
+
+/* Implement the pci_ DMA API in terms of the generic device dma_ one */
+#include <asm-generic/pci-dma-compat.h>
+
+/* Generic PCI */
+#include <asm-generic/pci.h>
+
+#endif /* _XTENSA_PCI_H */
diff --git a/include/asm-xtensa/percpu.h b/include/asm-xtensa/percpu.h
new file mode 100644
index 0000000..6d2bc2a
--- /dev/null
+++ b/include/asm-xtensa/percpu.h
@@ -0,0 +1,16 @@
+/*
+ * linux/include/asm-xtensa/percpu.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PERCPU__
+#define _XTENSA_PERCPU__
+
+#include <asm-generic/percpu.h>
+
+#endif /* _XTENSA_PERCPU__ */
diff --git a/include/asm-xtensa/pgalloc.h b/include/asm-xtensa/pgalloc.h
new file mode 100644
index 0000000..734a8d0
--- /dev/null
+++ b/include/asm-xtensa/pgalloc.h
@@ -0,0 +1,116 @@
+/*
+ * linux/include/asm-xtensa/pgalloc.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Copyright (C) 2001-2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PGALLOC_H
+#define _XTENSA_PGALLOC_H
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <linux/highmem.h>
+#include <asm/processor.h>
+#include <asm/cacheflush.h>
+
+
+/* Cache aliasing:
+ *
+ * If the cache size for one way is greater than the page size, we have to
+ * deal with cache aliasing. The cache index is wider than the page size:
+ *
+ * |cache |
+ * |pgnum |page|  virtual address
+ * |xxxxxX|zzzz|
+ * |      |    |
+ *   \  / |    |
+ *  trans.|    |
+ *   /  \ |    |
+ * |yyyyyY|zzzz|  physical address
+ *
+ * When the page number is translated to the physical page address, the lowest
+ * bit(s) (X) that are also part of the cache index are also translated (Y).
+ * If this translation changes this bit (X), the cache index is also affected,
+ * thus resulting in a different cache line than before.
+ * The kernel does not provide a mechanism to ensure that the page color
+ * (represented by this bit) remains the same when allocated or when pages
+ * are remapped. When user pages are mapped into kernel space, the color of
+ * the page might also change.
+ *
+ * We use the address space VMALLOC_END ... VMALLOC_END + DCACHE_WAY_SIZE * 2
+ * to temporarily map a page so we can match the color.
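[Editor's note: a small user-space illustration of the page-colour problem described above and of the PAGE_COLOR* macros defined just below. It assumes, purely for the demo, 4 kB pages and an 8 kB cache way, so bit 12 of an address selects one of two colours. Not part of this patch.]

#include <stdio.h>

#define DEMO_PAGE_SHIFT   12
#define DEMO_PAGE_SIZE    (1UL << DEMO_PAGE_SHIFT)
#define DEMO_PAGE_MASK    (~(DEMO_PAGE_SIZE - 1))
#define DEMO_WAY_SIZE     8192UL   /* assumed: two pages per cache way */

#define DEMO_COLOR_MASK   (DEMO_PAGE_MASK & (DEMO_WAY_SIZE - 1))
#define DEMO_COLOR(a)     (((unsigned long)(a) & DEMO_COLOR_MASK) >> DEMO_PAGE_SHIFT)

int main(void)
{
        unsigned long va = 0xc0003000;   /* example virtual address  */
        unsigned long pa = 0x04a42000;   /* example physical address */

        /* Different colours mean the same data can sit in two cache lines. */
        printf("vaddr colour %lu, paddr colour %lu -> %s\n",
               DEMO_COLOR(va), DEMO_COLOR(pa),
               DEMO_COLOR(va) == DEMO_COLOR(pa) ? "no alias" : "alias");
        return 0;
}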
+ */ + +#if (DCACHE_WAY_SIZE > PAGE_SIZE) +# define PAGE_COLOR_MASK (PAGE_MASK & (DCACHE_WAY_SIZE-1)) +# define PAGE_COLOR(a) \ + (((unsigned long)(a)&PAGE_COLOR_MASK) >> PAGE_SHIFT) +# define PAGE_COLOR_EQ(a,b) \ + ((((unsigned long)(a) ^ (unsigned long)(b)) & PAGE_COLOR_MASK) == 0) +# define PAGE_COLOR_MAP0(v) \ + (VMALLOC_END + ((unsigned long)(v) & PAGE_COLOR_MASK)) +# define PAGE_COLOR_MAP1(v) \ + (VMALLOC_END + ((unsigned long)(v) & PAGE_COLOR_MASK) + DCACHE_WAY_SIZE) +#endif + +/* + * Allocating and freeing a pmd is trivial: the 1-entry pmd is + * inside the pgd, so has no extra memory associated with it. + */ + +#define pgd_free(pgd) free_page((unsigned long)(pgd)) + +#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK + +static inline void +pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *pte) +{ + pmd_val(*(pmdp)) = (unsigned long)(pte); + __asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (pmdp)); +} + +static inline void +pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *page) +{ + pmd_val(*(pmdp)) = (unsigned long)page_to_virt(page); + __asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (pmdp)); +} + + + +#else + +# define pmd_populate_kernel(mm, pmdp, pte) \ + (pmd_val(*(pmdp)) = (unsigned long)(pte)) +# define pmd_populate(mm, pmdp, page) \ + (pmd_val(*(pmdp)) = (unsigned long)page_to_virt(page)) + +#endif + +static inline pgd_t* +pgd_alloc(struct mm_struct *mm) +{ + pgd_t *pgd; + + pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, PGD_ORDER); + + if (likely(pgd != NULL)) + __flush_dcache_page((unsigned long)pgd); + + return pgd; +} + +extern pte_t* pte_alloc_one_kernel(struct mm_struct* mm, unsigned long addr); +extern struct page* pte_alloc_one(struct mm_struct* mm, unsigned long addr); + +#define pte_free_kernel(pte) free_page((unsigned long)pte) +#define pte_free(pte) __free_page(pte) + +#endif /* __KERNEL__ */ +#endif /* _XTENSA_PGALLOC_H */ diff --git a/include/asm-xtensa/pgtable.h b/include/asm-xtensa/pgtable.h new file mode 100644 index 0000000..0bb6416 --- /dev/null +++ b/include/asm-xtensa/pgtable.h @@ -0,0 +1,468 @@ +/* + * linux/include/asm-xtensa/page.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version2 as + * published by the Free Software Foundation. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_PGTABLE_H +#define _XTENSA_PGTABLE_H + +#include <asm-generic/pgtable-nopmd.h> +#include <asm/page.h> + +/* Assertions. */ + +#ifdef CONFIG_MMU + + +#if (XCHAL_MMU_RINGS < 2) +# error Linux build assumes at least 2 ring levels. +#endif + +#if (XCHAL_MMU_CA_BITS != 4) +# error We assume exactly four bits for CA. +#endif + +#if (XCHAL_MMU_SR_BITS != 0) +# error We have no room for SR bits. +#endif + +/* + * Use the first min-wired way for mapping page-table pages. + * Page coloring requires a second min-wired way. + */ + +#if (XCHAL_DTLB_MINWIRED_SETS == 0) +# error Need a min-wired way for mapping page-table pages +#endif + +#define DTLB_WAY_PGTABLE XCHAL_DTLB_SET(XCHAL_DTLB_MINWIRED_SET0, WAY) + +#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK +# if XCHAL_DTLB_SET(XCHAL_DTLB_MINWIRED_SET0, WAYS) >= 2 +# define DTLB_WAY_DCACHE_ALIAS0 (DTLB_WAY_PGTABLE + 1) +# define DTLB_WAY_DCACHE_ALIAS1 (DTLB_WAY_PGTABLE + 2) +# else +# error Page coloring requires its own wired dtlb way! +# endif +#endif + +#endif /* CONFIG_MMU */ + +/* + * We only use two ring levels, user and kernel space. 
+ */ + +#define USER_RING 1 /* user ring level */ +#define KERNEL_RING 0 /* kernel ring level */ + +/* + * The Xtensa architecture port of Linux has a two-level page table system, + * i.e. the logical three-level Linux page table layout are folded. + * Each task has the following memory page tables: + * + * PGD table (page directory), ie. 3rd-level page table: + * One page (4 kB) of 1024 (PTRS_PER_PGD) pointers to PTE tables + * (Architectures that don't have the PMD folded point to the PMD tables) + * + * The pointer to the PGD table for a given task can be retrieved from + * the task structure (struct task_struct*) t, e.g. current(): + * (t->mm ? t->mm : t->active_mm)->pgd + * + * PMD tables (page middle-directory), ie. 2nd-level page tables: + * Absent for the Xtensa architecture (folded, PTRS_PER_PMD == 1). + * + * PTE tables (page table entry), ie. 1st-level page tables: + * One page (4 kB) of 1024 (PTRS_PER_PTE) PTEs with a special PTE + * invalid_pte_table for absent mappings. + * + * The individual pages are 4 kB big with special pages for the empty_zero_page. + */ +#define PGDIR_SHIFT 22 +#define PGDIR_SIZE (1UL << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE-1)) + +/* + * Entries per page directory level: we use two-level, so + * we don't really have any PMD directory physically. + */ +#define PTRS_PER_PTE 1024 +#define PTRS_PER_PTE_SHIFT 10 +#define PTRS_PER_PMD 1 +#define PTRS_PER_PGD 1024 +#define PGD_ORDER 0 +#define PMD_ORDER 0 +#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) +#define FIRST_USER_ADDRESS XCHAL_SEG_MAPPABLE_VADDR +#define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT) + +/* virtual memory area. We keep a distance to other memory regions to be + * on the safe side. We also use this area for cache aliasing. + */ + +// FIXME: virtual memory area must be configuration-dependent + +#define VMALLOC_START 0xC0000000 +#define VMALLOC_END 0xC7FF0000 + +/* Xtensa Linux config PTE layout (when present): + * 31-12: PPN + * 11-6: Software + * 5-4: RING + * 3-0: CA + * + * Similar to the Alpha and MIPS ports, we need to keep track of the ref + * and mod bits in software. We have a software "you can read + * from this page" bit, and a hardware one which actually lets the + * process read from the page. On the same token we have a software + * writable bit and the real hardware one which actually lets the + * process write to the page. + * + * See further below for PTE layout for swapped-out pages. 
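[Editor's note: a worked example of the two-level layout described earlier in this header. With PGDIR_SHIFT == 22 and 4 kB pages, a 32-bit virtual address splits into a 10-bit PGD index, a 10-bit PTE index and a 12-bit page offset; the expressions below mirror the pgd_index()/pte_index() macros defined further down. Stand-alone C, illustration only.]

#include <stdio.h>

#define DEMO_PAGE_SHIFT   12
#define DEMO_PGDIR_SHIFT  22
#define DEMO_PTRS_PER_PTE 1024

int main(void)
{
        unsigned long addr = 0x12345678;

        unsigned long pgd_idx = addr >> DEMO_PGDIR_SHIFT;
        unsigned long pte_idx = (addr >> DEMO_PAGE_SHIFT) & (DEMO_PTRS_PER_PTE - 1);
        unsigned long offset  = addr & ((1UL << DEMO_PAGE_SHIFT) - 1);

        /* 0x12345678 -> pgd 0x48, pte 0x345, offset 0x678 */
        printf("pgd %#lx  pte %#lx  offset %#lx\n", pgd_idx, pte_idx, offset);
        return 0;
}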
+ */ + +#define _PAGE_VALID (1<<0) /* hardware: page is accessible */ +#define _PAGE_WRENABLE (1<<1) /* hardware: page is writable */ + +/* None of these cache modes include MP coherency: */ +#define _PAGE_NO_CACHE (0<<2) /* bypass, non-speculative */ +#if XCHAL_DCACHE_IS_WRITEBACK +# define _PAGE_WRITEBACK (1<<2) /* write back */ +# define _PAGE_WRITETHRU (2<<2) /* write through */ +#else +# define _PAGE_WRITEBACK (1<<2) /* assume write through */ +# define _PAGE_WRITETHRU (1<<2) +#endif +#define _PAGE_NOALLOC (3<<2) /* don't allocate cache,if not cached */ +#define _CACHE_MASK (3<<2) + +#define _PAGE_USER (1<<4) /* user access (ring=1) */ +#define _PAGE_KERNEL (0<<4) /* kernel access (ring=0) */ + +/* Software */ +#define _PAGE_RW (1<<6) /* software: page writable */ +#define _PAGE_DIRTY (1<<7) /* software: page dirty */ +#define _PAGE_ACCESSED (1<<8) /* software: page accessed (read) */ +#define _PAGE_FILE (1<<9) /* nonlinear file mapping*/ + +#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _CACHE_MASK | _PAGE_DIRTY) +#define _PAGE_PRESENT ( _PAGE_VALID | _PAGE_WRITEBACK | _PAGE_ACCESSED) + +#ifdef CONFIG_MMU + +# define PAGE_NONE __pgprot(_PAGE_PRESENT) +# define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_RW) +# define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER) +# define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER) +# define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_KERNEL | _PAGE_WRENABLE) +# define PAGE_INVALID __pgprot(_PAGE_USER) + +# if (DCACHE_WAY_SIZE > PAGE_SIZE) +# define PAGE_DIRECTORY __pgprot(_PAGE_VALID | _PAGE_ACCESSED | _PAGE_KERNEL) +# else +# define PAGE_DIRECTORY __pgprot(_PAGE_PRESENT | _PAGE_KERNEL) +# endif + +#else /* no mmu */ + +# define PAGE_NONE __pgprot(0) +# define PAGE_SHARED __pgprot(0) +# define PAGE_COPY __pgprot(0) +# define PAGE_READONLY __pgprot(0) +# define PAGE_KERNEL __pgprot(0) + +#endif + +/* + * On certain configurations of Xtensa MMUs (eg. the initial Linux config), + * the MMU can't do page protection for execute, and considers that the same as + * read. Also, write permissions may imply read permissions. + * What follows is the closest we can get by reasonable means.. + * See linux/mm/mmap.c for protection_map[] array that uses these definitions. + */ +#define __P000 PAGE_NONE /* private --- */ +#define __P001 PAGE_READONLY /* private --r */ +#define __P010 PAGE_COPY /* private -w- */ +#define __P011 PAGE_COPY /* private -wr */ +#define __P100 PAGE_READONLY /* private x-- */ +#define __P101 PAGE_READONLY /* private x-r */ +#define __P110 PAGE_COPY /* private xw- */ +#define __P111 PAGE_COPY /* private xwr */ + +#define __S000 PAGE_NONE /* shared --- */ +#define __S001 PAGE_READONLY /* shared --r */ +#define __S010 PAGE_SHARED /* shared -w- */ +#define __S011 PAGE_SHARED /* shared -wr */ +#define __S100 PAGE_READONLY /* shared x-- */ +#define __S101 PAGE_READONLY /* shared x-r */ +#define __S110 PAGE_SHARED /* shared xw- */ +#define __S111 PAGE_SHARED /* shared xwr */ + +#ifndef __ASSEMBLY__ + +#define pte_ERROR(e) \ + printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) +#define pgd_ERROR(e) \ + printk("%s:%d: bad pgd entry %08lx.\n", __FILE__, __LINE__, pgd_val(e)) + +extern unsigned long empty_zero_page[1024]; + +#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) + +extern pgd_t swapper_pg_dir[PAGE_SIZE/sizeof(pgd_t)]; + +/* + * The pmd contains the kernel virtual address of the pte page. 
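[Editor's note: a bit-level illustration of the PTE layout documented above, built from the _PAGE_* values in this header and packed the same way as the pfn_pte()/pte_pfn() macros that appear below. The pfn is arbitrary and the pgprot wrapper is dropped for the demo; plain C, not part of this patch.]

#include <stdio.h>

#define DEMO_PAGE_SHIFT  12
#define _PAGE_VALID      (1 << 0)
#define _PAGE_WRITEBACK  (1 << 2)
#define _PAGE_USER       (1 << 4)
#define _PAGE_RW         (1 << 6)
#define _PAGE_ACCESSED   (1 << 8)

#define _PAGE_PRESENT    (_PAGE_VALID | _PAGE_WRITEBACK | _PAGE_ACCESSED)
#define DEMO_PAGE_SHARED (_PAGE_PRESENT | _PAGE_USER | _PAGE_RW)

int main(void)
{
        unsigned long pfn = 0x1a2b3;
        unsigned long pte = (pfn << DEMO_PAGE_SHIFT) | DEMO_PAGE_SHARED; /* pfn_pte() */

        /* PPN lives in bits 31..12, software/ring/CA bits in 11..0. */
        printf("pte %#lx: pfn %#lx, flags %#lx\n",
               pte, pte >> DEMO_PAGE_SHIFT,
               pte & ((1UL << DEMO_PAGE_SHIFT) - 1));
        return 0;
}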
+ */ +#define pmd_page_kernel(pmd) ((unsigned long)(pmd_val(pmd) & PAGE_MASK)) +#define pmd_page(pmd) virt_to_page(pmd_val(pmd)) + +/* + * The following only work if pte_present() is true. + */ +#define pte_none(pte) (!(pte_val(pte) ^ _PAGE_USER)) +#define pte_present(pte) (pte_val(pte) & _PAGE_VALID) +#define pte_clear(mm,addr,ptep) \ + do { update_pte(ptep, __pte(_PAGE_USER)); } while(0) + +#define pmd_none(pmd) (!pmd_val(pmd)) +#define pmd_present(pmd) (pmd_val(pmd) & PAGE_MASK) +#define pmd_clear(pmdp) do { set_pmd(pmdp, __pmd(0)); } while (0) +#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK) + +/* Note: We use the _PAGE_USER bit to indicate write-protect kernel memory */ + +static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; } +static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; } +static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } +static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } +static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } +static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~(_PAGE_RW | _PAGE_WRENABLE); return pte; } +static inline pte_t pte_rdprotect(pte_t pte) { pte_val(pte) &= ~_PAGE_USER; return pte; } +static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; } +static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; } +static inline pte_t pte_mkread(pte_t pte) { pte_val(pte) |= _PAGE_USER; return pte; } +static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; } +static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; } +static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_RW; return pte; } + +/* + * Conversion functions: convert a page and protection to a page entry, + * and a page entry and page directory to the page they refer to. + */ +#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) +#define pte_same(a,b) (pte_val(a) == pte_val(b)) +#define pte_page(x) pfn_to_page(pte_pfn(x)) +#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) +#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot) + +extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot) +{ + return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)); +} + +/* + * Certain architectures need to do special things when pte's + * within a page table are directly modified. Thus, the following + * hook is made available. 
+ */ +static inline void update_pte(pte_t *ptep, pte_t pteval) +{ + *ptep = pteval; +#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK + __asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (ptep)); +#endif +} + +extern inline void +set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval) +{ + update_pte(ptep, pteval); +} + + +extern inline void +set_pmd(pmd_t *pmdp, pmd_t pmdval) +{ + *pmdp = pmdval; +#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK + __asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (pmdp)); +#endif +} + + +static inline int +ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep) +{ + pte_t pte = *ptep; + if (!pte_young(pte)) + return 0; + update_pte(ptep, pte_mkold(pte)); + return 1; +} + +static inline int +ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep) +{ + pte_t pte = *ptep; + if (!pte_dirty(pte)) + return 0; + update_pte(ptep, pte_mkclean(pte)); + return 1; +} + +static inline pte_t +ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) +{ + pte_t pte = *ptep; + pte_clear(mm, addr, ptep); + return pte; +} + +static inline void +ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) +{ + pte_t pte = *ptep; + update_pte(ptep, pte_wrprotect(pte)); +} + +/* to find an entry in a kernel page-table-directory */ +#define pgd_offset_k(address) pgd_offset(&init_mm, address) + +/* to find an entry in a page-table-directory */ +#define pgd_offset(mm,address) ((mm)->pgd + pgd_index(address)) + +#define pgd_index(address) ((address) >> PGDIR_SHIFT) + +/* Find an entry in the second-level page table.. */ +#define pmd_offset(dir,address) ((pmd_t*)(dir)) + +/* Find an entry in the third-level page table.. */ +#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +#define pte_offset_kernel(dir,addr) \ + ((pte_t*) pmd_page_kernel(*(dir)) + pte_index(addr)) +#define pte_offset_map(dir,addr) pte_offset_kernel((dir),(addr)) +#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir),(addr)) + +#define pte_unmap(pte) do { } while (0) +#define pte_unmap_nested(pte) do { } while (0) + + +/* + * Encode and decode a swap entry. + * Each PTE in a process VM's page table is either: + * "present" -- valid and not swapped out, protection bits are meaningful; + * "not present" -- which further subdivides in these two cases: + * "none" -- no mapping at all; identified by pte_none(), set by pte_clear( + * "swapped out" -- the page is swapped out, and the SWP macros below + * are used to store swap file info in the PTE itself. + * + * In the Xtensa processor MMU, any PTE entries in user space (or anywhere + * in virtual memory that can map differently across address spaces) + * must have a correct ring value that represents the RASID field that + * is changed when switching address spaces. Eg. such PTE entries cannot + * be set to ring zero, because that can cause a (global) kernel ASID + * entry to be created in the TLBs (even with invalid cache attribute), + * potentially causing a multihit exception when going back to another + * address space that mapped the same virtual address at another ring. + * + * SO: we avoid using ring bits (_PAGE_RING_MASK) in "not present" PTEs. + * We also avoid using the _PAGE_VALID bit which must be zero for non-present + * pages. + * + * We end up with the following available bits: 1..3 and 7..31. 
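[Editor's note: the comment above, continued just below, reserves 6 bits for the swap type and 19 bits for the swap offset. A stand-alone round-trip check of that packing, using the same shifts as the __swp_type()/__swp_offset()/__swp_entry() macros that follow; with 4 kB pages the 19 offset bits give the 2 GB per-swap-file limit mentioned in the comment. Illustration only.]

#include <stdio.h>

#define DEMO_SWP_TYPE(val)         (((val) >> 7) & 0x3f)
#define DEMO_SWP_OFFSET(val)       ((val) >> 13)
#define DEMO_SWP_ENTRY(type, offs) (((type) << 7) | ((offs) << 13))

int main(void)
{
        unsigned long type = 3, offs = 0x1234;
        unsigned long val  = DEMO_SWP_ENTRY(type, offs);

        /* Expect the original type and offset back: type 3, offset 0x1234. */
        printf("entry %#lx -> type %lu offset %#lx\n",
               val, DEMO_SWP_TYPE(val), DEMO_SWP_OFFSET(val));
        return 0;
}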
+ * We don't bother with 1..3 for now (we can use them later if needed), + * and chose to allocate 6 bits for SWP_TYPE and the remaining 19 bits + * for SWP_OFFSET. At least 5 bits are needed for SWP_TYPE, because it + * is currently implemented as an index into swap_info[MAX_SWAPFILES] + * and MAX_SWAPFILES is currently defined as 32 in <linux/swap.h>. + * However, for some reason all other architectures in the 2.4 kernel + * reserve either 6, 7, or 8 bits so I'll not detract from that for now. :) + * SWP_OFFSET is an offset into the swap file in page-size units, so + * with 4 kB pages, 19 bits supports a maximum swap file size of 2 GB. + * + * FIXME: 2 GB isn't very big. Other bits can be used to allow + * larger swap sizes. In the meantime, it appears relatively easy to get + * around the 2 GB limitation by simply using multiple swap files. + */ + +#define __swp_type(entry) (((entry).val >> 7) & 0x3f) +#define __swp_offset(entry) ((entry).val >> 13) +#define __swp_entry(type,offs) ((swp_entry_t) {((type) << 7) | ((offs) << 13)}) +#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) +#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) + +#define PTE_FILE_MAX_BITS 29 +#define pte_to_pgoff(pte) (pte_val(pte) >> 3) +#define pgoff_to_pte(off) ((pte_t) { ((off) << 3) | _PAGE_FILE }) + + +#endif /* !defined (__ASSEMBLY__) */ + + +#ifdef __ASSEMBLY__ + +/* Assembly macro _PGD_INDEX is the same as C pgd_index(unsigned long), + * _PGD_OFFSET as C pgd_offset(struct mm_struct*, unsigned long), + * _PMD_OFFSET as C pmd_offset(pgd_t*, unsigned long) + * _PTE_OFFSET as C pte_offset(pmd_t*, unsigned long) + * + * Note: We require an additional temporary register which can be the same as + * the register that holds the address. + * + * ((pte_t*) ((unsigned long)(pmd_val(*pmd) & PAGE_MASK)) + pte_index(addr)) + * + */ +#define _PGD_INDEX(rt,rs) extui rt, rs, PGDIR_SHIFT, 32-PGDIR_SHIFT +#define _PTE_INDEX(rt,rs) extui rt, rs, PAGE_SHIFT, PTRS_PER_PTE_SHIFT + +#define _PGD_OFFSET(mm,adr,tmp) l32i mm, mm, MM_PGD; \ + _PGD_INDEX(tmp, adr); \ + addx4 mm, tmp, mm + +#define _PTE_OFFSET(pmd,adr,tmp) _PTE_INDEX(tmp, adr); \ + srli pmd, pmd, PAGE_SHIFT; \ + slli pmd, pmd, PAGE_SHIFT; \ + addx4 pmd, tmp, pmd + +#else + +extern void paging_init(void); + +#define kern_addr_valid(addr) (1) + +extern void update_mmu_cache(struct vm_area_struct * vma, + unsigned long address, pte_t pte); + +/* + * remap a physical address `phys' of size `size' with page protection `prot' + * into virtual address `from' + */ +#define io_remap_page_range(vma,from,phys,size,prot) \ + remap_pfn_range(vma, from, (phys) >> PAGE_SHIFT, size, prot) + + +/* No page table caches to init */ + +#define pgtable_cache_init() do { } while (0) + +typedef pte_t *pte_addr_t; + +#endif /* !defined (__ASSEMBLY__) */ + +#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG +#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY +#define __HAVE_ARCH_PTEP_GET_AND_CLEAR +#define __HAVE_ARCH_PTEP_SET_WRPROTECT +#define __HAVE_ARCH_PTEP_MKDIRTY +#define __HAVE_ARCH_PTE_SAME + +#include <asm-generic/pgtable.h> + +#endif /* _XTENSA_PGTABLE_H */ diff --git a/include/asm-xtensa/platform-iss/hardware.h b/include/asm-xtensa/platform-iss/hardware.h new file mode 100644 index 0000000..22240f0 --- /dev/null +++ b/include/asm-xtensa/platform-iss/hardware.h @@ -0,0 +1,29 @@ +/* + * include/asm-xtensa/platform-iss/hardware.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. 
See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 Tensilica Inc. + */ + +/* + * This file contains the default configuration of ISS. + */ + +#ifndef __ASM_XTENSA_ISS_HARDWARE +#define __ASM_XTENSA_ISS_HARDWARE + +/* + * Memory configuration. + */ + +#define PLATFORM_DEFAULT_MEM_START XSHAL_RAM_PADDR +#define PLATFORM_DEFAULT_MEM_SIZE XSHAL_RAM_VSIZE + +/* + * Interrupt configuration. + */ + +#endif /* __ASM_XTENSA_ISS_HARDWARE */ diff --git a/include/asm-xtensa/platform.h b/include/asm-xtensa/platform.h new file mode 100644 index 0000000..3616389 --- /dev/null +++ b/include/asm-xtensa/platform.h @@ -0,0 +1,92 @@ +/* + * include/asm-xtensa/platform.h + * + * Platform specific functions + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file "COPYING" in the main directory of + * this archive for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_PLATFORM_H +#define _XTENSA_PLATFORM_H + +#include <linux/config.h> +#include <linux/types.h> +#include <linux/pci.h> + +#include <asm/bootparam.h> + +/* + * platform_init is called before the mmu is initialized to give the + * platform a early hook-up. bp_tag_t is a list of configuration tags + * passed from the boot-loader. + */ +extern void platform_init(bp_tag_t*); + +/* + * platform_setup is called from setup_arch with a pointer to the command-line + * string. + */ +extern void platform_setup (char **); + +/* + * platform_init_irq is called from init_IRQ. + */ +extern void platform_init_irq (void); + +/* + * platform_restart is called to restart the system. + */ +extern void platform_restart (void); + +/* + * platform_halt is called to stop the system and halt. + */ +extern void platform_halt (void); + +/* + * platform_power_off is called to stop the system and power it off. + */ +extern void platform_power_off (void); + +/* + * platform_idle is called from the idle function. + */ +extern void platform_idle (void); + +/* + * platform_heartbeat is called every HZ + */ +extern void platform_heartbeat (void); + +/* + * platform_pcibios_init is called to allow the platform to setup the pci bus. + */ +extern void platform_pcibios_init (void); + +/* + * platform_pcibios_fixup allows to modify the PCI configuration. + */ +extern int platform_pcibios_fixup (void); + +/* + * platform_calibrate_ccount calibrates cpu clock freq (CONFIG_XTENSA_CALIBRATE) + */ +extern void platform_calibrate_ccount (void); + +/* + * platform_get_rtc_time returns RTC seconds (returns 0 for no error) + */ +extern int platform_get_rtc_time(time_t*); + +/* + * platform_set_rtc_time set RTC seconds (returns 0 for no error) + */ +extern int platform_set_rtc_time(time_t); + + +#endif /* _XTENSA_PLATFORM_H */ + diff --git a/include/asm-xtensa/poll.h b/include/asm-xtensa/poll.h new file mode 100644 index 0000000..dffe447 --- /dev/null +++ b/include/asm-xtensa/poll.h @@ -0,0 +1,37 @@ +/* + * include/asm-xtensa/poll.h + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file "COPYING" in the main directory of + * this archive for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. 
+ */ + +#ifndef _XTENSA_POLL_H +#define _XTENSA_POLL_H + + +#define POLLIN 0x0001 +#define POLLPRI 0x0002 +#define POLLOUT 0x0004 + +#define POLLERR 0x0008 +#define POLLHUP 0x0010 +#define POLLNVAL 0x0020 + +#define POLLRDNORM 0x0040 +#define POLLRDBAND 0x0080 +#define POLLWRNORM POLLOUT +#define POLLWRBAND 0x0100 + +#define POLLMSG 0x0400 +#define POLLREMOVE 0x0800 + +struct pollfd { + int fd; + short events; + short revents; +}; + +#endif /* _XTENSA_POLL_H */ diff --git a/include/asm-xtensa/posix_types.h b/include/asm-xtensa/posix_types.h new file mode 100644 index 0000000..2c816b0 --- /dev/null +++ b/include/asm-xtensa/posix_types.h @@ -0,0 +1,123 @@ +/* + * include/asm-xtensa/posix_types.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Largely copied from include/asm-ppc/posix_types.h + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_POSIX_TYPES_H +#define _XTENSA_POSIX_TYPES_H + +/* + * This file is generally used by user-level software, so you need to + * be a little careful about namespace pollution etc. Also, we cannot + * assume GCC is being used. + */ + +typedef unsigned long __kernel_ino_t; +typedef unsigned int __kernel_mode_t; +typedef unsigned short __kernel_nlink_t; +typedef long __kernel_off_t; +typedef int __kernel_pid_t; +typedef unsigned short __kernel_ipc_pid_t; +typedef unsigned int __kernel_uid_t; +typedef unsigned int __kernel_gid_t; +typedef unsigned int __kernel_size_t; +typedef int __kernel_ssize_t; +typedef long __kernel_ptrdiff_t; +typedef long __kernel_time_t; +typedef long __kernel_suseconds_t; +typedef long __kernel_clock_t; +typedef int __kernel_timer_t; +typedef int __kernel_clockid_t; +typedef int __kernel_daddr_t; +typedef char * __kernel_caddr_t; +typedef unsigned short __kernel_uid16_t; +typedef unsigned short __kernel_gid16_t; +typedef unsigned int __kernel_uid32_t; +typedef unsigned int __kernel_gid32_t; + +typedef unsigned short __kernel_old_uid_t; +typedef unsigned short __kernel_old_gid_t; +typedef unsigned short __kernel_old_dev_t; + +#ifdef __GNUC__ +typedef long long __kernel_loff_t; +#endif + +typedef struct { + int val[2]; +} __kernel_fsid_t; + +#ifndef __GNUC__ + +#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d)) +#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d)) +#define __FD_ISSET(d, set) ((set)->fds_bits[__FDELT(d)] & __FDMASK(d)) +#define __FD_ZERO(set) \ + ((void) memset ((__ptr_t) (set), 0, sizeof (__kernel_fd_set))) + +#else /* __GNUC__ */ + +#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) \ + || (__GLIBC__ == 2 && __GLIBC_MINOR__ == 0) +/* With GNU C, use inline functions instead so args are evaluated only once: */ + +#undef __FD_SET +static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp) +{ + unsigned long _tmp = fd / __NFDBITS; + unsigned long _rem = fd % __NFDBITS; + fdsetp->fds_bits[_tmp] |= (1UL<<_rem); +} + +#undef __FD_CLR +static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp) +{ + unsigned long _tmp = fd / __NFDBITS; + unsigned long _rem = fd % __NFDBITS; + fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem); +} + +#undef __FD_ISSET +static __inline__ int __FD_ISSET(unsigned long fd, __kernel_fd_set *p) +{ + unsigned long _tmp = fd / __NFDBITS; + unsigned long _rem = fd % __NFDBITS; + return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0; +} + +/* + * This will unroll the loop for the 
normal constant case (8 ints, + * for a 256-bit fd_set) + */ +#undef __FD_ZERO +static __inline__ void __FD_ZERO(__kernel_fd_set *p) +{ + unsigned int *tmp = (unsigned int *)p->fds_bits; + int i; + + if (__builtin_constant_p(__FDSET_LONGS)) { + switch (__FDSET_LONGS) { + case 8: + tmp[0] = 0; tmp[1] = 0; tmp[2] = 0; tmp[3] = 0; + tmp[4] = 0; tmp[5] = 0; tmp[6] = 0; tmp[7] = 0; + return; + } + } + i = __FDSET_LONGS; + while (i) { + i--; + *tmp = 0; + tmp++; + } +} + +#endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */ +#endif /* __GNUC__ */ +#endif /* _XTENSA_POSIX_TYPES_H */ diff --git a/include/asm-xtensa/processor.h b/include/asm-xtensa/processor.h new file mode 100644 index 0000000..9cab5e4 --- /dev/null +++ b/include/asm-xtensa/processor.h @@ -0,0 +1,205 @@ +/* + * include/asm-xtensa/processor.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_PROCESSOR_H +#define _XTENSA_PROCESSOR_H + +#ifdef __ASSEMBLY__ +#define _ASMLANGUAGE +#endif + +#include <xtensa/config/core.h> +#include <xtensa/config/specreg.h> +#include <xtensa/config/tie.h> +#include <xtensa/config/system.h> + +#include <asm/ptrace.h> +#include <asm/types.h> +#include <asm/coprocessor.h> + +/* Assertions. */ + +#if (XCHAL_HAVE_WINDOWED != 1) +#error Linux requires the Xtensa Windowed Registers Option. +#endif + +/* + * User space process size: 1 GB. + * Windowed call ABI requires caller and callee to be located within the same + * 1 GB region. The C compiler places trampoline code on the stack for sources + * that take the address of a nested C function (a feature used by glibc), so + * the 1 GB requirement applies to the stack as well. + */ + +#define TASK_SIZE 0x40000000 + +/* + * General exception cause assigned to debug exceptions. Debug exceptions go + * to their own vector, rather than the general exception vectors (user, + * kernel, double); and their specific causes are reported via DEBUGCAUSE + * rather than EXCCAUSE. However it is sometimes convenient to redirect debug + * exceptions to the general exception mechanism. To do this, an otherwise + * unused EXCCAUSE value was assigned to debug exceptions for this purpose. + */ + +#define EXCCAUSE_MAPPED_DEBUG 63 + +/* + * We use DEPC also as a flag to distinguish between double and regular + * exceptions. For performance reasons, DEPC might contain the value of + * EXCCAUSE for regular exceptions, so we use this definition to mark a + * valid double exception address. + * (Note: We use it in bgeui, so it should be 64, 128, or 256) + */ + +#define VALID_DOUBLE_EXCEPTION_ADDRESS 64 + +/* LOCKLEVEL defines the interrupt level that masks all + * general-purpose interrupts. + */ +#define LOCKLEVEL 1 + +/* WSBITS and WBBITS are the width of the WINDOWSTART and WINDOWBASE + * registers + */ +#define WSBITS (XCHAL_NUM_AREGS / 4) /* width of WINDOWSTART in bits */ +#define WBBITS (XCHAL_NUM_AREGS_LOG2 - 2) /* width of WINDOWBASE in bits */ + +#ifndef __ASSEMBLY__ + +/* Build a valid return address for the specified call winsize. 
+ * winsize must be 1 (call4), 2 (call8), or 3 (call12) + */ +#define MAKE_RA_FOR_CALL(ra,ws) (((ra) & 0x3fffffff) | (ws) << 30) + +/* Convert return address to a valid pc + * Note: We assume that the stack pointer is in the same 1GB ranges as the ra + */ +#define MAKE_PC_FROM_RA(ra,sp) (((ra) & 0x3fffffff) | ((sp) & 0xc0000000)) + +typedef struct { + unsigned long seg; +} mm_segment_t; + +struct thread_struct { + + /* kernel's return address and stack pointer for context switching */ + unsigned long ra; /* kernel's a0: return address and window call size */ + unsigned long sp; /* kernel's a1: stack pointer */ + + mm_segment_t current_ds; /* see uaccess.h for example uses */ + + /* struct xtensa_cpuinfo info; */ + + unsigned long bad_vaddr; /* last user fault */ + unsigned long bad_uaddr; /* last kernel fault accessing user space */ + unsigned long error_code; + + unsigned long ibreak[XCHAL_NUM_IBREAK]; + unsigned long dbreaka[XCHAL_NUM_DBREAK]; + unsigned long dbreakc[XCHAL_NUM_DBREAK]; + + /* Allocate storage for extra state and coprocessor state. */ + unsigned char cp_save[XTENSA_CP_EXTRA_SIZE] + __attribute__ ((aligned(XTENSA_CP_EXTRA_ALIGN))); + + /* Make structure 16 bytes aligned. */ + int align[0] __attribute__ ((aligned(16))); +}; + + +/* + * Default implementation of macro that returns current + * instruction pointer ("program counter"). + */ +#define current_text_addr() ({ __label__ _l; _l: &&_l;}) + + +/* This decides where the kernel will search for a free chunk of vm + * space during mmap's. + */ +#define TASK_UNMAPPED_BASE (TASK_SIZE / 2) + +#define INIT_THREAD \ +{ \ + ra: 0, \ + sp: sizeof(init_stack) + (long) &init_stack, \ + current_ds: {0}, \ + /*info: {0}, */ \ + bad_vaddr: 0, \ + bad_uaddr: 0, \ + error_code: 0, \ +} + + +/* + * Do necessary setup to start up a newly executed thread. + * Note: We set-up ps as if we did a call4 to the new pc. + * set_thread_state in signal.c depends on it. + */ +#define USER_PS_VALUE ( (1 << XCHAL_PS_WOE_SHIFT) + \ + (1 << XCHAL_PS_CALLINC_SHIFT) + \ + (USER_RING << XCHAL_PS_RING_SHIFT) + \ + (1 << XCHAL_PS_PROGSTACK_SHIFT) + \ + (1 << XCHAL_PS_EXCM_SHIFT) ) + +/* Clearing a0 terminates the backtrace. */ +#define start_thread(regs, new_pc, new_sp) \ + regs->pc = new_pc; \ + regs->ps = USER_PS_VALUE; \ + regs->areg[1] = new_sp; \ + regs->areg[0] = 0; \ + regs->wmask = 1; \ + regs->depc = 0; \ + regs->windowbase = 0; \ + regs->windowstart = 1; + +/* Forward declaration */ +struct task_struct; +struct mm_struct; + +// FIXME: do we need release_thread for CP?? +/* Free all resources held by a thread. */ +#define release_thread(thread) do { } while(0) + +// FIXME: do we need prepare_to_copy (lazy status) for CP?? +/* Prepare to copy thread state - unlazy all lazy status */ +#define prepare_to_copy(tsk) do { } while (0) + +/* + * create a kernel thread without removing it from tasklists + */ +extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); + +/* Copy and release all segment info associated with a VM */ + +#define copy_segments(p, mm) do { } while(0) +#define release_segments(mm) do { } while(0) +#define forget_segments() do { } while (0) + +#define thread_saved_pc(tsk) (xtensa_pt_regs(tsk)->pc) + +extern unsigned long get_wchan(struct task_struct *p); + +#define KSTK_EIP(tsk) (xtensa_pt_regs(tsk)->pc) +#define KSTK_ESP(tsk) (xtensa_pt_regs(tsk)->areg[1]) + +#define cpu_relax() do { } while (0) + +/* Special register access. 
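[Editor's note: the MAKE_RA_FOR_CALL()/MAKE_PC_FROM_RA() macros earlier in this header keep the call-window increment in the top two bits of the return address and recover the real PC from the stack pointer, which the comment says must sit in the same 1 GB region. A host-side check of that bit arithmetic with arbitrary example values; it exercises only the macros, not real Xtensa hardware. Illustration only.]

#include <stdio.h>

#define DEMO_MAKE_RA_FOR_CALL(ra, ws) (((ra) & 0x3fffffff) | ((ws) << 30))
#define DEMO_MAKE_PC_FROM_RA(ra, sp)  (((ra) & 0x3fffffff) | ((sp) & 0xc0000000))

int main(void)
{
        unsigned long pc = 0xd0001234;  /* example return target           */
        unsigned long sp = 0xd7ffff00;  /* stack in the same 1 GB region   */
        unsigned long ra = DEMO_MAKE_RA_FOR_CALL(pc, 1UL);  /* as after call4 */

        /* The top two bits of pc come back from sp, so pc is recovered. */
        printf("ra=%#lx  pc=%#lx\n", ra, DEMO_MAKE_PC_FROM_RA(ra, sp));
        return 0;
}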
*/ + +#define WSR(v,sr) __asm__ __volatile__ ("wsr %0,"__stringify(sr) :: "a"(v)); +#define RSR(v,sr) __asm__ __volatile__ ("rsr %0,"__stringify(sr) : "=a"(v)); + +#define set_sr(x,sr) ({unsigned int v=(unsigned int)x; WSR(v,sr);}) +#define get_sr(sr) ({unsigned int v; RSR(v,sr); v; }) + +#endif /* __ASSEMBLY__ */ +#endif /* _XTENSA_PROCESSOR_H */ diff --git a/include/asm-xtensa/ptrace.h b/include/asm-xtensa/ptrace.h new file mode 100644 index 0000000..2848a5f --- /dev/null +++ b/include/asm-xtensa/ptrace.h @@ -0,0 +1,135 @@ +/* + * include/asm-xtensa/ptrace.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_PTRACE_H +#define _XTENSA_PTRACE_H + +#include <xtensa/config/core.h> + +/* + * Kernel stack + * + * +-----------------------+ -------- STACK_SIZE + * | register file | | + * +-----------------------+ | + * | struct pt_regs | | + * +-----------------------+ | ------ PT_REGS_OFFSET + * double : 16 bytes spill area : | ^ + * excetion :- - - - - - - - - - - -: | | + * frame : struct pt_regs : | | + * :- - - - - - - - - - - -: | | + * | | | | + * | memory stack | | | + * | | | | + * ~ ~ ~ ~ + * ~ ~ ~ ~ + * | | | | + * | | | | + * +-----------------------+ | | --- STACK_BIAS + * | struct task_struct | | | ^ + * current --> +-----------------------+ | | | + * | struct thread_info | | | | + * +-----------------------+ -------- + */ + +#define KERNEL_STACK_SIZE (2 * PAGE_SIZE) + +/* Offsets for exception_handlers[] (3 x 64-entries x 4-byte tables). */ + +#define EXC_TABLE_KSTK 0x004 /* Kernel Stack */ +#define EXC_TABLE_DOUBLE_SAVE 0x008 /* Double exception save area for a0 */ +#define EXC_TABLE_FIXUP 0x00c /* Fixup handler */ +#define EXC_TABLE_PARAM 0x010 /* For passing a parameter to fixup */ +#define EXC_TABLE_SYSCALL_SAVE 0x014 /* For fast syscall handler */ +#define EXC_TABLE_FAST_USER 0x100 /* Fast user exception handler */ +#define EXC_TABLE_FAST_KERNEL 0x200 /* Fast kernel exception handler */ +#define EXC_TABLE_DEFAULT 0x300 /* Default C-Handler */ +#define EXC_TABLE_SIZE 0x400 + +/* Registers used by strace */ + +#define REG_A_BASE 0xfc000000 +#define REG_AR_BASE 0x04000000 +#define REG_PC 0x14000000 +#define REG_PS 0x080000e6 +#define REG_WB 0x08000048 +#define REG_WS 0x08000049 +#define REG_LBEG 0x08000000 +#define REG_LEND 0x08000001 +#define REG_LCOUNT 0x08000002 +#define REG_SAR 0x08000003 +#define REG_DEPC 0x080000c0 +#define REG_EXCCAUSE 0x080000e8 +#define REG_EXCVADDR 0x080000ee +#define SYSCALL_NR 0x1 + +#define AR_REGNO_TO_A_REGNO(ar, wb) (ar - wb*4) & ~(XCHAL_NUM_AREGS - 1) + +/* Other PTRACE_ values defined in <linux/ptrace.h> using values 0-9,16,17,24 */ + +#define PTRACE_GETREGS 12 +#define PTRACE_SETREGS 13 +#define PTRACE_GETFPREGS 14 +#define PTRACE_SETFPREGS 15 +#define PTRACE_GETFPREGSIZE 18 + +#ifndef __ASSEMBLY__ + +/* + * This struct defines the way the registers are stored on the + * kernel stack during a system call or other kernel entry. 
+ */ +struct pt_regs { + unsigned long pc; /* 4 */ + unsigned long ps; /* 8 */ + unsigned long depc; /* 12 */ + unsigned long exccause; /* 16 */ + unsigned long excvaddr; /* 20 */ + unsigned long debugcause; /* 24 */ + unsigned long wmask; /* 28 */ + unsigned long lbeg; /* 32 */ + unsigned long lend; /* 36 */ + unsigned long lcount; /* 40 */ + unsigned long sar; /* 44 */ + unsigned long windowbase; /* 48 */ + unsigned long windowstart; /* 52 */ + unsigned long syscall; /* 56 */ + int reserved[2]; /* 64 */ + + /* Make sure the areg field is 16 bytes aligned. */ + int align[0] __attribute__ ((aligned(16))); + + /* current register frame. + * Note: The ESF for kernel exceptions ends after 16 registers! + */ + unsigned long areg[16]; /* 128 (64) */ +}; + +#ifdef __KERNEL__ +# define xtensa_pt_regs(tsk) ((struct pt_regs*) \ + (((long)(tsk)->thread_info + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4)) - 1) +# define user_mode(regs) (((regs)->ps & 0x00000020)!=0) +# define instruction_pointer(regs) ((regs)->pc) +extern void show_regs(struct pt_regs *); + +# ifndef CONFIG_SMP +# define profile_pc(regs) instruction_pointer(regs) +# endif +#endif /* __KERNEL__ */ + +#else /* __ASSEMBLY__ */ + +#ifdef __KERNEL__ +# include <asm/offsets.h> +#define PT_REGS_OFFSET (KERNEL_STACK_SIZE - PT_USER_SIZE) +#endif + +#endif /* !__ASSEMBLY__ */ +#endif /* _XTENSA_PTRACE_H */ diff --git a/include/asm-xtensa/resource.h b/include/asm-xtensa/resource.h new file mode 100644 index 0000000..17b5ab3 --- /dev/null +++ b/include/asm-xtensa/resource.h @@ -0,0 +1,16 @@ +/* + * include/asm-xtensa/resource.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_RESOURCE_H +#define _XTENSA_RESOURCE_H + +#include <asm-generic/resource.h> + +#endif /* _XTENSA_RESOURCE_H */ diff --git a/include/asm-xtensa/rmap.h b/include/asm-xtensa/rmap.h new file mode 100644 index 0000000..649588b --- /dev/null +++ b/include/asm-xtensa/rmap.h @@ -0,0 +1,16 @@ +/* + * include/asm-xtensa/rmap.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_RMAP_H +#define _XTENSA_RMAP_H + +#include <asm-generic/rmap.h> + +#endif diff --git a/include/asm-xtensa/rwsem.h b/include/asm-xtensa/rwsem.h new file mode 100644 index 0000000..3c02b0e --- /dev/null +++ b/include/asm-xtensa/rwsem.h @@ -0,0 +1,175 @@ +/* + * include/asm-xtensa/rwsem.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Largely copied from include/asm-ppc/rwsem.h + * + * Copyright (C) 2001 - 2005 Tensilica Inc. 
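[Editor's note: the rw-semaphore fast paths below work by adding bias constants to a single signed count. A stand-alone model of the count transitions, using the constant values defined just below; it shows only the arithmetic, not the spinlock or wait-list handling. Illustration only.]

#include <stdio.h>

#define RWSEM_UNLOCKED_VALUE    0x00000000
#define RWSEM_ACTIVE_BIAS       0x00000001
#define RWSEM_WAITING_BIAS      (-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS  RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

int main(void)
{
        int count = RWSEM_UNLOCKED_VALUE;

        /* Two uncontended readers: each add leaves the count > 0. */
        count += RWSEM_ACTIVE_READ_BIAS;
        count += RWSEM_ACTIVE_READ_BIAS;
        printf("two readers:    %#x\n", (unsigned)count);

        /* A writer takes the lock: the result equals the write bias. */
        count = RWSEM_UNLOCKED_VALUE + RWSEM_ACTIVE_WRITE_BIAS;
        printf("one writer:     %#x\n", (unsigned)count);

        /* A reader arriving now sees a result that is not > 0, which is
         * exactly the test __down_read() below uses before blocking. */
        count += RWSEM_ACTIVE_READ_BIAS;
        printf("reader blocked: %#x\n", (unsigned)count);
        return 0;
}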
+ */ + +#ifndef _XTENSA_RWSEM_H +#define _XTENSA_RWSEM_H + +#include <linux/list.h> +#include <linux/spinlock.h> +#include <asm/atomic.h> +#include <asm/system.h> + +/* + * the semaphore definition + */ +struct rw_semaphore { + signed long count; +#define RWSEM_UNLOCKED_VALUE 0x00000000 +#define RWSEM_ACTIVE_BIAS 0x00000001 +#define RWSEM_ACTIVE_MASK 0x0000ffff +#define RWSEM_WAITING_BIAS (-0x00010000) +#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS +#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) + spinlock_t wait_lock; + struct list_head wait_list; +#if RWSEM_DEBUG + int debug; +#endif +}; + +/* + * initialisation + */ +#if RWSEM_DEBUG +#define __RWSEM_DEBUG_INIT , 0 +#else +#define __RWSEM_DEBUG_INIT /* */ +#endif + +#define __RWSEM_INITIALIZER(name) \ + { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ + LIST_HEAD_INIT((name).wait_list) \ + __RWSEM_DEBUG_INIT } + +#define DECLARE_RWSEM(name) \ + struct rw_semaphore name = __RWSEM_INITIALIZER(name) + +extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); +extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); +extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem); +extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); + +static inline void init_rwsem(struct rw_semaphore *sem) +{ + sem->count = RWSEM_UNLOCKED_VALUE; + spin_lock_init(&sem->wait_lock); + INIT_LIST_HEAD(&sem->wait_list); +#if RWSEM_DEBUG + sem->debug = 0; +#endif +} + +/* + * lock for reading + */ +static inline void __down_read(struct rw_semaphore *sem) +{ + if (atomic_add_return(1,(atomic_t *)(&sem->count)) > 0) + smp_wmb(); + else + rwsem_down_read_failed(sem); +} + +static inline int __down_read_trylock(struct rw_semaphore *sem) +{ + int tmp; + + while ((tmp = sem->count) >= 0) { + if (tmp == cmpxchg(&sem->count, tmp, + tmp + RWSEM_ACTIVE_READ_BIAS)) { + smp_wmb(); + return 1; + } + } + return 0; +} + +/* + * lock for writing + */ +static inline void __down_write(struct rw_semaphore *sem) +{ + int tmp; + + tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS, + (atomic_t *)(&sem->count)); + if (tmp == RWSEM_ACTIVE_WRITE_BIAS) + smp_wmb(); + else + rwsem_down_write_failed(sem); +} + +static inline int __down_write_trylock(struct rw_semaphore *sem) +{ + int tmp; + + tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE, + RWSEM_ACTIVE_WRITE_BIAS); + smp_wmb(); + return tmp == RWSEM_UNLOCKED_VALUE; +} + +/* + * unlock after reading + */ +static inline void __up_read(struct rw_semaphore *sem) +{ + int tmp; + + smp_wmb(); + tmp = atomic_sub_return(1,(atomic_t *)(&sem->count)); + if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0) + rwsem_wake(sem); +} + +/* + * unlock after writing + */ +static inline void __up_write(struct rw_semaphore *sem) +{ + smp_wmb(); + if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS, + (atomic_t *)(&sem->count)) < 0) + rwsem_wake(sem); +} + +/* + * implement atomic add functionality + */ +static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) +{ + atomic_add(delta, (atomic_t *)(&sem->count)); +} + +/* + * downgrade write lock to read lock + */ +static inline void __downgrade_write(struct rw_semaphore *sem) +{ + int tmp; + + smp_wmb(); + tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count)); + if (tmp < 0) + rwsem_downgrade_wake(sem); +} + +/* + * implement exchange and add functionality + */ +static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) +{ + smp_mb(); + return atomic_add_return(delta, (atomic_t 
*)(&sem->count)); +} + +#endif /* _XTENSA_RWSEM_XADD_H */ diff --git a/include/asm-xtensa/scatterlist.h b/include/asm-xtensa/scatterlist.h new file mode 100644 index 0000000..38a2b9a --- /dev/null +++ b/include/asm-xtensa/scatterlist.h @@ -0,0 +1,34 @@ +/* + * include/asm-xtensa/scatterlist.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_SCATTERLIST_H +#define _XTENSA_SCATTERLIST_H + +struct scatterlist { + struct page *page; + unsigned int offset; + dma_addr_t dma_address; + unsigned int length; +}; + +/* + * These macros should be used after a pci_map_sg call has been done + * to get bus addresses of each of the SG entries and their lengths. + * You should only work with the number of sg entries pci_map_sg + * returns, or alternatively stop on the first sg_dma_len(sg) which + * is 0. + */ +#define sg_dma_address(sg) ((sg)->dma_address) +#define sg_dma_len(sg) ((sg)->length) + + +#define ISA_DMA_THRESHOLD (~0UL) + +#endif /* _XTENSA_SCATTERLIST_H */ diff --git a/include/asm-xtensa/sections.h b/include/asm-xtensa/sections.h new file mode 100644 index 0000000..40b5191 --- /dev/null +++ b/include/asm-xtensa/sections.h @@ -0,0 +1,16 @@ +/* + * include/asm-xtensa/sections.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_SECTIONS_H +#define _XTENSA_SECTIONS_H + +#include <asm-generic/sections.h> + +#endif /* _XTENSA_SECTIONS_H */ diff --git a/include/asm-xtensa/segment.h b/include/asm-xtensa/segment.h new file mode 100644 index 0000000..a2eb547 --- /dev/null +++ b/include/asm-xtensa/segment.h @@ -0,0 +1,16 @@ +/* + * include/asm-xtensa/segment.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_SEGMENT_H +#define _XTENSA_SEGMENT_H + +#include <asm/uaccess.h> + +#endif /* _XTENSA_SEGEMENT_H */ diff --git a/include/asm-xtensa/semaphore.h b/include/asm-xtensa/semaphore.h new file mode 100644 index 0000000..c8a7574 --- /dev/null +++ b/include/asm-xtensa/semaphore.h @@ -0,0 +1,129 @@ +/* + * linux/include/asm-xtensa/semaphore.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. 
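The comment on sg_dma_address()/sg_dma_len() above describes the intended calling pattern. A minimal, era-appropriate sketch of how a PCI driver would consume these macros (illustrative only; the descriptor-programming step is left as a comment):

#include <linux/pci.h>

static void example_program_sg(struct pci_dev *pdev,
			       struct scatterlist *sg, int nents)
{
	int i, count;

	/* Only the first 'count' entries are valid after mapping. */
	count = pci_map_sg(pdev, sg, nents, PCI_DMA_TODEVICE);

	for (i = 0; i < count; i++) {
		dma_addr_t addr = sg_dma_address(&sg[i]);
		unsigned int len = sg_dma_len(&sg[i]);

		/* program one hardware descriptor with addr/len here */
		(void)addr;
		(void)len;
	}

	/* ... run the transfer, wait for completion ... */

	pci_unmap_sg(pdev, sg, nents, PCI_DMA_TODEVICE);
}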
+ */ + +#ifndef _XTENSA_SEMAPHORE_H +#define _XTENSA_SEMAPHORE_H + +#include <asm/atomic.h> +#include <asm/system.h> +#include <linux/wait.h> +#include <linux/rwsem.h> + +struct semaphore { + atomic_t count; + int sleepers; + wait_queue_head_t wait; +#if WAITQUEUE_DEBUG + long __magic; +#endif +}; + +#if WAITQUEUE_DEBUG +# define __SEM_DEBUG_INIT(name) \ + , (int)&(name).__magic +#else +# define __SEM_DEBUG_INIT(name) +#endif + +#define __SEMAPHORE_INITIALIZER(name,count) \ + { ATOMIC_INIT(count), \ + 0, \ + __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ + __SEM_DEBUG_INIT(name) } + +#define __MUTEX_INITIALIZER(name) \ + __SEMAPHORE_INITIALIZER(name, 1) + +#define __DECLARE_SEMAPHORE_GENERIC(name,count) \ + struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) + +#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) +#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0) + +extern inline void sema_init (struct semaphore *sem, int val) +{ +/* + * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val); + * + * i'd rather use the more flexible initialization above, but sadly + * GCC 2.7.2.3 emits a bogus warning. EGCS doesnt. Oh well. + */ + atomic_set(&sem->count, val); + init_waitqueue_head(&sem->wait); +#if WAITQUEUE_DEBUG + sem->__magic = (int)&sem->__magic; +#endif +} + +static inline void init_MUTEX (struct semaphore *sem) +{ + sema_init(sem, 1); +} + +static inline void init_MUTEX_LOCKED (struct semaphore *sem) +{ + sema_init(sem, 0); +} + +asmlinkage void __down(struct semaphore * sem); +asmlinkage int __down_interruptible(struct semaphore * sem); +asmlinkage int __down_trylock(struct semaphore * sem); +asmlinkage void __up(struct semaphore * sem); + +extern spinlock_t semaphore_wake_lock; + +extern __inline__ void down(struct semaphore * sem) +{ +#if WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); +#endif + + if (atomic_sub_return(1, &sem->count) < 0) + __down(sem); +} + +extern __inline__ int down_interruptible(struct semaphore * sem) +{ + int ret = 0; +#if WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); +#endif + + if (atomic_sub_return(1, &sem->count) < 0) + ret = __down_interruptible(sem); + return ret; +} + +extern __inline__ int down_trylock(struct semaphore * sem) +{ + int ret = 0; +#if WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); +#endif + + if (atomic_sub_return(1, &sem->count) < 0) + ret = __down_trylock(sem); + return ret; +} + +/* + * Note! This is subtle. We jump to wake people up only if + * the semaphore was negative (== somebody was waiting on it). + */ +extern __inline__ void up(struct semaphore * sem) +{ +#if WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); +#endif + if (atomic_add_return(1, &sem->count) <= 0) + __up(sem); +} + +#endif /* _XTENSA_SEMAPHORE_H */ diff --git a/include/asm-xtensa/sembuf.h b/include/asm-xtensa/sembuf.h new file mode 100644 index 0000000..2d26c47 --- /dev/null +++ b/include/asm-xtensa/sembuf.h @@ -0,0 +1,44 @@ +/* + * include/asm-xtensa/sembuf.h + * + * The semid64_ds structure for Xtensa architecture. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + * + * Note extra padding because this structure is passed back and forth + * between kernel and user space. 
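For context, the down()/up() primitives defined in this semaphore.h are used by driver code roughly as follows (a minimal sketch, not code from the commit):

#include <linux/errno.h>
#include <asm/semaphore.h>

static DECLARE_MUTEX(example_sem);	/* binary semaphore, count == 1 */

static int example_update(void)
{
	/* Sleeps until available; not usable from interrupt context. */
	if (down_interruptible(&example_sem))
		return -ERESTARTSYS;

	/* ... touch the data guarded by example_sem ... */

	up(&example_sem);
	return 0;
}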
+ * + * Pad space is left for: + * - 64-bit time_t to solve y2038 problem + * - 2 miscellaneous 32-bit values + * + */ + +#ifndef _XTENSA_SEMBUF_H +#define _XTENSA_SEMBUF_H + +#include <asm/byteorder.h> + +struct semid64_ds { + struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ +#if XCHAL_HAVE_LE + __kernel_time_t sem_otime; /* last semop time */ + unsigned long __unused1; + __kernel_time_t sem_ctime; /* last change time */ + unsigned long __unused2; +#else + unsigned long __unused1; + __kernel_time_t sem_otime; /* last semop time */ + unsigned long __unused2; + __kernel_time_t sem_ctime; /* last change time */ +#endif + unsigned long sem_nsems; /* no. of semaphores in array */ + unsigned long __unused3; + unsigned long __unused4; +}; + +#endif /* __ASM_XTENSA_SEMBUF_H */ diff --git a/include/asm-xtensa/serial.h b/include/asm-xtensa/serial.h new file mode 100644 index 0000000..ec04114 --- /dev/null +++ b/include/asm-xtensa/serial.h @@ -0,0 +1,18 @@ +/* + * include/asm-xtensa/serial.h + * + * Configuration details for 8250, 16450, 16550, etc. serial ports + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_SERIAL_H +#define _XTENSA_SERIAL_H + +#include <asm/platform/serial.h> + +#endif /* _XTENSA_SERIAL_H */ diff --git a/include/asm-xtensa/setup.h b/include/asm-xtensa/setup.h new file mode 100644 index 0000000..e363652 --- /dev/null +++ b/include/asm-xtensa/setup.h @@ -0,0 +1,16 @@ +/* + * include/asm-xtensa/setup.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_SETUP_H +#define _XTENSA_SETUP_H + +#define COMMAND_LINE_SIZE 256 + +#endif diff --git a/include/asm-xtensa/shmbuf.h b/include/asm-xtensa/shmbuf.h new file mode 100644 index 0000000..a30b81a --- /dev/null +++ b/include/asm-xtensa/shmbuf.h @@ -0,0 +1,50 @@ +/* + * include/asm-xtensa/shmbuf.h + * + * The shmid64_ds structure for Xtensa architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. + * + * Pad space is left for: + * - 64-bit time_t to solve y2038 problem + * - 2 miscellaneous 32-bit values + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_SHMBUF_H +#define _XTENSA_SHMBUF_H + +struct shmid64_ds { + struct ipc64_perm shm_perm; /* operation perms */ + size_t shm_segsz; /* size of segment (bytes) */ + __kernel_time_t shm_atime; /* last attach time */ + unsigned long __unused1; + __kernel_time_t shm_dtime; /* last detach time */ + unsigned long __unused2; + __kernel_time_t shm_ctime; /* last change time */ + unsigned long __unused3; + __kernel_pid_t shm_cpid; /* pid of creator */ + __kernel_pid_t shm_lpid; /* pid of last operator */ + unsigned long shm_nattch; /* no. 
of current attaches */ + unsigned long __unused4; + unsigned long __unused5; +}; + +struct shminfo64 { + unsigned long shmmax; + unsigned long shmmin; + unsigned long shmmni; + unsigned long shmseg; + unsigned long shmall; + unsigned long __unused1; + unsigned long __unused2; + unsigned long __unused3; + unsigned long __unused4; +}; + +#endif /* _XTENSA_SHMBUF_H */ diff --git a/include/asm-xtensa/shmparam.h b/include/asm-xtensa/shmparam.h new file mode 100644 index 0000000..d3b65bf --- /dev/null +++ b/include/asm-xtensa/shmparam.h @@ -0,0 +1,23 @@ +/* + * include/asm-xtensa/shmparam.h + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file "COPYING" in the main directory of + * this archive for more details. + */ + +#ifndef _XTENSA_SHMPARAM_H +#define _XTENSA_SHMPARAM_H + +#include <asm/processor.h> + +/* + * Xtensa can have variable size caches, and if + * the size of single way is larger than the page size, + * then we have to start worrying about cache aliasing + * problems. + */ + +#define SHMLBA ((PAGE_SIZE > DCACHE_WAY_SIZE)? PAGE_SIZE : DCACHE_WAY_SIZE) + +#endif /* _XTENSA_SHMPARAM_H */ diff --git a/include/asm-xtensa/sigcontext.h b/include/asm-xtensa/sigcontext.h new file mode 100644 index 0000000..a751772 --- /dev/null +++ b/include/asm-xtensa/sigcontext.h @@ -0,0 +1,44 @@ +/* + * include/asm-xtensa/sigcontext.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2003 Tensilica Inc. + */ + +#ifndef _XTENSA_SIGCONTEXT_H +#define _XTENSA_SIGCONTEXT_H + +#define _ASMLANGUAGE +#include <asm/processor.h> +#include <asm/coprocessor.h> + + +struct _cpstate { + unsigned char _cpstate[XTENSA_CP_EXTRA_SIZE]; +} __attribute__ ((aligned (XTENSA_CP_EXTRA_ALIGN))); + + +struct sigcontext { + unsigned long oldmask; + + /* CPU registers */ + unsigned long sc_pc; + unsigned long sc_ps; + unsigned long sc_wmask; + unsigned long sc_windowbase; + unsigned long sc_windowstart; + unsigned long sc_lbeg; + unsigned long sc_lend; + unsigned long sc_lcount; + unsigned long sc_sar; + unsigned long sc_depc; + unsigned long sc_dareg0; + unsigned long sc_treg[4]; + unsigned long sc_areg[XCHAL_NUM_AREGS]; + struct _cpstate *sc_cpstate; +}; + +#endif /* __ASM_XTENSA_SIGCONTEXT_H */ diff --git a/include/asm-xtensa/siginfo.h b/include/asm-xtensa/siginfo.h new file mode 100644 index 0000000..44f0ae7 --- /dev/null +++ b/include/asm-xtensa/siginfo.h @@ -0,0 +1,16 @@ +/* + * include/asm-xtensa/processor.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_SIGINFO_H +#define _XTENSA_SIGINFO_H + +#include <asm-generic/siginfo.h> + +#endif /* _XTENSA_SIGINFO_H */ diff --git a/include/asm-xtensa/signal.h b/include/asm-xtensa/signal.h new file mode 100644 index 0000000..5d6fc9c --- /dev/null +++ b/include/asm-xtensa/signal.h @@ -0,0 +1,187 @@ +/* + * include/asm-xtensa/signal.h + * + * Swiped from SH. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. 
+ */ + +#ifndef _XTENSA_SIGNAL_H +#define _XTENSA_SIGNAL_H + + +#define _NSIG 64 +#define _NSIG_BPW 32 +#define _NSIG_WORDS (_NSIG / _NSIG_BPW) + +#ifndef __ASSEMBLY__ + +#include <linux/types.h> + +/* Avoid too many header ordering problems. */ +struct siginfo; +typedef unsigned long old_sigset_t; /* at least 32 bits */ +typedef struct { + unsigned long sig[_NSIG_WORDS]; +} sigset_t; + +#endif + +#define SIGHUP 1 +#define SIGINT 2 +#define SIGQUIT 3 +#define SIGILL 4 +#define SIGTRAP 5 +#define SIGABRT 6 +#define SIGIOT 6 +#define SIGBUS 7 +#define SIGFPE 8 +#define SIGKILL 9 +#define SIGUSR1 10 +#define SIGSEGV 11 +#define SIGUSR2 12 +#define SIGPIPE 13 +#define SIGALRM 14 +#define SIGTERM 15 +#define SIGSTKFLT 16 +#define SIGCHLD 17 +#define SIGCONT 18 +#define SIGSTOP 19 +#define SIGTSTP 20 +#define SIGTTIN 21 +#define SIGTTOU 22 +#define SIGURG 23 +#define SIGXCPU 24 +#define SIGXFSZ 25 +#define SIGVTALRM 26 +#define SIGPROF 27 +#define SIGWINCH 28 +#define SIGIO 29 +#define SIGPOLL SIGIO +/* #define SIGLOST 29 */ +#define SIGPWR 30 +#define SIGSYS 31 +#define SIGUNUSED 31 + +/* These should not be considered constants from userland. */ +#define SIGRTMIN 32 +#define SIGRTMAX (_NSIG-1) + +/* + * SA_FLAGS values: + * + * SA_ONSTACK indicates that a registered stack_t will be used. + * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the + * SA_RESTART flag to get restarting signals (which were the default long ago) + * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. + * SA_RESETHAND clears the handler when the signal is delivered. + * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. + * SA_NODEFER prevents the current signal from being masked in the handler. + * + * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single + * Unix names RESETHAND and NODEFER respectively. + */ +#define SA_NOCLDSTOP 0x00000001 +#define SA_NOCLDWAIT 0x00000002 /* not supported yet */ +#define SA_SIGINFO 0x00000004 +#define SA_ONSTACK 0x08000000 +#define SA_RESTART 0x10000000 +#define SA_NODEFER 0x40000000 +#define SA_RESETHAND 0x80000000 + +#define SA_NOMASK SA_NODEFER +#define SA_ONESHOT SA_RESETHAND +#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */ + +#define SA_RESTORER 0x04000000 + +/* + * sigaltstack controls + */ +#define SS_ONSTACK 1 +#define SS_DISABLE 2 + +#define MINSIGSTKSZ 2048 +#define SIGSTKSZ 8192 + +#ifndef __ASSEMBLY__ +#ifdef __KERNEL__ + +/* + * These values of sa_flags are used only by the kernel as part of the + * irq handling routines. + * + * SA_INTERRUPT is also used by the irq handling routines. + * SA_SHIRQ is for shared interrupt support on PCI and EISA. + */ +#define SA_PROBE SA_ONESHOT +#define SA_SAMPLE_RANDOM SA_RESTART +#define SA_SHIRQ 0x04000000 +#endif + +#define SIG_BLOCK 0 /* for blocking signals */ +#define SIG_UNBLOCK 1 /* for unblocking signals */ +#define SIG_SETMASK 2 /* for setting the signal mask */ + +/* Type of a signal handler. 
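The SA_* flag description above corresponds to the usual userspace sigaction(2) call. A hedged sketch, with a placeholder handler body:

#include <signal.h>
#include <string.h>

static void on_child(int signo)
{
	/* reap exited children with waitpid(..., WNOHANG) here */
	(void)signo;
}

int install_sigchld_handler(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_child;
	sigemptyset(&sa.sa_mask);	/* block nothing extra in the handler */
	sa.sa_flags = SA_RESTART | SA_NOCLDSTOP;

	return sigaction(SIGCHLD, &sa, NULL);
}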
*/ +typedef void (*__sighandler_t)(int); + +#define SIG_DFL ((__sighandler_t)0) /* default signal handling */ +#define SIG_IGN ((__sighandler_t)1) /* ignore signal */ +#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */ + +#ifdef __KERNEL__ +struct old_sigaction { + __sighandler_t sa_handler; + old_sigset_t sa_mask; + unsigned long sa_flags; + void (*sa_restorer)(void); +}; + +struct sigaction { + __sighandler_t sa_handler; + unsigned long sa_flags; + void (*sa_restorer)(void); + sigset_t sa_mask; /* mask last for extensibility */ +}; + +struct k_sigaction { + struct sigaction sa; +}; + +#else + +/* Here we must cater to libcs that poke about in kernel headers. */ + +struct sigaction { + union { + __sighandler_t _sa_handler; + void (*_sa_sigaction)(int, struct siginfo *, void *); + } _u; + sigset_t sa_mask; + unsigned long sa_flags; + void (*sa_restorer)(void); +}; + +#define sa_handler _u._sa_handler +#define sa_sigaction _u._sa_sigaction + +#endif /* __KERNEL__ */ + +typedef struct sigaltstack { + void *ss_sp; + int ss_flags; + size_t ss_size; +} stack_t; + +#ifdef __KERNEL__ +#include <asm/sigcontext.h> +#define ptrace_signal_deliver(regs, cookie) do { } while (0) + +#endif /* __KERNEL__ */ +#endif /* __ASSEMBLY__ */ +#endif /* _XTENSA_SIGNAL_H */ diff --git a/include/asm-xtensa/smp.h b/include/asm-xtensa/smp.h new file mode 100644 index 0000000..83c569e --- /dev/null +++ b/include/asm-xtensa/smp.h @@ -0,0 +1,27 @@ +/* + * include/asm-xtensa/smp.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_SMP_H +#define _XTENSA_SMP_H + +extern struct xtensa_cpuinfo boot_cpu_data; + +#define cpu_data (&boot_cpu_data) +#define current_cpu_data boot_cpu_data + +struct xtensa_cpuinfo { + unsigned long *pgd_cache; + unsigned long *pte_cache; + unsigned long pgtable_cache_sz; +}; + +#define cpu_logical_map(cpu) (cpu) + +#endif /* _XTENSA_SMP_H */ diff --git a/include/asm-xtensa/socket.h b/include/asm-xtensa/socket.h new file mode 100644 index 0000000..daccd05 --- /dev/null +++ b/include/asm-xtensa/socket.h @@ -0,0 +1,61 @@ +/* + * include/asm-xtensa/socket.h + * + * Copied from i386. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ + +#ifndef _XTENSA_SOCKET_H +#define _XTENSA_SOCKET_H + +#include <asm/sockios.h> + +/* For setsockoptions(2) */ +#define SOL_SOCKET 1 + +#define SO_DEBUG 1 +#define SO_REUSEADDR 2 +#define SO_TYPE 3 +#define SO_ERROR 4 +#define SO_DONTROUTE 5 +#define SO_BROADCAST 6 +#define SO_SNDBUF 7 +#define SO_RCVBUF 8 +#define SO_KEEPALIVE 9 +#define SO_OOBINLINE 10 +#define SO_NO_CHECK 11 +#define SO_PRIORITY 12 +#define SO_LINGER 13 +#define SO_BSDCOMPAT 14 +/* To add :#define SO_REUSEPORT 15 */ +#define SO_PASSCRED 16 +#define SO_PEERCRED 17 +#define SO_RCVLOWAT 18 +#define SO_SNDLOWAT 19 +#define SO_RCVTIMEO 20 +#define SO_SNDTIMEO 21 + +/* Security levels - as per NRL IPv6 - don't actually do anything */ + +#define SO_SECURITY_AUTHENTICATION 22 +#define SO_SECURITY_ENCRYPTION_TRANSPORT 23 +#define SO_SECURITY_ENCRYPTION_NETWORK 24 + +#define SO_BINDTODEVICE 25 + +/* Socket filtering */ + +#define SO_ATTACH_FILTER 26 +#define SO_DETACH_FILTER 27 + +#define SO_PEERNAME 28 +#define SO_TIMESTAMP 29 +#define SCM_TIMESTAMP SO_TIMESTAMP + +#define SO_ACCEPTCONN 30 +#define SO_PEERSEC 31 + +#endif /* _XTENSA_SOCKET_H */ diff --git a/include/asm-xtensa/sockios.h b/include/asm-xtensa/sockios.h new file mode 100644 index 0000000..20d2ba1 --- /dev/null +++ b/include/asm-xtensa/sockios.h @@ -0,0 +1,30 @@ +/* + * include/asm-xtensa/sockios.h + * + * Socket-level I/O control calls. Copied from MIPS. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1995 by Ralf Baechle + * Copyright (C) 2001 Tensilica Inc. + */ + +#ifndef _XTENSA_SOCKIOS_H +#define _XTENSA_SOCKIOS_H + +#include <asm/ioctl.h> + +/* Socket-level I/O control calls. */ + +#define FIOGETOWN _IOR('f', 123, int) +#define FIOSETOWN _IOW('f', 124, int) + +#define SIOCATMARK _IOR('s', 7, int) +#define SIOCSPGRP _IOW('s', 8, pid_t) +#define SIOCGPGRP _IOR('s', 9, pid_t) + +#define SIOCGSTAMP 0x8906 /* Get stamp - linux-specific */ + +#endif /* _XTENSA_SOCKIOS_H */ diff --git a/include/asm-xtensa/spinlock.h b/include/asm-xtensa/spinlock.h new file mode 100644 index 0000000..8ff2364 --- /dev/null +++ b/include/asm-xtensa/spinlock.h @@ -0,0 +1,16 @@ +/* + * include/asm-xtensa/spinlock.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_SPINLOCK_H +#define _XTENSA_SPINLOCK_H + +#include <linux/spinlock.h> + +#endif /* _XTENSA_SPINLOCK_H */ diff --git a/include/asm-xtensa/stat.h b/include/asm-xtensa/stat.h new file mode 100644 index 0000000..2f4662f --- /dev/null +++ b/include/asm-xtensa/stat.h @@ -0,0 +1,105 @@ +/* + * include/asm-xtensa/stat.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. 
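These option numbers are what setsockopt(2) and getsockopt(2) ultimately see. A trivial userspace sketch using SO_REUSEADDR:

#include <sys/types.h>
#include <sys/socket.h>

int make_addr_reusable(int fd)
{
	int one = 1;

	/* SOL_SOCKET and SO_REUSEADDR resolve to the constants above. */
	return setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
}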
+ */ + +#ifndef _XTENSA_STAT_H +#define _XTENSA_STAT_H + +#include <linux/types.h> + +struct __old_kernel_stat { + unsigned short st_dev; + unsigned short st_ino; + unsigned short st_mode; + unsigned short st_nlink; + unsigned short st_uid; + unsigned short st_gid; + unsigned short st_rdev; + unsigned long st_size; + unsigned long st_atime; + unsigned long st_mtime; + unsigned long st_ctime; +}; + +#define STAT_HAVE_NSEC 1 + +struct stat { + unsigned short st_dev; + unsigned short __pad1; + unsigned long st_ino; + unsigned short st_mode; + unsigned short st_nlink; + unsigned short st_uid; + unsigned short st_gid; + unsigned short st_rdev; + unsigned short __pad2; + unsigned long st_size; + unsigned long st_blksize; + unsigned long st_blocks; + unsigned long st_atime; + unsigned long st_atime_nsec; + unsigned long st_mtime; + unsigned long st_mtime_nsec; + unsigned long st_ctime; + unsigned long st_ctime_nsec; + unsigned long __unused4; + unsigned long __unused5; +}; + +/* This matches struct stat64 in glibc-2.2.3. */ + +struct stat64 { +#ifdef __XTENSA_EL__ + unsigned short st_dev; /* Device */ + unsigned char __pad0[10]; +#else + unsigned char __pad0[6]; + unsigned short st_dev; + unsigned char __pad1[2]; +#endif + +#define STAT64_HAS_BROKEN_ST_INO 1 + unsigned long __st_ino; /* 32bit file serial number. */ + + unsigned int st_mode; /* File mode. */ + unsigned int st_nlink; /* Link count. */ + unsigned int st_uid; /* User ID of the file's owner. */ + unsigned int st_gid; /* Group ID of the file's group. */ + +#ifdef __XTENSA_EL__ + unsigned short st_rdev; /* Device number, if device. */ + unsigned char __pad3[10]; +#else + unsigned char __pad2[6]; + unsigned short st_rdev; + unsigned char __pad3[2]; +#endif + + long long int st_size; /* Size of file, in bytes. */ + long int st_blksize; /* Optimal block size for I/O. */ + +#ifdef __XTENSA_EL__ + unsigned long st_blocks; /* Number 512-byte blocks allocated. */ + unsigned long __pad4; +#else + unsigned long __pad4; + unsigned long st_blocks; +#endif + + unsigned long __pad5; + long int st_atime; /* Time of last access. */ + unsigned long st_atime_nsec; + long int st_mtime; /* Time of last modification. */ + unsigned long st_mtime_nsec; + long int st_ctime; /* Time of last status change. */ + unsigned long st_ctime_nsec; + unsigned long long int st_ino; /* File serial number. */ +}; + +#endif /* _XTENSA_STAT_H */ diff --git a/include/asm-xtensa/statfs.h b/include/asm-xtensa/statfs.h new file mode 100644 index 0000000..9c3d1a2 --- /dev/null +++ b/include/asm-xtensa/statfs.h @@ -0,0 +1,17 @@ +/* + * include/asm-xtensa/statfs.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_STATFS_H +#define _XTENSA_STATFS_H + +#include <asm-generic/statfs.h> + +#endif /* _XTENSA_STATFS_H */ + diff --git a/include/asm-xtensa/string.h b/include/asm-xtensa/string.h new file mode 100644 index 0000000..3f81b27 --- /dev/null +++ b/include/asm-xtensa/string.h @@ -0,0 +1,124 @@ +/* + * include/asm-xtensa/string.h + * + * These trivial string functions are considered part of the public domain. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +/* We should optimize these. 
See arch/xtensa/lib/strncpy_user.S */ + +#ifndef _XTENSA_STRING_H +#define _XTENSA_STRING_H + +#define __HAVE_ARCH_STRCPY +extern __inline__ char *strcpy(char *__dest, const char *__src) +{ + register char *__xdest = __dest; + unsigned long __dummy; + + __asm__ __volatile__("1:\n\t" + "l8ui %2, %1, 0\n\t" + "s8i %2, %0, 0\n\t" + "addi %1, %1, 1\n\t" + "addi %0, %0, 1\n\t" + "bnez %2, 1b\n\t" + : "=r" (__dest), "=r" (__src), "=&r" (__dummy) + : "0" (__dest), "1" (__src) + : "memory"); + + return __xdest; +} + +#define __HAVE_ARCH_STRNCPY +extern __inline__ char *strncpy(char *__dest, const char *__src, size_t __n) +{ + register char *__xdest = __dest; + unsigned long __dummy; + + if (__n == 0) + return __xdest; + + __asm__ __volatile__( + "1:\n\t" + "l8ui %2, %1, 0\n\t" + "s8i %2, %0, 0\n\t" + "addi %1, %1, 1\n\t" + "addi %0, %0, 1\n\t" + "beqz %2, 2f\n\t" + "bne %1, %5, 1b\n" + "2:" + : "=r" (__dest), "=r" (__src), "=&r" (__dummy) + : "0" (__dest), "1" (__src), "r" (__src+__n) + : "memory"); + + return __xdest; +} + +#define __HAVE_ARCH_STRCMP +extern __inline__ int strcmp(const char *__cs, const char *__ct) +{ + register int __res; + unsigned long __dummy; + + __asm__ __volatile__( + "1:\n\t" + "l8ui %3, %1, 0\n\t" + "addi %1, %1, 1\n\t" + "l8ui %2, %0, 0\n\t" + "addi %0, %0, 1\n\t" + "beqz %2, 2f\n\t" + "beq %2, %3, 1b\n" + "2:\n\t" + "sub %2, %3, %2" + : "=r" (__cs), "=r" (__ct), "=&r" (__res), "=&r" (__dummy) + : "0" (__cs), "1" (__ct)); + + return __res; +} + +#define __HAVE_ARCH_STRNCMP +extern __inline__ int strncmp(const char *__cs, const char *__ct, size_t __n) +{ + register int __res; + unsigned long __dummy; + + __asm__ __volatile__( + "mov %2, %3\n" + "1:\n\t" + "beq %0, %6, 2f\n\t" + "l8ui %3, %1, 0\n\t" + "addi %1, %1, 1\n\t" + "l8ui %2, %0, 0\n\t" + "addi %0, %0, 1\n\t" + "beqz %2, 2f\n\t" + "beqz %3, 2f\n\t" + "beq %2, %3, 1b\n" + "2:\n\t" + "sub %2, %3, %2" + : "=r" (__cs), "=r" (__ct), "=&r" (__res), "=&r" (__dummy) + : "0" (__cs), "1" (__ct), "r" (__cs+__n)); + + return __res; +} + +#define __HAVE_ARCH_MEMSET +extern void *memset(void *__s, int __c, size_t __count); + +#define __HAVE_ARCH_MEMCPY +extern void *memcpy(void *__to, __const__ void *__from, size_t __n); + +#define __HAVE_ARCH_MEMMOVE +extern void *memmove(void *__dest, __const__ void *__src, size_t __n); + +/* Don't build bcopy at all ... */ +#define __HAVE_ARCH_BCOPY + +#define __HAVE_ARCH_MEMSCAN +#define memscan memchr + +#endif /* _XTENSA_STRING_H */ diff --git a/include/asm-xtensa/system.h b/include/asm-xtensa/system.h new file mode 100644 index 0000000..690fe32 --- /dev/null +++ b/include/asm-xtensa/system.h @@ -0,0 +1,252 @@ +/* + * include/asm-xtensa/system.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. 
+ */ + +#ifndef _XTENSA_SYSTEM_H +#define _XTENSA_SYSTEM_H + +#include <linux/config.h> +#include <linux/stringify.h> + +#include <asm/processor.h> + +/* interrupt control */ + +#define local_save_flags(x) \ + __asm__ __volatile__ ("rsr %0,"__stringify(PS) : "=a" (x)); +#define local_irq_restore(x) do { \ + __asm__ __volatile__ ("wsr %0, "__stringify(PS)" ; rsync" \ + :: "a" (x) : "memory"); } while(0); +#define local_irq_save(x) do { \ + __asm__ __volatile__ ("rsil %0, "__stringify(LOCKLEVEL) \ + : "=a" (x) :: "memory");} while(0); + +static inline void local_irq_disable(void) +{ + unsigned long flags; + __asm__ __volatile__ ("rsil %0, "__stringify(LOCKLEVEL) + : "=a" (flags) :: "memory"); +} +static inline void local_irq_enable(void) +{ + unsigned long flags; + __asm__ __volatile__ ("rsil %0, 0" : "=a" (flags) :: "memory"); + +} + +static inline int irqs_disabled(void) +{ + unsigned long flags; + local_save_flags(flags); + return flags & 0xf; +} + +#define RSR_CPENABLE(x) do { \ + __asm__ __volatile__("rsr %0," __stringify(CPENABLE) : "=a" (x)); \ + } while(0); +#define WSR_CPENABLE(x) do { \ + __asm__ __volatile__("wsr %0," __stringify(CPENABLE)";rsync" \ + :: "a" (x));} while(0); + +#define clear_cpenable() __clear_cpenable() + +extern __inline__ void __clear_cpenable(void) +{ +#if XCHAL_HAVE_CP + unsigned long i = 0; + WSR_CPENABLE(i); +#endif +} + +extern __inline__ void enable_coprocessor(int i) +{ +#if XCHAL_HAVE_CP + int cp; + RSR_CPENABLE(cp); + cp |= 1 << i; + WSR_CPENABLE(cp); +#endif +} + +extern __inline__ void disable_coprocessor(int i) +{ +#if XCHAL_HAVE_CP + int cp; + RSR_CPENABLE(cp); + cp &= ~(1 << i); + WSR_CPENABLE(cp); +#endif +} + +#define smp_read_barrier_depends() do { } while(0) +#define read_barrier_depends() do { } while(0) + +#define mb() barrier() +#define rmb() mb() +#define wmb() mb() + +#ifdef CONFIG_SMP +#error smp_* not defined +#else +#define smp_mb() barrier() +#define smp_rmb() barrier() +#define smp_wmb() barrier() +#endif + +#define set_mb(var, value) do { var = value; mb(); } while (0) +#define set_wmb(var, value) do { var = value; wmb(); } while (0) + +#if !defined (__ASSEMBLY__) + +/* * switch_to(n) should switch tasks to task nr n, first + * checking that n isn't the current task, in which case it does nothing. + */ +extern void *_switch_to(void *last, void *next); + +#endif /* __ASSEMBLY__ */ + +#define prepare_to_switch() do { } while(0) + +#define switch_to(prev,next,last) \ +do { \ + clear_cpenable(); \ + (last) = _switch_to(prev, next); \ +} while(0) + +/* + * cmpxchg + */ + +extern __inline__ unsigned long +__cmpxchg_u32(volatile int *p, int old, int new) +{ + __asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t" + "l32i %0, %1, 0 \n\t" + "bne %0, %2, 1f \n\t" + "s32i %3, %1, 0 \n\t" + "1: \n\t" + "wsr a15, "__stringify(PS)" \n\t" + "rsync \n\t" + : "=&a" (old) + : "a" (p), "a" (old), "r" (new) + : "a15", "memory"); + return old; +} +/* This function doesn't exist, so you'll get a linker error + * if something tries to do an invalid cmpxchg(). 
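A short sketch of how the cmpxchg() macro defined above is typically used, the classic compare-and-swap retry loop (illustrative only; on this implementation the primitive works by raising the interrupt level around the load/store pair):

static void example_store_max(int *p, int val)
{
	int old;

	/* Retry until nothing else changed *p between the read and the swap. */
	do {
		old = *p;
		if (old >= val)
			return;
	} while (cmpxchg(p, old, val) != old);
}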
*/ + +extern void __cmpxchg_called_with_bad_pointer(void); + +static __inline__ unsigned long +__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) +{ + switch (size) { + case 4: return __cmpxchg_u32(ptr, old, new); + default: __cmpxchg_called_with_bad_pointer(); + return old; + } +} + +#define cmpxchg(ptr,o,n) \ + ({ __typeof__(*(ptr)) _o_ = (o); \ + __typeof__(*(ptr)) _n_ = (n); \ + (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ + (unsigned long)_n_, sizeof (*(ptr))); \ + }) + + + + +/* + * xchg_u32 + * + * Note that a15 is used here because the register allocation + * done by the compiler is not guaranteed and a window overflow + * may not occur between the rsil and wsr instructions. By using + * a15 in the rsil, the machine is guaranteed to be in a state + * where no register reference will cause an overflow. + */ + +extern __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val) +{ + unsigned long tmp; + __asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t" + "l32i %0, %1, 0 \n\t" + "s32i %2, %1, 0 \n\t" + "wsr a15, "__stringify(PS)" \n\t" + "rsync \n\t" + : "=&a" (tmp) + : "a" (m), "a" (val) + : "a15", "memory"); + return tmp; +} + +#define tas(ptr) (xchg((ptr),1)) + +#if ( __XCC__ == 1 ) + +/* xt-xcc processes __inline__ differently than xt-gcc and decides to + * insert an out-of-line copy of function __xchg. This presents the + * unresolved symbol at link time of __xchg_called_with_bad_pointer, + * even though such a function would never be called at run-time. + * xt-gcc always inlines __xchg, and optimizes away the undefined + * bad_pointer function. + */ + +#define xchg(ptr,x) xchg_u32(ptr,x) + +#else /* assume xt-gcc */ + +#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) + +/* + * This only works if the compiler isn't horribly bad at optimizing. + * gcc-2.5.8 reportedly can't handle this, but I define that one to + * be dead anyway. + */ + +extern void __xchg_called_with_bad_pointer(void); + +static __inline__ unsigned long +__xchg(unsigned long x, volatile void * ptr, int size) +{ + switch (size) { + case 4: + return xchg_u32(ptr, x); + } + __xchg_called_with_bad_pointer(); + return x; +} + +#endif + +extern void set_except_vector(int n, void *addr); + +static inline void spill_registers(void) +{ + unsigned int a0, ps; + + __asm__ __volatile__ ( + "movi a14," __stringify (PS_EXCM_MASK) " | 1\n\t" + "mov a12, a0\n\t" + "rsr a13," __stringify(SAR) "\n\t" + "xsr a14," __stringify(PS) "\n\t" + "movi a0, _spill_registers\n\t" + "rsync\n\t" + "callx0 a0\n\t" + "mov a0, a12\n\t" + "wsr a13," __stringify(SAR) "\n\t" + "wsr a14," __stringify(PS) "\n\t" + :: "a" (&a0), "a" (&ps) + : "a2", "a3", "a12", "a13", "a14", "a15", "memory"); +} + +#define arch_align_stack(x) (x) + +#endif /* _XTENSA_SYSTEM_H */ diff --git a/include/asm-xtensa/termbits.h b/include/asm-xtensa/termbits.h new file mode 100644 index 0000000..c780593 --- /dev/null +++ b/include/asm-xtensa/termbits.h @@ -0,0 +1,194 @@ +/* + * include/asm-xtensa/termbits.h + * + * Copied from SH. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. 
+ */ + +#ifndef _XTENSA_TERMBITS_H +#define _XTENSA_TERMBITS_H + + +#include <linux/posix_types.h> + +typedef unsigned char cc_t; +typedef unsigned int speed_t; +typedef unsigned int tcflag_t; + +#define NCCS 19 +struct termios { + tcflag_t c_iflag; /* input mode flags */ + tcflag_t c_oflag; /* output mode flags */ + tcflag_t c_cflag; /* control mode flags */ + tcflag_t c_lflag; /* local mode flags */ + cc_t c_line; /* line discipline */ + cc_t c_cc[NCCS]; /* control characters */ +}; + +/* c_cc characters */ + +#define VINTR 0 +#define VQUIT 1 +#define VERASE 2 +#define VKILL 3 +#define VEOF 4 +#define VTIME 5 +#define VMIN 6 +#define VSWTC 7 +#define VSTART 8 +#define VSTOP 9 +#define VSUSP 10 +#define VEOL 11 +#define VREPRINT 12 +#define VDISCARD 13 +#define VWERASE 14 +#define VLNEXT 15 +#define VEOL2 16 + +/* c_iflag bits */ + +#define IGNBRK 0000001 +#define BRKINT 0000002 +#define IGNPAR 0000004 +#define PARMRK 0000010 +#define INPCK 0000020 +#define ISTRIP 0000040 +#define INLCR 0000100 +#define IGNCR 0000200 +#define ICRNL 0000400 +#define IUCLC 0001000 +#define IXON 0002000 +#define IXANY 0004000 +#define IXOFF 0010000 +#define IMAXBEL 0020000 +#define IUTF8 0040000 + +/* c_oflag bits */ + +#define OPOST 0000001 +#define OLCUC 0000002 +#define ONLCR 0000004 +#define OCRNL 0000010 +#define ONOCR 0000020 +#define ONLRET 0000040 +#define OFILL 0000100 +#define OFDEL 0000200 +#define NLDLY 0000400 +#define NL0 0000000 +#define NL1 0000400 +#define CRDLY 0003000 +#define CR0 0000000 +#define CR1 0001000 +#define CR2 0002000 +#define CR3 0003000 +#define TABDLY 0014000 +#define TAB0 0000000 +#define TAB1 0004000 +#define TAB2 0010000 +#define TAB3 0014000 +#define XTABS 0014000 +#define BSDLY 0020000 +#define BS0 0000000 +#define BS1 0020000 +#define VTDLY 0040000 +#define VT0 0000000 +#define VT1 0040000 +#define FFDLY 0100000 +#define FF0 0000000 +#define FF1 0100000 + +/* c_cflag bit meaning */ + +#define CBAUD 0010017 +#define B0 0000000 /* hang up */ +#define B50 0000001 +#define B75 0000002 +#define B110 0000003 +#define B134 0000004 +#define B150 0000005 +#define B200 0000006 +#define B300 0000007 +#define B600 0000010 +#define B1200 0000011 +#define B1800 0000012 +#define B2400 0000013 +#define B4800 0000014 +#define B9600 0000015 +#define B19200 0000016 +#define B38400 0000017 +#define EXTA B19200 +#define EXTB B38400 +#define CSIZE 0000060 +#define CS5 0000000 +#define CS6 0000020 +#define CS7 0000040 +#define CS8 0000060 +#define CSTOPB 0000100 +#define CREAD 0000200 +#define PARENB 0000400 +#define PARODD 0001000 +#define HUPCL 0002000 +#define CLOCAL 0004000 +#define CBAUDEX 0010000 +#define B57600 0010001 +#define B115200 0010002 +#define B230400 0010003 +#define B460800 0010004 +#define B500000 0010005 +#define B576000 0010006 +#define B921600 0010007 +#define B1000000 0010010 +#define B1152000 0010011 +#define B1500000 0010012 +#define B2000000 0010013 +#define B2500000 0010014 +#define B3000000 0010015 +#define B3500000 0010016 +#define B4000000 0010017 +#define CIBAUD 002003600000 /* input baud rate (not used) */ +#define CMSPAR 010000000000 /* mark or space (stick) parity */ +#define CRTSCTS 020000000000 /* flow control */ + +/* c_lflag bits */ + +#define ISIG 0000001 +#define ICANON 0000002 +#define XCASE 0000004 +#define ECHO 0000010 +#define ECHOE 0000020 +#define ECHOK 0000040 +#define ECHONL 0000100 +#define NOFLSH 0000200 +#define TOSTOP 0000400 +#define ECHOCTL 0001000 +#define ECHOPRT 0002000 +#define ECHOKE 0004000 +#define FLUSHO 0010000 +#define PENDIN 
0040000 +#define IEXTEN 0100000 + +/* tcflow() and TCXONC use these */ + +#define TCOOFF 0 +#define TCOON 1 +#define TCIOFF 2 +#define TCION 3 + +/* tcflush() and TCFLSH use these */ + +#define TCIFLUSH 0 +#define TCOFLUSH 1 +#define TCIOFLUSH 2 + +/* tcsetattr uses these */ + +#define TCSANOW 0 +#define TCSADRAIN 1 +#define TCSAFLUSH 2 + +#endif /* _XTENSA_TERMBITS_H */ diff --git a/include/asm-xtensa/termios.h b/include/asm-xtensa/termios.h new file mode 100644 index 0000000..83c6aed --- /dev/null +++ b/include/asm-xtensa/termios.h @@ -0,0 +1,122 @@ +/* + * include/asm-xtensa/termios.h + * + * Copied from SH. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_TERMIOS_H +#define _XTENSA_TERMIOS_H + +#include <asm/termbits.h> +#include <asm/ioctls.h> + +struct winsize { + unsigned short ws_row; + unsigned short ws_col; + unsigned short ws_xpixel; + unsigned short ws_ypixel; +}; + +#define NCC 8 +struct termio { + unsigned short c_iflag; /* input mode flags */ + unsigned short c_oflag; /* output mode flags */ + unsigned short c_cflag; /* control mode flags */ + unsigned short c_lflag; /* local mode flags */ + unsigned char c_line; /* line discipline */ + unsigned char c_cc[NCC]; /* control characters */ +}; + +/* Modem lines */ + +#define TIOCM_LE 0x001 +#define TIOCM_DTR 0x002 +#define TIOCM_RTS 0x004 +#define TIOCM_ST 0x008 +#define TIOCM_SR 0x010 +#define TIOCM_CTS 0x020 +#define TIOCM_CAR 0x040 +#define TIOCM_RNG 0x080 +#define TIOCM_DSR 0x100 +#define TIOCM_CD TIOCM_CAR +#define TIOCM_RI TIOCM_RNG +#define TIOCM_OUT1 0x2000 +#define TIOCM_OUT2 0x4000 +#define TIOCM_LOOP 0x8000 + +/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ + +/* Line disciplines */ + +#define N_TTY 0 +#define N_SLIP 1 +#define N_MOUSE 2 +#define N_PPP 3 +#define N_STRIP 4 +#define N_AX25 5 +#define N_X25 6 /* X.25 async */ +#define N_6PACK 7 +#define N_MASC 8 /* Reserved for Mobitex module <kaz@cafe.net> */ +#define N_R3964 9 /* Reserved for Simatic R3964 module */ +#define N_PROFIBUS_FDL 10 /* Reserved for Profibus <Dave@mvhi.com> */ +#define N_IRDA 11 /* Linux IR - http://irda.sourceforge.net/ */ +#define N_SMSBLOCK 12 /* SMS block mode - for talking to GSM data cards about SMS messages */ +#define N_HDLC 13 /* synchronous HDLC */ +#define N_SYNC_PPP 14 +#define N_HCI 15 /* Bluetooth HCI UART */ + +#ifdef __KERNEL__ + +/* intr=^C quit=^\ erase=del kill=^U + eof=^D vtime=\0 vmin=\1 sxtc=\0 + start=^Q stop=^S susp=^Z eol=\0 + reprint=^R discard=^U werase=^W lnext=^V + eol2=\0 +*/ +#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0" + +/* + * Translate a "termio" structure into a "termios". Ugh. + */ + +#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \ + unsigned short __tmp; \ + get_user(__tmp,&(termio)->x); \ + *(unsigned short *) &(termios)->x = __tmp; \ +} + +#define user_termio_to_kernel_termios(termios, termio) \ +({ \ + SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \ + SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \ + SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \ + SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \ + copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \ +}) + +/* + * Translate a "termios" structure into a "termio". Ugh. 
+ */ + +#define kernel_termios_to_user_termio(termio, termios) \ +({ \ + put_user((termios)->c_iflag, &(termio)->c_iflag); \ + put_user((termios)->c_oflag, &(termio)->c_oflag); \ + put_user((termios)->c_cflag, &(termio)->c_cflag); \ + put_user((termios)->c_lflag, &(termio)->c_lflag); \ + put_user((termios)->c_line, &(termio)->c_line); \ + copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ +}) + +#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios)) +#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios)) + +#endif /* __KERNEL__ */ + +#endif /* _XTENSA_TERMIOS_H */ diff --git a/include/asm-xtensa/thread_info.h b/include/asm-xtensa/thread_info.h new file mode 100644 index 0000000..af208d4 --- /dev/null +++ b/include/asm-xtensa/thread_info.h @@ -0,0 +1,146 @@ +/* + * include/asm-xtensa/thread_info.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_THREAD_INFO_H +#define _XTENSA_THREAD_INFO_H + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ +# include <asm/processor.h> +#endif + +/* + * low level task data that entry.S needs immediate access to + * - this struct should fit entirely inside of one cache line + * - this struct shares the supervisor stack pages + * - if the contents of this structure are changed, the assembly constants + * must also be changed + */ + +#ifndef __ASSEMBLY__ + +struct thread_info { + struct task_struct *task; /* main task structure */ + struct exec_domain *exec_domain; /* execution domain */ + unsigned long flags; /* low level flags */ + unsigned long status; /* thread-synchronous flags */ + __u32 cpu; /* current CPU */ + __s32 preempt_count; /* 0 => preemptable,< 0 => BUG*/ + + mm_segment_t addr_limit; /* thread address space */ + struct restart_block restart_block; + + +}; + +#else /* !__ASSEMBLY__ */ + +/* offsets into the thread_info struct for assembly code access */ +#define TI_TASK 0x00000000 +#define TI_EXEC_DOMAIN 0x00000004 +#define TI_FLAGS 0x00000008 +#define TI_STATUS 0x0000000C +#define TI_CPU 0x00000010 +#define TI_PRE_COUNT 0x00000014 +#define TI_ADDR_LIMIT 0x00000018 +#define TI_RESTART_BLOCK 0x000001C + +#endif + +#define PREEMPT_ACTIVE 0x10000000 + +/* + * macros/functions for gaining access to the thread information structure + * + * preempt_count needs to be 1 initially, until the scheduler is functional. 
+ */ + +#ifndef __ASSEMBLY__ + +#define INIT_THREAD_INFO(tsk) \ +{ \ + .task = &tsk, \ + .exec_domain = &default_exec_domain, \ + .flags = 0, \ + .cpu = 0, \ + .preempt_count = 1, \ + .addr_limit = KERNEL_DS, \ + .restart_block = { \ + .fn = do_no_restart_syscall, \ + }, \ +} + +#define init_thread_info (init_thread_union.thread_info) +#define init_stack (init_thread_union.stack) + +/* how to get the thread information struct from C */ +static inline struct thread_info *current_thread_info(void) +{ + struct thread_info *ti; + __asm__("extui %0,a1,0,13\n\t" + "xor %0, a1, %0" : "=&r" (ti) : ); + return ti; +} + +/* thread information allocation */ +#define alloc_thread_info(tsk) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1)) +#define free_thread_info(ti) free_pages((unsigned long) (ti), 1) +#define get_thread_info(ti) get_task_struct((ti)->task) +#define put_thread_info(ti) put_task_struct((ti)->task) + +#else /* !__ASSEMBLY__ */ + +/* how to get the thread information struct from ASM */ +#define GET_THREAD_INFO(reg,sp) \ + extui reg, sp, 0, 13; \ + xor reg, sp, reg +#endif + + +/* + * thread information flags + * - these are process state flags that various assembly files may need to access + * - pending work-to-be-done flags are in LSW + * - other flags in MSW + */ +#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ +#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */ +#define TIF_SIGPENDING 2 /* signal pending */ +#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ +#define TIF_SINGLESTEP 4 /* restore singlestep on return to user mode */ +#define TIF_IRET 5 /* return with iret */ +#define TIF_MEMDIE 6 +#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ + +#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) +#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) +#define _TIF_SIGPENDING (1<<TIF_SIGPENDING) +#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) +#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP) +#define _TIF_IRET (1<<TIF_IRET) +#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) + +#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */ +#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */ + +/* + * Thread-synchronous status. + * + * This is different from the flags in that nobody else + * ever touches our thread-synchronous status, so we don't + * have to worry about atomic accesses. + */ +#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */ + +#define THREAD_SIZE 8192 //(2*PAGE_SIZE) + +#endif /* __KERNEL__ */ +#endif /* _XTENSA_THREAD_INFO */ diff --git a/include/asm-xtensa/timex.h b/include/asm-xtensa/timex.h new file mode 100644 index 0000000..d14a375 --- /dev/null +++ b/include/asm-xtensa/timex.h @@ -0,0 +1,94 @@ +/* + * include/asm-xtensa/timex.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_TIMEX_H +#define _XTENSA_TIMEX_H + +#ifdef __KERNEL__ + +#include <asm/processor.h> +#include <linux/stringify.h> + +#if XCHAL_INT_LEVEL(XCHAL_TIMER0_INTERRUPT) == 1 +# define LINUX_TIMER 0 +#elif XCHAL_INT_LEVEL(XCHAL_TIMER1_INTERRUPT) == 1 +# define LINUX_TIMER 1 +#elif XCHAL_INT_LEVEL(XCHAL_TIMER2_INTERRUPT) == 1 +# define LINUX_TIMER 2 +#else +# error "Bad timer number for Linux configurations!" 
+#endif + +#define LINUX_TIMER_INT XCHAL_TIMER_INTERRUPT(LINUX_TIMER) +#define LINUX_TIMER_MASK (1L << LINUX_TIMER_INT) + +#define CLOCK_TICK_RATE 1193180 /* (everyone is using this value) */ +#define CLOCK_TICK_FACTOR 20 /* Factor of both 10^6 and CLOCK_TICK_RATE */ +#define FINETUNE ((((((long)LATCH * HZ - CLOCK_TICK_RATE) << SHIFT_HZ) * \ + (1000000/CLOCK_TICK_FACTOR) / (CLOCK_TICK_RATE/CLOCK_TICK_FACTOR)) \ + << (SHIFT_SCALE-SHIFT_HZ)) / HZ) + +#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT +extern unsigned long ccount_per_jiffy; +extern unsigned long ccount_nsec; +#define CCOUNT_PER_JIFFY ccount_per_jiffy +#define CCOUNT_NSEC ccount_nsec +#else +#define CCOUNT_PER_JIFFY (CONFIG_XTENSA_CPU_CLOCK*(1000000UL/HZ)) +#define CCOUNT_NSEC (1000000000UL / CONFIG_XTENSA_CPU_CLOCK) +#endif + + +typedef unsigned long long cycles_t; + +/* + * Only used for SMP. + */ + +extern cycles_t cacheflush_time; + +#define get_cycles() (0) + + +/* + * Register access. + */ + +#define WSR_CCOUNT(r) __asm__("wsr %0,"__stringify(CCOUNT) :: "a" (r)) +#define RSR_CCOUNT(r) __asm__("rsr %0,"__stringify(CCOUNT) : "=a" (r)) +#define WSR_CCOMPARE(x,r) __asm__("wsr %0,"__stringify(CCOMPARE_0)"+"__stringify(x) :: "a"(r)) +#define RSR_CCOMPARE(x,r) __asm__("rsr %0,"__stringify(CCOMPARE_0)"+"__stringify(x) : "=a"(r)) + +static inline unsigned long get_ccount (void) +{ + unsigned long ccount; + RSR_CCOUNT(ccount); + return ccount; +} + +static inline void set_ccount (unsigned long ccount) +{ + WSR_CCOUNT(ccount); +} + +static inline unsigned long get_linux_timer (void) +{ + unsigned ccompare; + RSR_CCOMPARE(LINUX_TIMER, ccompare); + return ccompare; +} + +static inline void set_linux_timer (unsigned long ccompare) +{ + WSR_CCOMPARE(LINUX_TIMER, ccompare); +} + +#endif /* __KERNEL__ */ +#endif /* _XTENSA_TIMEX_H */ diff --git a/include/asm-xtensa/tlb.h b/include/asm-xtensa/tlb.h new file mode 100644 index 0000000..4562b2d --- /dev/null +++ b/include/asm-xtensa/tlb.h @@ -0,0 +1,25 @@ +/* + * include/asm-xtensa/tlb.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_TLB_H +#define _XTENSA_TLB_H + +#define tlb_start_vma(tlb,vma) do { } while (0) +#define tlb_end_vma(tlb,vma) do { } while (0) +#define __tlb_remove_tlb_entry(tlb,pte,addr) do { } while (0) + +#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) + +#include <asm-generic/tlb.h> +#include <asm/page.h> + +#define __pte_free_tlb(tlb,pte) pte_free(pte) + +#endif /* _XTENSA_TLB_H */ diff --git a/include/asm-xtensa/tlbflush.h b/include/asm-xtensa/tlbflush.h new file mode 100644 index 0000000..23bfe9d --- /dev/null +++ b/include/asm-xtensa/tlbflush.h @@ -0,0 +1,200 @@ +/* + * include/asm-xtensa/tlbflush.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. 
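get_ccount() above reads the free-running CCOUNT cycle counter. A minimal sketch of timing an interval with it, assuming at most one 32-bit wrap between the two reads:

#include <asm/timex.h>

static unsigned long example_cycles_elapsed(void)
{
	unsigned long start, end;

	start = get_ccount();
	/* ... code being timed ... */
	end = get_ccount();

	/* CCOUNT is 32 bits; unsigned subtraction absorbs a single wrap. */
	return end - start;
}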
+ */ + +#ifndef _XTENSA_TLBFLUSH_H +#define _XTENSA_TLBFLUSH_H + +#define DEBUG_TLB + +#ifdef __KERNEL__ + +#include <asm/processor.h> +#include <linux/stringify.h> + +/* TLB flushing: + * + * - flush_tlb_all() flushes all processes TLB entries + * - flush_tlb_mm(mm) flushes the specified mm context TLB entries + * - flush_tlb_page(mm, vmaddr) flushes a single page + * - flush_tlb_range(mm, start, end) flushes a range of pages + */ + +extern void flush_tlb_all(void); +extern void flush_tlb_mm(struct mm_struct*); +extern void flush_tlb_page(struct vm_area_struct*,unsigned long); +extern void flush_tlb_range(struct vm_area_struct*,unsigned long,unsigned long); + +#define flush_tlb_kernel_range(start,end) flush_tlb_all() + + +/* This is calld in munmap when we have freed up some page-table pages. + * We don't need to do anything here, there's nothing special about our + * page-table pages. + */ + +extern inline void flush_tlb_pgtables(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ +} + +/* TLB operations. */ + +#define ITLB_WAYS_LOG2 XCHAL_ITLB_WAY_BITS +#define DTLB_WAYS_LOG2 XCHAL_DTLB_WAY_BITS +#define ITLB_PROBE_SUCCESS (1 << ITLB_WAYS_LOG2) +#define DTLB_PROBE_SUCCESS (1 << DTLB_WAYS_LOG2) + +extern inline unsigned long itlb_probe(unsigned long addr) +{ + unsigned long tmp; + __asm__ __volatile__("pitlb %0, %1\n\t" : "=a" (tmp) : "a" (addr)); + return tmp; +} + +extern inline unsigned long dtlb_probe(unsigned long addr) +{ + unsigned long tmp; + __asm__ __volatile__("pdtlb %0, %1\n\t" : "=a" (tmp) : "a" (addr)); + return tmp; +} + +extern inline void invalidate_itlb_entry (unsigned long probe) +{ + __asm__ __volatile__("iitlb %0; isync\n\t" : : "a" (probe)); +} + +extern inline void invalidate_dtlb_entry (unsigned long probe) +{ + __asm__ __volatile__("idtlb %0; dsync\n\t" : : "a" (probe)); +} + +/* Use the .._no_isync functions with caution. Generally, these are + * handy for bulk invalidates followed by a single 'isync'. The + * caller must follow up with an 'isync', which can be relatively + * expensive on some Xtensa implementations. + */ +extern inline void invalidate_itlb_entry_no_isync (unsigned entry) +{ + /* Caller must follow up with 'isync'. */ + __asm__ __volatile__ ("iitlb %0\n" : : "a" (entry) ); +} + +extern inline void invalidate_dtlb_entry_no_isync (unsigned entry) +{ + /* Caller must follow up with 'isync'. 
*/ + __asm__ __volatile__ ("idtlb %0\n" : : "a" (entry) ); +} + +extern inline void set_itlbcfg_register (unsigned long val) +{ + __asm__ __volatile__("wsr %0, "__stringify(ITLBCFG)"\n\t" "isync\n\t" + : : "a" (val)); +} + +extern inline void set_dtlbcfg_register (unsigned long val) +{ + __asm__ __volatile__("wsr %0, "__stringify(DTLBCFG)"; dsync\n\t" + : : "a" (val)); +} + +extern inline void set_ptevaddr_register (unsigned long val) +{ + __asm__ __volatile__(" wsr %0, "__stringify(PTEVADDR)"; isync\n" + : : "a" (val)); +} + +extern inline unsigned long read_ptevaddr_register (void) +{ + unsigned long tmp; + __asm__ __volatile__("rsr %0, "__stringify(PTEVADDR)"\n\t" : "=a" (tmp)); + return tmp; +} + +extern inline void write_dtlb_entry (pte_t entry, int way) +{ + __asm__ __volatile__("wdtlb %1, %0; dsync\n\t" + : : "r" (way), "r" (entry) ); +} + +extern inline void write_itlb_entry (pte_t entry, int way) +{ + __asm__ __volatile__("witlb %1, %0; isync\n\t" + : : "r" (way), "r" (entry) ); +} + +extern inline void invalidate_page_directory (void) +{ + invalidate_dtlb_entry (DTLB_WAY_PGTABLE); +} + +extern inline void invalidate_itlb_mapping (unsigned address) +{ + unsigned long tlb_entry; + while ((tlb_entry = itlb_probe (address)) & ITLB_PROBE_SUCCESS) + invalidate_itlb_entry (tlb_entry); +} + +extern inline void invalidate_dtlb_mapping (unsigned address) +{ + unsigned long tlb_entry; + while ((tlb_entry = dtlb_probe (address)) & DTLB_PROBE_SUCCESS) + invalidate_dtlb_entry (tlb_entry); +} + +#define check_pgt_cache() do { } while (0) + + +#ifdef DEBUG_TLB + +/* DO NOT USE THESE FUNCTIONS. These instructions aren't part of the Xtensa + * ISA and exist only for test purposes.. + * You may find it helpful for MMU debugging, however. + * + * 'at' is the unmodified input register + * 'as' is the output register, as follows (specific to the Linux config): + * + * as[31..12] contain the virtual address + * as[11..08] are meaningless + * as[07..00] contain the asid + */ + +extern inline unsigned long read_dtlb_virtual (int way) +{ + unsigned long tmp; + __asm__ __volatile__("rdtlb0 %0, %1\n\t" : "=a" (tmp), "+a" (way)); + return tmp; +} + +extern inline unsigned long read_dtlb_translation (int way) +{ + unsigned long tmp; + __asm__ __volatile__("rdtlb1 %0, %1\n\t" : "=a" (tmp), "+a" (way)); + return tmp; +} + +extern inline unsigned long read_itlb_virtual (int way) +{ + unsigned long tmp; + __asm__ __volatile__("ritlb0 %0, %1\n\t" : "=a" (tmp), "+a" (way)); + return tmp; +} + +extern inline unsigned long read_itlb_translation (int way) +{ + unsigned long tmp; + __asm__ __volatile__("ritlb1 %0, %1\n\t" : "=a" (tmp), "+a" (way)); + return tmp; +} + +#endif /* DEBUG_TLB */ + + +#endif /* __KERNEL__ */ +#endif /* _XTENSA_PGALLOC_H */ diff --git a/include/asm-xtensa/topology.h b/include/asm-xtensa/topology.h new file mode 100644 index 0000000..7309e38 --- /dev/null +++ b/include/asm-xtensa/topology.h @@ -0,0 +1,16 @@ +/* + * include/asm-xtensa/topology.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. 
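The *_no_isync() helpers above exist for batched invalidation. A hedged sketch of the pattern their comment describes: probe and drop every ITLB entry in a range, then pay for a single isync at the end (illustrative only, simplified to one probe per page):

#include <asm/page.h>

static void example_drop_itlb_range(unsigned long start, unsigned long end)
{
	unsigned long addr, entry;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		entry = itlb_probe(addr);
		if (entry & ITLB_PROBE_SUCCESS)
			invalidate_itlb_entry_no_isync(entry);
	}

	/* A single isync covers the whole batch. */
	__asm__ __volatile__ ("isync");
}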
+ */ + +#ifndef _XTENSA_TOPOLOGY_H +#define _XTENSA_TOPOLOGY_H + +#include <asm-generic/topology.h> + +#endif /* _XTENSA_TOPOLOGY_H */ diff --git a/include/asm-xtensa/types.h b/include/asm-xtensa/types.h new file mode 100644 index 0000000..ebac004 --- /dev/null +++ b/include/asm-xtensa/types.h @@ -0,0 +1,66 @@ +/* + * include/asm-xtensa/types.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_TYPES_H +#define _XTENSA_TYPES_H + +#ifndef __ASSEMBLY__ + +typedef unsigned short umode_t; + +/* + * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the + * header files exported to user space + */ + +typedef __signed__ char __s8; +typedef unsigned char __u8; + +typedef __signed__ short __s16; +typedef unsigned short __u16; + +typedef __signed__ int __s32; +typedef unsigned int __u32; + +#if defined(__GNUC__) && !defined(__STRICT_ANSI__) +typedef __signed__ long long __s64; +typedef unsigned long long __u64; +#endif + +/* + * These aren't exported outside the kernel to avoid name space clashes + */ +#ifdef __KERNEL__ + +typedef __signed__ char s8; +typedef unsigned char u8; + +typedef __signed__ short s16; +typedef unsigned short u16; + +typedef __signed__ int s32; +typedef unsigned int u32; + +typedef __signed__ long long s64; +typedef unsigned long long u64; + + +#define BITS_PER_LONG 32 + +/* Dma addresses are 32-bits wide. */ + +typedef u32 dma_addr_t; + +typedef unsigned int kmem_bufctl_t; + +#endif /* __KERNEL__ */ +#endif + +#endif /* _XTENSA_TYPES_H */ diff --git a/include/asm-xtensa/uaccess.h b/include/asm-xtensa/uaccess.h new file mode 100644 index 0000000..35576b2 --- /dev/null +++ b/include/asm-xtensa/uaccess.h @@ -0,0 +1,532 @@ +/* + * include/asm-xtensa/uaccess.h + * + * User space memory access functions + * + * These routines provide basic accessing functions to the user memory + * space for the kernel. This header file provides fuctions such as: + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_UACCESS_H +#define _XTENSA_UACCESS_H + +#include <linux/errno.h> + +#define VERIFY_READ 0 +#define VERIFY_WRITE 1 + +#ifdef __ASSEMBLY__ + +#define _ASMLANGUAGE +#include <asm/current.h> +#include <asm/offsets.h> +#include <asm/processor.h> + +/* + * These assembly macros mirror the C macros that follow below. They + * should always have identical functionality. See + * arch/xtensa/kernel/sys.S for usage. + */ + +#define KERNEL_DS 0 +#define USER_DS 1 + +#define get_ds (KERNEL_DS) + +/* + * get_fs reads current->thread.current_ds into a register. + * On Entry: + * <ad> anything + * <sp> stack + * On Exit: + * <ad> contains current->thread.current_ds + */ + .macro get_fs ad, sp + GET_CURRENT(\ad,\sp) + l32i \ad, \ad, THREAD_CURRENT_DS + .endm + +/* + * set_fs sets current->thread.current_ds to some value. + * On Entry: + * <at> anything (temp register) + * <av> value to write + * <sp> stack + * On Exit: + * <at> destroyed (actually, current) + * <av> preserved, value to write + */ + .macro set_fs at, av, sp + GET_CURRENT(\at,\sp) + s32i \av, \at, THREAD_CURRENT_DS + .endm + +/* + * kernel_ok determines whether we should bypass addr/size checking. 
+ * See the equivalent C-macro version below for clarity. + * On success, kernel_ok branches to a label indicated by parameter + * <success>. This implies that the macro falls through to the next + * insruction on an error. + * + * Note that while this macro can be used independently, we designed + * in for optimal use in the access_ok macro below (i.e., we fall + * through on error). + * + * On Entry: + * <at> anything (temp register) + * <success> label to branch to on success; implies + * fall-through macro on error + * <sp> stack pointer + * On Exit: + * <at> destroyed (actually, current->thread.current_ds) + */ + +#if ((KERNEL_DS != 0) || (USER_DS == 0)) +# error Assembly macro kernel_ok fails +#endif + .macro kernel_ok at, sp, success + get_fs \at, \sp + beqz \at, \success + .endm + +/* + * user_ok determines whether the access to user-space memory is allowed. + * See the equivalent C-macro version below for clarity. + * + * On error, user_ok branches to a label indicated by parameter + * <error>. This implies that the macro falls through to the next + * instruction on success. + * + * Note that while this macro can be used independently, we designed + * in for optimal use in the access_ok macro below (i.e., we fall + * through on success). + * + * On Entry: + * <aa> register containing memory address + * <as> register containing memory size + * <at> temp register + * <error> label to branch to on error; implies fall-through + * macro on success + * On Exit: + * <aa> preserved + * <as> preserved + * <at> destroyed (actually, (TASK_SIZE + 1 - size)) + */ + .macro user_ok aa, as, at, error + movi \at, (TASK_SIZE+1) + bgeu \as, \at, \error + sub \at, \at, \as + bgeu \aa, \at, \error + .endm + +/* + * access_ok determines whether a memory access is allowed. See the + * equivalent C-macro version below for clarity. + * + * On error, access_ok branches to a label indicated by parameter + * <error>. This implies that the macro falls through to the next + * instruction on success. + * + * Note that we assume success is the common case, and we optimize the + * branch fall-through case on success. + * + * On Entry: + * <aa> register containing memory address + * <as> register containing memory size + * <at> temp register + * <sp> + * <error> label to branch to on error; implies fall-through + * macro on success + * On Exit: + * <aa> preserved + * <as> preserved + * <at> destroyed + */ + .macro access_ok aa, as, at, sp, error + kernel_ok \at, \sp, .Laccess_ok_\@ + user_ok \aa, \as, \at, \error +.Laccess_ok_\@: + .endm + +/* + * verify_area determines whether a memory access is allowed. It's + * mostly an unnecessary wrapper for access_ok, but we provide it as a + * duplicate of the verify_area() C inline function below. See the + * equivalent C version below for clarity. + * + * On error, verify_area branches to a label indicated by parameter + * <error>. This implies that the macro falls through to the next + * instruction on success. + * + * Note that we assume success is the common case, and we optimize the + * branch fall-through case on success. 
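+ *
+ * In effect the check (user_ok above) accepts an (address, size) pair
+ * only if size <= TASK_SIZE and address <= TASK_SIZE - size, ie. only
+ * if the region ends no later than TASK_SIZE.  For example, assuming a
+ * hypothetical TASK_SIZE of 0x40000000, an address of 0x3ffffff8 with
+ * size 16 is rejected, because the last byte of the region would lie
+ * beyond the end of the user address range.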
+ * + * On Entry: + * <aa> register containing memory address + * <as> register containing memory size + * <at> temp register + * <error> label to branch to on error; implies fall-through + * macro on success + * On Exit: + * <aa> preserved + * <as> preserved + * <at> destroyed + */ + .macro verify_area aa, as, at, sp, error + access_ok \at, \aa, \as, \sp, \error + .endm + + +#else /* __ASSEMBLY__ not defined */ + +#include <linux/sched.h> +#include <asm/types.h> + +/* + * The fs value determines whether argument validity checking should + * be performed or not. If get_fs() == USER_DS, checking is + * performed, with get_fs() == KERNEL_DS, checking is bypassed. + * + * For historical reasons (Data Segment Register?), these macros are + * grossly misnamed. + */ + +#define KERNEL_DS ((mm_segment_t) { 0 }) +#define USER_DS ((mm_segment_t) { 1 }) + +#define get_ds() (KERNEL_DS) +#define get_fs() (current->thread.current_ds) +#define set_fs(val) (current->thread.current_ds = (val)) + +#define segment_eq(a,b) ((a).seg == (b).seg) + +#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) +#define __user_ok(addr,size) (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size))) +#define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size))) +#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size)) + +extern inline int verify_area(int type, const void * addr, unsigned long size) +{ + return access_ok(type,addr,size) ? 0 : -EFAULT; +} + +/* + * These are the main single-value transfer routines. They + * automatically use the right size if we just have the right pointer + * type. + * + * This gets kind of ugly. We want to return _two_ values in + * "get_user()" and yet we don't want to do any pointers, because that + * is too much of a performance impact. Thus we have a few rather ugly + * macros here, and hide all the uglyness from the user. + * + * Careful to not + * (a) re-use the arguments for side effects (sizeof is ok) + * (b) require any knowledge of processes at this stage + */ +#define put_user(x,ptr) __put_user_check((x),(ptr),sizeof(*(ptr))) +#define get_user(x,ptr) __get_user_check((x),(ptr),sizeof(*(ptr))) + +/* + * The "__xxx" versions of the user access functions are versions that + * do not verify the address space, that must have been done previously + * with a separate "access_ok()" call (this is used when we do multiple + * accesses to the same area of user memory). 
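+ *
+ * A rough usage sketch, copying a small structure in from user space
+ * (the structure and function names are purely illustrative, not part
+ * of this header): verify the range once, then use the unchecked
+ * variants for the individual fields.
+ *
+ *	struct hypothetical_req { int cmd; int arg; };
+ *
+ *	int fetch_req(struct hypothetical_req *dst, struct hypothetical_req *usrc)
+ *	{
+ *		long err = 0;
+ *
+ *		if (!access_ok(VERIFY_READ, usrc, sizeof(*usrc)))
+ *			return -EFAULT;
+ *		err |= __get_user(dst->cmd, &usrc->cmd);
+ *		err |= __get_user(dst->arg, &usrc->arg);
+ *		return err ? -EFAULT : 0;
+ *	}
+ *
+ * A single value is more simply fetched with get_user(x, ptr), which
+ * performs the access_ok() check itself.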
+ */ +#define __put_user(x,ptr) __put_user_nocheck((x),(ptr),sizeof(*(ptr))) +#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr))) + + +extern long __put_user_bad(void); + +#define __put_user_nocheck(x,ptr,size) \ +({ \ + long __pu_err; \ + __put_user_size((x),(ptr),(size),__pu_err); \ + __pu_err; \ +}) + +#define __put_user_check(x,ptr,size) \ +({ \ + long __pu_err = -EFAULT; \ + __typeof__(*(ptr)) *__pu_addr = (ptr); \ + if (access_ok(VERIFY_WRITE,__pu_addr,size)) \ + __put_user_size((x),__pu_addr,(size),__pu_err); \ + __pu_err; \ +}) + +#define __put_user_size(x,ptr,size,retval) \ +do { \ + retval = 0; \ + switch (size) { \ + case 1: __put_user_asm(x,ptr,retval,1,"s8i"); break; \ + case 2: __put_user_asm(x,ptr,retval,2,"s16i"); break; \ + case 4: __put_user_asm(x,ptr,retval,4,"s32i"); break; \ + case 8: { \ + __typeof__(*ptr) __v64 = x; \ + retval = __copy_to_user(ptr,&__v64,8); \ + break; \ + } \ + default: __put_user_bad(); \ + } \ +} while (0) + + +/* + * Consider a case of a user single load/store would cause both an + * unaligned exception and an MMU-related exception (unaligned + * exceptions happen first): + * + * User code passes a bad variable ptr to a system call. + * Kernel tries to access the variable. + * Unaligned exception occurs. + * Unaligned exception handler tries to make aligned accesses. + * Double exception occurs for MMU-related cause (e.g., page not mapped). + * do_page_fault() thinks the fault address belongs to the kernel, not the + * user, and panics. + * + * The kernel currently prohibits user unaligned accesses. We use the + * __check_align_* macros to check for unaligned addresses before + * accessing user space so we don't crash the kernel. Both + * __put_user_asm and __get_user_asm use these alignment macros, so + * macro-specific labels such as 0f, 1f, %0, %2, and %3 must stay in + * sync. + */ + +#define __check_align_1 "" + +#define __check_align_2 \ + " _bbci.l %2, 0, 1f \n" \ + " movi %0, %3 \n" \ + " _j 2f \n" + +#define __check_align_4 \ + " _bbsi.l %2, 0, 0f \n" \ + " _bbci.l %2, 1, 1f \n" \ + "0: movi %0, %3 \n" \ + " _j 2f \n" + + +/* + * We don't tell gcc that we are accessing memory, but this is OK + * because we do not write to any memory gcc knows about, so there + * are no aliasing issues. + * + * WARNING: If you modify this macro at all, verify that the + * __check_align_* macros still work. 
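+ *
+ * To illustrate the effect of the alignment checks on a caller (a
+ * hypothetical call, not code from this file): for a 2-byte store
+ * such as
+ *
+ *	short *p = (short *)0x40001;		(odd user address)
+ *	err = put_user(0x1234, p);
+ *
+ * __check_align_2 tests bit 0 of the address with _bbci.l; since the
+ * bit is set, the macro loads -EFAULT into the error register and
+ * jumps past the store, so put_user() returns -EFAULT instead of the
+ * kernel taking an unaligned-access exception.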
+ */ +#define __put_user_asm(x, addr, err, align, insn) \ + __asm__ __volatile__( \ + __check_align_##align \ + "1: "insn" %1, %2, 0 \n" \ + "2: \n" \ + " .section .fixup,\"ax\" \n" \ + " .align 4 \n" \ + "4: \n" \ + " .long 2b \n" \ + "5: \n" \ + " l32r %2, 4b \n" \ + " movi %0, %3 \n" \ + " jx %2 \n" \ + " .previous \n" \ + " .section __ex_table,\"a\" \n" \ + " .long 1b, 5b \n" \ + " .previous" \ + :"=r" (err) \ + :"r" ((int)(x)), "r" (addr), "i" (-EFAULT), "0" (err)) + +#define __get_user_nocheck(x,ptr,size) \ +({ \ + long __gu_err, __gu_val; \ + __get_user_size(__gu_val,(ptr),(size),__gu_err); \ + (x) = (__typeof__(*(ptr)))__gu_val; \ + __gu_err; \ +}) + +#define __get_user_check(x,ptr,size) \ +({ \ + long __gu_err = -EFAULT, __gu_val = 0; \ + const __typeof__(*(ptr)) *__gu_addr = (ptr); \ + if (access_ok(VERIFY_READ,__gu_addr,size)) \ + __get_user_size(__gu_val,__gu_addr,(size),__gu_err); \ + (x) = (__typeof__(*(ptr)))__gu_val; \ + __gu_err; \ +}) + +extern long __get_user_bad(void); + +#define __get_user_size(x,ptr,size,retval) \ +do { \ + retval = 0; \ + switch (size) { \ + case 1: __get_user_asm(x,ptr,retval,1,"l8ui"); break; \ + case 2: __get_user_asm(x,ptr,retval,2,"l16ui"); break; \ + case 4: __get_user_asm(x,ptr,retval,4,"l32i"); break; \ + case 8: retval = __copy_from_user(&x,ptr,8); break; \ + default: (x) = __get_user_bad(); \ + } \ +} while (0) + + +/* + * WARNING: If you modify this macro at all, verify that the + * __check_align_* macros still work. + */ +#define __get_user_asm(x, addr, err, align, insn) \ + __asm__ __volatile__( \ + __check_align_##align \ + "1: "insn" %1, %2, 0 \n" \ + "2: \n" \ + " .section .fixup,\"ax\" \n" \ + " .align 4 \n" \ + "4: \n" \ + " .long 2b \n" \ + "5: \n" \ + " l32r %2, 4b \n" \ + " movi %1, 0 \n" \ + " movi %0, %3 \n" \ + " jx %2 \n" \ + " .previous \n" \ + " .section __ex_table,\"a\" \n" \ + " .long 1b, 5b \n" \ + " .previous" \ + :"=r" (err), "=r" (x) \ + :"r" (addr), "i" (-EFAULT), "0" (err)) + + +/* + * Copy to/from user space + */ + +/* + * We use a generic, arbitrary-sized copy subroutine. The Xtensa + * architecture would cause heavy code bloat if we tried to inline + * these functions and provide __constant_copy_* equivalents like the + * i386 versions. __xtensa_copy_user is quite efficient. See the + * .fixup section of __xtensa_copy_user for a discussion on the + * X_zeroing equivalents for Xtensa. 
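+ *
+ * Both copy_to_user() and copy_from_user() return the number of bytes
+ * that could NOT be copied, so zero means complete success.  A rough
+ * sketch of a hypothetical read-style handler (names illustrative
+ * only, not part of this header):
+ *
+ *	ssize_t hypothetical_read(char *ubuf, size_t len)	(ubuf: user pointer)
+ *	{
+ *		char kbuf[64];
+ *		size_t n = len < sizeof(kbuf) ? len : sizeof(kbuf);
+ *
+ *		memset(kbuf, 0, n);		(stand-in for real data)
+ *		if (copy_to_user(ubuf, kbuf, n))
+ *			return -EFAULT;
+ *		return n;
+ *	}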
+ */ + +extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n); +#define __copy_user(to,from,size) __xtensa_copy_user(to,from,size) + + +static inline unsigned long +__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n) +{ + return __copy_user(to,from,n); +} + +static inline unsigned long +__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n) +{ + return __copy_user(to,from,n); +} + +static inline unsigned long +__generic_copy_to_user(void *to, const void *from, unsigned long n) +{ + prefetch(from); + if (access_ok(VERIFY_WRITE, to, n)) + return __copy_user(to,from,n); + return n; +} + +static inline unsigned long +__generic_copy_from_user(void *to, const void *from, unsigned long n) +{ + prefetchw(to); + if (access_ok(VERIFY_READ, from, n)) + return __copy_user(to,from,n); + else + memset(to, 0, n); + return n; +} + +#define copy_to_user(to,from,n) __generic_copy_to_user((to),(from),(n)) +#define copy_from_user(to,from,n) __generic_copy_from_user((to),(from),(n)) +#define __copy_to_user(to,from,n) __generic_copy_to_user_nocheck((to),(from),(n)) +#define __copy_from_user(to,from,n) __generic_copy_from_user_nocheck((to),(from),(n)) +#define __copy_to_user_inatomic __copy_to_user +#define __copy_from_user_inatomic __copy_from_user + + +/* + * We need to return the number of bytes not cleared. Our memset() + * returns zero if a problem occurs while accessing user-space memory. + * In that event, return no memory cleared. Otherwise, zero for + * success. + */ + +extern inline unsigned long +__xtensa_clear_user(void *addr, unsigned long size) +{ + if ( ! memset(addr, 0, size) ) + return size; + return 0; +} + +extern inline unsigned long +clear_user(void *addr, unsigned long size) +{ + if (access_ok(VERIFY_WRITE, addr, size)) + return __xtensa_clear_user(addr, size); + return size ? -EFAULT : 0; +} + +#define __clear_user __xtensa_clear_user + + +extern long __strncpy_user(char *, const char *, long); +#define __strncpy_from_user __strncpy_user + +extern inline long +strncpy_from_user(char *dst, const char *src, long count) +{ + if (access_ok(VERIFY_READ, src, 1)) + return __strncpy_from_user(dst, src, count); + return -EFAULT; +} + + +#define strlen_user(str) strnlen_user((str), TASK_SIZE - 1) + +/* + * Return the size of a string (including the ending 0!) + */ +extern long __strnlen_user(const char *, long); + +extern inline long strnlen_user(const char *str, long len) +{ + unsigned long top = __kernel_ok ? ~0UL : TASK_SIZE - 1; + + if ((unsigned long)str > top) + return 0; + return __strnlen_user(str, len); +} + + +struct exception_table_entry +{ + unsigned long insn, fixup; +}; + +/* Returns 0 if exception not found and fixup.unit otherwise. */ + +extern unsigned long search_exception_table(unsigned long addr); +extern void sort_exception_table(void); + +/* Returns the new pc */ +#define fixup_exception(map_reg, fixup_unit, pc) \ +({ \ + fixup_unit; \ +}) + +#endif /* __ASSEMBLY__ */ +#endif /* _XTENSA_UACCESS_H */ diff --git a/include/asm-xtensa/ucontext.h b/include/asm-xtensa/ucontext.h new file mode 100644 index 0000000..94c94ed --- /dev/null +++ b/include/asm-xtensa/ucontext.h @@ -0,0 +1,22 @@ +/* + * include/asm-xtensa/ucontext.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. 
+ */ + +#ifndef _XTENSA_UCONTEXT_H +#define _XTENSA_UCONTEXT_H + +struct ucontext { + unsigned long uc_flags; + struct ucontext *uc_link; + stack_t uc_stack; + struct sigcontext uc_mcontext; + sigset_t uc_sigmask; /* mask last for extensibility */ +}; + +#endif /* _XTENSA_UCONTEXT_H */ diff --git a/include/asm-xtensa/unaligned.h b/include/asm-xtensa/unaligned.h new file mode 100644 index 0000000..2822089 --- /dev/null +++ b/include/asm-xtensa/unaligned.h @@ -0,0 +1,28 @@ +/* + * include/asm-xtensa/unaligned.h + * + * Xtensa doesn't handle unaligned accesses efficiently. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_UNALIGNED_H +#define _XTENSA_UNALIGNED_H + +#include <linux/string.h> + +/* Use memmove here, so gcc does not insert a __builtin_memcpy. */ + +#define get_unaligned(ptr) \ + ({ __typeof__(*(ptr)) __tmp; memmove(&__tmp, (ptr), sizeof(*(ptr))); __tmp; }) + +#define put_unaligned(val, ptr) \ + ({ __typeof__(*(ptr)) __tmp = (val); \ + memmove((ptr), &__tmp, sizeof(*(ptr))); \ + (void)0; }) + +#endif /* _XTENSA_UNALIGNED_H */ diff --git a/include/asm-xtensa/unistd.h b/include/asm-xtensa/unistd.h new file mode 100644 index 0000000..64c64dd --- /dev/null +++ b/include/asm-xtensa/unistd.h @@ -0,0 +1,537 @@ +/* + * include/asm-xtensa/unistd.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_UNISTD_H +#define _XTENSA_UNISTD_H + +#include <linux/linkage.h> + +//#define __NR_setup 0 /* used only by init, to get system going */ +#define __NR_spill 0 +#define __NR_exit 1 +#define __NR_fork 2 +#define __NR_read 3 +#define __NR_write 4 +#define __NR_open 5 +#define __NR_close 6 +#define __NR_waitpid 7 +#define __NR_creat 8 +#define __NR_link 9 +#define __NR_unlink 10 +#define __NR_execve 11 +#define __NR_chdir 12 +#define __NR_time 13 +#define __NR_mknod 14 +#define __NR_chmod 15 +#define __NR_lchown 16 +#define __NR_break 17 +#define __NR_oldstat 18 +#define __NR_lseek 19 +#define __NR_getpid 20 +#define __NR_mount 21 +#define __NR_oldumount 22 +#define __NR_setuid 23 +#define __NR_getuid 24 +#define __NR_stime 25 +#define __NR_ptrace 26 +#define __NR_alarm 27 +#define __NR_oldfstat 28 +#define __NR_pause 29 +#define __NR_utime 30 +#define __NR_stty 31 +#define __NR_gtty 32 +#define __NR_access 33 +#define __NR_nice 34 +#define __NR_ftime 35 +#define __NR_sync 36 +#define __NR_kill 37 +#define __NR_rename 38 +#define __NR_mkdir 39 +#define __NR_rmdir 40 +#define __NR_dup 41 +#define __NR_pipe 42 +#define __NR_times 43 +#define __NR_prof 44 +#define __NR_brk 45 +#define __NR_setgid 46 +#define __NR_getgid 47 +#define __NR_signal 48 +#define __NR_geteuid 49 +#define __NR_getegid 50 +#define __NR_acct 51 +#define __NR_umount 52 +#define __NR_lock 53 +#define __NR_ioctl 54 +#define __NR_fcntl 55 +#define __NR_mpx 56 +#define __NR_setpgid 57 +#define __NR_ulimit 58 +#define __NR_oldolduname 59 +#define __NR_umask 60 +#define __NR_chroot 61 +#define __NR_ustat 62 +#define __NR_dup2 63 +#define __NR_getppid 64 +#define __NR_getpgrp 65 +#define __NR_setsid 66 +#define __NR_sigaction 67 +#define __NR_sgetmask 68 +#define __NR_ssetmask 69 +#define __NR_setreuid 70 +#define __NR_setregid 71 +#define 
__NR_sigsuspend 72 +#define __NR_sigpending 73 +#define __NR_sethostname 74 +#define __NR_setrlimit 75 +#define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */ +#define __NR_getrusage 77 +#define __NR_gettimeofday 78 +#define __NR_settimeofday 79 +#define __NR_getgroups 80 +#define __NR_setgroups 81 +#define __NR_select 82 +#define __NR_symlink 83 +#define __NR_oldlstat 84 +#define __NR_readlink 85 +#define __NR_uselib 86 +#define __NR_swapon 87 +#define __NR_reboot 88 +#define __NR_readdir 89 +#define __NR_mmap 90 +#define __NR_munmap 91 +#define __NR_truncate 92 +#define __NR_ftruncate 93 +#define __NR_fchmod 94 +#define __NR_fchown 95 +#define __NR_getpriority 96 +#define __NR_setpriority 97 +#define __NR_profil 98 +#define __NR_statfs 99 +#define __NR_fstatfs 100 +#define __NR_ioperm 101 +#define __NR_socketcall 102 +#define __NR_syslog 103 +#define __NR_setitimer 104 +#define __NR_getitimer 105 +#define __NR_stat 106 +#define __NR_lstat 107 +#define __NR_fstat 108 +#define __NR_olduname 109 +#define __NR_iopl 110 +#define __NR_vhangup 111 +#define __NR_idle 112 +#define __NR_vm86 113 +#define __NR_wait4 114 +#define __NR_swapoff 115 +#define __NR_sysinfo 116 +#define __NR_ipc 117 +#define __NR_fsync 118 +#define __NR_sigreturn 119 +#define __NR_clone 120 +#define __NR_setdomainname 121 +#define __NR_uname 122 +#define __NR_modify_ldt 123 +#define __NR_adjtimex 124 +#define __NR_mprotect 125 +#define __NR_sigprocmask 126 +#define __NR_create_module 127 +#define __NR_init_module 128 +#define __NR_delete_module 129 +#define __NR_get_kernel_syms 130 +#define __NR_quotactl 131 +#define __NR_getpgid 132 +#define __NR_fchdir 133 +#define __NR_bdflush 134 +#define __NR_sysfs 135 +#define __NR_personality 136 +#define __NR_afs_syscall 137 /* Syscall for Andrew File System */ +#define __NR_setfsuid 138 +#define __NR_setfsgid 139 +#define __NR__llseek 140 +#define __NR_getdents 141 +#define __NR__newselect 142 +#define __NR_flock 143 +#define __NR_msync 144 +#define __NR_readv 145 +#define __NR_writev 146 +#define __NR_cacheflush 147 +#define __NR_cachectl 148 +#define __NR_sysxtensa 149 +#define __NR_sysdummy 150 +#define __NR_getsid 151 +#define __NR_fdatasync 152 +#define __NR__sysctl 153 +#define __NR_mlock 154 +#define __NR_munlock 155 +#define __NR_mlockall 156 +#define __NR_munlockall 157 +#define __NR_sched_setparam 158 +#define __NR_sched_getparam 159 +#define __NR_sched_setscheduler 160 +#define __NR_sched_getscheduler 161 +#define __NR_sched_yield 162 +#define __NR_sched_get_priority_max 163 +#define __NR_sched_get_priority_min 164 +#define __NR_sched_rr_get_interval 165 +#define __NR_nanosleep 166 +#define __NR_mremap 167 +#define __NR_accept 168 +#define __NR_bind 169 +#define __NR_connect 170 +#define __NR_getpeername 171 +#define __NR_getsockname 172 +#define __NR_getsockopt 173 +#define __NR_listen 174 +#define __NR_recv 175 +#define __NR_recvfrom 176 +#define __NR_recvmsg 177 +#define __NR_send 178 +#define __NR_sendmsg 179 +#define __NR_sendto 180 +#define __NR_setsockopt 181 +#define __NR_shutdown 182 +#define __NR_socket 183 +#define __NR_socketpair 184 +#define __NR_setresuid 185 +#define __NR_getresuid 186 +#define __NR_query_module 187 +#define __NR_poll 188 +#define __NR_nfsservctl 189 +#define __NR_setresgid 190 +#define __NR_getresgid 191 +#define __NR_prctl 192 +#define __NR_rt_sigreturn 193 +#define __NR_rt_sigaction 194 +#define __NR_rt_sigprocmask 195 +#define __NR_rt_sigpending 196 +#define __NR_rt_sigtimedwait 197 +#define __NR_rt_sigqueueinfo 198 
+#define __NR_rt_sigsuspend 199 +#define __NR_pread 200 +#define __NR_pwrite 201 +#define __NR_chown 202 +#define __NR_getcwd 203 +#define __NR_capget 204 +#define __NR_capset 205 +#define __NR_sigaltstack 206 +#define __NR_sendfile 207 +#define __NR_streams1 208 /* some people actually want it */ +#define __NR_streams2 209 /* some people actually want it */ +#define __NR_mmap2 210 +#define __NR_truncate64 211 +#define __NR_ftruncate64 212 +#define __NR_stat64 213 +#define __NR_lstat64 214 +#define __NR_fstat64 215 +#define __NR_pivot_root 216 +#define __NR_mincore 217 +#define __NR_madvise 218 +#define __NR_getdents64 219 +#define __NR_vfork 220 + +/* Keep this last; should always equal the last valid call number. */ +#define __NR_Linux_syscalls 220 + +/* user-visible error numbers are in the range -1 - -125: see + * <asm-xtensa/errno.h> */ + +#define SYSXTENSA_RESERVED 0 /* don't use this */ +#define SYSXTENSA_ATOMIC_SET 1 /* set variable */ +#define SYSXTENSA_ATOMIC_EXG_ADD 2 /* exchange memory and add */ +#define SYSXTENSA_ATOMIC_ADD 3 /* add to memory */ +#define SYSXTENSA_ATOMIC_CMP_SWP 4 /* compare and swap */ + +#define SYSXTENSA_COUNT 5 /* count of syscall0 functions*/ + +#ifdef __KERNEL__ +#define __syscall_return(type, res) return ((type)(res)) +#else +#define __syscall_return(type, res) \ +do { \ + if ((unsigned long)(res) >= (unsigned long)(-125)) { \ + /* Avoid using "res" which is declared to be in register r2; \ + * errno might expand to a function call and clobber it. */ \ + int __err = -(res); \ + errno = __err; \ + res = -1; \ + } \ + return (type) (res); \ +} while (0) +#endif + + +/* Tensilica's xt-xcc compiler is much more agressive at code + * optimization than gcc. Multiple __asm__ statements are + * insufficient for xt-xcc because subsequent optimization passes + * (beyond the front-end that knows of __asm__ statements and other + * such GNU Extensions to C) can modify the register selection for + * containment of C variables. + * + * xt-xcc cannot modify the contents of a single __asm__ statement, so + * we create single-asm versions of the syscall macros that are + * suitable and optimal for both xt-xcc and gcc. + * + * Linux takes system-call arguments in registers. The following + * design is optimized for user-land apps (e.g., glibc) which + * typically have a function wrapper around the "syscall" assembly + * instruction. It satisfies the Xtensa ABI while minizing argument + * shifting. + * + * The Xtensa ABI and software conventions require the system-call + * number in a2. If an argument exists in a2, we move it to the next + * available register. Note that for improved efficiency, we do NOT + * shift all parameters down one register to maintain the original + * order. + * + * At best case (zero arguments), we just write the syscall number to + * a2. At worst case (1 to 6 arguments), we move the argument in a2 + * to the next available register, then write the syscall number to + * a2. + * + * For clarity, the following truth table enumerates all possibilities. 
+ * + * arguments syscall number arg0, arg1, arg2, arg3, arg4, arg5 + * --------- -------------- ---------------------------------- + * 0 a2 + * 1 a2 a3 + * 2 a2 a4, a3 + * 3 a2 a5, a3, a4 + * 4 a2 a6, a3, a4, a5 + * 5 a2 a7, a3, a4, a5, a6 + * 6 a2 a8, a3, a4, a5, a6, a7 + */ + +#define _syscall0(type,name) \ +type name(void) \ +{ \ +long __res; \ +__asm__ __volatile__ ( \ + " movi a2, %1 \n" \ + " syscall \n" \ + " mov %0, a2 \n" \ + : "=a" (__res) \ + : "i" (__NR_##name) \ + : "a2" \ + ); \ +__syscall_return(type,__res); \ +} + +#define _syscall1(type,name,type0,arg0) \ +type name(type0 arg0) \ +{ \ +long __res; \ +__asm__ __volatile__ ( \ + " mov a3, %2 \n" \ + " movi a2, %1 \n" \ + " syscall \n" \ + " mov %0, a2 \n" \ + : "=a" (__res) \ + : "i" (__NR_##name), "a" (arg0) \ + : "a2", "a3" \ + ); \ +__syscall_return(type,__res); \ +} + +#define _syscall2(type,name,type0,arg0,type1,arg1) \ +type name(type0 arg0,type1 arg1) \ +{ \ +long __res; \ +__asm__ __volatile__ ( \ + " mov a4, %2 \n" \ + " mov a3, %3 \n" \ + " movi a2, %1 \n" \ + " syscall \n" \ + " mov %0, a2 \n" \ + : "=a" (__res) \ + : "i" (__NR_##name), "a" (arg0), "a" (arg1) \ + : "a2", "a3", "a4" \ + ); \ +__syscall_return(type,__res); \ +} + +#define _syscall3(type,name,type0,arg0,type1,arg1,type2,arg2) \ +type name(type0 arg0,type1 arg1,type2 arg2) \ +{ \ +long __res; \ +__asm__ __volatile__ ( \ + " mov a5, %2 \n" \ + " mov a4, %4 \n" \ + " mov a3, %3 \n" \ + " movi a2, %1 \n" \ + " syscall \n" \ + " mov %0, a2 \n" \ + : "=a" (__res) \ + : "i" (__NR_##name), "a" (arg0), "a" (arg1), "a" (arg2) \ + : "a2", "a3", "a4", "a5" \ + ); \ +__syscall_return(type,__res); \ +} + +#define _syscall4(type,name,type0,arg0,type1,arg1,type2,arg2,type3,arg3) \ +type name(type0 arg0,type1 arg1,type2 arg2,type3 arg3) \ +{ \ +long __res; \ +__asm__ __volatile__ ( \ + " mov a6, %2 \n" \ + " mov a5, %5 \n" \ + " mov a4, %4 \n" \ + " mov a3, %3 \n" \ + " movi a2, %1 \n" \ + " syscall \n" \ + " mov %0, a2 \n" \ + : "=a" (__res) \ + : "i" (__NR_##name), "a" (arg0), "a" (arg1), "a" (arg2), "a" (arg3) \ + : "a2", "a3", "a4", "a5", "a6" \ + ); \ +__syscall_return(type,__res); \ +} + +/* Note that we save and restore the a7 frame pointer. + * Including a7 in the clobber list doesn't do what you'd expect. + */ +#define _syscall5(type,name,type0,arg0,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ +type name(type0 arg0,type1 arg1,type2 arg2,type3 arg3,type4 arg4) \ +{ \ +long __res; \ +__asm__ __volatile__ ( \ + " mov a9, a7 \n" \ + " mov a7, %2 \n" \ + " mov a6, %6 \n" \ + " mov a5, %5 \n" \ + " mov a4, %4 \n" \ + " mov a3, %3 \n" \ + " movi a2, %1 \n" \ + " syscall \n" \ + " mov a7, a9 \n" \ + " mov %0, a2 \n" \ + : "=a" (__res) \ + : "i" (__NR_##name), "a" (arg0), "a" (arg1), "a" (arg2), \ + "a" (arg3), "a" (arg4) \ + : "a2", "a3", "a4", "a5", "a6", "a9" \ + ); \ +__syscall_return(type,__res); \ +} + +/* Note that we save and restore the a7 frame pointer. + * Including a7 in the clobber list doesn't do what you'd expect. 
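+ *
+ * As a concrete illustration of the register convention above: the
+ * wrapper generated by _syscall2(int,kill,int,pid,int,sig)
+ * (instantiated below under __KERNEL_SYSCALLS__) moves pid into a4 and
+ * sig into a3, loads __NR_kill into a2, executes the syscall
+ * instruction, and hands the value left in a2 to __syscall_return();
+ * the user-space variant of that macro turns an error return in the
+ * range -1..-125 into errno plus a -1 result, while the kernel variant
+ * simply casts the value.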
+ */ +#define _syscall6(type,name,type0,arg0,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \ +type name(type0 arg0,type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \ +{ \ +long __res; \ +__asm__ __volatile__ ( \ + " mov a9, a7 \n" \ + " mov a8, %2 \n" \ + " mov a7, %7 \n" \ + " mov a6, %6 \n" \ + " mov a5, %5 \n" \ + " mov a4, %4 \n" \ + " mov a3, %3 \n" \ + " movi a2, %1 \n" \ + " syscall \n" \ + " mov a7, a9 \n" \ + " mov %0, a2 \n" \ + : "=a" (__res) \ + : "i" (__NR_##name), "a" (arg0), "a" (arg1), "a" (arg2), \ + "a" (arg3), "a" (arg4), "a" (arg5) \ + : "a2", "a3", "a4", "a5", "a6", "a8", "a9" \ + ); \ +__syscall_return(type,__res); \ +} + + +#ifdef __KERNEL_SYSCALLS__ + +#include <linux/compiler.h> +#include <linux/types.h> +#include <linux/syscalls.h> + +/* + * we need this inline - forking from kernel space will result + * in NO COPY ON WRITE (!!!), until an execve is executed. This + * is no problem, but for the stack. This is handled by not letting + * main() use the stack at all after fork(). Thus, no function + * calls - which means inline code for fork too, as otherwise we + * would use the stack upon exit from 'fork()'. + * + * Actually only pause and fork are needed inline, so that there + * won't be any messing with the stack from main(), but we define + * some others too. + */ + +#define __NR__exit __NR_exit + +static __inline__ _syscall0(int,pause) +//static __inline__ _syscall1(int,setup,int,magic) FIXME +static __inline__ _syscall0(int,sync) +static __inline__ _syscall0(pid_t,setsid) +static __inline__ _syscall3(int,write,int,fd,const char *,buf,off_t,count) +static __inline__ _syscall3(int,read,int,fd,char *,buf,off_t,count) +static __inline__ _syscall3(off_t,lseek,int,fd,off_t,offset,int,count) +static __inline__ _syscall1(int,dup,int,fd) +static __inline__ _syscall3(int,execve,const char*,file,char**,argv,char**,envp) +static __inline__ _syscall3(int,open,const char *,file,int,flag,int,mode) +static __inline__ _syscall1(int,close,int,fd) +static __inline__ _syscall1(int,_exit,int,exitcode) +static __inline__ _syscall3(pid_t,waitpid,pid_t,pid,int *,wait_stat,int,options) +static __inline__ _syscall1(int,delete_module,const char *,name) + +struct stat; +static __inline__ _syscall2(int,fstat,int,fd,struct stat *,buf) +static __inline__ _syscall0(pid_t,getpid) +static __inline__ _syscall2(int,kill,int,pid,int,sig) +static __inline__ _syscall2(int,stat,const char *, path,struct stat *,buf) +static __inline__ _syscall1(int,unlink,char *,pathname) + + + +extern pid_t waitpid(int, int*, int ); +static __inline__ pid_t wait(int * wait_stat) +{ + return waitpid(-1,wait_stat,0); +} +#endif + +/* + * "Conditional" syscalls + * + * What we want is __attribute__((weak,alias("sys_ni_syscall"))), + * but it doesn't work on all toolchains, so we just do it by hand + */ +#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall"); + +#ifdef __KERNEL__ +#define __ARCH_WANT_IPC_PARSE_VERSION +#define __ARCH_WANT_OLD_READDIR +#define __ARCH_WANT_OLD_STAT +#define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SYS_ALARM +#define __ARCH_WANT_SYS_GETHOSTNAME +#define __ARCH_WANT_SYS_PAUSE +#define __ARCH_WANT_SYS_SGETMASK +#define __ARCH_WANT_SYS_SIGNAL +#define __ARCH_WANT_SYS_TIME +#define __ARCH_WANT_SYS_UTIME +#define __ARCH_WANT_SYS_WAITPID +#define __ARCH_WANT_SYS_SOCKETCALL +#define __ARCH_WANT_SYS_FADVISE64 +#define __ARCH_WANT_SYS_GETPGRP +#define __ARCH_WANT_SYS_LLSEEK +#define __ARCH_WANT_SYS_NICE +#define __ARCH_WANT_SYS_OLD_GETRLIMIT +#define 
__ARCH_WANT_SYS_OLDUMOUNT +#define __ARCH_WANT_SYS_SIGPENDING +#define __ARCH_WANT_SYS_SIGPROCMASK +#define __ARCH_WANT_SYS_RT_SIGACTION +#endif + + + +#endif /* _XTENSA_UNISTD_H */ diff --git a/include/asm-xtensa/user.h b/include/asm-xtensa/user.h new file mode 100644 index 0000000..2c3ed23 --- /dev/null +++ b/include/asm-xtensa/user.h @@ -0,0 +1,20 @@ +/* + * include/asm-xtensa/user.h + * + * Xtensa Processor version. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_USER_H +#define _XTENSA_USER_H + +/* This file usually defines a 'struct user' structure. However, it it only + * used for a.out file, which are not supported on Xtensa. + */ + +#endif /* _XTENSA_USER_H */ diff --git a/include/asm-xtensa/vga.h b/include/asm-xtensa/vga.h new file mode 100644 index 0000000..23d82f6 --- /dev/null +++ b/include/asm-xtensa/vga.h @@ -0,0 +1,19 @@ +/* + * include/asm-xtensa/vga.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_VGA_H +#define _XTENSA_VGA_H + +#define VGA_MAP_MEM(x) (unsigned long)phys_to_virt(x) + +#define vga_readb(x) (*(x)) +#define vga_writeb(x,y) (*(y) = (x)) + +#endif diff --git a/include/asm-xtensa/xor.h b/include/asm-xtensa/xor.h new file mode 100644 index 0000000..e7b1f08 --- /dev/null +++ b/include/asm-xtensa/xor.h @@ -0,0 +1,16 @@ +/* + * include/asm-xtensa/xor.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_XOR_H +#define _XTENSA_XOR_H + +#include <asm-generic/xor.h> + +#endif diff --git a/include/asm-xtensa/xtensa/cacheasm.h b/include/asm-xtensa/xtensa/cacheasm.h new file mode 100644 index 0000000..0cdbb0b --- /dev/null +++ b/include/asm-xtensa/xtensa/cacheasm.h @@ -0,0 +1,708 @@ +#ifndef XTENSA_CACHEASM_H +#define XTENSA_CACHEASM_H + +/* + * THIS FILE IS GENERATED -- DO NOT MODIFY BY HAND + * + * include/asm-xtensa/xtensa/cacheasm.h -- assembler-specific cache + * related definitions that depend on CORE configuration. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2002 Tensilica Inc. + */ + + +#include <xtensa/coreasm.h> + + +/* + * This header file defines assembler macros of the form: + * <x>cache_<func> + * where <x> is 'i' or 'd' for instruction and data caches, + * and <func> indicates the function of the macro. + * + * The following functions <func> are defined, + * and apply only to the specified cache (I or D): + * + * reset + * Resets the cache. + * + * sync + * Makes sure any previous cache instructions have been completed; + * ie. makes sure any previous cache control operations + * have had full effect and been synchronized to memory. + * Eg. any invalidate completed [so as not to generate a hit], + * any writebacks or other pipelined writes written to memory, etc. 
+ * + * invalidate_line (single cache line) + * invalidate_region (specified memory range) + * invalidate_all (entire cache) + * Invalidates all cache entries that cache + * data from the specified memory range. + * NOTE: locked entries are not invalidated. + * + * writeback_line (single cache line) + * writeback_region (specified memory range) + * writeback_all (entire cache) + * Writes back to memory all dirty cache entries + * that cache data from the specified memory range, + * and marks these entries as clean. + * NOTE: on some future implementations, this might + * also invalidate. + * NOTE: locked entries are written back, but never invalidated. + * NOTE: instruction caches never implement writeback. + * + * writeback_inv_line (single cache line) + * writeback_inv_region (specified memory range) + * writeback_inv_all (entire cache) + * Writes back to memory all dirty cache entries + * that cache data from the specified memory range, + * and invalidates these entries (including all clean + * cache entries that cache data from that range). + * NOTE: locked entries are written back but not invalidated. + * NOTE: instruction caches never implement writeback. + * + * lock_line (single cache line) + * lock_region (specified memory range) + * Prefetch and lock the specified memory range into cache. + * NOTE: if any part of the specified memory range cannot + * be locked, a ??? exception occurs. These macros don't + * do anything special (yet anyway) to handle this situation. + * + * unlock_line (single cache line) + * unlock_region (specified memory range) + * unlock_all (entire cache) + * Unlock cache entries that cache the specified memory range. + * Entries not already locked are unaffected. + */ + + + +/*************************** GENERIC -- ALL CACHES ***************************/ + + +/* + * The following macros assume the following cache size/parameter limits + * in the current Xtensa core implementation: + * cache size: 1024 bytes minimum + * line size: 16 - 64 bytes + * way count: 1 - 4 + * + * Minimum entries per way (ie. per associativity) = 1024 / 64 / 4 = 4 + * Hence the assumption that each loop can execute four cache instructions. + * + * Correspondingly, the offset range of instructions is assumed able to cover + * four lines, ie. offsets {0,1,2,3} * line_size are assumed valid for + * both hit and indexed cache instructions. Ie. these offsets are all + * valid: 0, 16, 32, 48, 64, 96, 128, 192 (for line sizes 16, 32, 64). + * This is true of all original cache instructions + * (dhi, ihi, dhwb, dhwbi, dii, iii) which have offsets + * of 0 to 1020 in multiples of 4 (ie. 8 bits shifted by 2). + * This is also true of subsequent cache instructions + * (dhu, ihu, diu, iiu, diwb, diwbi, dpfl, ipfl) which have offsets + * of 0 to 240 in multiples of 16 (ie. 4 bits shifted by 4). + * + * (Maximum cache size, currently 32k, doesn't affect the following macros. + * Cache ways > MMU min page size cause aliasing but that's another matter.) + */ + + + +/* + * Macro to apply an 'indexed' cache instruction to the entire cache. + * + * Parameters: + * cainst instruction/ that takes an address register parameter + * and an offset parameter (in range 0 .. 3*linesize). + * size size of cache in bytes + * linesize size of cache line in bytes + * assoc_or1 number of associativities (ways/sets) in cache + * if all sets affected by cainst, + * or 1 if only one set (or not all sets) of the cache + * is affected by cainst (eg. DIWB or DIWBI [not yet ISA defined]). 
+ * aa, ab unique address registers (temporaries) + */ + + .macro cache_index_all cainst, size, linesize, assoc_or1, aa, ab + + // Sanity-check on cache parameters: + .ifne (\size % (\linesize * \assoc_or1 * 4)) + .err // cache configuration outside expected/supported range! + .endif + + // \size byte cache, \linesize byte lines, \assoc_or1 way(s) affected by each \cainst. + movi \aa, (\size / (\linesize * \assoc_or1 * 4)) + // Possible improvement: need only loop if \aa > 1 ; + // however that particular condition is highly unlikely. + movi \ab, 0 // to iterate over cache + floop \aa, cachex\@ + \cainst \ab, 0*\linesize + \cainst \ab, 1*\linesize + \cainst \ab, 2*\linesize + \cainst \ab, 3*\linesize + addi \ab, \ab, 4*\linesize // move to next line + floopend \aa, cachex\@ + + .endm + + +/* + * Macro to apply a 'hit' cache instruction to a memory region, + * ie. to any cache entries that cache a specified portion (region) of memory. + * Takes care of the unaligned cases, ie. may apply to one + * more cache line than $asize / lineSize if $aaddr is not aligned. + * + * + * Parameters are: + * cainst instruction/macro that takes an address register parameter + * and an offset parameter (currently always zero) + * and generates a cache instruction (eg. "dhi", "dhwb", "ihi", etc.) + * linesize_log2 log2(size of cache line in bytes) + * addr register containing start address of region (clobbered) + * asize register containing size of the region in bytes (clobbered) + * askew unique register used as temporary + * + * !?!?! 2DO: optimization: iterate max(cache_size and \asize) / linesize + */ + + .macro cache_hit_region cainst, linesize_log2, addr, asize, askew + + // Make \asize the number of iterations: + extui \askew, \addr, 0, \linesize_log2 // get unalignment amount of \addr + add \asize, \asize, \askew // ... and add it to \asize + addi \asize, \asize, (1 << \linesize_log2) - 1 // round up! + srli \asize, \asize, \linesize_log2 + + // Iterate over region: + floopnez \asize, cacheh\@ + \cainst \addr, 0 + addi \addr, \addr, (1 << \linesize_log2) // move to next line + floopend \asize, cacheh\@ + + .endm + + + + + +/*************************** INSTRUCTION CACHE ***************************/ + + +/* + * Reset/initialize the instruction cache by simply invalidating it: + * (need to unlock first also, if cache locking implemented): + * + * Parameters: + * aa, ab unique address registers (temporaries) + */ + .macro icache_reset aa, ab + icache_unlock_all \aa, \ab + icache_invalidate_all \aa, \ab + .endm + + +/* + * Synchronize after an instruction cache operation, + * to be sure everything is in sync with memory as to be + * expected following any previous instruction cache control operations. + * + * Parameters are: + * ar an address register (temporary) (currently unused, but may be used in future) + */ + .macro icache_sync ar +#if XCHAL_ICACHE_SIZE > 0 + isync +#endif + .endm + + + +/* + * Invalidate a single line of the instruction cache. + * Parameters are: + * ar address register that contains (virtual) address to invalidate + * (may get clobbered in a future implementation, but not currently) + * offset (optional) offset to add to \ar to compute effective address to invalidate + * (note: some number of lsbits are ignored) + */ + .macro icache_invalidate_line ar, offset +#if XCHAL_ICACHE_SIZE > 0 + ihi \ar, \offset // invalidate icache line + /* + * NOTE: in some version of the silicon [!!!SHOULD HAVE BEEN DOCUMENTED!!!] 
+ * 'ihi' doesn't work, so it had been replaced with 'iii' + * (which would just invalidate more than it should, + * which should be okay other than the performance hit + * because cache locking did not exist in that version, + * unless user somehow relies on something being cached). + * [WHAT VERSION IS IT!!?!? + * IS THERE ANY WAY TO TEST FOR THAT HERE, TO OUTPUT 'III' ONLY IF NEEDED!?!?]. + * + * iii \ar, \offset + */ + icache_sync \ar +#endif + .endm + + + + +/* + * Invalidate instruction cache entries that cache a specified portion of memory. + * Parameters are: + * astart start address (register gets clobbered) + * asize size of the region in bytes (register gets clobbered) + * ac unique register used as temporary + */ + .macro icache_invalidate_region astart, asize, ac +#if XCHAL_ICACHE_SIZE > 0 + // Instruction cache region invalidation: + cache_hit_region ihi, XCHAL_ICACHE_LINEWIDTH, \astart, \asize, \ac + icache_sync \ac + // End of instruction cache region invalidation +#endif + .endm + + + +/* + * Invalidate entire instruction cache. + * + * Parameters: + * aa, ab unique address registers (temporaries) + */ + .macro icache_invalidate_all aa, ab +#if XCHAL_ICACHE_SIZE > 0 + // Instruction cache invalidation: + cache_index_all iii, XCHAL_ICACHE_SIZE, XCHAL_ICACHE_LINESIZE, XCHAL_ICACHE_WAYS, \aa, \ab + icache_sync \aa + // End of instruction cache invalidation +#endif + .endm + + + +/* + * Lock (prefetch & lock) a single line of the instruction cache. + * + * Parameters are: + * ar address register that contains (virtual) address to lock + * (may get clobbered in a future implementation, but not currently) + * offset offset to add to \ar to compute effective address to lock + * (note: some number of lsbits are ignored) + */ + .macro icache_lock_line ar, offset +#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE + ipfl \ar, \offset /* prefetch and lock icache line */ + icache_sync \ar +#endif + .endm + + + +/* + * Lock (prefetch & lock) a specified portion of memory into the instruction cache. + * Parameters are: + * astart start address (register gets clobbered) + * asize size of the region in bytes (register gets clobbered) + * ac unique register used as temporary + */ + .macro icache_lock_region astart, asize, ac +#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE + // Instruction cache region lock: + cache_hit_region ipfl, XCHAL_ICACHE_LINEWIDTH, \astart, \asize, \ac + icache_sync \ac + // End of instruction cache region lock +#endif + .endm + + + +/* + * Unlock a single line of the instruction cache. + * + * Parameters are: + * ar address register that contains (virtual) address to unlock + * (may get clobbered in a future implementation, but not currently) + * offset offset to add to \ar to compute effective address to unlock + * (note: some number of lsbits are ignored) + */ + .macro icache_unlock_line ar, offset +#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE + ihu \ar, \offset /* unlock icache line */ + icache_sync \ar +#endif + .endm + + + +/* + * Unlock a specified portion of memory from the instruction cache. 
+ * Parameters are: + * astart start address (register gets clobbered) + * asize size of the region in bytes (register gets clobbered) + * ac unique register used as temporary + */ + .macro icache_unlock_region astart, asize, ac +#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE + // Instruction cache region unlock: + cache_hit_region ihu, XCHAL_ICACHE_LINEWIDTH, \astart, \asize, \ac + icache_sync \ac + // End of instruction cache region unlock +#endif + .endm + + + +/* + * Unlock entire instruction cache. + * + * Parameters: + * aa, ab unique address registers (temporaries) + */ + .macro icache_unlock_all aa, ab +#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE + // Instruction cache unlock: + cache_index_all iiu, XCHAL_ICACHE_SIZE, XCHAL_ICACHE_LINESIZE, 1, \aa, \ab + icache_sync \aa + // End of instruction cache unlock +#endif + .endm + + + + + +/*************************** DATA CACHE ***************************/ + + + +/* + * Reset/initialize the data cache by simply invalidating it + * (need to unlock first also, if cache locking implemented): + * + * Parameters: + * aa, ab unique address registers (temporaries) + */ + .macro dcache_reset aa, ab + dcache_unlock_all \aa, \ab + dcache_invalidate_all \aa, \ab + .endm + + + + +/* + * Synchronize after a data cache operation, + * to be sure everything is in sync with memory as to be + * expected following any previous data cache control operations. + * + * Parameters are: + * ar an address register (temporary) (currently unused, but may be used in future) + */ + .macro dcache_sync ar +#if XCHAL_DCACHE_SIZE > 0 + // This previous sequence errs on the conservative side (too much so); a DSYNC should be sufficient: + //memw // synchronize data cache changes relative to subsequent memory accesses + //isync // be conservative and ISYNC as well (just to be sure) + + dsync +#endif + .endm + + + +/* + * Synchronize after a data store operation, + * to be sure the stored data is completely off the processor + * (and assuming there is no buffering outside the processor, + * that the data is in memory). This may be required to + * ensure that the processor's write buffers are emptied. + * A MEMW followed by a read guarantees this, by definition. + * We also try to make sure the read itself completes. + * + * Parameters are: + * ar an address register (temporary) + */ + .macro write_sync ar + memw // ensure previous memory accesses are complete prior to subsequent memory accesses + l32i \ar, sp, 0 // completing this read ensures any previous write has completed, because of MEMW + //slot + add \ar, \ar, \ar // use the result of the read to help ensure the read completes (in future architectures) + .endm + + +/* + * Invalidate a single line of the data cache. + * Parameters are: + * ar address register that contains (virtual) address to invalidate + * (may get clobbered in a future implementation, but not currently) + * offset (optional) offset to add to \ar to compute effective address to invalidate + * (note: some number of lsbits are ignored) + */ + .macro dcache_invalidate_line ar, offset +#if XCHAL_DCACHE_SIZE > 0 + dhi \ar, \offset + dcache_sync \ar +#endif + .endm + + + + + +/* + * Invalidate data cache entries that cache a specified portion of memory. 
+ * Parameters are: + * astart start address (register gets clobbered) + * asize size of the region in bytes (register gets clobbered) + * ac unique register used as temporary + */ + .macro dcache_invalidate_region astart, asize, ac +#if XCHAL_DCACHE_SIZE > 0 + // Data cache region invalidation: + cache_hit_region dhi, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac + dcache_sync \ac + // End of data cache region invalidation +#endif + .endm + + + +#if 0 +/* + * This is a work-around for a bug in SiChip1 (???). + * There should be a proper mechanism for not outputting + * these instructions when not needed. + * To enable work-around, uncomment this and replace 'dii' + * with 'dii_s1' everywhere, eg. in dcache_invalidate_all + * macro below. + */ + .macro dii_s1 ar, offset + dii \ar, \offset + or \ar, \ar, \ar + or \ar, \ar, \ar + or \ar, \ar, \ar + or \ar, \ar, \ar + .endm +#endif + + +/* + * Invalidate entire data cache. + * + * Parameters: + * aa, ab unique address registers (temporaries) + */ + .macro dcache_invalidate_all aa, ab +#if XCHAL_DCACHE_SIZE > 0 + // Data cache invalidation: + cache_index_all dii, XCHAL_DCACHE_SIZE, XCHAL_DCACHE_LINESIZE, XCHAL_DCACHE_WAYS, \aa, \ab + dcache_sync \aa + // End of data cache invalidation +#endif + .endm + + + +/* + * Writeback a single line of the data cache. + * Parameters are: + * ar address register that contains (virtual) address to writeback + * (may get clobbered in a future implementation, but not currently) + * offset offset to add to \ar to compute effective address to writeback + * (note: some number of lsbits are ignored) + */ + .macro dcache_writeback_line ar, offset +#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_IS_WRITEBACK + dhwb \ar, \offset + dcache_sync \ar +#endif + .endm + + + +/* + * Writeback dirty data cache entries that cache a specified portion of memory. + * Parameters are: + * astart start address (register gets clobbered) + * asize size of the region in bytes (register gets clobbered) + * ac unique register used as temporary + */ + .macro dcache_writeback_region astart, asize, ac +#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_IS_WRITEBACK + // Data cache region writeback: + cache_hit_region dhwb, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac + dcache_sync \ac + // End of data cache region writeback +#endif + .endm + + + +/* + * Writeback entire data cache. + * Parameters: + * aa, ab unique address registers (temporaries) + */ + .macro dcache_writeback_all aa, ab +#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_IS_WRITEBACK + // Data cache writeback: + cache_index_all diwb, XCHAL_DCACHE_SIZE, XCHAL_DCACHE_LINESIZE, 1, \aa, \ab + dcache_sync \aa + // End of data cache writeback +#endif + .endm + + + +/* + * Writeback and invalidate a single line of the data cache. + * Parameters are: + * ar address register that contains (virtual) address to writeback and invalidate + * (may get clobbered in a future implementation, but not currently) + * offset offset to add to \ar to compute effective address to writeback and invalidate + * (note: some number of lsbits are ignored) + */ + .macro dcache_writeback_inv_line ar, offset +#if XCHAL_DCACHE_SIZE > 0 + dhwbi \ar, \offset /* writeback and invalidate dcache line */ + dcache_sync \ar +#endif + .endm + + + +/* + * Writeback and invalidate data cache entries that cache a specified portion of memory. 
+ * Parameters are: + * astart start address (register gets clobbered) + * asize size of the region in bytes (register gets clobbered) + * ac unique register used as temporary + */ + .macro dcache_writeback_inv_region astart, asize, ac +#if XCHAL_DCACHE_SIZE > 0 + // Data cache region writeback and invalidate: + cache_hit_region dhwbi, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac + dcache_sync \ac + // End of data cache region writeback and invalidate +#endif + .endm + + + +/* + * Writeback and invalidate entire data cache. + * Parameters: + * aa, ab unique address registers (temporaries) + */ + .macro dcache_writeback_inv_all aa, ab +#if XCHAL_DCACHE_SIZE > 0 + // Data cache writeback and invalidate: +#if XCHAL_DCACHE_IS_WRITEBACK + cache_index_all diwbi, XCHAL_DCACHE_SIZE, XCHAL_DCACHE_LINESIZE, 1, \aa, \ab + dcache_sync \aa +#else /*writeback*/ + // Data cache does not support writeback, so just invalidate: */ + dcache_invalidate_all \aa, \ab +#endif /*writeback*/ + // End of data cache writeback and invalidate +#endif + .endm + + + + +/* + * Lock (prefetch & lock) a single line of the data cache. + * + * Parameters are: + * ar address register that contains (virtual) address to lock + * (may get clobbered in a future implementation, but not currently) + * offset offset to add to \ar to compute effective address to lock + * (note: some number of lsbits are ignored) + */ + .macro dcache_lock_line ar, offset +#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE + dpfl \ar, \offset /* prefetch and lock dcache line */ + dcache_sync \ar +#endif + .endm + + + +/* + * Lock (prefetch & lock) a specified portion of memory into the data cache. + * Parameters are: + * astart start address (register gets clobbered) + * asize size of the region in bytes (register gets clobbered) + * ac unique register used as temporary + */ + .macro dcache_lock_region astart, asize, ac +#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE + // Data cache region lock: + cache_hit_region dpfl, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac + dcache_sync \ac + // End of data cache region lock +#endif + .endm + + + +/* + * Unlock a single line of the data cache. + * + * Parameters are: + * ar address register that contains (virtual) address to unlock + * (may get clobbered in a future implementation, but not currently) + * offset offset to add to \ar to compute effective address to unlock + * (note: some number of lsbits are ignored) + */ + .macro dcache_unlock_line ar, offset +#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE + dhu \ar, \offset /* unlock dcache line */ + dcache_sync \ar +#endif + .endm + + + +/* + * Unlock a specified portion of memory from the data cache. + * Parameters are: + * astart start address (register gets clobbered) + * asize size of the region in bytes (register gets clobbered) + * ac unique register used as temporary + */ + .macro dcache_unlock_region astart, asize, ac +#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE + // Data cache region unlock: + cache_hit_region dhu, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac + dcache_sync \ac + // End of data cache region unlock +#endif + .endm + + + +/* + * Unlock entire data cache. 
+ * + * Parameters: + * aa, ab unique address registers (temporaries) + */ + .macro dcache_unlock_all aa, ab +#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE + // Data cache unlock: + cache_index_all diu, XCHAL_DCACHE_SIZE, XCHAL_DCACHE_LINESIZE, 1, \aa, \ab + dcache_sync \aa + // End of data cache unlock +#endif + .endm + + +#endif /*XTENSA_CACHEASM_H*/ + diff --git a/include/asm-xtensa/xtensa/cacheattrasm.h b/include/asm-xtensa/xtensa/cacheattrasm.h new file mode 100644 index 0000000..1c3e117 --- /dev/null +++ b/include/asm-xtensa/xtensa/cacheattrasm.h @@ -0,0 +1,432 @@ +#ifndef XTENSA_CACHEATTRASM_H +#define XTENSA_CACHEATTRASM_H + +/* + * THIS FILE IS GENERATED -- DO NOT MODIFY BY HAND + * + * include/asm-xtensa/xtensa/cacheattrasm.h -- assembler-specific + * CACHEATTR register related definitions that depend on CORE + * configuration. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2002 Tensilica Inc. + */ + + +#include <xtensa/coreasm.h> + + +/* + * This header file defines assembler macros of the form: + * <x>cacheattr_<func> + * where: + * <x> is 'i', 'd' or absent for instruction, data + * or both caches; and + * <func> indicates the function of the macro. + * + * The following functions are defined: + * + * icacheattr_get + * Reads I-cache CACHEATTR into a2 (clobbers a3-a5). + * + * dcacheattr_get + * Reads D-cache CACHEATTR into a2 (clobbers a3-a5). + * (Note: for configs with a real CACHEATTR register, the + * above two macros are identical.) + * + * cacheattr_set + * Writes both I-cache and D-cache CACHEATTRs from a2 (a3-a8 clobbered). + * Works even when changing one's own code's attributes. + * + * icacheattr_is_enabled label + * Branches to \label if I-cache appears to have been enabled + * (eg. if CACHEATTR contains a cache-enabled attribute). + * (clobbers a2-a5,SAR) + * + * dcacheattr_is_enabled label + * Branches to \label if D-cache appears to have been enabled + * (eg. if CACHEATTR contains a cache-enabled attribute). + * (clobbers a2-a5,SAR) + * + * cacheattr_is_enabled label + * Branches to \label if either I-cache or D-cache appears to have been enabled + * (eg. if CACHEATTR contains a cache-enabled attribute). + * (clobbers a2-a5,SAR) + * + * The following macros are only defined under certain conditions: + * + * icacheattr_set (if XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR) + * Writes I-cache CACHEATTR from a2 (a3-a8 clobbered). + * + * dcacheattr_set (if XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR) + * Writes D-cache CACHEATTR from a2 (a3-a8 clobbered). + */ + + + +/*************************** GENERIC -- ALL CACHES ***************************/ + +/* + * _cacheattr_get + * + * (Internal macro.) + * Returns value of CACHEATTR register (or closest equivalent) in a2. + * + * Entry: + * (none) + * Exit: + * a2 value read from CACHEATTR + * a3-a5 clobbered (temporaries) + */ + .macro _cacheattr_get tlb +#if XCHAL_HAVE_CACHEATTR + rsr a2, CACHEATTR +#elif XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR + // We have a config that "mimics" CACHEATTR using a simplified + // "MMU" composed of a single statically-mapped way. + // DTLB and ITLB are independent, so there's no single + // cache attribute that can describe both. So for now + // just return the DTLB state. 
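+	// The loop below packs one 4-bit CA value per 512MB segment into a2,
+	// starting with the segment at 0xE0000000 and ending with the segment
+	// at address 0 in the least-significant nibble.  For example, if every
+	// segment uses CA 2 (bypass mode), a2 ends up as 0x22222222, ie. the
+	// XCHAL_CACHEATTR_ALL_BYPASS value defined below.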
+ movi a5, 0xE0000000 + movi a2, 0 + movi a3, 0 +1: add a3, a3, a5 // next segment + r&tlb&1 a4, a3 // get PPN+CA of segment at 0xE0000000, 0xC0000000, ..., 0 + dsync // interlock??? + slli a2, a2, 4 + extui a4, a4, 0, 4 // extract CA + or a2, a2, a4 + bnez a3, 1b +#else + // This macro isn't applicable to arbitrary MMU configurations. + // Just return zero. + movi a2, 0 +#endif + .endm + + .macro icacheattr_get + _cacheattr_get itlb + .endm + + .macro dcacheattr_get + _cacheattr_get dtlb + .endm + + +#define XCHAL_CACHEATTR_ALL_BYPASS 0x22222222 /* default (powerup/reset) value of CACHEATTR, all BYPASS + mode (ie. disabled/bypassed caches) */ + +#if XCHAL_HAVE_CACHEATTR || XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR + +#define XCHAL_FCA_ENAMASK 0x001A /* bitmap of fetch attributes that require enabled icache */ +#define XCHAL_LCA_ENAMASK 0x0003 /* bitmap of load attributes that require enabled dcache */ +#define XCHAL_SCA_ENAMASK 0x0003 /* bitmap of store attributes that require enabled dcache */ +#define XCHAL_LSCA_ENAMASK (XCHAL_LCA_ENAMASK|XCHAL_SCA_ENAMASK) /* l/s attrs requiring enabled dcache */ +#define XCHAL_ALLCA_ENAMASK (XCHAL_FCA_ENAMASK|XCHAL_LSCA_ENAMASK) /* all attrs requiring enabled caches */ + +/* + * _cacheattr_is_enabled + * + * (Internal macro.) + * Branches to \label if CACHEATTR in a2 indicates an enabled + * cache, using mask in a3. + * + * Parameters: + * label where to branch to if cache is enabled + * Entry: + * a2 contains CACHEATTR value used to determine whether + * caches are enabled + * a3 16-bit constant where each bit correspond to + * one of the 16 possible CA values (in a CACHEATTR mask); + * CA values that indicate the cache is enabled + * have their corresponding bit set in this mask + * (eg. use XCHAL_xCA_ENAMASK , above) + * Exit: + * a2,a4,a5 clobbered + * SAR clobbered + */ + .macro _cacheattr_is_enabled label + movi a4, 8 // loop 8 times +.Lcaife\@: + extui a5, a2, 0, 4 // get CA nibble + ssr a5 // index into mask according to CA... + srl a5, a3 // ...and get CA's mask bit in a5 bit 0 + bbsi.l a5, 0, \label // if CA indicates cache enabled, jump to label + srli a2, a2, 4 // next nibble + addi a4, a4, -1 + bnez a4, .Lcaife\@ // loop for each nibble + .endm + +#else /* XCHAL_HAVE_CACHEATTR || XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR */ + .macro _cacheattr_is_enabled label + j \label // macro not applicable, assume caches always enabled + .endm +#endif /* XCHAL_HAVE_CACHEATTR || XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR */ + + + +/* + * icacheattr_is_enabled + * + * Branches to \label if I-cache is enabled. + * + * Parameters: + * label where to branch to if icache is enabled + * Entry: + * (none) + * Exit: + * a2-a5, SAR clobbered (temporaries) + */ + .macro icacheattr_is_enabled label +#if XCHAL_HAVE_CACHEATTR || XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR + icacheattr_get + movi a3, XCHAL_FCA_ENAMASK +#endif + _cacheattr_is_enabled \label + .endm + +/* + * dcacheattr_is_enabled + * + * Branches to \label if D-cache is enabled. + * + * Parameters: + * label where to branch to if dcache is enabled + * Entry: + * (none) + * Exit: + * a2-a5, SAR clobbered (temporaries) + */ + .macro dcacheattr_is_enabled label +#if XCHAL_HAVE_CACHEATTR || XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR + dcacheattr_get + movi a3, XCHAL_LSCA_ENAMASK +#endif + _cacheattr_is_enabled \label + .endm + +/* + * cacheattr_is_enabled + * + * Branches to \label if either I-cache or D-cache is enabled. 
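+ * (On CACHEATTR configs a single test of the combined attribute mask
+ * suffices; on MIMIC/XLT configs the instruction and data attributes
+ * are fetched and tested separately.)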
+ * + * Parameters: + * label where to branch to if a cache is enabled + * Entry: + * (none) + * Exit: + * a2-a5, SAR clobbered (temporaries) + */ + .macro cacheattr_is_enabled label +#if XCHAL_HAVE_CACHEATTR + rsr a2, CACHEATTR + movi a3, XCHAL_ALLCA_ENAMASK +#elif XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR + icacheattr_get + movi a3, XCHAL_FCA_ENAMASK + _cacheattr_is_enabled \label + dcacheattr_get + movi a3, XCHAL_LSCA_ENAMASK +#endif + _cacheattr_is_enabled \label + .endm + + + +/* + * The ISA does not have a defined way to change the + * instruction cache attributes of the running code, + * ie. of the memory area that encloses the current PC. + * However, each micro-architecture (or class of + * configurations within a micro-architecture) + * provides a way to deal with this issue. + * + * Here are a few macros used to implement the relevant + * approach taken. + */ + +#if XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR + // We have a config that "mimics" CACHEATTR using a simplified + // "MMU" composed of a single statically-mapped way. + +/* + * icacheattr_set + * + * Entry: + * a2 cacheattr value to set + * Exit: + * a2 unchanged + * a3-a8 clobbered (temporaries) + */ + .macro icacheattr_set + + movi a5, 0xE0000000 // mask of upper 3 bits + movi a6, 3f // PC where ITLB is set + movi a3, 0 // start at region 0 (0 .. 7) + and a6, a6, a5 // upper 3 bits of local PC area + mov a7, a2 // copy a2 so it doesn't get clobbered + j 3f + +# if XCHAL_HAVE_XLT_CACHEATTR + // Can do translations, use generic method: +1: sub a6, a3, a5 // address of some other segment + ritlb1 a8, a6 // save its PPN+CA + dsync // interlock?? + witlb a4, a6 // make it translate to this code area + movi a6, 5f // where to jump into it + isync + sub a6, a6, a5 // adjust jump address within that other segment + jx a6 + + // Note that in the following code snippet, which runs at a different virtual + // address than it is assembled for, we avoid using literals (eg. via movi/l32r) + // just in case literals end up in a different 512 MB segment, and we avoid + // instructions that rely on the current PC being what is expected. + // + .align 4 + _j 6f // this is at label '5' minus 4 bytes + .align 4 +5: witlb a4, a3 // we're in other segment, now can write previous segment's CA + isync + add a6, a6, a5 // back to previous segment + addi a6, a6, -4 // next jump label + jx a6 + +6: sub a6, a3, a5 // address of some other segment + witlb a8, a6 // restore PPN+CA of other segment + mov a6, a3 // restore a6 + isync +# else /* XCHAL_HAVE_XLT_CACHEATTR */ + // Use micro-architecture specific method. + // The following 4-instruction sequence is aligned such that + // it all fits within a single I-cache line. Sixteen byte + // alignment is sufficient for this (using XCHAL_ICACHE_LINESIZE + // actually causes problems because that can be greater than + // the alignment of the reset vector, where this macro is often + // invoked, which would cause the linker to align the reset + // vector code away from the reset vector!!). + .align 16 /*XCHAL_ICACHE_LINESIZE*/ +1: _witlb a4, a3 // write wired PTE (CA, no PPN) of 512MB segment to ITLB + _isync + nop + nop +# endif /* XCHAL_HAVE_XLT_CACHEATTR */ + beq a3, a5, 4f // done? + + // Note that in the WITLB loop, we don't do any load/stores + // (may not be an issue here, but it is important in the DTLB case). 
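+	//
+	// The 2:/3: loop below steps through the eight 512MB regions,
+	// taking the next CA nibble from a7 for each; the region that
+	// contains the current PC (a6) is diverted back to the safe
+	// sequence at label 1 so instruction fetch keeps working while
+	// its attribute is being changed.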
+2: srli a7, a7, 4 // next CA + sub a3, a3, a5 // next segment (add 0x20000000) +3: +# if XCHAL_HAVE_XLT_CACHEATTR /* if have translation, preserve it */ + ritlb1 a8, a3 // get current PPN+CA of segment + dsync // interlock??? + extui a4, a7, 0, 4 // extract CA to set + srli a8, a8, 4 // clear CA but keep PPN ... + slli a8, a8, 4 // ... + add a4, a4, a8 // combine new CA with PPN to preserve +# else + extui a4, a7, 0, 4 // extract CA +# endif + beq a3, a6, 1b // current PC's region? if so, do it in a safe way + witlb a4, a3 // write wired PTE (CA [+PPN]) of 512MB segment to ITLB + bne a3, a5, 2b + isync // make sure all ifetch changes take effect +4: + .endm // icacheattr_set + + +/* + * dcacheattr_set + * + * Entry: + * a2 cacheattr value to set + * Exit: + * a2 unchanged + * a3-a8 clobbered (temporaries) + */ + + .macro dcacheattr_set + + movi a5, 0xE0000000 // mask of upper 3 bits + movi a3, 0 // start at region 0 (0 .. 7) + mov a7, a2 // copy a2 so it doesn't get clobbered + j 3f + // Note that in the WDTLB loop, we don't do any load/stores + // (including implicit l32r via movi) because it isn't safe. +2: srli a7, a7, 4 // next CA + sub a3, a3, a5 // next segment (add 0x20000000) +3: +# if XCHAL_HAVE_XLT_CACHEATTR /* if have translation, preserve it */ + rdtlb1 a8, a3 // get current PPN+CA of segment + dsync // interlock??? + extui a4, a7, 0, 4 // extract CA to set + srli a8, a8, 4 // clear CA but keep PPN ... + slli a8, a8, 4 // ... + add a4, a4, a8 // combine new CA with PPN to preserve +# else + extui a4, a7, 0, 4 // extract CA to set +# endif + wdtlb a4, a3 // write wired PTE (CA [+PPN]) of 512MB segment to DTLB + bne a3, a5, 2b + dsync // make sure all data path changes take effect + .endm // dcacheattr_set + +#endif /* XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR */ + + + +/* + * cacheattr_set + * + * Macro that sets the current CACHEATTR safely + * (both i and d) according to the current contents of a2. + * It works even when changing the cache attributes of + * the currently running code. + * + * Entry: + * a2 cacheattr value to set + * Exit: + * a2 unchanged + * a3-a8 clobbered (temporaries) + */ + .macro cacheattr_set + +#if XCHAL_HAVE_CACHEATTR +# if XCHAL_ICACHE_LINESIZE < 4 + // No i-cache, so can always safely write to CACHEATTR: + wsr a2, CACHEATTR +# else + // The Athens micro-architecture, when using the old + // exception architecture option (ie. with the CACHEATTR register) + // allows changing the cache attributes of the running code + // using the following exact sequence aligned to be within + // an instruction cache line. (NOTE: using XCHAL_ICACHE_LINESIZE + // alignment actually causes problems because that can be greater + // than the alignment of the reset vector, where this macro is often + // invoked, which would cause the linker to align the reset + // vector code away from the reset vector!!). + j 1f + .align 16 /*XCHAL_ICACHE_LINESIZE*/ // align to within an I-cache line +1: _wsr a2, CACHEATTR + _isync + nop + nop +# endif +#elif XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR + // DTLB and ITLB are independent, but to keep semantics + // of this macro we simply write to both. + icacheattr_set + dcacheattr_set +#else + // This macro isn't applicable to arbitrary MMU configurations. + // Do nothing in this case. 
+#endif + .endm + + +#endif /*XTENSA_CACHEATTRASM_H*/ + diff --git a/include/asm-xtensa/xtensa/config-linux_be/core.h b/include/asm-xtensa/xtensa/config-linux_be/core.h new file mode 100644 index 0000000..d54fe5e --- /dev/null +++ b/include/asm-xtensa/xtensa/config-linux_be/core.h @@ -0,0 +1,1270 @@ +/* + * xtensa/config/core.h -- HAL definitions that are dependent on CORE configuration + * + * This header file is sometimes referred to as the "compile-time HAL" or CHAL. + * It was generated for a specific Xtensa processor configuration. + * + * Source for configuration-independent binaries (which link in a + * configuration-specific HAL library) must NEVER include this file. + * It is perfectly normal, however, for the HAL source itself to include this file. + */ + +/* + * Copyright (c) 2003 Tensilica, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2.1 of the GNU Lesser General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, write the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, + * USA. + */ + + +#ifndef XTENSA_CONFIG_CORE_H +#define XTENSA_CONFIG_CORE_H + +#include <xtensa/hal.h> + + +/*---------------------------------------------------------------------- + GENERAL + ----------------------------------------------------------------------*/ + +/* + * Separators for macros that expand into arrays. + * These can be predefined by files that #include this one, + * when different separators are required. 
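+ *
+ * With the default "," separator, a list macro such as
+ * XCHAL_INTLEVEL_MASKS (defined below) can be dropped straight into a
+ * C initializer, e.g. (illustrative name):
+ *
+ *	const unsigned xchal_intlevel_masks[] = { XCHAL_INTLEVEL_MASKS };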
+ */ +/* Element separator for macros that expand into 1-dimensional arrays: */ +#ifndef XCHAL_SEP +#define XCHAL_SEP , +#endif +/* Array separator for macros that expand into 2-dimensional arrays: */ +#ifndef XCHAL_SEP2 +#define XCHAL_SEP2 },{ +#endif + + +/*---------------------------------------------------------------------- + ENDIANNESS + ----------------------------------------------------------------------*/ + +#define XCHAL_HAVE_BE 1 +#define XCHAL_HAVE_LE 0 +#define XCHAL_MEMORY_ORDER XTHAL_BIGENDIAN + + +/*---------------------------------------------------------------------- + REGISTER WINDOWS + ----------------------------------------------------------------------*/ + +#define XCHAL_HAVE_WINDOWED 1 /* 1 if windowed registers option configured, 0 otherwise */ +#define XCHAL_NUM_AREGS 64 /* number of physical address regs */ +#define XCHAL_NUM_AREGS_LOG2 6 /* log2(XCHAL_NUM_AREGS) */ + + +/*---------------------------------------------------------------------- + ADDRESS ALIGNMENT + ----------------------------------------------------------------------*/ + +/* These apply to a selected set of core load and store instructions only (see ISA): */ +#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* 1 if unaligned loads cause an exception, 0 otherwise */ +#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* 1 if unaligned stores cause an exception, 0 otherwise */ + + +/*---------------------------------------------------------------------- + INTERRUPTS + ----------------------------------------------------------------------*/ + +#define XCHAL_HAVE_INTERRUPTS 1 /* 1 if interrupt option configured, 0 otherwise */ +#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* 1 if high-priority interrupt option configured, 0 otherwise */ +#define XCHAL_HAVE_HIGHLEVEL_INTERRUPTS XCHAL_HAVE_HIGHPRI_INTERRUPTS +#define XCHAL_HAVE_NMI 0 /* 1 if NMI option configured, 0 otherwise */ +#define XCHAL_NUM_INTERRUPTS 17 /* number of interrupts */ +#define XCHAL_NUM_INTERRUPTS_LOG2 5 /* number of bits to hold an interrupt number: roundup(log2(number of interrupts)) */ +#define XCHAL_NUM_EXTINTERRUPTS 10 /* number of external interrupts */ +#define XCHAL_NUM_INTLEVELS 4 /* number of interrupt levels (not including level zero!) 
*/ +#define XCHAL_NUM_LOWPRI_LEVELS 1 /* number of low-priority interrupt levels (always 1) */ +#define XCHAL_FIRST_HIGHPRI_LEVEL (XCHAL_NUM_LOWPRI_LEVELS+1) /* level of first high-priority interrupt (always 2) */ +#define XCHAL_EXCM_LEVEL 1 /* level of interrupts masked by PS.EXCM (XEA2 only; always 1 in T10xx); + for XEA1, where there is no PS.EXCM, this is always 1; + interrupts at levels FIRST_HIGHPRI <= n <= EXCM_LEVEL, if any, + are termed "medium priority" interrupts (post T10xx only) */ +/* Note: 1 <= LOWPRI_LEVELS <= EXCM_LEVEL < DEBUGLEVEL <= NUM_INTLEVELS < NMILEVEL <= 15 */ + +/* Masks of interrupts at each interrupt level: */ +#define XCHAL_INTLEVEL0_MASK 0x00000000 +#define XCHAL_INTLEVEL1_MASK 0x000064F9 +#define XCHAL_INTLEVEL2_MASK 0x00008902 +#define XCHAL_INTLEVEL3_MASK 0x00011204 +#define XCHAL_INTLEVEL4_MASK 0x00000000 +#define XCHAL_INTLEVEL5_MASK 0x00000000 +#define XCHAL_INTLEVEL6_MASK 0x00000000 +#define XCHAL_INTLEVEL7_MASK 0x00000000 +#define XCHAL_INTLEVEL8_MASK 0x00000000 +#define XCHAL_INTLEVEL9_MASK 0x00000000 +#define XCHAL_INTLEVEL10_MASK 0x00000000 +#define XCHAL_INTLEVEL11_MASK 0x00000000 +#define XCHAL_INTLEVEL12_MASK 0x00000000 +#define XCHAL_INTLEVEL13_MASK 0x00000000 +#define XCHAL_INTLEVEL14_MASK 0x00000000 +#define XCHAL_INTLEVEL15_MASK 0x00000000 +/* As an array of entries (eg. for C constant arrays): */ +#define XCHAL_INTLEVEL_MASKS 0x00000000 XCHAL_SEP \ + 0x000064F9 XCHAL_SEP \ + 0x00008902 XCHAL_SEP \ + 0x00011204 XCHAL_SEP \ + 0x00000000 XCHAL_SEP \ + 0x00000000 XCHAL_SEP \ + 0x00000000 XCHAL_SEP \ + 0x00000000 XCHAL_SEP \ + 0x00000000 XCHAL_SEP \ + 0x00000000 XCHAL_SEP \ + 0x00000000 XCHAL_SEP \ + 0x00000000 XCHAL_SEP \ + 0x00000000 XCHAL_SEP \ + 0x00000000 XCHAL_SEP \ + 0x00000000 XCHAL_SEP \ + 0x00000000 + +/* Masks of interrupts at each range 1..n of interrupt levels: */ +#define XCHAL_INTLEVEL0_ANDBELOW_MASK 0x00000000 +#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x000064F9 +#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x0000EDFB +#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x0001FFFF +#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x0001FFFF +#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x0001FFFF +#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x0001FFFF +#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x0001FFFF +#define XCHAL_INTLEVEL8_ANDBELOW_MASK 0x0001FFFF +#define XCHAL_INTLEVEL9_ANDBELOW_MASK 0x0001FFFF +#define XCHAL_INTLEVEL10_ANDBELOW_MASK 0x0001FFFF +#define XCHAL_INTLEVEL11_ANDBELOW_MASK 0x0001FFFF +#define XCHAL_INTLEVEL12_ANDBELOW_MASK 0x0001FFFF +#define XCHAL_INTLEVEL13_ANDBELOW_MASK 0x0001FFFF +#define XCHAL_INTLEVEL14_ANDBELOW_MASK 0x0001FFFF +#define XCHAL_INTLEVEL15_ANDBELOW_MASK 0x0001FFFF +#define XCHAL_LOWPRI_MASK XCHAL_INTLEVEL1_ANDBELOW_MASK /* mask of all low-priority interrupts */ +#define XCHAL_EXCM_MASK XCHAL_INTLEVEL1_ANDBELOW_MASK /* mask of all interrupts masked by PS.EXCM (or CEXCM) */ +/* As an array of entries (eg. 
for C constant arrays): */ +#define XCHAL_INTLEVEL_ANDBELOW_MASKS 0x00000000 XCHAL_SEP \ + 0x000064F9 XCHAL_SEP \ + 0x0000EDFB XCHAL_SEP \ + 0x0001FFFF XCHAL_SEP \ + 0x0001FFFF XCHAL_SEP \ + 0x0001FFFF XCHAL_SEP \ + 0x0001FFFF XCHAL_SEP \ + 0x0001FFFF XCHAL_SEP \ + 0x0001FFFF XCHAL_SEP \ + 0x0001FFFF XCHAL_SEP \ + 0x0001FFFF XCHAL_SEP \ + 0x0001FFFF XCHAL_SEP \ + 0x0001FFFF XCHAL_SEP \ + 0x0001FFFF XCHAL_SEP \ + 0x0001FFFF XCHAL_SEP \ + 0x0001FFFF + +/* Interrupt numbers for each interrupt level at which only one interrupt was configured: */ +/*#define XCHAL_INTLEVEL1_NUM ...more than one interrupt at this level...*/ +/*#define XCHAL_INTLEVEL2_NUM ...more than one interrupt at this level...*/ +/*#define XCHAL_INTLEVEL3_NUM ...more than one interrupt at this level...*/ + +/* Level of each interrupt: */ +#define XCHAL_INT0_LEVEL 1 +#define XCHAL_INT1_LEVEL 2 +#define XCHAL_INT2_LEVEL 3 +#define XCHAL_INT3_LEVEL 1 +#define XCHAL_INT4_LEVEL 1 +#define XCHAL_INT5_LEVEL 1 +#define XCHAL_INT6_LEVEL 1 +#define XCHAL_INT7_LEVEL 1 +#define XCHAL_INT8_LEVEL 2 +#define XCHAL_INT9_LEVEL 3 +#define XCHAL_INT10_LEVEL 1 +#define XCHAL_INT11_LEVEL 2 +#define XCHAL_INT12_LEVEL 3 +#define XCHAL_INT13_LEVEL 1 +#define XCHAL_INT14_LEVEL 1 +#define XCHAL_INT15_LEVEL 2 +#define XCHAL_INT16_LEVEL 3 +#define XCHAL_INT17_LEVEL 0 +#define XCHAL_INT18_LEVEL 0 +#define XCHAL_INT19_LEVEL 0 +#define XCHAL_INT20_LEVEL 0 +#define XCHAL_INT21_LEVEL 0 +#define XCHAL_INT22_LEVEL 0 +#define XCHAL_INT23_LEVEL 0 +#define XCHAL_INT24_LEVEL 0 +#define XCHAL_INT25_LEVEL 0 +#define XCHAL_INT26_LEVEL 0 +#define XCHAL_INT27_LEVEL 0 +#define XCHAL_INT28_LEVEL 0 +#define XCHAL_INT29_LEVEL 0 +#define XCHAL_INT30_LEVEL 0 +#define XCHAL_INT31_LEVEL 0 +/* As an array of entries (eg. for C constant arrays): */ +#define XCHAL_INT_LEVELS 1 XCHAL_SEP \ + 2 XCHAL_SEP \ + 3 XCHAL_SEP \ + 1 XCHAL_SEP \ + 1 XCHAL_SEP \ + 1 XCHAL_SEP \ + 1 XCHAL_SEP \ + 1 XCHAL_SEP \ + 2 XCHAL_SEP \ + 3 XCHAL_SEP \ + 1 XCHAL_SEP \ + 2 XCHAL_SEP \ + 3 XCHAL_SEP \ + 1 XCHAL_SEP \ + 1 XCHAL_SEP \ + 2 XCHAL_SEP \ + 3 XCHAL_SEP \ + 0 XCHAL_SEP \ + 0 XCHAL_SEP \ + 0 XCHAL_SEP \ + 0 XCHAL_SEP \ + 0 XCHAL_SEP \ + 0 XCHAL_SEP \ + 0 XCHAL_SEP \ + 0 XCHAL_SEP \ + 0 XCHAL_SEP \ + 0 XCHAL_SEP \ + 0 XCHAL_SEP \ + 0 XCHAL_SEP \ + 0 XCHAL_SEP \ + 0 XCHAL_SEP \ + 0 + +/* Type of each interrupt: */ +#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL +#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL +#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL +#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL +#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL +#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL +#define XCHAL_INT6_TYPE XTHAL_INTTYPE_EXTERN_LEVEL +#define XCHAL_INT7_TYPE XTHAL_INTTYPE_EXTERN_EDGE +#define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_EDGE +#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_EDGE +#define XCHAL_INT10_TYPE XTHAL_INTTYPE_TIMER +#define XCHAL_INT11_TYPE XTHAL_INTTYPE_TIMER +#define XCHAL_INT12_TYPE XTHAL_INTTYPE_TIMER +#define XCHAL_INT13_TYPE XTHAL_INTTYPE_SOFTWARE +#define XCHAL_INT14_TYPE XTHAL_INTTYPE_SOFTWARE +#define XCHAL_INT15_TYPE XTHAL_INTTYPE_SOFTWARE +#define XCHAL_INT16_TYPE XTHAL_INTTYPE_SOFTWARE +#define XCHAL_INT17_TYPE XTHAL_INTTYPE_UNCONFIGURED +#define XCHAL_INT18_TYPE XTHAL_INTTYPE_UNCONFIGURED +#define XCHAL_INT19_TYPE XTHAL_INTTYPE_UNCONFIGURED +#define XCHAL_INT20_TYPE XTHAL_INTTYPE_UNCONFIGURED +#define XCHAL_INT21_TYPE XTHAL_INTTYPE_UNCONFIGURED +#define XCHAL_INT22_TYPE XTHAL_INTTYPE_UNCONFIGURED +#define XCHAL_INT23_TYPE 
XTHAL_INTTYPE_UNCONFIGURED +#define XCHAL_INT24_TYPE XTHAL_INTTYPE_UNCONFIGURED +#define XCHAL_INT25_TYPE XTHAL_INTTYPE_UNCONFIGURED +#define XCHAL_INT26_TYPE XTHAL_INTTYPE_UNCONFIGURED +#define XCHAL_INT27_TYPE XTHAL_INTTYPE_UNCONFIGURED +#define XCHAL_INT28_TYPE XTHAL_INTTYPE_UNCONFIGURED +#define XCHAL_INT29_TYPE XTHAL_INTTYPE_UNCONFIGURED +#define XCHAL_INT30_TYPE XTHAL_INTTYPE_UNCONFIGURED +#define XCHAL_INT31_TYPE XTHAL_INTTYPE_UNCONFIGURED +/* As an array of entries (eg. for C constant arrays): */ +#define XCHAL_INT_TYPES XTHAL_INTTYPE_EXTERN_LEVEL XCHAL_SEP \ + XTHAL_INTTYPE_EXTERN_LEVEL XCHAL_SEP \ + XTHAL_INTTYPE_EXTERN_LEVEL XCHAL_SEP \ + XTHAL_INTTYPE_EXTERN_LEVEL XCHAL_SEP \ + XTHAL_INTTYPE_EXTERN_LEVEL XCHAL_SEP \ + XTHAL_INTTYPE_EXTERN_LEVEL XCHAL_SEP \ + XTHAL_INTTYPE_EXTERN_LEVEL XCHAL_SEP \ + XTHAL_INTTYPE_EXTERN_EDGE XCHAL_SEP \ + XTHAL_INTTYPE_EXTERN_EDGE XCHAL_SEP \ + XTHAL_INTTYPE_EXTERN_EDGE XCHAL_SEP \ + XTHAL_INTTYPE_TIMER XCHAL_SEP \ + XTHAL_INTTYPE_TIMER XCHAL_SEP \ + XTHAL_INTTYPE_TIMER XCHAL_SEP \ + XTHAL_INTTYPE_SOFTWARE XCHAL_SEP \ + XTHAL_INTTYPE_SOFTWARE XCHAL_SEP \ + XTHAL_INTTYPE_SOFTWARE XCHAL_SEP \ + XTHAL_INTTYPE_SOFTWARE XCHAL_SEP \ + XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \ + XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \ + XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \ + XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \ + XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \ + XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \ + XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \ + XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \ + XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \ + XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \ + XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \ + XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \ + XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \ + XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \ + XTHAL_INTTYPE_UNCONFIGURED + +/* Masks of interrupts for each type of interrupt: */ +#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFFE0000 +#define XCHAL_INTTYPE_MASK_SOFTWARE 0x0001E000 +#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x00000380 +#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0000007F +#define XCHAL_INTTYPE_MASK_TIMER 0x00001C00 +#define XCHAL_INTTYPE_MASK_NMI 0x00000000 +/* As an array of entries (eg. for C constant arrays): */ +#define XCHAL_INTTYPE_MASKS 0xFFFE0000 XCHAL_SEP \ + 0x0001E000 XCHAL_SEP \ + 0x00000380 XCHAL_SEP \ + 0x0000007F XCHAL_SEP \ + 0x00001C00 XCHAL_SEP \ + 0x00000000 + +/* Interrupts assigned to each timer (CCOMPARE0 to CCOMPARE3), -1 if unassigned */ +#define XCHAL_TIMER0_INTERRUPT 10 +#define XCHAL_TIMER1_INTERRUPT 11 +#define XCHAL_TIMER2_INTERRUPT 12 +#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED +/* As an array of entries (eg. for C constant arrays): */ +#define XCHAL_TIMER_INTERRUPTS 10 XCHAL_SEP \ + 11 XCHAL_SEP \ + 12 XCHAL_SEP \ + XTHAL_TIMER_UNCONFIGURED + +/* Indexing macros: */ +#define _XCHAL_INTLEVEL_MASK(n) XCHAL_INTLEVEL ## n ## _MASK +#define XCHAL_INTLEVEL_MASK(n) _XCHAL_INTLEVEL_MASK(n) /* n = 0 .. 15 */ +#define _XCHAL_INTLEVEL_ANDBELOWMASK(n) XCHAL_INTLEVEL ## n ## _ANDBELOW_MASK +#define XCHAL_INTLEVEL_ANDBELOW_MASK(n) _XCHAL_INTLEVEL_ANDBELOWMASK(n) /* n = 0 .. 15 */ +#define _XCHAL_INT_LEVEL(n) XCHAL_INT ## n ## _LEVEL +#define XCHAL_INT_LEVEL(n) _XCHAL_INT_LEVEL(n) /* n = 0 .. 31 */ +#define _XCHAL_INT_TYPE(n) XCHAL_INT ## n ## _TYPE +#define XCHAL_INT_TYPE(n) _XCHAL_INT_TYPE(n) /* n = 0 .. 31 */ +#define _XCHAL_TIMER_INTERRUPT(n) XCHAL_TIMER ## n ## _INTERRUPT +#define XCHAL_TIMER_INTERRUPT(n) _XCHAL_TIMER_INTERRUPT(n) /* n = 0 .. 3 */ + + + +/* + * External interrupt vectors/levels. 
+ * These macros describe how Xtensa processor interrupt numbers + * (as numbered internally, eg. in INTERRUPT and INTENABLE registers) + * map to external BInterrupt<n> pins, for those interrupts + * configured as external (level-triggered, edge-triggered, or NMI). + * See the Xtensa processor databook for more details. + */ + +/* Core interrupt numbers mapped to each EXTERNAL interrupt number: */ +#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */ +#define XCHAL_EXTINT1_NUM 1 /* (intlevel 2) */ +#define XCHAL_EXTINT2_NUM 2 /* (intlevel 3) */ +#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */ +#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */ +#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */ +#define XCHAL_EXTINT6_NUM 6 /* (intlevel 1) */ +#define XCHAL_EXTINT7_NUM 7 /* (intlevel 1) */ +#define XCHAL_EXTINT8_NUM 8 /* (intlevel 2) */ +#define XCHAL_EXTINT9_NUM 9 /* (intlevel 3) */ + +/* Corresponding interrupt masks: */ +#define XCHAL_EXTINT0_MASK 0x00000001 +#define XCHAL_EXTINT1_MASK 0x00000002 +#define XCHAL_EXTINT2_MASK 0x00000004 +#define XCHAL_EXTINT3_MASK 0x00000008 +#define XCHAL_EXTINT4_MASK 0x00000010 +#define XCHAL_EXTINT5_MASK 0x00000020 +#define XCHAL_EXTINT6_MASK 0x00000040 +#define XCHAL_EXTINT7_MASK 0x00000080 +#define XCHAL_EXTINT8_MASK 0x00000100 +#define XCHAL_EXTINT9_MASK 0x00000200 + +/* Core config interrupt levels mapped to each external interrupt: */ +#define XCHAL_EXTINT0_LEVEL 1 /* (int number 0) */ +#define XCHAL_EXTINT1_LEVEL 2 /* (int number 1) */ +#define XCHAL_EXTINT2_LEVEL 3 /* (int number 2) */ +#define XCHAL_EXTINT3_LEVEL 1 /* (int number 3) */ +#define XCHAL_EXTINT4_LEVEL 1 /* (int number 4) */ +#define XCHAL_EXTINT5_LEVEL 1 /* (int number 5) */ +#define XCHAL_EXTINT6_LEVEL 1 /* (int number 6) */ +#define XCHAL_EXTINT7_LEVEL 1 /* (int number 7) */ +#define XCHAL_EXTINT8_LEVEL 2 /* (int number 8) */ +#define XCHAL_EXTINT9_LEVEL 3 /* (int number 9) */ + + +/*---------------------------------------------------------------------- + EXCEPTIONS and VECTORS + ----------------------------------------------------------------------*/ + +#define XCHAL_HAVE_EXCEPTIONS 1 /* 1 if exception option configured, 0 otherwise */ + +#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture number: 1 for XEA1 (old), 2 for XEA2 (new) */ +#define XCHAL_HAVE_XEA1 0 /* 1 if XEA1, 0 otherwise */ +#define XCHAL_HAVE_XEA2 1 /* 1 if XEA2, 0 otherwise */ +/* For backward compatibility ONLY -- DO NOT USE (will be removed in future release): */ +#define XCHAL_HAVE_OLD_EXC_ARCH XCHAL_HAVE_XEA1 /* (DEPRECATED) 1 if old exception architecture (XEA1), 0 otherwise (eg. 
XEA2) */ +#define XCHAL_HAVE_EXCM XCHAL_HAVE_XEA2 /* (DEPRECATED) 1 if PS.EXCM bit exists (currently equals XCHAL_HAVE_TLBS) */ + +#define XCHAL_RESET_VECTOR_VADDR 0xFE000020 +#define XCHAL_RESET_VECTOR_PADDR 0xFE000020 +#define XCHAL_USER_VECTOR_VADDR 0xD0000220 +#define XCHAL_PROGRAMEXC_VECTOR_VADDR XCHAL_USER_VECTOR_VADDR /* for backward compatibility */ +#define XCHAL_USEREXC_VECTOR_VADDR XCHAL_USER_VECTOR_VADDR /* for backward compatibility */ +#define XCHAL_USER_VECTOR_PADDR 0x00000220 +#define XCHAL_PROGRAMEXC_VECTOR_PADDR XCHAL_USER_VECTOR_PADDR /* for backward compatibility */ +#define XCHAL_USEREXC_VECTOR_PADDR XCHAL_USER_VECTOR_PADDR /* for backward compatibility */ +#define XCHAL_KERNEL_VECTOR_VADDR 0xD0000200 +#define XCHAL_STACKEDEXC_VECTOR_VADDR XCHAL_KERNEL_VECTOR_VADDR /* for backward compatibility */ +#define XCHAL_KERNELEXC_VECTOR_VADDR XCHAL_KERNEL_VECTOR_VADDR /* for backward compatibility */ +#define XCHAL_KERNEL_VECTOR_PADDR 0x00000200 +#define XCHAL_STACKEDEXC_VECTOR_PADDR XCHAL_KERNEL_VECTOR_PADDR /* for backward compatibility */ +#define XCHAL_KERNELEXC_VECTOR_PADDR XCHAL_KERNEL_VECTOR_PADDR /* for backward compatibility */ +#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0xD0000290 +#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x00000290 +#define XCHAL_WINDOW_VECTORS_VADDR 0xD0000000 +#define XCHAL_WINDOW_VECTORS_PADDR 0x00000000 +#define XCHAL_INTLEVEL2_VECTOR_VADDR 0xD0000240 +#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x00000240 +#define XCHAL_INTLEVEL3_VECTOR_VADDR 0xD0000250 +#define XCHAL_INTLEVEL3_VECTOR_PADDR 0x00000250 +#define XCHAL_INTLEVEL4_VECTOR_VADDR 0xFE000520 +#define XCHAL_INTLEVEL4_VECTOR_PADDR 0xFE000520 +#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL4_VECTOR_VADDR +#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL4_VECTOR_PADDR + +/* Indexing macros: */ +#define _XCHAL_INTLEVEL_VECTOR_VADDR(n) XCHAL_INTLEVEL ## n ## _VECTOR_VADDR +#define XCHAL_INTLEVEL_VECTOR_VADDR(n) _XCHAL_INTLEVEL_VECTOR_VADDR(n) /* n = 0 .. 
15 */ + +/* + * General Exception Causes + * (values of EXCCAUSE special register set by general exceptions, + * which vector to the user, kernel, or double-exception vectors): + */ +#define XCHAL_EXCCAUSE_ILLEGAL_INSTRUCTION 0 /* Illegal Instruction (IllegalInstruction) */ +#define XCHAL_EXCCAUSE_SYSTEM_CALL 1 /* System Call (SystemCall) */ +#define XCHAL_EXCCAUSE_INSTRUCTION_FETCH_ERROR 2 /* Instruction Fetch Error (InstructionFetchError) */ +#define XCHAL_EXCCAUSE_LOAD_STORE_ERROR 3 /* Load Store Error (LoadStoreError) */ +#define XCHAL_EXCCAUSE_LEVEL1_INTERRUPT 4 /* Level 1 Interrupt (Level1Interrupt) */ +#define XCHAL_EXCCAUSE_ALLOCA 5 /* Stack Extension Assist (Alloca) */ +#define XCHAL_EXCCAUSE_INTEGER_DIVIDE_BY_ZERO 6 /* Integer Divide by Zero (IntegerDivideByZero) */ +#define XCHAL_EXCCAUSE_SPECULATION 7 /* Speculation (Speculation) */ +#define XCHAL_EXCCAUSE_PRIVILEGED 8 /* Privileged Instruction (Privileged) */ +#define XCHAL_EXCCAUSE_UNALIGNED 9 /* Unaligned Load Store (Unaligned) */ +#define XCHAL_EXCCAUSE_ITLB_MISS 16 /* ITlb Miss Exception (ITlbMiss) */ +#define XCHAL_EXCCAUSE_ITLB_MULTIHIT 17 /* ITlb Mutltihit Exception (ITlbMultihit) */ +#define XCHAL_EXCCAUSE_ITLB_PRIVILEGE 18 /* ITlb Privilege Exception (ITlbPrivilege) */ +#define XCHAL_EXCCAUSE_ITLB_SIZE_RESTRICTION 19 /* ITlb Size Restriction Exception (ITlbSizeRestriction) */ +#define XCHAL_EXCCAUSE_FETCH_CACHE_ATTRIBUTE 20 /* Fetch Cache Attribute Exception (FetchCacheAttribute) */ +#define XCHAL_EXCCAUSE_DTLB_MISS 24 /* DTlb Miss Exception (DTlbMiss) */ +#define XCHAL_EXCCAUSE_DTLB_MULTIHIT 25 /* DTlb Multihit Exception (DTlbMultihit) */ +#define XCHAL_EXCCAUSE_DTLB_PRIVILEGE 26 /* DTlb Privilege Exception (DTlbPrivilege) */ +#define XCHAL_EXCCAUSE_DTLB_SIZE_RESTRICTION 27 /* DTlb Size Restriction Exception (DTlbSizeRestriction) */ +#define XCHAL_EXCCAUSE_LOAD_CACHE_ATTRIBUTE 28 /* Load Cache Attribute Exception (LoadCacheAttribute) */ +#define XCHAL_EXCCAUSE_STORE_CACHE_ATTRIBUTE 29 /* Store Cache Attribute Exception (StoreCacheAttribute) */ +#define XCHAL_EXCCAUSE_FLOATING_POINT 40 /* Floating Point Exception (FloatingPoint) */ + + + +/*---------------------------------------------------------------------- + TIMERS + ----------------------------------------------------------------------*/ + +#define XCHAL_HAVE_CCOUNT 1 /* 1 if have CCOUNT, 0 otherwise */ +/*#define XCHAL_HAVE_TIMERS XCHAL_HAVE_CCOUNT*/ +#define XCHAL_NUM_TIMERS 3 /* number of CCOMPAREn regs */ + + + +/*---------------------------------------------------------------------- + DEBUG + ----------------------------------------------------------------------*/ + +#define XCHAL_HAVE_DEBUG 1 /* 1 if debug option configured, 0 otherwise */ +#define XCHAL_HAVE_OCD 1 /* 1 if OnChipDebug option configured, 0 otherwise */ +#define XCHAL_NUM_IBREAK 2 /* number of IBREAKn regs */ +#define XCHAL_NUM_DBREAK 2 /* number of DBREAKn regs */ +#define XCHAL_DEBUGLEVEL 4 /* debug interrupt level */ +/*DebugExternalInterrupt 0 0|1*/ +/*DebugUseDIRArray 0 0|1*/ + + + + +/*---------------------------------------------------------------------- + COPROCESSORS and EXTRA STATE + ----------------------------------------------------------------------*/ + +#define XCHAL_HAVE_CP 0 /* 1 if coprocessor option configured (CPENABLE present) */ +#define XCHAL_CP_MAXCFG 0 /* max allowed cp id plus one (per cfg) */ + +#include <xtensa/config/tie.h> + + + + +/*---------------------------------------------------------------------- + INTERNAL I/D RAM/ROMs and XLMI + 
----------------------------------------------------------------------*/ + +#define XCHAL_NUM_INSTROM 0 /* number of core instruction ROMs configured */ +#define XCHAL_NUM_INSTRAM 0 /* number of core instruction RAMs configured */ +#define XCHAL_NUM_DATAROM 0 /* number of core data ROMs configured */ +#define XCHAL_NUM_DATARAM 0 /* number of core data RAMs configured */ +#define XCHAL_NUM_XLMI 0 /* number of core XLMI ports configured */ +#define XCHAL_NUM_IROM XCHAL_NUM_INSTROM /* (DEPRECATED) */ +#define XCHAL_NUM_IRAM XCHAL_NUM_INSTRAM /* (DEPRECATED) */ +#define XCHAL_NUM_DROM XCHAL_NUM_DATAROM /* (DEPRECATED) */ +#define XCHAL_NUM_DRAM XCHAL_NUM_DATARAM /* (DEPRECATED) */ + + + +/*---------------------------------------------------------------------- + CACHE + ----------------------------------------------------------------------*/ + +/* Size of the cache lines in log2(bytes): */ +#define XCHAL_ICACHE_LINEWIDTH 4 +#define XCHAL_DCACHE_LINEWIDTH 4 +/* Size of the cache lines in bytes: */ +#define XCHAL_ICACHE_LINESIZE 16 +#define XCHAL_DCACHE_LINESIZE 16 +/* Max for both I-cache and D-cache (used for general alignment): */ +#define XCHAL_CACHE_LINEWIDTH_MAX 4 +#define XCHAL_CACHE_LINESIZE_MAX 16 + +/* Number of cache sets in log2(lines per way): */ +#define XCHAL_ICACHE_SETWIDTH 8 +#define XCHAL_DCACHE_SETWIDTH 8 +/* Max for both I-cache and D-cache (used for general cache-coherency page alignment): */ +#define XCHAL_CACHE_SETWIDTH_MAX 8 +#define XCHAL_CACHE_SETSIZE_MAX 256 + +/* Cache set associativity (number of ways): */ +#define XCHAL_ICACHE_WAYS 2 +#define XCHAL_DCACHE_WAYS 2 + +/* Size of the caches in bytes (ways * 2^(linewidth + setwidth)): */ +#define XCHAL_ICACHE_SIZE 8192 +#define XCHAL_DCACHE_SIZE 8192 + +/* Cache features: */ +#define XCHAL_DCACHE_IS_WRITEBACK 0 +/* Whether cache locking feature is available: */ +#define XCHAL_ICACHE_LINE_LOCKABLE 0 +#define XCHAL_DCACHE_LINE_LOCKABLE 0 + +/* Number of (encoded) cache attribute bits: */ +#define XCHAL_CA_BITS 4 /* number of bits needed to hold cache attribute encoding */ +/* (The number of access mode bits (decoded cache attribute bits) is defined by the architecture; see xtensa/hal.h?) 
*/ + + +/* Cache Attribute encodings -- lists of access modes for each cache attribute: */ +#define XCHAL_FCA_LIST XTHAL_FAM_EXCEPTION XCHAL_SEP \ + XTHAL_FAM_BYPASS XCHAL_SEP \ + XTHAL_FAM_EXCEPTION XCHAL_SEP \ + XTHAL_FAM_BYPASS XCHAL_SEP \ + XTHAL_FAM_EXCEPTION XCHAL_SEP \ + XTHAL_FAM_CACHED XCHAL_SEP \ + XTHAL_FAM_EXCEPTION XCHAL_SEP \ + XTHAL_FAM_CACHED XCHAL_SEP \ + XTHAL_FAM_EXCEPTION XCHAL_SEP \ + XTHAL_FAM_CACHED XCHAL_SEP \ + XTHAL_FAM_EXCEPTION XCHAL_SEP \ + XTHAL_FAM_CACHED XCHAL_SEP \ + XTHAL_FAM_EXCEPTION XCHAL_SEP \ + XTHAL_FAM_EXCEPTION XCHAL_SEP \ + XTHAL_FAM_EXCEPTION XCHAL_SEP \ + XTHAL_FAM_EXCEPTION +#define XCHAL_LCA_LIST XTHAL_LAM_EXCEPTION XCHAL_SEP \ + XTHAL_LAM_BYPASSG XCHAL_SEP \ + XTHAL_LAM_EXCEPTION XCHAL_SEP \ + XTHAL_LAM_BYPASSG XCHAL_SEP \ + XTHAL_LAM_EXCEPTION XCHAL_SEP \ + XTHAL_LAM_CACHED XCHAL_SEP \ + XTHAL_LAM_EXCEPTION XCHAL_SEP \ + XTHAL_LAM_CACHED XCHAL_SEP \ + XTHAL_LAM_EXCEPTION XCHAL_SEP \ + XTHAL_LAM_NACACHED XCHAL_SEP \ + XTHAL_LAM_EXCEPTION XCHAL_SEP \ + XTHAL_LAM_NACACHED XCHAL_SEP \ + XTHAL_LAM_EXCEPTION XCHAL_SEP \ + XTHAL_LAM_ISOLATE XCHAL_SEP \ + XTHAL_LAM_EXCEPTION XCHAL_SEP \ + XTHAL_LAM_CACHED +#define XCHAL_SCA_LIST XTHAL_SAM_EXCEPTION XCHAL_SEP \ + XTHAL_SAM_EXCEPTION XCHAL_SEP \ + XTHAL_SAM_EXCEPTION XCHAL_SEP \ + XTHAL_SAM_BYPASS XCHAL_SEP \ + XTHAL_SAM_EXCEPTION XCHAL_SEP \ + XTHAL_SAM_EXCEPTION XCHAL_SEP \ + XTHAL_SAM_EXCEPTION XCHAL_SEP \ + XTHAL_SAM_WRITETHRU XCHAL_SEP \ + XTHAL_SAM_EXCEPTION XCHAL_SEP \ + XTHAL_SAM_EXCEPTION XCHAL_SEP \ + XTHAL_SAM_EXCEPTION XCHAL_SEP \ + XTHAL_SAM_WRITETHRU XCHAL_SEP \ + XTHAL_SAM_EXCEPTION XCHAL_SEP \ + XTHAL_SAM_ISOLATE XCHAL_SEP \ + XTHAL_SAM_EXCEPTION XCHAL_SEP \ + XTHAL_SAM_WRITETHRU + +/* Test: + read/only: 0 + 1 + 2 + 4 + 5 + 6 + 8 + 9 + 10 + 12 + 14 + read/only: 0 + 1 + 2 + 4 + 5 + 6 + 8 + 9 + 10 + 12 + 14 + all: 0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13 + 14 + 15 + fault: 0 + 2 + 4 + 6 + 8 + 10 + 12 + 14 + r/w/x cached: + r/w/x dcached: + I-bypass: 1 + 3 + + load guard bit set: 1 + 3 + load guard bit clr: 0 + 2 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13 + 14 + 15 + hit-cache r/w/x: 7 + 11 + + fams: 5 + fams: 0 / 6 / 18 / 1 / 2 + fams: Bypass / Isolate / Cached / Exception / NACached + + MMU okay: yes +*/ + + +/*---------------------------------------------------------------------- + MMU + ----------------------------------------------------------------------*/ + +/* + * General notes on MMU parameters. + * + * Terminology: + * ASID = address-space ID (acts as an "extension" of virtual addresses) + * VPN = virtual page number + * PPN = physical page number + * CA = encoded cache attribute (access modes) + * TLB = translation look-aside buffer (term is stretched somewhat here) + * I = instruction (fetch accesses) + * D = data (load and store accesses) + * way = each TLB (ITLB and DTLB) consists of a number of "ways" + * that simultaneously match the virtual address of an access; + * a TLB successfully translates a virtual address if exactly + * one way matches the vaddr; if none match, it is a miss; + * if multiple match, one gets a "multihit" exception; + * each way can be independently configured in terms of number of + * entries, page sizes, which fields are writable or constant, etc. + * set = group of contiguous ways with exactly identical parameters + * ARF = auto-refill; hardware services a 1st-level miss by loading a PTE + * from the page table and storing it in one of the auto-refill ways; + * if this PTE load also misses, a miss exception is posted for s/w. 
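+ *	      (In this configuration, ways 0..3 of both the ITLB and the
+ *	      DTLB are the auto-refill ways; see XCHAL_ITLB_ARF_WAYS and
+ *	      XCHAL_DTLB_ARF_WAYS below.)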
+ * min-wired = a "min-wired" way can be used to map a single (minimum-sized) + * page arbitrarily under program control; it has a single entry, + * is non-auto-refill (some other way(s) must be auto-refill), + * all its fields (VPN, PPN, ASID, CA) are all writable, and it + * supports the XCHAL_MMU_MIN_PTE_PAGE_SIZE page size (a current + * restriction is that this be the only page size it supports). + * + * TLB way entries are virtually indexed. + * TLB ways that support multiple page sizes: + * - must have all writable VPN and PPN fields; + * - can only use one page size at any given time (eg. setup at startup), + * selected by the respective ITLBCFG or DTLBCFG special register, + * whose bits n*4+3 .. n*4 index the list of page sizes for way n + * (XCHAL_xTLB_SETm_PAGESZ_LOG2_LIST for set m corresponding to way n); + * this list may be sparse for auto-refill ways because auto-refill + * ways have independent lists of supported page sizes sharing a + * common encoding with PTE entries; the encoding is the index into + * this list; unsupported sizes for a given way are zero in the list; + * selecting unsupported sizes results in undefined hardware behaviour; + * - is only possible for ways 0 thru 7 (due to ITLBCFG/DTLBCFG definition). + */ + +#define XCHAL_HAVE_CACHEATTR 0 /* 1 if CACHEATTR register present, 0 if TLBs present instead */ +#define XCHAL_HAVE_TLBS 1 /* 1 if TLBs present, 0 if CACHEATTR present instead */ +#define XCHAL_HAVE_MMU XCHAL_HAVE_TLBS /* (DEPRECATED; use XCHAL_HAVE_TLBS instead; will be removed in future release) */ +#define XCHAL_HAVE_SPANNING_WAY 0 /* 1 if single way maps entire virtual address space in I+D */ +#define XCHAL_HAVE_IDENTITY_MAP 0 /* 1 if virtual addr == physical addr always, 0 otherwise */ +#define XCHAL_HAVE_MIMIC_CACHEATTR 0 /* 1 if have MMU that mimics a CACHEATTR config (CaMMU) */ +#define XCHAL_HAVE_XLT_CACHEATTR 0 /* 1 if have MMU that mimics a CACHEATTR config, but with translation (CaXltMMU) */ + +#define XCHAL_MMU_ASID_BITS 8 /* number of bits in ASIDs (address space IDs) */ +#define XCHAL_MMU_ASID_INVALID 0 /* ASID value indicating invalid address space */ +#define XCHAL_MMU_ASID_KERNEL 1 /* ASID value indicating kernel (ring 0) address space */ +#define XCHAL_MMU_RINGS 4 /* number of rings supported (1..4) */ +#define XCHAL_MMU_RING_BITS 2 /* number of bits needed to hold ring number */ +#define XCHAL_MMU_SR_BITS 0 /* number of size-restriction bits supported */ +#define XCHAL_MMU_CA_BITS 4 /* number of bits needed to hold cache attribute encoding */ +#define XCHAL_MMU_MAX_PTE_PAGE_SIZE 12 /* max page size in a PTE structure (log2) */ +#define XCHAL_MMU_MIN_PTE_PAGE_SIZE 12 /* min page size in a PTE structure (log2) */ + + +/*** Instruction TLB: ***/ + +#define XCHAL_ITLB_WAY_BITS 3 /* number of bits holding the ways */ +#define XCHAL_ITLB_WAYS 7 /* number of ways (n-way set-associative TLB) */ +#define XCHAL_ITLB_ARF_WAYS 4 /* number of auto-refill ways */ +#define XCHAL_ITLB_SETS 4 /* number of sets (groups of ways with identical settings) */ + +/* Way set to which each way belongs: */ +#define XCHAL_ITLB_WAY0_SET 0 +#define XCHAL_ITLB_WAY1_SET 0 +#define XCHAL_ITLB_WAY2_SET 0 +#define XCHAL_ITLB_WAY3_SET 0 +#define XCHAL_ITLB_WAY4_SET 1 +#define XCHAL_ITLB_WAY5_SET 2 +#define XCHAL_ITLB_WAY6_SET 3 + +/* Ways sets that are used by hardware auto-refill (ARF): */ +#define XCHAL_ITLB_ARF_SETS 1 /* number of auto-refill sets */ +#define XCHAL_ITLB_ARF_SET0 0 /* index of n'th auto-refill set */ + +/* Way sets that are "min-wired" (see 
terminology comment above): */ +#define XCHAL_ITLB_MINWIRED_SETS 0 /* number of "min-wired" sets */ + + +/* ITLB way set 0 (group of ways 0 thru 3): */ +#define XCHAL_ITLB_SET0_WAY 0 /* index of first way in this way set */ +#define XCHAL_ITLB_SET0_WAYS 4 /* number of (contiguous) ways in this way set */ +#define XCHAL_ITLB_SET0_ENTRIES_LOG2 2 /* log2(number of entries in this way) */ +#define XCHAL_ITLB_SET0_ENTRIES 4 /* number of entries in this way (always a power of 2) */ +#define XCHAL_ITLB_SET0_ARF 1 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */ +#define XCHAL_ITLB_SET0_PAGESIZES 1 /* number of supported page sizes in this way */ +#define XCHAL_ITLB_SET0_PAGESZ_BITS 0 /* number of bits to encode the page size */ +#define XCHAL_ITLB_SET0_PAGESZ_LOG2_MIN 12 /* log2(minimum supported page size) */ +#define XCHAL_ITLB_SET0_PAGESZ_LOG2_MAX 12 /* log2(maximum supported page size) */ +#define XCHAL_ITLB_SET0_PAGESZ_LOG2_LIST 12 /* list of log2(page size)s, separated by XCHAL_SEP; + 2^PAGESZ_BITS entries in list, unsupported entries are zero */ +#define XCHAL_ITLB_SET0_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */ +#define XCHAL_ITLB_SET0_VPN_CONSTMASK 0 /* constant VPN bits, not including entry index bits; 0 if all writable */ +#define XCHAL_ITLB_SET0_PPN_CONSTMASK 0 /* constant PPN bits, including entry index bits; 0 if all writable */ +#define XCHAL_ITLB_SET0_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */ +#define XCHAL_ITLB_SET0_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */ +#define XCHAL_ITLB_SET0_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */ +#define XCHAL_ITLB_SET0_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */ +#define XCHAL_ITLB_SET0_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */ + +/* ITLB way set 1 (group of ways 4 thru 4): */ +#define XCHAL_ITLB_SET1_WAY 4 /* index of first way in this way set */ +#define XCHAL_ITLB_SET1_WAYS 1 /* number of (contiguous) ways in this way set */ +#define XCHAL_ITLB_SET1_ENTRIES_LOG2 2 /* log2(number of entries in this way) */ +#define XCHAL_ITLB_SET1_ENTRIES 4 /* number of entries in this way (always a power of 2) */ +#define XCHAL_ITLB_SET1_ARF 0 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */ +#define XCHAL_ITLB_SET1_PAGESIZES 4 /* number of supported page sizes in this way */ +#define XCHAL_ITLB_SET1_PAGESZ_BITS 2 /* number of bits to encode the page size */ +#define XCHAL_ITLB_SET1_PAGESZ_LOG2_MIN 20 /* log2(minimum supported page size) */ +#define XCHAL_ITLB_SET1_PAGESZ_LOG2_MAX 26 /* log2(maximum supported page size) */ +#define XCHAL_ITLB_SET1_PAGESZ_LOG2_LIST 20 XCHAL_SEP 22 XCHAL_SEP 24 XCHAL_SEP 26 /* list of log2(page size)s, separated by XCHAL_SEP; + 2^PAGESZ_BITS entries in list, unsupported entries are zero */ +#define XCHAL_ITLB_SET1_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */ +#define XCHAL_ITLB_SET1_VPN_CONSTMASK 0 /* constant VPN bits, not including entry index bits; 0 if all writable */ +#define XCHAL_ITLB_SET1_PPN_CONSTMASK 0 /* constant PPN bits, including entry index bits; 0 if all writable */ +#define XCHAL_ITLB_SET1_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */ +#define XCHAL_ITLB_SET1_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */ +#define XCHAL_ITLB_SET1_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */ +#define 
XCHAL_ITLB_SET1_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */ +#define XCHAL_ITLB_SET1_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */ + +/* ITLB way set 2 (group of ways 5 thru 5): */ +#define XCHAL_ITLB_SET2_WAY 5 /* index of first way in this way set */ +#define XCHAL_ITLB_SET2_WAYS 1 /* number of (contiguous) ways in this way set */ +#define XCHAL_ITLB_SET2_ENTRIES_LOG2 1 /* log2(number of entries in this way) */ +#define XCHAL_ITLB_SET2_ENTRIES 2 /* number of entries in this way (always a power of 2) */ +#define XCHAL_ITLB_SET2_ARF 0 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */ +#define XCHAL_ITLB_SET2_PAGESIZES 1 /* number of supported page sizes in this way */ +#define XCHAL_ITLB_SET2_PAGESZ_BITS 0 /* number of bits to encode the page size */ +#define XCHAL_ITLB_SET2_PAGESZ_LOG2_MIN 27 /* log2(minimum supported page size) */ +#define XCHAL_ITLB_SET2_PAGESZ_LOG2_MAX 27 /* log2(maximum supported page size) */ +#define XCHAL_ITLB_SET2_PAGESZ_LOG2_LIST 27 /* list of log2(page size)s, separated by XCHAL_SEP; + 2^PAGESZ_BITS entries in list, unsupported entries are zero */ +#define XCHAL_ITLB_SET2_ASID_CONSTMASK 0xFF /* constant ASID bits; 0 if all writable */ +#define XCHAL_ITLB_SET2_VPN_CONSTMASK 0xF0000000 /* constant VPN bits, not including entry index bits; 0 if all writable */ +#define XCHAL_ITLB_SET2_PPN_CONSTMASK 0xF8000000 /* constant PPN bits, including entry index bits; 0 if all writable */ +#define XCHAL_ITLB_SET2_CA_CONSTMASK 0x0000000F /* constant CA bits; 0 if all writable */ +#define XCHAL_ITLB_SET2_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */ +#define XCHAL_ITLB_SET2_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */ +#define XCHAL_ITLB_SET2_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */ +#define XCHAL_ITLB_SET2_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */ +/* Constant ASID values for each entry of ITLB way set 2 (because ASID_CONSTMASK is non-zero): */ +#define XCHAL_ITLB_SET2_E0_ASID_CONST 0x01 +#define XCHAL_ITLB_SET2_E1_ASID_CONST 0x01 +/* Constant VPN values for each entry of ITLB way set 2 (because VPN_CONSTMASK is non-zero): */ +#define XCHAL_ITLB_SET2_E0_VPN_CONST 0xD0000000 +#define XCHAL_ITLB_SET2_E1_VPN_CONST 0xD8000000 +/* Constant PPN values for each entry of ITLB way set 2 (because PPN_CONSTMASK is non-zero): */ +#define XCHAL_ITLB_SET2_E0_PPN_CONST 0x00000000 +#define XCHAL_ITLB_SET2_E1_PPN_CONST 0x00000000 +/* Constant CA values for each entry of ITLB way set 2 (because CA_CONSTMASK is non-zero): */ +#define XCHAL_ITLB_SET2_E0_CA_CONST 0x07 +#define XCHAL_ITLB_SET2_E1_CA_CONST 0x03 + +/* ITLB way set 3 (group of ways 6 thru 6): */ +#define XCHAL_ITLB_SET3_WAY 6 /* index of first way in this way set */ +#define XCHAL_ITLB_SET3_WAYS 1 /* number of (contiguous) ways in this way set */ +#define XCHAL_ITLB_SET3_ENTRIES_LOG2 1 /* log2(number of entries in this way) */ +#define XCHAL_ITLB_SET3_ENTRIES 2 /* number of entries in this way (always a power of 2) */ +#define XCHAL_ITLB_SET3_ARF 0 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */ +#define XCHAL_ITLB_SET3_PAGESIZES 1 /* number of supported page sizes in this way */ +#define XCHAL_ITLB_SET3_PAGESZ_BITS 0 /* number of bits to encode the page size */ +#define XCHAL_ITLB_SET3_PAGESZ_LOG2_MIN 28 /* log2(minimum supported page size) */ +#define XCHAL_ITLB_SET3_PAGESZ_LOG2_MAX 
28 /* log2(maximum supported page size) */ +#define XCHAL_ITLB_SET3_PAGESZ_LOG2_LIST 28 /* list of log2(page size)s, separated by XCHAL_SEP; + 2^PAGESZ_BITS entries in list, unsupported entries are zero */ +#define XCHAL_ITLB_SET3_ASID_CONSTMASK 0xFF /* constant ASID bits; 0 if all writable */ +#define XCHAL_ITLB_SET3_VPN_CONSTMASK 0xE0000000 /* constant VPN bits, not including entry index bits; 0 if all writable */ +#define XCHAL_ITLB_SET3_PPN_CONSTMASK 0xF0000000 /* constant PPN bits, including entry index bits; 0 if all writable */ +#define XCHAL_ITLB_SET3_CA_CONSTMASK 0x0000000F /* constant CA bits; 0 if all writable */ +#define XCHAL_ITLB_SET3_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */ +#define XCHAL_ITLB_SET3_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */ +#define XCHAL_ITLB_SET3_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */ +#define XCHAL_ITLB_SET3_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */ +/* Constant ASID values for each entry of ITLB way set 3 (because ASID_CONSTMASK is non-zero): */ +#define XCHAL_ITLB_SET3_E0_ASID_CONST 0x01 +#define XCHAL_ITLB_SET3_E1_ASID_CONST 0x01 +/* Constant VPN values for each entry of ITLB way set 3 (because VPN_CONSTMASK is non-zero): */ +#define XCHAL_ITLB_SET3_E0_VPN_CONST 0xE0000000 +#define XCHAL_ITLB_SET3_E1_VPN_CONST 0xF0000000 +/* Constant PPN values for each entry of ITLB way set 3 (because PPN_CONSTMASK is non-zero): */ +#define XCHAL_ITLB_SET3_E0_PPN_CONST 0xF0000000 +#define XCHAL_ITLB_SET3_E1_PPN_CONST 0xF0000000 +/* Constant CA values for each entry of ITLB way set 3 (because CA_CONSTMASK is non-zero): */ +#define XCHAL_ITLB_SET3_E0_CA_CONST 0x07 +#define XCHAL_ITLB_SET3_E1_CA_CONST 0x03 + +/* Indexing macros: */ +#define _XCHAL_ITLB_SET(n,_what) XCHAL_ITLB_SET ## n ## _what +#define XCHAL_ITLB_SET(n,what) _XCHAL_ITLB_SET(n, _ ## what ) +#define _XCHAL_ITLB_SET_E(n,i,_what) XCHAL_ITLB_SET ## n ## _E ## i ## _what +#define XCHAL_ITLB_SET_E(n,i,what) _XCHAL_ITLB_SET_E(n,i, _ ## what ) +/* + * Example use: XCHAL_ITLB_SET(XCHAL_ITLB_ARF_SET0,ENTRIES) + * to get the value of XCHAL_ITLB_SET<n>_ENTRIES where <n> is the first auto-refill set. 
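+ *	(For this configuration XCHAL_ITLB_ARF_SET0 is 0, so the example
+ *	expands to XCHAL_ITLB_SET0_ENTRIES, i.e. 4.)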
+ */ + + +/*** Data TLB: ***/ + +#define XCHAL_DTLB_WAY_BITS 4 /* number of bits holding the ways */ +#define XCHAL_DTLB_WAYS 10 /* number of ways (n-way set-associative TLB) */ +#define XCHAL_DTLB_ARF_WAYS 4 /* number of auto-refill ways */ +#define XCHAL_DTLB_SETS 5 /* number of sets (groups of ways with identical settings) */ + +/* Way set to which each way belongs: */ +#define XCHAL_DTLB_WAY0_SET 0 +#define XCHAL_DTLB_WAY1_SET 0 +#define XCHAL_DTLB_WAY2_SET 0 +#define XCHAL_DTLB_WAY3_SET 0 +#define XCHAL_DTLB_WAY4_SET 1 +#define XCHAL_DTLB_WAY5_SET 2 +#define XCHAL_DTLB_WAY6_SET 3 +#define XCHAL_DTLB_WAY7_SET 4 +#define XCHAL_DTLB_WAY8_SET 4 +#define XCHAL_DTLB_WAY9_SET 4 + +/* Ways sets that are used by hardware auto-refill (ARF): */ +#define XCHAL_DTLB_ARF_SETS 1 /* number of auto-refill sets */ +#define XCHAL_DTLB_ARF_SET0 0 /* index of n'th auto-refill set */ + +/* Way sets that are "min-wired" (see terminology comment above): */ +#define XCHAL_DTLB_MINWIRED_SETS 1 /* number of "min-wired" sets */ +#define XCHAL_DTLB_MINWIRED_SET0 4 /* index of n'th "min-wired" set */ + + +/* DTLB way set 0 (group of ways 0 thru 3): */ +#define XCHAL_DTLB_SET0_WAY 0 /* index of first way in this way set */ +#define XCHAL_DTLB_SET0_WAYS 4 /* number of (contiguous) ways in this way set */ +#define XCHAL_DTLB_SET0_ENTRIES_LOG2 2 /* log2(number of entries in this way) */ +#define XCHAL_DTLB_SET0_ENTRIES 4 /* number of entries in this way (always a power of 2) */ +#define XCHAL_DTLB_SET0_ARF 1 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */ +#define XCHAL_DTLB_SET0_PAGESIZES 1 /* number of supported page sizes in this way */ +#define XCHAL_DTLB_SET0_PAGESZ_BITS 0 /* number of bits to encode the page size */ +#define XCHAL_DTLB_SET0_PAGESZ_LOG2_MIN 12 /* log2(minimum supported page size) */ +#define XCHAL_DTLB_SET0_PAGESZ_LOG2_MAX 12 /* log2(maximum supported page size) */ +#define XCHAL_DTLB_SET0_PAGESZ_LOG2_LIST 12 /* list of log2(page size)s, separated by XCHAL_SEP; + 2^PAGESZ_BITS entries in list, unsupported entries are zero */ +#define XCHAL_DTLB_SET0_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */ +#define XCHAL_DTLB_SET0_VPN_CONSTMASK 0 /* constant VPN bits, not including entry index bits; 0 if all writable */ +#define XCHAL_DTLB_SET0_PPN_CONSTMASK 0 /* constant PPN bits, including entry index bits; 0 if all writable */ +#define XCHAL_DTLB_SET0_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */ +#define XCHAL_DTLB_SET0_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */ +#define XCHAL_DTLB_SET0_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */ +#define XCHAL_DTLB_SET0_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */ +#define XCHAL_DTLB_SET0_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */ + +/* DTLB way set 1 (group of ways 4 thru 4): */ +#define XCHAL_DTLB_SET1_WAY 4 /* index of first way in this way set */ +#define XCHAL_DTLB_SET1_WAYS 1 /* number of (contiguous) ways in this way set */ +#define XCHAL_DTLB_SET1_ENTRIES_LOG2 2 /* log2(number of entries in this way) */ +#define XCHAL_DTLB_SET1_ENTRIES 4 /* number of entries in this way (always a power of 2) */ +#define XCHAL_DTLB_SET1_ARF 0 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */ +#define XCHAL_DTLB_SET1_PAGESIZES 4 /* number of supported page sizes in this way */ +#define XCHAL_DTLB_SET1_PAGESZ_BITS 2 /* number of bits to encode the page size */ +#define 
XCHAL_DTLB_SET1_PAGESZ_LOG2_MIN 20 /* log2(minimum supported page size) */ +#define XCHAL_DTLB_SET1_PAGESZ_LOG2_MAX 26 /* log2(maximum supported page size) */ +#define XCHAL_DTLB_SET1_PAGESZ_LOG2_LIST 20 XCHAL_SEP 22 XCHAL_SEP 24 XCHAL_SEP 26 /* list of log2(page size)s, separated by XCHAL_SEP; + 2^PAGESZ_BITS entries in list, unsupported entries are zero */ +#define XCHAL_DTLB_SET1_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */ +#define XCHAL_DTLB_SET1_VPN_CONSTMASK 0 /* constant VPN bits, not including entry index bits; 0 if all writable */ +#define XCHAL_DTLB_SET1_PPN_CONSTMASK 0 /* constant PPN bits, including entry index bits; 0 if all writable */ +#define XCHAL_DTLB_SET1_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */ +#define XCHAL_DTLB_SET1_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */ +#define XCHAL_DTLB_SET1_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */ +#define XCHAL_DTLB_SET1_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */ +#define XCHAL_DTLB_SET1_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */ + +/* DTLB way set 2 (group of ways 5 thru 5): */ +#define XCHAL_DTLB_SET2_WAY 5 /* index of first way in this way set */ +#define XCHAL_DTLB_SET2_WAYS 1 /* number of (contiguous) ways in this way set */ +#define XCHAL_DTLB_SET2_ENTRIES_LOG2 1 /* log2(number of entries in this way) */ +#define XCHAL_DTLB_SET2_ENTRIES 2 /* number of entries in this way (always a power of 2) */ +#define XCHAL_DTLB_SET2_ARF 0 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */ +#define XCHAL_DTLB_SET2_PAGESIZES 1 /* number of supported page sizes in this way */ +#define XCHAL_DTLB_SET2_PAGESZ_BITS 0 /* number of bits to encode the page size */ +#define XCHAL_DTLB_SET2_PAGESZ_LOG2_MIN 27 /* log2(minimum supported page size) */ +#define XCHAL_DTLB_SET2_PAGESZ_LOG2_MAX 27 /* log2(maximum supported page size) */ +#define XCHAL_DTLB_SET2_PAGESZ_LOG2_LIST 27 /* list of log2(page size)s, separated by XCHAL_SEP; + 2^PAGESZ_BITS entries in list, unsupported entries are zero */ +#define XCHAL_DTLB_SET2_ASID_CONSTMASK 0xFF /* constant ASID bits; 0 if all writable */ +#define XCHAL_DTLB_SET2_VPN_CONSTMASK 0xF0000000 /* constant VPN bits, not including entry index bits; 0 if all writable */ +#define XCHAL_DTLB_SET2_PPN_CONSTMASK 0xF8000000 /* constant PPN bits, including entry index bits; 0 if all writable */ +#define XCHAL_DTLB_SET2_CA_CONSTMASK 0x0000000F /* constant CA bits; 0 if all writable */ +#define XCHAL_DTLB_SET2_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */ +#define XCHAL_DTLB_SET2_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */ +#define XCHAL_DTLB_SET2_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */ +#define XCHAL_DTLB_SET2_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */ +/* Constant ASID values for each entry of DTLB way set 2 (because ASID_CONSTMASK is non-zero): */ +#define XCHAL_DTLB_SET2_E0_ASID_CONST 0x01 +#define XCHAL_DTLB_SET2_E1_ASID_CONST 0x01 +/* Constant VPN values for each entry of DTLB way set 2 (because VPN_CONSTMASK is non-zero): */ +#define XCHAL_DTLB_SET2_E0_VPN_CONST 0xD0000000 +#define XCHAL_DTLB_SET2_E1_VPN_CONST 0xD8000000 +/* Constant PPN values for each entry of DTLB way set 2 (because PPN_CONSTMASK is non-zero): */ +#define XCHAL_DTLB_SET2_E0_PPN_CONST 0x00000000 +#define 
XCHAL_DTLB_SET2_E1_PPN_CONST 0x00000000 +/* Constant CA values for each entry of DTLB way set 2 (because CA_CONSTMASK is non-zero): */ +#define XCHAL_DTLB_SET2_E0_CA_CONST 0x07 +#define XCHAL_DTLB_SET2_E1_CA_CONST 0x03 + +/* DTLB way set 3 (group of ways 6 thru 6): */ +#define XCHAL_DTLB_SET3_WAY 6 /* index of first way in this way set */ +#define XCHAL_DTLB_SET3_WAYS 1 /* number of (contiguous) ways in this way set */ +#define XCHAL_DTLB_SET3_ENTRIES_LOG2 1 /* log2(number of entries in this way) */ +#define XCHAL_DTLB_SET3_ENTRIES 2 /* number of entries in this way (always a power of 2) */ +#define XCHAL_DTLB_SET3_ARF 0 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */ +#define XCHAL_DTLB_SET3_PAGESIZES 1 /* number of supported page sizes in this way */ +#define XCHAL_DTLB_SET3_PAGESZ_BITS 0 /* number of bits to encode the page size */ +#define XCHAL_DTLB_SET3_PAGESZ_LOG2_MIN 28 /* log2(minimum supported page size) */ +#define XCHAL_DTLB_SET3_PAGESZ_LOG2_MAX 28 /* log2(maximum supported page size) */ +#define XCHAL_DTLB_SET3_PAGESZ_LOG2_LIST 28 /* list of log2(page size)s, separated by XCHAL_SEP; + 2^PAGESZ_BITS entries in list, unsupported entries are zero */ +#define XCHAL_DTLB_SET3_ASID_CONSTMASK 0xFF /* constant ASID bits; 0 if all writable */ +#define XCHAL_DTLB_SET3_VPN_CONSTMASK 0xE0000000 /* constant VPN bits, not including entry index bits; 0 if all writable */ +#define XCHAL_DTLB_SET3_PPN_CONSTMASK 0xF0000000 /* constant PPN bits, including entry index bits; 0 if all writable */ +#define XCHAL_DTLB_SET3_CA_CONSTMASK 0x0000000F /* constant CA bits; 0 if all writable */ +#define XCHAL_DTLB_SET3_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */ +#define XCHAL_DTLB_SET3_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */ +#define XCHAL_DTLB_SET3_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */ +#define XCHAL_DTLB_SET3_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */ +/* Constant ASID values for each entry of DTLB way set 3 (because ASID_CONSTMASK is non-zero): */ +#define XCHAL_DTLB_SET3_E0_ASID_CONST 0x01 +#define XCHAL_DTLB_SET3_E1_ASID_CONST 0x01 +/* Constant VPN values for each entry of DTLB way set 3 (because VPN_CONSTMASK is non-zero): */ +#define XCHAL_DTLB_SET3_E0_VPN_CONST 0xE0000000 +#define XCHAL_DTLB_SET3_E1_VPN_CONST 0xF0000000 +/* Constant PPN values for each entry of DTLB way set 3 (because PPN_CONSTMASK is non-zero): */ +#define XCHAL_DTLB_SET3_E0_PPN_CONST 0xF0000000 +#define XCHAL_DTLB_SET3_E1_PPN_CONST 0xF0000000 +/* Constant CA values for each entry of DTLB way set 3 (because CA_CONSTMASK is non-zero): */ +#define XCHAL_DTLB_SET3_E0_CA_CONST 0x07 +#define XCHAL_DTLB_SET3_E1_CA_CONST 0x03 + +/* DTLB way set 4 (group of ways 7 thru 9): */ +#define XCHAL_DTLB_SET4_WAY 7 /* index of first way in this way set */ +#define XCHAL_DTLB_SET4_WAYS 3 /* number of (contiguous) ways in this way set */ +#define XCHAL_DTLB_SET4_ENTRIES_LOG2 0 /* log2(number of entries in this way) */ +#define XCHAL_DTLB_SET4_ENTRIES 1 /* number of entries in this way (always a power of 2) */ +#define XCHAL_DTLB_SET4_ARF 0 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */ +#define XCHAL_DTLB_SET4_PAGESIZES 1 /* number of supported page sizes in this way */ +#define XCHAL_DTLB_SET4_PAGESZ_BITS 0 /* number of bits to encode the page size */ +#define XCHAL_DTLB_SET4_PAGESZ_LOG2_MIN 12 /* log2(minimum supported page size) */ +#define 
XCHAL_DTLB_SET4_PAGESZ_LOG2_MAX 12 /* log2(maximum supported page size) */ +#define XCHAL_DTLB_SET4_PAGESZ_LOG2_LIST 12 /* list of log2(page size)s, separated by XCHAL_SEP; + 2^PAGESZ_BITS entries in list, unsupported entries are zero */ +#define XCHAL_DTLB_SET4_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */ +#define XCHAL_DTLB_SET4_VPN_CONSTMASK 0 /* constant VPN bits, not including entry index bits; 0 if all writable */ +#define XCHAL_DTLB_SET4_PPN_CONSTMASK 0 /* constant PPN bits, including entry index bits; 0 if all writable */ +#define XCHAL_DTLB_SET4_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */ +#define XCHAL_DTLB_SET4_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */ +#define XCHAL_DTLB_SET4_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */ +#define XCHAL_DTLB_SET4_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */ +#define XCHAL_DTLB_SET4_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */ + +/* Indexing macros: */ +#define _XCHAL_DTLB_SET(n,_what) XCHAL_DTLB_SET ## n ## _what +#define XCHAL_DTLB_SET(n,what) _XCHAL_DTLB_SET(n, _ ## what ) +#define _XCHAL_DTLB_SET_E(n,i,_what) XCHAL_DTLB_SET ## n ## _E ## i ## _what +#define XCHAL_DTLB_SET_E(n,i,what) _XCHAL_DTLB_SET_E(n,i, _ ## what ) +/* + * Example use: XCHAL_DTLB_SET(XCHAL_DTLB_ARF_SET0,ENTRIES) + * to get the value of XCHAL_DTLB_SET<n>_ENTRIES where <n> is the first auto-refill set. + */ + + +/* + * Determine whether we have a full MMU (with Page Table and Protection) + * usable for an MMU-based OS: + */ +#if XCHAL_HAVE_TLBS && !XCHAL_HAVE_SPANNING_WAY && XCHAL_ITLB_ARF_WAYS > 0 && XCHAL_DTLB_ARF_WAYS > 0 && XCHAL_MMU_RINGS >= 2 +# define XCHAL_HAVE_PTP_MMU 1 /* have full MMU (with page table [autorefill] and protection) */ +#else +# define XCHAL_HAVE_PTP_MMU 0 /* don't have full MMU */ +#endif + +/* + * For full MMUs, report kernel RAM segment and kernel I/O segment static page mappings: + */ +#if XCHAL_HAVE_PTP_MMU +#define XCHAL_KSEG_CACHED_VADDR 0xD0000000 /* virt.addr of kernel RAM cached static map */ +#define XCHAL_KSEG_CACHED_PADDR 0x00000000 /* phys.addr of kseg_cached */ +#define XCHAL_KSEG_CACHED_SIZE 0x08000000 /* size in bytes of kseg_cached (assumed power of 2!!!) */ +#define XCHAL_KSEG_BYPASS_VADDR 0xD8000000 /* virt.addr of kernel RAM bypass (uncached) static map */ +#define XCHAL_KSEG_BYPASS_PADDR 0x00000000 /* phys.addr of kseg_bypass */ +#define XCHAL_KSEG_BYPASS_SIZE 0x08000000 /* size in bytes of kseg_bypass (assumed power of 2!!!) */ + +#define XCHAL_KIO_CACHED_VADDR 0xE0000000 /* virt.addr of kernel I/O cached static map */ +#define XCHAL_KIO_CACHED_PADDR 0xF0000000 /* phys.addr of kio_cached */ +#define XCHAL_KIO_CACHED_SIZE 0x10000000 /* size in bytes of kio_cached (assumed power of 2!!!) */ +#define XCHAL_KIO_BYPASS_VADDR 0xF0000000 /* virt.addr of kernel I/O bypass (uncached) static map */ +#define XCHAL_KIO_BYPASS_PADDR 0xF0000000 /* phys.addr of kio_bypass */ +#define XCHAL_KIO_BYPASS_SIZE 0x10000000 /* size in bytes of kio_bypass (assumed power of 2!!!) */ + +#define XCHAL_SEG_MAPPABLE_VADDR 0x00000000 /* start of largest non-static-mapped virtual addr area */ +#define XCHAL_SEG_MAPPABLE_SIZE 0xD0000000 /* size in bytes of " */ +/* define XCHAL_SEG_MAPPABLE2_xxx if more areas present, sorted in order of descending size. 
*/ +#endif + + +/*---------------------------------------------------------------------- + MISC + ----------------------------------------------------------------------*/ + +#define XCHAL_NUM_WRITEBUFFER_ENTRIES 4 /* number of write buffer entries */ + +#define XCHAL_CORE_ID "linux_be" /* configuration's alphanumeric core identifier + (CoreID) set in the Xtensa Processor Generator */ + +#define XCHAL_BUILD_UNIQUE_ID 0x00003256 /* software build-unique ID (22-bit) */ + +/* These definitions describe the hardware targeted by this software: */ +#define XCHAL_HW_CONFIGID0 0xC103D1FF /* config ID reg 0 value (upper 32 of 64 bits) */ +#define XCHAL_HW_CONFIGID1 0x00803256 /* config ID reg 1 value (lower 32 of 64 bits) */ +#define XCHAL_CONFIGID0 XCHAL_HW_CONFIGID0 /* for backward compatibility only -- don't use! */ +#define XCHAL_CONFIGID1 XCHAL_HW_CONFIGID1 /* for backward compatibility only -- don't use! */ +#define XCHAL_HW_RELEASE_MAJOR 1050 /* major release of targeted hardware */ +#define XCHAL_HW_RELEASE_MINOR 1 /* minor release of targeted hardware */ +#define XCHAL_HW_RELEASE_NAME "T1050.1" /* full release name of targeted hardware */ +#define XTHAL_HW_REL_T1050 1 +#define XTHAL_HW_REL_T1050_1 1 +#define XCHAL_HW_CONFIGID_RELIABLE 1 + + +/* + * Miscellaneous special register fields: + */ + + +/* DBREAKC (special register number 160): */ +#define XCHAL_DBREAKC_VALIDMASK 0xC000003F /* bits of DBREAKC that are defined */ +/* MASK field: */ +#define XCHAL_DBREAKC_MASK_BITS 6 /* number of bits in MASK field */ +#define XCHAL_DBREAKC_MASK_NUM 64 /* max number of possible causes (2^bits) */ +#define XCHAL_DBREAKC_MASK_SHIFT 0 /* position of MASK bits in DBREAKC, starting from lsbit */ +#define XCHAL_DBREAKC_MASK_MASK 0x0000003F /* mask of bits in MASK field of DBREAKC */ +/* LOADBREAK field: */ +#define XCHAL_DBREAKC_LOADBREAK_BITS 1 /* number of bits in LOADBREAK field */ +#define XCHAL_DBREAKC_LOADBREAK_NUM 2 /* max number of possible causes (2^bits) */ +#define XCHAL_DBREAKC_LOADBREAK_SHIFT 30 /* position of LOADBREAK bits in DBREAKC, starting from lsbit */ +#define XCHAL_DBREAKC_LOADBREAK_MASK 0x40000000 /* mask of bits in LOADBREAK field of DBREAKC */ +/* STOREBREAK field: */ +#define XCHAL_DBREAKC_STOREBREAK_BITS 1 /* number of bits in STOREBREAK field */ +#define XCHAL_DBREAKC_STOREBREAK_NUM 2 /* max number of possible causes (2^bits) */ +#define XCHAL_DBREAKC_STOREBREAK_SHIFT 31 /* position of STOREBREAK bits in DBREAKC, starting from lsbit */ +#define XCHAL_DBREAKC_STOREBREAK_MASK 0x80000000 /* mask of bits in STOREBREAK field of DBREAKC */ + +/* PS (special register number 230): */ +#define XCHAL_PS_VALIDMASK 0x00070FFF /* bits of PS that are defined */ +/* INTLEVEL field: */ +#define XCHAL_PS_INTLEVEL_BITS 4 /* number of bits in INTLEVEL field */ +#define XCHAL_PS_INTLEVEL_NUM 16 /* max number of possible causes (2^bits) */ +#define XCHAL_PS_INTLEVEL_SHIFT 0 /* position of INTLEVEL bits in PS, starting from lsbit */ +#define XCHAL_PS_INTLEVEL_MASK 0x0000000F /* mask of bits in INTLEVEL field of PS */ +/* EXCM field: */ +#define XCHAL_PS_EXCM_BITS 1 /* number of bits in EXCM field */ +#define XCHAL_PS_EXCM_NUM 2 /* max number of possible causes (2^bits) */ +#define XCHAL_PS_EXCM_SHIFT 4 /* position of EXCM bits in PS, starting from lsbit */ +#define XCHAL_PS_EXCM_MASK 0x00000010 /* mask of bits in EXCM field of PS */ +/* PROGSTACK field: */ +#define XCHAL_PS_PROGSTACK_BITS 1 /* number of bits in PROGSTACK field */ +#define XCHAL_PS_PROGSTACK_NUM 2 /* max number of possible causes 
(2^bits) */ +#define XCHAL_PS_PROGSTACK_SHIFT 5 /* position of PROGSTACK bits in PS, starting from lsbit */ +#define XCHAL_PS_PROGSTACK_MASK 0x00000020 /* mask of bits in PROGSTACK field of PS */ +/* RING field: */ +#define XCHAL_PS_RING_BITS 2 /* number of bits in RING field */ +#define XCHAL_PS_RING_NUM 4 /* max number of possible causes (2^bits) */ +#define XCHAL_PS_RING_SHIFT 6 /* position of RING bits in PS, starting from lsbit */ +#define XCHAL_PS_RING_MASK 0x000000C0 /* mask of bits in RING field of PS */ +/* OWB field: */ +#define XCHAL_PS_OWB_BITS 4 /* number of bits in OWB field */ +#define XCHAL_PS_OWB_NUM 16 /* max number of possible causes (2^bits) */ +#define XCHAL_PS_OWB_SHIFT 8 /* position of OWB bits in PS, starting from lsbit */ +#define XCHAL_PS_OWB_MASK 0x00000F00 /* mask of bits in OWB field of PS */ +/* CALLINC field: */ +#define XCHAL_PS_CALLINC_BITS 2 /* number of bits in CALLINC field */ +#define XCHAL_PS_CALLINC_NUM 4 /* max number of possible causes (2^bits) */ +#define XCHAL_PS_CALLINC_SHIFT 16 /* position of CALLINC bits in PS, starting from lsbit */ +#define XCHAL_PS_CALLINC_MASK 0x00030000 /* mask of bits in CALLINC field of PS */ +/* WOE field: */ +#define XCHAL_PS_WOE_BITS 1 /* number of bits in WOE field */ +#define XCHAL_PS_WOE_NUM 2 /* max number of possible causes (2^bits) */ +#define XCHAL_PS_WOE_SHIFT 18 /* position of WOE bits in PS, starting from lsbit */ +#define XCHAL_PS_WOE_MASK 0x00040000 /* mask of bits in WOE field of PS */ + +/* EXCCAUSE (special register number 232): */ +#define XCHAL_EXCCAUSE_VALIDMASK 0x0000003F /* bits of EXCCAUSE that are defined */ +/* EXCCAUSE field: */ +#define XCHAL_EXCCAUSE_BITS 6 /* number of bits in EXCCAUSE register */ +#define XCHAL_EXCCAUSE_NUM 64 /* max number of possible causes (2^bits) */ +#define XCHAL_EXCCAUSE_SHIFT 0 /* position of EXCCAUSE bits in register, starting from lsbit */ +#define XCHAL_EXCCAUSE_MASK 0x0000003F /* mask of bits in EXCCAUSE register */ + +/* DEBUGCAUSE (special register number 233): */ +#define XCHAL_DEBUGCAUSE_VALIDMASK 0x0000003F /* bits of DEBUGCAUSE that are defined */ +/* ICOUNT field: */ +#define XCHAL_DEBUGCAUSE_ICOUNT_BITS 1 /* number of bits in ICOUNT field */ +#define XCHAL_DEBUGCAUSE_ICOUNT_NUM 2 /* max number of possible causes (2^bits) */ +#define XCHAL_DEBUGCAUSE_ICOUNT_SHIFT 0 /* position of ICOUNT bits in DEBUGCAUSE, starting from lsbit */ +#define XCHAL_DEBUGCAUSE_ICOUNT_MASK 0x00000001 /* mask of bits in ICOUNT field of DEBUGCAUSE */ +/* IBREAK field: */ +#define XCHAL_DEBUGCAUSE_IBREAK_BITS 1 /* number of bits in IBREAK field */ +#define XCHAL_DEBUGCAUSE_IBREAK_NUM 2 /* max number of possible causes (2^bits) */ +#define XCHAL_DEBUGCAUSE_IBREAK_SHIFT 1 /* position of IBREAK bits in DEBUGCAUSE, starting from lsbit */ +#define XCHAL_DEBUGCAUSE_IBREAK_MASK 0x00000002 /* mask of bits in IBREAK field of DEBUGCAUSE */ +/* DBREAK field: */ +#define XCHAL_DEBUGCAUSE_DBREAK_BITS 1 /* number of bits in DBREAK field */ +#define XCHAL_DEBUGCAUSE_DBREAK_NUM 2 /* max number of possible causes (2^bits) */ +#define XCHAL_DEBUGCAUSE_DBREAK_SHIFT 2 /* position of DBREAK bits in DEBUGCAUSE, starting from lsbit */ +#define XCHAL_DEBUGCAUSE_DBREAK_MASK 0x00000004 /* mask of bits in DBREAK field of DEBUGCAUSE */ +/* BREAK field: */ +#define XCHAL_DEBUGCAUSE_BREAK_BITS 1 /* number of bits in BREAK field */ +#define XCHAL_DEBUGCAUSE_BREAK_NUM 2 /* max number of possible causes (2^bits) */ +#define XCHAL_DEBUGCAUSE_BREAK_SHIFT 3 /* position of BREAK bits in DEBUGCAUSE, starting from lsbit 
*/ +#define XCHAL_DEBUGCAUSE_BREAK_MASK 0x00000008 /* mask of bits in BREAK field of DEBUGCAUSE */ +/* BREAKN field: */ +#define XCHAL_DEBUGCAUSE_BREAKN_BITS 1 /* number of bits in BREAKN field */ +#define XCHAL_DEBUGCAUSE_BREAKN_NUM 2 /* max number of possible causes (2^bits) */ +#define XCHAL_DEBUGCAUSE_BREAKN_SHIFT 4 /* position of BREAKN bits in DEBUGCAUSE, starting from lsbit */ +#define XCHAL_DEBUGCAUSE_BREAKN_MASK 0x00000010 /* mask of bits in BREAKN field of DEBUGCAUSE */ +/* DEBUGINT field: */ +#define XCHAL_DEBUGCAUSE_DEBUGINT_BITS 1 /* number of bits in DEBUGINT field */ +#define XCHAL_DEBUGCAUSE_DEBUGINT_NUM 2 /* max number of possible causes (2^bits) */ +#define XCHAL_DEBUGCAUSE_DEBUGINT_SHIFT 5 /* position of DEBUGINT bits in DEBUGCAUSE, starting from lsbit */ +#define XCHAL_DEBUGCAUSE_DEBUGINT_MASK 0x00000020 /* mask of bits in DEBUGINT field of DEBUGCAUSE */ + + + +/*---------------------------------------------------------------------- + ISA + ----------------------------------------------------------------------*/ + +#define XCHAL_HAVE_DENSITY 1 /* 1 if density option configured, 0 otherwise */ +#define XCHAL_HAVE_LOOPS 1 /* 1 if zero-overhead loops option configured, 0 otherwise */ +/* Misc instructions: */ +#define XCHAL_HAVE_NSA 0 /* 1 if NSA/NSAU instructions option configured, 0 otherwise */ +#define XCHAL_HAVE_MINMAX 0 /* 1 if MIN/MAX instructions option configured, 0 otherwise */ +#define XCHAL_HAVE_SEXT 0 /* 1 if sign-extend instruction option configured, 0 otherwise */ +#define XCHAL_HAVE_CLAMPS 0 /* 1 if CLAMPS instruction option configured, 0 otherwise */ +#define XCHAL_HAVE_MAC16 0 /* 1 if MAC16 option configured, 0 otherwise */ +#define XCHAL_HAVE_MUL16 0 /* 1 if 16-bit integer multiply option configured, 0 otherwise */ +/*#define XCHAL_HAVE_POPC 0*/ /* 1 if CRC instruction option configured, 0 otherwise */ +/*#define XCHAL_HAVE_CRC 0*/ /* 1 if POPC instruction option configured, 0 otherwise */ + +#define XCHAL_HAVE_SPECULATION 0 /* 1 if speculation option configured, 0 otherwise */ +/*#define XCHAL_HAVE_MP_SYNC 0*/ /* 1 if multiprocessor sync. 
option configured, 0 otherwise */ +#define XCHAL_HAVE_PRID 0 /* 1 if processor ID register configured, 0 otherwise */ + +#define XCHAL_NUM_MISC_REGS 2 /* number of miscellaneous registers (0..4) */ + +/* These relate a bit more to TIE: */ +#define XCHAL_HAVE_BOOLEANS 0 /* 1 if booleans option configured, 0 otherwise */ +#define XCHAL_HAVE_MUL32 0 /* 1 if 32-bit integer multiply option configured, 0 otherwise */ +#define XCHAL_HAVE_MUL32_HIGH 0 /* 1 if MUL32 option includes MULUH and MULSH, 0 otherwise */ +#define XCHAL_HAVE_FP 0 /* 1 if floating point option configured, 0 otherwise */ + + +/*---------------------------------------------------------------------- + DERIVED + ----------------------------------------------------------------------*/ + +#if XCHAL_HAVE_BE +#define XCHAL_INST_ILLN 0xD60F /* 2-byte illegal instruction, msb-first */ +#define XCHAL_INST_ILLN_BYTE0 0xD6 /* 2-byte illegal instruction, 1st byte */ +#define XCHAL_INST_ILLN_BYTE1 0x0F /* 2-byte illegal instruction, 2nd byte */ +#else +#define XCHAL_INST_ILLN 0xF06D /* 2-byte illegal instruction, lsb-first */ +#define XCHAL_INST_ILLN_BYTE0 0x6D /* 2-byte illegal instruction, 1st byte */ +#define XCHAL_INST_ILLN_BYTE1 0xF0 /* 2-byte illegal instruction, 2nd byte */ +#endif +/* Belongs in xtensa/hal.h: */ +#define XTHAL_INST_ILL 0x000000 /* 3-byte illegal instruction */ + + +/* + * Because information as to exactly which hardware release is targeted + * by a given software build is not always available, compile-time HAL + * Hardware-Release "_AT" macros are fuzzy (return 0, 1, or XCHAL_MAYBE): + */ +#ifndef XCHAL_HW_RELEASE_MAJOR +# define XCHAL_HW_CONFIGID_RELIABLE 0 +#endif +#if XCHAL_HW_CONFIGID_RELIABLE +# define XCHAL_HW_RELEASE_AT_OR_BELOW(major,minor) (XTHAL_REL_LE( XCHAL_HW_RELEASE_MAJOR,XCHAL_HW_RELEASE_MINOR, major,minor ) ? 1 : 0) +# define XCHAL_HW_RELEASE_AT_OR_ABOVE(major,minor) (XTHAL_REL_GE( XCHAL_HW_RELEASE_MAJOR,XCHAL_HW_RELEASE_MINOR, major,minor ) ? 1 : 0) +# define XCHAL_HW_RELEASE_AT(major,minor) (XTHAL_REL_EQ( XCHAL_HW_RELEASE_MAJOR,XCHAL_HW_RELEASE_MINOR, major,minor ) ? 1 : 0) +# define XCHAL_HW_RELEASE_MAJOR_AT(major) ((XCHAL_HW_RELEASE_MAJOR == (major)) ? 1 : 0) +#else +# define XCHAL_HW_RELEASE_AT_OR_BELOW(major,minor) ( ((major) < 1040 && XCHAL_HAVE_XEA2) ? 0 \ + : ((major) > 1050 && XCHAL_HAVE_XEA1) ? 1 \ + : XTHAL_MAYBE ) +# define XCHAL_HW_RELEASE_AT_OR_ABOVE(major,minor) ( ((major) >= 2000 && XCHAL_HAVE_XEA1) ? 0 \ + : (XTHAL_REL_LE(major,minor, 1040,0) && XCHAL_HAVE_XEA2) ? 1 \ + : XTHAL_MAYBE ) +# define XCHAL_HW_RELEASE_AT(major,minor) ( (((major) < 1040 && XCHAL_HAVE_XEA2) || \ + ((major) >= 2000 && XCHAL_HAVE_XEA1)) ? 0 : XTHAL_MAYBE) +# define XCHAL_HW_RELEASE_MAJOR_AT(major) XCHAL_HW_RELEASE_AT(major,0) +#endif + +/* + * Specific errata: + */ + +/* + * Erratum T1020.H13, T1030.H7, T1040.H10, T1050.H4 (fixed in T1040.3 and T1050.1; + * relevant only in XEA1, kernel-vector mode, level-one interrupts and overflows enabled): + */ +#define XCHAL_MAYHAVE_ERRATUM_XEA1KWIN (XCHAL_HAVE_XEA1 && \ + (XCHAL_HW_RELEASE_AT_OR_BELOW(1040,2) != 0 \ + || XCHAL_HW_RELEASE_AT(1050,0))) + + + +#endif /*XTENSA_CONFIG_CORE_H*/ + diff --git a/include/asm-xtensa/xtensa/config-linux_be/defs.h b/include/asm-xtensa/xtensa/config-linux_be/defs.h new file mode 100644 index 0000000..f7c58b2 --- /dev/null +++ b/include/asm-xtensa/xtensa/config-linux_be/defs.h @@ -0,0 +1,270 @@ +/* Definitions for Xtensa instructions, types, and protos. */ + +/* + * Copyright (c) 2003 Tensilica, Inc. All Rights Reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2.1 of the GNU Lesser General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, write the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, + * USA. + */ + +/* Do not modify. This is automatically generated.*/ + +#ifndef _XTENSA_BASE_HEADER +#define _XTENSA_BASE_HEADER + +#ifdef __XTENSA__ +#if defined(__GNUC__) && !defined(__XCC__) + +#define L8UI_ASM(arr, ars, imm) { \ + __asm__ volatile("l8ui %0, %1, %2" : "=a" (arr) : "a" (ars) , "i" (imm)); \ +} + +#define XT_L8UI(ars, imm) \ +({ \ + unsigned char _arr; \ + const unsigned char *_ars = ars; \ + L8UI_ASM(_arr, _ars, imm); \ + _arr; \ +}) + +#define L16UI_ASM(arr, ars, imm) { \ + __asm__ volatile("l16ui %0, %1, %2" : "=a" (arr) : "a" (ars) , "i" (imm)); \ +} + +#define XT_L16UI(ars, imm) \ +({ \ + unsigned short _arr; \ + const unsigned short *_ars = ars; \ + L16UI_ASM(_arr, _ars, imm); \ + _arr; \ +}) + +#define L16SI_ASM(arr, ars, imm) {\ + __asm__ volatile("l16si %0, %1, %2" : "=a" (arr) : "a" (ars) , "i" (imm)); \ +} + +#define XT_L16SI(ars, imm) \ +({ \ + signed short _arr; \ + const signed short *_ars = ars; \ + L16SI_ASM(_arr, _ars, imm); \ + _arr; \ +}) + +#define L32I_ASM(arr, ars, imm) { \ + __asm__ volatile("l32i %0, %1, %2" : "=a" (arr) : "a" (ars) , "i" (imm)); \ +} + +#define XT_L32I(ars, imm) \ +({ \ + unsigned _arr; \ + const unsigned *_ars = ars; \ + L32I_ASM(_arr, _ars, imm); \ + _arr; \ +}) + +#define S8I_ASM(arr, ars, imm) {\ + __asm__ volatile("s8i %0, %1, %2" : : "a" (arr), "a" (ars) , "i" (imm) : "memory" ); \ +} + +#define XT_S8I(arr, ars, imm) \ +({ \ + signed char _arr = arr; \ + const signed char *_ars = ars; \ + S8I_ASM(_arr, _ars, imm); \ +}) + +#define S16I_ASM(arr, ars, imm) {\ + __asm__ volatile("s16i %0, %1, %2" : : "a" (arr), "a" (ars) , "i" (imm) : "memory" ); \ +} + +#define XT_S16I(arr, ars, imm) \ +({ \ + signed short _arr = arr; \ + const signed short *_ars = ars; \ + S16I_ASM(_arr, _ars, imm); \ +}) + +#define S32I_ASM(arr, ars, imm) { \ + __asm__ volatile("s32i %0, %1, %2" : : "a" (arr), "a" (ars) , "i" (imm) : "memory" ); \ +} + +#define XT_S32I(arr, ars, imm) \ +({ \ + signed int _arr = arr; \ + const signed int *_ars = ars; \ + S32I_ASM(_arr, _ars, imm); \ +}) + +#define ADDI_ASM(art, ars, imm) {\ + __asm__ ("addi %0, %1, %2" : "=a" (art) : "a" (ars), "i" (imm)); \ +} + +#define XT_ADDI(ars, imm) \ +({ \ + unsigned _art; \ + unsigned _ars = ars; \ + ADDI_ASM(_art, _ars, imm); \ + _art; \ +}) + +#define ABS_ASM(arr, art) {\ + __asm__ ("abs %0, %1" : "=a" (arr) : "a" (art)); \ +} + +#define XT_ABS(art) \ +({ \ + unsigned _arr; \ + signed _art = art; \ + ABS_ASM(_arr, _art); \ + _arr; \ +}) + +/* Note: In the following 
macros that reference SAR, the magic "state" + register is used to capture the dependency on SAR. This is because + SAR is a 5-bit register and thus there are no C types that can be + used to represent it. It doesn't appear that the SAR register is + even relevant to GCC, but it is marked as "clobbered" just in + case. */ + +#define SRC_ASM(arr, ars, art) {\ + register int _xt_sar __asm__ ("state"); \ + __asm__ ("src %0, %1, %2" \ + : "=a" (arr) : "a" (ars), "a" (art), "t" (_xt_sar)); \ +} + +#define XT_SRC(ars, art) \ +({ \ + unsigned _arr; \ + unsigned _ars = ars; \ + unsigned _art = art; \ + SRC_ASM(_arr, _ars, _art); \ + _arr; \ +}) + +#define SSR_ASM(ars) {\ + register int _xt_sar __asm__ ("state"); \ + __asm__ ("ssr %1" : "=t" (_xt_sar) : "a" (ars) : "sar"); \ +} + +#define XT_SSR(ars) \ +({ \ + unsigned _ars = ars; \ + SSR_ASM(_ars); \ +}) + +#define SSL_ASM(ars) {\ + register int _xt_sar __asm__ ("state"); \ + __asm__ ("ssl %1" : "=t" (_xt_sar) : "a" (ars) : "sar"); \ +} + +#define XT_SSL(ars) \ +({ \ + unsigned _ars = ars; \ + SSL_ASM(_ars); \ +}) + +#define SSA8B_ASM(ars) {\ + register int _xt_sar __asm__ ("state"); \ + __asm__ ("ssa8b %1" : "=t" (_xt_sar) : "a" (ars) : "sar"); \ +} + +#define XT_SSA8B(ars) \ +({ \ + unsigned _ars = ars; \ + SSA8B_ASM(_ars); \ +}) + +#define SSA8L_ASM(ars) {\ + register int _xt_sar __asm__ ("state"); \ + __asm__ ("ssa8l %1" : "=t" (_xt_sar) : "a" (ars) : "sar"); \ +} + +#define XT_SSA8L(ars) \ +({ \ + unsigned _ars = ars; \ + SSA8L_ASM(_ars); \ +}) + +#define SSAI_ASM(imm) {\ + register int _xt_sar __asm__ ("state"); \ + __asm__ ("ssai %1" : "=t" (_xt_sar) : "i" (imm) : "sar"); \ +} + +#define XT_SSAI(imm) \ +({ \ + SSAI_ASM(imm); \ +}) + + + + + + + + +#endif /* __GNUC__ && !__XCC__ */ + +#ifdef __XCC__ + +/* Core load/store instructions */ +extern unsigned char _TIE_L8UI(const unsigned char * ars, immediate imm); +extern unsigned short _TIE_L16UI(const unsigned short * ars, immediate imm); +extern signed short _TIE_L16SI(const signed short * ars, immediate imm); +extern unsigned _TIE_L32I(const unsigned * ars, immediate imm); +extern void _TIE_S8I(unsigned char arr, unsigned char * ars, immediate imm); +extern void _TIE_S16I(unsigned short arr, unsigned short * ars, immediate imm); +extern void _TIE_S32I(unsigned arr, unsigned * ars, immediate imm); + +#define XT_L8UI _TIE_L8UI +#define XT_L16UI _TIE_L16UI +#define XT_L16SI _TIE_L16SI +#define XT_L32I _TIE_L32I +#define XT_S8I _TIE_S8I +#define XT_S16I _TIE_S16I +#define XT_S32I _TIE_S32I + +/* Add-immediate instruction */ +extern unsigned _TIE_ADDI(unsigned ars, immediate imm); +#define XT_ADDI _TIE_ADDI + +/* Absolute value instruction */ +extern unsigned _TIE_ABS(int art); +#define XT_ABS _TIE_ABS + +/* funnel shift instructions */ +extern unsigned _TIE_SRC(unsigned ars, unsigned art); +#define XT_SRC _TIE_SRC +extern void _TIE_SSR(unsigned ars); +#define XT_SSR _TIE_SSR +extern void _TIE_SSL(unsigned ars); +#define XT_SSL _TIE_SSL +extern void _TIE_SSA8B(unsigned ars); +#define XT_SSA8B _TIE_SSA8B +extern void _TIE_SSA8L(unsigned ars); +#define XT_SSA8L _TIE_SSA8L +extern void _TIE_SSAI(immediate imm); +#define XT_SSAI _TIE_SSAI + + +#endif /* __XCC__ */ + +#endif /* __XTENSA__ */ +#endif /* !_XTENSA_BASE_HEADER */ diff --git a/include/asm-xtensa/xtensa/config-linux_be/specreg.h b/include/asm-xtensa/xtensa/config-linux_be/specreg.h new file mode 100644 index 0000000..fa4106a --- /dev/null +++ b/include/asm-xtensa/xtensa/config-linux_be/specreg.h @@ -0,0 +1,99 @@ +/* + * Xtensa Special 
Register symbolic names + */ + +/* $Id: specreg.h,v 1.2 2003/03/07 19:15:18 joetaylor Exp $ */ + +/* + * Copyright (c) 2003 Tensilica, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2.1 of the GNU Lesser General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, write the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, + * USA. + */ + +#ifndef XTENSA_SPECREG_H +#define XTENSA_SPECREG_H + +/* Include these special register bitfield definitions, for historical reasons: */ +#include <xtensa/corebits.h> + + +/* Special registers: */ +#define LBEG 0 +#define LEND 1 +#define LCOUNT 2 +#define SAR 3 +#define WINDOWBASE 72 +#define WINDOWSTART 73 +#define PTEVADDR 83 +#define RASID 90 +#define ITLBCFG 91 +#define DTLBCFG 92 +#define IBREAKENABLE 96 +#define DDR 104 +#define IBREAKA_0 128 +#define IBREAKA_1 129 +#define DBREAKA_0 144 +#define DBREAKA_1 145 +#define DBREAKC_0 160 +#define DBREAKC_1 161 +#define EPC_1 177 +#define EPC_2 178 +#define EPC_3 179 +#define EPC_4 180 +#define DEPC 192 +#define EPS_2 194 +#define EPS_3 195 +#define EPS_4 196 +#define EXCSAVE_1 209 +#define EXCSAVE_2 210 +#define EXCSAVE_3 211 +#define EXCSAVE_4 212 +#define INTERRUPT 226 +#define INTENABLE 228 +#define PS 230 +#define EXCCAUSE 232 +#define DEBUGCAUSE 233 +#define CCOUNT 234 +#define ICOUNT 236 +#define ICOUNTLEVEL 237 +#define EXCVADDR 238 +#define CCOMPARE_0 240 +#define CCOMPARE_1 241 +#define CCOMPARE_2 242 +#define MISC_REG_0 244 +#define MISC_REG_1 245 + +/* Special cases (bases of special register series): */ +#define IBREAKA 128 +#define DBREAKA 144 +#define DBREAKC 160 +#define EPC 176 +#define EPS 192 +#define EXCSAVE 208 +#define CCOMPARE 240 + +/* Special names for read-only and write-only interrupt registers: */ +#define INTREAD 226 +#define INTSET 226 +#define INTCLEAR 227 + +#endif /* XTENSA_SPECREG_H */ + diff --git a/include/asm-xtensa/xtensa/config-linux_be/system.h b/include/asm-xtensa/xtensa/config-linux_be/system.h new file mode 100644 index 0000000..cf9d4d3 --- /dev/null +++ b/include/asm-xtensa/xtensa/config-linux_be/system.h @@ -0,0 +1,198 @@ +/* + * xtensa/config/system.h -- HAL definitions that are dependent on SYSTEM configuration + * + * NOTE: The location and contents of this file are highly subject to change. + * + * Source for configuration-independent binaries (which link in a + * configuration-specific HAL library) must NEVER include this file. + * The HAL itself has historically included this file in some instances, + * but this is not appropriate either, because the HAL is meant to be + * core-specific but system independent. + */ + +/* + * Copyright (c) 2003 Tensilica, Inc. All Rights Reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2.1 of the GNU Lesser General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, write the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, + * USA. + */ + + +#ifndef XTENSA_CONFIG_SYSTEM_H +#define XTENSA_CONFIG_SYSTEM_H + +/*#include <xtensa/hal.h>*/ + + + +/*---------------------------------------------------------------------- + DEVICE ADDRESSES + ----------------------------------------------------------------------*/ + +/* + * Strange place to find these, but the configuration GUI + * allows moving these around to account for various core + * configurations. Specific boards (and their BSP software) + * will have specific meanings for these components. + */ + +/* I/O Block areas: */ +#define XSHAL_IOBLOCK_CACHED_VADDR 0xE0000000 +#define XSHAL_IOBLOCK_CACHED_PADDR 0xF0000000 +#define XSHAL_IOBLOCK_CACHED_SIZE 0x0E000000 + +#define XSHAL_IOBLOCK_BYPASS_VADDR 0xF0000000 +#define XSHAL_IOBLOCK_BYPASS_PADDR 0xF0000000 +#define XSHAL_IOBLOCK_BYPASS_SIZE 0x0E000000 + +/* System ROM: */ +#define XSHAL_ROM_VADDR 0xEE000000 +#define XSHAL_ROM_PADDR 0xFE000000 +#define XSHAL_ROM_SIZE 0x00400000 +/* Largest available area (free of vectors): */ +#define XSHAL_ROM_AVAIL_VADDR 0xEE00052C +#define XSHAL_ROM_AVAIL_VSIZE 0x003FFAD4 + +/* System RAM: */ +#define XSHAL_RAM_VADDR 0xD0000000 +#define XSHAL_RAM_PADDR 0x00000000 +#define XSHAL_RAM_VSIZE 0x08000000 +#define XSHAL_RAM_PSIZE 0x10000000 +#define XSHAL_RAM_SIZE XSHAL_RAM_PSIZE +/* Largest available area (free of vectors): */ +#define XSHAL_RAM_AVAIL_VADDR 0xD0000370 +#define XSHAL_RAM_AVAIL_VSIZE 0x07FFFC90 + +/* + * Shadow system RAM (same device as system RAM, at different address). + * (Emulation boards need this for the SONIC Ethernet driver + * when data caches are configured for writeback mode.) + * NOTE: on full MMU configs, this points to the BYPASS virtual address + * of system RAM, ie. is the same as XSHAL_RAM_* except that virtual + * addresses are viewed through the BYPASS static map rather than + * the CACHED static map. + */ +#define XSHAL_RAM_BYPASS_VADDR 0xD8000000 +#define XSHAL_RAM_BYPASS_PADDR 0x00000000 +#define XSHAL_RAM_BYPASS_PSIZE 0x08000000 + +/* Alternate system RAM (different device than system RAM): */ +#define XSHAL_ALTRAM_VADDR 0xCEE00000 +#define XSHAL_ALTRAM_PADDR 0xC0000000 +#define XSHAL_ALTRAM_SIZE 0x00200000 + + +/*---------------------------------------------------------------------- + * DEVICE-ADDRESS DEPENDENT... + * + * Values written to CACHEATTR special register (or its equivalent) + * to enable and disable caches in various modes. 
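+ * (For orientation, an assumption not stated in this file: CACHEATTR is
+ * usually described as eight 4-bit fields, one per 512MB region of the
+ * address space, with the least-significant field covering the region at
+ * 0x00000000 -- which is why the values below are written as eight hex
+ * nibbles.)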
+ *----------------------------------------------------------------------*/ + +/*---------------------------------------------------------------------- + BACKWARD COMPATIBILITY ... + ----------------------------------------------------------------------*/ + +/* + * NOTE: the following two macros are DEPRECATED. Use the latter + * board-specific macros instead, which are specially tuned for the + * particular target environments' memory maps. + */ +#define XSHAL_CACHEATTR_BYPASS XSHAL_XT2000_CACHEATTR_BYPASS /* disable caches in bypass mode */ +#define XSHAL_CACHEATTR_DEFAULT XSHAL_XT2000_CACHEATTR_DEFAULT /* default setting to enable caches (no writeback!) */ + +/*---------------------------------------------------------------------- + ISS (Instruction Set Simulator) SPECIFIC ... + ----------------------------------------------------------------------*/ + +#define XSHAL_ISS_CACHEATTR_WRITEBACK 0x1122222F /* enable caches in write-back mode */ +#define XSHAL_ISS_CACHEATTR_WRITEALLOC 0x1122222F /* enable caches in write-allocate mode */ +#define XSHAL_ISS_CACHEATTR_WRITETHRU 0x1122222F /* enable caches in write-through mode */ +#define XSHAL_ISS_CACHEATTR_BYPASS 0x2222222F /* disable caches in bypass mode */ +#define XSHAL_ISS_CACHEATTR_DEFAULT XSHAL_ISS_CACHEATTR_WRITEBACK /* default setting to enable caches */ + +/* For Coware only: */ +#define XSHAL_COWARE_CACHEATTR_WRITEBACK 0x11222222 /* enable caches in write-back mode */ +#define XSHAL_COWARE_CACHEATTR_WRITEALLOC 0x11222222 /* enable caches in write-allocate mode */ +#define XSHAL_COWARE_CACHEATTR_WRITETHRU 0x11222222 /* enable caches in write-through mode */ +#define XSHAL_COWARE_CACHEATTR_BYPASS 0x22222222 /* disable caches in bypass mode */ +#define XSHAL_COWARE_CACHEATTR_DEFAULT XSHAL_COWARE_CACHEATTR_WRITEBACK /* default setting to enable caches */ + +/* For BFM and other purposes: */ +#define XSHAL_ALLVALID_CACHEATTR_WRITEBACK 0x11222222 /* enable caches without any invalid regions */ +#define XSHAL_ALLVALID_CACHEATTR_DEFAULT XSHAL_ALLVALID_CACHEATTR_WRITEBACK /* default setting for caches without any invalid regions */ + +#define XSHAL_ISS_PIPE_REGIONS 0 +#define XSHAL_ISS_SDRAM_REGIONS 0 + + +/*---------------------------------------------------------------------- + XT2000 BOARD SPECIFIC ... + ----------------------------------------------------------------------*/ + +#define XSHAL_XT2000_CACHEATTR_WRITEBACK 0x22FFFFFF /* enable caches in write-back mode */ +#define XSHAL_XT2000_CACHEATTR_WRITEALLOC 0x22FFFFFF /* enable caches in write-allocate mode */ +#define XSHAL_XT2000_CACHEATTR_WRITETHRU 0x22FFFFFF /* enable caches in write-through mode */ +#define XSHAL_XT2000_CACHEATTR_BYPASS 0x22FFFFFF /* disable caches in bypass mode */ +#define XSHAL_XT2000_CACHEATTR_DEFAULT XSHAL_XT2000_CACHEATTR_WRITEBACK /* default setting to enable caches */ + +#define XSHAL_XT2000_PIPE_REGIONS 0x00001000 /* BusInt pipeline regions */ +#define XSHAL_XT2000_SDRAM_REGIONS 0x00000005 /* BusInt SDRAM regions */ + + +/*---------------------------------------------------------------------- + VECTOR SIZES + ----------------------------------------------------------------------*/ + +/* + * Sizes allocated to vectors by the system (memory map) configuration. + * These sizes are constrained by core configuration (eg. one vector's + * code cannot overflow into another vector) but are dependent on the + * system or board (or LSP) memory map configuration. 
+ * + * Whether or not each vector happens to be in a system ROM is also + * a system configuration matter, sometimes useful, included here also: + */ +#define XSHAL_RESET_VECTOR_SIZE 0x000004E0 +#define XSHAL_RESET_VECTOR_ISROM 1 +#define XSHAL_USER_VECTOR_SIZE 0x0000001C +#define XSHAL_USER_VECTOR_ISROM 0 +#define XSHAL_PROGRAMEXC_VECTOR_SIZE XSHAL_USER_VECTOR_SIZE /* for backward compatibility */ +#define XSHAL_USEREXC_VECTOR_SIZE XSHAL_USER_VECTOR_SIZE /* for backward compatibility */ +#define XSHAL_KERNEL_VECTOR_SIZE 0x0000001C +#define XSHAL_KERNEL_VECTOR_ISROM 0 +#define XSHAL_STACKEDEXC_VECTOR_SIZE XSHAL_KERNEL_VECTOR_SIZE /* for backward compatibility */ +#define XSHAL_KERNELEXC_VECTOR_SIZE XSHAL_KERNEL_VECTOR_SIZE /* for backward compatibility */ +#define XSHAL_DOUBLEEXC_VECTOR_SIZE 0x000000E0 +#define XSHAL_DOUBLEEXC_VECTOR_ISROM 0 +#define XSHAL_WINDOW_VECTORS_SIZE 0x00000180 +#define XSHAL_WINDOW_VECTORS_ISROM 0 +#define XSHAL_INTLEVEL2_VECTOR_SIZE 0x0000000C +#define XSHAL_INTLEVEL2_VECTOR_ISROM 0 +#define XSHAL_INTLEVEL3_VECTOR_SIZE 0x0000000C +#define XSHAL_INTLEVEL3_VECTOR_ISROM 0 +#define XSHAL_INTLEVEL4_VECTOR_SIZE 0x0000000C +#define XSHAL_INTLEVEL4_VECTOR_ISROM 1 +#define XSHAL_DEBUG_VECTOR_SIZE XSHAL_INTLEVEL4_VECTOR_SIZE +#define XSHAL_DEBUG_VECTOR_ISROM XSHAL_INTLEVEL4_VECTOR_ISROM + + +#endif /*XTENSA_CONFIG_SYSTEM_H*/ + diff --git a/include/asm-xtensa/xtensa/config-linux_be/tie.h b/include/asm-xtensa/xtensa/config-linux_be/tie.h new file mode 100644 index 0000000..3c2e514 --- /dev/null +++ b/include/asm-xtensa/xtensa/config-linux_be/tie.h @@ -0,0 +1,275 @@ +/* + * xtensa/config/tie.h -- HAL definitions that are dependent on CORE and TIE configuration + * + * This header file is sometimes referred to as the "compile-time HAL" or CHAL. + * It was generated for a specific Xtensa processor configuration, + * and furthermore for a specific set of TIE source files that extend + * basic core functionality. + * + * Source for configuration-independent binaries (which link in a + * configuration-specific HAL library) must NEVER include this file. + * It is perfectly normal, however, for the HAL source itself to include this file. + */ + +/* + * Copyright (c) 2003 Tensilica, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2.1 of the GNU Lesser General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, write the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, + * USA. 
+ */ + + +#ifndef XTENSA_CONFIG_TIE_H +#define XTENSA_CONFIG_TIE_H + +#include <xtensa/hal.h> + + +/*---------------------------------------------------------------------- + GENERAL + ----------------------------------------------------------------------*/ + +/* + * Separators for macros that expand into arrays. + * These can be predefined by files that #include this one, + * when different separators are required. + */ +/* Element separator for macros that expand into 1-dimensional arrays: */ +#ifndef XCHAL_SEP +#define XCHAL_SEP , +#endif +/* Array separator for macros that expand into 2-dimensional arrays: */ +#ifndef XCHAL_SEP2 +#define XCHAL_SEP2 },{ +#endif + + + + + + +/*---------------------------------------------------------------------- + COPROCESSORS and EXTRA STATE + ----------------------------------------------------------------------*/ + +#define XCHAL_CP_NUM 0 /* number of coprocessors */ +#define XCHAL_CP_MAX 0 /* max coprocessor id plus one (0 if none) */ +#define XCHAL_CP_MASK 0x00 /* bitmask of coprocessors by id */ + +/* Space for coprocessors' state save areas: */ +#define XCHAL_CP0_SA_SIZE 0 +#define XCHAL_CP1_SA_SIZE 0 +#define XCHAL_CP2_SA_SIZE 0 +#define XCHAL_CP3_SA_SIZE 0 +#define XCHAL_CP4_SA_SIZE 0 +#define XCHAL_CP5_SA_SIZE 0 +#define XCHAL_CP6_SA_SIZE 0 +#define XCHAL_CP7_SA_SIZE 0 +/* Minimum required alignments of CP state save areas: */ +#define XCHAL_CP0_SA_ALIGN 1 +#define XCHAL_CP1_SA_ALIGN 1 +#define XCHAL_CP2_SA_ALIGN 1 +#define XCHAL_CP3_SA_ALIGN 1 +#define XCHAL_CP4_SA_ALIGN 1 +#define XCHAL_CP5_SA_ALIGN 1 +#define XCHAL_CP6_SA_ALIGN 1 +#define XCHAL_CP7_SA_ALIGN 1 + +/* Indexing macros: */ +#define _XCHAL_CP_SA_SIZE(n) XCHAL_CP ## n ## _SA_SIZE +#define XCHAL_CP_SA_SIZE(n) _XCHAL_CP_SA_SIZE(n) /* n = 0 .. 7 */ +#define _XCHAL_CP_SA_ALIGN(n) XCHAL_CP ## n ## _SA_ALIGN +#define XCHAL_CP_SA_ALIGN(n) _XCHAL_CP_SA_ALIGN(n) /* n = 0 .. 7 */ + + +/* Space for "extra" state (user special registers and non-cp TIE) save area: */ +#define XCHAL_EXTRA_SA_SIZE 0 +#define XCHAL_EXTRA_SA_ALIGN 1 + +/* Total save area size (extra + all coprocessors) */ +/* (not useful until xthal_{save,restore}_all_extra() is implemented, */ +/* but included for Tor2 beta; doesn't account for alignment!): */ +#define XCHAL_CPEXTRA_SA_SIZE_TOR2 0 /* Tor2Beta temporary definition -- do not use */ + +/* Combined required alignment for all CP and EXTRA state save areas */ +/* (does not include required alignment for any base config registers): */ +#define XCHAL_CPEXTRA_SA_ALIGN 1 + +/* ... */ + + +#ifdef _ASMLANGUAGE +/* + * Assembly-language specific definitions (assembly macros, etc.). + */ +#include <xtensa/config/specreg.h> + +/******************** + * Macros to save and restore the non-coprocessor TIE portion of EXTRA state. + */ + +/* (none) */ + + +/******************** + * Macros to create functions that save and restore all EXTRA (non-coprocessor) state + * (does not include zero-overhead loop registers and non-optional registers). + */ + + /* + * Macro that expands to the body of a function that + * stores the extra (non-coprocessor) optional/custom state. + * Entry: a2 = ptr to save area in which to save extra state + * Exit: any register a2-a15 (?) may have been clobbered. + */ + .macro xchal_extra_store_funcbody + .endm + + + /* + * Macro that expands to the body of a function that + * loads the extra (non-coprocessor) optional/custom state. + * Entry: a2 = ptr to save area from which to restore extra state + * Exit: any register a2-a15 (?) may have been clobbered. 
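+ * (Note: in this configuration XCHAL_EXTRA_SA_SIZE is 0, so the macro
+ * below expands to an empty body.)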
+ */ + .macro xchal_extra_load_funcbody + .endm + + +/******************** + * Macros to save and restore the state of each TIE coprocessor. + */ + + + +/******************** + * Macros to create functions that save and restore the state of *any* TIE coprocessor. + */ + + /* + * Macro that expands to the body of a function + * that stores the selected coprocessor's state (registers etc). + * Entry: a2 = ptr to save area in which to save cp state + * a3 = coprocessor number + * Exit: any register a2-a15 (?) may have been clobbered. + */ + .macro xchal_cpi_store_funcbody + .endm + + + /* + * Macro that expands to the body of a function + * that loads the selected coprocessor's state (registers etc). + * Entry: a2 = ptr to save area from which to restore cp state + * a3 = coprocessor number + * Exit: any register a2-a15 (?) may have been clobbered. + */ + .macro xchal_cpi_load_funcbody + .endm + +#endif /*_ASMLANGUAGE*/ + + +/* + * Contents of save areas in terms of libdb register numbers. + * NOTE: CONTENTS_LIBDB_{UREG,REGF} macros are not defined in this file; + * it is up to the user of this header file to define these macros + * usefully before each expansion of the CONTENTS_LIBDB macros. + * (Fields rsv[123] are reserved for future additions; they are currently + * set to zero but may be set to some useful values in the future.) + * + * CONTENTS_LIBDB_SREG(libdbnum, offset, size, align, rsv1, name, sregnum, bitmask, rsv2, rsv3) + * CONTENTS_LIBDB_UREG(libdbnum, offset, size, align, rsv1, name, uregnum, bitmask, rsv2, rsv3) + * CONTENTS_LIBDB_REGF(libdbnum, offset, size, align, rsv1, name, index, numentries, contentsize, regname_base, regfile_name, rsv2, rsv3) + */ + +#define XCHAL_EXTRA_SA_CONTENTS_LIBDB_NUM 0 +#define XCHAL_EXTRA_SA_CONTENTS_LIBDB /* empty */ + +#define XCHAL_CP0_SA_CONTENTS_LIBDB_NUM 0 +#define XCHAL_CP0_SA_CONTENTS_LIBDB /* empty */ + +#define XCHAL_CP1_SA_CONTENTS_LIBDB_NUM 0 +#define XCHAL_CP1_SA_CONTENTS_LIBDB /* empty */ + +#define XCHAL_CP2_SA_CONTENTS_LIBDB_NUM 0 +#define XCHAL_CP2_SA_CONTENTS_LIBDB /* empty */ + +#define XCHAL_CP3_SA_CONTENTS_LIBDB_NUM 0 +#define XCHAL_CP3_SA_CONTENTS_LIBDB /* empty */ + +#define XCHAL_CP4_SA_CONTENTS_LIBDB_NUM 0 +#define XCHAL_CP4_SA_CONTENTS_LIBDB /* empty */ + +#define XCHAL_CP5_SA_CONTENTS_LIBDB_NUM 0 +#define XCHAL_CP5_SA_CONTENTS_LIBDB /* empty */ + +#define XCHAL_CP6_SA_CONTENTS_LIBDB_NUM 0 +#define XCHAL_CP6_SA_CONTENTS_LIBDB /* empty */ + +#define XCHAL_CP7_SA_CONTENTS_LIBDB_NUM 0 +#define XCHAL_CP7_SA_CONTENTS_LIBDB /* empty */ + + + + + + +/*---------------------------------------------------------------------- + MISC + ----------------------------------------------------------------------*/ + +#if 0 /* is there something equivalent for user TIE? */ +#define XCHAL_CORE_ID "linux_be" /* configuration's alphanumeric core identifier + (CoreID) set in the Xtensa Processor Generator */ + +#define XCHAL_BUILD_UNIQUE_ID 0x00003256 /* software build-unique ID (22-bit) */ + +/* These definitions describe the hardware targeted by this software: */ +#define XCHAL_HW_CONFIGID0 0xC103D1FF /* config ID reg 0 value (upper 32 of 64 bits) */ +#define XCHAL_HW_CONFIGID1 0x00803256 /* config ID reg 1 value (lower 32 of 64 bits) */ +#define XCHAL_CONFIGID0 XCHAL_HW_CONFIGID0 /* for backward compatibility only -- don't use! */ +#define XCHAL_CONFIGID1 XCHAL_HW_CONFIGID1 /* for backward compatibility only -- don't use! 
*/ +#define XCHAL_HW_RELEASE_MAJOR 1050 /* major release of targeted hardware */ +#define XCHAL_HW_RELEASE_MINOR 1 /* minor release of targeted hardware */ +#define XCHAL_HW_RELEASE_NAME "T1050.1" /* full release name of targeted hardware */ +#define XTHAL_HW_REL_T1050 1 +#define XTHAL_HW_REL_T1050_1 1 +#define XCHAL_HW_CONFIGID_RELIABLE 1 +#endif /*0*/ + + + +/*---------------------------------------------------------------------- + ISA + ----------------------------------------------------------------------*/ + +#if 0 /* these probably don't belong here, but are related to or implemented using TIE */ +#define XCHAL_HAVE_BOOLEANS 0 /* 1 if booleans option configured, 0 otherwise */ +/* Misc instructions: */ +#define XCHAL_HAVE_MUL32 0 /* 1 if 32-bit integer multiply option configured, 0 otherwise */ +#define XCHAL_HAVE_MUL32_HIGH 0 /* 1 if MUL32 option includes MULUH and MULSH, 0 otherwise */ + +#define XCHAL_HAVE_FP 0 /* 1 if floating point option configured, 0 otherwise */ +#endif /*0*/ + + +#endif /*XTENSA_CONFIG_TIE_H*/ + diff --git a/include/asm-xtensa/xtensa/coreasm.h b/include/asm-xtensa/xtensa/coreasm.h new file mode 100644 index 0000000..a8cfb54 --- /dev/null +++ b/include/asm-xtensa/xtensa/coreasm.h @@ -0,0 +1,526 @@ +#ifndef XTENSA_COREASM_H +#define XTENSA_COREASM_H + +/* + * THIS FILE IS GENERATED -- DO NOT MODIFY BY HAND + * + * include/asm-xtensa/xtensa/coreasm.h -- assembler-specific + * definitions that depend on CORE configuration. + * + * Source for configuration-independent binaries (which link in a + * configuration-specific HAL library) must NEVER include this file. + * It is perfectly normal, however, for the HAL itself to include this + * file. + * + * This file must NOT include xtensa/config/system.h. Any assembler + * header file that depends on system information should likely go in + * a new systemasm.h (or sysasm.h) header file. + * + * NOTE: macro beqi32 is NOT configuration-dependent, and is placed + * here til we will have configuration-independent header file. + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file "COPYING" in the main directory of + * this archive for more details. + * + * Copyright (C) 2002 Tensilica Inc. + */ + + +#include <xtensa/config/core.h> +#include <xtensa/config/specreg.h> + +/* + * Assembly-language specific definitions (assembly macros, etc.). + */ + +/*---------------------------------------------------------------------- + * find_ms_setbit + * + * This macro finds the most significant bit that is set in <as> + * and return its index + <base> in <ad>, or <base> - 1 if <as> is zero. + * The index counts starting at zero for the lsbit, so the return + * value ranges from <base>-1 (no bit set) to <base>+31 (msbit set). + * + * Parameters: + * <ad> destination address register (any register) + * <as> source address register + * <at> temporary address register (must be different than <as>) + * <base> constant value added to result (usually 0 or 1) + * On entry: + * <ad> = undefined if different than <as> + * <as> = value whose most significant set bit is to be found + * <at> = undefined + * no other registers are used by this macro. + * On exit: + * <ad> = <base> + index of msbit set in original <as>, + * = <base> - 1 if original <as> was zero. 
+ * <as> clobbered (if not <ad>) + * <at> clobbered (if not <ad>) + * Example: + * find_ms_setbit a0, a4, a0, 0 -- return in a0 index of msbit set in a4 + */ + + .macro find_ms_setbit ad, as, at, base +#if XCHAL_HAVE_NSA + movi \at, 31+\base + nsau \as, \as // get index of \as, numbered from msbit (32 if absent) + sub \ad, \at, \as // get numbering from lsbit (0..31, -1 if absent) +#else /* XCHAL_HAVE_NSA */ + movi \at, \base // start with result of 0 (point to lsbit of 32) + + beqz \as, 2f // special case for zero argument: return -1 + bltui \as, 0x10000, 1f // is it one of the 16 lsbits? (if so, check lower 16 bits) + addi \at, \at, 16 // no, increment result to upper 16 bits (of 32) + //srli \as, \as, 16 // check upper half (shift right 16 bits) + extui \as, \as, 16, 16 // check upper half (shift right 16 bits) +1: bltui \as, 0x100, 1f // is it one of the 8 lsbits? (if so, check lower 8 bits) + addi \at, \at, 8 // no, increment result to upper 8 bits (of 16) + srli \as, \as, 8 // shift right to check upper 8 bits +1: bltui \as, 0x10, 1f // is it one of the 4 lsbits? (if so, check lower 4 bits) + addi \at, \at, 4 // no, increment result to upper 4 bits (of 8) + srli \as, \as, 4 // shift right 4 bits to check upper half +1: bltui \as, 0x4, 1f // is it one of the 2 lsbits? (if so, check lower 2 bits) + addi \at, \at, 2 // no, increment result to upper 2 bits (of 4) + srli \as, \as, 2 // shift right 2 bits to check upper half +1: bltui \as, 0x2, 1f // is it the lsbit? + addi \at, \at, 2 // no, increment result to upper bit (of 2) +2: addi \at, \at, -1 // (from just above: add 1; from beqz: return -1) + //srli \as, \as, 1 +1: // done! \at contains index of msbit set (or -1 if none set) + .if 0x\ad - 0x\at // destination different than \at ? (works because regs are a0-a15) + mov \ad, \at // then move result to \ad + .endif +#endif /* XCHAL_HAVE_NSA */ + .endm // find_ms_setbit + +/*---------------------------------------------------------------------- + * find_ls_setbit + * + * This macro finds the least significant bit that is set in <as>, + * and return its index in <ad>. + * Usage is the same as for the find_ms_setbit macro. + * Example: + * find_ls_setbit a0, a4, a0, 0 -- return in a0 index of lsbit set in a4 + */ + + .macro find_ls_setbit ad, as, at, base + neg \at, \as // keep only the least-significant bit that is set... + and \as, \at, \as // ... in \as + find_ms_setbit \ad, \as, \at, \base + .endm // find_ls_setbit + +/*---------------------------------------------------------------------- + * find_ls_one + * + * Same as find_ls_setbit with base zero. + * Source (as) and destination (ad) registers must be different. + * Provided for backward compatibility. + */ + + .macro find_ls_one ad, as + find_ls_setbit \ad, \as, \ad, 0 + .endm // find_ls_one + +/*---------------------------------------------------------------------- + * floop, floopnez, floopgtz, floopend + * + * These macros are used for fast inner loops that + * work whether or not the Loops options is configured. + * If the Loops option is configured, they simply use + * the zero-overhead LOOP instructions; otherwise + * they use explicit decrement and branch instructions. + * + * They are used in pairs, with floop, floopnez or floopgtz + * at the beginning of the loop, and floopend at the end. + * + * Each pair of loop macro calls must be given the loop count + * address register and a unique label for that loop. 
+ * + * Example: + * + * movi a3, 16 // loop 16 times + * floop a3, myloop1 + * : + * bnez a7, end1 // exit loop if a7 != 0 + * : + * floopend a3, myloop1 + * end1: + * + * Like the LOOP instructions, these macros cannot be + * nested, must include at least one instruction, + * cannot call functions inside the loop, etc. + * The loop can be exited by jumping to the instruction + * following floopend (or elsewhere outside the loop), + * or continued by jumping to a NOP instruction placed + * immediately before floopend. + * + * Unlike LOOP instructions, the register passed to floop* + * cannot be used inside the loop, because it is used as + * the loop counter if the Loops option is not configured. + * And its value is undefined after exiting the loop. + * And because the loop counter register is active inside + * the loop, you can't easily use this construct to loop + * across a register file using ROTW as you might with LOOP + * instructions, unless you copy the loop register along. + */ + + /* Named label version of the macros: */ + + .macro floop ar, endlabel + floop_ \ar, .Lfloopstart_\endlabel, .Lfloopend_\endlabel + .endm + + .macro floopnez ar, endlabel + floopnez_ \ar, .Lfloopstart_\endlabel, .Lfloopend_\endlabel + .endm + + .macro floopgtz ar, endlabel + floopgtz_ \ar, .Lfloopstart_\endlabel, .Lfloopend_\endlabel + .endm + + .macro floopend ar, endlabel + floopend_ \ar, .Lfloopstart_\endlabel, .Lfloopend_\endlabel + .endm + + /* Numbered local label version of the macros: */ +#if 0 /*UNTESTED*/ + .macro floop89 ar + floop_ \ar, 8, 9f + .endm + + .macro floopnez89 ar + floopnez_ \ar, 8, 9f + .endm + + .macro floopgtz89 ar + floopgtz_ \ar, 8, 9f + .endm + + .macro floopend89 ar + floopend_ \ar, 8b, 9 + .endm +#endif /*0*/ + + /* Underlying version of the macros: */ + + .macro floop_ ar, startlabel, endlabelref + .ifdef _infloop_ + .if _infloop_ + .err // Error: floop cannot be nested + .endif + .endif + .set _infloop_, 1 +#if XCHAL_HAVE_LOOPS + loop \ar, \endlabelref +#else /* XCHAL_HAVE_LOOPS */ +\startlabel: + addi \ar, \ar, -1 +#endif /* XCHAL_HAVE_LOOPS */ + .endm // floop_ + + .macro floopnez_ ar, startlabel, endlabelref + .ifdef _infloop_ + .if _infloop_ + .err // Error: floopnez cannot be nested + .endif + .endif + .set _infloop_, 1 +#if XCHAL_HAVE_LOOPS + loopnez \ar, \endlabelref +#else /* XCHAL_HAVE_LOOPS */ + beqz \ar, \endlabelref +\startlabel: + addi \ar, \ar, -1 +#endif /* XCHAL_HAVE_LOOPS */ + .endm // floopnez_ + + .macro floopgtz_ ar, startlabel, endlabelref + .ifdef _infloop_ + .if _infloop_ + .err // Error: floopgtz cannot be nested + .endif + .endif + .set _infloop_, 1 +#if XCHAL_HAVE_LOOPS + loopgtz \ar, \endlabelref +#else /* XCHAL_HAVE_LOOPS */ + bltz \ar, \endlabelref + beqz \ar, \endlabelref +\startlabel: + addi \ar, \ar, -1 +#endif /* XCHAL_HAVE_LOOPS */ + .endm // floopgtz_ + + + .macro floopend_ ar, startlabelref, endlabel + .ifndef _infloop_ + .err // Error: floopend without matching floopXXX + .endif + .ifeq _infloop_ + .err // Error: floopend without matching floopXXX + .endif + .set _infloop_, 0 +#if ! XCHAL_HAVE_LOOPS + bnez \ar, \startlabelref +#endif /* XCHAL_HAVE_LOOPS */ +\endlabel: + .endm // floopend_ + +/*---------------------------------------------------------------------- + * crsil -- conditional RSIL (read/set interrupt level) + * + * Executes the RSIL instruction if it exists, else just reads PS. + * The RSIL instruction does not exist in the new exception architecture + * if the interrupt option is not selected. 
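+ * Illustrative example (the scratch register a2 and level 1 are arbitrary
+ * choices, not required by the macro):
+ *   crsil a2, 1     // old PS -> a2; PS.INTLEVEL becomes 1 when RSIL exists
+ *   ...             // short critical section
+ *   wsr  a2, PS     // restore the previous PS
+ *   rsync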
+ */ + + .macro crsil ar, newlevel +#if XCHAL_HAVE_OLD_EXC_ARCH || XCHAL_HAVE_INTERRUPTS + rsil \ar, \newlevel +#else + rsr \ar, PS +#endif + .endm // crsil + +/*---------------------------------------------------------------------- + * window_spill{4,8,12} + * + * These macros spill callers' register windows to the stack. + * They work for both privileged and non-privileged tasks. + * Must be called from a windowed ABI context, eg. within + * a windowed ABI function (ie. valid stack frame, window + * exceptions enabled, not in exception mode, etc). + * + * This macro requires a single invocation of the window_spill_common + * macro in the same assembly unit and section. + * + * Note that using window_spill{4,8,12} macros is more efficient + * than calling a function implemented using window_spill_function, + * because the latter needs extra code to figure out the size of + * the call to the spilling function. + * + * Example usage: + * + * .text + * .align 4 + * .global some_function + * .type some_function,@function + * some_function: + * entry a1, 16 + * : + * : + * + * window_spill4 // spill windows of some_function's callers; preserves a0..a3 only; + * // to use window_spill{8,12} in this example function we'd have + * // to increase space allocated by the entry instruction, because + * // 16 bytes only allows call4; 32 or 48 bytes (+locals) are needed + * // for call8/window_spill8 or call12/window_spill12 respectively. + * : + * + * retw + * + * window_spill_common // instantiates code used by window_spill4 + * + * + * On entry: + * none (if window_spill4) + * stack frame has enough space allocated for call8 (if window_spill8) + * stack frame has enough space allocated for call12 (if window_spill12) + * On exit: + * a4..a15 clobbered (if window_spill4) + * a8..a15 clobbered (if window_spill8) + * a12..a15 clobbered (if window_spill12) + * no caller windows are in live registers + */ + + .macro window_spill4 +#if XCHAL_HAVE_WINDOWED +# if XCHAL_NUM_AREGS == 16 + movi a15, 0 // for 16-register files, no need to call to reach the end +# elif XCHAL_NUM_AREGS == 32 + call4 .L__wdwspill_assist28 // call deep enough to clear out any live callers +# elif XCHAL_NUM_AREGS == 64 + call4 .L__wdwspill_assist60 // call deep enough to clear out any live callers +# endif +#endif + .endm // window_spill4 + + .macro window_spill8 +#if XCHAL_HAVE_WINDOWED +# if XCHAL_NUM_AREGS == 16 + movi a15, 0 // for 16-register files, no need to call to reach the end +# elif XCHAL_NUM_AREGS == 32 + call8 .L__wdwspill_assist24 // call deep enough to clear out any live callers +# elif XCHAL_NUM_AREGS == 64 + call8 .L__wdwspill_assist56 // call deep enough to clear out any live callers +# endif +#endif + .endm // window_spill8 + + .macro window_spill12 +#if XCHAL_HAVE_WINDOWED +# if XCHAL_NUM_AREGS == 16 + movi a15, 0 // for 16-register files, no need to call to reach the end +# elif XCHAL_NUM_AREGS == 32 + call12 .L__wdwspill_assist20 // call deep enough to clear out any live callers +# elif XCHAL_NUM_AREGS == 64 + call12 .L__wdwspill_assist52 // call deep enough to clear out any live callers +# endif +#endif + .endm // window_spill12 + +/*---------------------------------------------------------------------- + * window_spill_function + * + * This macro outputs a function that will spill its caller's callers' + * register windows to the stack. Eg. it could be used to implement + * a version of xthal_window_spill() that works in non-privileged tasks. + * This works for both privileged and non-privileged tasks. 
+ * + * Typical usage: + * + * .text + * .align 4 + * .global my_spill_function + * .type my_spill_function,@function + * my_spill_function: + * window_spill_function + * + * On entry to resulting function: + * none + * On exit from resulting function: + * none (no caller windows are in live registers) + */ + + .macro window_spill_function +#if XCHAL_HAVE_WINDOWED +# if XCHAL_NUM_AREGS == 32 + entry sp, 48 + bbci.l a0, 31, 1f // branch if called with call4 + bbsi.l a0, 30, 2f // branch if called with call12 + call8 .L__wdwspill_assist16 // called with call8, only need another 8 + retw +1: call12 .L__wdwspill_assist16 // called with call4, only need another 12 + retw +2: call4 .L__wdwspill_assist16 // called with call12, only need another 4 + retw +# elif XCHAL_NUM_AREGS == 64 + entry sp, 48 + bbci.l a0, 31, 1f // branch if called with call4 + bbsi.l a0, 30, 2f // branch if called with call12 + call4 .L__wdwspill_assist52 // called with call8, only need a call4 + retw +1: call8 .L__wdwspill_assist52 // called with call4, only need a call8 + retw +2: call12 .L__wdwspill_assist40 // called with call12, can skip a call12 + retw +# elif XCHAL_NUM_AREGS == 16 + entry sp, 16 + bbci.l a0, 31, 1f // branch if called with call4 + bbsi.l a0, 30, 2f // branch if called with call12 + movi a7, 0 // called with call8 + retw +1: movi a11, 0 // called with call4 +2: retw // if called with call12, everything already spilled + +// movi a15, 0 // trick to spill all but the direct caller +// j 1f +// // The entry instruction is magical in the assembler (gets auto-aligned) +// // so we have to jump to it to avoid falling through the padding. +// // We need entry/retw to know where to return. +//1: entry sp, 16 +// retw +# else +# error "unrecognized address register file size" +# endif +#endif /* XCHAL_HAVE_WINDOWED */ + window_spill_common + .endm // window_spill_function + +/*---------------------------------------------------------------------- + * window_spill_common + * + * Common code used by any number of invocations of the window_spill## + * and window_spill_function macros. + * + * Must be instantiated exactly once within a given assembly unit, + * within call/j range of and same section as window_spill## + * macro invocations for that assembly unit. + * (Is automatically instantiated by the window_spill_function macro.) + */ + + .macro window_spill_common +#if XCHAL_HAVE_WINDOWED && (XCHAL_NUM_AREGS == 32 || XCHAL_NUM_AREGS == 64) + .ifndef .L__wdwspill_defined +# if XCHAL_NUM_AREGS >= 64 +.L__wdwspill_assist60: + entry sp, 32 + call8 .L__wdwspill_assist52 + retw +.L__wdwspill_assist56: + entry sp, 16 + call4 .L__wdwspill_assist52 + retw +.L__wdwspill_assist52: + entry sp, 48 + call12 .L__wdwspill_assist40 + retw +.L__wdwspill_assist40: + entry sp, 48 + call12 .L__wdwspill_assist28 + retw +# endif +.L__wdwspill_assist28: + entry sp, 48 + call12 .L__wdwspill_assist16 + retw +.L__wdwspill_assist24: + entry sp, 32 + call8 .L__wdwspill_assist16 + retw +.L__wdwspill_assist20: + entry sp, 16 + call4 .L__wdwspill_assist16 + retw +.L__wdwspill_assist16: + entry sp, 16 + movi a15, 0 + retw + .set .L__wdwspill_defined, 1 + .endif +#endif /* XCHAL_HAVE_WINDOWED with 32 or 64 aregs */ + .endm // window_spill_common + +/*---------------------------------------------------------------------- + * beqi32 + * + * macro implements version of beqi for arbitrary 32-bit immidiate value + * + * beqi32 ax, ay, imm32, label + * + * Compares value in register ax with imm32 value and jumps to label if + * equal. 
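As an illustrative aside (not part of this patch), the way beqi32 chooses an instruction sequence for a given immediate can be restated in C. The enum and helper below are hypothetical names used only for this sketch; the ranges mirror the .ifeq tests in the macro body that follows.

enum beqi32_form { FORM_BEQI, FORM_BEQZ, FORM_MOVI_BEQ };

/* Which code sequence the beqi32 macro emits for a given 32-bit immediate. */
static enum beqi32_form beqi32_form_for(long imm)
{
        if ((imm >= 1 && imm <= 8) || imm == -1)
                return FORM_BEQI;       /* immediate is directly encodable by beqi here */
        if (imm == 0)
                return FORM_BEQZ;       /* compare against zero, no scratch register needed */
        return FORM_MOVI_BEQ;           /* movi the value into the scratch register, then beq */
}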
Clobberes register ay if needed + * + */ + .macro beqi32 ax, ay, imm, label + .ifeq ((\imm-1) & ~7) // 1..8 ? + beqi \ax, \imm, \label + .else + .ifeq (\imm+1) // -1 ? + beqi \ax, \imm, \label + .else + .ifeq (\imm) // 0 ? + beqz \ax, \label + .else + // We could also handle immediates 10,12,16,32,64,128,256 + // but it would be a long macro... + movi \ay, \imm + beq \ax, \ay, \label + .endif + .endif + .endif + .endm // beqi32 + +#endif /*XTENSA_COREASM_H*/ + diff --git a/include/asm-xtensa/xtensa/corebits.h b/include/asm-xtensa/xtensa/corebits.h new file mode 100644 index 0000000..e578ade --- /dev/null +++ b/include/asm-xtensa/xtensa/corebits.h @@ -0,0 +1,77 @@ +#ifndef XTENSA_COREBITS_H +#define XTENSA_COREBITS_H + +/* + * THIS FILE IS GENERATED -- DO NOT MODIFY BY HAND + * + * xtensa/corebits.h - Xtensa Special Register field positions and masks. + * + * (In previous releases, these were defined in specreg.h, a generated file. + * This file is not generated, i.e. it is processor configuration independent.) + */ + + +/* EXCCAUSE register fields: */ +#define EXCCAUSE_EXCCAUSE_SHIFT 0 +#define EXCCAUSE_EXCCAUSE_MASK 0x3F +/* Exception causes (mostly incomplete!): */ +#define EXCCAUSE_ILLEGAL 0 +#define EXCCAUSE_SYSCALL 1 +#define EXCCAUSE_IFETCHERROR 2 +#define EXCCAUSE_LOADSTOREERROR 3 +#define EXCCAUSE_LEVEL1INTERRUPT 4 +#define EXCCAUSE_ALLOCA 5 + +/* PS register fields: */ +#define PS_WOE_SHIFT 18 +#define PS_WOE_MASK 0x00040000 +#define PS_WOE PS_WOE_MASK +#define PS_CALLINC_SHIFT 16 +#define PS_CALLINC_MASK 0x00030000 +#define PS_CALLINC(n) (((n)&3)<<PS_CALLINC_SHIFT) /* n = 0..3 */ +#define PS_OWB_SHIFT 8 +#define PS_OWB_MASK 0x00000F00 +#define PS_OWB(n) (((n)&15)<<PS_OWB_SHIFT) /* n = 0..15 (or 0..7) */ +#define PS_RING_SHIFT 6 +#define PS_RING_MASK 0x000000C0 +#define PS_RING(n) (((n)&3)<<PS_RING_SHIFT) /* n = 0..3 */ +#define PS_UM_SHIFT 5 +#define PS_UM_MASK 0x00000020 +#define PS_UM PS_UM_MASK +#define PS_EXCM_SHIFT 4 +#define PS_EXCM_MASK 0x00000010 +#define PS_EXCM PS_EXCM_MASK +#define PS_INTLEVEL_SHIFT 0 +#define PS_INTLEVEL_MASK 0x0000000F +#define PS_INTLEVEL(n) ((n)&PS_INTLEVEL_MASK) /* n = 0..15 */ +/* Backward compatibility (deprecated): */ +#define PS_PROGSTACK_SHIFT PS_UM_SHIFT +#define PS_PROGSTACK_MASK PS_UM_MASK +#define PS_PROG_SHIFT PS_UM_SHIFT +#define PS_PROG_MASK PS_UM_MASK +#define PS_PROG PS_UM + +/* DBREAKCn register fields: */ +#define DBREAKC_MASK_SHIFT 0 +#define DBREAKC_MASK_MASK 0x0000003F +#define DBREAKC_LOADBREAK_SHIFT 30 +#define DBREAKC_LOADBREAK_MASK 0x40000000 +#define DBREAKC_STOREBREAK_SHIFT 31 +#define DBREAKC_STOREBREAK_MASK 0x80000000 + +/* DEBUGCAUSE register fields: */ +#define DEBUGCAUSE_DEBUGINT_SHIFT 5 +#define DEBUGCAUSE_DEBUGINT_MASK 0x20 /* debug interrupt */ +#define DEBUGCAUSE_BREAKN_SHIFT 4 +#define DEBUGCAUSE_BREAKN_MASK 0x10 /* BREAK.N instruction */ +#define DEBUGCAUSE_BREAK_SHIFT 3 +#define DEBUGCAUSE_BREAK_MASK 0x08 /* BREAK instruction */ +#define DEBUGCAUSE_DBREAK_SHIFT 2 +#define DEBUGCAUSE_DBREAK_MASK 0x04 /* DBREAK match */ +#define DEBUGCAUSE_IBREAK_SHIFT 1 +#define DEBUGCAUSE_IBREAK_MASK 0x02 /* IBREAK match */ +#define DEBUGCAUSE_ICOUNT_SHIFT 0 +#define DEBUGCAUSE_ICOUNT_MASK 0x01 /* ICOUNT would increment to zero */ + +#endif /*XTENSA_COREBITS_H*/ + diff --git a/include/asm-xtensa/xtensa/hal.h b/include/asm-xtensa/xtensa/hal.h new file mode 100644 index 0000000..d104725 --- /dev/null +++ b/include/asm-xtensa/xtensa/hal.h @@ -0,0 +1,822 @@ +#ifndef XTENSA_HAL_H +#define XTENSA_HAL_H + +/* + * THIS FILE IS 
GENERATED -- DO NOT MODIFY BY HAND + * + * include/asm-xtensa/xtensa/hal.h -- contains a definition of the + * Core HAL interface. + * + * All definitions in this header file are independent of any specific + * Xtensa processor configuration. Thus an OS or other software can + * include this header file and be compiled into configuration- + * independent objects that can be distributed and eventually linked + * to the HAL library (libhal.a) to create a configuration-specific + * final executable. + * + * Certain definitions, however, are release-specific -- such as the + * XTHAL_RELEASE_xxx macros (or additions made in later releases). + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2002 Tensilica Inc. + */ + + +/*---------------------------------------------------------------------- + Constant Definitions + (shared with assembly) + ----------------------------------------------------------------------*/ + +/* Software release information (not configuration-specific!): */ +#define XTHAL_RELEASE_MAJOR 1050 +#define XTHAL_RELEASE_MINOR 0 +#define XTHAL_RELEASE_NAME "T1050.0-2002-08-06-eng0" +#define XTHAL_RELEASE_INTERNAL "2002-08-06-eng0" +#define XTHAL_REL_T1050 1 +#define XTHAL_REL_T1050_0 1 +#define XTHAL_REL_T1050_0_2002 1 +#define XTHAL_REL_T1050_0_2002_08 1 +#define XTHAL_REL_T1050_0_2002_08_06 1 +#define XTHAL_REL_T1050_0_2002_08_06_ENG0 1 + +/* HAL version numbers (these names are for backward compatibility): */ +#define XTHAL_MAJOR_REV XTHAL_RELEASE_MAJOR +#define XTHAL_MINOR_REV XTHAL_RELEASE_MINOR +/* + * A bit of software release history on values of XTHAL_{MAJOR,MINOR}_REV: + * + * Release MAJOR MINOR Comment + * ======= ===== ===== ======= + * T1015.n n/a n/a (HAL not yet available) + * T1020.{0,1,2} 0 1 (HAL beta) + * T1020.{3,4} 0 2 First release. + * T1020.n (n>4) 0 2 or >3 (TBD) + * T1030.0 0 1 (HAL beta) + * T1030.{1,2} 0 3 Equivalent to first release. + * T1030.n (n>=3) 0 >= 3 (TBD) + * T1040.n 1040 n Full CHAL available from T1040.2 + * T1050.n 1050 n Current release. + * + * + * Note: there is a distinction between the software release with + * which something is compiled (accessible using XTHAL_RELEASE_* macros) + * and the software release with which the HAL library was compiled + * (accessible using Xthal_release_* global variables). This + * distinction is particularly relevant for vendors that distribute + * configuration-independent binaries (eg. an OS), where their customer + * might link it with a HAL of a different Xtensa software release. + * In this case, it may be appropriate for the OS to verify at run-time + * whether XTHAL_RELEASE_* and Xthal_release_* are compatible. + * [Guidelines as to which release is compatible with which are not + * currently provided explicitly, but might be inferred from reading + * OSKit documentation for all releases -- compatibility is also highly + * dependent on which HAL features are used. Each release is usually + * backward compatible, with very few exceptions if any.] + * + * Notes: + * Tornado 2.0 supported in T1020.3+, T1030.1+, and T1040.{0,1} only. + * Tornado 2.0.2 supported in T1040.2+, and T1050. + * Compile-time HAL port of NucleusPlus supported by T1040.2+ and T1050. + */ + + +/* + * Architectural limits, independent of configuration. 
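As an illustrative aside (not part of this patch), the run-time compatibility check suggested above could look like the sketch below. The exact compatibility rules are not spelled out here, so this simply compares major/minor numbers; Xthal_release_major and Xthal_release_minor are the HAL-library globals declared later in this header, and the helper name is hypothetical.

/* Does the linked HAL library (libhal.a) come from the same software
 * release as the headers this object was compiled against? */
static int hal_release_matches(void)
{
        return Xthal_release_major == XTHAL_RELEASE_MAJOR &&
               Xthal_release_minor == XTHAL_RELEASE_MINOR;
}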
+ * Note that these are ISA-defined limits, not micro-architecture implementation + * limits enforced by the Xtensa Processor Generator (which may be stricter than + * these below). + */ +#define XTHAL_MAX_CPS 8 /* max number of coprocessors (0..7) */ +#define XTHAL_MAX_INTERRUPTS 32 /* max number of interrupts (0..31) */ +#define XTHAL_MAX_INTLEVELS 16 /* max number of interrupt levels (0..15) */ + /* (as of T1040, implementation limit is 7: 0..6) */ +#define XTHAL_MAX_TIMERS 4 /* max number of timers (CCOMPARE0..CCOMPARE3) */ + /* (as of T1040, implementation limit is 3: 0..2) */ + +/* Misc: */ +#define XTHAL_LITTLEENDIAN 0 +#define XTHAL_BIGENDIAN 1 + + +/* Interrupt types: */ +#define XTHAL_INTTYPE_UNCONFIGURED 0 +#define XTHAL_INTTYPE_SOFTWARE 1 +#define XTHAL_INTTYPE_EXTERN_EDGE 2 +#define XTHAL_INTTYPE_EXTERN_LEVEL 3 +#define XTHAL_INTTYPE_TIMER 4 +#define XTHAL_INTTYPE_NMI 5 +#define XTHAL_MAX_INTTYPES 6 /* number of interrupt types */ + +/* Timer related: */ +#define XTHAL_TIMER_UNCONFIGURED -1 /* Xthal_timer_interrupt[] value for non-existent timers */ +#define XTHAL_TIMER_UNASSIGNED XTHAL_TIMER_UNCONFIGURED /* (for backwards compatibility only) */ + + +/* Access Mode bits (tentative): */ /* bit abbr unit short_name PPC equ - Description */ +#define XTHAL_AMB_EXCEPTION 0 /* 001 E EX fls: EXception none - generate exception on any access (aka "illegal") */ +#define XTHAL_AMB_HITCACHE 1 /* 002 C CH fls: use Cache on Hit ~(I CI) - use cache on hit -- way from tag match [or H HC, or U UC] (ISA: same, except for Isolate case) */ +#define XTHAL_AMB_ALLOCATE 2 /* 004 A AL fl?: ALlocate none - refill cache on miss -- way from LRU [or F FI fill] (ISA: Read/Write Miss Refill) */ +#define XTHAL_AMB_WRITETHRU 3 /* 008 W WT --s: WriteThrough W WT - store immediately to memory (ISA: same) */ +#define XTHAL_AMB_ISOLATE 4 /* 010 I IS fls: ISolate none - use cache regardless of hit-vs-miss -- way from vaddr (ISA: use-cache-on-miss+hit) */ +#define XTHAL_AMB_GUARD 5 /* 020 G GU ?l?: GUard G * - non-speculative; spec/replay refs not permitted */ +#if 0 +#define XTHAL_AMB_ORDERED x /* 000 O OR fls: ORdered G * - mem accesses cannot be out of order */ +#define XTHAL_AMB_FUSEWRITES x /* 000 F FW --s: FuseWrites none - allow combining/merging multiple writes (to same datapath data unit) into one (implied by writeback) */ +#define XTHAL_AMB_COHERENT x /* 000 M MC fl?: Mem/MP Coherent M - on reads, other CPUs/bus-masters may need to supply data */ +#define XTHAL_AMB_TRUSTED x /* 000 T TR ?l?: TRusted none - memory will not bus error (if it does, handle as fatal imprecise interrupt) */ +#define XTHAL_AMB_PREFETCH x /* 000 P PR fl?: PRefetch none - on refill, read line+1 into prefetch buffers */ +#define XTHAL_AMB_STREAM x /* 000 S ST ???: STreaming none - access one of N stream buffers */ +#endif /*0*/ + +#define XTHAL_AM_EXCEPTION (1<<XTHAL_AMB_EXCEPTION) +#define XTHAL_AM_HITCACHE (1<<XTHAL_AMB_HITCACHE) +#define XTHAL_AM_ALLOCATE (1<<XTHAL_AMB_ALLOCATE) +#define XTHAL_AM_WRITETHRU (1<<XTHAL_AMB_WRITETHRU) +#define XTHAL_AM_ISOLATE (1<<XTHAL_AMB_ISOLATE) +#define XTHAL_AM_GUARD (1<<XTHAL_AMB_GUARD) +#if 0 +#define XTHAL_AM_ORDERED (1<<XTHAL_AMB_ORDERED) +#define XTHAL_AM_FUSEWRITES (1<<XTHAL_AMB_FUSEWRITES) +#define XTHAL_AM_COHERENT (1<<XTHAL_AMB_COHERENT) +#define XTHAL_AM_TRUSTED (1<<XTHAL_AMB_TRUSTED) +#define XTHAL_AM_PREFETCH (1<<XTHAL_AMB_PREFETCH) +#define XTHAL_AM_STREAM (1<<XTHAL_AMB_STREAM) +#endif /*0*/ + +/* + * Allowed Access Modes (bit combinations). 
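As an illustrative aside (not part of this patch), the allowed-access-mode values defined just below are plain ORs of the XTHAL_AM_* single-bit masks above; the sketch rebuilds two of them from their component bits (the helper name is hypothetical).

static void access_mode_composition_demo(void)
{
        /* "cached" fetch/load mode: hit-cache + allocate == 0x006 (XTHAL_FAM_CACHED) */
        unsigned cached    = XTHAL_AM_HITCACHE | XTHAL_AM_ALLOCATE;
        /* "writethrough" store mode: hit-cache + writethrough + guard == 0x02A (XTHAL_SAM_WRITETHRU) */
        unsigned writethru = XTHAL_AM_HITCACHE | XTHAL_AM_WRITETHRU | XTHAL_AM_GUARD;

        (void)cached;
        (void)writethru;
}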
+ * + * Columns are: + * "FOGIWACE" + * Access mode bits (see XTHAL_AMB_xxx above). + * <letter> = bit is set + * '-' = bit is clear + * '.' = bit is irrelevant / don't care, as follows: + * E=1 makes all others irrelevant + * W,F relevant only for stores + * "2345" + * Indicates which Xtensa releases support the corresponding + * access mode. Releases for each character column are: + * 2 = prior to T1020.2: T1015 (V1.5), T1020.0, T1020.1 + * 3 = T1020.2 and later: T1020.2+, T1030 + * 4 = T1040 + * 5 = T1050 (maybe) + * And the character column contents are: + * <number> = support by release(s) + * "." = unsupported by release(s) + * "?" = support unknown + */ + /* FOGIWACE 2345 */ +/* For instruction fetch: */ +#define XTHAL_FAM_EXCEPTION 0x001 /* .......E 2345 exception */ +#define XTHAL_FAM_ISOLATE 0x012 /* .--I.-C- .... isolate */ +#define XTHAL_FAM_BYPASS 0x000 /* .---.--- 2345 bypass */ +#define XTHAL_FAM_NACACHED 0x002 /* .---.-C- .... cached no-allocate (frozen) */ +#define XTHAL_FAM_CACHED 0x006 /* .---.AC- 2345 cached */ +/* For data load: */ +#define XTHAL_LAM_EXCEPTION 0x001 /* .......E 2345 exception */ +#define XTHAL_LAM_ISOLATE 0x012 /* .--I.-C- 2345 isolate */ +#define XTHAL_LAM_BYPASS 0x000 /* .O--.--- 2... bypass speculative */ +#define XTHAL_LAM_BYPASSG 0x020 /* .OG-.--- .345 bypass guarded */ +#define XTHAL_LAM_NACACHED 0x002 /* .O--.-C- 2... cached no-allocate speculative */ +#define XTHAL_LAM_NACACHEDG 0x022 /* .OG-.-C- .345 cached no-allocate guarded */ +#define XTHAL_LAM_CACHED 0x006 /* .---.AC- 2345 cached speculative */ +#define XTHAL_LAM_CACHEDG 0x026 /* .?G-.AC- .... cached guarded */ +/* For data store: */ +#define XTHAL_SAM_EXCEPTION 0x001 /* .......E 2345 exception */ +#define XTHAL_SAM_ISOLATE 0x032 /* .-GI--C- 2345 isolate */ +#define XTHAL_SAM_BYPASS 0x028 /* -OG-W--- 2345 bypass */ +/*efine XTHAL_SAM_BYPASSF 0x028*/ /* F-G-W--- ...? bypass write-combined */ +#define XTHAL_SAM_WRITETHRU 0x02A /* -OG-W-C- 234? writethrough */ +/*efine XTHAL_SAM_WRITETHRUF 0x02A*/ /* F-G-W-C- ...5 writethrough write-combined */ +#define XTHAL_SAM_WRITEALLOC 0x02E /* -OG-WAC- ...? writethrough-allocate */ +/*efine XTHAL_SAM_WRITEALLOCF 0x02E*/ /* F-G-WAC- ...? 
writethrough-allocate write-combined */ +#define XTHAL_SAM_WRITEBACK 0x026 /* F-G--AC- ...5 writeback */ + +#if 0 +/* + Cache attribute encoding for CACHEATTR (per ISA): + (Note: if this differs from ISA Ref Manual, ISA has precedence) + + Inst-fetches Loads Stores + ------------- ------------ ------------- +0x0 FCA_EXCEPTION ?LCA_NACACHED_G* SCA_WRITETHRU "uncached" +0x1 FCA_CACHED LCA_CACHED SCA_WRITETHRU cached +0x2 FCA_BYPASS LCA_BYPASS_G* SCA_BYPASS bypass +0x3 FCA_CACHED LCA_CACHED SCA_WRITEALLOCF write-allocate + or LCA_EXCEPTION SCA_EXCEPTION (if unimplemented) +0x4 FCA_CACHED LCA_CACHED SCA_WRITEBACK write-back + or LCA_EXCEPTION SCA_EXCEPTION (if unimplemented) +0x5..D FCA_EXCEPTION LCA_EXCEPTION SCA_EXCEPTION (reserved) +0xE FCA_EXCEPTION LCA_ISOLATE SCA_ISOLATE isolate +0xF FCA_EXCEPTION LCA_EXCEPTION SCA_EXCEPTION illegal + * Prior to T1020.2?, guard feature not supported, this defaulted to speculative (no _G) +*/ +#endif /*0*/ + + +#if !defined(__ASSEMBLY__) && !defined(_NOCLANGUAGE) +#ifdef __cplusplus +extern "C" { +#endif + +/*---------------------------------------------------------------------- + HAL + ----------------------------------------------------------------------*/ + +/* Constant to be checked in build = (XTHAL_MAJOR_REV<<16)|XTHAL_MINOR_REV */ +extern const unsigned int Xthal_rev_no; + + +/*---------------------------------------------------------------------- + Processor State + ----------------------------------------------------------------------*/ +/* save & restore the extra processor state */ +extern void xthal_save_extra(void *base); +extern void xthal_restore_extra(void *base); + +extern void xthal_save_cpregs(void *base, int); +extern void xthal_restore_cpregs(void *base, int); + +/*extern void xthal_save_all_extra(void *base);*/ +/*extern void xthal_restore_all_extra(void *base);*/ + +/* space for processor state */ +extern const unsigned int Xthal_extra_size; +extern const unsigned int Xthal_extra_align; +/* space for TIE register files */ +extern const unsigned int Xthal_cpregs_size[XTHAL_MAX_CPS]; +extern const unsigned int Xthal_cpregs_align[XTHAL_MAX_CPS]; + +/* total of space for the processor state (for Tor2) */ +extern const unsigned int Xthal_all_extra_size; +extern const unsigned int Xthal_all_extra_align; + +/* initialize the extra processor */ +/*extern void xthal_init_extra(void);*/ +/* initialize the TIE coprocessor */ +/*extern void xthal_init_cp(int);*/ + +/* initialize the extra processor */ +extern void xthal_init_mem_extra(void *); +/* initialize the TIE coprocessor */ +extern void xthal_init_mem_cp(void *, int); + +/* validate & invalidate the TIE register file */ +extern void xthal_validate_cp(int); +extern void xthal_invalidate_cp(int); + +/* the number of TIE coprocessors contiguous from zero (for Tor2) */ +extern const unsigned int Xthal_num_coprocessors; + +/* actual number of coprocessors */ +extern const unsigned char Xthal_cp_num; +/* index of highest numbered coprocessor, plus one */ +extern const unsigned char Xthal_cp_max; +/* index of highest allowed coprocessor number, per cfg, plus one */ +/*extern const unsigned char Xthal_cp_maxcfg;*/ +/* bitmask of which coprocessors are present */ +extern const unsigned int Xthal_cp_mask; + +/* read and write cpenable register */ +extern void xthal_set_cpenable(unsigned); +extern unsigned xthal_get_cpenable(void); + +/* read & write extra state register */ +/*extern int xthal_read_extra(void *base, unsigned reg, unsigned *value);*/ +/*extern int xthal_write_extra(void *base, 
unsigned reg, unsigned value);*/ + +/* read & write a TIE coprocessor register */ +/*extern int xthal_read_cpreg(void *base, int cp, unsigned reg, unsigned *value);*/ +/*extern int xthal_write_cpreg(void *base, int cp, unsigned reg, unsigned value);*/ + +/* return coprocessor number based on register */ +/*extern int xthal_which_cp(unsigned reg);*/ + +/*---------------------------------------------------------------------- + Interrupts + ----------------------------------------------------------------------*/ + +/* the number of interrupt levels */ +extern const unsigned char Xthal_num_intlevels; +/* the number of interrupts */ +extern const unsigned char Xthal_num_interrupts; + +/* mask for level of interrupts */ +extern const unsigned int Xthal_intlevel_mask[XTHAL_MAX_INTLEVELS]; +/* mask for level 0 to N interrupts */ +extern const unsigned int Xthal_intlevel_andbelow_mask[XTHAL_MAX_INTLEVELS]; + +/* level of each interrupt */ +extern const unsigned char Xthal_intlevel[XTHAL_MAX_INTERRUPTS]; + +/* type per interrupt */ +extern const unsigned char Xthal_inttype[XTHAL_MAX_INTERRUPTS]; + +/* masks of each type of interrupt */ +extern const unsigned int Xthal_inttype_mask[XTHAL_MAX_INTTYPES]; + +/* interrupt numbers assigned to each timer interrupt */ +extern const int Xthal_timer_interrupt[XTHAL_MAX_TIMERS]; + +/*** Virtual interrupt prioritization: ***/ + +/* Convert between interrupt levels (as per PS.INTLEVEL) and virtual interrupt priorities: */ +extern unsigned xthal_vpri_to_intlevel(unsigned vpri); +extern unsigned xthal_intlevel_to_vpri(unsigned intlevel); + +/* Enables/disables given set (mask) of interrupts; returns previous enabled-mask of all ints: */ +extern unsigned xthal_int_enable(unsigned); +extern unsigned xthal_int_disable(unsigned); + +/* Set/get virtual priority of an interrupt: */ +extern int xthal_set_int_vpri(int intnum, int vpri); +extern int xthal_get_int_vpri(int intnum); + +/* Set/get interrupt lockout level for exclusive access to virtual priority data structures: */ +extern void xthal_set_vpri_locklevel(unsigned intlevel); +extern unsigned xthal_get_vpri_locklevel(void); + +/* Set/get current virtual interrupt priority: */ +extern unsigned xthal_set_vpri(unsigned vpri); +extern unsigned xthal_get_vpri(unsigned vpri); +extern unsigned xthal_set_vpri_intlevel(unsigned intlevel); +extern unsigned xthal_set_vpri_lock(void); + + + +/*---------------------------------------------------------------------- + Generic Interrupt Trampolining Support + ----------------------------------------------------------------------*/ + +typedef void (XtHalVoidFunc)(void); + +/* + * Bitmask of interrupts currently trampolining down: + */ +extern unsigned Xthal_tram_pending; + +/* + * Bitmask of which interrupts currently trampolining down + * synchronously are actually enabled; this bitmask is necessary + * because INTENABLE cannot hold that state (sync-trampolining + * interrupts must be kept disabled while trampolining); + * in the current implementation, any bit set here is not set + * in INTENABLE, and vice-versa; once a sync-trampoline is + * handled (at level one), its enable bit must be moved from + * here to INTENABLE: + */ +extern unsigned Xthal_tram_enabled; + +/* + * Bitmask of interrupts configured for sync trampolining: + */ +extern unsigned Xthal_tram_sync; + + +/* Trampoline support functions: */ +extern unsigned xthal_tram_pending_to_service( void ); +extern void xthal_tram_done( unsigned serviced_mask ); +extern int xthal_tram_set_sync( int intnum, int sync ); 
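As an illustrative aside (not part of this patch), the interrupt and timer declarations above combine naturally with the CCOUNT/CCOMPARE accessors declared further below into a one-shot timer setup. The helper below is a hypothetical sketch; a real caller would also decide what to do when timer 0 is unconfigured.

/* Arm CCOMPARE0 to fire 'cycles_from_now' cycles in the future and enable
 * the interrupt that timer 0 is wired to. */
static void arm_timer0(unsigned cycles_from_now)
{
        int intnum = Xthal_timer_interrupt[0];

        if (intnum == XTHAL_TIMER_UNCONFIGURED)
                return;                         /* no timer 0 in this configuration */

        xthal_set_ccompare(0, xthal_get_ccount() + cycles_from_now);
        xthal_int_enable(1U << intnum);         /* enable just that one interrupt */
}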
+extern XtHalVoidFunc* xthal_set_tram_trigger_func( XtHalVoidFunc *trigger_fn ); + +/* INTENABLE,INTREAD,INTSET,INTCLEAR register access functions: */ +extern unsigned xthal_get_intenable( void ); +extern void xthal_set_intenable( unsigned ); +extern unsigned xthal_get_intread( void ); +extern void xthal_set_intset( unsigned ); +extern void xthal_set_intclear( unsigned ); + + +/*---------------------------------------------------------------------- + Register Windows + ----------------------------------------------------------------------*/ + +/* number of registers in register window */ +extern const unsigned int Xthal_num_aregs; +extern const unsigned char Xthal_num_aregs_log2; + +/* This spill any live register windows (other than the caller's): */ +extern void xthal_window_spill( void ); + + +/*---------------------------------------------------------------------- + Cache + ----------------------------------------------------------------------*/ + +/* size of the cache lines in log2(bytes) */ +extern const unsigned char Xthal_icache_linewidth; +extern const unsigned char Xthal_dcache_linewidth; +/* size of the cache lines in bytes */ +extern const unsigned short Xthal_icache_linesize; +extern const unsigned short Xthal_dcache_linesize; +/* number of cache sets in log2(lines per way) */ +extern const unsigned char Xthal_icache_setwidth; +extern const unsigned char Xthal_dcache_setwidth; +/* cache set associativity (number of ways) */ +extern const unsigned int Xthal_icache_ways; +extern const unsigned int Xthal_dcache_ways; +/* size of the caches in bytes (ways * 2^(linewidth + setwidth)) */ +extern const unsigned int Xthal_icache_size; +extern const unsigned int Xthal_dcache_size; +/* cache features */ +extern const unsigned char Xthal_dcache_is_writeback; +extern const unsigned char Xthal_icache_line_lockable; +extern const unsigned char Xthal_dcache_line_lockable; + +/* cache attribute register control (used by other HAL routines) */ +extern unsigned xthal_get_cacheattr( void ); +extern unsigned xthal_get_icacheattr( void ); +extern unsigned xthal_get_dcacheattr( void ); +extern void xthal_set_cacheattr( unsigned ); +extern void xthal_set_icacheattr( unsigned ); +extern void xthal_set_dcacheattr( unsigned ); + +/* initialize cache support (must be called once at startup, before all other cache calls) */ +/*extern void xthal_cache_startinit( void );*/ +/* reset caches */ +/*extern void xthal_icache_reset( void );*/ +/*extern void xthal_dcache_reset( void );*/ +/* enable caches */ +extern void xthal_icache_enable( void ); /* DEPRECATED */ +extern void xthal_dcache_enable( void ); /* DEPRECATED */ +/* disable caches */ +extern void xthal_icache_disable( void ); /* DEPRECATED */ +extern void xthal_dcache_disable( void ); /* DEPRECATED */ + +/* invalidate the caches */ +extern void xthal_icache_all_invalidate( void ); +extern void xthal_dcache_all_invalidate( void ); +extern void xthal_icache_region_invalidate( void *addr, unsigned size ); +extern void xthal_dcache_region_invalidate( void *addr, unsigned size ); +extern void xthal_icache_line_invalidate(void *addr); +extern void xthal_dcache_line_invalidate(void *addr); +/* write dirty data back */ +extern void xthal_dcache_all_writeback( void ); +extern void xthal_dcache_region_writeback( void *addr, unsigned size ); +extern void xthal_dcache_line_writeback(void *addr); +/* write dirty data back and invalidate */ +extern void xthal_dcache_all_writeback_inv( void ); +extern void xthal_dcache_region_writeback_inv( void *addr, unsigned size 
); +extern void xthal_dcache_line_writeback_inv(void *addr); +/* prefetch and lock specified memory range into cache */ +extern void xthal_icache_region_lock( void *addr, unsigned size ); +extern void xthal_dcache_region_lock( void *addr, unsigned size ); +extern void xthal_icache_line_lock(void *addr); +extern void xthal_dcache_line_lock(void *addr); +/* unlock from cache */ +extern void xthal_icache_all_unlock( void ); +extern void xthal_dcache_all_unlock( void ); +extern void xthal_icache_region_unlock( void *addr, unsigned size ); +extern void xthal_dcache_region_unlock( void *addr, unsigned size ); +extern void xthal_icache_line_unlock(void *addr); +extern void xthal_dcache_line_unlock(void *addr); + + +/* sync icache and memory */ +extern void xthal_icache_sync( void ); +/* sync dcache and memory */ +extern void xthal_dcache_sync( void ); + +/*---------------------------------------------------------------------- + Debug + ----------------------------------------------------------------------*/ + +/* 1 if debug option configured, 0 if not: */ +extern const int Xthal_debug_configured; + +/* Number of instruction and data break registers: */ +extern const int Xthal_num_ibreak; +extern const int Xthal_num_dbreak; + +/* Set (plant) and remove software breakpoint, both synchronizing cache: */ +extern unsigned int xthal_set_soft_break(void *addr); +extern void xthal_remove_soft_break(void *addr, unsigned int); + + +/*---------------------------------------------------------------------- + Disassembler + ----------------------------------------------------------------------*/ + +/* Max expected size of the return buffer for a disassembled instruction (hint only): */ +#define XTHAL_DISASM_BUFSIZE 80 + +/* Disassembly option bits for selecting what to return: */ +#define XTHAL_DISASM_OPT_ADDR 0x0001 /* display address */ +#define XTHAL_DISASM_OPT_OPHEX 0x0002 /* display opcode bytes in hex */ +#define XTHAL_DISASM_OPT_OPCODE 0x0004 /* display opcode name (mnemonic) */ +#define XTHAL_DISASM_OPT_PARMS 0x0008 /* display parameters */ +#define XTHAL_DISASM_OPT_ALL 0x0FFF /* display everything */ + +/* routine to get a string for the disassembled instruction */ +extern int xthal_disassemble( unsigned char *instr_buf, void *tgt_addr, + char *buffer, unsigned buflen, unsigned options ); + +/* routine to get the size of the next instruction. 
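As an illustrative aside (not part of this patch), the two disassembler routines can be driven in a simple loop; the helper below is a hypothetical sketch that stops at the first illegal instruction (xthal_disassemble_size() returning 0) and prints each decoded line with stdio.

#include <stdio.h>

static void disassemble_range(unsigned char *pc, unsigned char *end)
{
        char buf[XTHAL_DISASM_BUFSIZE];
        int size;

        while (pc < end) {
                size = xthal_disassemble_size(pc);      /* 0 => illegal instruction */
                if (size == 0)
                        break;
                xthal_disassemble(pc, pc, buf, sizeof(buf), XTHAL_DISASM_OPT_ALL);
                printf("%s\n", buf);
                pc += size;                             /* advance to the next instruction */
        }
}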
Returns 0 for + illegal instruction */ +extern int xthal_disassemble_size( unsigned char *instr_buf ); + + +/*---------------------------------------------------------------------- + Core Counter + ----------------------------------------------------------------------*/ + +/* counter info */ +extern const unsigned char Xthal_have_ccount; /* set if CCOUNT register present */ +extern const unsigned char Xthal_num_ccompare; /* number of CCOMPAREn registers */ + +/* get CCOUNT register (if not present return 0) */ +extern unsigned xthal_get_ccount(void); + +/* set and get CCOMPAREn registers (if not present, get returns 0) */ +extern void xthal_set_ccompare(int, unsigned); +extern unsigned xthal_get_ccompare(int); + + +/*---------------------------------------------------------------------- + Instruction/Data RAM/ROM Access + ----------------------------------------------------------------------*/ + +extern void* xthal_memcpy(void *dst, const void *src, unsigned len); +extern void* xthal_bcopy(const void *src, void *dst, unsigned len); + +/*---------------------------------------------------------------------- + MP Synchronization + ----------------------------------------------------------------------*/ +extern int xthal_compare_and_set( int *addr, int test_val, int compare_val ); +extern unsigned xthal_get_prid( void ); + +/*extern const char Xthal_have_s32c1i;*/ +extern const unsigned char Xthal_have_prid; + + +/*---------------------------------------------------------------------- + Miscellaneous + ----------------------------------------------------------------------*/ + +extern const unsigned int Xthal_release_major; +extern const unsigned int Xthal_release_minor; +extern const char * const Xthal_release_name; +extern const char * const Xthal_release_internal; + +extern const unsigned char Xthal_memory_order; +extern const unsigned char Xthal_have_windowed; +extern const unsigned char Xthal_have_density; +extern const unsigned char Xthal_have_booleans; +extern const unsigned char Xthal_have_loops; +extern const unsigned char Xthal_have_nsa; +extern const unsigned char Xthal_have_minmax; +extern const unsigned char Xthal_have_sext; +extern const unsigned char Xthal_have_clamps; +extern const unsigned char Xthal_have_mac16; +extern const unsigned char Xthal_have_mul16; +extern const unsigned char Xthal_have_fp; +extern const unsigned char Xthal_have_speculation; +extern const unsigned char Xthal_have_exceptions; +extern const unsigned char Xthal_xea_version; +extern const unsigned char Xthal_have_interrupts; +extern const unsigned char Xthal_have_highlevel_interrupts; +extern const unsigned char Xthal_have_nmi; + +extern const unsigned short Xthal_num_writebuffer_entries; + +extern const unsigned int Xthal_build_unique_id; +/* Release info for hardware targeted by software upgrades: */ +extern const unsigned int Xthal_hw_configid0; +extern const unsigned int Xthal_hw_configid1; +extern const unsigned int Xthal_hw_release_major; +extern const unsigned int Xthal_hw_release_minor; +extern const char * const Xthal_hw_release_name; +extern const char * const Xthal_hw_release_internal; + + +/* Internal memories... 
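As an illustrative aside (not part of this patch), the core-counter accessors above lend themselves to simple cycle measurements; the sketch below checks Xthal_have_ccount first, since xthal_get_ccount() is documented to return 0 when the register is absent.

static unsigned cycles_spent_demo(void)
{
        unsigned start, end;

        if (!Xthal_have_ccount)
                return 0;               /* no cycle counter configured */

        start = xthal_get_ccount();
        /* ... code being measured ... */
        end = xthal_get_ccount();
        return end - start;             /* unsigned subtraction tolerates wrap-around */
}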
*/ + +extern const unsigned char Xthal_num_instrom; +extern const unsigned char Xthal_num_instram; +extern const unsigned char Xthal_num_datarom; +extern const unsigned char Xthal_num_dataram; +extern const unsigned char Xthal_num_xlmi; +extern const unsigned int Xthal_instrom_vaddr[1]; +extern const unsigned int Xthal_instrom_paddr[1]; +extern const unsigned int Xthal_instrom_size [1]; +extern const unsigned int Xthal_instram_vaddr[1]; +extern const unsigned int Xthal_instram_paddr[1]; +extern const unsigned int Xthal_instram_size [1]; +extern const unsigned int Xthal_datarom_vaddr[1]; +extern const unsigned int Xthal_datarom_paddr[1]; +extern const unsigned int Xthal_datarom_size [1]; +extern const unsigned int Xthal_dataram_vaddr[1]; +extern const unsigned int Xthal_dataram_paddr[1]; +extern const unsigned int Xthal_dataram_size [1]; +extern const unsigned int Xthal_xlmi_vaddr[1]; +extern const unsigned int Xthal_xlmi_paddr[1]; +extern const unsigned int Xthal_xlmi_size [1]; + + + +/*---------------------------------------------------------------------- + Memory Management Unit + ----------------------------------------------------------------------*/ + +extern const unsigned char Xthal_have_spanning_way; +extern const unsigned char Xthal_have_identity_map; +extern const unsigned char Xthal_have_mimic_cacheattr; +extern const unsigned char Xthal_have_xlt_cacheattr; +extern const unsigned char Xthal_have_cacheattr; +extern const unsigned char Xthal_have_tlbs; + +extern const unsigned char Xthal_mmu_asid_bits; /* 0 .. 8 */ +extern const unsigned char Xthal_mmu_asid_kernel; +extern const unsigned char Xthal_mmu_rings; /* 1 .. 4 (perhaps 0 if no MMU and/or no protection?) */ +extern const unsigned char Xthal_mmu_ring_bits; +extern const unsigned char Xthal_mmu_sr_bits; +extern const unsigned char Xthal_mmu_ca_bits; +extern const unsigned int Xthal_mmu_max_pte_page_size; +extern const unsigned int Xthal_mmu_min_pte_page_size; + +extern const unsigned char Xthal_itlb_way_bits; +extern const unsigned char Xthal_itlb_ways; +extern const unsigned char Xthal_itlb_arf_ways; +extern const unsigned char Xthal_dtlb_way_bits; +extern const unsigned char Xthal_dtlb_ways; +extern const unsigned char Xthal_dtlb_arf_ways; + +/* Convert between virtual and physical addresses (through static maps only): */ +/*** WARNING: these two functions may go away in a future release; don't depend on them! ***/ +extern int xthal_static_v2p( unsigned vaddr, unsigned *paddrp ); +extern int xthal_static_p2v( unsigned paddr, unsigned *vaddrp, unsigned cached ); + +#if 0 +/******************* EXPERIMENTAL AND TENTATIVE ONLY ********************/ + +#define XTHAL_MMU_PAGESZ_COUNT_MAX 8 /* maximum number of different page sizes */ +extern const char Xthal_mmu_pagesz_count; /* 0 .. 8 number of different page sizes configured */ + +/* Note: the following table doesn't necessarily have page sizes in increasing order: */ +extern const char Xthal_mmu_pagesz_log2[XTHAL_MMU_PAGESZ_COUNT_MAX]; /* 10 .. 28 (0 past count) */ + +/* Sorted (increasing) table of page sizes, that indexes into the above table: */ +extern const char Xthal_mmu_pagesz_sorted[XTHAL_MMU_PAGESZ_COUNT_MAX]; /* 0 .. 7 (0 past count) */ + +/*u32 Xthal_virtual_exceptions;*/ /* bitmask of which exceptions execute in virtual mode... */ + +extern const char Xthal_mmu_pte_pagesz_log2_min; /* ?? minimum page size in PTEs */ +extern const char Xthal_mmu_pte_pagesz_log2_max; /* ?? 
maximum page size in PTEs */ + +/* Cache Attribute Bits Implemented by the Cache (part of the cache abstraction) */ +extern const char Xthal_icache_fca_bits_implemented; /* ITLB/UTLB only! */ +extern const char Xthal_dcache_lca_bits_implemented; /* DTLB/UTLB only! */ +extern const char Xthal_dcache_sca_bits_implemented; /* DTLB/UTLB only! */ + +/* Per TLB Parameters (Instruction, Data, Unified) */ +struct XtHalMmuTlb Xthal_itlb; /* description of MMU I-TLB generic features */ +struct XtHalMmuTlb Xthal_dtlb; /* description of MMU D-TLB generic features */ +struct XtHalMmuTlb Xthal_utlb; /* description of MMU U-TLB generic features */ + +#define XTHAL_MMU_WAYS_MAX 8 /* maximum number of ways (associativities) for each TLB */ + +/* Structure for common information described for each possible TLB (instruction, data and unified): */ +typedef struct XtHalMmuTlb { + u8 va_bits; /* 32 (number of virtual address bits) */ + u8 pa_bits; /* 32 (number of physical address bits) */ + bool tlb_va_indexed; /* 1 (set if TLB is indexed by virtual address) */ + bool tlb_va_tagged; /* 0 (set if TLB is tagged by virtual address) */ + bool cache_va_indexed; /* 1 (set if cache is indexed by virtual address) */ + bool cache_va_tagged; /* 0 (set if cache is tagged by virtual address) */ + /*bool (whether page tables are traversed in vaddr sorted order, paddr sorted order, ...) */ + /*u8 (set of available page attribute bits, other than cache attribute bits defined above) */ + /*u32 (various masks for pages, MMU table/TLB entries, etc.) */ + u8 way_count; /* 0 .. 8 (number of ways, a.k.a. associativities, for this TLB) */ + XtHalMmuTlbWay * ways[XTHAL_MMU_WAYS_MAX]; /* pointers to per-way parms for each way */ +} XtHalMmuTlb; + +/* Per TLB Way (Per Associativity) Parameters */ +typedef struct XtHalMmuTlbWay { + u32 index_count_log2; /* 0 .. 4 */ + u32 pagesz_mask; /* 0 .. 2^pagesz_count - 1 (each bit corresponds to a size */ + /* defined in the Xthal_mmu_pagesz_log2[] table) */ + u32 vpn_const_mask; + u32 vpn_const_value; + u64 ppn_const_mask; /* future may support pa_bits > 32 */ + u64 ppn_const_value; + u32 ppn_id_mask; /* paddr bits taken directly from vaddr */ + bool backgnd_match; /* 0 or 1 */ + /* These are defined in terms of the XTHAL_CACHE_xxx bits: */ + u8 fca_const_mask; /* ITLB/UTLB only! */ + u8 fca_const_value; /* ITLB/UTLB only! */ + u8 lca_const_mask; /* DTLB/UTLB only! */ + u8 lca_const_value; /* DTLB/UTLB only! */ + u8 sca_const_mask; /* DTLB/UTLB only! */ + u8 sca_const_value; /* DTLB/UTLB only! */ + /* These define an encoding that map 5 bits in TLB and PTE entries to */ + /* 8 bits (FCA, ITLB), 16 bits (LCA+SCA, DTLB) or 24 bits (FCA+LCA+SCA, UTLB): */ + /* (they may be moved to struct XtHalMmuTlb) */ + u8 ca_bits; /* number of bits in TLB/PTE entries for cache attributes */ + u32 * ca_map; /* pointer to array of 2^ca_bits entries of FCA+LCA+SCA bits */ +} XtHalMmuTlbWay; + +/* + * The way to determine whether protection support is present in core + * is to [look at Xthal_mmu_rings ???]. + * Give info on memory requirements for MMU tables and other in-memory + * data structures (globally, per task, base and per page, etc.) - whatever bounds can be calculated. + */ + + +/* Default vectors: */ +xthal_immu_fetch_miss_vector +xthal_dmmu_load_miss_vector +xthal_dmmu_store_miss_vector + +/* Functions called when a fault is detected: */ +typedef void (XtHalMmuFaultFunc)( unsigned vaddr, ...context... ); +/* Or, */ +/* a? = vaddr */ +/* a? = context... 
*/ +/* PS.xxx = xxx */ +XtHalMMuFaultFunc *Xthal_immu_fetch_fault_func; +XtHalMMuFaultFunc *Xthal_dmmu_load_fault_func; +XtHalMMuFaultFunc *Xthal_dmmu_store_fault_func; + +/* Default Handlers: */ +/* The user and/or kernel exception handlers may jump to these handlers to handle the relevant exceptions, + * according to the value of EXCCAUSE. The exact register state on entry to these handlers is TBD. */ +/* When multiple TLB entries match (hit) on the same access: */ +xthal_immu_fetch_multihit_handler +xthal_dmmu_load_multihit_handler +xthal_dmmu_store_multihit_handler +/* Protection violations according to cache attributes, and other cache attribute mismatches: */ +xthal_immu_fetch_attr_handler +xthal_dmmu_load_attr_handler +xthal_dmmu_store_attr_handler +/* Protection violations due to insufficient ring level: */ +xthal_immu_fetch_priv_handler +xthal_dmmu_load_priv_handler +xthal_dmmu_store_priv_handler +/* Alignment exception handlers (if supported by the particular Xtensa MMU configuration): */ +xthal_dmmu_load_align_handler +xthal_dmmu_store_align_handler + +/* Or, alternatively, the OS user and/or kernel exception handlers may simply jump to the + * following entry points which will handle any values of EXCCAUSE not handled by the OS: */ +xthal_user_exc_default_handler +xthal_kernel_exc_default_handler + +#endif /*0*/ + +#ifdef INCLUDE_DEPRECATED_HAL_CODE +extern const unsigned char Xthal_have_old_exc_arch; +extern const unsigned char Xthal_have_mmu; +extern const unsigned int Xthal_num_regs; +extern const unsigned char Xthal_num_iroms; +extern const unsigned char Xthal_num_irams; +extern const unsigned char Xthal_num_droms; +extern const unsigned char Xthal_num_drams; +extern const unsigned int Xthal_configid0; +extern const unsigned int Xthal_configid1; +#endif + +#ifdef INCLUDE_DEPRECATED_HAL_DEBUG_CODE +#define XTHAL_24_BIT_BREAK 0x80000000 +#define XTHAL_16_BIT_BREAK 0x40000000 +extern const unsigned short Xthal_ill_inst_16[16]; +#define XTHAL_DEST_REG 0xf0000000 /* Mask for destination register */ +#define XTHAL_DEST_REG_INST 0x08000000 /* Branch address is in register */ +#define XTHAL_DEST_REL_INST 0x04000000 /* Branch address is relative */ +#define XTHAL_RFW_INST 0x00000800 +#define XTHAL_RFUE_INST 0x00000400 +#define XTHAL_RFI_INST 0x00000200 +#define XTHAL_RFE_INST 0x00000100 +#define XTHAL_RET_INST 0x00000080 +#define XTHAL_BREAK_INST 0x00000040 +#define XTHAL_SYSCALL_INST 0x00000020 +#define XTHAL_LOOP_END 0x00000010 /* Not set by xthal_inst_type */ +#define XTHAL_JUMP_INST 0x00000008 /* Call or jump instruction */ +#define XTHAL_BRANCH_INST 0x00000004 /* Branch instruction */ +#define XTHAL_24_BIT_INST 0x00000002 +#define XTHAL_16_BIT_INST 0x00000001 +typedef struct xthal_state { + unsigned pc; + unsigned ar[16]; + unsigned lbeg; + unsigned lend; + unsigned lcount; + unsigned extra_ptr; + unsigned cpregs_ptr[XTHAL_MAX_CPS]; +} XTHAL_STATE; +extern unsigned int xthal_inst_type(void *addr); +extern unsigned int xthal_branch_addr(void *addr); +extern unsigned int xthal_get_npc(XTHAL_STATE *user_state); +#endif /* INCLUDE_DEPRECATED_HAL_DEBUG_CODE */ + +#ifdef __cplusplus +} +#endif +#endif /*!__ASSEMBLY__ */ + +#endif /*XTENSA_HAL_H*/ + diff --git a/include/asm-xtensa/xtensa/simcall.h b/include/asm-xtensa/xtensa/simcall.h new file mode 100644 index 0000000..a2b8689 --- /dev/null +++ b/include/asm-xtensa/xtensa/simcall.h @@ -0,0 +1,130 @@ +#ifndef SIMCALL_INCLUDED +#define SIMCALL_INCLUDED + +/* + * THIS FILE IS GENERATED -- DO NOT MODIFY BY HAND + * + * 
include/asm-xtensa/xtensa/simcall.h - Simulator call numbers + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file "COPYING" in the main directory of + * this archive for more details. + * + * Copyright (C) 2002 Tensilica Inc. + */ + + +/* + * System call like services offered by the simulator host. + * These are modeled after the Linux 2.4 kernel system calls + * for Xtensa processors. However not all system calls and + * not all functionality of a given system call are implemented, + * or necessarily have well defined or equivalent semantics in + * the context of a simulation (as opposed to a Unix kernel). + * + * These services behave largely as if they had been invoked + * as a task in the simulator host's operating system + * (eg. files accessed are those of the simulator host). + * However, these SIMCALLs model a virtual operating system + * so that various definitions, bit assignments etc + * (eg. open mode bits, errno values, etc) are independent + * of the host operating system used to run the simulation. + * Rather these definitions are specific to the Xtensa ISS. + * This way Xtensa ISA code written to use these SIMCALLs + * can (in principle) be simulated on any host. + * + * Up to 6 parameters are passed in registers a3 to a8 + * (note the 6th parameter isn't passed on the stack, + * unlike windowed function calling conventions). + * The return value is in a2. A negative value in the + * range -4096 to -1 indicates a negated error code to be + * reported in errno with a return value of -1, otherwise + * the value in a2 is returned as is. + */ + +/* These #defines need to match what's in Xtensa/OS/vxworks/xtiss/simcalls.c */ + +#define SYS_nop 0 /* n/a - setup; used to flush register windows */ +#define SYS_exit 1 /*x*/ +#define SYS_fork 2 +#define SYS_read 3 /*x*/ +#define SYS_write 4 /*x*/ +#define SYS_open 5 /*x*/ +#define SYS_close 6 /*x*/ +#define SYS_rename 7 /*x 38 - waitpid */ +#define SYS_creat 8 /*x*/ +#define SYS_link 9 /*x (not implemented on WIN32) */ +#define SYS_unlink 10 /*x*/ +#define SYS_execv 11 /* n/a - execve */ +#define SYS_execve 12 /* 11 - chdir */ +#define SYS_pipe 13 /* 42 - time */ +#define SYS_stat 14 /* 106 - mknod */ +#define SYS_chmod 15 +#define SYS_chown 16 /* 202 - lchown */ +#define SYS_utime 17 /* 30 - break */ +#define SYS_wait 18 /* n/a - oldstat */ +#define SYS_lseek 19 /*x*/ +#define SYS_getpid 20 +#define SYS_isatty 21 /* n/a - mount */ +#define SYS_fstat 22 /* 108 - oldumount */ +#define SYS_time 23 /* 13 - setuid */ +#define SYS_gettimeofday 24 /*x 78 - getuid (not implemented on WIN32) */ +#define SYS_times 25 /*X 43 - stime (Xtensa-specific implementation) */ +#define SYS_socket 26 +#define SYS_sendto 27 +#define SYS_recvfrom 28 +#define SYS_select_one 29 /* not compitible select, one file descriptor at the time */ +#define SYS_bind 30 +#define SYS_ioctl 31 + +/* + * Other... + */ +#define SYS_iss_argc 1000 /* returns value of argc */ +#define SYS_iss_argv_size 1001 /* bytes needed for argv & arg strings */ +#define SYS_iss_set_argv 1002 /* saves argv & arg strings at given addr */ + +/* + * SIMCALLs for the ferret memory debugger. All are invoked by + * libferret.a ... 
( Xtensa/Target-Libs/ferret ) + */ +#define SYS_ferret 1010 +#define SYS_malloc 1011 +#define SYS_free 1012 +#define SYS_more_heap 1013 +#define SYS_no_heap 1014 + + +/* + * Extra SIMCALLs for GDB: + */ +#define SYS_gdb_break -1 /* invoked by XTOS on user exceptions if EPC points + to a break.n/break, regardless of cause! */ +#define SYS_xmon_out -2 /* invoked by XMON: ... */ +#define SYS_xmon_in -3 /* invoked by XMON: ... */ +#define SYS_xmon_flush -4 /* invoked by XMON: ... */ +#define SYS_gdb_abort -5 /* invoked by XTOS in _xtos_panic() */ +#define SYS_gdb_illegal_inst -6 /* invoked by XTOS for illegal instructions (too deeply) */ +#define SYS_xmon_init -7 /* invoked by XMON: ... */ +#define SYS_gdb_enter_sktloop -8 /* invoked by XTOS on debug exceptions */ + +/* + * SIMCALLs for vxWorks xtiss BSP: + */ +#define SYS_setup_ppp_pipes -83 +#define SYS_log_msg -84 + +/* + * Test SIMCALLs: + */ +#define SYS_test_write_state -100 +#define SYS_test_read_state -101 + +/* + * SYS_select_one specifiers + */ +#define XTISS_SELECT_ONE_READ 1 +#define XTISS_SELECT_ONE_WRITE 2 +#define XTISS_SELECT_ONE_EXCEPT 3 + +#endif /* !SIMCALL_INCLUDED */ diff --git a/include/asm-xtensa/xtensa/xt2000-uart.h b/include/asm-xtensa/xtensa/xt2000-uart.h new file mode 100644 index 0000000..0154460 --- /dev/null +++ b/include/asm-xtensa/xtensa/xt2000-uart.h @@ -0,0 +1,155 @@ +#ifndef _uart_h_included_ +#define _uart_h_included_ + +/* + * THIS FILE IS GENERATED -- DO NOT MODIFY BY HAND + * + * include/asm-xtensa/xtensa/xt2000-uart.h -- NatSemi PC16552D DUART + * definitions + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2002 Tensilica Inc. + */ + + +#include <xtensa/xt2000.h> + + +/* 16550 UART DEVICE REGISTERS + The XT2000 board aligns each register to a 32-bit word but the UART device only uses + one byte of the word, which is the least-significant byte regardless of the + endianness of the core (ie. byte offset 0 for little-endian and 3 for big-endian). + So if using word accesses then endianness doesn't matter. + The macros provided here do that. +*/ +struct uart_dev_s { + union { + unsigned int rxb; /* DLAB=0: receive buffer, read-only */ + unsigned int txb; /* DLAB=0: transmit buffer, write-only */ + unsigned int dll; /* DLAB=1: divisor, least-significant byte latch (was write-only?) */ + } w0; + union { + unsigned int ier; /* DLAB=0: interrupt-enable register (was write-only?) */ + unsigned int dlm; /* DLAB=1: divisor, most-significant byte latch (was write-only?) 
*/ + } w1; + + union { + unsigned int isr; /* DLAB=0: interrupt status register, read-only */ + unsigned int fcr; /* DLAB=0: FIFO control register, write-only */ + unsigned int afr; /* DLAB=1: alternate function register */ + } w2; + + unsigned int lcr; /* line control-register, write-only */ + unsigned int mcr; /* modem control-regsiter, write-only */ + unsigned int lsr; /* line status register, read-only */ + unsigned int msr; /* modem status register, read-only */ + unsigned int scr; /* scratch regsiter, read/write */ +}; + +#define _RXB(u) ((u)->w0.rxb) +#define _TXB(u) ((u)->w0.txb) +#define _DLL(u) ((u)->w0.dll) +#define _IER(u) ((u)->w1.ier) +#define _DLM(u) ((u)->w1.dlm) +#define _ISR(u) ((u)->w2.isr) +#define _FCR(u) ((u)->w2.fcr) +#define _AFR(u) ((u)->w2.afr) +#define _LCR(u) ((u)->lcr) +#define _MCR(u) ((u)->mcr) +#define _LSR(u) ((u)->lsr) +#define _MSR(u) ((u)->msr) +#define _SCR(u) ((u)->scr) + +typedef volatile struct uart_dev_s uart_dev_t; + +/* IER bits */ +#define RCVR_DATA_REG_INTENABLE 0x01 +#define XMIT_HOLD_REG_INTENABLE 0x02 +#define RCVR_STATUS_INTENABLE 0x04 +#define MODEM_STATUS_INTENABLE 0x08 + +/* FCR bits */ +#define _FIFO_ENABLE 0x01 +#define RCVR_FIFO_RESET 0x02 +#define XMIT_FIFO_RESET 0x04 +#define DMA_MODE_SELECT 0x08 +#define RCVR_TRIGGER_LSB 0x40 +#define RCVR_TRIGGER_MSB 0x80 + +/* AFR bits */ +#define AFR_CONC_WRITE 0x01 +#define AFR_BAUDOUT_SEL 0x02 +#define AFR_RXRDY_SEL 0x04 + +/* ISR bits */ +#define INT_STATUS(r) ((r)&1) +#define INT_PRIORITY(r) (((r)>>1)&0x7) + +/* LCR bits */ +#define WORD_LENGTH(n) (((n)-5)&0x3) +#define STOP_BIT_ENABLE 0x04 +#define PARITY_ENABLE 0x08 +#define EVEN_PARITY 0x10 +#define FORCE_PARITY 0x20 +#define XMIT_BREAK 0x40 +#define DLAB_ENABLE 0x80 + +/* MCR bits */ +#define _DTR 0x01 +#define _RTS 0x02 +#define _OP1 0x04 +#define _OP2 0x08 +#define LOOP_BACK 0x10 + +/* LSR Bits */ +#define RCVR_DATA_READY 0x01 +#define OVERRUN_ERROR 0x02 +#define PARITY_ERROR 0x04 +#define FRAMING_ERROR 0x08 +#define BREAK_INTERRUPT 0x10 +#define XMIT_HOLD_EMPTY 0x20 +#define XMIT_EMPTY 0x40 +#define FIFO_ERROR 0x80 +#define RCVR_READY(u) (_LSR(u)&RCVR_DATA_READY) +#define XMIT_READY(u) (_LSR(u)&XMIT_HOLD_EMPTY) + +/* MSR bits */ +#define _RDR 0x01 +#define DELTA_DSR 0x02 +#define DELTA_RI 0x04 +#define DELTA_CD 0x08 +#define _CTS 0x10 +#define _DSR 0x20 +#define _RI 0x40 +#define _CD 0x80 + +/* prototypes */ +void uart_init( uart_dev_t *u, int bitrate ); +void uart_out( uart_dev_t *u, char c ); +void uart_puts( uart_dev_t *u, char *s ); +char uart_in( uart_dev_t *u ); +void uart_enable_rcvr_int( uart_dev_t *u ); +void uart_disable_rcvr_int( uart_dev_t *u ); + +#ifdef DUART16552_1_VADDR +/* DUART present. */ +#define DUART_1_BASE (*(uart_dev_t*)DUART16552_1_VADDR) +#define DUART_2_BASE (*(uart_dev_t*)DUART16552_2_VADDR) +#define UART1_PUTS(s) uart_puts( &DUART_1_BASE, s ) +#define UART2_PUTS(s) uart_puts( &DUART_2_BASE, s ) +#else +/* DUART not configured, use dummy placeholders to allow compiles to work. 
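As an illustrative aside (not part of this patch), the register accessors and LSR helpers above are enough for a polled transmit routine; the function below is a hypothetical sketch of roughly what uart_out() is expected to do.

static void my_uart_putc(uart_dev_t *u, char c)
{
        while (!XMIT_READY(u))
                ;               /* wait until the transmit holding register is empty */
        _TXB(u) = c;            /* 32-bit store; only the low byte reaches the device */
}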
*/ +#define DUART_1_BASE (*(uart_dev_t*)0) +#define DUART_2_BASE (*(uart_dev_t*)0) +#define UART1_PUTS(s) +#define UART2_PUTS(s) +#endif + +/* Compute 16-bit divisor for baudrate generator, with rounding: */ +#define DUART_DIVISOR(crystal,speed) (((crystal)/16 + (speed)/2)/(speed)) + +#endif /*_uart_h_included_*/ + diff --git a/include/asm-xtensa/xtensa/xt2000.h b/include/asm-xtensa/xtensa/xt2000.h new file mode 100644 index 0000000..703a450 --- /dev/null +++ b/include/asm-xtensa/xtensa/xt2000.h @@ -0,0 +1,408 @@ +#ifndef _INC_XT2000_H_ +#define _INC_XT2000_H_ + +/* + * THIS FILE IS GENERATED -- DO NOT MODIFY BY HAND + * + * include/asm-xtensa/xtensa/xt2000.h - Definitions specific to the + * Tensilica XT2000 Emulation Board + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2002 Tensilica Inc. + */ + + +#include <xtensa/config/core.h> +#include <xtensa/config/system.h> + + +/* + * Default assignment of XT2000 devices to external interrupts. + */ + +/* Ethernet interrupt: */ +#ifdef XCHAL_EXTINT3_NUM +#define SONIC83934_INTNUM XCHAL_EXTINT3_NUM +#define SONIC83934_INTLEVEL XCHAL_EXTINT3_LEVEL +#define SONIC83934_INTMASK XCHAL_EXTINT3_MASK +#else +#define SONIC83934_INTMASK 0 +#endif + +/* DUART channel 1 interrupt (P1 - console): */ +#ifdef XCHAL_EXTINT4_NUM +#define DUART16552_1_INTNUM XCHAL_EXTINT4_NUM +#define DUART16552_1_INTLEVEL XCHAL_EXTINT4_LEVEL +#define DUART16552_1_INTMASK XCHAL_EXTINT4_MASK +#else +#define DUART16552_1_INTMASK 0 +#endif + +/* DUART channel 2 interrupt (P2 - 2nd serial port): */ +#ifdef XCHAL_EXTINT5_NUM +#define DUART16552_2_INTNUM XCHAL_EXTINT5_NUM +#define DUART16552_2_INTLEVEL XCHAL_EXTINT5_LEVEL +#define DUART16552_2_INTMASK XCHAL_EXTINT5_MASK +#else +#define DUART16552_2_INTMASK 0 +#endif + +/* FPGA-combined PCI/etc interrupts: */ +#ifdef XCHAL_EXTINT6_NUM +#define XT2000_FPGAPCI_INTNUM XCHAL_EXTINT6_NUM +#define XT2000_FPGAPCI_INTLEVEL XCHAL_EXTINT6_LEVEL +#define XT2000_FPGAPCI_INTMASK XCHAL_EXTINT6_MASK +#else +#define XT2000_FPGAPCI_INTMASK 0 +#endif + + + +/* + * Device addresses. + * + * Note: for endianness-independence, use 32-bit loads and stores for all + * register accesses to Ethernet, DUART and LED devices. Undefined bits + * may need to be masked out if needed when reading if the actual register + * size is smaller than 32 bits. + * + * Note: XT2000 bus byte lanes are defined in terms of msbyte and lsbyte + * relative to the processor. So 32-bit registers are accessed consistently + * from both big and little endian processors. However, this means byte + * sequences are not consistent between big and little endian processors. + * This is fine for RAM, and for ROM if ROM is created for a specific + * processor (and thus has correct byte sequences). However this may be + * unexpected for Flash, which might contain a file-system that one wants + * to use for multiple processor configurations (eg. the Flash might contain + * the Ethernet card's address, endianness-independent application data, etc). + * That is, byte sequences written in Flash by a core of a given endianness + * will be byte-swapped when seen by a core of the other endianness. + * Someone implementing an endianness-independent Flash file system will + * likely handle this byte-swapping issue in the Flash driver software. 
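As an illustrative aside (not part of this patch), the access note above translates into the pattern sketched below: always use a 32-bit load, then mask down to the register's implemented width (the helper name is hypothetical).

static unsigned xt2000_read_reg8(volatile unsigned int *reg)
{
        return *reg & 0xffU;    /* undefined upper bits are masked away */
}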
+ */ + +#define DUART16552_XTAL_FREQ 18432000 /* crystal frequency in Hz */ +#define XTBOARD_FLASH_MAXSIZE 0x4000000 /* 64 MB (max; depends on what is socketed!) */ +#define XTBOARD_EPROM_MAXSIZE 0x0400000 /* 4 MB (max; depends on what is socketed!) */ +#define XTBOARD_EEPROM_MAXSIZE 0x0080000 /* 512 kB (max; depends on what is socketed!) */ +#define XTBOARD_ASRAM_SIZE 0x0100000 /* 1 MB */ +#define XTBOARD_PCI_MEM_SIZE 0x8000000 /* 128 MB (allocated) */ +#define XTBOARD_PCI_IO_SIZE 0x1000000 /* 16 MB (allocated) */ + +#ifdef XSHAL_IOBLOCK_BYPASS_PADDR +/* PCI memory space: */ +# define XTBOARD_PCI_MEM_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0x0000000) +/* Socketed Flash (eg. 2 x 16-bit devices): */ +# define XTBOARD_FLASH_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0x8000000) +/* PCI I/O space: */ +# define XTBOARD_PCI_IO_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xC000000) +/* V3 PCI interface chip register/config space: */ +# define XTBOARD_V3PCI_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD000000) +/* Bus Interface registers: */ +# define XTBOARD_BUSINT_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD010000) +/* FPGA registers: */ +# define XT2000_FPGAREGS_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD020000) +/* SONIC SN83934 Ethernet controller/transceiver: */ +# define SONIC83934_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD030000) +/* 8-character bitmapped LED display: */ +# define XTBOARD_LED_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD040000) +/* National-Semi PC16552D DUART: */ +# define DUART16552_1_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD050020) /* channel 1 (P1 - console) */ +# define DUART16552_2_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD050000) /* channel 2 (P2) */ +/* Asynchronous Static RAM: */ +# define XTBOARD_ASRAM_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD400000) +/* 8-bit EEPROM: */ +# define XTBOARD_EEPROM_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD600000) +/* 2 x 16-bit EPROMs: */ +# define XTBOARD_EPROM_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD800000) +#endif /* XSHAL_IOBLOCK_BYPASS_PADDR */ + +/* These devices might be accessed cached: */ +#ifdef XSHAL_IOBLOCK_CACHED_PADDR +# define XTBOARD_PCI_MEM_CACHED_PADDR (XSHAL_IOBLOCK_CACHED_PADDR+0x0000000) +# define XTBOARD_FLASH_CACHED_PADDR (XSHAL_IOBLOCK_CACHED_PADDR+0x8000000) +# define XTBOARD_ASRAM_CACHED_PADDR (XSHAL_IOBLOCK_CACHED_PADDR+0xD400000) +# define XTBOARD_EEPROM_CACHED_PADDR (XSHAL_IOBLOCK_CACHED_PADDR+0xD600000) +# define XTBOARD_EPROM_CACHED_PADDR (XSHAL_IOBLOCK_CACHED_PADDR+0xD800000) +#endif /* XSHAL_IOBLOCK_CACHED_PADDR */ + + +/*** Same thing over again, this time with virtual addresses: ***/ + +#ifdef XSHAL_IOBLOCK_BYPASS_VADDR +/* PCI memory space: */ +# define XTBOARD_PCI_MEM_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0x0000000) +/* Socketed Flash (eg. 
2 x 16-bit devices): */ +# define XTBOARD_FLASH_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0x8000000) +/* PCI I/O space: */ +# define XTBOARD_PCI_IO_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xC000000) +/* V3 PCI interface chip register/config space: */ +# define XTBOARD_V3PCI_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD000000) +/* Bus Interface registers: */ +# define XTBOARD_BUSINT_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD010000) +/* FPGA registers: */ +# define XT2000_FPGAREGS_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD020000) +/* SONIC SN83934 Ethernet controller/transceiver: */ +# define SONIC83934_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD030000) +/* 8-character bitmapped LED display: */ +# define XTBOARD_LED_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD040000) +/* National-Semi PC16552D DUART: */ +# define DUART16552_1_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD050020) /* channel 1 (P1 - console) */ +# define DUART16552_2_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD050000) /* channel 2 (P2) */ +/* Asynchronous Static RAM: */ +# define XTBOARD_ASRAM_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD400000) +/* 8-bit EEPROM: */ +# define XTBOARD_EEPROM_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD600000) +/* 2 x 16-bit EPROMs: */ +# define XTBOARD_EPROM_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD800000) +#endif /* XSHAL_IOBLOCK_BYPASS_VADDR */ + +/* These devices might be accessed cached: */ +#ifdef XSHAL_IOBLOCK_CACHED_VADDR +# define XTBOARD_PCI_MEM_CACHED_VADDR (XSHAL_IOBLOCK_CACHED_VADDR+0x0000000) +# define XTBOARD_FLASH_CACHED_VADDR (XSHAL_IOBLOCK_CACHED_VADDR+0x8000000) +# define XTBOARD_ASRAM_CACHED_VADDR (XSHAL_IOBLOCK_CACHED_VADDR+0xD400000) +# define XTBOARD_EEPROM_CACHED_VADDR (XSHAL_IOBLOCK_CACHED_VADDR+0xD600000) +# define XTBOARD_EPROM_CACHED_VADDR (XSHAL_IOBLOCK_CACHED_VADDR+0xD800000) +#endif /* XSHAL_IOBLOCK_CACHED_VADDR */ + + +/* System ROM: */ +#define XTBOARD_ROM_SIZE XSHAL_ROM_SIZE +#ifdef XSHAL_ROM_VADDR +#define XTBOARD_ROM_VADDR XSHAL_ROM_VADDR +#endif +#ifdef XSHAL_ROM_PADDR +#define XTBOARD_ROM_PADDR XSHAL_ROM_PADDR +#endif + +/* System RAM: */ +#define XTBOARD_RAM_SIZE XSHAL_RAM_SIZE +#ifdef XSHAL_RAM_VADDR +#define XTBOARD_RAM_VADDR XSHAL_RAM_VADDR +#endif +#ifdef XSHAL_RAM_PADDR +#define XTBOARD_RAM_PADDR XSHAL_RAM_PADDR +#endif +#define XTBOARD_RAM_BYPASS_VADDR XSHAL_RAM_BYPASS_VADDR +#define XTBOARD_RAM_BYPASS_PADDR XSHAL_RAM_BYPASS_PADDR + + + +/* + * Things that depend on device addresses. + */ + + +#define XTBOARD_CACHEATTR_WRITEBACK XSHAL_XT2000_CACHEATTR_WRITEBACK +#define XTBOARD_CACHEATTR_WRITEALLOC XSHAL_XT2000_CACHEATTR_WRITEALLOC +#define XTBOARD_CACHEATTR_WRITETHRU XSHAL_XT2000_CACHEATTR_WRITETHRU +#define XTBOARD_CACHEATTR_BYPASS XSHAL_XT2000_CACHEATTR_BYPASS +#define XTBOARD_CACHEATTR_DEFAULT XSHAL_XT2000_CACHEATTR_DEFAULT + +#define XTBOARD_BUSINT_PIPE_REGIONS XSHAL_XT2000_PIPE_REGIONS +#define XTBOARD_BUSINT_SDRAM_REGIONS XSHAL_XT2000_SDRAM_REGIONS + + + +/* + * BusLogic (FPGA) registers. + * All these registers are normally accessed using 32-bit loads/stores. 
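As a concrete use of the 32-bit access convention, the board's build date can be read from the DATECD register and unpacked with the bit-field macros defined below. The fields are BCD-coded, one decimal digit per nibble. The sketch assumes XT2000_FPGAREGS_VADDR is mapped so the _REG accessor exists; bcd_to_int() and example_read_datecd() are illustrative names, not board-support routines.

/* Decode a BCD-packed field: each 4-bit nibble holds one decimal digit. */
static unsigned bcd_to_int(unsigned bcd)
{
	unsigned value = 0, scale = 1;

	for (; bcd != 0; bcd >>= 4, scale *= 10)
		value += (bcd & 0xf) * scale;
	return value;
}

static void example_read_datecd(unsigned *year, unsigned *month, unsigned *day)
{
	unsigned reg = XT2000_DATECD_REG;	/* a single 32-bit read */

	*month = bcd_to_int((reg & XT2000_DATECD_MONTH_MASK) >> XT2000_DATECD_MONTH_SHIFT);
	*day   = bcd_to_int((reg & XT2000_DATECD_DAY_MASK)   >> XT2000_DATECD_DAY_SHIFT);
	*year  = bcd_to_int((reg & XT2000_DATECD_YEAR_MASK)  >> XT2000_DATECD_YEAR_SHIFT);
}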
+ */ + +/* Register offsets: */ +#define XT2000_DATECD_OFS 0x00 /* date code (read-only) */ +#define XT2000_STSREG_OFS 0x04 /* status (read-only) */ +#define XT2000_SYSLED_OFS 0x08 /* system LED */ +#define XT2000_WRPROT_OFS 0x0C /* write protect */ +#define XT2000_SWRST_OFS 0x10 /* software reset */ +#define XT2000_SYSRST_OFS 0x14 /* system (peripherals) reset */ +#define XT2000_IMASK_OFS 0x18 /* interrupt mask */ +#define XT2000_ISTAT_OFS 0x1C /* interrupt status */ +#define XT2000_V3CFG_OFS 0x20 /* V3 config (V320 PCI) */ + +/* Physical register addresses: */ +#ifdef XT2000_FPGAREGS_PADDR +#define XT2000_DATECD_PADDR (XT2000_FPGAREGS_PADDR+XT2000_DATECD_OFS) +#define XT2000_STSREG_PADDR (XT2000_FPGAREGS_PADDR+XT2000_STSREG_OFS) +#define XT2000_SYSLED_PADDR (XT2000_FPGAREGS_PADDR+XT2000_SYSLED_OFS) +#define XT2000_WRPROT_PADDR (XT2000_FPGAREGS_PADDR+XT2000_WRPROT_OFS) +#define XT2000_SWRST_PADDR (XT2000_FPGAREGS_PADDR+XT2000_SWRST_OFS) +#define XT2000_SYSRST_PADDR (XT2000_FPGAREGS_PADDR+XT2000_SYSRST_OFS) +#define XT2000_IMASK_PADDR (XT2000_FPGAREGS_PADDR+XT2000_IMASK_OFS) +#define XT2000_ISTAT_PADDR (XT2000_FPGAREGS_PADDR+XT2000_ISTAT_OFS) +#define XT2000_V3CFG_PADDR (XT2000_FPGAREGS_PADDR+XT2000_V3CFG_OFS) +#endif + +/* Virtual register addresses: */ +#ifdef XT2000_FPGAREGS_VADDR +#define XT2000_DATECD_VADDR (XT2000_FPGAREGS_VADDR+XT2000_DATECD_OFS) +#define XT2000_STSREG_VADDR (XT2000_FPGAREGS_VADDR+XT2000_STSREG_OFS) +#define XT2000_SYSLED_VADDR (XT2000_FPGAREGS_VADDR+XT2000_SYSLED_OFS) +#define XT2000_WRPROT_VADDR (XT2000_FPGAREGS_VADDR+XT2000_WRPROT_OFS) +#define XT2000_SWRST_VADDR (XT2000_FPGAREGS_VADDR+XT2000_SWRST_OFS) +#define XT2000_SYSRST_VADDR (XT2000_FPGAREGS_VADDR+XT2000_SYSRST_OFS) +#define XT2000_IMASK_VADDR (XT2000_FPGAREGS_VADDR+XT2000_IMASK_OFS) +#define XT2000_ISTAT_VADDR (XT2000_FPGAREGS_VADDR+XT2000_ISTAT_OFS) +#define XT2000_V3CFG_VADDR (XT2000_FPGAREGS_VADDR+XT2000_V3CFG_OFS) +/* Register access (for C code): */ +#define XT2000_DATECD_REG (*(volatile unsigned*) XT2000_DATECD_VADDR) +#define XT2000_STSREG_REG (*(volatile unsigned*) XT2000_STSREG_VADDR) +#define XT2000_SYSLED_REG (*(volatile unsigned*) XT2000_SYSLED_VADDR) +#define XT2000_WRPROT_REG (*(volatile unsigned*) XT2000_WRPROT_VADDR) +#define XT2000_SWRST_REG (*(volatile unsigned*) XT2000_SWRST_VADDR) +#define XT2000_SYSRST_REG (*(volatile unsigned*) XT2000_SYSRST_VADDR) +#define XT2000_IMASK_REG (*(volatile unsigned*) XT2000_IMASK_VADDR) +#define XT2000_ISTAT_REG (*(volatile unsigned*) XT2000_ISTAT_VADDR) +#define XT2000_V3CFG_REG (*(volatile unsigned*) XT2000_V3CFG_VADDR) +#endif + +/* DATECD (date code) bit fields: */ + +/* BCD-coded month (01..12): */ +#define XT2000_DATECD_MONTH_SHIFT 24 +#define XT2000_DATECD_MONTH_BITS 8 +#define XT2000_DATECD_MONTH_MASK 0xFF000000 +/* BCD-coded day (01..31): */ +#define XT2000_DATECD_DAY_SHIFT 16 +#define XT2000_DATECD_DAY_BITS 8 +#define XT2000_DATECD_DAY_MASK 0x00FF0000 +/* BCD-coded year (2001..9999): */ +#define XT2000_DATECD_YEAR_SHIFT 0 +#define XT2000_DATECD_YEAR_BITS 16 +#define XT2000_DATECD_YEAR_MASK 0x0000FFFF + +/* STSREG (status) bit fields: */ + +/* Switch SW3 setting bit fields (0=off/up, 1=on/down): */ +#define XT2000_STSREG_SW3_SHIFT 0 +#define XT2000_STSREG_SW3_BITS 4 +#define XT2000_STSREG_SW3_MASK 0x0000000F +/* Boot-select bits of switch SW3: */ +#define XT2000_STSREG_BOOTSEL_SHIFT 0 +#define XT2000_STSREG_BOOTSEL_BITS 2 +#define XT2000_STSREG_BOOTSEL_MASK 0x00000003 +/* Boot-select values: */ +#define XT2000_STSREG_BOOTSEL_FLASH 0 +#define 
XT2000_STSREG_BOOTSEL_EPROM16 1 +#define XT2000_STSREG_BOOTSEL_PROM8 2 +#define XT2000_STSREG_BOOTSEL_ASRAM 3 +/* User-defined bits of switch SW3: */ +#define XT2000_STSREG_SW3_2_SHIFT 2 +#define XT2000_STSREG_SW3_2_MASK 0x00000004 +#define XT2000_STSREG_SW3_3_SHIFT 3 +#define XT2000_STSREG_SW3_3_MASK 0x00000008 + +/* SYSLED (system LED) bit fields: */ + +/* LED control bit (0=off, 1=on): */ +#define XT2000_SYSLED_LEDON_SHIFT 0 +#define XT2000_SYSLED_LEDON_MASK 0x00000001 + +/* WRPROT (write protect) bit fields (0=writable, 1=write-protected [default]): */ + +/* Flash write protect: */ +#define XT2000_WRPROT_FLWP_SHIFT 0 +#define XT2000_WRPROT_FLWP_MASK 0x00000001 +/* Reserved but present write protect bits: */ +#define XT2000_WRPROT_WRP_SHIFT 1 +#define XT2000_WRPROT_WRP_BITS 7 +#define XT2000_WRPROT_WRP_MASK 0x000000FE + +/* SWRST (software reset; allows s/w to generate power-on equivalent reset): */ + +/* Software reset bits: */ +#define XT2000_SWRST_SWR_SHIFT 0 +#define XT2000_SWRST_SWR_BITS 16 +#define XT2000_SWRST_SWR_MASK 0x0000FFFF +/* Software reset value -- writing this value resets the board: */ +#define XT2000_SWRST_RESETVALUE 0x0000DEAD + +/* SYSRST (system reset; controls reset of individual peripherals): */ + +/* All-device reset: */ +#define XT2000_SYSRST_ALL_SHIFT 0 +#define XT2000_SYSRST_ALL_BITS 4 +#define XT2000_SYSRST_ALL_MASK 0x0000000F +/* HDSP-2534 LED display reset (1=reset, 0=nothing): */ +#define XT2000_SYSRST_LED_SHIFT 0 +#define XT2000_SYSRST_LED_MASK 0x00000001 +/* Sonic DP83934 Ethernet controller reset (1=reset, 0=nothing): */ +#define XT2000_SYSRST_SONIC_SHIFT 1 +#define XT2000_SYSRST_SONIC_MASK 0x00000002 +/* DP16552 DUART reset (1=reset, 0=nothing): */ +#define XT2000_SYSRST_DUART_SHIFT 2 +#define XT2000_SYSRST_DUART_MASK 0x00000004 +/* V3 V320 PCI bridge controller reset (1=reset, 0=nothing): */ +#define XT2000_SYSRST_V3_SHIFT 3 +#define XT2000_SYSRST_V3_MASK 0x00000008 + +/* IMASK (interrupt mask; 0=disable, 1=enable): */ +/* ISTAT (interrupt status; 0=inactive, 1=pending): */ + +/* PCI INTP interrupt: */ +#define XT2000_INTMUX_PCI_INTP_SHIFT 2 +#define XT2000_INTMUX_PCI_INTP_MASK 0x00000004 +/* PCI INTS interrupt: */ +#define XT2000_INTMUX_PCI_INTS_SHIFT 3 +#define XT2000_INTMUX_PCI_INTS_MASK 0x00000008 +/* PCI INTD interrupt: */ +#define XT2000_INTMUX_PCI_INTD_SHIFT 4 +#define XT2000_INTMUX_PCI_INTD_MASK 0x00000010 +/* V320 PCI controller interrupt: */ +#define XT2000_INTMUX_V3_SHIFT 5 +#define XT2000_INTMUX_V3_MASK 0x00000020 +/* PCI ENUM interrupt: */ +#define XT2000_INTMUX_PCI_ENUM_SHIFT 6 +#define XT2000_INTMUX_PCI_ENUM_MASK 0x00000040 +/* PCI DEG interrupt: */ +#define XT2000_INTMUX_PCI_DEG_SHIFT 7 +#define XT2000_INTMUX_PCI_DEG_MASK 0x00000080 + +/* V3CFG (V3 config, V320 PCI controller): */ + +/* V3 address control (0=pass-thru, 1=V3 address bits 31:28 set to 4'b0001 [default]): */ +#define XT2000_V3CFG_V3ADC_SHIFT 0 +#define XT2000_V3CFG_V3ADC_MASK 0x00000001 + +/* I2C Devices */ + +#define XT2000_I2C_RTC_ID 0x68 +#define XT2000_I2C_NVRAM0_ID 0x56 /* 1st 256 byte block */ +#define XT2000_I2C_NVRAM1_ID 0x57 /* 2nd 256 byte block */ + +/* NVRAM Board Info structure: */ + +#define XT2000_NVRAM_SIZE 512 + +#define XT2000_NVRAM_BINFO_START 0x100 +#define XT2000_NVRAM_BINFO_SIZE 0x20 +#define XT2000_NVRAM_BINFO_VERSION 0x10 /* version 1.0 */ +#if 0 +#define XT2000_NVRAM_BINFO_VERSION_OFFSET 0x00 +#define XT2000_NVRAM_BINFO_VERSION_SIZE 0x1 +#define XT2000_NVRAM_BINFO_ETH_ADDR_OFFSET 0x02 +#define XT2000_NVRAM_BINFO_ETH_ADDR_SIZE 0x6 +#define 
XT2000_NVRAM_BINFO_SN_OFFSET 0x10 +#define XT2000_NVRAM_BINFO_SN_SIZE 0xE +#define XT2000_NVRAM_BINFO_CRC_OFFSET 0x1E +#define XT2000_NVRAM_BINFO_CRC_SIZE 0x2 +#endif /*0*/ + +#if !defined(__ASSEMBLY__) && !defined(_NOCLANGUAGE) +typedef struct xt2000_nvram_binfo { + unsigned char version; + unsigned char reserved1; + unsigned char eth_addr[6]; + unsigned char reserved8[8]; + unsigned char serialno[14]; + unsigned char crc[2]; /* 16-bit CRC */ +} xt2000_nvram_binfo; +#endif /*!__ASSEMBLY__ && !_NOCLANGUAGE*/ + + +#endif /*_INC_XT2000_H_*/ + diff --git a/include/asm-xtensa/xtensa/xtboard.h b/include/asm-xtensa/xtensa/xtboard.h new file mode 100644 index 0000000..22469c1 --- /dev/null +++ b/include/asm-xtensa/xtensa/xtboard.h @@ -0,0 +1,120 @@ +#ifndef _xtboard_h_included_ +#define _xtboard_h_included_ + +/* + * THIS FILE IS GENERATED -- DO NOT MODIFY BY HAND + * + * xtboard.h -- Routines for getting useful information from the board. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2002 Tensilica Inc. + */ + + +#include <xtensa/xt2000.h> + +#define XTBOARD_RTC_ERROR -1 +#define XTBOARD_RTC_STOPPED -2 + + +/* xt2000-i2cdev.c: */ +typedef void XtboardDelayFunc( unsigned ); +extern XtboardDelayFunc* xtboard_set_nsdelay_func( XtboardDelayFunc *delay_fn ); +extern int xtboard_i2c_read (unsigned id, unsigned char *buf, unsigned addr, unsigned size); +extern int xtboard_i2c_write(unsigned id, unsigned char *buf, unsigned addr, unsigned size); +extern int xtboard_i2c_wait_nvram_ack(unsigned id, unsigned swtimer); + +/* xtboard.c: */ +extern int xtboard_nvram_read (unsigned addr, unsigned len, unsigned char *buf); +extern int xtboard_nvram_write(unsigned addr, unsigned len, unsigned char *buf); +extern int xtboard_nvram_binfo_read (xt2000_nvram_binfo *buf); +extern int xtboard_nvram_binfo_write(xt2000_nvram_binfo *buf); +extern int xtboard_nvram_binfo_valid(xt2000_nvram_binfo *buf); +extern int xtboard_ethermac_get(unsigned char *buf); +extern int xtboard_ethermac_set(unsigned char *buf); + +/*+*---------------------------------------------------------------------------- +/ Function: xtboard_get_rtc_time +/ +/ Description: Get time stored in real-time clock. +/ +/ Returns: time in seconds stored in real-time clock. +/-**----------------------------------------------------------------------------*/ + +extern unsigned xtboard_get_rtc_time(void); + +/*+*---------------------------------------------------------------------------- +/ Function: xtboard_set_rtc_time +/ +/ Description: Set time stored in real-time clock. +/ +/ Parameters: time -- time in seconds to store to real-time clock +/ +/ Returns: 0 on success, xtboard_i2c_write() error code otherwise. +/-**----------------------------------------------------------------------------*/ + +extern int xtboard_set_rtc_time(unsigned time); + + +/* xtfreq.c: */ +/*+*---------------------------------------------------------------------------- +/ Function: xtboard_measure_sys_clk +/ +/ Description: Get frequency of system clock. +/ +/ Parameters: none +/ +/ Returns: frequency of system clock. 
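The RTC accessors documented above pair naturally. A trivial, illustrative sketch (example_advance_rtc() is not part of the board library; error codes such as XTBOARD_RTC_ERROR on the read path are ignored here for brevity):

static int example_advance_rtc(unsigned seconds)
{
	unsigned now = xtboard_get_rtc_time();	/* current time in seconds */

	/* 0 on success, an xtboard_i2c_write() error code otherwise */
	return xtboard_set_rtc_time(now + seconds);
}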
+/-**----------------------------------------------------------------------------*/ + +extern unsigned xtboard_measure_sys_clk(void); + + +#if 0 /* old stuff from xtboard.c: */ + +/*+*---------------------------------------------------------------------------- +/ Function: xtboard_nvram valid +/ +/ Description: Determines if data in NVRAM is valid. +/ +/ Parameters: delay -- 10us delay function +/ +/ Returns: 1 if NVRAM is valid, 0 otherwise +/-**----------------------------------------------------------------------------*/ + +extern unsigned xtboard_nvram_valid(void (*delay)( void )); + +/*+*---------------------------------------------------------------------------- +/ Function: xtboard_get_nvram_contents +/ +/ Description: Returns contents of NVRAM. +/ +/ Parameters: buf -- buffer to NVRAM contents. +/ delay -- 10us delay function +/ +/ Returns: 1 if NVRAM is valid, 0 otherwise +/-**----------------------------------------------------------------------------*/ + +extern unsigned xtboard_get_nvram_contents(unsigned char *buf, void (*delay)( void )); + +/*+*---------------------------------------------------------------------------- +/ Function: xtboard_get_ether_addr +/ +/ Description: Returns ethernet address of board. +/ +/ Parameters: buf -- buffer to store ethernet address +/ delay -- 10us delay function +/ +/ Returns: nothing. +/-**----------------------------------------------------------------------------*/ + +extern void xtboard_get_ether_addr(unsigned char *buf, void (*delay)( void )); + +#endif /*0*/ + + +#endif /*_xtboard_h_included_*/ + diff --git a/include/linux/a.out.h b/include/linux/a.out.h index af8a1df..f913cc3 100644 --- a/include/linux/a.out.h +++ b/include/linux/a.out.h @@ -138,7 +138,7 @@ enum machine_type { #endif #endif -#define _N_SEGMENT_ROUND(x) (((x) + SEGMENT_SIZE - 1) & ~(SEGMENT_SIZE - 1)) +#define _N_SEGMENT_ROUND(x) ALIGN(x, SEGMENT_SIZE) #define _N_TXTENDADDR(x) (N_TXTADDR(x)+(x).a_text) diff --git a/include/linux/acpi.h b/include/linux/acpi.h index aefe6d0..b123cc0 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -25,6 +25,10 @@ #ifndef _LINUX_ACPI_H #define _LINUX_ACPI_H +#include <linux/config.h> + +#ifdef CONFIG_ACPI + #ifndef _LINUX #define _LINUX #endif @@ -533,4 +537,5 @@ static inline int acpi_get_pxm(acpi_handle handle) extern int pnpacpi_disabled; -#endif /*_LINUX_ACPI_H*/ +#endif /* CONFIG_ACPI */ +#endif /*_LINUX_ACPI_H*/ diff --git a/include/linux/arcfb.h b/include/linux/arcfb.h new file mode 100644 index 0000000..721e765 --- /dev/null +++ b/include/linux/arcfb.h @@ -0,0 +1,8 @@ +#ifndef __LINUX_ARCFB_H__ +#define __LINUX_ARCFB_H__ + +#define FBIO_WAITEVENT _IO('F', 0x88) +#define FBIO_GETCONTROL2 _IOR('F', 0x89, size_t) + +#endif + diff --git a/include/linux/ata.h b/include/linux/ata.h index f178894..ca5fcad 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -224,6 +224,7 @@ struct ata_taskfile { }; #define ata_id_is_ata(id) (((id)[0] & (1 << 15)) == 0) +#define ata_id_is_sata(id) ((id)[93] == 0) #define ata_id_rahead_enabled(id) ((id)[85] & (1 << 6)) #define ata_id_wcache_enabled(id) ((id)[85] & (1 << 5)) #define ata_id_has_flush(id) ((id)[83] & (1 << 12)) diff --git a/include/linux/atalk.h b/include/linux/atalk.h index 31d3fc2..09a1451 100644 --- a/include/linux/atalk.h +++ b/include/linux/atalk.h @@ -20,7 +20,7 @@ #define SIOCATALKDIFADDR (SIOCPROTOPRIVATE + 0) struct atalk_addr { - __u16 s_net; + __be16 s_net; __u8 s_node; }; @@ -33,8 +33,8 @@ struct sockaddr_at { struct atalk_netrange { __u8 nr_phase; - __u16 
nr_firstnet; - __u16 nr_lastnet; + __be16 nr_firstnet; + __be16 nr_lastnet; }; #ifdef __KERNEL__ @@ -70,8 +70,8 @@ struct atalk_iface { struct atalk_sock { /* struct sock has to be the first member of atalk_sock */ struct sock sk; - unsigned short dest_net; - unsigned short src_net; + __be16 dest_net; + __be16 src_net; unsigned char dest_node; unsigned char src_node; unsigned char dest_port; @@ -95,9 +95,9 @@ struct ddpehdr { deh_hops:4, deh_len:10; #endif - __u16 deh_sum; - __u16 deh_dnet; - __u16 deh_snet; + __be16 deh_sum; + __be16 deh_dnet; + __be16 deh_snet; __u8 deh_dnode; __u8 deh_snode; __u8 deh_dport; @@ -142,24 +142,24 @@ struct ddpshdr { /* AppleTalk AARP headers */ struct elapaarp { - __u16 hw_type; + __be16 hw_type; #define AARP_HW_TYPE_ETHERNET 1 #define AARP_HW_TYPE_TOKENRING 2 - __u16 pa_type; + __be16 pa_type; __u8 hw_len; __u8 pa_len; #define AARP_PA_ALEN 4 - __u16 function; + __be16 function; #define AARP_REQUEST 1 #define AARP_REPLY 2 #define AARP_PROBE 3 __u8 hw_src[ETH_ALEN] __attribute__ ((packed)); __u8 pa_src_zero __attribute__ ((packed)); - __u16 pa_src_net __attribute__ ((packed)); + __be16 pa_src_net __attribute__ ((packed)); __u8 pa_src_node __attribute__ ((packed)); __u8 hw_dst[ETH_ALEN] __attribute__ ((packed)); __u8 pa_dst_zero __attribute__ ((packed)); - __u16 pa_dst_net __attribute__ ((packed)); + __be16 pa_dst_net __attribute__ ((packed)); __u8 pa_dst_node __attribute__ ((packed)); }; diff --git a/include/linux/audit.h b/include/linux/audit.h index 19f04b0..bf2ad3b 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -27,15 +27,52 @@ #include <linux/sched.h> #include <linux/elf.h> -/* Request and reply types */ -#define AUDIT_GET 1000 /* Get status */ -#define AUDIT_SET 1001 /* Set status (enable/disable/auditd) */ -#define AUDIT_LIST 1002 /* List filtering rules */ -#define AUDIT_ADD 1003 /* Add filtering rule */ -#define AUDIT_DEL 1004 /* Delete filtering rule */ -#define AUDIT_USER 1005 /* Send a message from user-space */ -#define AUDIT_LOGIN 1006 /* Define the login id and informaiton */ -#define AUDIT_KERNEL 2000 /* Asynchronous audit record. NOT A REQUEST. */ +/* The netlink messages for the audit system is divided into blocks: + * 1000 - 1099 are for commanding the audit system + * 1100 - 1199 user space trusted application messages + * 1200 - 1299 messages internal to the audit daemon + * 1300 - 1399 audit event messages + * 1400 - 1499 SE Linux use + * 1500 - 1999 future use + * 2000 is for otherwise unclassified kernel audit messages + * + * Messages from 1000-1199 are bi-directional. 1200-1299 are exclusively user + * space. Anything over that is kernel --> user space communication. 
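The numbering scheme can be made concrete with a small classifier; the function below is purely illustrative and not part of the kernel API, it simply restates the ranges described here.

static const char *example_audit_msg_class(int type)
{
	if (type >= 1000 && type <= 1099)
		return "audit-control command";
	if (type >= 1100 && type <= 1199)
		return "userspace trusted-app message";
	if (type >= 1200 && type <= 1299)
		return "audit daemon internal";
	if (type >= 1300 && type <= 1399)
		return "audit event";
	if (type >= 1400 && type <= 1499)
		return "SE Linux";
	if (type == 2000)
		return "unclassified kernel record";
	return "reserved / future use";
}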
+ */ +#define AUDIT_GET 1000 /* Get status */ +#define AUDIT_SET 1001 /* Set status (enable/disable/auditd) */ +#define AUDIT_LIST 1002 /* List syscall filtering rules */ +#define AUDIT_ADD 1003 /* Add syscall filtering rule */ +#define AUDIT_DEL 1004 /* Delete syscall filtering rule */ +#define AUDIT_USER 1005 /* Message from userspace -- deprecated */ +#define AUDIT_LOGIN 1006 /* Define the login id and information */ +#define AUDIT_WATCH_INS 1007 /* Insert file/dir watch entry */ +#define AUDIT_WATCH_REM 1008 /* Remove file/dir watch entry */ +#define AUDIT_WATCH_LIST 1009 /* List all file/dir watches */ +#define AUDIT_SIGNAL_INFO 1010 /* Get info about sender of signal to auditd */ + +#define AUDIT_FIRST_USER_MSG 1100 /* Userspace messages uninteresting to kernel */ +#define AUDIT_LAST_USER_MSG 1199 + +#define AUDIT_DAEMON_START 1200 /* Daemon startup record */ +#define AUDIT_DAEMON_END 1201 /* Daemon normal stop record */ +#define AUDIT_DAEMON_ABORT 1202 /* Daemon error stop record */ +#define AUDIT_DAEMON_CONFIG 1203 /* Daemon config change */ + +#define AUDIT_SYSCALL 1300 /* Syscall event */ +#define AUDIT_FS_WATCH 1301 /* Filesystem watch event */ +#define AUDIT_PATH 1302 /* Filename path information */ +#define AUDIT_IPC 1303 /* IPC record */ +#define AUDIT_SOCKETCALL 1304 /* sys_socketcall arguments */ +#define AUDIT_CONFIG_CHANGE 1305 /* Audit system configuration change */ +#define AUDIT_SOCKADDR 1306 /* sockaddr copied as syscall arg */ +#define AUDIT_CWD 1307 /* Current working directory */ + +#define AUDIT_AVC 1400 /* SE Linux avc denial or grant */ +#define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */ +#define AUDIT_AVC_PATH 1402 /* dentry, vfsmount pair from avc */ + +#define AUDIT_KERNEL 2000 /* Asynchronous audit record. NOT A REQUEST. 
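Each record now carries one of these types explicitly; the audit_log_start() and audit_log() prototypes further down take it as an argument. A hedged sketch of a call site follows: the format string is made up, a NULL context is assumed to yield a standalone (non-syscall) record, and audit_log_end() is assumed to complete the record as its no-op stub later in this header suggests.

static void example_report_config_change(int enabled, int old)
{
	struct audit_buffer *ab;

	ab = audit_log_start(NULL, AUDIT_CONFIG_CHANGE);
	if (!ab)
		return;		/* auditing disabled or no buffer available */
	audit_log_format(ab, "audit_enabled=%d old=%d", enabled, old);
	audit_log_end(ab);
}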
*/ /* Rule flags */ #define AUDIT_PER_TASK 0x01 /* Apply rule at task creation (not syscall) */ @@ -132,16 +169,9 @@ #define AUDIT_ARCH_V850 (EM_V850|__AUDIT_ARCH_LE) #define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) -#ifndef __KERNEL__ -struct audit_message { - struct nlmsghdr nlh; - char data[1200]; -}; -#endif - struct audit_status { __u32 mask; /* Bit mask for valid entries */ - __u32 enabled; /* 1 = enabled, 0 = disbaled */ + __u32 enabled; /* 1 = enabled, 0 = disabled */ __u32 failure; /* Failure-to-log action */ __u32 pid; /* pid of auditd process */ __u32 rate_limit; /* messages rate limit (per second) */ @@ -161,6 +191,11 @@ struct audit_rule { /* for AUDIT_LIST, AUDIT_ADD, and AUDIT_DEL */ #ifdef __KERNEL__ +struct audit_sig_info { + uid_t uid; + pid_t pid; +}; + struct audit_buffer; struct audit_context; struct inode; @@ -185,11 +220,16 @@ extern void audit_inode(const char *name, const struct inode *inode); /* Private API (for audit.c only) */ extern int audit_receive_filter(int type, int pid, int uid, int seq, void *data, uid_t loginuid); -extern void audit_get_stamp(struct audit_context *ctx, - struct timespec *t, unsigned int *serial); +extern unsigned int audit_serial(void); +extern void auditsc_get_stamp(struct audit_context *ctx, + struct timespec *t, unsigned int *serial); extern int audit_set_loginuid(struct task_struct *task, uid_t loginuid); extern uid_t audit_get_loginuid(struct audit_context *ctx); extern int audit_ipc_perms(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode); +extern int audit_socketcall(int nargs, unsigned long *args); +extern int audit_sockaddr(int len, void *addr); +extern int audit_avc_path(struct dentry *dentry, struct vfsmount *mnt); +extern void audit_signal_info(int sig, struct task_struct *t); #else #define audit_alloc(t) ({ 0; }) #define audit_free(t) do { ; } while (0) @@ -198,18 +238,24 @@ extern int audit_ipc_perms(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mo #define audit_getname(n) do { ; } while (0) #define audit_putname(n) do { ; } while (0) #define audit_inode(n,i) do { ; } while (0) +#define audit_receive_filter(t,p,u,s,d,l) ({ -EOPNOTSUPP; }) +#define auditsc_get_stamp(c,t,s) do { BUG(); } while (0) #define audit_get_loginuid(c) ({ -1; }) #define audit_ipc_perms(q,u,g,m) ({ 0; }) +#define audit_socketcall(n,a) ({ 0; }) +#define audit_sockaddr(len, addr) ({ 0; }) +#define audit_avc_path(dentry, mnt) ({ 0; }) +#define audit_signal_info(s,t) do { ; } while (0) #endif #ifdef CONFIG_AUDIT /* These are defined in audit.c */ /* Public API */ -extern void audit_log(struct audit_context *ctx, +extern void audit_log(struct audit_context *ctx, int type, const char *fmt, ...) - __attribute__((format(printf,2,3))); + __attribute__((format(printf,3,4))); -extern struct audit_buffer *audit_log_start(struct audit_context *ctx); +extern struct audit_buffer *audit_log_start(struct audit_context *ctx,int type); extern void audit_log_format(struct audit_buffer *ab, const char *fmt, ...) __attribute__((format(printf,2,3))); @@ -229,8 +275,8 @@ extern void audit_send_reply(int pid, int seq, int type, void *payload, int size); extern void audit_log_lost(const char *message); #else -#define audit_log(t,f,...) do { ; } while (0) -#define audit_log_start(t) ({ NULL; }) +#define audit_log(c,t,f,...) do { ; } while (0) +#define audit_log_start(c,t) ({ NULL; }) #define audit_log_vformat(b,f,a) do { ; } while (0) #define audit_log_format(b,f,...) 
do { ; } while (0) #define audit_log_end(b) do { ; } while (0) diff --git a/include/linux/auto_fs4.h b/include/linux/auto_fs4.h index a1657fb..9343c89 100644 --- a/include/linux/auto_fs4.h +++ b/include/linux/auto_fs4.h @@ -23,7 +23,7 @@ #define AUTOFS_MIN_PROTO_VERSION 3 #define AUTOFS_MAX_PROTO_VERSION 4 -#define AUTOFS_PROTO_SUBVERSION 6 +#define AUTOFS_PROTO_SUBVERSION 7 /* Mask for expire behaviour */ #define AUTOFS_EXP_IMMEDIATE 1 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 7e736e2..c1e82c5 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -69,6 +69,11 @@ extern void remove_arg_zero(struct linux_binprm *); extern int search_binary_handler(struct linux_binprm *,struct pt_regs *); extern int flush_old_exec(struct linux_binprm * bprm); +extern int suid_dumpable; +#define SUID_DUMP_DISABLE 0 /* No setuid dumping */ +#define SUID_DUMP_USER 1 /* Dump as user of process */ +#define SUID_DUMP_ROOT 2 /* Dump as root */ + /* Stack area protections */ #define EXSTACK_DEFAULT 0 /* Whatever the arch defaults to */ #define EXSTACK_DISABLE_X 1 /* Disable executable stacks */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index ef1afc1..b54a034 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -285,16 +285,12 @@ enum blk_queue_state { Queue_up, }; -#define BLK_TAGS_PER_LONG (sizeof(unsigned long) * 8) -#define BLK_TAGS_MASK (BLK_TAGS_PER_LONG - 1) - struct blk_queue_tag { struct request **tag_index; /* map of busy tags */ unsigned long *tag_map; /* bit map of free/busy tags */ struct list_head busy_list; /* fifo list of busy tags */ int busy; /* current depth */ int max_depth; /* what we will send to device */ - int real_max_depth; /* what the array can hold */ atomic_t refcnt; /* map can be shared */ }; @@ -396,6 +392,7 @@ struct request_queue */ unsigned int sg_timeout; unsigned int sg_reserved_size; + int node; struct list_head drain_list; @@ -542,15 +539,12 @@ extern void generic_make_request(struct bio *bio); extern void blk_put_request(struct request *); extern void blk_end_sync_rq(struct request *rq); extern void blk_attempt_remerge(request_queue_t *, struct request *); -extern void __blk_attempt_remerge(request_queue_t *, struct request *); extern struct request *blk_get_request(request_queue_t *, int, int); -extern void blk_insert_request(request_queue_t *, struct request *, int, void *, int); +extern void blk_insert_request(request_queue_t *, struct request *, int, void *); extern void blk_requeue_request(request_queue_t *, struct request *); extern void blk_plug_device(request_queue_t *); extern int blk_remove_plug(request_queue_t *); extern void blk_recount_segments(request_queue_t *, struct bio *); -extern int blk_phys_contig_segment(request_queue_t *q, struct bio *, struct bio *); -extern int blk_hw_contig_segment(request_queue_t *q, struct bio *, struct bio *); extern int scsi_cmd_ioctl(struct file *, struct gendisk *, unsigned int, void __user *); extern void blk_start_queue(request_queue_t *q); extern void blk_stop_queue(request_queue_t *q); @@ -615,6 +609,8 @@ static inline void blkdev_dequeue_request(struct request *req) /* * Access functions for manipulating queue properties */ +extern request_queue_t *blk_init_queue_node(request_fn_proc *rfn, + spinlock_t *lock, int node_id); extern request_queue_t *blk_init_queue(request_fn_proc *, spinlock_t *); extern void blk_cleanup_queue(request_queue_t *); extern void blk_queue_make_request(request_queue_t *, make_request_fn *); @@ -632,7 +628,6 @@ extern void 
blk_queue_dma_alignment(request_queue_t *, int); extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); extern void blk_queue_ordered(request_queue_t *, int); extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *); -extern int blkdev_scsi_issue_flush_fn(request_queue_t *, struct gendisk *, sector_t *); extern struct request *blk_start_pre_flush(request_queue_t *,struct request *); extern int blk_complete_barrier_rq(request_queue_t *, struct request *, int); extern int blk_complete_barrier_rq_locked(request_queue_t *, struct request *, int); @@ -646,7 +641,8 @@ extern void blk_wait_queue_drained(request_queue_t *, int); extern void blk_finish_queue_drain(request_queue_t *); int blk_get_queue(request_queue_t *); -request_queue_t *blk_alloc_queue(int); +request_queue_t *blk_alloc_queue(int gfp_mask); +request_queue_t *blk_alloc_queue_node(int,int); #define blk_put_queue(q) blk_cleanup_queue((q)) /* @@ -675,8 +671,6 @@ extern int blkdev_issue_flush(struct block_device *, sector_t *); #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist) -extern void drive_stat_acct(struct request *, int, int); - static inline int queue_hardsect_size(request_queue_t *q) { int retval = 512; diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index 0dd8ca1..82bd884 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -22,6 +22,10 @@ extern unsigned long min_low_pfn; */ extern unsigned long max_pfn; +#ifdef CONFIG_CRASH_DUMP +extern unsigned long saved_max_pfn; +#endif + /* * node_bootmem_map is a map pointer - the bits represent all physical * memory pages (including holes) on the node. @@ -67,6 +71,15 @@ extern void * __init __alloc_bootmem_node (pg_data_t *pgdat, unsigned long size, __alloc_bootmem_node((pgdat), (x), PAGE_SIZE, 0) #endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */ +#ifdef CONFIG_HAVE_ARCH_ALLOC_REMAP +extern void *alloc_remap(int nid, unsigned long size); +#else +static inline void *alloc_remap(int nid, unsigned long size) +{ + return NULL; +} +#endif + extern unsigned long __initdata nr_kernel_pages; extern unsigned long __initdata nr_all_pages; diff --git a/include/linux/chio.h b/include/linux/chio.h new file mode 100644 index 0000000..63035ae --- /dev/null +++ b/include/linux/chio.h @@ -0,0 +1,168 @@ +/* + * ioctl interface for the scsi media changer driver + */ + +/* changer element types */ +#define CHET_MT 0 /* media transport element (robot) */ +#define CHET_ST 1 /* storage element (media slots) */ +#define CHET_IE 2 /* import/export element */ +#define CHET_DT 3 /* data transfer element (tape/cdrom/whatever) */ +#define CHET_V1 4 /* vendor specific #1 */ +#define CHET_V2 5 /* vendor specific #2 */ +#define CHET_V3 6 /* vendor specific #3 */ +#define CHET_V4 7 /* vendor specific #4 */ + + +/* + * CHIOGPARAMS + * query changer properties + * + * CHIOVGPARAMS + * query vendor-specific element types + * + * accessing elements works by specifing type and unit of the element. + * for eample, storage elements are addressed with type = CHET_ST and + * unit = 0 .. 
cp_nslots-1 + * + */ +struct changer_params { + int cp_curpicker; /* current transport element */ + int cp_npickers; /* number of transport elements (CHET_MT) */ + int cp_nslots; /* number of storage elements (CHET_ST) */ + int cp_nportals; /* number of import/export elements (CHET_IE) */ + int cp_ndrives; /* number of data transfer elements (CHET_DT) */ +}; +struct changer_vendor_params { + int cvp_n1; /* number of vendor specific elems (CHET_V1) */ + char cvp_label1[16]; + int cvp_n2; /* number of vendor specific elems (CHET_V2) */ + char cvp_label2[16]; + int cvp_n3; /* number of vendor specific elems (CHET_V3) */ + char cvp_label3[16]; + int cvp_n4; /* number of vendor specific elems (CHET_V4) */ + char cvp_label4[16]; + int reserved[8]; +}; + + +/* + * CHIOMOVE + * move a medium from one element to another + */ +struct changer_move { + int cm_fromtype; /* type/unit of source element */ + int cm_fromunit; + int cm_totype; /* type/unit of destination element */ + int cm_tounit; + int cm_flags; +}; +#define CM_INVERT 1 /* flag: rotate media (for double-sided like MOD) */ + + +/* + * CHIOEXCHANGE + * move one medium from element #1 to element #2, + * and another one from element #2 to element #3. + * element #1 and #3 are allowed to be identical. + */ +struct changer_exchange { + int ce_srctype; /* type/unit of element #1 */ + int ce_srcunit; + int ce_fdsttype; /* type/unit of element #2 */ + int ce_fdstunit; + int ce_sdsttype; /* type/unit of element #3 */ + int ce_sdstunit; + int ce_flags; +}; +#define CE_INVERT1 1 +#define CE_INVERT2 2 + + +/* + * CHIOPOSITION + * move the transport element (robot arm) to a specific element. + */ +struct changer_position { + int cp_type; + int cp_unit; + int cp_flags; +}; +#define CP_INVERT 1 + + +/* + * CHIOGSTATUS + * get element status for all elements of a specific type + */ +struct changer_element_status { + int ces_type; + unsigned char *ces_data; +}; +#define CESTATUS_FULL 0x01 /* full */ +#define CESTATUS_IMPEXP 0x02 /* media was imported (inserted by sysop) */ +#define CESTATUS_EXCEPT 0x04 /* error condition */ +#define CESTATUS_ACCESS 0x08 /* access allowed */ +#define CESTATUS_EXENAB 0x10 /* element can export media */ +#define CESTATUS_INENAB 0x20 /* element can import media */ + + +/* + * CHIOGELEM + * get more detailed status informtion for a single element + */ +struct changer_get_element { + int cge_type; /* type/unit */ + int cge_unit; + int cge_status; /* status */ + int cge_errno; /* errno */ + int cge_srctype; /* source element of the last move/exchange */ + int cge_srcunit; + int cge_id; /* scsi id (for data transfer elements) */ + int cge_lun; /* scsi lun (for data transfer elements) */ + char cge_pvoltag[36]; /* primary volume tag */ + char cge_avoltag[36]; /* alternate volume tag */ + int cge_flags; +}; +/* flags */ +#define CGE_ERRNO 0x01 /* errno available */ +#define CGE_INVERT 0x02 /* media inverted */ +#define CGE_SRC 0x04 /* media src available */ +#define CGE_IDLUN 0x08 /* ID+LUN available */ +#define CGE_PVOLTAG 0x10 /* primary volume tag available */ +#define CGE_AVOLTAG 0x20 /* alternate volume tag available */ + + +/* + * CHIOSVOLTAG + * set volume tag + */ +struct changer_set_voltag { + int csv_type; /* type/unit */ + int csv_unit; + char csv_voltag[36]; /* volume tag */ + int csv_flags; +}; +#define CSV_PVOLTAG 0x01 /* primary volume tag */ +#define CSV_AVOLTAG 0x02 /* alternate volume tag */ +#define CSV_CLEARTAG 0x04 /* clear volume tag */ + +/* ioctls */ +#define CHIOMOVE _IOW('c', 1,struct changer_move) +#define 
CHIOEXCHANGE _IOW('c', 2,struct changer_exchange) +#define CHIOPOSITION _IOW('c', 3,struct changer_position) +#define CHIOGPICKER _IOR('c', 4,int) /* not impl. */ +#define CHIOSPICKER _IOW('c', 5,int) /* not impl. */ +#define CHIOGPARAMS _IOR('c', 6,struct changer_params) +#define CHIOGSTATUS _IOW('c', 8,struct changer_element_status) +#define CHIOGELEM _IOW('c',16,struct changer_get_element) +#define CHIOINITELEM _IO('c',17) +#define CHIOSVOLTAG _IOW('c',18,struct changer_set_voltag) +#define CHIOGVPARAMS _IOR('c',19,struct changer_vendor_params) + +/* ---------------------------------------------------------------------- */ + +/* + * Local variables: + * c-basic-offset: 8 + * End: + */ diff --git a/include/linux/cpu.h b/include/linux/cpu.h index fe0298e..e8904c0 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -69,6 +69,7 @@ extern struct semaphore cpucontrol; register_cpu_notifier(&fn##_nb); \ } int cpu_down(unsigned int cpu); +extern int __attribute__((weak)) smp_prepare_cpu(int cpu); #define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) #else #define lock_cpu_hotplug() do { } while (0) diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index f21af06..927daa8 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -49,7 +49,7 @@ int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list); /* Frequency values here are CPU kHz so that hardware which doesn't run * with some frequencies can complain without having to guess what per * cent / per mille means. - * Maximum transition latency is in microseconds - if it's unknown, + * Maximum transition latency is in nanoseconds - if it's unknown, * CPUFREQ_ETERNAL shall be used. */ diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h new file mode 100644 index 0000000..534d750 --- /dev/null +++ b/include/linux/crash_dump.h @@ -0,0 +1,18 @@ +#ifndef LINUX_CRASH_DUMP_H +#define LINUX_CRASH_DUMP_H + +#ifdef CONFIG_CRASH_DUMP +#include <linux/kexec.h> +#include <linux/smp_lock.h> +#include <linux/device.h> +#include <linux/proc_fs.h> + +#define ELFCORE_ADDR_MAX (-1ULL) +extern unsigned long long elfcorehdr_addr; +extern ssize_t copy_oldmem_page(unsigned long, char *, size_t, + unsigned long, int); +extern struct file_operations proc_vmcore_operations; +extern struct proc_dir_entry *proc_vmcore; + +#endif /* CONFIG_CRASH_DUMP */ +#endif /* LINUX_CRASHDUMP_H */ diff --git a/include/linux/device.h b/include/linux/device.h index df94c0d..7b781a7 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -14,6 +14,7 @@ #include <linux/config.h> #include <linux/ioport.h> #include <linux/kobject.h> +#include <linux/klist.h> #include <linux/list.h> #include <linux/types.h> #include <linux/module.h> @@ -44,14 +45,15 @@ struct device; struct device_driver; struct class; struct class_device; -struct class_simple; struct bus_type { - char * name; + const char * name; struct subsystem subsys; struct kset drivers; struct kset devices; + struct klist klist_devices; + struct klist klist_drivers; struct bus_attribute * bus_attrs; struct device_attribute * dev_attrs; @@ -98,17 +100,18 @@ extern int bus_create_file(struct bus_type *, struct bus_attribute *); extern void bus_remove_file(struct bus_type *, struct bus_attribute *); struct device_driver { - char * name; + const char * name; struct bus_type * bus; struct completion unloaded; struct kobject kobj; - struct list_head devices; + struct klist klist_devices; + struct klist_node knode_bus; - struct module * owner; + struct module 
* owner; int (*probe) (struct device * dev); - int (*remove) (struct device * dev); + int (*remove) (struct device * dev); void (*shutdown) (struct device * dev); int (*suspend) (struct device * dev, pm_message_t state, u32 level); int (*resume) (struct device * dev, u32 level); @@ -137,12 +140,16 @@ struct driver_attribute driver_attr_##_name = __ATTR(_name,_mode,_show,_store) extern int driver_create_file(struct device_driver *, struct driver_attribute *); extern void driver_remove_file(struct device_driver *, struct driver_attribute *); +extern int driver_for_each_device(struct device_driver * drv, struct device * start, + void * data, int (*fn)(struct device *, void *)); + /* * device classes */ struct class { - char * name; + const char * name; + struct module * owner; struct subsystem subsys; struct list_head children; @@ -185,6 +192,7 @@ struct class_device { struct kobject kobj; struct class * class; /* required */ dev_t devt; /* dev_t, creates the sysfs "dev" */ + struct class_device_attribute *devt_attr; struct device * dev; /* not necessary, but nice to have */ void * class_data; /* class-specific data */ @@ -245,26 +253,28 @@ struct class_interface { extern int class_interface_register(struct class_interface *); extern void class_interface_unregister(struct class_interface *); -/* interface for class simple stuff */ -extern struct class_simple *class_simple_create(struct module *owner, char *name); -extern void class_simple_destroy(struct class_simple *cs); -extern struct class_device *class_simple_device_add(struct class_simple *cs, dev_t dev, struct device *device, const char *fmt, ...) - __attribute__((format(printf,4,5))); -extern int class_simple_set_hotplug(struct class_simple *, - int (*hotplug)(struct class_device *dev, char **envp, int num_envp, char *buffer, int buffer_size)); -extern void class_simple_device_remove(dev_t dev); +extern struct class *class_create(struct module *owner, char *name); +extern void class_destroy(struct class *cls); +extern struct class_device *class_device_create(struct class *cls, dev_t devt, + struct device *device, char *fmt, ...) + __attribute__((format(printf,4,5))); +extern void class_device_destroy(struct class *cls, dev_t devt); struct device { - struct list_head node; /* node in sibling list */ - struct list_head bus_list; /* node in bus's list */ - struct list_head driver_list; - struct list_head children; + struct klist klist_children; + struct klist_node knode_parent; /* node in sibling list */ + struct klist_node knode_driver; + struct klist_node knode_bus; struct device * parent; struct kobject kobj; char bus_id[BUS_ID_SIZE]; /* position on parent bus */ + struct semaphore sem; /* semaphore to synchronize calls to + * its driver. + */ + struct bus_type * bus; /* type of bus device is on */ struct device_driver *driver; /* which driver has allocated this device */ @@ -288,12 +298,6 @@ struct device { void (*release)(struct device * dev); }; -static inline struct device * -list_to_dev(struct list_head *node) -{ - return list_entry(node, struct device, node); -} - static inline void * dev_get_drvdata (struct device *dev) { @@ -321,7 +325,6 @@ extern int device_for_each_child(struct device *, void *, * Manual binding of a device to driver. See drivers/base/bus.c * for information on use. 
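The class_simple removal above implies a mechanical conversion for drivers: create a real struct class and register class devices against it. The sketch below shows the shape of that conversion under the new API; the "foo" names and FOO_MAJOR are placeholders, and ERR_PTR-encoded failure returns from class_create()/class_device_create() are assumed.

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/kdev_t.h>
#include <linux/err.h>

#define FOO_MAJOR 240				/* placeholder major number */

static struct class *foo_class;

static int __init foo_init(void)
{
	struct class_device *cd;

	foo_class = class_create(THIS_MODULE, "foo");
	if (IS_ERR(foo_class))
		return PTR_ERR(foo_class);

	/* creates /sys/class/foo/foo0, including the "dev" attribute for hotplug/udev */
	cd = class_device_create(foo_class, MKDEV(FOO_MAJOR, 0), NULL, "foo0");
	if (IS_ERR(cd)) {
		class_destroy(foo_class);
		return PTR_ERR(cd);
	}
	return 0;
}

static void __exit foo_exit(void)
{
	class_device_destroy(foo_class, MKDEV(FOO_MAJOR, 0));
	class_destroy(foo_class);
}

module_init(foo_init);
module_exit(foo_exit);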
*/ -extern int driver_probe_device(struct device_driver * drv, struct device * dev); extern void device_bind_driver(struct device * dev); extern void device_release_driver(struct device * dev); extern int device_attach(struct device * dev); @@ -332,8 +335,10 @@ extern void driver_attach(struct device_driver * drv); struct device_attribute { struct attribute attr; - ssize_t (*show)(struct device * dev, char * buf); - ssize_t (*store)(struct device * dev, const char * buf, size_t count); + ssize_t (*show)(struct device *dev, struct device_attribute *attr, + char *buf); + ssize_t (*store)(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count); }; #define DEVICE_ATTR(_name,_mode,_show,_store) \ @@ -360,13 +365,12 @@ extern int (*platform_notify_remove)(struct device * dev); */ extern struct device * get_device(struct device * dev); extern void put_device(struct device * dev); -extern struct device *device_find(const char *name, struct bus_type *bus); /* drivers/base/platform.c */ struct platform_device { - char * name; + const char * name; u32 id; struct device dev; u32 num_resources; diff --git a/include/linux/dm9000.h b/include/linux/dm9000.h new file mode 100644 index 0000000..0008e2a --- /dev/null +++ b/include/linux/dm9000.h @@ -0,0 +1,36 @@ +/* include/linux/dm9000.h + * + * Copyright (c) 2004 Simtec Electronics + * Ben Dooks <ben@simtec.co.uk> + * + * Header file for dm9000 platform data + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * +*/ + +#ifndef __DM9000_PLATFORM_DATA +#define __DM9000_PLATFORM_DATA __FILE__ + +/* IO control flags */ + +#define DM9000_PLATF_8BITONLY (0x0001) +#define DM9000_PLATF_16BITONLY (0x0002) +#define DM9000_PLATF_32BITONLY (0x0004) + +/* platfrom data for platfrom device structure's platfrom_data field */ + +struct dm9000_plat_data { + unsigned int flags; + + /* allow replacement IO routines */ + + void (*inblk)(void __iomem *reg, void *data, int len); + void (*outblk)(void __iomem *reg, void *data, int len); + void (*dumpblk)(void __iomem *reg, int len); +}; + +#endif /* __DM9000_PLATFORM_DATA */ + diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 806c305..2d80cc7 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -14,7 +14,12 @@ enum dma_data_direction { }; #define DMA_64BIT_MASK 0xffffffffffffffffULL +#define DMA_40BIT_MASK 0x000000ffffffffffULL +#define DMA_39BIT_MASK 0x0000007fffffffffULL #define DMA_32BIT_MASK 0x00000000ffffffffULL +#define DMA_31BIT_MASK 0x000000007fffffffULL +#define DMA_30BIT_MASK 0x000000003fffffffULL +#define DMA_29BIT_MASK 0x000000001fffffffULL #include <asm/dma-mapping.h> diff --git a/include/linux/dmi.h b/include/linux/dmi.h index d2bcf55..5e93e6d 100644 --- a/include/linux/dmi.h +++ b/include/linux/dmi.h @@ -9,6 +9,7 @@ enum dmi_field { DMI_SYS_VENDOR, DMI_PRODUCT_NAME, DMI_PRODUCT_VERSION, + DMI_PRODUCT_SERIAL, DMI_BOARD_VENDOR, DMI_BOARD_NAME, DMI_BOARD_VERSION, diff --git a/include/linux/dqblk_v1.h b/include/linux/dqblk_v1.h index 42fbf47..57f1250 100644 --- a/include/linux/dqblk_v1.h +++ b/include/linux/dqblk_v1.h @@ -11,6 +11,12 @@ /* Root squash turned on */ #define V1_DQF_RSQUASH 1 +/* Numbers of blocks needed for updates */ +#define V1_INIT_ALLOC 1 +#define V1_INIT_REWRITE 1 +#define V1_DEL_ALLOC 0 +#define V1_DEL_REWRITE 2 + /* Special information about quotafile */ struct v1_mem_dqinfo { 
}; diff --git a/include/linux/dqblk_v2.h b/include/linux/dqblk_v2.h index 4a6c5f6..4f85332 100644 --- a/include/linux/dqblk_v2.h +++ b/include/linux/dqblk_v2.h @@ -10,6 +10,12 @@ /* id numbers of quota format */ #define QFMT_VFS_V0 2 +/* Numbers of blocks needed for updates */ +#define V2_INIT_ALLOC 4 +#define V2_INIT_REWRITE 2 +#define V2_DEL_ALLOC 0 +#define V2_DEL_REWRITE 6 + /* Inmemory copy of version specific information */ struct v2_mem_dqinfo { unsigned int dqi_blocks; diff --git a/include/linux/efi.h b/include/linux/efi.h index 047e722..73781ec 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -315,7 +315,7 @@ extern struct efi_memory_map memmap; */ static inline int efi_range_is_wc(unsigned long start, unsigned long len) { - int i; + unsigned long i; for (i = 0; i < len; i += (1UL << EFI_PAGE_SHIFT)) { unsigned long paddr = __pa(start + i); diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 220748b..a147825 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -56,18 +56,32 @@ static inline int is_zero_ether_addr(const u8 *addr) } /** + * is_multicast_ether_addr - Determine if the given Ethernet address is a + * multicast address. + * + * @addr: Pointer to a six-byte array containing the Ethernet address + * + * Return true if the address is a multicast address. + */ +static inline int is_multicast_ether_addr(const u8 *addr) +{ + return addr[0] & 0x01; +} + +/** * is_valid_ether_addr - Determine if the given Ethernet address is valid * @addr: Pointer to a six-byte array containing the Ethernet address * * Check that the Ethernet address (MAC) is not 00:00:00:00:00:00, is not - * a multicast address, and is not FF:FF:FF:FF:FF:FF. The multicast - * and FF:FF:... tests are combined into the single test "!(addr[0]&1)". + * a multicast address, and is not FF:FF:FF:FF:FF:FF. * * Return true if the address is valid. */ static inline int is_valid_ether_addr(const u8 *addr) { - return !(addr[0]&1) && !is_zero_ether_addr(addr); + /* FF:FF:FF:FF:FF:FF is a multicast address so we don't need to + * explicitly check for it here. 
*/ + return !is_multicast_ether_addr(addr) && !is_zero_ether_addr(addr); } /** @@ -83,6 +97,6 @@ static inline void random_ether_addr(u8 *addr) addr [0] &= 0xfe; /* clear multicast bit */ addr [0] |= 0x02; /* set local assignment bit (IEEE802) */ } -#endif +#endif /* __KERNEL__ */ #endif /* _LINUX_ETHERDEVICE_H */ diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index c85b210..a0ab26a 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -256,6 +256,7 @@ struct net_device; u32 ethtool_op_get_link(struct net_device *dev); u32 ethtool_op_get_tx_csum(struct net_device *dev); int ethtool_op_set_tx_csum(struct net_device *dev, u32 data); +int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data); u32 ethtool_op_get_sg(struct net_device *dev); int ethtool_op_set_sg(struct net_device *dev, u32 data); u32 ethtool_op_get_tso(struct net_device *dev); diff --git a/include/linux/ext2_fs.h b/include/linux/ext2_fs.h index fab4352..a657130b 100644 --- a/include/linux/ext2_fs.h +++ b/include/linux/ext2_fs.h @@ -300,18 +300,19 @@ struct ext2_inode { /* * Mount flags */ -#define EXT2_MOUNT_CHECK 0x0001 /* Do mount-time checks */ -#define EXT2_MOUNT_OLDALLOC 0x0002 /* Don't use the new Orlov allocator */ -#define EXT2_MOUNT_GRPID 0x0004 /* Create files with directory's group */ -#define EXT2_MOUNT_DEBUG 0x0008 /* Some debugging messages */ -#define EXT2_MOUNT_ERRORS_CONT 0x0010 /* Continue on errors */ -#define EXT2_MOUNT_ERRORS_RO 0x0020 /* Remount fs ro on errors */ -#define EXT2_MOUNT_ERRORS_PANIC 0x0040 /* Panic on errors */ -#define EXT2_MOUNT_MINIX_DF 0x0080 /* Mimics the Minix statfs */ -#define EXT2_MOUNT_NOBH 0x0100 /* No buffer_heads */ -#define EXT2_MOUNT_NO_UID32 0x0200 /* Disable 32-bit UIDs */ -#define EXT2_MOUNT_XATTR_USER 0x4000 /* Extended user attributes */ -#define EXT2_MOUNT_POSIX_ACL 0x8000 /* POSIX Access Control Lists */ +#define EXT2_MOUNT_CHECK 0x000001 /* Do mount-time checks */ +#define EXT2_MOUNT_OLDALLOC 0x000002 /* Don't use the new Orlov allocator */ +#define EXT2_MOUNT_GRPID 0x000004 /* Create files with directory's group */ +#define EXT2_MOUNT_DEBUG 0x000008 /* Some debugging messages */ +#define EXT2_MOUNT_ERRORS_CONT 0x000010 /* Continue on errors */ +#define EXT2_MOUNT_ERRORS_RO 0x000020 /* Remount fs ro on errors */ +#define EXT2_MOUNT_ERRORS_PANIC 0x000040 /* Panic on errors */ +#define EXT2_MOUNT_MINIX_DF 0x000080 /* Mimics the Minix statfs */ +#define EXT2_MOUNT_NOBH 0x000100 /* No buffer_heads */ +#define EXT2_MOUNT_NO_UID32 0x000200 /* Disable 32-bit UIDs */ +#define EXT2_MOUNT_XATTR_USER 0x004000 /* Extended user attributes */ +#define EXT2_MOUNT_POSIX_ACL 0x008000 /* POSIX Access Control Lists */ +#define EXT2_MOUNT_XIP 0x010000 /* Execute in place */ #define clear_opt(o, opt) o &= ~EXT2_MOUNT_##opt #define set_opt(o, opt) o |= EXT2_MOUNT_##opt diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h index 74ad317..4b6e1ab 100644 --- a/include/linux/ext3_fs.h +++ b/include/linux/ext3_fs.h @@ -358,6 +358,7 @@ struct ext3_inode { #define EXT3_MOUNT_RESERVATION 0x10000 /* Preallocation */ #define EXT3_MOUNT_BARRIER 0x20000 /* Use block barriers */ #define EXT3_MOUNT_NOBH 0x40000 /* No bufferheads */ +#define EXT3_MOUNT_QUOTA 0x80000 /* Some quota option set */ /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */ #ifndef _LINUX_EXT2_FS_H diff --git a/include/linux/ext3_jbd.h b/include/linux/ext3_jbd.h index e8292af..c8307c0 100644 --- a/include/linux/ext3_jbd.h +++ b/include/linux/ext3_jbd.h @@ -42,15 
+42,15 @@ * superblock only gets updated once, of course, so don't bother * counting that again for the quota updates. */ -#define EXT3_DATA_TRANS_BLOCKS (EXT3_SINGLEDATA_TRANS_BLOCKS + \ +#define EXT3_DATA_TRANS_BLOCKS(sb) (EXT3_SINGLEDATA_TRANS_BLOCKS + \ EXT3_XATTR_TRANS_BLOCKS - 2 + \ - 2*EXT3_QUOTA_TRANS_BLOCKS) + 2*EXT3_QUOTA_TRANS_BLOCKS(sb)) /* Delete operations potentially hit one directory's namespace plus an * entire inode, plus arbitrary amounts of bitmap/indirection data. Be * generous. We can grow the delete transaction later if necessary. */ -#define EXT3_DELETE_TRANS_BLOCKS (2 * EXT3_DATA_TRANS_BLOCKS + 64) +#define EXT3_DELETE_TRANS_BLOCKS(sb) (2 * EXT3_DATA_TRANS_BLOCKS(sb) + 64) /* Define an arbitrary limit for the amount of data we will anticipate * writing to any given transaction. For unbounded transactions such as @@ -74,14 +74,17 @@ #ifdef CONFIG_QUOTA /* Amount of blocks needed for quota update - we know that the structure was * allocated so we need to update only inode+data */ -#define EXT3_QUOTA_TRANS_BLOCKS 2 +#define EXT3_QUOTA_TRANS_BLOCKS(sb) (test_opt(sb, QUOTA) ? 2 : 0) /* Amount of blocks needed for quota insert/delete - we do some block writes * but inode, sb and group updates are done only once */ -#define EXT3_QUOTA_INIT_BLOCKS (DQUOT_MAX_WRITES*\ - (EXT3_SINGLEDATA_TRANS_BLOCKS-3)+3) +#define EXT3_QUOTA_INIT_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_INIT_ALLOC*\ + (EXT3_SINGLEDATA_TRANS_BLOCKS-3)+3+DQUOT_INIT_REWRITE) : 0) +#define EXT3_QUOTA_DEL_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_DEL_ALLOC*\ + (EXT3_SINGLEDATA_TRANS_BLOCKS-3)+3+DQUOT_DEL_REWRITE) : 0) #else -#define EXT3_QUOTA_TRANS_BLOCKS 0 -#define EXT3_QUOTA_INIT_BLOCKS 0 +#define EXT3_QUOTA_TRANS_BLOCKS(sb) 0 +#define EXT3_QUOTA_INIT_BLOCKS(sb) 0 +#define EXT3_QUOTA_DEL_BLOCKS(sb) 0 #endif int diff --git a/include/linux/fb.h b/include/linux/fb.h index b468bf4..bc24bee 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -524,11 +524,11 @@ struct fb_pixmap { u32 offset; /* current offset to buffer */ u32 buf_align; /* byte alignment of each bitmap */ u32 scan_align; /* alignment per scanline */ - u32 access_align; /* alignment per read/write */ + u32 access_align; /* alignment per read/write (bits) */ u32 flags; /* see FB_PIXMAP_* */ /* access methods */ - void (*outbuf)(struct fb_info *info, u8 *addr, u8 *src, unsigned int size); - u8 (*inbuf) (struct fb_info *info, u8 *addr); + void (*writeio)(struct fb_info *info, void __iomem *dst, void *src, unsigned int size); + void (*readio) (struct fb_info *info, void *dst, void __iomem *src, unsigned int size); }; @@ -816,18 +816,9 @@ extern int unregister_framebuffer(struct fb_info *fb_info); extern int fb_prepare_logo(struct fb_info *fb_info); extern int fb_show_logo(struct fb_info *fb_info); extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size); -extern void fb_iomove_buf_unaligned(struct fb_info *info, struct fb_pixmap *buf, - u8 *dst, u32 d_pitch, u8 *src, u32 idx, +extern void fb_pad_unaligned_buffer(u8 *dst, u32 d_pitch, u8 *src, u32 idx, u32 height, u32 shift_high, u32 shift_low, u32 mod); -extern void fb_iomove_buf_aligned(struct fb_info *info, struct fb_pixmap *buf, - u8 *dst, u32 d_pitch, u8 *src, u32 s_pitch, - u32 height); -extern void fb_sysmove_buf_unaligned(struct fb_info *info, struct fb_pixmap *buf, - u8 *dst, u32 d_pitch, u8 *src, u32 idx, - u32 height, u32 shift_high, u32 shift_low, u32 mod); -extern void fb_sysmove_buf_aligned(struct fb_info *info, struct fb_pixmap *buf, - u8 *dst, u32 
d_pitch, u8 *src, u32 s_pitch, - u32 height); +extern void fb_pad_aligned_buffer(u8 *dst, u32 d_pitch, u8 *src, u32 s_pitch, u32 height); extern void fb_set_suspend(struct fb_info *info, int state); extern int fb_get_color_depth(struct fb_var_screeninfo *var); extern int fb_get_options(char *name, char **option); diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h index 704fb76..8a7c821 100644 --- a/include/linux/fcntl.h +++ b/include/linux/fcntl.h @@ -25,6 +25,10 @@ #ifdef __KERNEL__ +#ifndef force_o_largefile +#define force_o_largefile() (BITS_PER_LONG != 32) +#endif + #if BITS_PER_LONG == 32 #define IS_GETLK32(cmd) ((cmd) == F_GETLK) #define IS_SETLK32(cmd) ((cmd) == F_SETLK) diff --git a/include/linux/font.h b/include/linux/font.h index fc2d690..8fc80a7 100644 --- a/include/linux/font.h +++ b/include/linux/font.h @@ -25,19 +25,23 @@ struct font_desc { #define VGA8x16_IDX 1 #define PEARL8x8_IDX 2 #define VGA6x11_IDX 3 -#define SUN8x16_IDX 4 -#define SUN12x22_IDX 5 -#define ACORN8x8_IDX 6 -#define MINI4x6_IDX 7 +#define FONT7x14_IDX 4 +#define FONT10x18_IDX 5 +#define SUN8x16_IDX 6 +#define SUN12x22_IDX 7 +#define ACORN8x8_IDX 8 +#define MINI4x6_IDX 9 extern struct font_desc font_vga_8x8, - font_vga_8x16, - font_pearl_8x8, - font_vga_6x11, - font_sun_8x16, - font_sun_12x22, - font_acorn_8x8, - font_mini_4x6; + font_vga_8x16, + font_pearl_8x8, + font_vga_6x11, + font_7x14, + font_10x18, + font_sun_8x16, + font_sun_12x22, + font_acorn_8x8, + font_mini_4x6; /* Find a font with a specific name */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 0180102..3ae8e37 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -220,6 +220,7 @@ extern int dir_notify_enable; struct iovec; struct nameidata; +struct kiocb; struct pipe_inode_info; struct poll_table_struct; struct kstatfs; @@ -240,7 +241,7 @@ typedef int (get_block_t)(struct inode *inode, sector_t iblock, typedef int (get_blocks_t)(struct inode *inode, sector_t iblock, unsigned long max_blocks, struct buffer_head *bh_result, int create); -typedef void (dio_iodone_t)(struct inode *inode, loff_t offset, +typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset, ssize_t bytes, void *private); /* @@ -302,7 +303,6 @@ struct iattr { struct page; struct address_space; struct writeback_control; -struct kiocb; struct address_space_operations { int (*writepage)(struct page *page, struct writeback_control *wbc); @@ -330,6 +330,8 @@ struct address_space_operations { int (*releasepage) (struct page *, int); ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov, loff_t offset, unsigned long nr_segs); + struct page* (*get_xip_page)(struct address_space *, sector_t, + int); }; struct backing_dev_info; @@ -581,7 +583,6 @@ struct file { atomic_t f_count; unsigned int f_flags; mode_t f_mode; - int f_error; loff_t f_pos; struct fown_struct f_owner; unsigned int f_uid, f_gid; @@ -674,6 +675,7 @@ struct file_lock { struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */ union { struct nfs_lock_info nfs_fl; + struct nfs4_lock_info nfs4_fl; } fl_u; }; @@ -883,7 +885,9 @@ struct block_device_operations { int (*open) (struct inode *, struct file *); int (*release) (struct inode *, struct file *); int (*ioctl) (struct inode *, struct file *, unsigned, unsigned long); + long (*unlocked_ioctl) (struct file *, unsigned, unsigned long); long (*compat_ioctl) (struct file *, unsigned, unsigned long); + int (*direct_access) (struct block_device *, sector_t, unsigned long *); int (*media_changed) (struct gendisk *); int 
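A short sketch of the force_o_largefile() hook added to fcntl.h above: an architecture header may predefine the macro before the generic default is seen (the personality-based override shown in the comment is hypothetical), and generic open-path code only tests it:

/* hypothetical arch override, in an <asm/...> header:
 *   #define force_o_largefile() (personality(PER_LINUX32) != PER_LINUX32)
 */
static inline int example_open_flags(int flags)
{
	if (force_o_largefile())
		flags |= O_LARGEFILE;
	return flags;
}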
(*revalidate_disk) (struct gendisk *); struct module *owner; @@ -1024,6 +1028,7 @@ struct super_operations { #define I_FREEING 16 #define I_CLEAR 32 #define I_NEW 64 +#define I_WILL_FREE 128 #define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES) @@ -1494,6 +1499,23 @@ extern loff_t remote_llseek(struct file *file, loff_t offset, int origin); extern int generic_file_open(struct inode * inode, struct file * filp); extern int nonseekable_open(struct inode * inode, struct file * filp); +#ifdef CONFIG_FS_XIP +extern ssize_t xip_file_read(struct file *filp, char __user *buf, size_t len, + loff_t *ppos); +extern ssize_t xip_file_sendfile(struct file *in_file, loff_t *ppos, + size_t count, read_actor_t actor, + void *target); +extern int xip_file_mmap(struct file * file, struct vm_area_struct * vma); +extern ssize_t xip_file_write(struct file *filp, const char __user *buf, + size_t len, loff_t *ppos); +extern int xip_truncate_page(struct address_space *mapping, loff_t from); +#else +static inline int xip_truncate_page(struct address_space *mapping, loff_t from) +{ + return 0; +} +#endif + static inline void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t * desc, read_actor_t actor) @@ -1657,6 +1679,52 @@ static inline void simple_transaction_set(struct file *file, size_t n) ar->size = n; } +/* + * simple attribute files + * + * These attributes behave similar to those in sysfs: + * + * Writing to an attribute immediately sets a value, an open file can be + * written to multiple times. + * + * Reading from an attribute creates a buffer from the value that might get + * read with multiple read calls. When the attribute has been read + * completely, no further read calls are possible until the file is opened + * again. + * + * All attributes contain a text representation of a numeric value + * that are accessed with the get() and set() functions. + */ +#define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \ +static int __fops ## _open(struct inode *inode, struct file *file) \ +{ \ + __simple_attr_check_format(__fmt, 0ull); \ + return simple_attr_open(inode, file, __get, __set, __fmt); \ +} \ +static struct file_operations __fops = { \ + .owner = THIS_MODULE, \ + .open = __fops ## _open, \ + .release = simple_attr_close, \ + .read = simple_attr_read, \ + .write = simple_attr_write, \ +}; + +static inline void __attribute__((format(printf, 1, 2))) +__simple_attr_check_format(const char *fmt, ...) 
+{ + /* don't do anything, just let the compiler check the arguments; */ +} + +int simple_attr_open(struct inode *inode, struct file *file, + u64 (*get)(void *), void (*set)(void *, u64), + const char *fmt); +int simple_attr_close(struct inode *inode, struct file *file); +ssize_t simple_attr_read(struct file *file, char __user *buf, + size_t len, loff_t *ppos); +ssize_t simple_attr_write(struct file *file, const char __user *buf, + size_t len, loff_t *ppos); + + #ifdef CONFIG_SECURITY static inline char *alloc_secdata(void) { diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h index faaff4c..70f54af 100644 --- a/include/linux/fsl_devices.h +++ b/include/linux/fsl_devices.h @@ -51,6 +51,7 @@ struct gianfar_platform_data { /* board specific information */ u32 board_flags; + u32 phy_flags; u32 phyid; u32 interruptPHY; u8 mac_addr[6]; @@ -61,9 +62,14 @@ struct gianfar_platform_data { #define FSL_GIANFAR_DEV_HAS_COALESCE 0x00000002 #define FSL_GIANFAR_DEV_HAS_RMON 0x00000004 #define FSL_GIANFAR_DEV_HAS_MULTI_INTR 0x00000008 +#define FSL_GIANFAR_DEV_HAS_CSUM 0x00000010 +#define FSL_GIANFAR_DEV_HAS_VLAN 0x00000020 +#define FSL_GIANFAR_DEV_HAS_EXTENDED_HASH 0x00000040 +#define FSL_GIANFAR_DEV_HAS_PADDING 0x00000080 /* Flags in gianfar_platform_data */ -#define FSL_GIANFAR_BRD_HAS_PHY_INTR 0x00000001 /* if not set use a timer */ +#define FSL_GIANFAR_BRD_HAS_PHY_INTR 0x00000001 /* set or use a timer */ +#define FSL_GIANFAR_BRD_IS_REDUCED 0x00000002 /* Set if RGMII, RMII */ struct fsl_i2c_platform_data { /* device specific information */ diff --git a/include/linux/gameport.h b/include/linux/gameport.h index b1272f82..cd623ec 100644 --- a/include/linux/gameport.h +++ b/include/linux/gameport.h @@ -67,6 +67,8 @@ int gameport_open(struct gameport *gameport, struct gameport_driver *drv, int mo void gameport_close(struct gameport *gameport); void gameport_rescan(struct gameport *gameport); +#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE)) + void __gameport_register_port(struct gameport *gameport, struct module *owner); static inline void gameport_register_port(struct gameport *gameport) { @@ -75,6 +77,29 @@ static inline void gameport_register_port(struct gameport *gameport) void gameport_unregister_port(struct gameport *gameport); +void gameport_set_phys(struct gameport *gameport, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); + +#else + +static inline void gameport_register_port(struct gameport *gameport) +{ + return; +} + +static inline void gameport_unregister_port(struct gameport *gameport) +{ + return; +} + +static inline void gameport_set_phys(struct gameport *gameport, + const char *fmt, ...) +{ + return; +} + +#endif + static inline struct gameport *gameport_allocate_port(void) { struct gameport *gameport = kcalloc(1, sizeof(struct gameport), GFP_KERNEL); @@ -92,9 +117,6 @@ static inline void gameport_set_name(struct gameport *gameport, const char *name strlcpy(gameport->name, name, sizeof(gameport->name)); } -void gameport_set_phys(struct gameport *gameport, const char *fmt, ...) - __attribute__ ((format (printf, 2, 3))); - /* * Use the following fucntions to manipulate gameport's per-port * driver-specific data. diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h new file mode 100644 index 0000000..7fd0576 --- /dev/null +++ b/include/linux/genalloc.h @@ -0,0 +1,40 @@ +/* + * Basic general purpose allocator for managing special purpose memory + * not managed by the regular kmalloc/kfree interface. 
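Referring back to the DEFINE_SIMPLE_ATTRIBUTE helper introduced in fs.h above, a sketch of the expected usage pattern: a single u64 value exposed through get()/set() callbacks, with the file_operations generated by the macro (all example_* names are invented; the void *data argument is whatever the filesystem, typically debugfs, wires into the inode):

static u64 example_threshold;

static u64 example_get(void *data)
{
	return *(u64 *)data;
}

static void example_set(void *data, u64 val)
{
	*(u64 *)data = val;
}

DEFINE_SIMPLE_ATTRIBUTE(example_fops, example_get, example_set, "%llu\n");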
+ * Uses for this includes on-device special memory, uncached memory + * etc. + * + * This code is based on the buddy allocator found in the sym53c8xx_2 + * driver, adapted for general purpose use. + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. + */ + +#include <linux/spinlock.h> + +#define ALLOC_MIN_SHIFT 5 /* 32 bytes minimum */ +/* + * Link between free memory chunks of a given size. + */ +struct gen_pool_link { + struct gen_pool_link *next; +}; + +/* + * Memory pool descriptor. + */ +struct gen_pool { + spinlock_t lock; + unsigned long (*get_new_chunk)(struct gen_pool *); + struct gen_pool *next; + struct gen_pool_link *h; + unsigned long private; + int max_chunk_shift; +}; + +unsigned long gen_pool_alloc(struct gen_pool *poolp, int size); +void gen_pool_free(struct gen_pool *mp, unsigned long ptr, int size); +struct gen_pool *gen_pool_create(int nr_chunks, int max_chunk_shift, + unsigned long (*fp)(struct gen_pool *), + unsigned long data); diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 47dedaf..01796c4 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -224,7 +224,7 @@ static inline void free_disk_stats(struct gendisk *disk) extern void disk_round_stats(struct gendisk *disk); /* drivers/block/genhd.c */ -extern int get_blkdev_list(char *); +extern int get_blkdev_list(char *, int); extern void add_disk(struct gendisk *disk); extern void del_gendisk(struct gendisk *gp); extern void unlink_gendisk(struct gendisk *gp); @@ -403,6 +403,7 @@ extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev); extern void add_partition(struct gendisk *, int, sector_t, sector_t); extern void delete_partition(struct gendisk *, int); +extern struct gendisk *alloc_disk_node(int minors, int node_id); extern struct gendisk *alloc_disk(int minors); extern struct kobject *get_disk(struct gendisk *disk); extern void put_disk(struct gendisk *disk); diff --git a/include/linux/gfp.h b/include/linux/gfp.h index af7407e..8d6bf60 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -39,6 +39,7 @@ struct vm_area_struct; #define __GFP_COMP 0x4000u /* Add compound page metadata */ #define __GFP_ZERO 0x8000u /* Return zeroed page on success */ #define __GFP_NOMEMALLOC 0x10000u /* Don't use emergency reserves */ +#define __GFP_NORECLAIM 0x20000u /* No realy zone reclaim during allocation */ #define __GFP_BITS_SHIFT 20 /* Room for 20 __GFP_FOO bits */ #define __GFP_BITS_MASK ((1 << __GFP_BITS_SHIFT) - 1) @@ -47,7 +48,7 @@ struct vm_area_struct; #define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \ __GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \ __GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP| \ - __GFP_NOMEMALLOC) + __GFP_NOMEMALLOC|__GFP_NORECLAIM) #define GFP_ATOMIC (__GFP_HIGH) #define GFP_NOIO (__GFP_WAIT) @@ -132,5 +133,10 @@ extern void FASTCALL(free_cold_page(struct page *page)); #define free_page(addr) free_pages((addr),0) void page_alloc_init(void); +#ifdef CONFIG_NUMA +void drain_remote_pages(void); +#else +static inline void drain_remote_pages(void) { }; +#endif #endif /* __LINUX_GFP_H */ diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index ebc712e..8336dba 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -43,13 +43,17 @@ #define __IRQ_MASK(x) ((1UL << (x))-1) #define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT) -#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT) #define SOFTIRQ_MASK 
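A sketch of the early genalloc API declared above, using only the signatures shown there: the pool asks its get_new_chunk() callback for chunks of (1 << max_chunk_shift) bytes, and the unsigned long "data" argument lands in pool->private. The SRAM naming is illustrative only:

static unsigned long example_get_chunk(struct gen_pool *pool)
{
	/* hand back the kernel virtual address of one chunk of
	 * (1 << pool->max_chunk_shift) bytes; here the driver stashed
	 * its SRAM base in pool->private via gen_pool_create()'s data arg */
	return pool->private;
}

static void example_use_pool(unsigned long sram_base)
{
	struct gen_pool *pool;
	unsigned long buf;

	pool = gen_pool_create(1, 17, example_get_chunk, sram_base);
	if (!pool)
		return;

	buf = gen_pool_alloc(pool, 256);	/* buddy-style; tiny sizes round up per ALLOC_MIN_SHIFT */
	if (buf)
		gen_pool_free(pool, buf, 256);
}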
(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) +#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT) #define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT) #define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT) #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) +#if PREEMPT_ACTIVE < (1 << (HARDIRQ_SHIFT + HARDIRQ_BITS)) +#error PREEMPT_ACTIVE is too low! +#endif + #define hardirq_count() (preempt_count() & HARDIRQ_MASK) #define softirq_count() (preempt_count() & SOFTIRQ_MASK) #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK)) diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h index 503194e..ed2927e 100644 --- a/include/linux/hdlc.h +++ b/include/linux/hdlc.h @@ -1,7 +1,7 @@ /* * Generic HDLC support routines for Linux * - * Copyright (C) 1999-2003 Krzysztof Halasa <khc@pm.waw.pl> + * Copyright (C) 1999-2005 Krzysztof Halasa <khc@pm.waw.pl> * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License @@ -41,6 +41,7 @@ #define LMI_NONE 1 /* No LMI, all PVCs are static */ #define LMI_ANSI 2 /* ANSI Annex D */ #define LMI_CCITT 3 /* ITU-T Annex A */ +#define LMI_CISCO 4 /* The "original" LMI, aka Gang of Four */ #define HDLC_MAX_MTU 1500 /* Ethernet 1500 bytes */ #define HDLC_MAX_MRU (HDLC_MAX_MTU + 10 + 14 + 4) /* for ETH+VLAN over FR */ @@ -89,6 +90,7 @@ typedef struct pvc_device_struct { unsigned int deleted: 1; unsigned int fecn: 1; unsigned int becn: 1; + unsigned int bandwidth; /* Cisco LMI reporting only */ }state; }pvc_device; diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 2a7e6c6..6bece92 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -28,6 +28,7 @@ static inline void *kmap(struct page *page) #define kmap_atomic(page, idx) page_address(page) #define kunmap_atomic(addr, idx) do { } while (0) +#define kmap_atomic_pfn(pfn, idx) page_address(pfn_to_page(pfn)) #define kmap_atomic_to_page(ptr) virt_to_page(ptr) #endif /* CONFIG_HIGHMEM */ diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 6af1ae4..f529d14 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -4,6 +4,7 @@ #ifdef CONFIG_HUGETLB_PAGE #include <linux/mempolicy.h> +#include <asm/tlbflush.h> struct ctl_table; @@ -22,12 +23,6 @@ int hugetlb_report_meminfo(char *); int hugetlb_report_node_meminfo(int, char *); int is_hugepage_mem_enough(size_t); unsigned long hugetlb_total_pages(void); -struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, - int write); -struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, - pmd_t *pmd, int write); -int is_aligned_hugepage_range(unsigned long addr, unsigned long len); -int pmd_huge(pmd_t pmd); struct page *alloc_huge_page(void); void free_huge_page(struct page *); @@ -35,6 +30,17 @@ extern unsigned long max_huge_pages; extern const unsigned long hugetlb_zero, hugetlb_infinity; extern int sysctl_hugetlb_shm_group; +/* arch callbacks */ + +pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr); +pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr); +struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, + int write); +struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, + pmd_t *pmd, int write); +int is_aligned_hugepage_range(unsigned long addr, unsigned long len); +int pmd_huge(pmd_t pmd); + #ifndef ARCH_HAS_HUGEPAGE_ONLY_RANGE #define is_hugepage_only_range(mm, addr, len) 0 #define hugetlb_free_pgd_range(tlb, addr, end, floor, 
ceiling) \ @@ -48,6 +54,28 @@ extern int sysctl_hugetlb_shm_group; int prepare_hugepage_range(unsigned long addr, unsigned long len); #endif +#ifndef ARCH_HAS_SETCLEAR_HUGE_PTE +#define set_huge_pte_at(mm, addr, ptep, pte) set_pte_at(mm, addr, ptep, pte) +#define huge_ptep_get_and_clear(mm, addr, ptep) ptep_get_and_clear(mm, addr, ptep) +#else +void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte); +pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep); +#endif + +#ifndef ARCH_HAS_HUGETLB_PREFAULT_HOOK +#define hugetlb_prefault_arch_hook(mm) do { } while (0) +#else +void hugetlb_prefault_arch_hook(struct mm_struct *mm); +#endif + +#ifndef ARCH_HAS_HUGETLB_CLEAN_STALE_PGTABLE +#define hugetlb_clean_stale_pgtable(pte) BUG() +#else +void hugetlb_clean_stale_pgtable(pte_t *pte); +#endif + #else /* !CONFIG_HUGETLB_PAGE */ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma) diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h new file mode 100644 index 0000000..1b5018a --- /dev/null +++ b/include/linux/hwmon-sysfs.h @@ -0,0 +1,36 @@ +/* + * hwmon-sysfs.h - hardware monitoring chip driver sysfs defines + * + * Copyright (C) 2005 Yani Ioannou <yani.ioannou@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ +#ifndef _LINUX_HWMON_SYSFS_H +#define _LINUX_HWMON_SYSFS_H + +struct sensor_device_attribute{ + struct device_attribute dev_attr; + int index; +}; +#define to_sensor_dev_attr(_dev_attr) \ + container_of(_dev_attr, struct sensor_device_attribute, dev_attr) + +#define SENSOR_DEVICE_ATTR(_name,_mode,_show,_store,_index) \ +struct sensor_device_attribute sensor_dev_attr_##_name = { \ + .dev_attr = __ATTR(_name,_mode,_show,_store), \ + .index = _index, \ +} + +#endif /* _LINUX_HWMON_SYSFS_H */ diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h index 89270ce..33f0825 100644 --- a/include/linux/i2c-id.h +++ b/include/linux/i2c-id.h @@ -108,6 +108,7 @@ #define I2C_DRIVERID_TDA7313 62 /* TDA7313 audio processor */ #define I2C_DRIVERID_MAX6900 63 /* MAX6900 real-time clock */ #define I2C_DRIVERID_SAA7114H 64 /* video decoder */ +#define I2C_DRIVERID_DS1374 65 /* DS1374 real time clock */ #define I2C_DRIVERID_EXP0 0xF0 /* experimental use id's */ diff --git a/include/linux/i2c-vid.h b/include/linux/i2c-vid.h index 974835e3..41d0635 100644 --- a/include/linux/i2c-vid.h +++ b/include/linux/i2c-vid.h @@ -97,3 +97,15 @@ static inline int vid_from_reg(int val, int vrm) 2050 - (val) * 50); } } + +static inline int vid_to_reg(int val, int vrm) +{ + switch (vrm) { + case 91: /* VRM 9.1 */ + case 90: /* VRM 9.0 */ + return ((val >= 1100) && (val <= 1850) ? 
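A sketch of how the new hwmon-sysfs.h wrapper above is meant to be used in a sensor driver. It assumes the three-argument device_attribute show() prototype (which passes the attribute back to the driver and is what makes to_sensor_dev_attr() useful); the temperature array and attribute names are invented:

static int example_temps[2] = { 42000, 43000 };	/* millidegrees, made up */

static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);

	return sprintf(buf, "%d\n", example_temps[sattr->index]);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);

Each attribute would then be registered through its embedded dev_attr member, e.g. via device_create_file(dev, &sensor_dev_attr_temp1_input.dev_attr).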
+ ((18499 - val * 10) / 25 + 5) / 10 : -1); + default: + return -1; + } +} diff --git a/include/linux/i2c.h b/include/linux/i2c.h index ebcd745..be837b1 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -290,11 +290,8 @@ static inline void i2c_set_adapdata (struct i2c_adapter *dev, void *data) */ struct i2c_client_address_data { unsigned short *normal_i2c; - unsigned short *normal_i2c_range; unsigned short *probe; - unsigned short *probe_range; unsigned short *ignore; - unsigned short *ignore_range; unsigned short *force; }; @@ -563,24 +560,15 @@ union i2c_smbus_data { #define I2C_CLIENT_INSMOD \ I2C_CLIENT_MODULE_PARM(probe, \ "List of adapter,address pairs to scan additionally"); \ - I2C_CLIENT_MODULE_PARM(probe_range, \ - "List of adapter,start-addr,end-addr triples to scan " \ - "additionally"); \ I2C_CLIENT_MODULE_PARM(ignore, \ "List of adapter,address pairs not to scan"); \ - I2C_CLIENT_MODULE_PARM(ignore_range, \ - "List of adapter,start-addr,end-addr triples not to " \ - "scan"); \ I2C_CLIENT_MODULE_PARM(force, \ "List of adapter,address pairs to boldly assume " \ "to be present"); \ static struct i2c_client_address_data addr_data = { \ .normal_i2c = normal_i2c, \ - .normal_i2c_range = normal_i2c_range, \ .probe = probe, \ - .probe_range = probe_range, \ .ignore = ignore, \ - .ignore_range = ignore_range, \ .force = force, \ } diff --git a/include/linux/i2o-dev.h b/include/linux/i2o-dev.h index ef7f644..36fd18c 100644 --- a/include/linux/i2o-dev.h +++ b/include/linux/i2o-dev.h @@ -24,6 +24,13 @@ #define MAX_I2O_CONTROLLERS 32 //#include <linux/ioctl.h> +#ifndef __KERNEL__ + +typedef unsigned char u8; +typedef unsigned short u16; +typedef unsigned int u32; + +#endif /* __KERNEL__ */ /* * I2O Control IOCTLs and structures @@ -113,6 +120,10 @@ struct i2o_evt_get { int lost; }; +typedef struct i2o_sg_io_hdr { + unsigned int flags; /* see I2O_DPT_SG_IO_FLAGS */ +} i2o_sg_io_hdr_t; + /************************************************************************** * HRT related constants and structures **************************************************************************/ @@ -126,14 +137,6 @@ struct i2o_evt_get { #define I2O_BUS_CARDBUS 7 #define I2O_BUS_UNKNOWN 0x80 -#ifndef __KERNEL__ - -typedef unsigned char u8; -typedef unsigned short u16; -typedef unsigned int u32; - -#endif /* __KERNEL__ */ - typedef struct _i2o_pci_bus { u8 PciFunctionNumber; u8 PciDeviceNumber; @@ -333,7 +336,7 @@ typedef struct _i2o_status_block { #define I2O_CLASS_ATE_PERIPHERAL 0x061 #define I2O_CLASS_FLOPPY_CONTROLLER 0x070 #define I2O_CLASS_FLOPPY_DEVICE 0x071 -#define I2O_CLASS_BUS_ADAPTER_PORT 0x080 +#define I2O_CLASS_BUS_ADAPTER 0x080 #define I2O_CLASS_PEER_TRANSPORT_AGENT 0x090 #define I2O_CLASS_PEER_TRANSPORT 0x091 #define I2O_CLASS_END 0xfff @@ -399,4 +402,26 @@ typedef struct _i2o_status_block { #define ADAPTER_STATE_FAILED 0x10 #define ADAPTER_STATE_FAULTED 0x11 +/* + * Software module types + */ +#define I2O_SOFTWARE_MODULE_IRTOS 0x11 +#define I2O_SOFTWARE_MODULE_IOP_PRIVATE 0x22 +#define I2O_SOFTWARE_MODULE_IOP_CONFIG 0x23 + +/* + * Vendors + */ +#define I2O_VENDOR_DPT 0x001b + +/* + * DPT / Adaptec specific values for i2o_sg_io_hdr flags. 
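With the *_range address lists removed from struct i2c_client_address_data above, a chip driver now supplies only a flat address list; the probe/ignore/force module parameters and addr_data come from the macro. A sketch (addresses are illustrative), with addr_data then typically handed to i2c_probe() together with the driver's detect callback:

static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };

/* declares the probe/ignore/force module parameters and addr_data */
I2C_CLIENT_INSMOD;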
+ */ +#define I2O_DPT_SG_FLAG_INTERPRET 0x00010000 +#define I2O_DPT_SG_FLAG_PHYSICAL 0x00020000 + +#define I2O_DPT_FLASH_FRAG_SIZE 0x10000 +#define I2O_DPT_FLASH_READ 0x0101 +#define I2O_DPT_FLASH_WRITE 0x0102 + #endif /* _I2O_DEV_H */ diff --git a/include/linux/i2o.h b/include/linux/i2o.h index ea9a3ad..bdc286e 100644 --- a/include/linux/i2o.h +++ b/include/linux/i2o.h @@ -119,12 +119,21 @@ struct i2o_driver { }; /* - * Contains all information which are necessary for DMA operations + * Contains DMA mapped address information */ struct i2o_dma { void *virt; dma_addr_t phys; - u32 len; + size_t len; +}; + +/* + * Contains IO mapped address information + */ +struct i2o_io { + void __iomem *virt; + unsigned long phys; + unsigned long len; }; /* @@ -147,28 +156,25 @@ struct i2o_controller { struct pci_dev *pdev; /* PCI device */ - unsigned int short_req:1; /* use small block sizes */ + unsigned int promise:1; /* Promise controller */ + unsigned int adaptec:1; /* DPT / Adaptec controller */ + unsigned int raptor:1; /* split bar */ unsigned int no_quiesce:1; /* dont quiesce before reset */ - unsigned int raptor:1; /* split bar */ - unsigned int promise:1; /* Promise controller */ - -#ifdef CONFIG_MTRR - int mtrr_reg0; - int mtrr_reg1; -#endif + unsigned int short_req:1; /* use small block sizes */ + unsigned int limit_sectors:1; /* limit number of sectors / request */ + unsigned int pae_support:1; /* controller has 64-bit SGL support */ struct list_head devices; /* list of I2O devices */ - - struct notifier_block *event_notifer; /* Events */ - atomic_t users; struct list_head list; /* Controller list */ - void __iomem *post_port; /* Inbout port address */ - void __iomem *reply_port; /* Outbound port address */ - void __iomem *irq_mask; /* Interrupt register address */ + + void __iomem *in_port; /* Inbout port address */ + void __iomem *out_port; /* Outbound port address */ + void __iomem *irq_status; /* Interrupt status register address */ + void __iomem *irq_mask; /* Interrupt mask register address */ /* Dynamic LCT related data */ - struct i2o_dma status; /* status of IOP */ + struct i2o_dma status; /* IOP status block */ struct i2o_dma hrt; /* HW Resource Table */ i2o_lct *lct; /* Logical Config Table */ @@ -176,21 +182,19 @@ struct i2o_controller { struct semaphore lct_lock; /* Lock for LCT updates */ struct i2o_dma status_block; /* IOP status block */ - struct i2o_dma base; /* controller messaging unit */ - struct i2o_dma in_queue; /* inbound message queue Host->IOP */ + struct i2o_io base; /* controller messaging unit */ + struct i2o_io in_queue; /* inbound message queue Host->IOP */ struct i2o_dma out_queue; /* outbound message queue IOP->Host */ - unsigned int battery:1; /* Has a battery backup */ + unsigned int battery:1; /* Has a battery backup */ unsigned int io_alloc:1; /* An I/O resource was allocated */ unsigned int mem_alloc:1; /* A memory resource was allocated */ struct resource io_resource; /* I/O resource allocated to the IOP */ struct resource mem_resource; /* Mem resource allocated to the IOP */ - struct proc_dir_entry *proc_entry; /* /proc dir */ - - struct list_head bus_list; /* list of busses on IOP */ struct device device; + struct class_device classdev; /* I2O controller class */ struct i2o_device *exec; /* Executive */ #if BITS_PER_LONG == 64 spinlock_t context_list_lock; /* lock for context_list */ @@ -241,9 +245,10 @@ struct i2o_sys_tbl { extern struct list_head i2o_controllers; /* Message functions */ -static inline u32 i2o_msg_get(struct i2o_controller *, struct 
i2o_message __iomem **); -extern u32 i2o_msg_get_wait(struct i2o_controller *, struct i2o_message __iomem **, - int); +static inline u32 i2o_msg_get(struct i2o_controller *, + struct i2o_message __iomem **); +extern u32 i2o_msg_get_wait(struct i2o_controller *, + struct i2o_message __iomem **, int); static inline void i2o_msg_post(struct i2o_controller *, u32); static inline int i2o_msg_post_wait(struct i2o_controller *, u32, unsigned long); @@ -252,15 +257,6 @@ extern int i2o_msg_post_wait_mem(struct i2o_controller *, u32, unsigned long, extern void i2o_msg_nop(struct i2o_controller *, u32); static inline void i2o_flush_reply(struct i2o_controller *, u32); -/* DMA handling functions */ -static inline int i2o_dma_alloc(struct device *, struct i2o_dma *, size_t, - unsigned int); -static inline void i2o_dma_free(struct device *, struct i2o_dma *); -int i2o_dma_realloc(struct device *, struct i2o_dma *, size_t, unsigned int); - -static inline int i2o_dma_map(struct device *, struct i2o_dma *); -static inline void i2o_dma_unmap(struct device *, struct i2o_dma *); - /* IOP functions */ extern int i2o_status_get(struct i2o_controller *); @@ -285,6 +281,16 @@ static inline u32 i2o_ptr_high(void *ptr) { return (u32) ((u64) ptr >> 32); }; + +static inline u32 i2o_dma_low(dma_addr_t dma_addr) +{ + return (u32) (u64) dma_addr; +}; + +static inline u32 i2o_dma_high(dma_addr_t dma_addr) +{ + return (u32) ((u64) dma_addr >> 32); +}; #else static inline u32 i2o_cntxt_list_add(struct i2o_controller *c, void *ptr) { @@ -315,8 +321,246 @@ static inline u32 i2o_ptr_high(void *ptr) { return 0; }; + +static inline u32 i2o_dma_low(dma_addr_t dma_addr) +{ + return (u32) dma_addr; +}; + +static inline u32 i2o_dma_high(dma_addr_t dma_addr) +{ + return 0; +}; #endif +/** + * i2o_sg_tablesize - Calculate the maximum number of elements in a SGL + * @c: I2O controller for which the calculation should be done + * @body_size: maximum body size used for message in 32-bit words. + * + * Return the maximum number of SG elements in a SG list. + */ +static inline u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size) +{ + i2o_status_block *sb = c->status_block.virt; + u16 sg_count = + (sb->inbound_frame_size - sizeof(struct i2o_message) / 4) - + body_size; + + if (c->pae_support) { + /* + * for 64-bit a SG attribute element must be added and each + * SG element needs 12 bytes instead of 8. + */ + sg_count -= 2; + sg_count /= 3; + } else + sg_count /= 2; + + if (c->short_req && (sg_count > 8)) + sg_count = 8; + + return sg_count; +}; + +/** + * i2o_dma_map_single - Map pointer to controller and fill in I2O message. + * @c: I2O controller + * @ptr: pointer to the data which should be mapped + * @size: size of data in bytes + * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE + * @sg_ptr: pointer to the SG list inside the I2O message + * + * This function does all necessary DMA handling and also writes the I2O + * SGL elements into the I2O message. For details on DMA handling see also + * dma_map_single(). The pointer sg_ptr will only be set to the end of the + * SG list if the allocation was successful. + * + * Returns DMA address which must be checked for failures using + * dma_mapping_error(). 
+ */ +static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr, + size_t size, + enum dma_data_direction direction, + u32 __iomem ** sg_ptr) +{ + u32 sg_flags; + u32 __iomem *mptr = *sg_ptr; + dma_addr_t dma_addr; + + switch (direction) { + case DMA_TO_DEVICE: + sg_flags = 0xd4000000; + break; + case DMA_FROM_DEVICE: + sg_flags = 0xd0000000; + break; + default: + return 0; + } + + dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction); + if (!dma_mapping_error(dma_addr)) { +#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 + if ((sizeof(dma_addr_t) > 4) && c->pae_support) { + writel(0x7C020002, mptr++); + writel(PAGE_SIZE, mptr++); + } +#endif + + writel(sg_flags | size, mptr++); + writel(i2o_dma_low(dma_addr), mptr++); +#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 + if ((sizeof(dma_addr_t) > 4) && c->pae_support) + writel(i2o_dma_high(dma_addr), mptr++); +#endif + *sg_ptr = mptr; + } + return dma_addr; +}; + +/** + * i2o_dma_map_sg - Map a SG List to controller and fill in I2O message. + * @c: I2O controller + * @sg: SG list to be mapped + * @sg_count: number of elements in the SG list + * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE + * @sg_ptr: pointer to the SG list inside the I2O message + * + * This function does all necessary DMA handling and also writes the I2O + * SGL elements into the I2O message. For details on DMA handling see also + * dma_map_sg(). The pointer sg_ptr will only be set to the end of the SG + * list if the allocation was successful. + * + * Returns 0 on failure or 1 on success. + */ +static inline int i2o_dma_map_sg(struct i2o_controller *c, + struct scatterlist *sg, int sg_count, + enum dma_data_direction direction, + u32 __iomem ** sg_ptr) +{ + u32 sg_flags; + u32 __iomem *mptr = *sg_ptr; + + switch (direction) { + case DMA_TO_DEVICE: + sg_flags = 0x14000000; + break; + case DMA_FROM_DEVICE: + sg_flags = 0x10000000; + break; + default: + return 0; + } + + sg_count = dma_map_sg(&c->pdev->dev, sg, sg_count, direction); + if (!sg_count) + return 0; + +#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 + if ((sizeof(dma_addr_t) > 4) && c->pae_support) { + writel(0x7C020002, mptr++); + writel(PAGE_SIZE, mptr++); + } +#endif + + while (sg_count-- > 0) { + if (!sg_count) + sg_flags |= 0xC0000000; + writel(sg_flags | sg_dma_len(sg), mptr++); + writel(i2o_dma_low(sg_dma_address(sg)), mptr++); +#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 + if ((sizeof(dma_addr_t) > 4) && c->pae_support) + writel(i2o_dma_high(sg_dma_address(sg)), mptr++); +#endif + sg++; + } + *sg_ptr = mptr; + + return 1; +}; + +/** + * i2o_dma_alloc - Allocate DMA memory + * @dev: struct device pointer to the PCI device of the I2O controller + * @addr: i2o_dma struct which should get the DMA buffer + * @len: length of the new DMA memory + * @gfp_mask: GFP mask + * + * Allocate a coherent DMA memory and write the pointers into addr. + * + * Returns 0 on success or -ENOMEM on failure. 
+ */ +static inline int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr, + size_t len, unsigned int gfp_mask) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int dma_64 = 0; + + if ((sizeof(dma_addr_t) > 4) && (pdev->dma_mask == DMA_64BIT_MASK)) { + dma_64 = 1; + if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) + return -ENOMEM; + } + + addr->virt = dma_alloc_coherent(dev, len, &addr->phys, gfp_mask); + + if ((sizeof(dma_addr_t) > 4) && dma_64) + if (pci_set_dma_mask(pdev, DMA_64BIT_MASK)) + printk(KERN_WARNING "i2o: unable to set 64-bit DMA"); + + if (!addr->virt) + return -ENOMEM; + + memset(addr->virt, 0, len); + addr->len = len; + + return 0; +}; + +/** + * i2o_dma_free - Free DMA memory + * @dev: struct device pointer to the PCI device of the I2O controller + * @addr: i2o_dma struct which contains the DMA buffer + * + * Free a coherent DMA memory and set virtual address of addr to NULL. + */ +static inline void i2o_dma_free(struct device *dev, struct i2o_dma *addr) +{ + if (addr->virt) { + if (addr->phys) + dma_free_coherent(dev, addr->len, addr->virt, + addr->phys); + else + kfree(addr->virt); + addr->virt = NULL; + } +}; + +/** + * i2o_dma_realloc - Realloc DMA memory + * @dev: struct device pointer to the PCI device of the I2O controller + * @addr: pointer to a i2o_dma struct DMA buffer + * @len: new length of memory + * @gfp_mask: GFP mask + * + * If there was something allocated in the addr, free it first. If len > 0 + * than try to allocate it and write the addresses back to the addr + * structure. If len == 0 set the virtual address to NULL. + * + * Returns the 0 on success or negative error code on failure. + */ +static inline int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr, + size_t len, unsigned int gfp_mask) +{ + i2o_dma_free(dev, addr); + + if (len) + return i2o_dma_alloc(dev, addr, len, gfp_mask); + + return 0; +}; + /* I2O driver (OSM) functions */ extern int i2o_driver_register(struct i2o_driver *); extern void i2o_driver_unregister(struct i2o_driver *); @@ -385,49 +629,11 @@ extern int i2o_device_claim_release(struct i2o_device *); /* Exec OSM functions */ extern int i2o_exec_lct_get(struct i2o_controller *); -/* device to i2o_device and driver to i2o_driver convertion functions */ +/* device / driver / kobject conversion functions */ #define to_i2o_driver(drv) container_of(drv,struct i2o_driver, driver) #define to_i2o_device(dev) container_of(dev, struct i2o_device, device) - -/* - * Messenger inlines - */ -static inline u32 I2O_POST_READ32(struct i2o_controller *c) -{ - rmb(); - return readl(c->post_port); -}; - -static inline void I2O_POST_WRITE32(struct i2o_controller *c, u32 val) -{ - wmb(); - writel(val, c->post_port); -}; - -static inline u32 I2O_REPLY_READ32(struct i2o_controller *c) -{ - rmb(); - return readl(c->reply_port); -}; - -static inline void I2O_REPLY_WRITE32(struct i2o_controller *c, u32 val) -{ - wmb(); - writel(val, c->reply_port); -}; - -static inline u32 I2O_IRQ_READ32(struct i2o_controller *c) -{ - rmb(); - return readl(c->irq_mask); -}; - -static inline void I2O_IRQ_WRITE32(struct i2o_controller *c, u32 val) -{ - wmb(); - writel(val, c->irq_mask); - wmb(); -}; +#define to_i2o_controller(dev) container_of(dev, struct i2o_controller, device) +#define kobj_to_i2o_device(kobj) to_i2o_device(container_of(kobj, struct device, kobj)) /** * i2o_msg_get - obtain an I2O message from the IOP @@ -443,11 +649,11 @@ static inline void I2O_IRQ_WRITE32(struct i2o_controller *c, u32 val) * available returns I2O_QUEUE_EMPTY and msg is leaved 
untouched. */ static inline u32 i2o_msg_get(struct i2o_controller *c, - struct i2o_message __iomem **msg) + struct i2o_message __iomem ** msg) { - u32 m; + u32 m = readl(c->in_port); - if ((m = I2O_POST_READ32(c)) != I2O_QUEUE_EMPTY) + if (m != I2O_QUEUE_EMPTY) *msg = c->in_queue.virt + m; return m; @@ -462,7 +668,7 @@ static inline u32 i2o_msg_get(struct i2o_controller *c, */ static inline void i2o_msg_post(struct i2o_controller *c, u32 m) { - I2O_POST_WRITE32(c, m); + writel(m, c->in_port); }; /** @@ -491,12 +697,10 @@ static inline int i2o_msg_post_wait(struct i2o_controller *c, u32 m, * The I2O controller must be informed that the reply message is not needed * anymore. If you forget to flush the reply, the message frame can't be * used by the controller anymore and is therefore lost. - * - * FIXME: is there a timeout after which the controller reuse the message? */ static inline void i2o_flush_reply(struct i2o_controller *c, u32 m) { - I2O_REPLY_WRITE32(c, m); + writel(m, c->out_port); }; /** @@ -530,97 +734,13 @@ static inline struct i2o_message *i2o_msg_out_to_virt(struct i2o_controller *c, * work for receive side messages as they are kmalloc objects * in a different pool. */ -static inline struct i2o_message __iomem *i2o_msg_in_to_virt(struct i2o_controller *c, - u32 m) +static inline struct i2o_message __iomem *i2o_msg_in_to_virt(struct + i2o_controller *c, + u32 m) { return c->in_queue.virt + m; }; -/** - * i2o_dma_alloc - Allocate DMA memory - * @dev: struct device pointer to the PCI device of the I2O controller - * @addr: i2o_dma struct which should get the DMA buffer - * @len: length of the new DMA memory - * @gfp_mask: GFP mask - * - * Allocate a coherent DMA memory and write the pointers into addr. - * - * Returns 0 on success or -ENOMEM on failure. - */ -static inline int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr, - size_t len, unsigned int gfp_mask) -{ - addr->virt = dma_alloc_coherent(dev, len, &addr->phys, gfp_mask); - if (!addr->virt) - return -ENOMEM; - - memset(addr->virt, 0, len); - addr->len = len; - - return 0; -}; - -/** - * i2o_dma_free - Free DMA memory - * @dev: struct device pointer to the PCI device of the I2O controller - * @addr: i2o_dma struct which contains the DMA buffer - * - * Free a coherent DMA memory and set virtual address of addr to NULL. - */ -static inline void i2o_dma_free(struct device *dev, struct i2o_dma *addr) -{ - if (addr->virt) { - if (addr->phys) - dma_free_coherent(dev, addr->len, addr->virt, - addr->phys); - else - kfree(addr->virt); - addr->virt = NULL; - } -}; - -/** - * i2o_dma_map - Map the memory to DMA - * @dev: struct device pointer to the PCI device of the I2O controller - * @addr: i2o_dma struct which should be mapped - * - * Map the memory in addr->virt to coherent DMA memory and write the - * physical address into addr->phys. - * - * Returns 0 on success or -ENOMEM on failure. - */ -static inline int i2o_dma_map(struct device *dev, struct i2o_dma *addr) -{ - if (!addr->virt) - return -EFAULT; - - if (!addr->phys) - addr->phys = dma_map_single(dev, addr->virt, addr->len, - DMA_BIDIRECTIONAL); - if (!addr->phys) - return -ENOMEM; - - return 0; -}; - -/** - * i2o_dma_unmap - Unmap the DMA memory - * @dev: struct device pointer to the PCI device of the I2O controller - * @addr: i2o_dma struct which should be unmapped - * - * Unmap the memory in addr->virt from DMA memory. 
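A minimal sketch of the inbound-frame handshake the i2o_msg_get()/i2o_msg_post() inlines above implement; the function name is invented and the message layout writes are deliberately omitted, since only the frame-offset protocol is being illustrated:

static int example_post_frame(struct i2o_controller *c)
{
	struct i2o_message __iomem *msg;
	u32 m;

	m = i2o_msg_get(c, &msg);	/* reads the next free offset from c->in_port */
	if (m == I2O_QUEUE_EMPTY)
		return -EBUSY;

	/* the caller would now build the request with writel() stores into
	 * the frame at 'msg', then hand the offset back to the IOP: */
	i2o_msg_post(c, m);		/* writes m back to c->in_port */

	return 0;
}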
- */ -static inline void i2o_dma_unmap(struct device *dev, struct i2o_dma *addr) -{ - if (!addr->virt) - return; - - if (addr->phys) { - dma_unmap_single(dev, addr->phys, addr->len, DMA_BIDIRECTIONAL); - addr->phys = 0; - } -}; - /* * Endian handling wrapped into the macro - keeps the core code * cleaner. @@ -773,6 +893,14 @@ extern void i2o_debug_state(struct i2o_controller *c); #define I2O_CMD_SCSI_BUSRESET 0x27 /* + * Bus Adapter Class + */ +#define I2O_CMD_BUS_ADAPTER_RESET 0x85 +#define I2O_CMD_BUS_RESET 0x87 +#define I2O_CMD_BUS_SCAN 0x89 +#define I2O_CMD_BUS_QUIESCE 0x8b + +/* * Random Block Storage Class */ #define I2O_CMD_BLOCK_READ 0x30 @@ -784,7 +912,7 @@ extern void i2o_debug_state(struct i2o_controller *c); #define I2O_CMD_BLOCK_MEJECT 0x43 #define I2O_CMD_BLOCK_POWER 0x70 -#define I2O_PRIVATE_MSG 0xFF +#define I2O_CMD_PRIVATE 0xFF /* Command status values */ @@ -922,7 +1050,7 @@ extern void i2o_debug_state(struct i2o_controller *c); #define I2OVER15 0x0001 #define I2OVER20 0x0002 -/* Default is 1.5, FIXME: Need support for both 1.5 and 2.0 */ +/* Default is 1.5 */ #define I2OVERSION I2OVER15 #define SGL_OFFSET_0 I2OVERSION @@ -933,9 +1061,9 @@ extern void i2o_debug_state(struct i2o_controller *c); #define SGL_OFFSET_8 (0x0080 | I2OVERSION) #define SGL_OFFSET_9 (0x0090 | I2OVERSION) #define SGL_OFFSET_10 (0x00A0 | I2OVERSION) - -#define TRL_OFFSET_5 (0x0050 | I2OVERSION) -#define TRL_OFFSET_6 (0x0060 | I2OVERSION) +#define SGL_OFFSET_11 (0x00B0 | I2OVERSION) +#define SGL_OFFSET_12 (0x00C0 | I2OVERSION) +#define SGL_OFFSET(x) (((x)<<4) | I2OVERSION) /* Transaction Reply Lists (TRL) Control Word structure */ #define TRL_SINGLE_FIXED_LENGTH 0x00 @@ -962,17 +1090,13 @@ extern void i2o_debug_state(struct i2o_controller *c); #define ELEVEN_WORD_MSG_SIZE 0x000B0000 #define I2O_MESSAGE_SIZE(x) ((x)<<16) -/* Special TID Assignments */ - +/* special TID assignments */ #define ADAPTER_TID 0 #define HOST_TID 1 -#define MSG_FRAME_SIZE 128 /* i2o_scsi assumes >= 32 */ -#define REPLY_FRAME_SIZE 17 -#define SG_TABLESIZE 30 -#define NMBR_MSG_FRAMES 128 - -#define MSG_POOL_SIZE (MSG_FRAME_SIZE*NMBR_MSG_FRAMES*sizeof(u32)) +/* outbound queue defines */ +#define I2O_MAX_OUTBOUND_MSG_FRAMES 128 +#define I2O_OUTBOUND_MSG_FRAME_SIZE 128 /* in 32-bit words */ #define I2O_POST_WAIT_OK 0 #define I2O_POST_WAIT_TIMEOUT -ETIMEDOUT @@ -993,11 +1117,10 @@ extern void i2o_debug_state(struct i2o_controller *c); #define I2O_HRT_GET_TRIES 3 #define I2O_LCT_GET_TRIES 3 -/* request queue sizes */ +/* defines for max_sectors and max_phys_segments */ #define I2O_MAX_SECTORS 1024 -#define I2O_MAX_SEGMENTS 128 - -#define I2O_REQ_MEMPOOL_SIZE 32 +#define I2O_MAX_SECTORS_LIMITED 256 +#define I2O_MAX_PHYS_SEGMENTS MAX_PHYS_SEGMENTS #endif /* __KERNEL__ */ #endif /* _I2O_H */ diff --git a/include/linux/ide.h b/include/linux/ide.h index 9cfc099..9212907 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -664,7 +664,6 @@ typedef struct ide_drive_s { struct request *rq; /* current request */ struct ide_drive_s *next; /* circular list of hwgroup drives */ - struct ide_driver_s *driver;/* (ide_driver_t *) */ void *driver_data; /* extra driver data */ struct hd_driveid *id; /* drive model identification info */ struct proc_dir_entry *proc; /* /proc/ide/ directory entry */ @@ -758,6 +757,8 @@ typedef struct ide_drive_s { struct semaphore gendev_rel_sem; /* to deal with device release() */ } ide_drive_t; +#define to_ide_device(dev)container_of(dev, ide_drive_t, gendev) + #define IDE_CHIPSET_PCI_MASK \ 
((1<<ide_pci)|(1<<ide_cmd646)|(1<<ide_ali14xx)) #define IDE_CHIPSET_IS_PCI(c) ((IDE_CHIPSET_PCI_MASK >> (c)) & 1) @@ -916,7 +917,7 @@ typedef struct hwif_s { unsigned dma; void (*led_act)(void *data, int rw); -} ide_hwif_t; +} ____cacheline_maxaligned_in_smp ide_hwif_t; /* * internal ide interrupt handler type @@ -1086,28 +1087,20 @@ enum { */ typedef struct ide_driver_s { struct module *owner; - const char *name; const char *version; u8 media; - unsigned busy : 1; unsigned supports_dsc_overlap : 1; - int (*cleanup)(ide_drive_t *); ide_startstop_t (*do_request)(ide_drive_t *, struct request *, sector_t); int (*end_request)(ide_drive_t *, int, int); ide_startstop_t (*error)(ide_drive_t *, struct request *rq, u8, u8); ide_startstop_t (*abort)(ide_drive_t *, struct request *rq); int (*ioctl)(ide_drive_t *, struct inode *, struct file *, unsigned int, unsigned long); ide_proc_entry_t *proc; - int (*attach)(ide_drive_t *); void (*ata_prebuilder)(ide_drive_t *); void (*atapi_prebuilder)(ide_drive_t *); struct device_driver gen_driver; - struct list_head drives; - struct list_head drivers; } ide_driver_t; -#define DRIVER(drive) ((drive)->driver) - int generic_ide_ioctl(ide_drive_t *, struct file *, struct block_device *, unsigned, unsigned long); /* @@ -1328,8 +1321,6 @@ extern void ide_init_subdrivers(void); void ide_init_disk(struct gendisk *, ide_drive_t *); -extern int ata_attach(ide_drive_t *); - extern int ideprobe_init(void); extern void ide_scan_pcibus(int scan_direction) __init; @@ -1342,11 +1333,8 @@ extern void default_hwif_iops(ide_hwif_t *); extern void default_hwif_mmiops(ide_hwif_t *); extern void default_hwif_transport(ide_hwif_t *); -int ide_register_driver(ide_driver_t *driver); -void ide_unregister_driver(ide_driver_t *driver); -int ide_register_subdriver(ide_drive_t *, ide_driver_t *); -int ide_unregister_subdriver (ide_drive_t *drive); -int ide_replace_subdriver(ide_drive_t *drive, const char *driver); +void ide_register_subdriver(ide_drive_t *, ide_driver_t *); +void ide_unregister_subdriver(ide_drive_t *, ide_driver_t *); #define ON_BOARD 1 #define NEVER_BOARD 0 diff --git a/include/linux/if.h b/include/linux/if.h index d73a9d6..ce627d9 100644 --- a/include/linux/if.h +++ b/include/linux/if.h @@ -33,7 +33,7 @@ #define IFF_LOOPBACK 0x8 /* is a loopback net */ #define IFF_POINTOPOINT 0x10 /* interface is has p-p link */ #define IFF_NOTRAILERS 0x20 /* avoid use of trailers */ -#define IFF_RUNNING 0x40 /* resources allocated */ +#define IFF_RUNNING 0x40 /* interface running and carrier ok */ #define IFF_NOARP 0x80 /* no ARP protocol */ #define IFF_PROMISC 0x100 /* receive all packets */ #define IFF_ALLMULTI 0x200 /* receive all multicast packets*/ diff --git a/include/linux/if_shaper.h b/include/linux/if_shaper.h index 0485b25..004e6f0 100644 --- a/include/linux/if_shaper.h +++ b/include/linux/if_shaper.h @@ -23,7 +23,7 @@ struct shaper __u32 shapeclock; unsigned long recovery; /* Time we can next clock a packet out on an empty queue */ - unsigned long locked; + struct semaphore sem; struct net_device_stats stats; struct net_device *dev; int (*hard_start_xmit) (struct sk_buff *skb, @@ -38,7 +38,6 @@ struct shaper int (*hard_header_cache)(struct neighbour *neigh, struct hh_cache *hh); void (*header_cache_update)(struct hh_cache *hh, struct net_device *dev, unsigned char * haddr); struct net_device_stats* (*get_stats)(struct net_device *dev); - wait_queue_head_t wait_queue; struct timer_list timer; }; diff --git a/include/linux/if_tr.h b/include/linux/if_tr.h index 
4fd451f..3fba9e2 100644 --- a/include/linux/if_tr.h +++ b/include/linux/if_tr.h @@ -9,7 +9,7 @@ * * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Donald Becker, <becker@super.org> - * Peter De Schrijver, <stud11@cc4.kuleuven.ac.be> + * Peter De Schrijver, <stud11@cc4.kuleuven.ac.be> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -19,24 +19,18 @@ #ifndef _LINUX_IF_TR_H #define _LINUX_IF_TR_H +#include <asm/byteorder.h> /* For __be16 */ /* IEEE 802.5 Token-Ring magic constants. The frame sizes omit the preamble and FCS/CRC (frame check sequence). */ -#define TR_ALEN 6 /* Octets in one ethernet addr */ -#define TR_HLEN (sizeof(struct trh_hdr)+sizeof(struct trllc)) -#define AC 0x10 -#define LLC_FRAME 0x40 -#if 0 -#define ETH_HLEN 14 /* Total octets in header. */ -#define ETH_ZLEN 60 /* Min. octets in frame sans FCS */ -#define ETH_DATA_LEN 1500 /* Max. octets in payload */ -#define ETH_FRAME_LEN 1514 /* Max. octets in frame sans FCS */ -#endif - +#define TR_ALEN 6 /* Octets in one token-ring addr */ +#define TR_HLEN (sizeof(struct trh_hdr)+sizeof(struct trllc)) +#define AC 0x10 +#define LLC_FRAME 0x40 /* LLC and SNAP constants */ -#define EXTENDED_SAP 0xAA -#define UI_CMD 0x03 +#define EXTENDED_SAP 0xAA +#define UI_CMD 0x03 /* This is an Token-Ring frame header. */ struct trh_hdr { @@ -44,8 +38,8 @@ struct trh_hdr { __u8 fc; /* frame control field */ __u8 daddr[TR_ALEN]; /* destination address */ __u8 saddr[TR_ALEN]; /* source address */ - __u16 rcf; /* route control field */ - __u16 rseg[8]; /* routing registers */ + __be16 rcf; /* route control field */ + __be16 rseg[8]; /* routing registers */ }; #ifdef __KERNEL__ @@ -63,7 +57,7 @@ struct trllc { __u8 ssap; /* source SAP */ __u8 llc; /* LLC control field */ __u8 protid[3]; /* protocol id */ - __u16 ethertype; /* ether type field */ + __be16 ethertype; /* ether type field */ }; /* Token-Ring statistics collection data. 
*/ @@ -96,14 +90,13 @@ struct tr_statistics { }; /* source routing stuff */ - -#define TR_RII 0x80 -#define TR_RCF_DIR_BIT 0x80 -#define TR_RCF_LEN_MASK 0x1f00 -#define TR_RCF_BROADCAST 0x8000 /* all-routes broadcast */ -#define TR_RCF_LIMITED_BROADCAST 0xC000 /* single-route broadcast */ -#define TR_RCF_FRAME2K 0x20 -#define TR_RCF_BROADCAST_MASK 0xC000 -#define TR_MAXRIFLEN 18 +#define TR_RII 0x80 +#define TR_RCF_DIR_BIT 0x80 +#define TR_RCF_LEN_MASK 0x1f00 +#define TR_RCF_BROADCAST 0x8000 /* all-routes broadcast */ +#define TR_RCF_LIMITED_BROADCAST 0xC000 /* single-route broadcast */ +#define TR_RCF_FRAME2K 0x20 +#define TR_RCF_BROADCAST_MASK 0xC000 +#define TR_MAXRIFLEN 18 #endif /* _LINUX_IF_TR_H */ diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index 6fafb27..7e1e15f 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -29,6 +29,7 @@ struct ipv4_devconf int no_xfrm; int no_policy; int force_igmp_version; + int promote_secondaries; void *sysctl; }; @@ -71,6 +72,7 @@ struct in_device #define IN_DEV_SEC_REDIRECTS(in_dev) (ipv4_devconf.secure_redirects || (in_dev)->cnf.secure_redirects) #define IN_DEV_IDTAG(in_dev) ((in_dev)->cnf.tag) #define IN_DEV_MEDIUM_ID(in_dev) ((in_dev)->cnf.medium_id) +#define IN_DEV_PROMOTE_SECONDARIES(in_dev) (ipv4_devconf.promote_secondaries || (in_dev)->cnf.promote_secondaries) #define IN_DEV_RX_REDIRECTS(in_dev) \ ((IN_DEV_FORWARD(in_dev) && \ diff --git a/include/linux/init.h b/include/linux/init.h index 05c83e0..59008c3 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -229,6 +229,18 @@ void __init parse_early_param(void); #define __devexitdata __exitdata #endif +#ifdef CONFIG_HOTPLUG_CPU +#define __cpuinit +#define __cpuinitdata +#define __cpuexit +#define __cpuexitdata +#else +#define __cpuinit __init +#define __cpuinitdata __initdata +#define __cpuexit __exit +#define __cpuexitdata __exitdata +#endif + /* Functions marked as __devexit may be discarded at kernel link time, depending on config options. Newer versions of binutils detect references from retained sections to discarded sections and flag an error. Pointers to diff --git a/include/linux/init_task.h b/include/linux/init_task.h index a6a8c1a..03206a4 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -108,7 +108,6 @@ extern struct group_info init_groups; .blocked = {{0}}, \ .alloc_lock = SPIN_LOCK_UNLOCKED, \ .proc_lock = SPIN_LOCK_UNLOCKED, \ - .switch_lock = SPIN_LOCK_UNLOCKED, \ .journal_info = NULL, \ .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ } diff --git a/include/linux/input.h b/include/linux/input.h index 72731d7..9d9598e 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -1015,7 +1015,7 @@ static inline void input_set_abs_params(struct input_dev *dev, int axis, int min dev->absbit[LONG(axis)] |= BIT(axis); } -extern struct class_simple *input_class; +extern struct class *input_class; #endif #endif diff --git a/include/linux/ioc4.h b/include/linux/ioc4.h new file mode 100644 index 0000000..3dd18b7 --- /dev/null +++ b/include/linux/ioc4.h @@ -0,0 +1,179 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2005 Silicon Graphics, Inc. All Rights Reserved. 
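A short sketch of the new __cpuinit/__cpuinitdata annotations from init.h above: with CONFIG_HOTPLUG_CPU they are no-ops so the code stays resident for later CPU bring-up; without it they degrade to __init/__initdata and the text is discarded after boot. Names below are invented:

static int __cpuinitdata example_threshold = 16;

static void __cpuinit example_init_cpu(unsigned int cpu)
{
	/* per-CPU setup: runs at boot for every CPU, and again from the
	 * hotplug path when CONFIG_HOTPLUG_CPU keeps this text around */
}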
+ */ + +#ifndef _LINUX_IOC4_H +#define _LINUX_IOC4_H + +#include <linux/interrupt.h> + +/*************** + * Definitions * + ***************/ + +/* Miscellaneous values inherent to hardware */ + +#define IOC4_EXTINT_COUNT_DIVISOR 520 /* PCI clocks per COUNT tick */ + +/*********************************** + * Structures needed by subdrivers * + ***********************************/ + +/* This structure fully describes the IOC4 miscellaneous registers which + * appear at bar[0]+0x00000 through bar[0]+0x0005c. The corresponding + * PCI resource is managed by the main IOC4 driver because it contains + * registers of interest to many different IOC4 subdrivers. + */ +struct ioc4_misc_regs { + /* Miscellaneous IOC4 registers */ + union ioc4_pci_err_addr_l { + uint32_t raw; + struct { + uint32_t valid:1; /* Address captured */ + uint32_t master_id:4; /* Unit causing error + * 0/1: Serial port 0 TX/RX + * 2/3: Serial port 1 TX/RX + * 4/5: Serial port 2 TX/RX + * 6/7: Serial port 3 TX/RX + * 8: ATA/ATAPI + * 9-15: Undefined + */ + uint32_t mul_err:1; /* Multiple errors occurred */ + uint32_t addr:26; /* Bits 31-6 of error addr */ + } fields; + } pci_err_addr_l; + uint32_t pci_err_addr_h; /* Bits 63-32 of error addr */ + union ioc4_sio_int { + uint32_t raw; + struct { + uint8_t tx_mt:1; /* TX ring buffer empty */ + uint8_t rx_full:1; /* RX ring buffer full */ + uint8_t rx_high:1; /* RX high-water exceeded */ + uint8_t rx_timer:1; /* RX timer has triggered */ + uint8_t delta_dcd:1; /* DELTA_DCD seen */ + uint8_t delta_cts:1; /* DELTA_CTS seen */ + uint8_t intr_pass:1; /* Interrupt pass-through */ + uint8_t tx_explicit:1; /* TX, MCW, or delay complete */ + } fields[4]; + } sio_ir; /* Serial interrupt state */ + union ioc4_other_int { + uint32_t raw; + struct { + uint32_t ata_int:1; /* ATA port passthru */ + uint32_t ata_memerr:1; /* ATA halted by mem error */ + uint32_t memerr:4; /* Serial halted by mem err */ + uint32_t kbd_int:1; /* kbd/mouse intr asserted */ + uint32_t reserved:16; /* zero */ + uint32_t rt_int:1; /* INT_OUT section latch */ + uint32_t gen_int:8; /* Intr. from generic pins */ + } fields; + } other_ir; /* Other interrupt state */ + union ioc4_sio_int sio_ies; /* Serial interrupt enable set */ + union ioc4_other_int other_ies; /* Other interrupt enable set */ + union ioc4_sio_int sio_iec; /* Serial interrupt enable clear */ + union ioc4_other_int other_iec; /* Other interrupt enable clear */ + union ioc4_sio_cr { + uint32_t raw; + struct { + uint32_t cmd_pulse:4; /* Bytebus strobe width */ + uint32_t arb_diag:3; /* PCI bus requester */ + uint32_t sio_diag_idle:1; /* Active ser req? */ + uint32_t ata_diag_idle:1; /* Active ATA req? 
*/ + uint32_t ata_diag_active:1; /* ATA req is winner */ + uint32_t reserved:22; /* zero */ + } fields; + } sio_cr; + uint32_t unused1; + union ioc4_int_out { + uint32_t raw; + struct { + uint32_t count:16; /* Period control */ + uint32_t mode:3; /* Output signal shape */ + uint32_t reserved:11; /* zero */ + uint32_t diag:1; /* Timebase control */ + uint32_t int_out:1; /* Current value */ + } fields; + } int_out; /* External interrupt output control */ + uint32_t unused2; + union ioc4_gpcr { + uint32_t raw; + struct { + uint32_t dir:8; /* Pin direction */ + uint32_t edge:8; /* Edge/level mode */ + uint32_t reserved1:4; /* zero */ + uint32_t int_out_en:1; /* INT_OUT enable */ + uint32_t reserved2:11; /* zero */ + } fields; + } gpcr_s; /* Generic PIO control set */ + union ioc4_gpcr gpcr_c; /* Generic PIO control clear */ + union ioc4_gpdr { + uint32_t raw; + struct { + uint32_t gen_pin:8; /* State of pins */ + uint32_t reserved:24; + } fields; + } gpdr; /* Generic PIO data */ + uint32_t unused3; + union ioc4_gppr { + uint32_t raw; + struct { + uint32_t gen_pin:1; /* Single pin state */ + uint32_t reserved:31; + } fields; + } gppr[8]; /* Generic PIO pins */ +}; + +/* Masks for GPCR DIR pins */ +#define IOC4_GPCR_DIR_0 0x01 /* External interrupt output */ +#define IOC4_GPCR_DIR_1 0x02 /* External interrupt input */ +#define IOC4_GPCR_DIR_2 0x04 +#define IOC4_GPCR_DIR_3 0x08 /* Keyboard/mouse presence */ +#define IOC4_GPCR_DIR_4 0x10 /* Ser. port 0 xcvr select (0=232, 1=422) */ +#define IOC4_GPCR_DIR_5 0x20 /* Ser. port 1 xcvr select (0=232, 1=422) */ +#define IOC4_GPCR_DIR_6 0x40 /* Ser. port 2 xcvr select (0=232, 1=422) */ +#define IOC4_GPCR_DIR_7 0x80 /* Ser. port 3 xcvr select (0=232, 1=422) */ + +/* Masks for GPCR EDGE pins */ +#define IOC4_GPCR_EDGE_0 0x01 +#define IOC4_GPCR_EDGE_1 0x02 /* External interrupt input */ +#define IOC4_GPCR_EDGE_2 0x04 +#define IOC4_GPCR_EDGE_3 0x08 +#define IOC4_GPCR_EDGE_4 0x10 +#define IOC4_GPCR_EDGE_5 0x20 +#define IOC4_GPCR_EDGE_6 0x40 +#define IOC4_GPCR_EDGE_7 0x80 + +/* One of these per IOC4 */ +struct ioc4_driver_data { + struct list_head idd_list; + unsigned long idd_bar0; + struct pci_dev *idd_pdev; + const struct pci_device_id *idd_pci_id; + struct __iomem ioc4_misc_regs *idd_misc_regs; + unsigned long count_period; + void *idd_serial_data; +}; + +/* One per submodule */ +struct ioc4_submodule { + struct list_head is_list; + char *is_name; + struct module *is_owner; + int (*is_probe) (struct ioc4_driver_data *); + int (*is_remove) (struct ioc4_driver_data *); +}; + +#define IOC4_NUM_CARDS 8 /* max cards per partition */ + +/********************************** + * Functions needed by submodules * + **********************************/ + +extern int ioc4_register_submodule(struct ioc4_submodule *); +extern void ioc4_unregister_submodule(struct ioc4_submodule *); + +#endif /* _LINUX_IOC4_H */ diff --git a/include/linux/ioc4_common.h b/include/linux/ioc4_common.h deleted file mode 100644 index b03bcc4..0000000 --- a/include/linux/ioc4_common.h +++ /dev/null @@ -1,21 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (c) 2005 Silicon Graphics, Inc. All Rights Reserved. 
- */ - -#ifndef _LINUX_IOC4_COMMON_H -#define _LINUX_IOC4_COMMON_H - -/* prototypes */ - -int ioc4_serial_init(void); - -int ioc4_serial_attach_one(struct pci_dev *pdev, const struct - pci_device_id *pci_id); -int ioc4_ide_attach_one(struct pci_dev *pdev, const struct - pci_device_id *pci_id); - -#endif /* _LINUX_IOC4_COMMON_H */ diff --git a/include/linux/ip.h b/include/linux/ip.h index 8438c68..31e7ced 100644 --- a/include/linux/ip.h +++ b/include/linux/ip.h @@ -81,6 +81,7 @@ #ifdef __KERNEL__ #include <linux/config.h> #include <linux/types.h> +#include <net/request_sock.h> #include <net/sock.h> #include <linux/igmp.h> #include <net/flow.h> @@ -107,6 +108,26 @@ struct ip_options { #define optlength(opt) (sizeof(struct ip_options) + opt->optlen) +struct inet_request_sock { + struct request_sock req; + u32 loc_addr; + u32 rmt_addr; + u16 rmt_port; + u16 snd_wscale : 4, + rcv_wscale : 4, + tstamp_ok : 1, + sack_ok : 1, + wscale_ok : 1, + ecn_ok : 1, + acked : 1; + struct ip_options *opt; +}; + +static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk) +{ + return (struct inet_request_sock *)sk; +} + struct ipv6_pinfo; struct inet_sock { diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h index 2ec265e..596ca61 100644 --- a/include/linux/ipmi.h +++ b/include/linux/ipmi.h @@ -209,6 +209,11 @@ struct kernel_ipmi_msg #include <linux/list.h> #include <linux/module.h> +#ifdef CONFIG_PROC_FS +#include <linux/proc_fs.h> +extern struct proc_dir_entry *proc_ipmi_root; +#endif /* CONFIG_PROC_FS */ + /* Opaque type for a IPMI message user. One of these is needed to send and receive messages. */ typedef struct ipmi_user *ipmi_user_t; diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index ab0d0ef..6fcd6a0 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -193,6 +193,19 @@ struct inet6_skb_parm { #define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb)) +struct tcp6_request_sock { + struct tcp_request_sock req; + struct in6_addr loc_addr; + struct in6_addr rmt_addr; + struct sk_buff *pktopts; + int iif; +}; + +static inline struct tcp6_request_sock *tcp6_rsk(const struct request_sock *sk) +{ + return (struct tcp6_request_sock *)sk; +} + /** * struct ipv6_pinfo - ipv6 private area * diff --git a/include/linux/irq.h b/include/linux/irq.h index c3ff4d1..1227779 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -47,6 +47,10 @@ struct hw_interrupt_type { void (*ack)(unsigned int irq); void (*end)(unsigned int irq); void (*set_affinity)(unsigned int irq, cpumask_t dest); + /* Currently used only by UML, might disappear one day.*/ +#ifdef CONFIG_IRQ_RELEASE_METHOD + void (*release)(unsigned int irq, void *dev_id); +#endif }; typedef struct hw_interrupt_type hw_irq_controller; @@ -84,7 +88,6 @@ extern fastcall int handle_IRQ_event(unsigned int irq, struct pt_regs *regs, struct irqaction *action); extern fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs); extern void note_interrupt(unsigned int irq, irq_desc_t *desc, int action_ret); -extern void report_bad_irq(unsigned int irq, irq_desc_t *desc, int action_ret); extern int can_request_irq(unsigned int irq, unsigned long irqflags); extern void init_irq_proc(void); diff --git a/include/linux/kernel.h b/include/linux/kernel.h index e25b970..687ba8c 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -58,15 +58,23 @@ struct completion; * be biten later when the calling function happens to sleep when it is not * supposed to. 
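
The annotation this comment describes is placed inside functions that may sleep, so that debug builds flag an atomic-context caller at the call site rather than much later. A minimal sketch of that use, assuming a simple sleeping allocation helper (illustrative only, not part of this patch):

#include <linux/kernel.h>
#include <linux/slab.h>

static void *example_buf_alloc(size_t len)
{
        might_sleep();                          /* complain now if the caller is atomic */
        return kmalloc(len, GFP_KERNEL);        /* GFP_KERNEL may itself sleep */
}
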
*/ +#ifdef CONFIG_PREEMPT_VOLUNTARY +extern int cond_resched(void); +# define might_resched() cond_resched() +#else +# define might_resched() do { } while (0) +#endif + #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP -#define might_sleep() __might_sleep(__FILE__, __LINE__) -#define might_sleep_if(cond) do { if (unlikely(cond)) might_sleep(); } while (0) -void __might_sleep(char *file, int line); + void __might_sleep(char *file, int line); +# define might_sleep() \ + do { __might_sleep(__FILE__, __LINE__); might_resched(); } while (0) #else -#define might_sleep() do {} while(0) -#define might_sleep_if(cond) do {} while (0) +# define might_sleep() do { might_resched(); } while (0) #endif +#define might_sleep_if(cond) do { if (unlikely(cond)) might_sleep(); } while (0) + #define abs(x) ({ \ int __x = (x); \ (__x < 0) ? -__x : __x; \ diff --git a/include/linux/kexec.h b/include/linux/kexec.h new file mode 100644 index 0000000..c846847 --- /dev/null +++ b/include/linux/kexec.h @@ -0,0 +1,135 @@ +#ifndef LINUX_KEXEC_H +#define LINUX_KEXEC_H + +#ifdef CONFIG_KEXEC +#include <linux/types.h> +#include <linux/list.h> +#include <linux/linkage.h> +#include <linux/compat.h> +#include <asm/kexec.h> + +/* Verify architecture specific macros are defined */ + +#ifndef KEXEC_SOURCE_MEMORY_LIMIT +#error KEXEC_SOURCE_MEMORY_LIMIT not defined +#endif + +#ifndef KEXEC_DESTINATION_MEMORY_LIMIT +#error KEXEC_DESTINATION_MEMORY_LIMIT not defined +#endif + +#ifndef KEXEC_CONTROL_MEMORY_LIMIT +#error KEXEC_CONTROL_MEMORY_LIMIT not defined +#endif + +#ifndef KEXEC_CONTROL_CODE_SIZE +#error KEXEC_CONTROL_CODE_SIZE not defined +#endif + +#ifndef KEXEC_ARCH +#error KEXEC_ARCH not defined +#endif + +/* + * This structure is used to hold the arguments that are used when loading + * kernel binaries. + */ + +typedef unsigned long kimage_entry_t; +#define IND_DESTINATION 0x1 +#define IND_INDIRECTION 0x2 +#define IND_DONE 0x4 +#define IND_SOURCE 0x8 + +#define KEXEC_SEGMENT_MAX 8 +struct kexec_segment { + void __user *buf; + size_t bufsz; + unsigned long mem; /* User space sees this as a (void *) ... */ + size_t memsz; +}; + +#ifdef CONFIG_COMPAT +struct compat_kexec_segment { + compat_uptr_t buf; + compat_size_t bufsz; + compat_ulong_t mem; /* User space sees this as a (void *) ... */ + compat_size_t memsz; +}; +#endif + +struct kimage { + kimage_entry_t head; + kimage_entry_t *entry; + kimage_entry_t *last_entry; + + unsigned long destination; + + unsigned long start; + struct page *control_code_page; + + unsigned long nr_segments; + struct kexec_segment segment[KEXEC_SEGMENT_MAX]; + + struct list_head control_pages; + struct list_head dest_pages; + struct list_head unuseable_pages; + + /* Address of next control page to allocate for crash kernels. 
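
struct kexec_segment above pairs a user source buffer (buf/bufsz) with a reserved destination range (mem/memsz). The standalone sketch below shows the kind of invariant that pairing implies, using a hypothetical mirror of the structure and an assumed 4 KiB page size; it is illustrative only and not the kernel's actual validation:

#include <stddef.h>

struct example_segment {                /* mirrors kexec_segment for illustration */
        void *buf;                      /* user-space source buffer */
        size_t bufsz;                   /* bytes to copy from buf */
        unsigned long mem;              /* destination physical address */
        size_t memsz;                   /* bytes reserved at the destination */
};

static int example_segments_sane(const struct example_segment *seg, unsigned long n)
{
        unsigned long i;

        for (i = 0; i < n; i++) {
                if (seg[i].bufsz > seg[i].memsz)
                        return 0;       /* contents would overflow the reserved range */
                if (seg[i].mem & (4096UL - 1))
                        return 0;       /* destination assumed page aligned */
        }
        return 1;
}
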
*/ + unsigned long control_page; + + /* Flags to indicate special processing */ + unsigned int type : 1; +#define KEXEC_TYPE_DEFAULT 0 +#define KEXEC_TYPE_CRASH 1 +}; + + + +/* kexec interface functions */ +extern NORET_TYPE void machine_kexec(struct kimage *image) ATTRIB_NORET; +extern int machine_kexec_prepare(struct kimage *image); +extern void machine_kexec_cleanup(struct kimage *image); +extern asmlinkage long sys_kexec_load(unsigned long entry, + unsigned long nr_segments, + struct kexec_segment __user *segments, + unsigned long flags); +#ifdef CONFIG_COMPAT +extern asmlinkage long compat_sys_kexec_load(unsigned long entry, + unsigned long nr_segments, + struct compat_kexec_segment __user *segments, + unsigned long flags); +#endif +extern struct page *kimage_alloc_control_pages(struct kimage *image, + unsigned int order); +extern void crash_kexec(struct pt_regs *); +int kexec_should_crash(struct task_struct *); +extern struct kimage *kexec_image; + +#define KEXEC_ON_CRASH 0x00000001 +#define KEXEC_ARCH_MASK 0xffff0000 + +/* These values match the ELF architecture values. + * Unless there is a good reason that should continue to be the case. + */ +#define KEXEC_ARCH_DEFAULT ( 0 << 16) +#define KEXEC_ARCH_386 ( 3 << 16) +#define KEXEC_ARCH_X86_64 (62 << 16) +#define KEXEC_ARCH_PPC (20 << 16) +#define KEXEC_ARCH_PPC64 (21 << 16) +#define KEXEC_ARCH_IA_64 (50 << 16) +#define KEXEC_ARCH_S390 (22 << 16) + +#define KEXEC_FLAGS (KEXEC_ON_CRASH) /* List of defined/legal kexec flags */ + +/* Location of a reserved region to hold the crash kernel. + */ +extern struct resource crashk_res; + +#else /* !CONFIG_KEXEC */ +struct pt_regs; +struct task_struct; +static inline void crash_kexec(struct pt_regs *regs) { } +static inline int kexec_should_crash(struct task_struct *p) { return 0; } +#endif /* CONFIG_KEXEC */ +#endif /* LINUX_KEXEC_H */ diff --git a/include/linux/key-ui.h b/include/linux/key-ui.h index 60cc7b7..cc32617 100644 --- a/include/linux/key-ui.h +++ b/include/linux/key-ui.h @@ -1,4 +1,4 @@ -/* key-ui.h: key userspace interface stuff for use by keyfs +/* key-ui.h: key userspace interface stuff * * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. 
* Written by David Howells (dhowells@redhat.com) @@ -31,8 +31,10 @@ extern spinlock_t key_serial_lock; * subscribed */ struct keyring_list { - unsigned maxkeys; /* max keys this list can hold */ - unsigned nkeys; /* number of keys currently held */ + struct rcu_head rcu; /* RCU deletion hook */ + unsigned short maxkeys; /* max keys this list can hold */ + unsigned short nkeys; /* number of keys currently held */ + unsigned short delkey; /* key to be unlinked by RCU */ struct key *keys[0]; }; @@ -82,8 +84,45 @@ static inline int key_any_permission(const struct key *key, key_perm_t perm) return kperm != 0; } +static inline int key_task_groups_search(struct task_struct *tsk, gid_t gid) +{ + int ret; + + task_lock(tsk); + ret = groups_search(tsk->group_info, gid); + task_unlock(tsk); + return ret; +} + +static inline int key_task_permission(const struct key *key, + struct task_struct *context, + key_perm_t perm) +{ + key_perm_t kperm; + + if (key->uid == context->fsuid) { + kperm = key->perm >> 16; + } + else if (key->gid != -1 && + key->perm & KEY_GRP_ALL && ( + key->gid == context->fsgid || + key_task_groups_search(context, key->gid) + ) + ) { + kperm = key->perm >> 8; + } + else { + kperm = key->perm; + } + + kperm = kperm & perm & KEY_ALL; + + return kperm == perm; + +} -extern struct key *lookup_user_key(key_serial_t id, int create, int part, +extern struct key *lookup_user_key(struct task_struct *context, + key_serial_t id, int create, int partial, key_perm_t perm); extern long join_session_keyring(const char *name); diff --git a/include/linux/key.h b/include/linux/key.h index 6aa46d0..970bbd9 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -18,7 +18,7 @@ #include <linux/types.h> #include <linux/list.h> #include <linux/rbtree.h> -#include <linux/spinlock.h> +#include <linux/rcupdate.h> #include <asm/atomic.h> #ifdef __KERNEL__ @@ -78,7 +78,6 @@ struct key { key_serial_t serial; /* key serial number */ struct rb_node serial_node; struct key_type *type; /* type of key */ - rwlock_t lock; /* examination vs change lock */ struct rw_semaphore sem; /* change vs change sem */ struct key_user *user; /* owner of this key */ time_t expiry; /* time at which key expires (or 0) */ @@ -86,14 +85,10 @@ struct key { gid_t gid; key_perm_t perm; /* access permissions */ unsigned short quotalen; /* length added to quota */ - unsigned short datalen; /* payload data length */ - unsigned short flags; /* status flags (change with lock writelocked) */ -#define KEY_FLAG_INSTANTIATED 0x00000001 /* set if key has been instantiated */ -#define KEY_FLAG_DEAD 0x00000002 /* set if key type has been deleted */ -#define KEY_FLAG_REVOKED 0x00000004 /* set if key had been revoked */ -#define KEY_FLAG_IN_QUOTA 0x00000008 /* set if key consumes quota */ -#define KEY_FLAG_USER_CONSTRUCT 0x00000010 /* set if key is being constructed in userspace */ -#define KEY_FLAG_NEGATIVE 0x00000020 /* set if key is negative */ + unsigned short datalen; /* payload data length + * - may not match RCU dereferenced payload + * - payload should contain own length + */ #ifdef KEY_DEBUGGING unsigned magic; @@ -101,6 +96,14 @@ struct key { #define KEY_DEBUG_MAGIC_X 0xf8e9dacbu #endif + unsigned long flags; /* status flags (change with bitops) */ +#define KEY_FLAG_INSTANTIATED 0 /* set if key has been instantiated */ +#define KEY_FLAG_DEAD 1 /* set if key type has been deleted */ +#define KEY_FLAG_REVOKED 2 /* set if key had been revoked */ +#define KEY_FLAG_IN_QUOTA 3 /* set if key consumes quota */ +#define KEY_FLAG_USER_CONSTRUCT 
4 /* set if key is being constructed in userspace */ +#define KEY_FLAG_NEGATIVE 5 /* set if key is negative */ + /* the description string * - this is used to match a key against search criteria * - this should be a printable string @@ -196,10 +199,12 @@ extern int key_payload_reserve(struct key *key, size_t datalen); extern int key_instantiate_and_link(struct key *key, const void *data, size_t datalen, - struct key *keyring); + struct key *keyring, + struct key *instkey); extern int key_negate_and_link(struct key *key, unsigned timeout, - struct key *keyring); + struct key *keyring, + struct key *instkey); extern void key_revoke(struct key *key); extern void key_put(struct key *key); @@ -242,14 +247,13 @@ extern struct key *keyring_search(struct key *keyring, struct key_type *type, const char *description); -extern struct key *search_process_keyrings(struct key_type *type, - const char *description); - extern int keyring_add_key(struct key *keyring, struct key *key); extern struct key *key_lookup(key_serial_t id); +extern void keyring_replace_payload(struct key *key, void *replacement); + #define key_serial(key) ((key) ? (key)->serial : 0) /* @@ -268,14 +272,22 @@ extern void key_fsuid_changed(struct task_struct *tsk); extern void key_fsgid_changed(struct task_struct *tsk); extern void key_init(void); +#define __install_session_keyring(tsk, keyring) \ +({ \ + struct key *old_session = tsk->signal->session_keyring; \ + tsk->signal->session_keyring = keyring; \ + old_session; \ +}) + #else /* CONFIG_KEYS */ #define key_validate(k) 0 #define key_serial(k) 0 -#define key_get(k) NULL +#define key_get(k) ({ NULL; }) #define key_put(k) do { } while(0) #define alloc_uid_keyring(u) 0 #define switch_uid_keyring(u) do { } while(0) +#define __install_session_keyring(t, k) ({ NULL; }) #define copy_keys(f,t) 0 #define copy_thread_group_keys(t) 0 #define exit_keys(t) do { } while(0) diff --git a/include/linux/keyctl.h b/include/linux/keyctl.h index 381dedc..8d7c59a 100644 --- a/include/linux/keyctl.h +++ b/include/linux/keyctl.h @@ -20,6 +20,16 @@ #define KEY_SPEC_USER_SESSION_KEYRING -5 /* - key ID for UID-session keyring */ #define KEY_SPEC_GROUP_KEYRING -6 /* - key ID for GID-specific keyring */ +/* request-key default keyrings */ +#define KEY_REQKEY_DEFL_NO_CHANGE -1 +#define KEY_REQKEY_DEFL_DEFAULT 0 +#define KEY_REQKEY_DEFL_THREAD_KEYRING 1 +#define KEY_REQKEY_DEFL_PROCESS_KEYRING 2 +#define KEY_REQKEY_DEFL_SESSION_KEYRING 3 +#define KEY_REQKEY_DEFL_USER_KEYRING 4 +#define KEY_REQKEY_DEFL_USER_SESSION_KEYRING 5 +#define KEY_REQKEY_DEFL_GROUP_KEYRING 6 + /* keyctl commands */ #define KEYCTL_GET_KEYRING_ID 0 /* ask for a keyring's ID */ #define KEYCTL_JOIN_SESSION_KEYRING 1 /* join or start named session keyring */ @@ -35,5 +45,6 @@ #define KEYCTL_READ 11 /* read a key or keyring's contents */ #define KEYCTL_INSTANTIATE 12 /* instantiate a partially constructed key */ #define KEYCTL_NEGATE 13 /* negate a partially constructed key */ +#define KEYCTL_SET_REQKEY_KEYRING 14 /* set default request-key keyring */ #endif /* _LINUX_KEYCTL_H */ diff --git a/include/linux/klist.h b/include/linux/klist.h new file mode 100644 index 0000000..eebf5e5 --- /dev/null +++ b/include/linux/klist.h @@ -0,0 +1,55 @@ +/* + * klist.h - Some generic list helpers, extending struct list_head a bit. + * + * Implementations are found in lib/klist.c + * + * + * Copyright (C) 2005 Patrick Mochel + * + * This file is rleased under the GPL v2. 
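
The KEY_FLAG_* values above are now bit numbers rather than mask values, so callers are expected to test and change them with the atomic bitops. A minimal sketch of such a test; the helper name is assumed, not taken from this patch:

#include <linux/bitops.h>
#include <linux/key.h>

static int example_key_is_live(struct key *key)
{
        return test_bit(KEY_FLAG_INSTANTIATED, &key->flags) &&
               !test_bit(KEY_FLAG_REVOKED, &key->flags) &&
               !test_bit(KEY_FLAG_NEGATIVE, &key->flags);
}
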
+ */ + +#include <linux/spinlock.h> +#include <linux/completion.h> +#include <linux/kref.h> +#include <linux/list.h> + + +struct klist { + spinlock_t k_lock; + struct list_head k_list; +}; + + +extern void klist_init(struct klist * k); + + +struct klist_node { + struct klist * n_klist; + struct list_head n_node; + struct kref n_ref; + struct completion n_removed; +}; + +extern void klist_add_tail(struct klist * k, struct klist_node * n); +extern void klist_add_head(struct klist * k, struct klist_node * n); + +extern void klist_del(struct klist_node * n); +extern void klist_remove(struct klist_node * n); + +extern int klist_node_attached(struct klist_node * n); + + +struct klist_iter { + struct klist * i_klist; + struct list_head * i_head; + struct klist_node * i_cur; +}; + + +extern void klist_iter_init(struct klist * k, struct klist_iter * i); +extern void klist_iter_init_node(struct klist * k, struct klist_iter * i, + struct klist_node * n); +extern void klist_iter_exit(struct klist_iter * i); +extern struct klist_node * klist_next(struct klist_iter * i); + diff --git a/include/linux/kmod.h b/include/linux/kmod.h index 95d0e4b..e4a2315 100644 --- a/include/linux/kmod.h +++ b/include/linux/kmod.h @@ -19,6 +19,7 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ +#include <linux/stddef.h> #include <linux/config.h> #include <linux/errno.h> #include <linux/compiler.h> @@ -34,7 +35,17 @@ static inline int request_module(const char * name, ...) { return -ENOSYS; } #endif #define try_then_request_module(x, mod...) ((x) ?: (request_module(mod), (x))) -extern int call_usermodehelper(char *path, char *argv[], char *envp[], int wait); + +struct key; +extern int call_usermodehelper_keys(char *path, char *argv[], char *envp[], + struct key *session_keyring, int wait); + +static inline int +call_usermodehelper(char *path, char **argv, char **envp, int wait) +{ + return call_usermodehelper_keys(path, argv, envp, NULL, wait); +} + extern void usermodehelper_init(void); #endif /* __LINUX_KMOD_H__ */ diff --git a/include/linux/kobject.h b/include/linux/kobject.h index 765d660..3b22304 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h @@ -33,7 +33,7 @@ extern u64 hotplug_seqnum; struct kobject { - char * k_name; + const char * k_name; char name[KOBJ_NAME_LEN]; struct kref kref; struct list_head entry; @@ -46,7 +46,7 @@ struct kobject { extern int kobject_set_name(struct kobject *, const char *, ...) 
__attribute__((format(printf,2,3))); -static inline char * kobject_name(struct kobject * kobj) +static inline const char * kobject_name(const struct kobject * kobj) { return kobj->k_name; } @@ -57,7 +57,7 @@ extern void kobject_cleanup(struct kobject *); extern int kobject_add(struct kobject *); extern void kobject_del(struct kobject *); -extern int kobject_rename(struct kobject *, char *new_name); +extern int kobject_rename(struct kobject *, const char *new_name); extern int kobject_register(struct kobject *); extern void kobject_unregister(struct kobject *); @@ -94,7 +94,7 @@ struct kobj_type { */ struct kset_hotplug_ops { int (*filter)(struct kset *kset, struct kobject *kobj); - char *(*name)(struct kset *kset, struct kobject *kobj); + const char *(*name)(struct kset *kset, struct kobject *kobj); int (*hotplug)(struct kset *kset, struct kobject *kobj, char **envp, int num_envp, char *buffer, int buffer_size); }; diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 99ddba5..5e1a7b0 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -25,27 +25,45 @@ * Rusty Russell). * 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes * interface to access function arguments. + * 2005-May Hien Nguyen <hien@us.ibm.com> and Jim Keniston + * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi + * <prasanna@in.ibm.com> added function-return probes. */ #include <linux/config.h> #include <linux/list.h> #include <linux/notifier.h> #include <linux/smp.h> + #include <asm/kprobes.h> +/* kprobe_status settings */ +#define KPROBE_HIT_ACTIVE 0x00000001 +#define KPROBE_HIT_SS 0x00000002 +#define KPROBE_REENTER 0x00000004 +#define KPROBE_HIT_SSDONE 0x00000008 + struct kprobe; struct pt_regs; +struct kretprobe; +struct kretprobe_instance; typedef int (*kprobe_pre_handler_t) (struct kprobe *, struct pt_regs *); typedef int (*kprobe_break_handler_t) (struct kprobe *, struct pt_regs *); typedef void (*kprobe_post_handler_t) (struct kprobe *, struct pt_regs *, unsigned long flags); typedef int (*kprobe_fault_handler_t) (struct kprobe *, struct pt_regs *, int trapnr); +typedef int (*kretprobe_handler_t) (struct kretprobe_instance *, + struct pt_regs *); + struct kprobe { struct hlist_node hlist; /* list of kprobes for multi-handler support */ struct list_head list; + /*count the number of times this probe was temporarily disarmed */ + unsigned long nmissed; + /* location of the probe point */ kprobe_opcode_t *addr; @@ -85,6 +103,62 @@ struct jprobe { kprobe_opcode_t *entry; /* probe handling code to jump to */ }; +#ifdef ARCH_SUPPORTS_KRETPROBES +extern int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs); +extern void trampoline_post_handler(struct kprobe *p, struct pt_regs *regs, + unsigned long flags); +extern struct task_struct *arch_get_kprobe_task(void *ptr); +extern void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs); +extern void arch_kprobe_flush_task(struct task_struct *tk); +#else /* ARCH_SUPPORTS_KRETPROBES */ +static inline void kretprobe_trampoline(void) +{ +} +static inline int trampoline_probe_handler(struct kprobe *p, + struct pt_regs *regs) +{ + return 0; +} +static inline void trampoline_post_handler(struct kprobe *p, + struct pt_regs *regs, unsigned long flags) +{ +} +static inline void arch_prepare_kretprobe(struct kretprobe *rp, + struct pt_regs *regs) +{ +} +static inline void arch_kprobe_flush_task(struct task_struct *tk) +{ +} +#define arch_get_kprobe_task(ptr) ((struct task_struct *)NULL) +#endif /* 
ARCH_SUPPORTS_KRETPROBES */ +/* + * Function-return probe - + * Note: + * User needs to provide a handler function, and initialize maxactive. + * maxactive - The maximum number of instances of the probed function that + * can be active concurrently. + * nmissed - tracks the number of times the probed function's return was + * ignored, due to maxactive being too low. + * + */ +struct kretprobe { + struct kprobe kp; + kretprobe_handler_t handler; + int maxactive; + int nmissed; + struct hlist_head free_instances; + struct hlist_head used_instances; +}; + +struct kretprobe_instance { + struct hlist_node uflist; /* either on free list or used list */ + struct hlist_node hlist; + struct kretprobe *rp; + void *ret_addr; + void *stack_addr; +}; + #ifdef CONFIG_KPROBES /* Locks kprobe: irq must be disabled */ void lock_kprobes(void); @@ -99,11 +173,14 @@ static inline int kprobe_running(void) extern int arch_prepare_kprobe(struct kprobe *p); extern void arch_copy_kprobe(struct kprobe *p); +extern void arch_arm_kprobe(struct kprobe *p); +extern void arch_disarm_kprobe(struct kprobe *p); extern void arch_remove_kprobe(struct kprobe *p); extern void show_registers(struct pt_regs *regs); /* Get the kprobe at this addr (if any). Must have called lock_kprobes */ struct kprobe *get_kprobe(void *addr); +struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk); int register_kprobe(struct kprobe *p); void unregister_kprobe(struct kprobe *p); @@ -113,7 +190,16 @@ int register_jprobe(struct jprobe *p); void unregister_jprobe(struct jprobe *p); void jprobe_return(void); -#else +int register_kretprobe(struct kretprobe *rp); +void unregister_kretprobe(struct kretprobe *rp); + +struct kretprobe_instance *get_free_rp_inst(struct kretprobe *rp); +struct kretprobe_instance *get_rp_inst(void *sara); +struct kretprobe_instance *get_rp_inst_tsk(struct task_struct *tk); +void add_rp_inst(struct kretprobe_instance *ri); +void kprobe_flush_task(struct task_struct *tk); +void recycle_rp_inst(struct kretprobe_instance *ri); +#else /* CONFIG_KPROBES */ static inline int kprobe_running(void) { return 0; @@ -135,5 +221,15 @@ static inline void unregister_jprobe(struct jprobe *p) static inline void jprobe_return(void) { } -#endif +static inline int register_kretprobe(struct kretprobe *rp) +{ + return -ENOSYS; +} +static inline void unregister_kretprobe(struct kretprobe *rp) +{ +} +static inline void kprobe_flush_task(struct task_struct *tk) +{ +} +#endif /* CONFIG_KPROBES */ #endif /* _LINUX_KPROBES_H */ diff --git a/include/linux/libata.h b/include/linux/libata.h index 1f7e203..6cd9ba6 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -410,6 +410,7 @@ extern u8 ata_chk_err(struct ata_port *ap); extern void ata_exec_command(struct ata_port *ap, struct ata_taskfile *tf); extern int ata_port_start (struct ata_port *ap); extern void ata_port_stop (struct ata_port *ap); +extern void ata_host_stop (struct ata_host_set *host_set); extern irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs); extern void ata_qc_prep(struct ata_queued_cmd *qc); extern int ata_qc_issue_prot(struct ata_queued_cmd *qc); @@ -420,6 +421,7 @@ extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, extern unsigned int ata_dev_classify(struct ata_taskfile *tf); extern void ata_dev_id_string(u16 *id, unsigned char *s, unsigned int ofs, unsigned int len); +extern void ata_dev_config(struct ata_port *ap, unsigned int i); extern void ata_bmdma_setup (struct ata_queued_cmd *qc); extern void 
ata_bmdma_start (struct ata_queued_cmd *qc); extern void ata_bmdma_stop(struct ata_port *ap); @@ -466,12 +468,34 @@ static inline u8 ata_chk_status(struct ata_port *ap) return ap->ops->check_status(ap); } + +/** + * ata_pause - Flush writes and pause 400 nanoseconds. + * @ap: Port to wait for. + * + * LOCKING: + * Inherited from caller. + */ + static inline void ata_pause(struct ata_port *ap) { ata_altstatus(ap); ndelay(400); } + +/** + * ata_busy_wait - Wait for a port status register + * @ap: Port to wait for. + * + * Waits up to max*10 microseconds for the selected bits in the port's + * status register to be cleared. + * Returns final value of status register. + * + * LOCKING: + * Inherited from caller. + */ + static inline u8 ata_busy_wait(struct ata_port *ap, unsigned int bits, unsigned int max) { @@ -486,6 +510,18 @@ static inline u8 ata_busy_wait(struct ata_port *ap, unsigned int bits, return status; } + +/** + * ata_wait_idle - Wait for a port to be idle. + * @ap: Port to wait for. + * + * Waits up to 10ms for port's BUSY and DRQ signals to clear. + * Returns final value of status register. + * + * LOCKING: + * Inherited from caller. + */ + static inline u8 ata_wait_idle(struct ata_port *ap) { u8 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); @@ -524,6 +560,18 @@ static inline void ata_tf_init(struct ata_port *ap, struct ata_taskfile *tf, uns tf->device = ATA_DEVICE_OBS | ATA_DEV1; } + +/** + * ata_irq_on - Enable interrupts on a port. + * @ap: Port on which interrupts are enabled. + * + * Enable interrupts on a legacy IDE device using MMIO or PIO, + * wait for idle, clear any pending interrupts. + * + * LOCKING: + * Inherited from caller. + */ + static inline u8 ata_irq_on(struct ata_port *ap) { struct ata_ioports *ioaddr = &ap->ioaddr; @@ -543,6 +591,18 @@ static inline u8 ata_irq_on(struct ata_port *ap) return tmp; } + +/** + * ata_irq_ack - Acknowledge a device interrupt. + * @ap: Port on which interrupts are enabled. + * + * Wait up to 10 ms for legacy IDE device to become idle (BUSY + * or BUSY+DRQ clear). Obtain dma status and port status from + * device. Clear the interrupt. Return port status. + * + * LOCKING: + */ + static inline u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq) { unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY; diff --git a/include/linux/list.h b/include/linux/list.h index 399b51d..aab2db2 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -185,7 +185,7 @@ static inline void list_del(struct list_head *entry) * list_for_each_entry_rcu(). * * Note that the caller is not permitted to immediately free - * the newly deleted entry. Instead, either synchronize_kernel() + * the newly deleted entry. Instead, either synchronize_rcu() * or call_rcu() must be used to defer freeing until an RCU * grace period has elapsed. */ diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index 0d9d225..16d4e5a 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -72,6 +72,8 @@ struct nlm_lockowner { uint32_t pid; }; +struct nlm_wait; + /* * Memory chunk for NLM client RPC request. 
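
The list.h comment above (synchronize_rcu() or call_rcu() instead of synchronize_kernel()) refers to the usual RCU deletion rule. A minimal sketch of the deferred-free pattern it describes, with a hypothetical struct foo carrying its own rcu_head (illustrative only):

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {                            /* hypothetical RCU-protected list element */
        struct list_head list;
        struct rcu_head rcu;
};

static void foo_free_rcu(struct rcu_head *head)
{
        kfree(container_of(head, struct foo, rcu));
}

static void foo_del(struct foo *f)
{
        list_del_rcu(&f->list);         /* unlink; readers may still see it */
        call_rcu(&f->rcu, foo_free_rcu);        /* free only after a grace period */
}
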
*/ @@ -81,6 +83,7 @@ struct nlm_rqst { struct nlm_host * a_host; /* host handle */ struct nlm_args a_args; /* arguments */ struct nlm_res a_res; /* result */ + struct nlm_wait * a_block; char a_owner[NLMCLNT_OHSIZE]; }; @@ -142,7 +145,9 @@ extern unsigned long nlmsvc_timeout; * Lockd client functions */ struct nlm_rqst * nlmclnt_alloc_call(void); -int nlmclnt_block(struct nlm_host *, struct file_lock *, u32 *); +int nlmclnt_prepare_block(struct nlm_rqst *req, struct nlm_host *host, struct file_lock *fl); +void nlmclnt_finish_block(struct nlm_rqst *req); +long nlmclnt_block(struct nlm_rqst *req, long timeout); int nlmclnt_cancel(struct nlm_host *, struct file_lock *); u32 nlmclnt_grant(struct nlm_lock *); void nlmclnt_recovery(struct nlm_host *, u32); diff --git a/include/linux/loop.h b/include/linux/loop.h index 8220d9c..53fa515 100644 --- a/include/linux/loop.h +++ b/include/linux/loop.h @@ -61,7 +61,7 @@ struct loop_device { struct semaphore lo_sem; struct semaphore lo_ctl_mutex; struct semaphore lo_bh_mutex; - atomic_t lo_pending; + int lo_pending; request_queue_t *lo_queue; }; diff --git a/include/linux/major.h b/include/linux/major.h index 4b62c42..e36a467 100644 --- a/include/linux/major.h +++ b/include/linux/major.h @@ -100,6 +100,7 @@ #define I2O_MAJOR 80 /* 80->87 */ #define SHMIQ_MAJOR 85 /* Linux/mips, SGI /dev/shmiq */ +#define SCSI_CHANGER_MAJOR 86 #define IDE6_MAJOR 88 #define IDE7_MAJOR 89 diff --git a/include/linux/mempool.h b/include/linux/mempool.h index 4a36edf..796220c 100644 --- a/include/linux/mempool.h +++ b/include/linux/mempool.h @@ -20,9 +20,14 @@ typedef struct mempool_s { mempool_free_t *free; wait_queue_head_t wait; } mempool_t; -extern mempool_t * mempool_create(int min_nr, mempool_alloc_t *alloc_fn, - mempool_free_t *free_fn, void *pool_data); -extern int mempool_resize(mempool_t *pool, int new_min_nr, unsigned int __nocast gfp_mask); + +extern mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn, + mempool_free_t *free_fn, void *pool_data); +extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn, + mempool_free_t *free_fn, void *pool_data, int nid); + +extern int mempool_resize(mempool_t *pool, int new_min_nr, + unsigned int __nocast gfp_mask); extern void mempool_destroy(mempool_t *pool); extern void * mempool_alloc(mempool_t *pool, unsigned int __nocast gfp_mask); extern void mempool_free(void *element, mempool_t *pool); diff --git a/include/linux/mii.h b/include/linux/mii.h index 20971fe..374b615 100644 --- a/include/linux/mii.h +++ b/include/linux/mii.h @@ -65,9 +65,13 @@ #define ADVERTISE_SLCT 0x001f /* Selector bits */ #define ADVERTISE_CSMA 0x0001 /* Only selector supported */ #define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */ +#define ADVERTISE_1000XFULL 0x0020 /* Try for 1000BASE-X full-duplex */ #define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */ +#define ADVERTISE_1000XHALF 0x0040 /* Try for 1000BASE-X half-duplex */ #define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */ +#define ADVERTISE_1000XPAUSE 0x0080 /* Try for 1000BASE-X pause */ #define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */ +#define ADVERTISE_1000XPSE_ASYM 0x0100 /* Try for 1000BASE-X asym pause */ #define ADVERTISE_100BASE4 0x0200 /* Try for 100mbps 4k packets */ #define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */ #define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymetric pause */ @@ -84,9 +88,13 @@ /* Link partner ability register. 
*/ #define LPA_SLCT 0x001f /* Same as advertise selector */ #define LPA_10HALF 0x0020 /* Can do 10mbps half-duplex */ +#define LPA_1000XFULL 0x0020 /* Can do 1000BASE-X full-duplex */ #define LPA_10FULL 0x0040 /* Can do 10mbps full-duplex */ +#define LPA_1000XHALF 0x0040 /* Can do 1000BASE-X half-duplex */ #define LPA_100HALF 0x0080 /* Can do 100mbps half-duplex */ +#define LPA_1000XPAUSE 0x0080 /* Can do 1000BASE-X pause */ #define LPA_100FULL 0x0100 /* Can do 100mbps full-duplex */ +#define LPA_1000XPAUSE_ASYM 0x0100 /* Can do 1000BASE-X pause asym*/ #define LPA_100BASE4 0x0200 /* Can do 100mbps 4k packets */ #define LPA_PAUSE_CAP 0x0400 /* Can pause */ #define LPA_PAUSE_ASYM 0x0800 /* Can pause asymetrically */ diff --git a/include/linux/mm.h b/include/linux/mm.h index 17518fe..6eb7f48 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -395,19 +395,81 @@ static inline void put_page(struct page *page) /* * The zone field is never updated after free_area_init_core() * sets it, so none of the operations on it need to be atomic. - * We'll have up to (MAX_NUMNODES * MAX_NR_ZONES) zones total, - * so we use (MAX_NODES_SHIFT + MAX_ZONES_SHIFT) here to get enough bits. */ -#define NODEZONE_SHIFT (sizeof(page_flags_t)*8 - MAX_NODES_SHIFT - MAX_ZONES_SHIFT) -#define NODEZONE(node, zone) ((node << ZONES_SHIFT) | zone) + + +/* + * page->flags layout: + * + * There are three possibilities for how page->flags get + * laid out. The first is for the normal case, without + * sparsemem. The second is for sparsemem when there is + * plenty of space for node and section. The last is when + * we have run out of space and have to fall back to an + * alternate (slower) way of determining the node. + * + * No sparsemem: | NODE | ZONE | ... | FLAGS | + * with space for node: | SECTION | NODE | ZONE | ... | FLAGS | + * no space for node: | SECTION | ZONE | ... | FLAGS | + */ +#ifdef CONFIG_SPARSEMEM +#define SECTIONS_WIDTH SECTIONS_SHIFT +#else +#define SECTIONS_WIDTH 0 +#endif + +#define ZONES_WIDTH ZONES_SHIFT + +#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= FLAGS_RESERVED +#define NODES_WIDTH NODES_SHIFT +#else +#define NODES_WIDTH 0 +#endif + +/* Page flags: | [SECTION] | [NODE] | ZONE | ... | FLAGS | */ +#define SECTIONS_PGOFF ((sizeof(page_flags_t)*8) - SECTIONS_WIDTH) +#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH) +#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH) + +/* + * We are going to use the flags for the page to node mapping if its in + * there. This includes the case where there is no node, so it is implicit. + */ +#define FLAGS_HAS_NODE (NODES_WIDTH > 0 || NODES_SHIFT == 0) + +#ifndef PFN_SECTION_SHIFT +#define PFN_SECTION_SHIFT 0 +#endif + +/* + * Define the bit shifts to access each section. For non-existant + * sections we define the shift as 0; that plus a 0 mask ensures + * the compiler will optimise away reference to them. + */ +#define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0)) +#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0)) +#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0)) + +/* NODE:ZONE or SECTION:ZONE is used to lookup the zone from a page. 
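
The layout comment and the *_PGOFF/*_PGSHIFT/*_MASK definitions above pack the section, node and zone numbers into the top of page->flags. The standalone sketch below works one case through with assumed widths (no sparsemem, 32-bit flags, NODES_WIDTH = 6, ZONES_WIDTH = 2); the numbers are illustrative rather than taken from any particular config:

#include <stdio.h>

#define BITS            32
#define NODES_WIDTH     6
#define ZONES_WIDTH     2
#define NODES_PGOFF     (BITS - NODES_WIDTH)            /* 26 */
#define ZONES_PGOFF     (NODES_PGOFF - ZONES_WIDTH)     /* 24 */
#define NODES_MASK      ((1UL << NODES_WIDTH) - 1)
#define ZONES_MASK      ((1UL << ZONES_WIDTH) - 1)

int main(void)
{
        unsigned long flags = 0;
        unsigned long node = 3, zone = 1;               /* e.g. ZONE_NORMAL on node 3 */

        flags |= (node & NODES_MASK) << NODES_PGOFF;
        flags |= (zone & ZONES_MASK) << ZONES_PGOFF;

        printf("flags=%#lx node=%lu zone=%lu\n", flags,
               (flags >> NODES_PGOFF) & NODES_MASK,
               (flags >> ZONES_PGOFF) & ZONES_MASK);
        return 0;
}
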
*/ +#if FLAGS_HAS_NODE +#define ZONETABLE_SHIFT (NODES_SHIFT + ZONES_SHIFT) +#else +#define ZONETABLE_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT) +#endif +#define ZONETABLE_PGSHIFT ZONES_PGSHIFT + +#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED +#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED +#endif + +#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1) +#define NODES_MASK ((1UL << NODES_WIDTH) - 1) +#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1) +#define ZONETABLE_MASK ((1UL << ZONETABLE_SHIFT) - 1) static inline unsigned long page_zonenum(struct page *page) { - return (page->flags >> NODEZONE_SHIFT) & (~(~0UL << ZONES_SHIFT)); -} -static inline unsigned long page_to_nid(struct page *page) -{ - return (page->flags >> (NODEZONE_SHIFT + ZONES_SHIFT)); + return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK; } struct zone; @@ -415,13 +477,44 @@ extern struct zone *zone_table[]; static inline struct zone *page_zone(struct page *page) { - return zone_table[page->flags >> NODEZONE_SHIFT]; + return zone_table[(page->flags >> ZONETABLE_PGSHIFT) & + ZONETABLE_MASK]; } -static inline void set_page_zone(struct page *page, unsigned long nodezone_num) +static inline unsigned long page_to_nid(struct page *page) +{ + if (FLAGS_HAS_NODE) + return (page->flags >> NODES_PGSHIFT) & NODES_MASK; + else + return page_zone(page)->zone_pgdat->node_id; +} +static inline unsigned long page_to_section(struct page *page) { - page->flags &= ~(~0UL << NODEZONE_SHIFT); - page->flags |= nodezone_num << NODEZONE_SHIFT; + return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK; +} + +static inline void set_page_zone(struct page *page, unsigned long zone) +{ + page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT); + page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT; +} +static inline void set_page_node(struct page *page, unsigned long node) +{ + page->flags &= ~(NODES_MASK << NODES_PGSHIFT); + page->flags |= (node & NODES_MASK) << NODES_PGSHIFT; +} +static inline void set_page_section(struct page *page, unsigned long section) +{ + page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT); + page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT; +} + +static inline void set_page_links(struct page *page, unsigned long zone, + unsigned long node, unsigned long pfn) +{ + set_page_zone(page, zone); + set_page_node(page, node); + set_page_section(page, pfn_to_section_nr(pfn)); } #ifndef CONFIG_DISCONTIGMEM @@ -691,6 +784,12 @@ extern void show_mem(void); extern void si_meminfo(struct sysinfo * val); extern void si_meminfo_node(struct sysinfo *val, int nid); +#ifdef CONFIG_NUMA +extern void setup_per_cpu_pageset(void); +#else +static inline void setup_per_cpu_pageset(void) {} +#endif + /* prio_tree.c */ void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old); void vma_prio_tree_insert(struct vm_area_struct *, struct prio_tree_root *); diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index e530c6c..6c90461 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -63,6 +63,12 @@ struct per_cpu_pageset { #endif } ____cacheline_aligned_in_smp; +#ifdef CONFIG_NUMA +#define zone_pcp(__z, __cpu) ((__z)->pageset[(__cpu)]) +#else +#define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)]) +#endif + #define ZONE_DMA 0 #define ZONE_NORMAL 1 #define ZONE_HIGHMEM 2 @@ -122,8 +128,11 @@ struct zone { */ unsigned long lowmem_reserve[MAX_NR_ZONES]; +#ifdef CONFIG_NUMA + struct per_cpu_pageset *pageset[NR_CPUS]; +#else struct per_cpu_pageset pageset[NR_CPUS]; - +#endif /* * free areas of 
different sizes */ @@ -145,6 +154,14 @@ struct zone { int all_unreclaimable; /* All pages pinned */ /* + * Does the allocator try to reclaim pages from the zone as soon + * as it fails a watermark_ok() in __alloc_pages? + */ + int reclaim_pages; + /* A count of how many reclaimers are scanning this zone */ + atomic_t reclaim_in_progress; + + /* * prev_priority holds the scanning priority for this zone. It is * defined as the scanning priority at which we achieved our reclaim * target at the previous try_to_free_pages() or balance_pgdat() @@ -252,7 +269,9 @@ typedef struct pglist_data { struct zone node_zones[MAX_NR_ZONES]; struct zonelist node_zonelists[GFP_ZONETYPES]; int nr_zones; +#ifdef CONFIG_FLAT_NODE_MEM_MAP struct page *node_mem_map; +#endif struct bootmem_data *bdata; unsigned long node_start_pfn; unsigned long node_present_pages; /* total number of physical pages */ @@ -267,6 +286,12 @@ typedef struct pglist_data { #define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages) #define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages) +#ifdef CONFIG_FLAT_NODE_MEM_MAP +#define pgdat_page_nr(pgdat, pagenr) ((pgdat)->node_mem_map + (pagenr)) +#else +#define pgdat_page_nr(pgdat, pagenr) pfn_to_page((pgdat)->node_start_pfn + (pagenr)) +#endif +#define nid_page_nr(nid, pagenr) pgdat_page_nr(NODE_DATA(nid),(pagenr)) extern struct pglist_data *pgdat_list; @@ -381,9 +406,9 @@ int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *, #include <linux/topology.h> /* Returns the number of the current Node. */ -#define numa_node_id() (cpu_to_node(_smp_processor_id())) +#define numa_node_id() (cpu_to_node(raw_smp_processor_id())) -#ifndef CONFIG_DISCONTIGMEM +#ifndef CONFIG_NEED_MULTIPLE_NODES extern struct pglist_data contig_page_data; #define NODE_DATA(nid) (&contig_page_data) @@ -391,36 +416,177 @@ extern struct pglist_data contig_page_data; #define MAX_NODES_SHIFT 1 #define pfn_to_nid(pfn) (0) -#else /* CONFIG_DISCONTIGMEM */ +#else /* CONFIG_NEED_MULTIPLE_NODES */ #include <asm/mmzone.h> +#endif /* !CONFIG_NEED_MULTIPLE_NODES */ + +#ifdef CONFIG_SPARSEMEM +#include <asm/sparsemem.h> +#endif + #if BITS_PER_LONG == 32 || defined(ARCH_HAS_ATOMIC_UNSIGNED) /* * with 32 bit page->flags field, we reserve 8 bits for node/zone info. * there are 3 zones (2 bits) and this leaves 8-2=6 bits for nodes. */ -#define MAX_NODES_SHIFT 6 +#define FLAGS_RESERVED 8 + #elif BITS_PER_LONG == 64 /* * with 64 bit flags field, there's plenty of room. 
*/ -#define MAX_NODES_SHIFT 10 +#define FLAGS_RESERVED 32 + +#else + +#error BITS_PER_LONG not defined + #endif -#endif /* !CONFIG_DISCONTIGMEM */ +#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID +#define early_pfn_to_nid(nid) (0UL) +#endif + +#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT) +#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT) + +#ifdef CONFIG_SPARSEMEM + +/* + * SECTION_SHIFT #bits space required to store a section # + * + * PA_SECTION_SHIFT physical address to/from section number + * PFN_SECTION_SHIFT pfn to/from section number + */ +#define SECTIONS_SHIFT (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS) + +#define PA_SECTION_SHIFT (SECTION_SIZE_BITS) +#define PFN_SECTION_SHIFT (SECTION_SIZE_BITS - PAGE_SHIFT) + +#define NR_MEM_SECTIONS (1UL << SECTIONS_SHIFT) + +#define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT) +#define PAGE_SECTION_MASK (~(PAGES_PER_SECTION-1)) + +#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS +#error Allocator MAX_ORDER exceeds SECTION_SIZE +#endif + +struct page; +struct mem_section { + /* + * This is, logically, a pointer to an array of struct + * pages. However, it is stored with some other magic. + * (see sparse.c::sparse_init_one_section()) + * + * Making it a UL at least makes someone do a cast + * before using it wrong. + */ + unsigned long section_mem_map; +}; + +extern struct mem_section mem_section[NR_MEM_SECTIONS]; + +static inline struct mem_section *__nr_to_section(unsigned long nr) +{ + return &mem_section[nr]; +} + +/* + * We use the lower bits of the mem_map pointer to store + * a little bit of information. There should be at least + * 3 bits here due to 32-bit alignment. + */ +#define SECTION_MARKED_PRESENT (1UL<<0) +#define SECTION_HAS_MEM_MAP (1UL<<1) +#define SECTION_MAP_LAST_BIT (1UL<<2) +#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1)) + +static inline struct page *__section_mem_map_addr(struct mem_section *section) +{ + unsigned long map = section->section_mem_map; + map &= SECTION_MAP_MASK; + return (struct page *)map; +} + +static inline int valid_section(struct mem_section *section) +{ + return (section->section_mem_map & SECTION_MARKED_PRESENT); +} + +static inline int section_has_mem_map(struct mem_section *section) +{ + return (section->section_mem_map & SECTION_HAS_MEM_MAP); +} + +static inline int valid_section_nr(unsigned long nr) +{ + return valid_section(__nr_to_section(nr)); +} -#if NODES_SHIFT > MAX_NODES_SHIFT -#error NODES_SHIFT > MAX_NODES_SHIFT +/* + * Given a kernel address, find the home node of the underlying memory. + */ +#define kvaddr_to_nid(kaddr) pfn_to_nid(__pa(kaddr) >> PAGE_SHIFT) + +static inline struct mem_section *__pfn_to_section(unsigned long pfn) +{ + return __nr_to_section(pfn_to_section_nr(pfn)); +} + +#define pfn_to_page(pfn) \ +({ \ + unsigned long __pfn = (pfn); \ + __section_mem_map_addr(__pfn_to_section(__pfn)) + __pfn; \ +}) +#define page_to_pfn(page) \ +({ \ + page - __section_mem_map_addr(__nr_to_section( \ + page_to_section(page))); \ +}) + +static inline int pfn_valid(unsigned long pfn) +{ + if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) + return 0; + return valid_section(__nr_to_section(pfn_to_section_nr(pfn))); +} + +/* + * These are _only_ used during initialisation, therefore they + * can use __initdata ... They could have names to indicate + * this restriction. 
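
The mem_section comment above notes that the low bits of section_mem_map double as status flags. A standalone sketch of that encode/decode, with a made-up mem_map address standing in for the real struct page array (illustrative only):

#include <stdio.h>

#define SECTION_MARKED_PRESENT  (1UL << 0)
#define SECTION_HAS_MEM_MAP     (1UL << 1)
#define SECTION_MAP_LAST_BIT    (1UL << 2)
#define SECTION_MAP_MASK        (~(SECTION_MAP_LAST_BIT - 1))

int main(void)
{
        unsigned long fake_mem_map = 0x10008000UL;      /* pretend struct page array */
        unsigned long encoded = fake_mem_map |
                                SECTION_MARKED_PRESENT | SECTION_HAS_MEM_MAP;

        printf("encoded=%#lx map=%#lx present=%lu has_map=%lu\n",
               encoded,
               encoded & SECTION_MAP_MASK,              /* __section_mem_map_addr() */
               encoded & SECTION_MARKED_PRESENT,        /* valid_section() */
               (encoded & SECTION_HAS_MEM_MAP) >> 1);   /* section_has_mem_map() */
        return 0;
}
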
+ */ +#ifdef CONFIG_NUMA +#define pfn_to_nid early_pfn_to_nid #endif -/* There are currently 3 zones: DMA, Normal & Highmem, thus we need 2 bits */ -#define MAX_ZONES_SHIFT 2 +#define pfn_to_pgdat(pfn) \ +({ \ + NODE_DATA(pfn_to_nid(pfn)); \ +}) + +#define early_pfn_valid(pfn) pfn_valid(pfn) +void sparse_init(void); +#else +#define sparse_init() do {} while (0) +#endif /* CONFIG_SPARSEMEM */ -#if ZONES_SHIFT > MAX_ZONES_SHIFT -#error ZONES_SHIFT > MAX_ZONES_SHIFT +#ifdef CONFIG_NODES_SPAN_OTHER_NODES +#define early_pfn_in_nid(pfn, nid) (early_pfn_to_nid(pfn) == (nid)) +#else +#define early_pfn_in_nid(pfn, nid) (1) #endif +#ifndef early_pfn_valid +#define early_pfn_valid(pfn) (1) +#endif + +void memory_present(int nid, unsigned long start, unsigned long end); +unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long); + #endif /* !__ASSEMBLY__ */ #endif /* __KERNEL__ */ #endif /* _LINUX_MMZONE_H */ diff --git a/include/linux/module.h b/include/linux/module.h index 0e432a0..f05372b 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -51,6 +51,9 @@ struct module_attribute { ssize_t (*show)(struct module_attribute *, struct module *, char *); ssize_t (*store)(struct module_attribute *, struct module *, const char *, size_t count); + void (*setup)(struct module *, const char *); + int (*test)(struct module *); + void (*free)(struct module *); }; struct module_kobject @@ -239,6 +242,8 @@ struct module /* Sysfs stuff. */ struct module_kobject mkobj; struct module_param_attrs *param_attrs; + const char *version; + const char *srcversion; /* Exported symbols */ const struct kernel_symbol *syms; diff --git a/include/linux/namespace.h b/include/linux/namespace.h index 9eca155..697991b 100644 --- a/include/linux/namespace.h +++ b/include/linux/namespace.h @@ -12,7 +12,6 @@ struct namespace { struct rw_semaphore sem; }; -extern void umount_tree(struct vfsmount *); extern int copy_namespace(int, struct task_struct *); extern void __put_namespace(struct namespace *namespace); diff --git a/include/linux/net.h b/include/linux/net.h index 6d997ff..20cb226 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -101,7 +101,6 @@ enum sock_type { * @sk: internal networking protocol agnostic socket representation * @wait: wait queue for several uses * @type: socket type (%SOCK_STREAM, etc) - * @passcred: credentials (used only in Unix Sockets (aka PF_LOCAL)) */ struct socket { socket_state state; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index ac11d73..3a0ed7f 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -41,7 +41,7 @@ struct divert_blk; struct vlan_group; struct ethtool_ops; -struct netpoll; +struct netpoll_info; /* source back-compat hooks */ #define SET_ETHTOOL_OPS(netdev,ops) \ ( (netdev)->ethtool_ops = (ops) ) @@ -164,12 +164,6 @@ struct netif_rx_stats unsigned total; unsigned dropped; unsigned time_squeeze; - unsigned throttled; - unsigned fastroute_hit; - unsigned fastroute_success; - unsigned fastroute_defer; - unsigned fastroute_deferred_out; - unsigned fastroute_latency_reduction; unsigned cpu_collision; }; @@ -204,7 +198,7 @@ struct hh_cache /* cached hardware header; allow for machine alignment needs. 
*/ #define HH_DATA_MOD 16 #define HH_DATA_OFF(__len) \ - (HH_DATA_MOD - ((__len) & (HH_DATA_MOD - 1))) + (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1)) #define HH_DATA_ALIGN(__len) \ (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1)) unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)]; @@ -401,7 +395,7 @@ struct net_device } reg_state; /* Net device features */ - int features; + unsigned long features; #define NETIF_F_SG 1 /* Scatter/gather IO. */ #define NETIF_F_IP_CSUM 2 /* Can checksum only TCP/UDP over IPv4. */ #define NETIF_F_NO_CSUM 4 /* Does not require checksum. F.e. loopack. */ @@ -468,7 +462,7 @@ struct net_device unsigned char *haddr); int (*neigh_setup)(struct net_device *dev, struct neigh_parms *); #ifdef CONFIG_NETPOLL - struct netpoll *np; + struct netpoll_info *npinfo; #endif #ifdef CONFIG_NET_POLL_CONTROLLER void (*poll_controller)(struct net_device *dev); @@ -503,7 +497,7 @@ static inline void *netdev_priv(struct net_device *dev) #define SET_NETDEV_DEV(net, pdev) ((net)->class_dev.dev = (pdev)) struct packet_type { - unsigned short type; /* This is really htons(ether_type). */ + __be16 type; /* This is really htons(ether_type). */ struct net_device *dev; /* NULL is wildcarded here */ int (*func) (struct sk_buff *, struct net_device *, struct packet_type *); @@ -562,12 +556,9 @@ static inline int unregister_gifconf(unsigned int family) struct softnet_data { - int throttle; - int cng_level; - int avg_blog; + struct net_device *output_queue; struct sk_buff_head input_pkt_queue; struct list_head poll_list; - struct net_device *output_queue; struct sk_buff *completion_queue; struct net_device backlog_dev; /* Sorry. 8) */ @@ -913,6 +904,7 @@ extern void dev_mc_discard(struct net_device *dev); extern void dev_set_promiscuity(struct net_device *dev, int inc); extern void dev_set_allmulti(struct net_device *dev, int inc); extern void netdev_state_change(struct net_device *dev); +extern void netdev_features_change(struct net_device *dev); /* Load a device via the kmod */ extern void dev_load(const char *name); extern void dev_mcast_init(void); @@ -924,10 +916,6 @@ extern int skb_checksum_help(struct sk_buff *skb, int inward); extern void net_enable_timestamp(void); extern void net_disable_timestamp(void); -#ifdef CONFIG_SYSCTL -extern char *net_sysctl_strdup(const char *s); -#endif - #endif /* __KERNEL__ */ #endif /* _LINUX_DEV_H */ diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h index 9e57500..3ebc36a 100644 --- a/include/linux/netfilter_ipv4.h +++ b/include/linux/netfilter_ipv4.h @@ -75,12 +75,6 @@ enum nf_ip_hook_priorities { #define SO_ORIGINAL_DST 80 #ifdef __KERNEL__ -#ifdef CONFIG_NETFILTER_DEBUG -void nf_debug_ip_local_deliver(struct sk_buff *skb); -void nf_debug_ip_loopback_xmit(struct sk_buff *newskb); -void nf_debug_ip_finish_output2(struct sk_buff *skb); -#endif /*CONFIG_NETFILTER_DEBUG*/ - extern int ip_route_me_harder(struct sk_buff **pskb); /* Call this before modifying an existing IP packet: ensures it is diff --git a/include/linux/netfilter_ipv4/ip_conntrack_core.h b/include/linux/netfilter_ipv4/ip_conntrack_core.h index d84be02..694aec9 100644 --- a/include/linux/netfilter_ipv4/ip_conntrack_core.h +++ b/include/linux/netfilter_ipv4/ip_conntrack_core.h @@ -1,7 +1,6 @@ #ifndef _IP_CONNTRACK_CORE_H #define _IP_CONNTRACK_CORE_H #include <linux/netfilter.h> -#include <linux/netfilter_ipv4/lockhelp.h> /* This header is used to share core functionality between the standalone connection tracking module, and the 
compatibility layer's use @@ -47,6 +46,6 @@ static inline int ip_conntrack_confirm(struct sk_buff **pskb) extern struct list_head *ip_conntrack_hash; extern struct list_head ip_conntrack_expect_list; -DECLARE_RWLOCK_EXTERN(ip_conntrack_lock); +extern rwlock_t ip_conntrack_lock; #endif /* _IP_CONNTRACK_CORE_H */ diff --git a/include/linux/netfilter_ipv4/ip_nat.h b/include/linux/netfilter_ipv4/ip_nat.h index 2b72b86..e201ec6 100644 --- a/include/linux/netfilter_ipv4/ip_nat.h +++ b/include/linux/netfilter_ipv4/ip_nat.h @@ -50,10 +50,9 @@ struct ip_nat_multi_range_compat #ifdef __KERNEL__ #include <linux/list.h> -#include <linux/netfilter_ipv4/lockhelp.h> /* Protects NAT hash tables, and NAT-private part of conntracks. */ -DECLARE_RWLOCK_EXTERN(ip_nat_lock); +extern rwlock_t ip_nat_lock; /* The structure embedded in the conntrack structure. */ struct ip_nat_info diff --git a/include/linux/netfilter_ipv4/ipt_CLUSTERIP.h b/include/linux/netfilter_ipv4/ipt_CLUSTERIP.h index baa83e7..d9bceed 100644 --- a/include/linux/netfilter_ipv4/ipt_CLUSTERIP.h +++ b/include/linux/netfilter_ipv4/ipt_CLUSTERIP.h @@ -18,7 +18,6 @@ struct clusterip_config; struct ipt_clusterip_tgt_info { u_int32_t flags; - struct clusterip_config *config; /* only relevant for new ones */ u_int8_t clustermac[6]; @@ -27,6 +26,8 @@ struct ipt_clusterip_tgt_info { u_int16_t local_nodes[CLUSTERIP_MAX_NODES]; enum clusterip_hashmode hash_mode; u_int32_t hash_initval; + + struct clusterip_config *config; }; #endif /*_IPT_CLUSTERIP_H_target*/ diff --git a/include/linux/netfilter_ipv4/listhelp.h b/include/linux/netfilter_ipv4/listhelp.h index f2ae7c5..360429f 100644 --- a/include/linux/netfilter_ipv4/listhelp.h +++ b/include/linux/netfilter_ipv4/listhelp.h @@ -2,7 +2,6 @@ #define _LISTHELP_H #include <linux/config.h> #include <linux/list.h> -#include <linux/netfilter_ipv4/lockhelp.h> /* Header to do more comprehensive job than linux/list.h; assume list is first entry in structure. */ diff --git a/include/linux/netfilter_ipv4/lockhelp.h b/include/linux/netfilter_ipv4/lockhelp.h deleted file mode 100644 index a328863..0000000 --- a/include/linux/netfilter_ipv4/lockhelp.h +++ /dev/null @@ -1,129 +0,0 @@ -#ifndef _LOCKHELP_H -#define _LOCKHELP_H -#include <linux/config.h> - -#include <linux/spinlock.h> -#include <asm/atomic.h> -#include <linux/interrupt.h> -#include <linux/smp.h> - -/* Header to do help in lock debugging. */ - -#ifdef CONFIG_NETFILTER_DEBUG -struct spinlock_debug -{ - spinlock_t l; - atomic_t locked_by; -}; - -struct rwlock_debug -{ - rwlock_t l; - long read_locked_map; - long write_locked_map; -}; - -#define DECLARE_LOCK(l) \ -struct spinlock_debug l = { SPIN_LOCK_UNLOCKED, ATOMIC_INIT(-1) } -#define DECLARE_LOCK_EXTERN(l) \ -extern struct spinlock_debug l -#define DECLARE_RWLOCK(l) \ -struct rwlock_debug l = { RW_LOCK_UNLOCKED, 0, 0 } -#define DECLARE_RWLOCK_EXTERN(l) \ -extern struct rwlock_debug l - -#define MUST_BE_LOCKED(l) \ -do { if (atomic_read(&(l)->locked_by) != smp_processor_id()) \ - printk("ASSERT %s:%u %s unlocked\n", __FILE__, __LINE__, #l); \ -} while(0) - -#define MUST_BE_UNLOCKED(l) \ -do { if (atomic_read(&(l)->locked_by) == smp_processor_id()) \ - printk("ASSERT %s:%u %s locked\n", __FILE__, __LINE__, #l); \ -} while(0) - -/* Write locked OK as well. 
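
With lockhelp.h removed, the DECLARE_RWLOCK wrappers above give way to ordinary rwlock_t declarations such as ip_conntrack_lock, and callers use the plain primitives directly. A minimal sketch of what a read-side caller now looks like; the traversal is hypothetical and only the lock calls are the point:

#include <linux/spinlock.h>

extern rwlock_t ip_conntrack_lock;      /* as declared in ip_conntrack_core.h above */

static void example_read_side(void)
{
        read_lock_bh(&ip_conntrack_lock);
        /* ... walk ip_conntrack_hash under the read lock ... */
        read_unlock_bh(&ip_conntrack_lock);
}
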
*/ -#define MUST_BE_READ_LOCKED(l) \ -do { if (!((l)->read_locked_map & (1UL << smp_processor_id())) \ - && !((l)->write_locked_map & (1UL << smp_processor_id()))) \ - printk("ASSERT %s:%u %s not readlocked\n", __FILE__, __LINE__, #l); \ -} while(0) - -#define MUST_BE_WRITE_LOCKED(l) \ -do { if (!((l)->write_locked_map & (1UL << smp_processor_id()))) \ - printk("ASSERT %s:%u %s not writelocked\n", __FILE__, __LINE__, #l); \ -} while(0) - -#define MUST_BE_READ_WRITE_UNLOCKED(l) \ -do { if ((l)->read_locked_map & (1UL << smp_processor_id())) \ - printk("ASSERT %s:%u %s readlocked\n", __FILE__, __LINE__, #l); \ - else if ((l)->write_locked_map & (1UL << smp_processor_id())) \ - printk("ASSERT %s:%u %s writelocked\n", __FILE__, __LINE__, #l); \ -} while(0) - -#define LOCK_BH(lk) \ -do { \ - MUST_BE_UNLOCKED(lk); \ - spin_lock_bh(&(lk)->l); \ - atomic_set(&(lk)->locked_by, smp_processor_id()); \ -} while(0) - -#define UNLOCK_BH(lk) \ -do { \ - MUST_BE_LOCKED(lk); \ - atomic_set(&(lk)->locked_by, -1); \ - spin_unlock_bh(&(lk)->l); \ -} while(0) - -#define READ_LOCK(lk) \ -do { \ - MUST_BE_READ_WRITE_UNLOCKED(lk); \ - read_lock_bh(&(lk)->l); \ - set_bit(smp_processor_id(), &(lk)->read_locked_map); \ -} while(0) - -#define WRITE_LOCK(lk) \ -do { \ - MUST_BE_READ_WRITE_UNLOCKED(lk); \ - write_lock_bh(&(lk)->l); \ - set_bit(smp_processor_id(), &(lk)->write_locked_map); \ -} while(0) - -#define READ_UNLOCK(lk) \ -do { \ - if (!((lk)->read_locked_map & (1UL << smp_processor_id()))) \ - printk("ASSERT: %s:%u %s not readlocked\n", \ - __FILE__, __LINE__, #lk); \ - clear_bit(smp_processor_id(), &(lk)->read_locked_map); \ - read_unlock_bh(&(lk)->l); \ -} while(0) - -#define WRITE_UNLOCK(lk) \ -do { \ - MUST_BE_WRITE_LOCKED(lk); \ - clear_bit(smp_processor_id(), &(lk)->write_locked_map); \ - write_unlock_bh(&(lk)->l); \ -} while(0) - -#else -#define DECLARE_LOCK(l) spinlock_t l = SPIN_LOCK_UNLOCKED -#define DECLARE_LOCK_EXTERN(l) extern spinlock_t l -#define DECLARE_RWLOCK(l) rwlock_t l = RW_LOCK_UNLOCKED -#define DECLARE_RWLOCK_EXTERN(l) extern rwlock_t l - -#define MUST_BE_LOCKED(l) -#define MUST_BE_UNLOCKED(l) -#define MUST_BE_READ_LOCKED(l) -#define MUST_BE_WRITE_LOCKED(l) -#define MUST_BE_READ_WRITE_UNLOCKED(l) - -#define LOCK_BH(l) spin_lock_bh(l) -#define UNLOCK_BH(l) spin_unlock_bh(l) - -#define READ_LOCK(l) read_lock_bh(l) -#define WRITE_LOCK(l) write_lock_bh(l) -#define READ_UNLOCK(l) read_unlock_bh(l) -#define WRITE_UNLOCK(l) write_unlock_bh(l) -#endif /*CONFIG_NETFILTER_DEBUG*/ - -#endif /* _LOCKHELP_H */ diff --git a/include/linux/netlink.h b/include/linux/netlink.h index b2738ac..3029cad 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -14,6 +14,7 @@ #define NETLINK_SELINUX 7 /* SELinux event notifications */ #define NETLINK_ARPD 8 #define NETLINK_AUDIT 9 /* auditing */ +#define NETLINK_FIB_LOOKUP 10 #define NETLINK_ROUTE6 11 /* af_inet6 route comm channel */ #define NETLINK_IP6_FW 13 #define NETLINK_DNRTMSG 14 /* DECnet routing messages */ @@ -146,7 +147,7 @@ struct netlink_callback int (*dump)(struct sk_buff * skb, struct netlink_callback *cb); int (*done)(struct netlink_callback *cb); int family; - long args[4]; + long args[5]; }; struct netlink_notify @@ -156,7 +157,7 @@ struct netlink_notify }; static __inline__ struct nlmsghdr * -__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len) +__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags) { struct nlmsghdr *nlh; int size = NLMSG_LENGTH(len); @@ -164,15 +165,31 @@ 
__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len) nlh = (struct nlmsghdr*)skb_put(skb, NLMSG_ALIGN(size)); nlh->nlmsg_type = type; nlh->nlmsg_len = size; - nlh->nlmsg_flags = 0; + nlh->nlmsg_flags = flags; nlh->nlmsg_pid = pid; nlh->nlmsg_seq = seq; return nlh; } +#define NLMSG_NEW(skb, pid, seq, type, len, flags) \ +({ if (skb_tailroom(skb) < (int)NLMSG_SPACE(len)) \ + goto nlmsg_failure; \ + __nlmsg_put(skb, pid, seq, type, len, flags); }) + #define NLMSG_PUT(skb, pid, seq, type, len) \ -({ if (skb_tailroom(skb) < (int)NLMSG_SPACE(len)) goto nlmsg_failure; \ - __nlmsg_put(skb, pid, seq, type, len); }) + NLMSG_NEW(skb, pid, seq, type, len, 0) + +#define NLMSG_NEW_ANSWER(skb, cb, type, len, flags) \ + NLMSG_NEW(skb, NETLINK_CB((cb)->skb).pid, \ + (cb)->nlh->nlmsg_seq, type, len, flags) + +#define NLMSG_END(skb, nlh) \ +({ (nlh)->nlmsg_len = (skb)->tail - (unsigned char *) (nlh); \ + (skb)->len; }) + +#define NLMSG_CANCEL(skb, nlh) \ +({ skb_trim(skb, (unsigned char *) (nlh) - (skb)->data); \ + -1; }) extern int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, struct nlmsghdr *nlh, diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index c0d8b90..bcd0ac3 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h @@ -16,14 +16,19 @@ struct netpoll; struct netpoll { struct net_device *dev; char dev_name[16], *name; - int rx_flags; void (*rx_hook)(struct netpoll *, int, char *, int); void (*drop)(struct sk_buff *skb); u32 local_ip, remote_ip; u16 local_port, remote_port; unsigned char local_mac[6], remote_mac[6]; +}; + +struct netpoll_info { spinlock_t poll_lock; int poll_owner; + int rx_flags; + spinlock_t rx_lock; + struct netpoll *rx_np; /* netpoll that registered an rx_hook */ }; void netpoll_poll(struct netpoll *np); @@ -39,22 +44,35 @@ void netpoll_queue(struct sk_buff *skb); #ifdef CONFIG_NETPOLL static inline int netpoll_rx(struct sk_buff *skb) { - return skb->dev->np && skb->dev->np->rx_flags && __netpoll_rx(skb); + struct netpoll_info *npinfo = skb->dev->npinfo; + unsigned long flags; + int ret = 0; + + if (!npinfo || (!npinfo->rx_np && !npinfo->rx_flags)) + return 0; + + spin_lock_irqsave(&npinfo->rx_lock, flags); + /* check rx_flags again with the lock held */ + if (npinfo->rx_flags && __netpoll_rx(skb)) + ret = 1; + spin_unlock_irqrestore(&npinfo->rx_lock, flags); + + return ret; } static inline void netpoll_poll_lock(struct net_device *dev) { - if (dev->np) { - spin_lock(&dev->np->poll_lock); - dev->np->poll_owner = smp_processor_id(); + if (dev->npinfo) { + spin_lock(&dev->npinfo->poll_lock); + dev->npinfo->poll_owner = smp_processor_id(); } } static inline void netpoll_poll_unlock(struct net_device *dev) { - if (dev->np) { - spin_unlock(&dev->np->poll_lock); - dev->np->poll_owner = -1; + if (dev->npinfo) { + dev->npinfo->poll_owner = -1; + spin_unlock(&dev->npinfo->poll_lock); } } diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 5ca8a8d..0c1c306 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -28,7 +28,7 @@ #define NFS4_ACCESS_DELETE 0x0010 #define NFS4_ACCESS_EXECUTE 0x0020 -#define NFS4_FH_PERISTENT 0x0000 +#define NFS4_FH_PERSISTENT 0x0000 #define NFS4_FH_NOEXPIRE_WITH_OPEN 0x0001 #define NFS4_FH_VOLATILE_ANY 0x0002 #define NFS4_FH_VOL_MIGRATION 0x0004 @@ -382,6 +382,8 @@ enum { NFSPROC4_CLNT_READDIR, NFSPROC4_CLNT_SERVER_CAPS, NFSPROC4_CLNT_DELEGRETURN, + NFSPROC4_CLNT_GETACL, + NFSPROC4_CLNT_SETACL, }; #endif diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index dbac7f3..8ea2491 
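The netlink.h hunk above threads a flags argument through __nlmsg_put() and layers NLMSG_NEW (plus NLMSG_END/NLMSG_CANCEL for finishing or aborting a partially built message) over the old NLMSG_PUT, so dump code can set NLM_F_MULTI and friends instead of always sending nlmsg_flags = 0. The header layout is visible from userspace through the same UAPI macros; the sketch below writes into a flat buffer instead of an skb, with put_msg() being a stand-in of mine for NLMSG_NEW, and the final length set up front because the payload size is already known (the kernel's NLMSG_END fixes it up afterwards from skb->tail):

#include <sys/socket.h>
#include <linux/netlink.h>
#include <stdio.h>
#include <string.h>

static struct nlmsghdr *put_msg(char *buf, size_t *used, size_t size,
                                __u32 pid, __u32 seq, int type, int len,
                                int flags)
{
        struct nlmsghdr *nlh;

        /* the tailroom check NLMSG_NEW performs before calling __nlmsg_put() */
        if (size - *used < NLMSG_SPACE(len))
                return NULL;

        nlh = (struct nlmsghdr *)(buf + *used);
        nlh->nlmsg_len   = NLMSG_LENGTH(len);
        nlh->nlmsg_type  = type;
        nlh->nlmsg_flags = flags;       /* caller-supplied, no longer forced to 0 */
        nlh->nlmsg_seq   = seq;
        nlh->nlmsg_pid   = pid;
        *used += NLMSG_ALIGN(nlh->nlmsg_len);
        return nlh;
}

int main(void)
{
        char buf[256];
        size_t used = 0;
        int payload = 42;
        struct nlmsghdr *nlh;

        nlh = put_msg(buf, &used, sizeof(buf), 0, 1, NLMSG_DONE,
                      sizeof(payload), NLM_F_MULTI);
        if (!nlh)
                return 1;
        memcpy(NLMSG_DATA(nlh), &payload, sizeof(payload));
        printf("nlmsg_len=%u, buffer bytes used=%zu\n", nlh->nlmsg_len, used);
        return 0;
}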
100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -15,7 +15,6 @@ #include <linux/pagemap.h> #include <linux/rwsem.h> #include <linux/wait.h> -#include <linux/uio.h> #include <linux/nfs_fs_sb.h> @@ -29,7 +28,6 @@ #include <linux/nfs4.h> #include <linux/nfs_xdr.h> #include <linux/rwsem.h> -#include <linux/workqueue.h> #include <linux/mempool.h> /* @@ -44,13 +42,6 @@ #define NFS_DEF_FILE_IO_BUFFER_SIZE 4096 /* - * The upper limit on timeouts for the exponential backoff algorithm. - */ -#define NFS_WRITEBACK_DELAY (5*HZ) -#define NFS_WRITEBACK_LOCKDELAY (60*HZ) -#define NFS_COMMIT_DELAY (5*HZ) - -/* * superblock magic number for NFS */ #define NFS_SUPER_MAGIC 0x6969 @@ -60,9 +51,6 @@ */ #define NFS_RPC_SWAPFLAGS (RPC_TASK_SWAPPER|RPC_TASK_ROOTCREDS) -#define NFS_RW_SYNC 0x0001 /* O_SYNC handling */ -#define NFS_RW_SWAP 0x0002 /* This is a swap request */ - /* * When flushing a cluster of dirty pages, there can be different * strategies: @@ -96,7 +84,8 @@ struct nfs_open_context { int error; struct list_head list; - wait_queue_head_t waitq; + + __u64 dir_cookie; }; /* @@ -104,6 +93,8 @@ struct nfs_open_context { */ struct nfs_delegation; +struct posix_acl; + /* * nfs fs inode data in memory */ @@ -140,7 +131,6 @@ struct nfs_inode { * * mtime != read_cache_mtime */ - unsigned long readdir_timestamp; unsigned long read_cache_jiffies; unsigned long attrtimeo; unsigned long attrtimeo_timestamp; @@ -158,6 +148,10 @@ struct nfs_inode { atomic_t data_updates; struct nfs_access_entry cache_access; +#ifdef CONFIG_NFS_V3_ACL + struct posix_acl *acl_access; + struct posix_acl *acl_default; +#endif /* * This is the cookie verifier used for NFSv3 readdir @@ -183,13 +177,13 @@ struct nfs_inode { wait_queue_head_t nfs_i_wait; #ifdef CONFIG_NFS_V4 + struct nfs4_cached_acl *nfs4_acl; /* NFSv4 state */ struct list_head open_states; struct nfs_delegation *delegation; int delegation_state; struct rw_semaphore rwsem; #endif /* CONFIG_NFS_V4*/ - struct inode vfs_inode; }; @@ -203,6 +197,8 @@ struct nfs_inode { #define NFS_INO_INVALID_DATA 0x0010 /* cached data is invalid */ #define NFS_INO_INVALID_ATIME 0x0020 /* cached atime is invalid */ #define NFS_INO_INVALID_ACCESS 0x0040 /* cached access cred invalid */ +#define NFS_INO_INVALID_ACL 0x0080 /* cached acls are invalid */ +#define NFS_INO_REVAL_PAGECACHE 0x1000 /* must revalidate pagecache */ static inline struct nfs_inode *NFS_I(struct inode *inode) { @@ -294,12 +290,12 @@ extern int nfs_release(struct inode *, struct file *); extern int nfs_attribute_timeout(struct inode *inode); extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode); extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *); +extern void nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping); extern int nfs_setattr(struct dentry *, struct iattr *); extern void nfs_begin_attr_update(struct inode *); extern void nfs_end_attr_update(struct inode *); extern void nfs_begin_data_update(struct inode *); extern void nfs_end_data_update(struct inode *); -extern void nfs_end_data_update_defer(struct inode *); extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, struct rpc_cred *cred); extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); extern void put_nfs_open_context(struct nfs_open_context *ctx); @@ -314,6 +310,9 @@ extern u32 root_nfs_parse_addr(char *name); /*__init*/ * linux/fs/nfs/file.c */ extern struct inode_operations nfs_file_inode_operations; +#ifdef 
CONFIG_NFS_V3 +extern struct inode_operations nfs3_file_inode_operations; +#endif /* CONFIG_NFS_V3 */ extern struct file_operations nfs_file_operations; extern struct address_space_operations nfs_file_aops; @@ -329,6 +328,22 @@ static inline struct rpc_cred *nfs_file_cred(struct file *file) } /* + * linux/fs/nfs/xattr.c + */ +#ifdef CONFIG_NFS_V3_ACL +extern ssize_t nfs3_listxattr(struct dentry *, char *, size_t); +extern ssize_t nfs3_getxattr(struct dentry *, const char *, void *, size_t); +extern int nfs3_setxattr(struct dentry *, const char *, + const void *, size_t, int); +extern int nfs3_removexattr (struct dentry *, const char *name); +#else +# define nfs3_listxattr NULL +# define nfs3_getxattr NULL +# define nfs3_setxattr NULL +# define nfs3_removexattr NULL +#endif + +/* * linux/fs/nfs/direct.c */ extern ssize_t nfs_direct_IO(int, struct kiocb *, const struct iovec *, loff_t, @@ -342,6 +357,9 @@ extern ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, * linux/fs/nfs/dir.c */ extern struct inode_operations nfs_dir_inode_operations; +#ifdef CONFIG_NFS_V3 +extern struct inode_operations nfs3_dir_inode_operations; +#endif /* CONFIG_NFS_V3 */ extern struct file_operations nfs_dir_operations; extern struct dentry_operations nfs_dentry_operations; @@ -377,10 +395,10 @@ extern void nfs_commit_done(struct rpc_task *); */ extern int nfs_sync_inode(struct inode *, unsigned long, unsigned int, int); #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) -extern int nfs_commit_inode(struct inode *, unsigned long, unsigned int, int); +extern int nfs_commit_inode(struct inode *, int); #else static inline int -nfs_commit_inode(struct inode *inode, unsigned long idx_start, unsigned int npages, int how) +nfs_commit_inode(struct inode *inode, int how) { return 0; } @@ -434,11 +452,6 @@ static inline void nfs_writedata_free(struct nfs_write_data *p) mempool_free(p, nfs_wdata_mempool); } -/* Hack for future NFS swap support */ -#ifndef IS_SWAPFILE -# define IS_SWAPFILE(inode) (0) -#endif - /* * linux/fs/nfs/read.c */ @@ -468,6 +481,29 @@ static inline void nfs_readdata_free(struct nfs_read_data *p) extern void nfs_readdata_release(struct rpc_task *task); /* + * linux/fs/nfs3proc.c + */ +#ifdef CONFIG_NFS_V3_ACL +extern struct posix_acl *nfs3_proc_getacl(struct inode *inode, int type); +extern int nfs3_proc_setacl(struct inode *inode, int type, + struct posix_acl *acl); +extern int nfs3_proc_set_default_acl(struct inode *dir, struct inode *inode, + mode_t mode); +extern void nfs3_forget_cached_acls(struct inode *inode); +#else +static inline int nfs3_proc_set_default_acl(struct inode *dir, + struct inode *inode, + mode_t mode) +{ + return 0; +} + +static inline void nfs3_forget_cached_acls(struct inode *inode) +{ +} +#endif /* CONFIG_NFS_V3_ACL */ + +/* * linux/fs/mount_clnt.c * (Used only by nfsroot module) */ @@ -515,230 +551,6 @@ extern void * nfs_root_data(void); #define NFS_JUKEBOX_RETRY_TIME (5 * HZ) -#ifdef CONFIG_NFS_V4 - -struct idmap; - -/* - * In a seqid-mutating op, this macro controls which error return - * values trigger incrementation of the seqid. - * - * from rfc 3010: - * The client MUST monotonically increment the sequence number for the - * CLOSE, LOCK, LOCKU, OPEN, OPEN_CONFIRM, and OPEN_DOWNGRADE - * operations. This is true even in the event that the previous - * operation that used the sequence number received an error. 
The only - * exception to this rule is if the previous operation received one of - * the following errors: NFSERR_STALE_CLIENTID, NFSERR_STALE_STATEID, - * NFSERR_BAD_STATEID, NFSERR_BAD_SEQID, NFSERR_BADXDR, - * NFSERR_RESOURCE, NFSERR_NOFILEHANDLE. - * - */ -#define seqid_mutating_err(err) \ -(((err) != NFSERR_STALE_CLIENTID) && \ - ((err) != NFSERR_STALE_STATEID) && \ - ((err) != NFSERR_BAD_STATEID) && \ - ((err) != NFSERR_BAD_SEQID) && \ - ((err) != NFSERR_BAD_XDR) && \ - ((err) != NFSERR_RESOURCE) && \ - ((err) != NFSERR_NOFILEHANDLE)) - -enum nfs4_client_state { - NFS4CLNT_OK = 0, -}; - -/* - * The nfs4_client identifies our client state to the server. - */ -struct nfs4_client { - struct list_head cl_servers; /* Global list of servers */ - struct in_addr cl_addr; /* Server identifier */ - u64 cl_clientid; /* constant */ - nfs4_verifier cl_confirm; - unsigned long cl_state; - - u32 cl_lockowner_id; - - /* - * The following rwsem ensures exclusive access to the server - * while we recover the state following a lease expiration. - */ - struct rw_semaphore cl_sem; - - struct list_head cl_delegations; - struct list_head cl_state_owners; - struct list_head cl_unused; - int cl_nunused; - spinlock_t cl_lock; - atomic_t cl_count; - - struct rpc_clnt * cl_rpcclient; - struct rpc_cred * cl_cred; - - struct list_head cl_superblocks; /* List of nfs_server structs */ - - unsigned long cl_lease_time; - unsigned long cl_last_renewal; - struct work_struct cl_renewd; - struct work_struct cl_recoverd; - - wait_queue_head_t cl_waitq; - struct rpc_wait_queue cl_rpcwaitq; - - /* used for the setclientid verifier */ - struct timespec cl_boot_time; - - /* idmapper */ - struct idmap * cl_idmap; - - /* Our own IP address, as a null-terminated string. - * This is used to generate the clientid, and the callback address. - */ - char cl_ipaddr[16]; - unsigned char cl_id_uniquifier; -}; - -/* - * NFS4 state_owners and lock_owners are simply labels for ordered - * sequences of RPC calls. Their sole purpose is to provide once-only - * semantics by allowing the server to identify replayed requests. - * - * The ->so_sema is held during all state_owner seqid-mutating operations: - * OPEN, OPEN_DOWNGRADE, and CLOSE. Its purpose is to properly serialize - * so_seqid. - */ -struct nfs4_state_owner { - struct list_head so_list; /* per-clientid list of state_owners */ - struct nfs4_client *so_client; - u32 so_id; /* 32-bit identifier, unique */ - struct semaphore so_sema; - u32 so_seqid; /* protected by so_sema */ - atomic_t so_count; - - struct rpc_cred *so_cred; /* Associated cred */ - struct list_head so_states; - struct list_head so_delegations; -}; - -/* - * struct nfs4_state maintains the client-side state for a given - * (state_owner,inode) tuple (OPEN) or state_owner (LOCK). - * - * OPEN: - * In order to know when to OPEN_DOWNGRADE or CLOSE the state on the server, - * we need to know how many files are open for reading or writing on a - * given inode. This information too is stored here. 
- * - * LOCK: one nfs4_state (LOCK) to hold the lock stateid nfs4_state(OPEN) - */ - -struct nfs4_lock_state { - struct list_head ls_locks; /* Other lock stateids */ - fl_owner_t ls_owner; /* POSIX lock owner */ -#define NFS_LOCK_INITIALIZED 1 - int ls_flags; - u32 ls_seqid; - u32 ls_id; - nfs4_stateid ls_stateid; - atomic_t ls_count; -}; - -/* bits for nfs4_state->flags */ -enum { - LK_STATE_IN_USE, - NFS_DELEGATED_STATE, -}; - -struct nfs4_state { - struct list_head open_states; /* List of states for the same state_owner */ - struct list_head inode_states; /* List of states for the same inode */ - struct list_head lock_states; /* List of subservient lock stateids */ - - struct nfs4_state_owner *owner; /* Pointer to the open owner */ - struct inode *inode; /* Pointer to the inode */ - - unsigned long flags; /* Do we hold any locks? */ - struct semaphore lock_sema; /* Serializes file locking operations */ - rwlock_t state_lock; /* Protects the lock_states list */ - - nfs4_stateid stateid; - - unsigned int nreaders; - unsigned int nwriters; - int state; /* State on the server (R,W, or RW) */ - atomic_t count; -}; - - -struct nfs4_exception { - long timeout; - int retry; -}; - -struct nfs4_state_recovery_ops { - int (*recover_open)(struct nfs4_state_owner *, struct nfs4_state *); - int (*recover_lock)(struct nfs4_state *, struct file_lock *); -}; - -extern struct dentry_operations nfs4_dentry_operations; -extern struct inode_operations nfs4_dir_inode_operations; - -/* nfs4proc.c */ -extern int nfs4_map_errors(int err); -extern int nfs4_proc_setclientid(struct nfs4_client *, u32, unsigned short); -extern int nfs4_proc_setclientid_confirm(struct nfs4_client *); -extern int nfs4_proc_async_renew(struct nfs4_client *); -extern int nfs4_proc_renew(struct nfs4_client *); -extern int nfs4_do_close(struct inode *inode, struct nfs4_state *state, mode_t mode); -extern struct inode *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *); -extern int nfs4_open_revalidate(struct inode *, struct dentry *, int); - -extern struct nfs4_state_recovery_ops nfs4_reboot_recovery_ops; -extern struct nfs4_state_recovery_ops nfs4_network_partition_recovery_ops; - -/* nfs4renewd.c */ -extern void nfs4_schedule_state_renewal(struct nfs4_client *); -extern void nfs4_renewd_prepare_shutdown(struct nfs_server *); -extern void nfs4_kill_renewd(struct nfs4_client *); - -/* nfs4state.c */ -extern void init_nfsv4_state(struct nfs_server *); -extern void destroy_nfsv4_state(struct nfs_server *); -extern struct nfs4_client *nfs4_get_client(struct in_addr *); -extern void nfs4_put_client(struct nfs4_client *clp); -extern int nfs4_init_client(struct nfs4_client *clp); -extern struct nfs4_client *nfs4_find_client(struct in_addr *); -extern u32 nfs4_alloc_lockowner_id(struct nfs4_client *); - -extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *); -extern void nfs4_put_state_owner(struct nfs4_state_owner *); -extern void nfs4_drop_state_owner(struct nfs4_state_owner *); -extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *); -extern void nfs4_put_open_state(struct nfs4_state *); -extern void nfs4_close_state(struct nfs4_state *, mode_t); -extern struct nfs4_state *nfs4_find_state(struct inode *, struct rpc_cred *, mode_t mode); -extern void nfs4_increment_seqid(int status, struct nfs4_state_owner *sp); -extern void nfs4_schedule_state_recovery(struct nfs4_client *); -extern struct nfs4_lock_state *nfs4_find_lock_state(struct nfs4_state 
*state, fl_owner_t); -extern struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t); -extern void nfs4_put_lock_state(struct nfs4_lock_state *state); -extern void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *ls); -extern void nfs4_notify_setlk(struct nfs4_state *, struct file_lock *, struct nfs4_lock_state *); -extern void nfs4_notify_unlck(struct nfs4_state *, struct file_lock *, struct nfs4_lock_state *); -extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t); - - - -struct nfs4_mount_data; -#else -#define init_nfsv4_state(server) do { } while (0) -#define destroy_nfsv4_state(server) do { } while (0) -#define nfs4_put_state_owner(inode, owner) do { } while (0) -#define nfs4_put_open_state(state) do { } while (0) -#define nfs4_close_state(a, b) do { } while (0) -#define nfs4_renewd_prepare_shutdown(server) do { } while (0) -#endif - #endif /* __KERNEL__ */ /* diff --git a/include/linux/nfs_fs_i.h b/include/linux/nfs_fs_i.h index e9a74958..e2c18dab 100644 --- a/include/linux/nfs_fs_i.h +++ b/include/linux/nfs_fs_i.h @@ -16,6 +16,11 @@ struct nfs_lock_info { struct nlm_lockowner *owner; }; +struct nfs4_lock_state; +struct nfs4_lock_info { + struct nfs4_lock_state *owner; +}; + /* * Lock flag values */ diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index fc51645..3d3a305 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -10,6 +10,7 @@ struct nfs_server { struct rpc_clnt * client; /* RPC client handle */ struct rpc_clnt * client_sys; /* 2nd handle for FSINFO */ + struct rpc_clnt * client_acl; /* ACL RPC client handle */ struct nfs_rpc_ops * rpc_ops; /* NFS protocol vector */ struct backing_dev_info backing_dev_info; int flags; /* various flags */ diff --git a/include/linux/nfs_mount.h b/include/linux/nfs_mount.h index 0071428..659c754 100644 --- a/include/linux/nfs_mount.h +++ b/include/linux/nfs_mount.h @@ -58,6 +58,7 @@ struct nfs_mount_data { #define NFS_MOUNT_KERBEROS 0x0100 /* 3 */ #define NFS_MOUNT_NONLM 0x0200 /* 3 */ #define NFS_MOUNT_BROKEN_SUID 0x0400 /* 4 */ +#define NFS_MOUNT_NOACL 0x0800 /* 4 */ #define NFS_MOUNT_STRICTLOCK 0x1000 /* reserved for NFSv4 */ #define NFS_MOUNT_SECFLAVOUR 0x2000 /* 5 */ #define NFS_MOUNT_FLAGMASK 0xFFFF diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index 39e4895..da2e077b 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h @@ -20,12 +20,19 @@ #include <asm/atomic.h> /* + * Valid flags for the radix tree + */ +#define NFS_PAGE_TAG_DIRTY 0 +#define NFS_PAGE_TAG_WRITEBACK 1 + +/* * Valid flags for a dirty buffer */ #define PG_BUSY 0 #define PG_NEED_COMMIT 1 #define PG_NEED_RESCHED 2 +struct nfs_inode; struct nfs_page { struct list_head wb_list, /* Defines state of page: */ *wb_list_head; /* read/write/commit */ @@ -54,14 +61,17 @@ extern void nfs_clear_request(struct nfs_page *req); extern void nfs_release_request(struct nfs_page *req); -extern void nfs_list_add_request(struct nfs_page *, struct list_head *); - +extern int nfs_scan_lock_dirty(struct nfs_inode *nfsi, struct list_head *dst, + unsigned long idx_start, unsigned int npages); extern int nfs_scan_list(struct list_head *, struct list_head *, unsigned long, unsigned int); extern int nfs_coalesce_requests(struct list_head *, struct list_head *, unsigned int); extern int nfs_wait_on_request(struct nfs_page *); extern void nfs_unlock_request(struct nfs_page *req); +extern int nfs_set_page_writeback_locked(struct nfs_page *req); +extern void 
nfs_clear_page_writeback(struct nfs_page *req); + /* * Lock the page of an asynchronous request without incrementing the wb_count @@ -86,6 +96,18 @@ nfs_lock_request(struct nfs_page *req) return 1; } +/** + * nfs_list_add_request - Insert a request into a list + * @req: request + * @head: head of list into which to insert the request. + */ +static inline void +nfs_list_add_request(struct nfs_page *req, struct list_head *head) +{ + list_add_tail(&req->wb_list, head); + req->wb_list_head = head; +} + /** * nfs_list_remove_request - Remove a request from its wb_list @@ -96,10 +118,6 @@ nfs_list_remove_request(struct nfs_page *req) { if (list_empty(&req->wb_list)) return; - if (!NFS_WBACK_BUSY(req)) { - printk(KERN_ERR "NFS: unlocked request attempted removed from list!\n"); - BUG(); - } list_del_init(&req->wb_list); req->wb_list_head = NULL; } diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 47037d9..a2bf691 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -2,6 +2,7 @@ #define _LINUX_NFS_XDR_H #include <linux/sunrpc/xprt.h> +#include <linux/nfsacl.h> struct nfs4_fsid { __u64 major; @@ -326,6 +327,20 @@ struct nfs_setattrargs { const u32 * bitmask; }; +struct nfs_setaclargs { + struct nfs_fh * fh; + size_t acl_len; + unsigned int acl_pgbase; + struct page ** acl_pages; +}; + +struct nfs_getaclargs { + struct nfs_fh * fh; + size_t acl_len; + unsigned int acl_pgbase; + struct page ** acl_pages; +}; + struct nfs_setattrres { struct nfs_fattr * fattr; const struct nfs_server * server; @@ -354,6 +369,20 @@ struct nfs_readdirargs { struct page ** pages; }; +struct nfs3_getaclargs { + struct nfs_fh * fh; + int mask; + struct page ** pages; +}; + +struct nfs3_setaclargs { + struct inode * inode; + int mask; + struct posix_acl * acl_access; + struct posix_acl * acl_default; + struct page ** pages; +}; + struct nfs_diropok { struct nfs_fh * fh; struct nfs_fattr * fattr; @@ -477,6 +506,15 @@ struct nfs3_readdirres { int plus; }; +struct nfs3_getaclres { + struct nfs_fattr * fattr; + int mask; + unsigned int acl_access_count; + unsigned int acl_default_count; + struct posix_acl * acl_access; + struct posix_acl * acl_default; +}; + #ifdef CONFIG_NFS_V4 typedef u64 clientid4; @@ -667,6 +705,7 @@ struct nfs_rpc_ops { int version; /* Protocol version */ struct dentry_operations *dentry_ops; struct inode_operations *dir_inode_ops; + struct inode_operations *file_inode_ops; int (*getroot) (struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *); @@ -713,6 +752,7 @@ struct nfs_rpc_ops { int (*file_open) (struct inode *, struct file *); int (*file_release) (struct inode *, struct file *); int (*lock)(struct file *, int, struct file_lock *); + void (*clear_acl_cache)(struct inode *); }; /* @@ -732,4 +772,7 @@ extern struct rpc_version nfs_version2; extern struct rpc_version nfs_version3; extern struct rpc_version nfs_version4; +extern struct rpc_version nfsacl_version3; +extern struct rpc_program nfsacl_program; + #endif diff --git a/include/linux/nfsacl.h b/include/linux/nfsacl.h new file mode 100644 index 0000000..54487a9 --- /dev/null +++ b/include/linux/nfsacl.h @@ -0,0 +1,58 @@ +/* + * File: linux/nfsacl.h + * + * (C) 2003 Andreas Gruenbacher <agruen@suse.de> + */ +#ifndef __LINUX_NFSACL_H +#define __LINUX_NFSACL_H + +#define NFS_ACL_PROGRAM 100227 + +#define ACLPROC2_GETACL 1 +#define ACLPROC2_SETACL 2 +#define ACLPROC2_GETATTR 3 +#define ACLPROC2_ACCESS 4 + +#define ACLPROC3_GETACL 1 +#define ACLPROC3_SETACL 2 + + +/* Flags for the getacl/setacl mode */ +#define 
NFS_ACL 0x0001 +#define NFS_ACLCNT 0x0002 +#define NFS_DFACL 0x0004 +#define NFS_DFACLCNT 0x0008 + +/* Flag for Default ACL entries */ +#define NFS_ACL_DEFAULT 0x1000 + +#ifdef __KERNEL__ + +#include <linux/posix_acl.h> + +/* Maximum number of ACL entries over NFS */ +#define NFS_ACL_MAX_ENTRIES 1024 + +#define NFSACL_MAXWORDS (2*(2+3*NFS_ACL_MAX_ENTRIES)) +#define NFSACL_MAXPAGES ((2*(8+12*NFS_ACL_MAX_ENTRIES) + PAGE_SIZE-1) \ + >> PAGE_SHIFT) + +static inline unsigned int +nfsacl_size(struct posix_acl *acl_access, struct posix_acl *acl_default) +{ + unsigned int w = 16; + w += max(acl_access ? (int)acl_access->a_count : 3, 4) * 12; + if (acl_default) + w += max((int)acl_default->a_count, 4) * 12; + return w; +} + +extern unsigned int +nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode, + struct posix_acl *acl, int encode_entries, int typeflag); +extern unsigned int +nfsacl_decode(struct xdr_buf *buf, unsigned int base, unsigned int *aclcnt, + struct posix_acl **pacl); + +#endif /* __KERNEL__ */ +#endif /* __LINUX_NFSACL_H */ diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h index 8f85d9a..5791dfd 100644 --- a/include/linux/nfsd/nfsd.h +++ b/include/linux/nfsd/nfsd.h @@ -15,6 +15,7 @@ #include <linux/unistd.h> #include <linux/dirent.h> #include <linux/fs.h> +#include <linux/posix_acl.h> #include <linux/mount.h> #include <linux/nfsd/debug.h> @@ -124,20 +125,39 @@ int nfsd_statfs(struct svc_rqst *, struct svc_fh *, int nfsd_notify_change(struct inode *, struct iattr *); int nfsd_permission(struct svc_export *, struct dentry *, int); +#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) +#ifdef CONFIG_NFSD_V2_ACL +extern struct svc_version nfsd_acl_version2; +#else +#define nfsd_acl_version2 NULL +#endif +#ifdef CONFIG_NFSD_V3_ACL +extern struct svc_version nfsd_acl_version3; +#else +#define nfsd_acl_version3 NULL +#endif +struct posix_acl *nfsd_get_posix_acl(struct svc_fh *, int); +int nfsd_set_posix_acl(struct svc_fh *, int, struct posix_acl *); +#endif + /* * NFSv4 State */ #ifdef CONFIG_NFSD_V4 -int nfs4_state_init(void); +void nfs4_state_init(void); +int nfs4_state_start(void); void nfs4_state_shutdown(void); time_t nfs4_lease_time(void); void nfs4_reset_lease(time_t leasetime); +int nfs4_reset_recoverydir(char *recdir); #else -static inline int nfs4_state_init(void){return 0;} +static inline void nfs4_state_init(void){}; +static inline int nfs4_state_start(void){return 0;} static inline void nfs4_state_shutdown(void){} static inline time_t nfs4_lease_time(void){return 0;} static inline void nfs4_reset_lease(time_t leasetime){} +static inline int nfs4_reset_recoverydir(char *recdir) {return 0;} #endif /* diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h index b6b2fe1..a84a3fa 100644 --- a/include/linux/nfsd/state.h +++ b/include/linux/nfsd/state.h @@ -61,11 +61,6 @@ typedef struct { #define si_stateownerid si_opaque.so_stateownerid #define si_fileid si_opaque.so_fileid -extern stateid_t zerostateid; -extern stateid_t onestateid; - -#define ZERO_STATEID(stateid) (!memcmp((stateid), &zerostateid, sizeof(stateid_t))) -#define ONE_STATEID(stateid) (!memcmp((stateid), &onestateid, sizeof(stateid_t))) struct nfs4_cb_recall { u32 cbr_ident; @@ -77,8 +72,8 @@ struct nfs4_cb_recall { }; struct nfs4_delegation { - struct list_head dl_del_perfile; /* nfs4_file->fi_del_perfile */ - struct list_head dl_del_perclnt; /* nfs4_client->cl_del_perclnt*/ + struct list_head dl_perfile; + struct list_head dl_perclnt; struct list_head 
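The nfsacl.h addition above sizes the XDR form of an NFSv3 ACL pair starting from 16 bytes and adding 12 bytes per entry, rounding each list up to at least four entries and substituting three synthesized entries when no access ACL is supplied; that lines up with the NFSACL_MAXWORDS/NFSACL_MAXPAGES bounds defined just before it. A userspace restatement of the same arithmetic, taking bare entry counts instead of struct posix_acl pointers (nfsacl_xdr_size() and its parameters are names of mine, not kernel functions):

#include <stdio.h>

/* mirrors the arithmetic of nfsacl_size() above; a negative count means "no ACL" */
static unsigned int nfsacl_xdr_size(int access_count, int default_count)
{
        unsigned int w = 16;
        int a = access_count >= 0 ? access_count : 3;

        w += (a > 4 ? a : 4) * 12;
        if (default_count >= 0)
                w += (default_count > 4 ? default_count : 4) * 12;
        return w;
}

int main(void)
{
        printf("access ACL only, 3 entries: %u bytes\n", nfsacl_xdr_size(3, -1));
        printf("access + default, 5 + 5:    %u bytes\n", nfsacl_xdr_size(5, 5));
        return 0;
}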
dl_recall_lru; /* delegation recalled */ atomic_t dl_count; /* ref count */ struct nfs4_client *dl_client; @@ -97,7 +92,6 @@ struct nfs4_delegation { /* client delegation callback info */ struct nfs4_callback { /* SETCLIENTID info */ - u32 cb_parsed; /* addr parsed */ u32 cb_addr; unsigned short cb_port; u32 cb_prog; @@ -109,6 +103,8 @@ struct nfs4_callback { struct rpc_clnt * cb_client; }; +#define HEXDIR_LEN 33 /* hex version of 16 byte md5 of cl_name plus '\0' */ + /* * struct nfs4_client - one per client. Clientids live here. * o Each nfs4_client is hashed by clientid. @@ -122,10 +118,11 @@ struct nfs4_callback { struct nfs4_client { struct list_head cl_idhash; /* hash by cl_clientid.id */ struct list_head cl_strhash; /* hash by cl_name */ - struct list_head cl_perclient; /* list: stateowners */ - struct list_head cl_del_perclnt; /* list: delegations */ + struct list_head cl_openowners; + struct list_head cl_delegations; struct list_head cl_lru; /* tail queue */ struct xdr_netobj cl_name; /* id generated by client */ + char cl_recdir[HEXDIR_LEN]; /* recovery dir */ nfs4_verifier cl_verifier; /* generated by client */ time_t cl_time; /* time of last lease renewal */ u32 cl_addr; /* client ipaddress */ @@ -134,6 +131,7 @@ struct nfs4_client { nfs4_verifier cl_confirm; /* generated by server */ struct nfs4_callback cl_callback; /* callback info */ atomic_t cl_count; /* ref count */ + u32 cl_firststate; /* recovery dir creation */ }; /* struct nfs4_client_reset @@ -143,7 +141,7 @@ struct nfs4_client { */ struct nfs4_client_reclaim { struct list_head cr_strhash; /* hash by cr_name */ - struct xdr_netobj cr_name; /* id generated by client */ + char cr_recdir[HEXDIR_LEN]; /* recover dir */ }; static inline void @@ -197,9 +195,9 @@ struct nfs4_stateowner { struct kref so_ref; struct list_head so_idhash; /* hash by so_id */ struct list_head so_strhash; /* hash by op_name */ - struct list_head so_perclient; /* nfs4_client->cl_perclient */ - struct list_head so_perfilestate; /* list: nfs4_stateid */ - struct list_head so_perlockowner; /* nfs4_stateid->st_perlockowner */ + struct list_head so_perclient; + struct list_head so_stateids; + struct list_head so_perstateid; /* for lockowners only */ struct list_head so_close_lru; /* tail queue */ time_t so_time; /* time of placement on so_close_lru */ int so_is_open_owner; /* 1=openowner,0=lockowner */ @@ -217,9 +215,10 @@ struct nfs4_stateowner { * share_acces, share_deny on the file. 
*/ struct nfs4_file { + struct kref fi_ref; struct list_head fi_hash; /* hash by "struct inode *" */ - struct list_head fi_perfile; /* list: nfs4_stateid */ - struct list_head fi_del_perfile; /* list: nfs4_delegation */ + struct list_head fi_stateids; + struct list_head fi_delegations; struct inode *fi_inode; u32 fi_id; /* used with stateowner->so_id * for stateid_hashtbl hash */ @@ -241,8 +240,8 @@ struct nfs4_file { struct nfs4_stateid { struct list_head st_hash; struct list_head st_perfile; - struct list_head st_perfilestate; - struct list_head st_perlockowner; + struct list_head st_perstateowner; + struct list_head st_lockowners; struct nfs4_stateowner * st_stateowner; struct nfs4_file * st_file; stateid_t st_stateid; @@ -267,12 +266,9 @@ struct nfs4_stateid { ((err) != nfserr_stale_stateid) && \ ((err) != nfserr_bad_stateid)) -extern time_t nfs4_laundromat(void); extern int nfsd4_renew(clientid_t *clid); extern int nfs4_preprocess_stateid_op(struct svc_fh *current_fh, stateid_t *stateid, int flags, struct file **filp); -extern int nfs4_share_conflict(struct svc_fh *current_fh, - unsigned int deny_type); extern void nfs4_lock_state(void); extern void nfs4_unlock_state(void); extern int nfs4_in_grace(void); @@ -282,6 +278,15 @@ extern void nfs4_free_stateowner(struct kref *kref); extern void nfsd4_probe_callback(struct nfs4_client *clp); extern void nfsd4_cb_recall(struct nfs4_delegation *dp); extern void nfs4_put_delegation(struct nfs4_delegation *dp); +extern int nfs4_make_rec_clidname(char *clidname, struct xdr_netobj *clname); +extern void nfsd4_init_recdir(char *recdir_name); +extern int nfsd4_recdir_load(void); +extern void nfsd4_shutdown_recdir(void); +extern int nfs4_client_to_reclaim(const char *name); +extern int nfs4_has_reclaimed_state(const char *name); +extern void nfsd4_recdir_purge_old(void); +extern int nfsd4_create_clid_dir(struct nfs4_client *clp); +extern void nfsd4_remove_clid_dir(struct nfs4_client *clp); static inline void nfs4_put_stateowner(struct nfs4_stateowner *so) diff --git a/include/linux/nfsd/xdr.h b/include/linux/nfsd/xdr.h index ecccef7..130d4f5 100644 --- a/include/linux/nfsd/xdr.h +++ b/include/linux/nfsd/xdr.h @@ -169,4 +169,8 @@ int nfssvc_encode_entry(struct readdir_cd *, const char *name, int nfssvc_release_fhandle(struct svc_rqst *, u32 *, struct nfsd_fhandle *); +/* Helper functions for NFSv2 ACL code */ +u32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, u32 *p, struct svc_fh *fhp); +u32 *nfs2svc_decode_fh(u32 *p, struct svc_fh *fhp); + #endif /* LINUX_NFSD_H */ diff --git a/include/linux/nfsd/xdr3.h b/include/linux/nfsd/xdr3.h index 0ae9e0e..21e18ce 100644 --- a/include/linux/nfsd/xdr3.h +++ b/include/linux/nfsd/xdr3.h @@ -110,6 +110,19 @@ struct nfsd3_commitargs { __u32 count; }; +struct nfsd3_getaclargs { + struct svc_fh fh; + int mask; +}; + +struct posix_acl; +struct nfsd3_setaclargs { + struct svc_fh fh; + int mask; + struct posix_acl *acl_access; + struct posix_acl *acl_default; +}; + struct nfsd3_attrstat { __u32 status; struct svc_fh fh; @@ -209,6 +222,14 @@ struct nfsd3_commitres { struct svc_fh fh; }; +struct nfsd3_getaclres { + __u32 status; + struct svc_fh fh; + int mask; + struct posix_acl *acl_access; + struct posix_acl *acl_default; +}; + /* dummy type for release */ struct nfsd3_fhandle_pair { __u32 dummy; @@ -241,6 +262,7 @@ union nfsd3_xdrstore { struct nfsd3_fsinfores fsinfores; struct nfsd3_pathconfres pathconfres; struct nfsd3_commitres commitres; + struct nfsd3_getaclres getaclres; }; #define NFS3_SVC_XDRSIZE sizeof(union 
nfsd3_xdrstore) @@ -316,6 +338,10 @@ int nfs3svc_encode_entry(struct readdir_cd *, const char *name, int nfs3svc_encode_entry_plus(struct readdir_cd *, const char *name, int namlen, loff_t offset, ino_t ino, unsigned int); +/* Helper functions for NFSv3 ACL code */ +u32 *nfs3svc_encode_post_op_attr(struct svc_rqst *rqstp, u32 *p, + struct svc_fh *fhp); +u32 *nfs3svc_decode_fh(u32 *p, struct svc_fh *fhp); #endif /* _LINUX_NFSD_XDR3_H */ diff --git a/include/linux/nfsd/xdr4.h b/include/linux/nfsd/xdr4.h index a1f5ad0..4d24d65 100644 --- a/include/linux/nfsd/xdr4.h +++ b/include/linux/nfsd/xdr4.h @@ -210,6 +210,7 @@ struct nfsd4_open { u32 op_share_access; /* request */ u32 op_share_deny; /* request */ stateid_t op_stateid; /* response */ + u32 op_recall; /* recall */ struct nfsd4_change_info op_cinfo; /* response */ u32 op_rflags; /* response */ int op_truncate; /* used during processing */ diff --git a/include/linux/nfsd_idmap.h b/include/linux/nfsd_idmap.h index 9bb7f30..e82746f 100644 --- a/include/linux/nfsd_idmap.h +++ b/include/linux/nfsd_idmap.h @@ -43,8 +43,13 @@ /* XXX from linux/nfs_idmap.h */ #define IDMAP_NAMESZ 128 +#ifdef CONFIG_NFSD_V4 void nfsd_idmap_init(void); void nfsd_idmap_shutdown(void); +#else +static inline void nfsd_idmap_init(void) {}; +static inline void nfsd_idmap_shutdown(void) {}; +#endif int nfsd_map_name_to_uid(struct svc_rqst *, const char *, size_t, __u32 *); int nfsd_map_name_to_gid(struct svc_rqst *, const char *, size_t, __u32 *); diff --git a/include/linux/node.h b/include/linux/node.h index 6e0a697..254dc3d 100644 --- a/include/linux/node.h +++ b/include/linux/node.h @@ -27,6 +27,7 @@ struct node { }; extern int register_node(struct node *, int, struct node *); +extern void unregister_node(struct node *node); #define to_node(sys_device) container_of(sys_device, struct node, sysdev) diff --git a/include/linux/notifier.h b/include/linux/notifier.h index 9303a00..5937dd60 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h @@ -56,6 +56,7 @@ extern int notifier_call_chain(struct notifier_block **n, unsigned long val, voi #define NETDEV_CHANGEADDR 0x0008 #define NETDEV_GOING_DOWN 0x0009 #define NETDEV_CHANGENAME 0x000A +#define NETDEV_FEAT_CHANGE 0x000B #define SYS_DOWN 0x0001 /* Notify of system down */ #define SYS_RESTART SYS_DOWN diff --git a/include/linux/numa.h b/include/linux/numa.h index bd0c8c4..f0c539b 100644 --- a/include/linux/numa.h +++ b/include/linux/numa.h @@ -3,7 +3,7 @@ #include <linux/config.h> -#ifdef CONFIG_DISCONTIGMEM +#ifndef CONFIG_FLATMEM #include <asm/numnodes.h> #endif diff --git a/include/linux/nvram.h b/include/linux/nvram.h index b031e41..9189829 100644 --- a/include/linux/nvram.h +++ b/include/linux/nvram.h @@ -20,8 +20,6 @@ extern void __nvram_write_byte(unsigned char c, int i); extern void nvram_write_byte(unsigned char c, int i); extern int __nvram_check_checksum(void); extern int nvram_check_checksum(void); -extern void __nvram_set_checksum(void); -extern void nvram_set_checksum(void); #endif #endif /* _LINUX_NVRAM_H */ diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 39ab8c6..f5a6695 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -61,21 +61,20 @@ #define PG_active 6 #define PG_slab 7 /* slab debug (Suparna wants this) */ -#define PG_highmem 8 -#define PG_checked 9 /* kill me in 2.5.<early>. 
*/ -#define PG_arch_1 10 -#define PG_reserved 11 - -#define PG_private 12 /* Has something at ->private */ -#define PG_writeback 13 /* Page is under writeback */ -#define PG_nosave 14 /* Used for system suspend/resume */ -#define PG_compound 15 /* Part of a compound page */ - -#define PG_swapcache 16 /* Swap page: swp_entry_t in private */ -#define PG_mappedtodisk 17 /* Has blocks allocated on-disk */ -#define PG_reclaim 18 /* To be reclaimed asap */ -#define PG_nosave_free 19 /* Free, should not be written */ -#define PG_uncached 20 /* Page has been mapped as uncached */ +#define PG_checked 8 /* kill me in 2.5.<early>. */ +#define PG_arch_1 9 +#define PG_reserved 10 +#define PG_private 11 /* Has something at ->private */ + +#define PG_writeback 12 /* Page is under writeback */ +#define PG_nosave 13 /* Used for system suspend/resume */ +#define PG_compound 14 /* Part of a compound page */ +#define PG_swapcache 15 /* Swap page: swp_entry_t in private */ + +#define PG_mappedtodisk 16 /* Has blocks allocated on-disk */ +#define PG_reclaim 17 /* To be reclaimed asap */ +#define PG_nosave_free 18 /* Free, should not be written */ +#define PG_uncached 19 /* Page has been mapped as uncached */ /* * Global page accounting. One instance per CPU. Only unsigned longs are @@ -136,8 +135,8 @@ struct page_state { extern void get_page_state(struct page_state *ret); extern void get_full_page_state(struct page_state *ret); -extern unsigned long __read_page_state(unsigned offset); -extern void __mod_page_state(unsigned offset, unsigned long delta); +extern unsigned long __read_page_state(unsigned long offset); +extern void __mod_page_state(unsigned long offset, unsigned long delta); #define read_page_state(member) \ __read_page_state(offsetof(struct page_state, member)) @@ -215,7 +214,7 @@ extern void __mod_page_state(unsigned offset, unsigned long delta); #define TestSetPageSlab(page) test_and_set_bit(PG_slab, &(page)->flags) #ifdef CONFIG_HIGHMEM -#define PageHighMem(page) test_bit(PG_highmem, &(page)->flags) +#define PageHighMem(page) is_highmem(page_zone(page)) #else #define PageHighMem(page) 0 /* needed to optimize away at compile time */ #endif diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 0422031..d9a2564 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -52,12 +52,12 @@ void release_pages(struct page **pages, int nr, int cold); static inline struct page *page_cache_alloc(struct address_space *x) { - return alloc_pages(mapping_gfp_mask(x), 0); + return alloc_pages(mapping_gfp_mask(x)|__GFP_NORECLAIM, 0); } static inline struct page *page_cache_alloc_cold(struct address_space *x) { - return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0); + return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD|__GFP_NORECLAIM, 0); } typedef int filler_t(void *, struct page *); diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index ae27792..bf60880 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -575,6 +575,7 @@ #define PCI_DEVICE_ID_CT_65550 0x00e0 #define PCI_DEVICE_ID_CT_65554 0x00e4 #define PCI_DEVICE_ID_CT_65555 0x00e5 +#define PCI_DEVICE_ID_CT_69000 0x00c0 #define PCI_VENDOR_ID_MIRO 0x1031 #define PCI_DEVICE_ID_MIRO_36050 0x5601 @@ -874,6 +875,7 @@ #define PCI_DEVICE_ID_APPLE_KL_USB_P 0x0026 #define PCI_DEVICE_ID_APPLE_UNI_N_AGP_P 0x0027 #define PCI_DEVICE_ID_APPLE_UNI_N_AGP15 0x002d +#define PCI_DEVICE_ID_APPLE_UNI_N_PCI15 0x002e #define PCI_DEVICE_ID_APPLE_UNI_N_FW2 0x0030 #define PCI_DEVICE_ID_APPLE_UNI_N_GMAC2 0x0032 #define 
PCI_DEVIEC_ID_APPLE_UNI_N_ATA 0x0033 @@ -1230,6 +1232,12 @@ #define PCI_DEVICE_ID_NVIDIA_QUADRO4_900XGL 0x0258 #define PCI_DEVICE_ID_NVIDIA_QUADRO4_750XGL 0x0259 #define PCI_DEVICE_ID_NVIDIA_QUADRO4_700XGL 0x025B +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE 0x0265 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA 0x0266 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2 0x0267 +#define PCI_DEVICE_ID_NVIDIA_NVENET_12 0x0268 +#define PCI_DEVICE_ID_NVIDIA_NVENET_13 0x0269 +#define PCI_DEVICE_ID_NVIDIA_MCP51_AUDIO 0x026B #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800 0x0280 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800_8X 0x0281 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800SE 0x0282 @@ -1560,6 +1568,7 @@ #define PCI_DEVICE_ID_SERVERWORKS_OSB4USB 0x0220 #define PCI_DEVICE_ID_SERVERWORKS_CSB5USB PCI_DEVICE_ID_SERVERWORKS_OSB4USB #define PCI_DEVICE_ID_SERVERWORKS_CSB6USB 0x0221 +#define PCI_DEVICE_ID_SERVERWORKS_CSB6LPC 0x0227 #define PCI_DEVICE_ID_SERVERWORKS_GCLE 0x0225 #define PCI_DEVICE_ID_SERVERWORKS_GCLE2 0x0227 #define PCI_DEVICE_ID_SERVERWORKS_CSB5ISA 0x0230 @@ -2064,12 +2073,14 @@ #define PCI_VENDOR_ID_BROADCOM 0x14e4 #define PCI_DEVICE_ID_TIGON3_5752 0x1600 +#define PCI_DEVICE_ID_TIGON3_5752M 0x1601 #define PCI_DEVICE_ID_TIGON3_5700 0x1644 #define PCI_DEVICE_ID_TIGON3_5701 0x1645 #define PCI_DEVICE_ID_TIGON3_5702 0x1646 #define PCI_DEVICE_ID_TIGON3_5703 0x1647 #define PCI_DEVICE_ID_TIGON3_5704 0x1648 #define PCI_DEVICE_ID_TIGON3_5704S_2 0x1649 +#define PCI_DEVICE_ID_NX2_5706 0x164a #define PCI_DEVICE_ID_TIGON3_5702FE 0x164d #define PCI_DEVICE_ID_TIGON3_5705 0x1653 #define PCI_DEVICE_ID_TIGON3_5705_2 0x1654 @@ -2089,6 +2100,7 @@ #define PCI_DEVICE_ID_TIGON3_5702X 0x16a6 #define PCI_DEVICE_ID_TIGON3_5703X 0x16a7 #define PCI_DEVICE_ID_TIGON3_5704S 0x16a8 +#define PCI_DEVICE_ID_NX2_5706S 0x16aa #define PCI_DEVICE_ID_TIGON3_5702A3 0x16c6 #define PCI_DEVICE_ID_TIGON3_5703A3 0x16c7 #define PCI_DEVICE_ID_TIGON3_5781 0x16dd @@ -2373,6 +2385,8 @@ #define PCI_DEVICE_ID_INTEL_82915G_IG 0x2582 #define PCI_DEVICE_ID_INTEL_82915GM_HB 0x2590 #define PCI_DEVICE_ID_INTEL_82915GM_IG 0x2592 +#define PCI_DEVICE_ID_INTEL_82945G_HB 0x2770 +#define PCI_DEVICE_ID_INTEL_82945G_IG 0x2772 #define PCI_DEVICE_ID_INTEL_ICH6_0 0x2640 #define PCI_DEVICE_ID_INTEL_ICH6_1 0x2641 #define PCI_DEVICE_ID_INTEL_ICH6_2 0x2642 diff --git a/include/linux/pfkeyv2.h b/include/linux/pfkeyv2.h index e6b5192..7240667 100644 --- a/include/linux/pfkeyv2.h +++ b/include/linux/pfkeyv2.h @@ -245,6 +245,7 @@ struct sadb_x_nat_t_port { /* Security Association flags */ #define SADB_SAFLAGS_PFS 1 +#define SADB_SAFLAGS_NOPMTUDISC 0x20000000 #define SADB_SAFLAGS_DECAP_DSCP 0x40000000 #define SADB_SAFLAGS_NOECN 0x80000000 diff --git a/include/linux/pkt_cls.h b/include/linux/pkt_cls.h index d2aa214..25d2d67 100644 --- a/include/linux/pkt_cls.h +++ b/include/linux/pkt_cls.h @@ -408,6 +408,7 @@ enum TCF_EM_NBYTE, TCF_EM_U32, TCF_EM_META, + TCF_EM_TEXT, __TCF_EM_MAX }; diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h index 73d84c0..1d9da36 100644 --- a/include/linux/pkt_sched.h +++ b/include/linux/pkt_sched.h @@ -427,6 +427,7 @@ enum TCA_NETEM_UNSPEC, TCA_NETEM_CORR, TCA_NETEM_DELAY_DIST, + TCA_NETEM_REORDER, __TCA_NETEM_MAX, }; @@ -437,7 +438,7 @@ struct tc_netem_qopt __u32 latency; /* added delay (us) */ __u32 limit; /* fifo limit (packets) */ __u32 loss; /* random packet loss (0=none ~0=100%) */ - __u32 gap; /* re-ordering gap (0 for delay all) */ + __u32 gap; /* re-ordering gap (0 for none) */ __u32 duplicate; /* random packet dup 
(0=none ~0=100%) */ __u32 jitter; /* random jitter in latency (us) */ }; @@ -449,6 +450,12 @@ struct tc_netem_corr __u32 dup_corr; /* duplicate correlation */ }; +struct tc_netem_reorder +{ + __u32 probability; + __u32 correlation; +}; + #define NETEM_DIST_SCALE 8192 #endif diff --git a/include/linux/pktcdvd.h b/include/linux/pktcdvd.h index 4e2d2a9..4b32bce 100644 --- a/include/linux/pktcdvd.h +++ b/include/linux/pktcdvd.h @@ -159,7 +159,7 @@ struct packet_iosched struct bio *read_queue_tail; struct bio *write_queue; struct bio *write_queue_tail; - int high_prio_read; /* An important read request has been queued */ + sector_t last_write; /* The sector where the last write ended */ int successive_reads; }; diff --git a/include/linux/pm.h b/include/linux/pm.h index ed2b76e..1447932 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -103,7 +103,8 @@ extern int pm_active; /* * Register a device with power management */ -struct pm_dev __deprecated *pm_register(pm_dev_t type, unsigned long id, pm_callback callback); +struct pm_dev __deprecated * +pm_register(pm_dev_t type, unsigned long id, pm_callback callback); /* * Unregister a device with power management @@ -190,17 +191,18 @@ typedef u32 __bitwise pm_message_t; /* * There are 4 important states driver can be in: * ON -- driver is working - * FREEZE -- stop operations and apply whatever policy is applicable to a suspended driver - * of that class, freeze queues for block like IDE does, drop packets for - * ethernet, etc... stop DMA engine too etc... so a consistent image can be - * saved; but do not power any hardware down. - * SUSPEND - like FREEZE, but hardware is doing as much powersaving as possible. Roughly - * pci D3. + * FREEZE -- stop operations and apply whatever policy is applicable to a + * suspended driver of that class, freeze queues for block like IDE + * does, drop packets for ethernet, etc... stop DMA engine too etc... + * so a consistent image can be saved; but do not power any hardware + * down. + * SUSPEND - like FREEZE, but hardware is doing as much powersaving as + * possible. Roughly pci D3. * - * Unfortunately, current drivers only recognize numeric values 0 (ON) and 3 (SUSPEND). - * We'll need to fix the drivers. So yes, putting 3 to all diferent defines is intentional, - * and will go away as soon as drivers are fixed. Also note that typedef is neccessary, - * we'll probably want to switch to + * Unfortunately, current drivers only recognize numeric values 0 (ON) and 3 + * (SUSPEND). We'll need to fix the drivers. So yes, putting 3 to all different + * defines is intentional, and will go away as soon as drivers are fixed. Also + * note that typedef is neccessary, we'll probably want to switch to * typedef struct pm_message_t { int event; int flags; } pm_message_t * or something similar soon. 
*/ @@ -222,11 +224,18 @@ struct dev_pm_info { extern void device_pm_set_parent(struct device * dev, struct device * parent); -extern int device_suspend(pm_message_t state); extern int device_power_down(pm_message_t state); extern void device_power_up(void); extern void device_resume(void); +#ifdef CONFIG_PM +extern int device_suspend(pm_message_t state); +#else +static inline int device_suspend(pm_message_t state) +{ + return 0; +} +#endif #endif /* __KERNEL__ */ diff --git a/include/linux/posix_acl_xattr.h b/include/linux/posix_acl_xattr.h index 5efd0a6..6e53c34 100644 --- a/include/linux/posix_acl_xattr.h +++ b/include/linux/posix_acl_xattr.h @@ -23,13 +23,13 @@ #define ACL_UNDEFINED_ID (-1) typedef struct { - __u16 e_tag; - __u16 e_perm; - __u32 e_id; + __le16 e_tag; + __le16 e_perm; + __le32 e_id; } posix_acl_xattr_entry; typedef struct { - __u32 a_version; + __le32 a_version; posix_acl_xattr_entry a_entries[0]; } posix_acl_xattr_header; @@ -52,4 +52,7 @@ posix_acl_xattr_count(size_t size) return size / sizeof(posix_acl_xattr_entry); } +struct posix_acl *posix_acl_from_xattr(const void *value, size_t size); +int posix_acl_to_xattr(const struct posix_acl *acl, void *buffer, size_t size); + #endif /* _POSIX_ACL_XATTR_H */ diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index 59e5052..0563581 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -74,6 +74,13 @@ struct kcore_list { size_t size; }; +struct vmcore { + struct list_head list; + unsigned long long paddr; + unsigned long size; + loff_t offset; +}; + #ifdef CONFIG_PROC_FS extern struct proc_dir_entry proc_root; diff --git a/include/linux/qnx4_fs.h b/include/linux/qnx4_fs.h index 22ba580..fc610bb 100644 --- a/include/linux/qnx4_fs.h +++ b/include/linux/qnx4_fs.h @@ -46,11 +46,11 @@ struct qnx4_inode_entry { char di_fname[QNX4_SHORT_NAME_MAX]; qnx4_off_t di_size; qnx4_xtnt_t di_first_xtnt; - __u32 di_xblk; - __s32 di_ftime; - __s32 di_mtime; - __s32 di_atime; - __s32 di_ctime; + __le32 di_xblk; + __le32 di_ftime; + __le32 di_mtime; + __le32 di_atime; + __le32 di_ctime; qnx4_nxtnt_t di_num_xtnts; qnx4_mode_t di_mode; qnx4_muid_t di_uid; @@ -63,18 +63,18 @@ struct qnx4_inode_entry { struct qnx4_link_info { char dl_fname[QNX4_NAME_MAX]; - __u32 dl_inode_blk; + __le32 dl_inode_blk; __u8 dl_inode_ndx; __u8 dl_spare[10]; __u8 dl_status; }; struct qnx4_xblk { - __u32 xblk_next_xblk; - __u32 xblk_prev_xblk; + __le32 xblk_next_xblk; + __le32 xblk_prev_xblk; __u8 xblk_num_xtnts; __u8 xblk_spare[3]; - __s32 xblk_num_blocks; + __le32 xblk_num_blocks; qnx4_xtnt_t xblk_xtnts[QNX4_MAX_XTNTS_PER_XBLK]; char xblk_signature[8]; qnx4_xtnt_t xblk_first_xtnt; diff --git a/include/linux/qnxtypes.h b/include/linux/qnxtypes.h index fb518e3..a3eb113 100644 --- a/include/linux/qnxtypes.h +++ b/include/linux/qnxtypes.h @@ -12,18 +12,18 @@ #ifndef _QNX4TYPES_H #define _QNX4TYPES_H -typedef __u16 qnx4_nxtnt_t; +typedef __le16 qnx4_nxtnt_t; typedef __u8 qnx4_ftype_t; typedef struct { - __u32 xtnt_blk; - __u32 xtnt_size; + __le32 xtnt_blk; + __le32 xtnt_size; } qnx4_xtnt_t; -typedef __u16 qnx4_mode_t; -typedef __u16 qnx4_muid_t; -typedef __u16 qnx4_mgid_t; -typedef __u32 qnx4_off_t; -typedef __u16 qnx4_nlink_t; +typedef __le16 qnx4_mode_t; +typedef __le16 qnx4_muid_t; +typedef __le16 qnx4_mgid_t; +typedef __le32 qnx4_off_t; +typedef __le16 qnx4_nlink_t; #endif diff --git a/include/linux/quota.h b/include/linux/quota.h index ac5b90f..700ead4 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -138,8 +138,11 @@ struct 
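The posix_acl_xattr.h change above retypes the entry and header fields from __u16/__u32 to __le16/__le32: the system.posix_acl_access/default xattr blobs are a fixed little-endian format, and the sparse annotations make any access that skips the byte swap stand out (the qnx4 header changes in the same run are the same kind of annotation fix). From userspace the equivalent packing can be written with htole16()/htole32(); in the sketch below the struct merely mirrors the field order of posix_acl_xattr_entry, pack_acl_entry() is a name of mine, and the tag/perm/id values are arbitrary placeholders:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct acl_xattr_entry {        /* same field order as posix_acl_xattr_entry */
        uint16_t e_tag;
        uint16_t e_perm;
        uint32_t e_id;
};

static void pack_acl_entry(unsigned char *out, uint16_t tag, uint16_t perm,
                           uint32_t id)
{
        struct acl_xattr_entry e = {
                .e_tag  = htole16(tag),
                .e_perm = htole16(perm),
                .e_id   = htole32(id),
        };
        memcpy(out, &e, sizeof(e));
}

int main(void)
{
        unsigned char buf[sizeof(struct acl_xattr_entry)];
        size_t i;

        pack_acl_entry(buf, 0x0002, 06, 1000);  /* placeholder tag/perm/id */
        for (i = 0; i < sizeof(buf); i++)
                printf("%02x ", (unsigned int)buf[i]);
        printf("\n");
        return 0;
}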
if_dqinfo { #include <linux/dqblk_v2.h> /* Maximal numbers of writes for quota operation (insert/delete/update) - * (over all formats) - info block, 4 pointer blocks, data block */ -#define DQUOT_MAX_WRITES 6 + * (over VFS all formats) */ +#define DQUOT_INIT_ALLOC max(V1_INIT_ALLOC, V2_INIT_ALLOC) +#define DQUOT_INIT_REWRITE max(V1_INIT_REWRITE, V2_INIT_REWRITE) +#define DQUOT_DEL_ALLOC max(V1_DEL_ALLOC, V2_DEL_ALLOC) +#define DQUOT_DEL_REWRITE max(V1_DEL_REWRITE, V2_DEL_REWRITE) /* * Data for one user/group kept in memory diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index e57baa8..d211507 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -39,7 +39,8 @@ extern int dquot_commit_info(struct super_block *sb, int type); extern int dquot_mark_dquot_dirty(struct dquot *dquot); extern int vfs_quota_on(struct super_block *sb, int type, int format_id, char *path); -extern int vfs_quota_on_mount(int type, int format_id, struct dentry *dentry); +extern int vfs_quota_on_mount(struct super_block *sb, char *qf_name, + int format_id, int type); extern int vfs_quota_off(struct super_block *sb, int type); #define vfs_quota_off_mount(sb, type) vfs_quota_off(sb, type) extern int vfs_quota_sync(struct super_block *sb, int type); diff --git a/include/linux/raid/bitmap.h b/include/linux/raid/bitmap.h new file mode 100644 index 0000000..e24b74b --- /dev/null +++ b/include/linux/raid/bitmap.h @@ -0,0 +1,273 @@ +/* + * bitmap.h: Copyright (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003 + * + * additions: Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc. + */ +#ifndef BITMAP_H +#define BITMAP_H 1 + +#define BITMAP_MAJOR 3 +#define BITMAP_MINOR 38 + +/* + * in-memory bitmap: + * + * Use 16 bit block counters to track pending writes to each "chunk". + * The 2 high order bits are special-purpose, the first is a flag indicating + * whether a resync is needed. The second is a flag indicating whether a + * resync is active. + * This means that the counter is actually 14 bits: + * + * +--------+--------+------------------------------------------------+ + * | resync | resync | counter | + * | needed | active | | + * | (0-1) | (0-1) | (0-16383) | + * +--------+--------+------------------------------------------------+ + * + * The "resync needed" bit is set when: + * a '1' bit is read from storage at startup. + * a write request fails on some drives + * a resync is aborted on a chunk with 'resync active' set + * It is cleared (and resync-active set) when a resync starts across all drives + * of the chunk. + * + * + * The "resync active" bit is set when: + * a resync is started on all drives, and resync_needed is set. + * resync_needed will be cleared (as long as resync_active wasn't already set). + * It is cleared when a resync completes. + * + * The counter counts pending write requests, plus the on-disk bit. + * When the counter is '1' and the resync bits are clear, the on-disk + * bit can be cleared aswell, thus setting the counter to 0. + * When we set a bit, or in the counter (to start a write), if the fields is + * 0, we first set the disk bit and set the counter to 1. + * + * If the counter is 0, the on-disk bit is clear and the stipe is clean + * Anything that dirties the stipe pushes the counter to 2 (at least) + * and sets the on-disk bit (lazily). + * If a periodic sweep find the counter at 2, it is decremented to 1. + * If the sweep find the counter at 1, the on-disk bit is cleared and the + * counter goes to zero. 
+ * + * Also, we'll hijack the "map" pointer itself and use it as two 16 bit block + * counters as a fallback when "page" memory cannot be allocated: + * + * Normal case (page memory allocated): + * + * page pointer (32-bit) + * + * [ ] ------+ + * | + * +-------> [ ][ ]..[ ] (4096 byte page == 2048 counters) + * c1 c2 c2048 + * + * Hijacked case (page memory allocation failed): + * + * hijacked page pointer (32-bit) + * + * [ ][ ] (no page memory allocated) + * counter #1 (16-bit) counter #2 (16-bit) + * + */ + +#ifdef __KERNEL__ + +#define PAGE_BITS (PAGE_SIZE << 3) +#define PAGE_BIT_SHIFT (PAGE_SHIFT + 3) + +typedef __u16 bitmap_counter_t; +#define COUNTER_BITS 16 +#define COUNTER_BIT_SHIFT 4 +#define COUNTER_BYTE_RATIO (COUNTER_BITS / 8) +#define COUNTER_BYTE_SHIFT (COUNTER_BIT_SHIFT - 3) + +#define NEEDED_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 1))) +#define RESYNC_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 2))) +#define COUNTER_MAX ((bitmap_counter_t) RESYNC_MASK - 1) +#define NEEDED(x) (((bitmap_counter_t) x) & NEEDED_MASK) +#define RESYNC(x) (((bitmap_counter_t) x) & RESYNC_MASK) +#define COUNTER(x) (((bitmap_counter_t) x) & COUNTER_MAX) + +/* how many counters per page? */ +#define PAGE_COUNTER_RATIO (PAGE_BITS / COUNTER_BITS) +/* same, except a shift value for more efficient bitops */ +#define PAGE_COUNTER_SHIFT (PAGE_BIT_SHIFT - COUNTER_BIT_SHIFT) +/* same, except a mask value for more efficient bitops */ +#define PAGE_COUNTER_MASK (PAGE_COUNTER_RATIO - 1) + +#define BITMAP_BLOCK_SIZE 512 +#define BITMAP_BLOCK_SHIFT 9 + +/* how many blocks per chunk? (this is variable) */ +#define CHUNK_BLOCK_RATIO(bitmap) ((bitmap)->chunksize >> BITMAP_BLOCK_SHIFT) +#define CHUNK_BLOCK_SHIFT(bitmap) ((bitmap)->chunkshift - BITMAP_BLOCK_SHIFT) +#define CHUNK_BLOCK_MASK(bitmap) (CHUNK_BLOCK_RATIO(bitmap) - 1) + +/* when hijacked, the counters and bits represent even larger "chunks" */ +/* there will be 1024 chunks represented by each counter in the page pointers */ +#define PAGEPTR_BLOCK_RATIO(bitmap) \ + (CHUNK_BLOCK_RATIO(bitmap) << PAGE_COUNTER_SHIFT >> 1) +#define PAGEPTR_BLOCK_SHIFT(bitmap) \ + (CHUNK_BLOCK_SHIFT(bitmap) + PAGE_COUNTER_SHIFT - 1) +#define PAGEPTR_BLOCK_MASK(bitmap) (PAGEPTR_BLOCK_RATIO(bitmap) - 1) + +/* + * on-disk bitmap: + * + * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap + * file a page at a time. There's a superblock at the start of the file. + */ + +/* map chunks (bits) to file pages - offset by the size of the superblock */ +#define CHUNK_BIT_OFFSET(chunk) ((chunk) + (sizeof(bitmap_super_t) << 3)) + +#endif + +/* + * bitmap structures: + */ + +#define BITMAP_MAGIC 0x6d746962 + +/* use these for bitmap->flags and bitmap->sb->state bit-fields */ +enum bitmap_state { + BITMAP_ACTIVE = 0x001, /* the bitmap is in use */ + BITMAP_STALE = 0x002 /* the bitmap file is out of date or had -EIO */ +}; + +/* the superblock at the front of the bitmap file -- little endian */ +typedef struct bitmap_super_s { + __u32 magic; /* 0 BITMAP_MAGIC */ + __u32 version; /* 4 the bitmap major for now, could change... 
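The counter layout documented above (a resync-needed bit, a resync-active bit and a 14-bit pending-write count in each 16-bit slot) is exactly what the NEEDED_MASK/RESYNC_MASK/COUNTER_MAX macros in this hunk carve out. They are plain bit arithmetic, so they can be exercised as-is from userspace; only the scaffolding around them below is mine, with uint16_t standing in for the kernel's __u16:

#include <stdint.h>
#include <stdio.h>

typedef uint16_t bitmap_counter_t;      /* __u16 in the kernel header */
#define COUNTER_BITS 16

/* copied from the raid/bitmap.h hunk above */
#define NEEDED_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 1)))
#define RESYNC_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 2)))
#define COUNTER_MAX ((bitmap_counter_t) RESYNC_MASK - 1)
#define NEEDED(x) (((bitmap_counter_t) x) & NEEDED_MASK)
#define RESYNC(x) (((bitmap_counter_t) x) & RESYNC_MASK)
#define COUNTER(x) (((bitmap_counter_t) x) & COUNTER_MAX)

int main(void)
{
        /* resync needed, resync not active, five writes pending */
        bitmap_counter_t c = NEEDED_MASK | 5;

        printf("needed=%u resync=%u count=%u (max %u)\n",
               NEEDED(c) ? 1 : 0, RESYNC(c) ? 1 : 0,
               (unsigned int)COUNTER(c), (unsigned int)COUNTER_MAX);
        return 0;
}

Running it prints a count of 5 against a maximum of 16383, matching the 0-16383 range shown in the diagram.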
*/ + __u8 uuid[16]; /* 8 128 bit uuid - must match md device uuid */ + __u64 events; /* 24 event counter for the bitmap (1)*/ + __u64 events_cleared;/*32 event counter when last bit cleared (2) */ + __u64 sync_size; /* 40 the size of the md device's sync range(3) */ + __u32 state; /* 48 bitmap state information */ + __u32 chunksize; /* 52 the bitmap chunk size in bytes */ + __u32 daemon_sleep; /* 56 seconds between disk flushes */ + + __u8 pad[256 - 60]; /* set to zero */ +} bitmap_super_t; + +/* notes: + * (1) This event counter is updated before the eventcounter in the md superblock + * When a bitmap is loaded, it is only accepted if this event counter is equal + * to, or one greater than, the event counter in the superblock. + * (2) This event counter is updated when the other one is *if*and*only*if* the + * array is not degraded. As bits are not cleared when the array is degraded, + * this represents the last time that any bits were cleared. + * If a device is being added that has an event count with this value or + * higher, it is accepted as conforming to the bitmap. + * (3)This is the number of sectors represented by the bitmap, and is the range that + * resync happens across. For raid1 and raid5/6 it is the size of individual + * devices. For raid10 it is the size of the array. + */ + +#ifdef __KERNEL__ + +/* the in-memory bitmap is represented by bitmap_pages */ +struct bitmap_page { + /* + * map points to the actual memory page + */ + char *map; + /* + * in emergencies (when map cannot be alloced), hijack the map + * pointer and use it as two counters itself + */ + unsigned int hijacked:1; + /* + * count of dirty bits on the page + */ + unsigned int count:31; +}; + +/* keep track of bitmap file pages that have pending writes on them */ +struct page_list { + struct list_head list; + struct page *page; +}; + +/* the main bitmap structure - one per mddev */ +struct bitmap { + struct bitmap_page *bp; + unsigned long pages; /* total number of pages in the bitmap */ + unsigned long missing_pages; /* number of pages not yet allocated */ + + mddev_t *mddev; /* the md device that the bitmap is for */ + + int counter_bits; /* how many bits per block counter */ + + /* bitmap chunksize -- how much data does each bit represent? */ + unsigned long chunksize; + unsigned long chunkshift; /* chunksize = 2^chunkshift (for bitops) */ + unsigned long chunks; /* total number of data chunks for the array */ + + /* We hold a count on the chunk currently being synced, and drop + * it when the last block is started. If the resync is aborted + * midway, we need to be able to drop that count, so we remember + * the counted chunk.. + */ + unsigned long syncchunk; + + __u64 events_cleared; + + /* bitmap spinlock */ + spinlock_t lock; + + long offset; /* offset from superblock if file is NULL */ + struct file *file; /* backing disk file */ + struct page *sb_page; /* cached copy of the bitmap file superblock */ + struct page **filemap; /* list of cache pages for the file */ + unsigned long *filemap_attr; /* attributes associated w/ filemap pages */ + unsigned long file_pages; /* number of pages in the file */ + + unsigned long flags; + + /* + * the bitmap daemon - periodically wakes up and sweeps the bitmap + * file, cleaning up bits and flushing out pages to disk as necessary + */ + unsigned long daemon_lastrun; /* jiffies of last run */ + unsigned long daemon_sleep; /* how many seconds between updates? 
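
Illustrative aside, not part of the patch: a minimal sketch of the acceptance rule in note (1) above. bitmap_sb_acceptable() is an invented name; the superblock fields are little endian as the structure comment states.

static int bitmap_sb_acceptable(bitmap_super_t *sb, __u64 md_events)
{
	__u64 ev = le64_to_cpu(sb->events);

	if (le32_to_cpu(sb->magic) != BITMAP_MAGIC)
		return 0;
	/* note (1): accept only if equal to, or one greater than, the md event count */
	return ev == md_events || ev == md_events + 1;
}
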
*/ + + /* + * bitmap_writeback_daemon waits for file-pages that have been written, + * as there is no way to get a call-back when a page write completes. + */ + mdk_thread_t *writeback_daemon; + spinlock_t write_lock; + wait_queue_head_t write_wait; + struct list_head complete_pages; + mempool_t *write_pool; +}; + +/* the bitmap API */ + +/* these are used only by md/bitmap */ +int bitmap_create(mddev_t *mddev); +void bitmap_destroy(mddev_t *mddev); +int bitmap_active(struct bitmap *bitmap); + +char *file_path(struct file *file, char *buf, int count); +void bitmap_print_sb(struct bitmap *bitmap); +int bitmap_update_sb(struct bitmap *bitmap); + +int bitmap_setallbits(struct bitmap *bitmap); +void bitmap_write_all(struct bitmap *bitmap); + +/* these are exported */ +int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors); +void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, + int success); +int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks); +void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int aborted); +void bitmap_close_sync(struct bitmap *bitmap); + +int bitmap_unplug(struct bitmap *bitmap); +int bitmap_daemon_work(struct bitmap *bitmap); +#endif + +#endif diff --git a/include/linux/raid/md.h b/include/linux/raid/md.h index a6a67d1..ffa316c 100644 --- a/include/linux/raid/md.h +++ b/include/linux/raid/md.h @@ -60,7 +60,14 @@ */ #define MD_MAJOR_VERSION 0 #define MD_MINOR_VERSION 90 -#define MD_PATCHLEVEL_VERSION 1 +/* + * MD_PATCHLEVEL_VERSION indicates kernel functionality. + * >=1 means different superblock formats are selectable using SET_ARRAY_INFO + * and major_version/minor_version accordingly + * >=2 means that Internal bitmaps are supported by setting MD_SB_BITMAP_PRESENT + * in the super status byte + */ +#define MD_PATCHLEVEL_VERSION 2 extern int register_md_personality (int p_num, mdk_personality_t *p); extern int unregister_md_personality (int p_num); @@ -69,7 +76,7 @@ extern mdk_thread_t * md_register_thread (void (*run) (mddev_t *mddev), extern void md_unregister_thread (mdk_thread_t *thread); extern void md_wakeup_thread(mdk_thread_t *thread); extern void md_check_recovery(mddev_t *mddev); -extern void md_write_start(mddev_t *mddev); +extern void md_write_start(mddev_t *mddev, struct bio *bi); extern void md_write_end(mddev_t *mddev); extern void md_handle_safemode(mddev_t *mddev); extern void md_done_sync(mddev_t *mddev, int blocks, int ok); @@ -78,6 +85,12 @@ extern void md_unplug_mddev(mddev_t *mddev); extern void md_print_devices (void); +extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, + sector_t sector, int size, struct page *page); +extern int sync_page_io(struct block_device *bdev, sector_t sector, int size, + struct page *page, int rw); + + #define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); } #endif diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h index c9a0d40..8c14ba5 100644 --- a/include/linux/raid/md_k.h +++ b/include/linux/raid/md_k.h @@ -15,6 +15,9 @@ #ifndef _MD_K_H #define _MD_K_H +/* and dm-bio-list.h is not under include/linux because.... ??? 
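
Illustrative aside, not part of the patch: the exported bitmap_startwrite()/bitmap_endwrite() pair declared above is meant to bracket each write request in a raid personality, roughly like this (names invented, error handling omitted):

static void bracket_one_write(struct bitmap *bitmap, struct bio *bio, int success)
{
	sector_t offset = bio->bi_sector;
	unsigned long sectors = bio_sectors(bio);

	bitmap_startwrite(bitmap, offset, sectors);	/* bump counters, set bits lazily */
	/* ... submit the write to the member devices and wait for completion ... */
	bitmap_endwrite(bitmap, offset, sectors, success);	/* drop the pending count */
}
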
*/ +#include "../../../drivers/md/dm-bio-list.h" + #define MD_RESERVED 0UL #define LINEAR 1UL #define RAID0 2UL @@ -180,6 +183,10 @@ struct mdk_rdev_s int desc_nr; /* descriptor index in the superblock */ int raid_disk; /* role of device in array */ + int saved_raid_disk; /* role that device used to have in the + * array and could again if we did a partial + * resync from the bitmap + */ atomic_t nr_pending; /* number of pending requests. * only maintained for arrays that @@ -252,6 +259,11 @@ struct mddev_s atomic_t recovery_active; /* blocks scheduled, but not written */ wait_queue_head_t recovery_wait; sector_t recovery_cp; + + spinlock_t write_lock; + wait_queue_head_t sb_wait; /* for waiting on superblock updates */ + atomic_t pending_writes; /* number of active superblock writes */ + unsigned int safemode; /* if set, update "clean" superblock * when no writes pending. */ @@ -260,6 +272,13 @@ struct mddev_s atomic_t writes_pending; request_queue_t *queue; /* for plugging ... */ + struct bitmap *bitmap; /* the bitmap for the device */ + struct file *bitmap_file; /* the bitmap file */ + long bitmap_offset; /* offset from superblock of + * start of bitmap. May be + * negative, but not '0' + */ + struct list_head all_mddevs; }; @@ -291,7 +310,7 @@ struct mdk_personality_s int (*hot_add_disk) (mddev_t *mddev, mdk_rdev_t *rdev); int (*hot_remove_disk) (mddev_t *mddev, int number); int (*spare_active) (mddev_t *mddev); - int (*sync_request)(mddev_t *mddev, sector_t sector_nr, int go_faster); + sector_t (*sync_request)(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster); int (*resize) (mddev_t *mddev, sector_t sectors); int (*reshape) (mddev_t *mddev, int raid_disks); int (*reconfig) (mddev_t *mddev, int layout, int chunk_size); @@ -334,6 +353,7 @@ typedef struct mdk_thread_s { unsigned long flags; struct completion *event; struct task_struct *tsk; + unsigned long timeout; const char *name; } mdk_thread_t; diff --git a/include/linux/raid/md_p.h b/include/linux/raid/md_p.h index 8ba95d6..dc65cd4 100644 --- a/include/linux/raid/md_p.h +++ b/include/linux/raid/md_p.h @@ -96,6 +96,7 @@ typedef struct mdp_device_descriptor_s { #define MD_SB_CLEAN 0 #define MD_SB_ERRORS 1 +#define MD_SB_BITMAP_PRESENT 8 /* bitmap may be present nearby */ typedef struct mdp_superblock_s { /* * Constant generic information @@ -184,7 +185,7 @@ struct mdp_superblock_1 { /* constant array information - 128 bytes */ __u32 magic; /* MD_SB_MAGIC: 0xa92b4efc - little endian */ __u32 major_version; /* 1 */ - __u32 feature_map; /* 0 for now */ + __u32 feature_map; /* bit 0 set if 'bitmap_offset' is meaningful */ __u32 pad0; /* always set to 0 when writing */ __u8 set_uuid[16]; /* user-space generated. */ @@ -197,7 +198,11 @@ struct mdp_superblock_1 { __u32 chunksize; /* in 512byte sectors */ __u32 raid_disks; - __u8 pad1[128-96]; /* set to 0 when written */ + __u32 bitmap_offset; /* sectors after start of superblock that bitmap starts + * NOTE: signed, so bitmap can be before superblock + * only meaningful of feature_map[0] is set. 
+ */ + __u8 pad1[128-100]; /* set to 0 when written */ /* constant this-device information - 64 bytes */ __u64 data_offset; /* sector start of data, often 0 */ diff --git a/include/linux/raid/md_u.h b/include/linux/raid/md_u.h index a2df5c2..81da20c 100644 --- a/include/linux/raid/md_u.h +++ b/include/linux/raid/md_u.h @@ -23,6 +23,7 @@ #define GET_DISK_INFO _IOR (MD_MAJOR, 0x12, mdu_disk_info_t) #define PRINT_RAID_DEBUG _IO (MD_MAJOR, 0x13) #define RAID_AUTORUN _IO (MD_MAJOR, 0x14) +#define GET_BITMAP_FILE _IOR (MD_MAJOR, 0x15, mdu_bitmap_file_t) /* configuration */ #define CLEAR_ARRAY _IO (MD_MAJOR, 0x20) @@ -36,6 +37,7 @@ #define HOT_ADD_DISK _IO (MD_MAJOR, 0x28) #define SET_DISK_FAULTY _IO (MD_MAJOR, 0x29) #define HOT_GENERATE_ERROR _IO (MD_MAJOR, 0x2a) +#define SET_BITMAP_FILE _IOW (MD_MAJOR, 0x2b, int) /* usage */ #define RUN_ARRAY _IOW (MD_MAJOR, 0x30, mdu_param_t) @@ -106,6 +108,11 @@ typedef struct mdu_start_info_s { } mdu_start_info_t; +typedef struct mdu_bitmap_file_s +{ + char pathname[4096]; +} mdu_bitmap_file_t; + typedef struct mdu_param_s { int personality; /* 1,2,3,4 */ diff --git a/include/linux/raid/raid1.h b/include/linux/raid/raid1.h index abbfdd9..9d93cf1 100644 --- a/include/linux/raid/raid1.h +++ b/include/linux/raid/raid1.h @@ -36,12 +36,21 @@ struct r1_private_data_s { spinlock_t device_lock; struct list_head retry_list; + /* queue pending writes and submit them on unplug */ + struct bio_list pending_bio_list; + /* queue of writes that have been unplugged */ + struct bio_list flushing_bio_list; + /* for use when syncing mirrors: */ spinlock_t resync_lock; - int nr_pending; - int barrier; + int nr_pending; + int barrier; sector_t next_resync; + int fullsync; /* set to 1 if a full sync is needed, + * (fresh device added). + * Cleared when a sync completes. + */ wait_queue_head_t wait_idle; wait_queue_head_t wait_resume; @@ -85,14 +94,17 @@ struct r1bio_s { int read_disk; struct list_head retry_list; + struct bitmap_update *bitmap_update; /* * if the IO is in WRITE direction, then multiple bios are used. * We choose the number when they are allocated. 
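
Illustrative aside, not part of the patch: a userspace sketch of the new GET_BITMAP_FILE ioctl and mdu_bitmap_file_t above; the device path and the include set are assumptions.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/major.h>
#include <linux/raid/md_u.h>

int main(void)
{
	mdu_bitmap_file_t bf;
	int fd = open("/dev/md0", O_RDONLY);

	if (fd < 0 || ioctl(fd, GET_BITMAP_FILE, &bf) < 0) {
		perror("GET_BITMAP_FILE");
		return 1;
	}
	printf("bitmap file: %s\n", bf.pathname[0] ? bf.pathname : "(none or internal)");
	close(fd);
	return 0;
}
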
*/ struct bio *bios[0]; + /* DO NOT PUT ANY NEW FIELDS HERE - bios array is contiguously alloced*/ }; /* bits for r1bio.state */ #define R1BIO_Uptodate 0 #define R1BIO_IsSync 1 +#define R1BIO_Degraded 2 #endif diff --git a/include/linux/reboot.h b/include/linux/reboot.h index d60fafc..2d4dd23 100644 --- a/include/linux/reboot.h +++ b/include/linux/reboot.h @@ -51,6 +51,10 @@ extern void machine_restart(char *cmd); extern void machine_halt(void); extern void machine_power_off(void); +extern void machine_shutdown(void); +struct pt_regs; +extern void machine_crash_shutdown(struct pt_regs *); + #endif #endif /* _LINUX_REBOOT_H */ diff --git a/include/linux/reiserfs_acl.h b/include/linux/reiserfs_acl.h index 2aef9c3..0760507 100644 --- a/include/linux/reiserfs_acl.h +++ b/include/linux/reiserfs_acl.h @@ -1,6 +1,5 @@ #include <linux/init.h> #include <linux/posix_acl.h> -#include <linux/xattr_acl.h> #define REISERFS_ACL_VERSION 0x0001 diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h index 3214862..4c7c568 100644 --- a/include/linux/reiserfs_fs.h +++ b/include/linux/reiserfs_fs.h @@ -1644,11 +1644,18 @@ struct reiserfs_journal_header { #define JOURNAL_MAX_TRANS_AGE 30 #define JOURNAL_PER_BALANCE_CNT (3 * (MAX_HEIGHT-2) + 9) #ifdef CONFIG_QUOTA -#define REISERFS_QUOTA_TRANS_BLOCKS 2 /* We need to update data and inode (atime) */ -#define REISERFS_QUOTA_INIT_BLOCKS (DQUOT_MAX_WRITES*(JOURNAL_PER_BALANCE_CNT+2)+1) /* 1 balancing, 1 bitmap, 1 data per write + stat data update */ +/* We need to update data and inode (atime) */ +#define REISERFS_QUOTA_TRANS_BLOCKS(s) (REISERFS_SB(s)->s_mount_opt & (1<<REISERFS_QUOTA) ? 2 : 0) +/* 1 balancing, 1 bitmap, 1 data per write + stat data update */ +#define REISERFS_QUOTA_INIT_BLOCKS(s) (REISERFS_SB(s)->s_mount_opt & (1<<REISERFS_QUOTA) ? \ +(DQUOT_INIT_ALLOC*(JOURNAL_PER_BALANCE_CNT+2)+DQUOT_INIT_REWRITE+1) : 0) +/* same as with INIT */ +#define REISERFS_QUOTA_DEL_BLOCKS(s) (REISERFS_SB(s)->s_mount_opt & (1<<REISERFS_QUOTA) ? \ +(DQUOT_DEL_ALLOC*(JOURNAL_PER_BALANCE_CNT+2)+DQUOT_DEL_REWRITE+1) : 0) #else -#define REISERFS_QUOTA_TRANS_BLOCKS 0 -#define REISERFS_QUOTA_INIT_BLOCKS 0 +#define REISERFS_QUOTA_TRANS_BLOCKS(s) 0 +#define REISERFS_QUOTA_INIT_BLOCKS(s) 0 +#define REISERFS_QUOTA_DEL_BLOCKS(s) 0 #endif /* both of these can be as low as 1, or as high as you want. The min is the diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h index 37a3a7a..31c709d 100644 --- a/include/linux/reiserfs_fs_sb.h +++ b/include/linux/reiserfs_fs_sb.h @@ -467,6 +467,8 @@ enum reiserfs_mount_options { REISERFS_ERROR_RO, REISERFS_ERROR_CONTINUE, + REISERFS_QUOTA, /* Some quota option specified */ + REISERFS_TEST1, REISERFS_TEST2, REISERFS_TEST3, diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 11b484e..e80fb7ee 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -93,6 +93,12 @@ int page_referenced(struct page *, int is_locked, int ignore_token); int try_to_unmap(struct page *); /* + * Called from mm/filemap_xip.c to unmap empty zero page + */ +pte_t *page_check_address(struct page *, struct mm_struct *, unsigned long); + + +/* * Used by swapoff to help locate where page is expected in vma. 
*/ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *); diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 91ac97c..d021888 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -89,6 +89,13 @@ enum { RTM_GETANYCAST = 62, #define RTM_GETANYCAST RTM_GETANYCAST + RTM_NEWNEIGHTBL = 64, +#define RTM_NEWNEIGHTBL RTM_NEWNEIGHTBL + RTM_GETNEIGHTBL = 66, +#define RTM_GETNEIGHTBL RTM_GETNEIGHTBL + RTM_SETNEIGHTBL, +#define RTM_SETNEIGHTBL RTM_SETNEIGHTBL + __RTM_MAX, #define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1) }; @@ -493,6 +500,106 @@ struct nda_cacheinfo __u32 ndm_refcnt; }; + +/***************************************************************** + * Neighbour tables specific messages. + * + * To retrieve the neighbour tables send RTM_GETNEIGHTBL with the + * NLM_F_DUMP flag set. Every neighbour table configuration is + * spread over multiple messages to avoid running into message + * size limits on systems with many interfaces. The first message + * in the sequence transports all not device specific data such as + * statistics, configuration, and the default parameter set. + * This message is followed by 0..n messages carrying device + * specific parameter sets. + * Although the ordering should be sufficient, NDTA_NAME can be + * used to identify sequences. The initial message can be identified + * by checking for NDTA_CONFIG. The device specific messages do + * not contain this TLV but have NDTPA_IFINDEX set to the + * corresponding interface index. + * + * To change neighbour table attributes, send RTM_SETNEIGHTBL + * with NDTA_NAME set. Changeable attribute include NDTA_THRESH[1-3], + * NDTA_GC_INTERVAL, and all TLVs in NDTA_PARMS unless marked + * otherwise. Device specific parameter sets can be changed by + * setting NDTPA_IFINDEX to the interface index of the corresponding + * device. 
+ ****/ + +struct ndt_stats +{ + __u64 ndts_allocs; + __u64 ndts_destroys; + __u64 ndts_hash_grows; + __u64 ndts_res_failed; + __u64 ndts_lookups; + __u64 ndts_hits; + __u64 ndts_rcv_probes_mcast; + __u64 ndts_rcv_probes_ucast; + __u64 ndts_periodic_gc_runs; + __u64 ndts_forced_gc_runs; +}; + +enum { + NDTPA_UNSPEC, + NDTPA_IFINDEX, /* u32, unchangeable */ + NDTPA_REFCNT, /* u32, read-only */ + NDTPA_REACHABLE_TIME, /* u64, read-only, msecs */ + NDTPA_BASE_REACHABLE_TIME, /* u64, msecs */ + NDTPA_RETRANS_TIME, /* u64, msecs */ + NDTPA_GC_STALETIME, /* u64, msecs */ + NDTPA_DELAY_PROBE_TIME, /* u64, msecs */ + NDTPA_QUEUE_LEN, /* u32 */ + NDTPA_APP_PROBES, /* u32 */ + NDTPA_UCAST_PROBES, /* u32 */ + NDTPA_MCAST_PROBES, /* u32 */ + NDTPA_ANYCAST_DELAY, /* u64, msecs */ + NDTPA_PROXY_DELAY, /* u64, msecs */ + NDTPA_PROXY_QLEN, /* u32 */ + NDTPA_LOCKTIME, /* u64, msecs */ + __NDTPA_MAX +}; +#define NDTPA_MAX (__NDTPA_MAX - 1) + +struct ndtmsg +{ + __u8 ndtm_family; + __u8 ndtm_pad1; + __u16 ndtm_pad2; +}; + +struct ndt_config +{ + __u16 ndtc_key_len; + __u16 ndtc_entry_size; + __u32 ndtc_entries; + __u32 ndtc_last_flush; /* delta to now in msecs */ + __u32 ndtc_last_rand; /* delta to now in msecs */ + __u32 ndtc_hash_rnd; + __u32 ndtc_hash_mask; + __u32 ndtc_hash_chain_gc; + __u32 ndtc_proxy_qlen; +}; + +enum { + NDTA_UNSPEC, + NDTA_NAME, /* char *, unchangeable */ + NDTA_THRESH1, /* u32 */ + NDTA_THRESH2, /* u32 */ + NDTA_THRESH3, /* u32 */ + NDTA_CONFIG, /* struct ndt_config, read-only */ + NDTA_PARMS, /* nested TLV NDTPA_* */ + NDTA_STATS, /* struct ndt_stats, read-only */ + NDTA_GC_INTERVAL, /* u64, msecs */ + __NDTA_MAX +}; +#define NDTA_MAX (__NDTA_MAX - 1) + +#define NDTA_RTA(r) ((struct rtattr*)(((char*)(r)) + \ + NLMSG_ALIGN(sizeof(struct ndtmsg)))) +#define NDTA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ndtmsg)) + + /**** * General form of address family dependent message. 
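
Illustrative aside, not part of the patch: walking the TLVs of a received RTM_NEWNEIGHTBL message with the NDTA_RTA()/NDTA_PAYLOAD() helpers above, using the long-standing RTA_OK()/RTA_NEXT()/RTA_DATA() macros; the netlink socket handling around it is omitted.

#include <stdio.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static void print_neightbl(struct nlmsghdr *nlh)
{
	struct ndtmsg *ndtm = NLMSG_DATA(nlh);
	struct rtattr *rta = NDTA_RTA(ndtm);
	int len = NDTA_PAYLOAD(nlh);

	for (; RTA_OK(rta, len); rta = RTA_NEXT(rta, len)) {
		if (rta->rta_type == NDTA_NAME)
			printf("table %s\n", (char *) RTA_DATA(rta));
		else if (rta->rta_type == NDTA_CONFIG)
			printf("  (initial message of the sequence: carries NDTA_CONFIG)\n");
	}
}
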
****/ @@ -785,10 +892,82 @@ extern void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const voi goto rtattr_failure; \ __rta_fill(skb, attrtype, attrlen, data); }) -#define RTA_PUT_NOHDR(skb, attrlen, data) \ +#define RTA_APPEND(skb, attrlen, data) \ ({ if (unlikely(skb_tailroom(skb) < (int)(attrlen))) \ goto rtattr_failure; \ - memcpy(skb_put(skb, RTA_ALIGN(attrlen)), data, attrlen); }) + memcpy(skb_put(skb, attrlen), data, attrlen); }) + +#define RTA_PUT_NOHDR(skb, attrlen, data) \ + RTA_APPEND(skb, RTA_ALIGN(attrlen), data) + +#define RTA_PUT_U8(skb, attrtype, value) \ +({ u8 _tmp = (value); \ + RTA_PUT(skb, attrtype, sizeof(u8), &_tmp); }) + +#define RTA_PUT_U16(skb, attrtype, value) \ +({ u16 _tmp = (value); \ + RTA_PUT(skb, attrtype, sizeof(u16), &_tmp); }) + +#define RTA_PUT_U32(skb, attrtype, value) \ +({ u32 _tmp = (value); \ + RTA_PUT(skb, attrtype, sizeof(u32), &_tmp); }) + +#define RTA_PUT_U64(skb, attrtype, value) \ +({ u64 _tmp = (value); \ + RTA_PUT(skb, attrtype, sizeof(u64), &_tmp); }) + +#define RTA_PUT_SECS(skb, attrtype, value) \ + RTA_PUT_U64(skb, attrtype, (value) / HZ) + +#define RTA_PUT_MSECS(skb, attrtype, value) \ + RTA_PUT_U64(skb, attrtype, jiffies_to_msecs(value)) + +#define RTA_PUT_STRING(skb, attrtype, value) \ + RTA_PUT(skb, attrtype, strlen(value) + 1, value) + +#define RTA_PUT_FLAG(skb, attrtype) \ + RTA_PUT(skb, attrtype, 0, NULL); + +#define RTA_NEST(skb, type) \ +({ struct rtattr *__start = (struct rtattr *) (skb)->tail; \ + RTA_PUT(skb, type, 0, NULL); \ + __start; }) + +#define RTA_NEST_END(skb, start) \ +({ (start)->rta_len = ((skb)->tail - (unsigned char *) (start)); \ + (skb)->len; }) + +#define RTA_NEST_CANCEL(skb, start) \ +({ if (start) \ + skb_trim(skb, (unsigned char *) (start) - (skb)->data); \ + -1; }) + +#define RTA_GET_U8(rta) \ +({ if (!rta || RTA_PAYLOAD(rta) < sizeof(u8)) \ + goto rtattr_failure; \ + *(u8 *) RTA_DATA(rta); }) + +#define RTA_GET_U16(rta) \ +({ if (!rta || RTA_PAYLOAD(rta) < sizeof(u16)) \ + goto rtattr_failure; \ + *(u16 *) RTA_DATA(rta); }) + +#define RTA_GET_U32(rta) \ +({ if (!rta || RTA_PAYLOAD(rta) < sizeof(u32)) \ + goto rtattr_failure; \ + *(u32 *) RTA_DATA(rta); }) + +#define RTA_GET_U64(rta) \ +({ u64 _tmp; \ + if (!rta || RTA_PAYLOAD(rta) < sizeof(u64)) \ + goto rtattr_failure; \ + memcpy(&_tmp, RTA_DATA(rta), sizeof(_tmp)); \ + _tmp; }) + +#define RTA_GET_FLAG(rta) (!!(rta)) + +#define RTA_GET_SECS(rta) ((unsigned long) RTA_GET_U64(rta) * HZ) +#define RTA_GET_MSECS(rta) (msecs_to_jiffies((unsigned long) RTA_GET_U64(rta))) static inline struct rtattr * __rta_reserve(struct sk_buff *skb, int attrtype, int attrlen) diff --git a/include/linux/sched.h b/include/linux/sched.h index 4dbb1090..9530b19 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -201,8 +201,8 @@ extern unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); -extern void arch_unmap_area(struct vm_area_struct *area); -extern void arch_unmap_area_topdown(struct vm_area_struct *area); +extern void arch_unmap_area(struct mm_struct *, unsigned long); +extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long); #define set_mm_counter(mm, member, value) (mm)->_##member = (value) #define get_mm_counter(mm, member) ((mm)->_##member) @@ -218,9 +218,10 @@ struct mm_struct { unsigned long (*get_unmapped_area) (struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); - void (*unmap_area) 
(struct vm_area_struct *area); - unsigned long mmap_base; /* base of mmap area */ - unsigned long free_area_cache; /* first hole */ + void (*unmap_area) (struct mm_struct *mm, unsigned long addr); + unsigned long mmap_base; /* base of mmap area */ + unsigned long cached_hole_size; /* if non-zero, the largest hole below free_area_cache */ + unsigned long free_area_cache; /* first hole of size cached_hole_size or larger */ pgd_t * pgd; atomic_t mm_users; /* How many users with user space? */ atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */ @@ -245,7 +246,7 @@ struct mm_struct { unsigned long saved_auxv[42]; /* for /proc/PID/auxv */ - unsigned dumpable:1; + unsigned dumpable:2; cpumask_t cpu_vm_mask; /* Architecture-specific MM context */ @@ -367,6 +368,11 @@ struct signal_struct { #endif }; +/* Context switch must be unlocked if interrupts are to be enabled */ +#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW +# define __ARCH_WANT_UNLOCKED_CTXSW +#endif + /* * Bits in flags field of signal_struct. */ @@ -459,10 +465,11 @@ enum idle_type #define SD_LOAD_BALANCE 1 /* Do load balancing on this domain. */ #define SD_BALANCE_NEWIDLE 2 /* Balance when about to become idle */ #define SD_BALANCE_EXEC 4 /* Balance on exec */ -#define SD_WAKE_IDLE 8 /* Wake to idle CPU on task wakeup */ -#define SD_WAKE_AFFINE 16 /* Wake task to waking CPU */ -#define SD_WAKE_BALANCE 32 /* Perform balancing at task wakeup */ -#define SD_SHARE_CPUPOWER 64 /* Domain members share cpu power */ +#define SD_BALANCE_FORK 8 /* Balance on fork, clone */ +#define SD_WAKE_IDLE 16 /* Wake to idle CPU on task wakeup */ +#define SD_WAKE_AFFINE 32 /* Wake task to waking CPU */ +#define SD_WAKE_BALANCE 64 /* Perform balancing at task wakeup */ +#define SD_SHARE_CPUPOWER 128 /* Domain members share cpu power */ struct sched_group { struct sched_group *next; /* Must be a circular list */ @@ -487,6 +494,11 @@ struct sched_domain { unsigned long long cache_hot_time; /* Task considered cache hot (ns) */ unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */ unsigned int per_cpu_gain; /* CPU % gained by adding domain cpus */ + unsigned int busy_idx; + unsigned int idle_idx; + unsigned int newidle_idx; + unsigned int wake_idx; + unsigned int forkexec_idx; int flags; /* See SD_* */ /* Runtime fields. */ @@ -510,10 +522,16 @@ struct sched_domain { unsigned long alb_failed; unsigned long alb_pushed; - /* sched_balance_exec() stats */ - unsigned long sbe_attempts; + /* SD_BALANCE_EXEC stats */ + unsigned long sbe_cnt; + unsigned long sbe_balanced; unsigned long sbe_pushed; + /* SD_BALANCE_FORK stats */ + unsigned long sbf_cnt; + unsigned long sbf_balanced; + unsigned long sbf_pushed; + /* try_to_wake_up() stats */ unsigned long ttwu_wake_remote; unsigned long ttwu_move_affine; @@ -521,6 +539,8 @@ struct sched_domain { #endif }; +extern void partition_sched_domains(cpumask_t *partition1, + cpumask_t *partition2); #ifdef ARCH_HAS_SCHED_DOMAIN /* Useful helpers that arch setup code may use. 
Defined in kernel/sched.c */ extern cpumask_t cpu_isolated_map; @@ -560,9 +580,10 @@ struct group_info { groups_free(group_info); \ } while (0) -struct group_info *groups_alloc(int gidsetsize); -void groups_free(struct group_info *group_info); -int set_current_groups(struct group_info *group_info); +extern struct group_info *groups_alloc(int gidsetsize); +extern void groups_free(struct group_info *group_info); +extern int set_current_groups(struct group_info *group_info); +extern int groups_search(struct group_info *group_info, gid_t grp); /* access the groups "array" with this macro */ #define GROUP_AT(gi, i) \ ((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK]) @@ -580,6 +601,9 @@ struct task_struct { int lock_depth; /* BKL lock depth */ +#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) + int oncpu; +#endif int prio, static_prio; struct list_head run_list; prio_array_t *array; @@ -659,6 +683,7 @@ struct task_struct { struct user_struct *user; #ifdef CONFIG_KEYS struct key *thread_keyring; /* keyring private to this thread */ + unsigned char jit_keyring; /* default keyring to attach requested keys to */ #endif int oomkilladj; /* OOM kill score adjustment (bit shift). */ char comm[TASK_COMM_LEN]; /* executable name excluding path @@ -701,8 +726,6 @@ struct task_struct { spinlock_t alloc_lock; /* Protection of proc_dentry: nesting proc_lock, dcache_lock, write_lock_irq(&tasklist_lock); */ spinlock_t proc_lock; -/* context-switch lock */ - spinlock_t switch_lock; /* journalling filesystem info */ void *journal_info; @@ -909,7 +932,7 @@ extern void FASTCALL(wake_up_new_task(struct task_struct * tsk, #else static inline void kick_process(struct task_struct *tsk) { } #endif -extern void FASTCALL(sched_fork(task_t * p)); +extern void FASTCALL(sched_fork(task_t * p, int clone_flags)); extern void FASTCALL(sched_exit(task_t * p)); extern int in_group_p(gid_t); @@ -1242,33 +1265,78 @@ extern void normalize_rt_tasks(void); #endif -/* try_to_freeze - * - * Checks whether we need to enter the refrigerator - * and returns 1 if we did so. - */ #ifdef CONFIG_PM -extern void refrigerator(unsigned long); +/* + * Check if a process has been frozen + */ +static inline int frozen(struct task_struct *p) +{ + return p->flags & PF_FROZEN; +} + +/* + * Check if there is a request to freeze a process + */ +static inline int freezing(struct task_struct *p) +{ + return p->flags & PF_FREEZE; +} + +/* + * Request that a process be frozen + * FIXME: SMP problem. We may not modify other process' flags! 
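
Illustrative aside, not part of the patch: the loop a kernel daemon is expected to run with the reworked freezer helpers (try_to_freeze() loses its argument just below); kthread_should_stop() and the necessary includes are assumed.

static int example_daemon(void *unused)
{
	while (!kthread_should_stop()) {
		try_to_freeze();	/* enter refrigerator() if PF_FREEZE was set on us */
		/* ... do one unit of work ... */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}
	return 0;
}
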
+ */ +static inline void freeze(struct task_struct *p) +{ + p->flags |= PF_FREEZE; +} + +/* + * Wake up a frozen process + */ +static inline int thaw_process(struct task_struct *p) +{ + if (frozen(p)) { + p->flags &= ~PF_FROZEN; + wake_up_process(p); + return 1; + } + return 0; +} + +/* + * freezing is complete, mark process as frozen + */ +static inline void frozen_process(struct task_struct *p) +{ + p->flags = (p->flags & ~PF_FREEZE) | PF_FROZEN; +} + +extern void refrigerator(void); extern int freeze_processes(void); extern void thaw_processes(void); -static inline int try_to_freeze(unsigned long refrigerator_flags) +static inline int try_to_freeze(void) { - if (unlikely(current->flags & PF_FREEZE)) { - refrigerator(refrigerator_flags); + if (freezing(current)) { + refrigerator(); return 1; } else return 0; } #else -static inline void refrigerator(unsigned long flag) {} +static inline int frozen(struct task_struct *p) { return 0; } +static inline int freezing(struct task_struct *p) { return 0; } +static inline void freeze(struct task_struct *p) { BUG(); } +static inline int thaw_process(struct task_struct *p) { return 1; } +static inline void frozen_process(struct task_struct *p) { BUG(); } + +static inline void refrigerator(void) {} static inline int freeze_processes(void) { BUG(); return 0; } static inline void thaw_processes(void) {} -static inline int try_to_freeze(unsigned long refrigerator_flags) -{ - return 0; -} +static inline int try_to_freeze(void) { return 0; } + #endif /* CONFIG_PM */ #endif /* __KERNEL__ */ diff --git a/include/linux/signal.h b/include/linux/signal.h index 0a98f5e..7be18b5 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -231,10 +231,8 @@ extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *); extern long do_sigpending(void __user *, unsigned long); extern int sigprocmask(int, sigset_t *, sigset_t *); -#ifndef HAVE_ARCH_GET_SIGNAL_TO_DELIVER struct pt_regs; extern int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, struct pt_regs *regs, void *cookie); -#endif #endif /* __KERNEL__ */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index cc04f5c..416a2e4 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -27,6 +27,7 @@ #include <linux/highmem.h> #include <linux/poll.h> #include <linux/net.h> +#include <linux/textsearch.h> #include <net/checksum.h> #define HAVE_ALLOC_SKB /* For the drivers to know */ @@ -193,7 +194,6 @@ struct skb_shared_info { * @nfcache: Cache info * @nfct: Associated connection, if any * @nfctinfo: Relationship of this skb to the connection - * @nf_debug: Netfilter debugging * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c * @private: Data which is private to the HIPPI implementation * @tc_index: Traffic control index @@ -264,9 +264,6 @@ struct sk_buff { __u32 nfcache; __u32 nfctinfo; struct nf_conntrack *nfct; -#ifdef CONFIG_NETFILTER_DEBUG - unsigned int nf_debug; -#endif #ifdef CONFIG_BRIDGE_NETFILTER struct nf_bridge_info *nf_bridge; #endif @@ -325,6 +322,28 @@ extern void skb_over_panic(struct sk_buff *skb, int len, extern void skb_under_panic(struct sk_buff *skb, int len, void *here); +struct skb_seq_state +{ + __u32 lower_offset; + __u32 upper_offset; + __u32 frag_idx; + __u32 stepped_offset; + struct sk_buff *root_skb; + struct sk_buff *cur_skb; + __u8 *frag_data; +}; + +extern void skb_prepare_seq_read(struct sk_buff *skb, + unsigned int from, unsigned int to, + struct skb_seq_state *st); +extern unsigned int 
skb_seq_read(unsigned int consumed, const u8 **data, + struct skb_seq_state *st); +extern void skb_abort_seq_read(struct skb_seq_state *st); + +extern unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, + unsigned int to, struct ts_config *config, + struct ts_state *state); + /* Internal */ #define skb_shinfo(SKB) ((struct skb_shared_info *)((SKB)->end)) @@ -1219,15 +1238,6 @@ static inline void nf_reset(struct sk_buff *skb) { nf_conntrack_put(skb->nfct); skb->nfct = NULL; -#ifdef CONFIG_NETFILTER_DEBUG - skb->nf_debug = 0; -#endif -} -static inline void nf_reset_debug(struct sk_buff *skb) -{ -#ifdef CONFIG_NETFILTER_DEBUG - skb->nf_debug = 0; -#endif } #ifdef CONFIG_BRIDGE_NETFILTER diff --git a/include/linux/slab.h b/include/linux/slab.h index 7d66385..76cf7e6 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -64,6 +64,7 @@ extern int kmem_cache_shrink(kmem_cache_t *); extern void *kmem_cache_alloc(kmem_cache_t *, unsigned int __nocast); extern void kmem_cache_free(kmem_cache_t *, void *); extern unsigned int kmem_cache_size(kmem_cache_t *); +extern const char *kmem_cache_name(kmem_cache_t *); extern kmem_cache_t *kmem_find_general_cachep(size_t size, int gfpflags); /* Size description struct for general caches. */ diff --git a/include/linux/smp.h b/include/linux/smp.h index dcf1db3..9dfa3ee 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -92,10 +92,7 @@ void smp_prepare_boot_cpu(void); /* * These macros fold the SMP functionality into a single CPU system */ - -#if !defined(__smp_processor_id) || !defined(CONFIG_PREEMPT) -# define smp_processor_id() 0 -#endif +#define raw_smp_processor_id() 0 #define hard_smp_processor_id() 0 #define smp_call_function(func,info,retry,wait) ({ 0; }) #define on_each_cpu(func,info,retry,wait) ({ func(info); 0; }) @@ -106,30 +103,25 @@ static inline void smp_send_reschedule(int cpu) { } #endif /* !SMP */ /* - * DEBUG_PREEMPT support: check whether smp_processor_id() is being - * used in a preemption-safe way. + * smp_processor_id(): get the current CPU ID. * - * An architecture has to enable this debugging code explicitly. - * It can do so by renaming the smp_processor_id() macro to - * __smp_processor_id(). This should only be done after some minimal - * testing, because usually there are a number of false positives - * that an architecture will trigger. + * if DEBUG_PREEMPT is enabled the we check whether it is + * used in a preemption-safe way. (smp_processor_id() is safe + * if it's used in a preemption-off critical section, or in + * a thread that is bound to the current CPU.) * - * To fix a false positive (i.e. smp_processor_id() use that the - * debugging code reports but which use for some reason is legal), - * change the smp_processor_id() reference to _smp_processor_id(), - * which is the nondebug variant. NOTE: don't use this to hack around - * real bugs. + * NOTE: raw_smp_processor_id() is for internal use only + * (smp_processor_id() is the preferred variant), but in rare + * instances it might also be used to turn off false positives + * (i.e. smp_processor_id() use that the debugging code reports but + * which use for some reason is legal). Don't use this to hack around + * the warning message, as your code might not work under PREEMPT. 
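
Illustrative aside, not part of the patch: with smp_processor_id() now wrapped by the debug check, preemptible code should pin itself with get_cpu()/put_cpu() (defined just below) instead of reaching for raw_smp_processor_id():

static void touch_this_cpus_data(void)
{
	int cpu = get_cpu();	/* disables preemption, so 'cpu' stays valid */

	/* ... use per-CPU state belonging to 'cpu' here ... */

	put_cpu();		/* re-enables preemption */
}
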
*/ -#ifdef __smp_processor_id -# if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT) - extern unsigned int smp_processor_id(void); -# else -# define smp_processor_id() __smp_processor_id() -# endif -# define _smp_processor_id() __smp_processor_id() +#ifdef CONFIG_DEBUG_PREEMPT + extern unsigned int debug_smp_processor_id(void); +# define smp_processor_id() debug_smp_processor_id() #else -# define _smp_processor_id() smp_processor_id() +# define smp_processor_id() raw_smp_processor_id() #endif #define get_cpu() ({ preempt_disable(); smp_processor_id(); }) diff --git a/include/linux/string.h b/include/linux/string.h index b9fc594..93994c6 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -88,6 +88,8 @@ extern int memcmp(const void *,const void *,__kernel_size_t); extern void * memchr(const void *,int,__kernel_size_t); #endif +extern char *kstrdup(const char *s, int gfp); + #ifdef __cplusplus } #endif diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 2709caf..ab151bb 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -111,6 +111,11 @@ struct rpc_procinfo { struct rpc_clnt *rpc_create_client(struct rpc_xprt *xprt, char *servname, struct rpc_program *info, u32 version, rpc_authflavor_t authflavor); +struct rpc_clnt *rpc_new_client(struct rpc_xprt *xprt, char *servname, + struct rpc_program *info, + u32 version, rpc_authflavor_t authflavor); +struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *, + struct rpc_program *, int); struct rpc_clnt *rpc_clone_client(struct rpc_clnt *); int rpc_shutdown_client(struct rpc_clnt *); int rpc_destroy_client(struct rpc_clnt *); @@ -129,6 +134,7 @@ void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset); void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset); void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int); size_t rpc_max_payload(struct rpc_clnt *); +int rpc_ping(struct rpc_clnt *clnt, int flags); static __inline__ int rpc_call(struct rpc_clnt *clnt, u32 proc, void *argp, void *resp, int flags) diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 99d17ed..4d77e90 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -31,7 +31,6 @@ struct rpc_wait_queue; struct rpc_wait { struct list_head list; /* wait queue links */ struct list_head links; /* Links to related tasks */ - wait_queue_head_t waitq; /* sync: sleep on this q */ struct rpc_wait_queue * rpc_waitq; /* RPC wait queue we're on */ }; diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 3700397..5af8800 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -185,6 +185,17 @@ xdr_ressize_check(struct svc_rqst *rqstp, u32 *p) return vec->iov_len <= PAGE_SIZE; } +static inline struct page * +svc_take_res_page(struct svc_rqst *rqstp) +{ + if (rqstp->rq_arghi <= rqstp->rq_argused) + return NULL; + rqstp->rq_arghi--; + rqstp->rq_respages[rqstp->rq_resused] = + rqstp->rq_argpages[rqstp->rq_arghi]; + return rqstp->rq_respages[rqstp->rq_resused++]; +} + static inline int svc_take_page(struct svc_rqst *rqstp) { if (rqstp->rq_arghi <= rqstp->rq_argused) @@ -240,9 +251,10 @@ struct svc_deferred_req { }; /* - * RPC program + * List of RPC programs on the same transport endpoint */ struct svc_program { + struct svc_program * pg_next; /* other programs (same xprt) */ u32 pg_prog; /* program number */ unsigned int pg_lovers; /* lowest version */ unsigned int pg_hivers; /* lowest version */ diff --git 
a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 541dcf8..34ec3e8 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -146,7 +146,8 @@ extern void xdr_shift_buf(struct xdr_buf *, size_t); extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *); extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, int, int); extern int xdr_buf_read_netobj(struct xdr_buf *, struct xdr_netobj *, int); -extern int read_bytes_from_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len); +extern int read_bytes_from_xdr_buf(struct xdr_buf *, int, void *, int); +extern int write_bytes_to_xdr_buf(struct xdr_buf *, int, void *, int); /* * Helper structure for copying from an sk_buff. @@ -160,7 +161,7 @@ typedef struct { typedef size_t (*skb_read_actor_t)(skb_reader_t *desc, void *to, size_t len); -extern void xdr_partial_copy_from_skb(struct xdr_buf *, unsigned int, +extern ssize_t xdr_partial_copy_from_skb(struct xdr_buf *, unsigned int, skb_reader_t *, skb_read_actor_t); struct socket; @@ -168,6 +169,22 @@ struct sockaddr; extern int xdr_sendpages(struct socket *, struct sockaddr *, int, struct xdr_buf *, unsigned int, int); +extern int xdr_encode_word(struct xdr_buf *, int, u32); +extern int xdr_decode_word(struct xdr_buf *, int, u32 *); + +struct xdr_array2_desc; +typedef int (*xdr_xcode_elem_t)(struct xdr_array2_desc *desc, void *elem); +struct xdr_array2_desc { + unsigned int elem_size; + unsigned int array_len; + xdr_xcode_elem_t xcode; +}; + +extern int xdr_decode_array2(struct xdr_buf *buf, unsigned int base, + struct xdr_array2_desc *desc); +extern int xdr_encode_array2(struct xdr_buf *buf, unsigned int base, + struct xdr_array2_desc *desc); + /* * Provide some simple tools for XDR buffer overflow-checking etc. 
*/ diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 2bf0d5f..f2e96fd 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -58,7 +58,7 @@ static inline int software_suspend(void) } #endif -#ifdef CONFIG_SMP +#ifdef CONFIG_SUSPEND_SMP extern void disable_nonboot_cpus(void); extern void enable_nonboot_cpus(void); #else diff --git a/include/linux/swap.h b/include/linux/swap.h index 3bbc41b..2343f99 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -172,7 +172,8 @@ extern int rotate_reclaimable_page(struct page *page); extern void swap_setup(void); /* linux/mm/vmscan.c */ -extern int try_to_free_pages(struct zone **, unsigned int, unsigned int); +extern int try_to_free_pages(struct zone **, unsigned int); +extern int zone_reclaim(struct zone *, unsigned int, unsigned int); extern int shrink_all_memory(int); extern int vm_swappiness; diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index c39f6f7..52830b6 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -159,8 +159,9 @@ asmlinkage long sys_shutdown(int, int); asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user *arg); asmlinkage long sys_restart_syscall(void); -asmlinkage long sys_kexec_load(void *entry, unsigned long nr_segments, - struct kexec_segment *segments, unsigned long flags); +asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments, + struct kexec_segment __user *segments, + unsigned long flags); asmlinkage long sys_exit(int error_code); asmlinkage void sys_exit_group(int error_code); diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 7729981..ebfe125 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -136,6 +136,7 @@ enum KERN_UNKNOWN_NMI_PANIC=66, /* int: unknown nmi panic flag */ KERN_BOOTLOADER_TYPE=67, /* int: boot loader type */ KERN_RANDOMIZE=68, /* int: randomize virtual address space */ + KERN_SETUID_DUMPABLE=69, /* int: behaviour of dumps for setuid core */ }; @@ -242,6 +243,7 @@ enum NET_CORE_MOD_CONG=16, NET_CORE_DEV_WEIGHT=17, NET_CORE_SOMAXCONN=18, + NET_CORE_BUDGET=19, }; /* /proc/sys/net/ethernet */ @@ -332,20 +334,14 @@ enum NET_TCP_FRTO=92, NET_TCP_LOW_LATENCY=93, NET_IPV4_IPFRAG_SECRET_INTERVAL=94, - NET_TCP_WESTWOOD=95, NET_IPV4_IGMP_MAX_MSF=96, NET_TCP_NO_METRICS_SAVE=97, - NET_TCP_VEGAS=98, - NET_TCP_VEGAS_ALPHA=99, - NET_TCP_VEGAS_BETA=100, - NET_TCP_VEGAS_GAMMA=101, - NET_TCP_BIC=102, - NET_TCP_BIC_FAST_CONVERGENCE=103, - NET_TCP_BIC_LOW_WINDOW=104, NET_TCP_DEFAULT_WIN_SCALE=105, NET_TCP_MODERATE_RCVBUF=106, NET_TCP_TSO_WIN_DIVISOR=107, NET_TCP_BIC_BETA=108, + NET_IPV4_ICMP_ERRORS_USE_INBOUND_IFADDR=109, + NET_TCP_CONG_CONTROL=110, }; enum { @@ -399,6 +395,7 @@ enum NET_IPV4_CONF_FORCE_IGMP_VERSION=17, NET_IPV4_CONF_ARP_ANNOUNCE=18, NET_IPV4_CONF_ARP_IGNORE=19, + NET_IPV4_CONF_PROMOTE_SECONDARIES=20, __NET_IPV4_CONF_MAX }; diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 38b58b3..392da5a 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -16,13 +16,13 @@ struct kobject; struct module; struct attribute { - char * name; + const char * name; struct module * owner; mode_t mode; }; struct attribute_group { - char * name; + const char * name; struct attribute ** attrs; }; @@ -73,6 +73,7 @@ struct sysfs_dirent { int s_type; umode_t s_mode; struct dentry * s_dentry; + struct iattr * s_iattr; }; #define SYSFS_ROOT 0x0001 @@ -105,11 +106,11 @@ sysfs_chmod_file(struct kobject *kobj, struct attribute *attr, mode_t mode); 
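
Illustrative aside, not part of the patch: because struct attribute and struct attribute_group now carry const char * names (see the sysfs.h hunk above), static definitions can point straight at string literals. Everything below is an invented example.

static struct attribute example_attr = {
	.name  = "example",		/* string literal is fine now that name is const */
	.owner = THIS_MODULE,
	.mode  = S_IRUGO,
};

static struct attribute *example_attrs[] = {
	&example_attr,
	NULL,
};

static struct attribute_group example_group = {
	.name  = "example_group",
	.attrs = example_attrs,
};
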
extern void sysfs_remove_file(struct kobject *, const struct attribute *); -extern int -sysfs_create_link(struct kobject * kobj, struct kobject * target, char * name); +extern int +sysfs_create_link(struct kobject * kobj, struct kobject * target, const char * name); extern void -sysfs_remove_link(struct kobject *, char * name); +sysfs_remove_link(struct kobject *, const char * name); int sysfs_create_bin_file(struct kobject * kobj, struct bin_attribute * attr); int sysfs_remove_bin_file(struct kobject * kobj, struct bin_attribute * attr); @@ -153,12 +154,12 @@ static inline void sysfs_remove_file(struct kobject * k, const struct attribute ; } -static inline int sysfs_create_link(struct kobject * k, struct kobject * t, char * n) +static inline int sysfs_create_link(struct kobject * k, struct kobject * t, const char * n) { return 0; } -static inline void sysfs_remove_link(struct kobject * k, char * name) +static inline void sysfs_remove_link(struct kobject * k, const char * name) { ; } diff --git a/include/linux/tc_ematch/tc_em_meta.h b/include/linux/tc_ematch/tc_em_meta.h index aa6b48bb..a6b2cc5 100644 --- a/include/linux/tc_ematch/tc_em_meta.h +++ b/include/linux/tc_ematch/tc_em_meta.h @@ -56,6 +56,36 @@ enum TCF_META_ID_TCCLASSID, TCF_META_ID_RTCLASSID, TCF_META_ID_RTIIF, + TCF_META_ID_SK_FAMILY, + TCF_META_ID_SK_STATE, + TCF_META_ID_SK_REUSE, + TCF_META_ID_SK_BOUND_IF, + TCF_META_ID_SK_REFCNT, + TCF_META_ID_SK_SHUTDOWN, + TCF_META_ID_SK_PROTO, + TCF_META_ID_SK_TYPE, + TCF_META_ID_SK_RCVBUF, + TCF_META_ID_SK_RMEM_ALLOC, + TCF_META_ID_SK_WMEM_ALLOC, + TCF_META_ID_SK_OMEM_ALLOC, + TCF_META_ID_SK_WMEM_QUEUED, + TCF_META_ID_SK_RCV_QLEN, + TCF_META_ID_SK_SND_QLEN, + TCF_META_ID_SK_ERR_QLEN, + TCF_META_ID_SK_FORWARD_ALLOCS, + TCF_META_ID_SK_SNDBUF, + TCF_META_ID_SK_ALLOCS, + TCF_META_ID_SK_ROUTE_CAPS, + TCF_META_ID_SK_HASHENT, + TCF_META_ID_SK_LINGERTIME, + TCF_META_ID_SK_ACK_BACKLOG, + TCF_META_ID_SK_MAX_ACK_BACKLOG, + TCF_META_ID_SK_PRIO, + TCF_META_ID_SK_RCVLOWAT, + TCF_META_ID_SK_RCVTIMEO, + TCF_META_ID_SK_SNDTIMEO, + TCF_META_ID_SK_SENDMSG_OFF, + TCF_META_ID_SK_WRITE_PENDING, __TCF_META_ID_MAX }; #define TCF_META_ID_MAX (__TCF_META_ID_MAX - 1) diff --git a/include/linux/tc_ematch/tc_em_text.h b/include/linux/tc_ematch/tc_em_text.h new file mode 100644 index 0000000..7cd43e9 --- /dev/null +++ b/include/linux/tc_ematch/tc_em_text.h @@ -0,0 +1,19 @@ +#ifndef __LINUX_TC_EM_TEXT_H +#define __LINUX_TC_EM_TEXT_H + +#include <linux/pkt_cls.h> + +#define TC_EM_TEXT_ALGOSIZ 16 + +struct tcf_em_text +{ + char algo[TC_EM_TEXT_ALGOSIZ]; + __u16 from_offset; + __u16 to_offset; + __u16 pattern_len; + __u8 from_layer:4; + __u8 to_layer:4; + __u8 pad; +}; + +#endif diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 14a55e3..dfd93d0 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -127,6 +127,7 @@ enum { #define TCP_WINDOW_CLAMP 10 /* Bound advertised window */ #define TCP_INFO 11 /* Information about this connection. 
*/ #define TCP_QUICKACK 12 /* Block/reenable quick acks */ +#define TCP_CONGESTION 13 /* Congestion control algorithm */ #define TCPI_OPT_TIMESTAMPS 1 #define TCPI_OPT_SACK 2 @@ -203,13 +204,6 @@ struct tcp_sack_block { __u32 end_seq; }; -enum tcp_congestion_algo { - TCP_RENO=0, - TCP_VEGAS, - TCP_WESTWOOD, - TCP_BIC, -}; - struct tcp_options_received { /* PAWS/RTTM data */ long ts_recent_stamp;/* Time we stored ts_recent (for aging) */ @@ -230,6 +224,17 @@ struct tcp_options_received { __u16 mss_clamp; /* Maximal mss, negotiated at connection setup */ }; +struct tcp_request_sock { + struct inet_request_sock req; + __u32 rcv_isn; + __u32 snt_isn; +}; + +static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req) +{ + return (struct tcp_request_sock *)req; +} + struct tcp_sock { /* inet_sock has to be the first member of tcp_sock */ struct inet_sock inet; @@ -294,7 +299,7 @@ struct tcp_sock { __u8 reordering; /* Packet reordering metric. */ __u8 frto_counter; /* Number of new acks after RTO */ - __u8 adv_cong; /* Using Vegas, Westwood, or BIC */ + __u8 unused; __u8 defer_accept; /* User waits for some data after accept() */ /* RTT measurement */ @@ -368,22 +373,7 @@ struct tcp_sock { __u32 total_retrans; /* Total retransmits for entire connection */ - /* The syn_wait_lock is necessary only to avoid proc interface having - * to grab the main lock sock while browsing the listening hash - * (otherwise it's deadlock prone). - * This lock is acquired in read mode only from listening_get_next() - * and it's acquired in write mode _only_ from code that is actively - * changing the syn_wait_queue. All readers that are holding - * the master sock lock don't need to grab this lock in read mode - * too as the syn_wait_queue writes are always protected from - * the main sock lock. - */ - rwlock_t syn_wait_lock; - struct tcp_listen_opt *listen_opt; - - /* FIFO of established children */ - struct open_request *accept_queue; - struct open_request *accept_queue_tail; + struct request_sock_queue accept_queue; /* FIFO of established children */ unsigned int keepalive_time; /* time before keep alive takes place */ unsigned int keepalive_intvl; /* time interval between keep alive probes */ @@ -405,37 +395,10 @@ struct tcp_sock { __u32 time; } rcvq_space; -/* TCP Westwood structure */ - struct { - __u32 bw_ns_est; /* first bandwidth estimation..not too smoothed 8) */ - __u32 bw_est; /* bandwidth estimate */ - __u32 rtt_win_sx; /* here starts a new evaluation... 
*/ - __u32 bk; - __u32 snd_una; /* used for evaluating the number of acked bytes */ - __u32 cumul_ack; - __u32 accounted; - __u32 rtt; - __u32 rtt_min; /* minimum observed RTT */ - } westwood; - -/* Vegas variables */ - struct { - __u32 beg_snd_nxt; /* right edge during last RTT */ - __u32 beg_snd_una; /* left edge during last RTT */ - __u32 beg_snd_cwnd; /* saves the size of the cwnd */ - __u8 doing_vegas_now;/* if true, do vegas for this RTT */ - __u16 cntRTT; /* # of RTTs measured within last RTT */ - __u32 minRTT; /* min of RTTs measured within last RTT (in usec) */ - __u32 baseRTT; /* the min of all Vegas RTT measurements seen (in usec) */ - } vegas; - - /* BI TCP Parameters */ - struct { - __u32 cnt; /* increase cwnd by 1 after this number of ACKs */ - __u32 last_max_cwnd; /* last maximium snd_cwnd */ - __u32 last_cwnd; /* the last snd_cwnd */ - __u32 last_stamp; /* time when updated last_cwnd */ - } bictcp; + /* Pluggable TCP congestion control hook */ + struct tcp_congestion_ops *ca_ops; + u32 ca_priv[16]; +#define TCP_CA_PRIV_SIZE (16*sizeof(u32)) }; static inline struct tcp_sock *tcp_sk(const struct sock *sk) @@ -443,6 +406,11 @@ static inline struct tcp_sock *tcp_sk(const struct sock *sk) return (struct tcp_sock *)sk; } +static inline void *tcp_ca(const struct tcp_sock *tp) +{ + return (void *) tp->ca_priv; +} + #endif #endif /* _LINUX_TCP_H */ diff --git a/include/linux/tcp_diag.h b/include/linux/tcp_diag.h index ceee962..7a59967 100644 --- a/include/linux/tcp_diag.h +++ b/include/linux/tcp_diag.h @@ -99,9 +99,10 @@ enum TCPDIAG_MEMINFO, TCPDIAG_INFO, TCPDIAG_VEGASINFO, + TCPDIAG_CONG, }; -#define TCPDIAG_MAX TCPDIAG_VEGASINFO +#define TCPDIAG_MAX TCPDIAG_CONG /* TCPDIAG_MEM */ @@ -123,5 +124,4 @@ struct tcpvegas_info { __u32 tcpv_minrtt; }; - #endif /* _TCP_DIAG_H_ */ diff --git a/include/linux/textsearch.h b/include/linux/textsearch.h new file mode 100644 index 0000000..941f45a --- /dev/null +++ b/include/linux/textsearch.h @@ -0,0 +1,180 @@ +#ifndef __LINUX_TEXTSEARCH_H +#define __LINUX_TEXTSEARCH_H + +#ifdef __KERNEL__ + +#include <linux/types.h> +#include <linux/list.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/err.h> + +struct ts_config; + +/** + * TS_AUTOLOAD - Automatically load textsearch modules when needed + */ +#define TS_AUTOLOAD 1 + +/** + * struct ts_state - search state + * @offset: offset for next match + * @cb: control buffer, for persistant variables of get_next_block() + */ +struct ts_state +{ + unsigned int offset; + char cb[40]; +}; + +/** + * struct ts_ops - search module operations + * @name: name of search algorithm + * @init: initialization function to prepare a search + * @find: find the next occurrence of the pattern + * @destroy: destroy algorithm specific parts of a search configuration + * @get_pattern: return head of pattern + * @get_pattern_len: return length of pattern + * @owner: module reference to algorithm + */ +struct ts_ops +{ + const char *name; + struct ts_config * (*init)(const void *, unsigned int, int); + unsigned int (*find)(struct ts_config *, + struct ts_state *); + void (*destroy)(struct ts_config *); + void * (*get_pattern)(struct ts_config *); + unsigned int (*get_pattern_len)(struct ts_config *); + struct module *owner; + struct list_head list; +}; + +/** + * struct ts_config - search configuration + * @ops: operations of chosen algorithm + * @get_next_block: callback to fetch the next block to search in + * @finish: callback to finalize a search + */ +struct ts_config +{ + struct ts_ops *ops; + + 
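
Illustrative aside, not part of the patch: a typical caller of this infrastructure, combining textsearch_prepare()/textsearch_destroy() declared later in this header with skb_find_text() added to skbuff.h earlier in the patch; the "kmp" algorithm name is an assumption about which textsearch modules are available.

static unsigned int find_get_in_skb(struct sk_buff *skb)
{
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	conf = textsearch_prepare("kmp", "GET ", 4, GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return UINT_MAX;

	pos = skb_find_text(skb, 0, skb->len, conf, &state);	/* UINT_MAX if absent */
	textsearch_destroy(conf);
	return pos;
}
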
/** + * get_next_block - fetch next block of data + * @consumed: number of bytes consumed by the caller + * @dst: destination buffer + * @conf: search configuration + * @state: search state + * + * Called repeatedly until 0 is returned. Must assign the + * head of the next block of data to &*dst and return the length + * of the block or 0 if at the end. consumed == 0 indicates + * a new search. May store/read persistant values in state->cb. + */ + unsigned int (*get_next_block)(unsigned int consumed, + const u8 **dst, + struct ts_config *conf, + struct ts_state *state); + + /** + * finish - finalize/clean a series of get_next_block() calls + * @conf: search configuration + * @state: search state + * + * Called after the last use of get_next_block(), may be used + * to cleanup any leftovers. + */ + void (*finish)(struct ts_config *conf, + struct ts_state *state); +}; + +/** + * textsearch_next - continue searching for a pattern + * @conf: search configuration + * @state: search state + * + * Continues a search looking for more occurrences of the pattern. + * textsearch_find() must be called to find the first occurrence + * in order to reset the state. + * + * Returns the position of the next occurrence of the pattern or + * UINT_MAX if not match was found. + */ +static inline unsigned int textsearch_next(struct ts_config *conf, + struct ts_state *state) +{ + unsigned int ret = conf->ops->find(conf, state); + + if (conf->finish) + conf->finish(conf, state); + + return ret; +} + +/** + * textsearch_find - start searching for a pattern + * @conf: search configuration + * @state: search state + * + * Returns the position of first occurrence of the pattern or + * UINT_MAX if no match was found. + */ +static inline unsigned int textsearch_find(struct ts_config *conf, + struct ts_state *state) +{ + state->offset = 0; + return textsearch_next(conf, state); +} + +/** + * textsearch_get_pattern - return head of the pattern + * @conf: search configuration + */ +static inline void *textsearch_get_pattern(struct ts_config *conf) +{ + return conf->ops->get_pattern(conf); +} + +/** + * textsearch_get_pattern_len - return length of the pattern + * @conf: search configuration + */ +static inline unsigned int textsearch_get_pattern_len(struct ts_config *conf) +{ + return conf->ops->get_pattern_len(conf); +} + +extern int textsearch_register(struct ts_ops *); +extern int textsearch_unregister(struct ts_ops *); +extern struct ts_config *textsearch_prepare(const char *, const void *, + unsigned int, int, int); +extern void textsearch_destroy(struct ts_config *conf); +extern unsigned int textsearch_find_continuous(struct ts_config *, + struct ts_state *, + const void *, unsigned int); + + +#define TS_PRIV_ALIGNTO 8 +#define TS_PRIV_ALIGN(len) (((len) + TS_PRIV_ALIGNTO-1) & ~(TS_PRIV_ALIGNTO-1)) + +static inline struct ts_config *alloc_ts_config(size_t payload, int gfp_mask) +{ + struct ts_config *conf; + + conf = kmalloc(TS_PRIV_ALIGN(sizeof(*conf)) + payload, gfp_mask); + if (conf == NULL) + return ERR_PTR(-ENOMEM); + + memset(conf, 0, TS_PRIV_ALIGN(sizeof(*conf)) + payload); + return conf; +} + +static inline void *ts_config_priv(struct ts_config *conf) +{ + return ((u8 *) conf + TS_PRIV_ALIGN(sizeof(struct ts_config))); +} + +#endif /* __KERNEL__ */ + +#endif diff --git a/include/linux/textsearch_fsm.h b/include/linux/textsearch_fsm.h new file mode 100644 index 0000000..fdfa078 --- /dev/null +++ b/include/linux/textsearch_fsm.h @@ -0,0 +1,48 @@ +#ifndef __LINUX_TEXTSEARCH_FSM_H +#define 
__LINUX_TEXTSEARCH_FSM_H + +#include <linux/types.h> + +enum { + TS_FSM_SPECIFIC, /* specific character */ + TS_FSM_WILDCARD, /* any character */ + TS_FSM_DIGIT, /* isdigit() */ + TS_FSM_XDIGIT, /* isxdigit() */ + TS_FSM_PRINT, /* isprint() */ + TS_FSM_ALPHA, /* isalpha() */ + TS_FSM_ALNUM, /* isalnum() */ + TS_FSM_ASCII, /* isascii() */ + TS_FSM_CNTRL, /* iscntrl() */ + TS_FSM_GRAPH, /* isgraph() */ + TS_FSM_LOWER, /* islower() */ + TS_FSM_UPPER, /* isupper() */ + TS_FSM_PUNCT, /* ispunct() */ + TS_FSM_SPACE, /* isspace() */ + __TS_FSM_TYPE_MAX, +}; +#define TS_FSM_TYPE_MAX (__TS_FSM_TYPE_MAX - 1) + +enum { + TS_FSM_SINGLE, /* 1 occurrence */ + TS_FSM_PERHAPS, /* 1 or 0 occurrence */ + TS_FSM_ANY, /* 0..n occurrences */ + TS_FSM_MULTI, /* 1..n occurrences */ + TS_FSM_HEAD_IGNORE, /* 0..n ignored occurrences at head */ + __TS_FSM_RECUR_MAX, +}; +#define TS_FSM_RECUR_MAX (__TS_FSM_RECUR_MAX - 1) + +/** + * struct ts_fsm_token - state machine token (state) + * @type: type of token + * @recur: number of recurrences + * @value: character value for TS_FSM_SPECIFIC + */ +struct ts_fsm_token +{ + __u16 type; + __u8 recur; + __u8 value; +}; + +#endif diff --git a/include/linux/timer.h b/include/linux/timer.h index 90db1cc..221f81a 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -6,45 +6,33 @@ #include <linux/spinlock.h> #include <linux/stddef.h> -struct tvec_t_base_s; +struct timer_base_s; struct timer_list { struct list_head entry; unsigned long expires; - spinlock_t lock; unsigned long magic; void (*function)(unsigned long); unsigned long data; - struct tvec_t_base_s *base; + struct timer_base_s *base; }; #define TIMER_MAGIC 0x4b87ad6e +extern struct timer_base_s __init_timer_base; + #define TIMER_INITIALIZER(_function, _expires, _data) { \ .function = (_function), \ .expires = (_expires), \ .data = (_data), \ - .base = NULL, \ + .base = &__init_timer_base, \ .magic = TIMER_MAGIC, \ - .lock = SPIN_LOCK_UNLOCKED, \ } -/*** - * init_timer - initialize a timer. - * @timer: the timer to be initialized - * - * init_timer() must be done to a timer prior calling *any* of the - * other timer functions. - */ -static inline void init_timer(struct timer_list * timer) -{ - timer->base = NULL; - timer->magic = TIMER_MAGIC; - spin_lock_init(&timer->lock); -} +void fastcall init_timer(struct timer_list * timer); /*** * timer_pending - is a timer pending? 
@@ -58,7 +46,7 @@ static inline void init_timer(struct timer_list * timer) */ static inline int timer_pending(const struct timer_list * timer) { - return timer->base != NULL; + return timer->entry.next != NULL; } extern void add_timer_on(struct timer_list *timer, int cpu); @@ -88,13 +76,15 @@ static inline void add_timer(struct timer_list * timer) } #ifdef CONFIG_SMP + extern int try_to_del_timer_sync(struct timer_list *timer); extern int del_timer_sync(struct timer_list *timer); - extern int del_singleshot_timer_sync(struct timer_list *timer); #else -# define del_timer_sync(t) del_timer(t) -# define del_singleshot_timer_sync(t) del_timer(t) +# define try_to_del_timer_sync(t) del_timer(t) +# define del_timer_sync(t) del_timer(t) #endif +#define del_singleshot_timer_sync(t) del_timer_sync(t) + extern void init_timers(void); extern void run_local_timers(void); extern void it_real_fn(unsigned long); diff --git a/include/linux/topology.h b/include/linux/topology.h index d70e897..0320225 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h @@ -89,6 +89,11 @@ .cache_hot_time = 0, \ .cache_nice_tries = 0, \ .per_cpu_gain = 25, \ + .busy_idx = 0, \ + .idle_idx = 0, \ + .newidle_idx = 1, \ + .wake_idx = 0, \ + .forkexec_idx = 0, \ .flags = SD_LOAD_BALANCE \ | SD_BALANCE_NEWIDLE \ | SD_BALANCE_EXEC \ @@ -115,12 +120,15 @@ .cache_hot_time = (5*1000000/2), \ .cache_nice_tries = 1, \ .per_cpu_gain = 100, \ + .busy_idx = 2, \ + .idle_idx = 1, \ + .newidle_idx = 2, \ + .wake_idx = 1, \ + .forkexec_idx = 1, \ .flags = SD_LOAD_BALANCE \ | SD_BALANCE_NEWIDLE \ | SD_BALANCE_EXEC \ - | SD_WAKE_AFFINE \ - | SD_WAKE_IDLE \ - | SD_WAKE_BALANCE, \ + | SD_WAKE_AFFINE, \ .last_balance = jiffies, \ .balance_interval = 1, \ .nr_balance_failed = 0, \ diff --git a/include/linux/tty.h b/include/linux/tty.h index 1b76106..59ff42c 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -345,6 +345,7 @@ extern int tty_check_change(struct tty_struct * tty); extern void stop_tty(struct tty_struct * tty); extern void start_tty(struct tty_struct * tty); extern int tty_register_ldisc(int disc, struct tty_ldisc *new_ldisc); +extern int tty_unregister_ldisc(int disc); extern int tty_register_driver(struct tty_driver *driver); extern int tty_unregister_driver(struct tty_driver *driver); extern void tty_register_device(struct tty_driver *driver, unsigned index, struct device *dev); diff --git a/include/linux/usb.h b/include/linux/usb.h index 41d1a64..eb282b5 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -287,15 +287,14 @@ struct usb_bus { struct dentry *usbfs_dentry; /* usbfs dentry entry for the bus */ - struct class_device class_dev; /* class device for this bus */ + struct class_device *class_dev; /* class device for this bus */ + struct kref kref; /* handles reference counting this bus */ void (*release)(struct usb_bus *bus); /* function to destroy this bus's memory */ -#if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE) +#if defined(CONFIG_USB_MON) struct mon_bus *mon_bus; /* non-null when associated */ int monitored; /* non-zero when monitored */ #endif }; -#define to_usb_bus(d) container_of(d, struct usb_bus, class_dev) - /* -------------------------------------------------------------------------- */ @@ -796,6 +795,10 @@ typedef void (*usb_complete_t)(struct urb *, struct pt_regs *); * of the iso_frame_desc array, and the number of errors is reported in * error_count. Completion callbacks for ISO transfers will normally * (re)submit URBs to ensure a constant transfer rate. 
+ * + * Note that even fields marked "public" should not be touched by the driver + * when the urb is owned by the hcd, that is, since the call to + * usb_submit_urb() till the entry into the completion routine. */ struct urb { @@ -803,12 +806,12 @@ struct urb struct kref kref; /* reference count of the URB */ spinlock_t lock; /* lock for the URB */ void *hcpriv; /* private data for host controller */ - struct list_head urb_list; /* list pointer to all active urbs */ int bandwidth; /* bandwidth for INT/ISO request */ atomic_t use_count; /* concurrent submissions counter */ u8 reject; /* submissions will fail */ /* public, documented fields in the urb that can be used by drivers */ + struct list_head urb_list; /* list head for use by the urb owner */ struct usb_device *dev; /* (in) pointer to associated device */ unsigned int pipe; /* (in) pipe information */ int status; /* (return) non-ISO status */ diff --git a/include/linux/wait.h b/include/linux/wait.h index 17c874a..d38c9fe 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -33,7 +33,7 @@ int default_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key struct __wait_queue { unsigned int flags; #define WQ_FLAG_EXCLUSIVE 0x01 - struct task_struct * task; + void *private; wait_queue_func_t func; struct list_head task_list; }; @@ -60,7 +60,7 @@ typedef struct __wait_queue_head wait_queue_head_t; */ #define __WAITQUEUE_INITIALIZER(name, tsk) { \ - .task = tsk, \ + .private = tsk, \ .func = default_wake_function, \ .task_list = { NULL, NULL } } @@ -86,7 +86,7 @@ static inline void init_waitqueue_head(wait_queue_head_t *q) static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p) { q->flags = 0; - q->task = p; + q->private = p; q->func = default_wake_function; } @@ -94,7 +94,7 @@ static inline void init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func) { q->flags = 0; - q->task = NULL; + q->private = NULL; q->func = func; } @@ -110,7 +110,7 @@ static inline int waitqueue_active(wait_queue_head_t *q) * aio specifies a wait queue entry with an async notification * callback routine, not associated with any task. 
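/*
 * The switch from ->task to ->private above is invisible to wait-queue
 * users; the usual sleep loop, built on DEFINE_WAIT() (defined further
 * below) and prepare_to_wait()/finish_wait() from this header, is
 * unchanged.  A sketch only; my_wq and my_condition are hypothetical
 * stand-ins for the caller's own wait queue and wake-up condition.
 */
static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static int my_condition;

static void example_wait_for_condition(void)
{
        DEFINE_WAIT(wait);

        for (;;) {
                prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
                if (my_condition || signal_pending(current))
                        break;
                schedule();
        }
        finish_wait(&my_wq, &wait);
}

/* The waker simply sets my_condition and calls wake_up(&my_wq). */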
*/ -#define is_sync_wait(wait) (!(wait) || ((wait)->task)) +#define is_sync_wait(wait) (!(wait) || ((wait)->private)) extern void FASTCALL(add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)); extern void FASTCALL(add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait)); @@ -384,18 +384,16 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key); #define DEFINE_WAIT(name) \ wait_queue_t name = { \ - .task = current, \ + .private = current, \ .func = autoremove_wake_function, \ - .task_list = { .next = &(name).task_list, \ - .prev = &(name).task_list, \ - }, \ + .task_list = LIST_HEAD_INIT((name).task_list), \ } #define DEFINE_WAIT_BIT(name, word, bit) \ struct wait_bit_queue name = { \ .key = __WAIT_BIT_KEY_INITIALIZER(word, bit), \ .wait = { \ - .task = current, \ + .private = current, \ .func = wake_bit_function, \ .task_list = \ LIST_HEAD_INIT((name).wait.task_list), \ @@ -404,7 +402,7 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key); #define init_wait(wait) \ do { \ - (wait)->task = current; \ + (wait)->private = current; \ (wait)->func = autoremove_wake_function; \ INIT_LIST_HEAD(&(wait)->task_list); \ } while (0) diff --git a/include/linux/wireless.h b/include/linux/wireless.h index 2f51f2b..ae485f9 100644 --- a/include/linux/wireless.h +++ b/include/linux/wireless.h @@ -1,10 +1,10 @@ /* * This file define a set of standard wireless extensions * - * Version : 17 21.6.04 + * Version : 18 12.3.05 * * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com> - * Copyright (c) 1997-2004 Jean Tourrilhes, All Rights Reserved. + * Copyright (c) 1997-2005 Jean Tourrilhes, All Rights Reserved. */ #ifndef _LINUX_WIRELESS_H @@ -82,7 +82,7 @@ * (there is some stuff that will be added in the future...) * I just plan to increment with each new version. */ -#define WIRELESS_EXT 17 +#define WIRELESS_EXT 18 /* * Changes : @@ -182,6 +182,21 @@ * - Document (struct iw_quality *)->updated, add new flags (INVALID) * - Wireless Event capability in struct iw_range * - Add support for relative TxPower (yick !) + * + * V17 to V18 (From Jouni Malinen <jkmaline@cc.hut.fi>) + * ---------- + * - Add support for WPA/WPA2 + * - Add extended encoding configuration (SIOCSIWENCODEEXT and + * SIOCGIWENCODEEXT) + * - Add SIOCSIWGENIE/SIOCGIWGENIE + * - Add SIOCSIWMLME + * - Add SIOCSIWPMKSA + * - Add struct iw_range bit field for supported encoding capabilities + * - Add optional scan request parameters for SIOCSIWSCAN + * - Add SIOCSIWAUTH/SIOCGIWAUTH for setting authentication and WPA + * related parameters (extensible up to 4096 parameter values) + * - Add wireless events: IWEVGENIE, IWEVMICHAELMICFAILURE, + * IWEVASSOCREQIE, IWEVASSOCRESPIE, IWEVPMKIDCAND */ /**************************** CONSTANTS ****************************/ @@ -256,6 +271,30 @@ #define SIOCSIWPOWER 0x8B2C /* set Power Management settings */ #define SIOCGIWPOWER 0x8B2D /* get Power Management settings */ +/* WPA : Generic IEEE 802.11 informatiom element (e.g., for WPA/RSN/WMM). + * This ioctl uses struct iw_point and data buffer that includes IE id and len + * fields. More than one IE may be included in the request. Setting the generic + * IE to empty buffer (len=0) removes the generic IE from the driver. Drivers + * are allowed to generate their own WPA/RSN IEs, but in these cases, drivers + * are required to report the used IE as a wireless event, e.g., when + * associating with an AP. 
*/ +#define SIOCSIWGENIE 0x8B30 /* set generic IE */ +#define SIOCGIWGENIE 0x8B31 /* get generic IE */ + +/* WPA : IEEE 802.11 MLME requests */ +#define SIOCSIWMLME 0x8B16 /* request MLME operation; uses + * struct iw_mlme */ +/* WPA : Authentication mode parameters */ +#define SIOCSIWAUTH 0x8B32 /* set authentication mode params */ +#define SIOCGIWAUTH 0x8B33 /* get authentication mode params */ + +/* WPA : Extended version of encoding configuration */ +#define SIOCSIWENCODEEXT 0x8B34 /* set encoding token & mode */ +#define SIOCGIWENCODEEXT 0x8B35 /* get encoding token & mode */ + +/* WPA2 : PMKSA cache management */ +#define SIOCSIWPMKSA 0x8B36 /* PMKSA cache operation */ + /* -------------------- DEV PRIVATE IOCTL LIST -------------------- */ /* These 32 ioctl are wireless device private, for 16 commands. @@ -297,6 +336,34 @@ #define IWEVCUSTOM 0x8C02 /* Driver specific ascii string */ #define IWEVREGISTERED 0x8C03 /* Discovered a new node (AP mode) */ #define IWEVEXPIRED 0x8C04 /* Expired a node (AP mode) */ +#define IWEVGENIE 0x8C05 /* Generic IE (WPA, RSN, WMM, ..) + * (scan results); This includes id and + * length fields. One IWEVGENIE may + * contain more than one IE. Scan + * results may contain one or more + * IWEVGENIE events. */ +#define IWEVMICHAELMICFAILURE 0x8C06 /* Michael MIC failure + * (struct iw_michaelmicfailure) + */ +#define IWEVASSOCREQIE 0x8C07 /* IEs used in (Re)Association Request. + * The data includes id and length + * fields and may contain more than one + * IE. This event is required in + * Managed mode if the driver + * generates its own WPA/RSN IE. This + * should be sent just before + * IWEVREGISTERED event for the + * association. */ +#define IWEVASSOCRESPIE 0x8C08 /* IEs used in (Re)Association + * Response. The data includes id and + * length fields and may contain more + * than one IE. This may be sent + * between IWEVASSOCREQIE and + * IWEVREGISTERED events for the + * association. */ +#define IWEVPMKIDCAND 0x8C09 /* PMKID candidate for RSN + * pre-authentication + * (struct iw_pmkid_cand) */ #define IWEVFIRST 0x8C00 @@ -432,12 +499,94 @@ #define IW_SCAN_THIS_MODE 0x0020 /* Scan only this Mode */ #define IW_SCAN_ALL_RATE 0x0040 /* Scan all Bit-Rates */ #define IW_SCAN_THIS_RATE 0x0080 /* Scan only this Bit-Rate */ +/* struct iw_scan_req scan_type */ +#define IW_SCAN_TYPE_ACTIVE 0 +#define IW_SCAN_TYPE_PASSIVE 1 /* Maximum size of returned data */ #define IW_SCAN_MAX_DATA 4096 /* In bytes */ /* Max number of char in custom event - use multiple of them if needed */ #define IW_CUSTOM_MAX 256 /* In bytes */ +/* Generic information element */ +#define IW_GENERIC_IE_MAX 1024 + +/* MLME requests (SIOCSIWMLME / struct iw_mlme) */ +#define IW_MLME_DEAUTH 0 +#define IW_MLME_DISASSOC 1 + +/* SIOCSIWAUTH/SIOCGIWAUTH struct iw_param flags */ +#define IW_AUTH_INDEX 0x0FFF +#define IW_AUTH_FLAGS 0xF000 +/* SIOCSIWAUTH/SIOCGIWAUTH parameters (0 .. 
4095) + * (IW_AUTH_INDEX mask in struct iw_param flags; this is the index of the + * parameter that is being set/get to; value will be read/written to + * struct iw_param value field) */ +#define IW_AUTH_WPA_VERSION 0 +#define IW_AUTH_CIPHER_PAIRWISE 1 +#define IW_AUTH_CIPHER_GROUP 2 +#define IW_AUTH_KEY_MGMT 3 +#define IW_AUTH_TKIP_COUNTERMEASURES 4 +#define IW_AUTH_DROP_UNENCRYPTED 5 +#define IW_AUTH_80211_AUTH_ALG 6 +#define IW_AUTH_WPA_ENABLED 7 +#define IW_AUTH_RX_UNENCRYPTED_EAPOL 8 +#define IW_AUTH_ROAMING_CONTROL 9 +#define IW_AUTH_PRIVACY_INVOKED 10 + +/* IW_AUTH_WPA_VERSION values (bit field) */ +#define IW_AUTH_WPA_VERSION_DISABLED 0x00000001 +#define IW_AUTH_WPA_VERSION_WPA 0x00000002 +#define IW_AUTH_WPA_VERSION_WPA2 0x00000004 + +/* IW_AUTH_PAIRWISE_CIPHER and IW_AUTH_GROUP_CIPHER values (bit field) */ +#define IW_AUTH_CIPHER_NONE 0x00000001 +#define IW_AUTH_CIPHER_WEP40 0x00000002 +#define IW_AUTH_CIPHER_TKIP 0x00000004 +#define IW_AUTH_CIPHER_CCMP 0x00000008 +#define IW_AUTH_CIPHER_WEP104 0x00000010 + +/* IW_AUTH_KEY_MGMT values (bit field) */ +#define IW_AUTH_KEY_MGMT_802_1X 1 +#define IW_AUTH_KEY_MGMT_PSK 2 + +/* IW_AUTH_80211_AUTH_ALG values (bit field) */ +#define IW_AUTH_ALG_OPEN_SYSTEM 0x00000001 +#define IW_AUTH_ALG_SHARED_KEY 0x00000002 +#define IW_AUTH_ALG_LEAP 0x00000004 + +/* IW_AUTH_ROAMING_CONTROL values */ +#define IW_AUTH_ROAMING_ENABLE 0 /* driver/firmware based roaming */ +#define IW_AUTH_ROAMING_DISABLE 1 /* user space program used for roaming + * control */ + +/* SIOCSIWENCODEEXT definitions */ +#define IW_ENCODE_SEQ_MAX_SIZE 8 +/* struct iw_encode_ext ->alg */ +#define IW_ENCODE_ALG_NONE 0 +#define IW_ENCODE_ALG_WEP 1 +#define IW_ENCODE_ALG_TKIP 2 +#define IW_ENCODE_ALG_CCMP 3 +/* struct iw_encode_ext ->ext_flags */ +#define IW_ENCODE_EXT_TX_SEQ_VALID 0x00000001 +#define IW_ENCODE_EXT_RX_SEQ_VALID 0x00000002 +#define IW_ENCODE_EXT_GROUP_KEY 0x00000004 +#define IW_ENCODE_EXT_SET_TX_KEY 0x00000008 + +/* IWEVMICHAELMICFAILURE : struct iw_michaelmicfailure ->flags */ +#define IW_MICFAILURE_KEY_ID 0x00000003 /* Key ID 0..3 */ +#define IW_MICFAILURE_GROUP 0x00000004 +#define IW_MICFAILURE_PAIRWISE 0x00000008 +#define IW_MICFAILURE_STAKEY 0x00000010 +#define IW_MICFAILURE_COUNT 0x00000060 /* 1 or 2 (0 = count not supported) + */ + +/* Bit field values for enc_capa in struct iw_range */ +#define IW_ENC_CAPA_WPA 0x00000001 +#define IW_ENC_CAPA_WPA2 0x00000002 +#define IW_ENC_CAPA_CIPHER_TKIP 0x00000004 +#define IW_ENC_CAPA_CIPHER_CCMP 0x00000008 + /* Event capability macros - in (struct iw_range *)->event_capa * Because we have more than 32 possible events, we use an array of * 32 bit bitmasks. Note : 32 bits = 0x20 = 2^5. */ @@ -546,6 +695,132 @@ struct iw_thrspy struct iw_quality high; /* High threshold */ }; +/* + * Optional data for scan request + * + * Note: these optional parameters are controlling parameters for the + * scanning behavior, these do not apply to getting scan results + * (SIOCGIWSCAN). Drivers are expected to keep a local BSS table and + * provide a merged results with all BSSes even if the previous scan + * request limited scanning to a subset, e.g., by specifying an SSID. + * Especially, scan results are required to include an entry for the + * current BSS if the driver is in Managed mode and associated with an AP. 
+ */ +struct iw_scan_req +{ + __u8 scan_type; /* IW_SCAN_TYPE_{ACTIVE,PASSIVE} */ + __u8 essid_len; + __u8 num_channels; /* num entries in channel_list; + * 0 = scan all allowed channels */ + __u8 flags; /* reserved as padding; use zero, this may + * be used in the future for adding flags + * to request different scan behavior */ + struct sockaddr bssid; /* ff:ff:ff:ff:ff:ff for broadcast BSSID or + * individual address of a specific BSS */ + + /* + * Use this ESSID if IW_SCAN_THIS_ESSID flag is used instead of using + * the current ESSID. This allows scan requests for specific ESSID + * without having to change the current ESSID and potentially breaking + * the current association. + */ + __u8 essid[IW_ESSID_MAX_SIZE]; + + /* + * Optional parameters for changing the default scanning behavior. + * These are based on the MLME-SCAN.request from IEEE Std 802.11. + * TU is 1.024 ms. If these are set to 0, driver is expected to use + * reasonable default values. min_channel_time defines the time that + * will be used to wait for the first reply on each channel. If no + * replies are received, next channel will be scanned after this. If + * replies are received, total time waited on the channel is defined by + * max_channel_time. + */ + __u32 min_channel_time; /* in TU */ + __u32 max_channel_time; /* in TU */ + + struct iw_freq channel_list[IW_MAX_FREQUENCIES]; +}; + +/* ------------------------- WPA SUPPORT ------------------------- */ + +/* + * Extended data structure for get/set encoding (this is used with + * SIOCSIWENCODEEXT/SIOCGIWENCODEEXT. struct iw_point and IW_ENCODE_* + * flags are used in the same way as with SIOCSIWENCODE/SIOCGIWENCODE and + * only the data contents changes (key data -> this structure, including + * key data). + * + * If the new key is the first group key, it will be set as the default + * TX key. Otherwise, default TX key index is only changed if + * IW_ENCODE_EXT_SET_TX_KEY flag is set. + * + * Key will be changed with SIOCSIWENCODEEXT in all cases except for + * special "change TX key index" operation which is indicated by setting + * key_len = 0 and ext_flags |= IW_ENCODE_EXT_SET_TX_KEY. + * + * tx_seq/rx_seq are only used when respective + * IW_ENCODE_EXT_{TX,RX}_SEQ_VALID flag is set in ext_flags. Normal + * TKIP/CCMP operation is to set RX seq with SIOCSIWENCODEEXT and start + * TX seq from zero whenever key is changed. SIOCGIWENCODEEXT is normally + * used only by an Authenticator (AP or an IBSS station) to get the + * current TX sequence number. Using TX_SEQ_VALID for SIOCSIWENCODEEXT and + * RX_SEQ_VALID for SIOCGIWENCODEEXT are optional, but can be useful for + * debugging/testing. 
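/*
 * A user-space sketch of a directed scan using the optional parameters
 * above.  Illustrative only: it assumes <string.h>, <sys/ioctl.h> and
 * <net/if.h>, a datagram socket fd, and the IW_SCAN_THIS_ESSID flag
 * defined elsewhere in this header; error handling is omitted.
 */
static int example_scan_essid(int fd, const char *ifname, const char *essid)
{
        struct iw_scan_req req;
        struct iwreq iwr;

        memset(&req, 0, sizeof(req));
        req.scan_type = IW_SCAN_TYPE_ACTIVE;
        req.essid_len = strlen(essid);  /* assumes <= IW_ESSID_MAX_SIZE */
        memcpy(req.essid, essid, req.essid_len);

        memset(&iwr, 0, sizeof(iwr));
        strncpy(iwr.ifr_name, ifname, IFNAMSIZ);
        iwr.u.data.pointer = (void *) &req;
        iwr.u.data.length = sizeof(req);
        iwr.u.data.flags = IW_SCAN_THIS_ESSID;

        return ioctl(fd, SIOCSIWSCAN, &iwr);
}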
+ */ +struct iw_encode_ext +{ + __u32 ext_flags; /* IW_ENCODE_EXT_* */ + __u8 tx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */ + __u8 rx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */ + struct sockaddr addr; /* ff:ff:ff:ff:ff:ff for broadcast/multicast + * (group) keys or unicast address for + * individual keys */ + __u16 alg; /* IW_ENCODE_ALG_* */ + __u16 key_len; + __u8 key[0]; +}; + +/* SIOCSIWMLME data */ +struct iw_mlme +{ + __u16 cmd; /* IW_MLME_* */ + __u16 reason_code; + struct sockaddr addr; +}; + +/* SIOCSIWPMKSA data */ +#define IW_PMKSA_ADD 1 +#define IW_PMKSA_REMOVE 2 +#define IW_PMKSA_FLUSH 3 + +#define IW_PMKID_LEN 16 + +struct iw_pmksa +{ + __u32 cmd; /* IW_PMKSA_* */ + struct sockaddr bssid; + __u8 pmkid[IW_PMKID_LEN]; +}; + +/* IWEVMICHAELMICFAILURE data */ +struct iw_michaelmicfailure +{ + __u32 flags; + struct sockaddr src_addr; + __u8 tsc[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */ +}; + +/* IWEVPMKIDCAND data */ +#define IW_PMKID_CAND_PREAUTH 0x00000001 /* RNS pre-authentication enabled */ +struct iw_pmkid_cand +{ + __u32 flags; /* IW_PMKID_CAND_* */ + __u32 index; /* the smaller the index, the higher the + * priority */ + struct sockaddr bssid; +}; + /* ------------------------ WIRELESS STATS ------------------------ */ /* * Wireless statistics (used for /proc/net/wireless) @@ -725,6 +1000,8 @@ struct iw_range struct iw_freq freq[IW_MAX_FREQUENCIES]; /* list */ /* Note : this frequency list doesn't need to fit channel numbers, * because each entry contain its channel index */ + + __u32 enc_capa; /* IW_ENC_CAPA_* bit field */ }; /* diff --git a/include/linux/x25.h b/include/linux/x25.h index 7531cfe..16d4493 100644 --- a/include/linux/x25.h +++ b/include/linux/x25.h @@ -4,6 +4,8 @@ * History * mar/20/00 Daniela Squassoni Disabling/enabling of facilities * negotiation. + * apr/02/05 Shaun Pereira Selective sub address matching with + * call user data */ #ifndef X25_KERNEL_H @@ -16,6 +18,9 @@ #define SIOCX25GCALLUSERDATA (SIOCPROTOPRIVATE + 4) #define SIOCX25SCALLUSERDATA (SIOCPROTOPRIVATE + 5) #define SIOCX25GCAUSEDIAG (SIOCPROTOPRIVATE + 6) +#define SIOCX25SCUDMATCHLEN (SIOCPROTOPRIVATE + 7) +#define SIOCX25CALLACCPTAPPRV (SIOCPROTOPRIVATE + 8) +#define SIOCX25SENDCALLACCPT (SIOCPROTOPRIVATE + 9) /* * Values for {get,set}sockopt. 
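/*
 * A user-space sketch of the new SIOCSIWAUTH request, here enabling WPA.
 * Illustrative only; fd is a datagram socket and ifname the interface
 * name.  A driver that accepts this is expected to advertise the
 * corresponding IW_ENC_CAPA_* bits in (struct iw_range)->enc_capa.
 */
static int example_enable_wpa(int fd, const char *ifname)
{
        struct iwreq iwr;

        memset(&iwr, 0, sizeof(iwr));
        strncpy(iwr.ifr_name, ifname, IFNAMSIZ);

        /* IW_AUTH_INDEX selects the parameter, value carries the setting. */
        iwr.u.param.flags = IW_AUTH_WPA_VERSION & IW_AUTH_INDEX;
        iwr.u.param.value = IW_AUTH_WPA_VERSION_WPA;

        return ioctl(fd, SIOCSIWAUTH, &iwr);
}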
@@ -109,4 +114,11 @@ struct x25_causediag { unsigned char diagnostic; }; +/* + * Further optional call user data match length selection + */ +struct x25_subaddr { + unsigned int cudmatchlength; +}; + #endif diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h index fd2ef74..f0d4233 100644 --- a/include/linux/xfrm.h +++ b/include/linux/xfrm.h @@ -174,6 +174,8 @@ enum xfrm_attr_type_t { XFRMA_ALG_COMP, /* struct xfrm_algo */ XFRMA_ENCAP, /* struct xfrm_algo + struct xfrm_encap_tmpl */ XFRMA_TMPL, /* 1 or more struct xfrm_user_tmpl */ + XFRMA_SA, + XFRMA_POLICY, __XFRMA_MAX #define XFRMA_MAX (__XFRMA_MAX - 1) @@ -194,6 +196,7 @@ struct xfrm_usersa_info { __u8 flags; #define XFRM_STATE_NOECN 1 #define XFRM_STATE_DECAP_DSCP 2 +#define XFRM_STATE_NOPMTUDISC 4 }; struct xfrm_usersa_id { @@ -257,5 +260,7 @@ struct xfrm_usersa_flush { #define XFRMGRP_ACQUIRE 1 #define XFRMGRP_EXPIRE 2 +#define XFRMGRP_SA 4 +#define XFRMGRP_POLICY 8 #endif /* _LINUX_XFRM_H */ diff --git a/include/media/audiochip.h b/include/media/audiochip.h index d3e9e30..f345a61 100644 --- a/include/media/audiochip.h +++ b/include/media/audiochip.h @@ -1,3 +1,7 @@ +/* + * $Id: audiochip.h,v 1.3 2005/06/12 04:19:19 mchehab Exp $ + */ + #ifndef AUDIOCHIP_H #define AUDIOCHIP_H diff --git a/include/media/id.h b/include/media/id.h index 1b0320d..a39a642 100644 --- a/include/media/id.h +++ b/include/media/id.h @@ -1,3 +1,7 @@ +/* + * $Id: id.h,v 1.4 2005/06/12 04:19:19 mchehab Exp $ + */ + /* FIXME: this temporarely, until these are included in linux/i2c-id.h */ /* drivers */ diff --git a/include/media/ir-common.h b/include/media/ir-common.h index 62c963a..6986705 100644 --- a/include/media/ir-common.h +++ b/include/media/ir-common.h @@ -1,5 +1,5 @@ /* - * $Id: ir-common.h,v 1.8 2005/02/22 12:28:40 kraxel Exp $ + * $Id: ir-common.h,v 1.9 2005/05/15 19:01:26 mchehab Exp $ * * some common structs and functions to handle infrared remotes via * input layer ... 
@@ -50,6 +50,7 @@ extern IR_KEYTAB_TYPE ir_codes_rc5_tv[IR_KEYTAB_SIZE]; extern IR_KEYTAB_TYPE ir_codes_winfast[IR_KEYTAB_SIZE]; extern IR_KEYTAB_TYPE ir_codes_empty[IR_KEYTAB_SIZE]; extern IR_KEYTAB_TYPE ir_codes_hauppauge_new[IR_KEYTAB_SIZE]; +extern IR_KEYTAB_TYPE ir_codes_pixelview[IR_KEYTAB_SIZE]; void ir_input_init(struct input_dev *dev, struct ir_input_state *ir, int ir_type, IR_KEYTAB_TYPE *ir_codes); diff --git a/include/media/tuner.h b/include/media/tuner.h index 156a9c5..2dd8310 100644 --- a/include/media/tuner.h +++ b/include/media/tuner.h @@ -25,6 +25,8 @@ #include "id.h" +#define ADDR_UNSET (255) + #define TUNER_TEMIC_PAL 0 /* 4002 FH5 (3X 7756, 9483) */ #define TUNER_PHILIPS_PAL_I 1 #define TUNER_PHILIPS_NTSC 2 @@ -98,12 +100,23 @@ #define TUNER_PHILIPS_FQ1216AME_MK4 56 /* Hauppauge PVR-150 PAL */ #define TUNER_PHILIPS_FQ1236A_MK4 57 /* Hauppauge PVR-500MCE NTSC */ +#define TUNER_YMEC_TVF_8531MF 58 +#define TUNER_YMEC_TVF_5533MF 59 /* Pixelview Pro Ultra NTSC */ +#define TUNER_THOMSON_DTT7611 60 +#define TUNER_TENA_9533_DI 61 +#define TUNER_TEA5767 62 /* Only FM Radio Tuner */ + +#define TEA5767_TUNER_NAME "Philips TEA5767HN FM Radio" + +#define TUNER_THOMSON_DTT7611 60 + #define NOTUNER 0 #define PAL 1 /* PAL_BG */ #define PAL_I 2 #define NTSC 3 #define SECAM 4 #define ATSC 5 +#define RADIO 6 #define NoTuner 0 #define Philips 1 @@ -119,10 +132,20 @@ #define TCL 11 #define THOMSON 12 +enum v4l_radio_tuner { + TEA5767_LOW_LO_32768 = 0, + TEA5767_HIGH_LO_32768 = 1, + TEA5767_LOW_LO_13MHz = 2, + TEA5767_HIGH_LO_13MHz = 3, +}; + + #define TUNER_SET_TYPE _IOW('t',1,int) /* set tuner type */ #define TUNER_SET_TVFREQ _IOW('t',2,int) /* set tv freq */ +#define TUNER_SET_TYPE_ADDR _IOW('T',3,int) /* set tuner type and I2C addr */ #define TDA9887_SET_CONFIG _IOW('t',5,int) + /* tv card specific */ # define TDA9887_PRESENT (1<<0) # define TDA9887_PORT1_INACTIVE (1<<1) @@ -143,6 +166,12 @@ #define I2C_ADDR_TDA8290 0x4b #define I2C_ADDR_TDA8275 0x61 +struct tuner_addr { + enum v4l2_tuner_type v4l2_tuner; + unsigned int type; + unsigned short addr; +}; + struct tuner { /* device */ struct i2c_client i2c; diff --git a/include/media/tveeprom.h b/include/media/tveeprom.h index 627603e..5c4fe30 100644 --- a/include/media/tveeprom.h +++ b/include/media/tveeprom.h @@ -1,3 +1,7 @@ +/* + * $Id: tveeprom.h,v 1.2 2005/06/12 04:19:19 mchehab Exp $ + */ + struct tveeprom { u32 has_radio; diff --git a/include/net/ax25.h b/include/net/ax25.h index 9e6368a..828a3a93 100644 --- a/include/net/ax25.h +++ b/include/net/ax25.h @@ -220,7 +220,7 @@ static __inline__ void ax25_cb_put(ax25_cb *ax25) } } -static inline unsigned short ax25_type_trans(struct sk_buff *skb, struct net_device *dev) +static inline __be16 ax25_type_trans(struct sk_buff *skb, struct net_device *dev) { skb->dev = dev; skb->pkt_type = PACKET_HOST; diff --git a/include/net/ip.h b/include/net/ip.h index 3f63992..32360bb 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -163,6 +163,7 @@ DECLARE_SNMP_STAT(struct linux_mib, net_statistics); extern int sysctl_local_port_range[2]; extern int sysctl_ip_default_ttl; +extern int sysctl_ip_nonlocal_bind; #ifdef CONFIG_INET /* The function in 2.2 was invalid, producing wrong result for diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 3199045..a66e9de 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -167,14 +167,17 @@ extern int fib6_walk_continue(struct fib6_walker_t *w); extern int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nlmsghdr *nlh, - 
void *rtattr); + void *rtattr, + struct netlink_skb_parms *req); extern int fib6_del(struct rt6_info *rt, struct nlmsghdr *nlh, - void *rtattr); + void *rtattr, + struct netlink_skb_parms *req); extern void inet6_rt_notify(int event, struct rt6_info *rt, - struct nlmsghdr *nlh); + struct nlmsghdr *nlh, + struct netlink_skb_parms *req); extern void fib6_run_gc(unsigned long dummy); diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index d5d1dd1..f920706 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -41,13 +41,16 @@ extern int ipv6_route_ioctl(unsigned int cmd, void __user *arg); extern int ip6_route_add(struct in6_rtmsg *rtmsg, struct nlmsghdr *, - void *rtattr); + void *rtattr, + struct netlink_skb_parms *req); extern int ip6_ins_rt(struct rt6_info *, struct nlmsghdr *, - void *rtattr); + void *rtattr, + struct netlink_skb_parms *req); extern int ip6_del_rt(struct rt6_info *, struct nlmsghdr *, - void *rtattr); + void *rtattr, + struct netlink_skb_parms *req); extern int ip6_rt_addr_add(struct in6_addr *addr, struct net_device *dev, diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index e5a5f6b..a4208a3 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -109,6 +109,20 @@ struct fib_result { #endif }; +struct fib_result_nl { + u32 fl_addr; /* To be looked up*/ + u32 fl_fwmark; + unsigned char fl_tos; + unsigned char fl_scope; + unsigned char tb_id_in; + + unsigned char tb_id; /* Results */ + unsigned char prefixlen; + unsigned char nh_sel; + unsigned char type; + unsigned char scope; + int err; +}; #ifdef CONFIG_IP_ROUTE_MULTIPATH diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 4f33bbc..8980989 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -65,11 +65,10 @@ struct neighbour; struct neigh_parms { + struct net_device *dev; struct neigh_parms *next; int (*neigh_setup)(struct neighbour *); struct neigh_table *tbl; - int entries; - void *priv; void *sysctl_table; @@ -192,7 +191,6 @@ struct neigh_table atomic_t entries; rwlock_t lock; unsigned long last_rand; - struct neigh_parms *parms_list; kmem_cache_t *kmem_cachep; struct neigh_statistics *stats; struct neighbour **hash_buckets; @@ -252,6 +250,9 @@ extern int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg); extern int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg); extern void neigh_app_ns(struct neighbour *n); +extern int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb); +extern int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg); + extern void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie); extern void __neigh_for_each_release(struct neigh_table *tbl, int (*cb)(struct neighbour *)); extern void pneigh_for_each(struct neigh_table *tbl, void (*cb)(struct pneigh_entry *)); diff --git a/include/net/request_sock.h b/include/net/request_sock.h new file mode 100644 index 0000000..72fd6f5 --- /dev/null +++ b/include/net/request_sock.h @@ -0,0 +1,255 @@ +/* + * NET Generic infrastructure for Network protocols. + * + * Definitions for request_sock + * + * Authors: Arnaldo Carvalho de Melo <acme@conectiva.com.br> + * + * From code originally in include/net/tcp.h + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ +#ifndef _REQUEST_SOCK_H +#define _REQUEST_SOCK_H + +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/types.h> + +#include <net/sock.h> + +struct request_sock; +struct sk_buff; +struct dst_entry; +struct proto; + +struct request_sock_ops { + int family; + kmem_cache_t *slab; + int obj_size; + int (*rtx_syn_ack)(struct sock *sk, + struct request_sock *req, + struct dst_entry *dst); + void (*send_ack)(struct sk_buff *skb, + struct request_sock *req); + void (*send_reset)(struct sk_buff *skb); + void (*destructor)(struct request_sock *req); +}; + +/* struct request_sock - mini sock to represent a connection request + */ +struct request_sock { + struct request_sock *dl_next; /* Must be first member! */ + u16 mss; + u8 retrans; + u8 __pad; + /* The following two fields can be easily recomputed I think -AK */ + u32 window_clamp; /* window clamp at creation time */ + u32 rcv_wnd; /* rcv_wnd offered first time */ + u32 ts_recent; + unsigned long expires; + struct request_sock_ops *rsk_ops; + struct sock *sk; +}; + +static inline struct request_sock *reqsk_alloc(struct request_sock_ops *ops) +{ + struct request_sock *req = kmem_cache_alloc(ops->slab, SLAB_ATOMIC); + + if (req != NULL) + req->rsk_ops = ops; + + return req; +} + +static inline void __reqsk_free(struct request_sock *req) +{ + kmem_cache_free(req->rsk_ops->slab, req); +} + +static inline void reqsk_free(struct request_sock *req) +{ + req->rsk_ops->destructor(req); + __reqsk_free(req); +} + +extern int sysctl_max_syn_backlog; + +/** struct listen_sock - listen state + * + * @max_qlen_log - log_2 of maximal queued SYNs/REQUESTs + */ +struct listen_sock { + u8 max_qlen_log; + /* 3 bytes hole, try to use */ + int qlen; + int qlen_young; + int clock_hand; + u32 hash_rnd; + struct request_sock *syn_table[0]; +}; + +/** struct request_sock_queue - queue of request_socks + * + * @rskq_accept_head - FIFO head of established children + * @rskq_accept_tail - FIFO tail of established children + * @syn_wait_lock - serializer + * + * %syn_wait_lock is necessary only to avoid proc interface having to grab the main + * lock sock while browsing the listening hash (otherwise it's deadlock prone). + * + * This lock is acquired in read mode only from listening_get_next() seq_file + * op and it's acquired in write mode _only_ from code that is actively + * changing rskq_accept_head. All readers that are holding the master sock lock + * don't need to grab this lock in read mode too as rskq_accept_head. writes + * are always protected from the main sock lock. 
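/*
 * Sketch of the intended use of the allocation helpers above.
 * Illustrative only: my_request_sock and my_request_sock_ops are
 * hypothetical stand-ins for a protocol's real definitions (TCP's live
 * in the IPv4/IPv6 code), and the callback bodies are omitted.
 */
struct my_request_sock {
        struct request_sock req;        /* first, so the two can be cast */
        /* protocol-private per-request state follows */
};

static struct request_sock_ops my_request_sock_ops = {
        .family         = PF_INET,
        .obj_size       = sizeof(struct my_request_sock),
        /* .slab must be created (kmem_cache_create) before reqsk_alloc();
         * .rtx_syn_ack, .send_ack, .send_reset, .destructor as needed. */
};

static struct request_sock *example_new_request(void)
{
        /* Atomic allocation, as done from the SYN receive path. */
        struct request_sock *req = reqsk_alloc(&my_request_sock_ops);

        if (req != NULL) {
                /* fill in mss, rcv_wnd, expires, ... then hash/queue it;
                 * a dropped request is released with reqsk_free(). */
        }
        return req;
}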
+ */ +struct request_sock_queue { + struct request_sock *rskq_accept_head; + struct request_sock *rskq_accept_tail; + rwlock_t syn_wait_lock; + struct listen_sock *listen_opt; +}; + +extern int reqsk_queue_alloc(struct request_sock_queue *queue, + const int nr_table_entries); + +static inline struct listen_sock *reqsk_queue_yank_listen_sk(struct request_sock_queue *queue) +{ + struct listen_sock *lopt; + + write_lock_bh(&queue->syn_wait_lock); + lopt = queue->listen_opt; + queue->listen_opt = NULL; + write_unlock_bh(&queue->syn_wait_lock); + + return lopt; +} + +static inline void reqsk_queue_destroy(struct request_sock_queue *queue) +{ + kfree(reqsk_queue_yank_listen_sk(queue)); +} + +static inline struct request_sock * + reqsk_queue_yank_acceptq(struct request_sock_queue *queue) +{ + struct request_sock *req = queue->rskq_accept_head; + + queue->rskq_accept_head = queue->rskq_accept_head = NULL; + return req; +} + +static inline int reqsk_queue_empty(struct request_sock_queue *queue) +{ + return queue->rskq_accept_head == NULL; +} + +static inline void reqsk_queue_unlink(struct request_sock_queue *queue, + struct request_sock *req, + struct request_sock **prev_req) +{ + write_lock(&queue->syn_wait_lock); + *prev_req = req->dl_next; + write_unlock(&queue->syn_wait_lock); +} + +static inline void reqsk_queue_add(struct request_sock_queue *queue, + struct request_sock *req, + struct sock *parent, + struct sock *child) +{ + req->sk = child; + sk_acceptq_added(parent); + + if (queue->rskq_accept_head == NULL) + queue->rskq_accept_head = req; + else + queue->rskq_accept_tail->dl_next = req; + + queue->rskq_accept_tail = req; + req->dl_next = NULL; +} + +static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue) +{ + struct request_sock *req = queue->rskq_accept_head; + + BUG_TRAP(req != NULL); + + queue->rskq_accept_head = req->dl_next; + if (queue->rskq_accept_head == NULL) + queue->rskq_accept_tail = NULL; + + return req; +} + +static inline struct sock *reqsk_queue_get_child(struct request_sock_queue *queue, + struct sock *parent) +{ + struct request_sock *req = reqsk_queue_remove(queue); + struct sock *child = req->sk; + + BUG_TRAP(child != NULL); + + sk_acceptq_removed(parent); + __reqsk_free(req); + return child; +} + +static inline int reqsk_queue_removed(struct request_sock_queue *queue, + struct request_sock *req) +{ + struct listen_sock *lopt = queue->listen_opt; + + if (req->retrans == 0) + --lopt->qlen_young; + + return --lopt->qlen; +} + +static inline int reqsk_queue_added(struct request_sock_queue *queue) +{ + struct listen_sock *lopt = queue->listen_opt; + const int prev_qlen = lopt->qlen; + + lopt->qlen_young++; + lopt->qlen++; + return prev_qlen; +} + +static inline int reqsk_queue_len(struct request_sock_queue *queue) +{ + return queue->listen_opt != NULL ? 
queue->listen_opt->qlen : 0; +} + +static inline int reqsk_queue_len_young(struct request_sock_queue *queue) +{ + return queue->listen_opt->qlen_young; +} + +static inline int reqsk_queue_is_full(struct request_sock_queue *queue) +{ + return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log; +} + +static inline void reqsk_queue_hash_req(struct request_sock_queue *queue, + u32 hash, struct request_sock *req, + unsigned timeout) +{ + struct listen_sock *lopt = queue->listen_opt; + + req->expires = jiffies + timeout; + req->retrans = 0; + req->sk = NULL; + req->dl_next = lopt->syn_table[hash]; + + write_lock(&queue->syn_wait_lock); + lopt->syn_table[hash] = req; + write_unlock(&queue->syn_wait_lock); +} + +#endif /* _REQUEST_SOCK_H */ diff --git a/include/net/route.h b/include/net/route.h index efe92b2..c3cd069 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -107,7 +107,7 @@ struct rt_cache_stat extern struct rt_cache_stat *rt_cache_stat; #define RT_CACHE_STAT_INC(field) \ - (per_cpu_ptr(rt_cache_stat, _smp_processor_id())->field++) + (per_cpu_ptr(rt_cache_stat, raw_smp_processor_id())->field++) extern struct ip_rt_acct *ip_rt_acct; @@ -181,9 +181,6 @@ static inline int ip_route_newports(struct rtable **rp, u16 sport, u16 dport, memcpy(&fl, &(*rp)->fl, sizeof(fl)); fl.fl_ip_sport = sport; fl.fl_ip_dport = dport; -#if defined(CONFIG_IP_ROUTE_MULTIPATH_CACHED) - fl.flags |= FLOWI_FLAG_MULTIPATHOLDROUTE; -#endif ip_rt_put(*rp); *rp = NULL; return ip_route_output_flow(rp, &fl, sk, 0); diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index c57504b..7b97405 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -172,4 +172,126 @@ tcf_destroy(struct tcf_proto *tp) kfree(tp); } +static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff_head *list) +{ + __skb_queue_tail(list, skb); + sch->qstats.backlog += skb->len; + sch->bstats.bytes += skb->len; + sch->bstats.packets++; + + return NET_XMIT_SUCCESS; +} + +static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch) +{ + return __qdisc_enqueue_tail(skb, sch, &sch->q); +} + +static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch, + struct sk_buff_head *list) +{ + struct sk_buff *skb = __skb_dequeue(list); + + if (likely(skb != NULL)) + sch->qstats.backlog -= skb->len; + + return skb; +} + +static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch) +{ + return __qdisc_dequeue_head(sch, &sch->q); +} + +static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch, + struct sk_buff_head *list) +{ + struct sk_buff *skb = __skb_dequeue_tail(list); + + if (likely(skb != NULL)) + sch->qstats.backlog -= skb->len; + + return skb; +} + +static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch) +{ + return __qdisc_dequeue_tail(sch, &sch->q); +} + +static inline int __qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff_head *list) +{ + __skb_queue_head(list, skb); + sch->qstats.backlog += skb->len; + sch->qstats.requeues++; + + return NET_XMIT_SUCCESS; +} + +static inline int qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch) +{ + return __qdisc_requeue(skb, sch, &sch->q); +} + +static inline void __qdisc_reset_queue(struct Qdisc *sch, + struct sk_buff_head *list) +{ + /* + * We do not know the backlog in bytes of this list, it + * is up to the caller to correct it + */ + skb_queue_purge(list); +} + +static inline void qdisc_reset_queue(struct Qdisc *sch) +{ + __qdisc_reset_queue(sch, 
&sch->q); + sch->qstats.backlog = 0; +} + +static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch, + struct sk_buff_head *list) +{ + struct sk_buff *skb = __qdisc_dequeue_tail(sch, list); + + if (likely(skb != NULL)) { + unsigned int len = skb->len; + kfree_skb(skb); + return len; + } + + return 0; +} + +static inline unsigned int qdisc_queue_drop(struct Qdisc *sch) +{ + return __qdisc_queue_drop(sch, &sch->q); +} + +static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch) +{ + kfree_skb(skb); + sch->qstats.drops++; + + return NET_XMIT_DROP; +} + +static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch) +{ + sch->qstats.drops++; + +#ifdef CONFIG_NET_CLS_POLICE + if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch)) + goto drop; + + return NET_XMIT_SUCCESS; + +drop: +#endif + kfree_skb(skb); + return NET_XMIT_DROP; +} + #endif diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h index ebc5282..dc107ff 100644 --- a/include/net/sctp/command.h +++ b/include/net/sctp/command.h @@ -65,9 +65,11 @@ typedef enum { SCTP_CMD_TIMER_START, /* Start a timer. */ SCTP_CMD_TIMER_RESTART, /* Restart a timer. */ SCTP_CMD_TIMER_STOP, /* Stop a timer. */ - SCTP_CMD_COUNTER_RESET, /* Reset a counter. */ - SCTP_CMD_COUNTER_INC, /* Increment a counter. */ + SCTP_CMD_INIT_CHOOSE_TRANSPORT, /* Choose transport for an INIT. */ + SCTP_CMD_INIT_COUNTER_RESET, /* Reset init counter. */ + SCTP_CMD_INIT_COUNTER_INC, /* Increment init counter. */ SCTP_CMD_INIT_RESTART, /* High level, do init timer work. */ + SCTP_CMD_COOKIEECHO_RESTART, /* High level, do cookie-echo timer work. */ SCTP_CMD_INIT_FAILED, /* High level, do init failure work. */ SCTP_CMD_REPORT_DUP, /* Report a duplicate TSN. */ SCTP_CMD_STRIKE, /* Mark a strike against a transport. */ @@ -118,7 +120,6 @@ typedef union { int error; sctp_state_t state; sctp_event_timeout_t to; - sctp_counter_t counter; void *ptr; struct sctp_chunk *chunk; struct sctp_association *asoc; @@ -165,7 +166,6 @@ SCTP_ARG_CONSTRUCTOR(U16, __u16, u16) SCTP_ARG_CONSTRUCTOR(U8, __u8, u8) SCTP_ARG_CONSTRUCTOR(ERROR, int, error) SCTP_ARG_CONSTRUCTOR(STATE, sctp_state_t, state) -SCTP_ARG_CONSTRUCTOR(COUNTER, sctp_counter_t, counter) SCTP_ARG_CONSTRUCTOR(TO, sctp_event_timeout_t, to) SCTP_ARG_CONSTRUCTOR(PTR, void *, ptr) SCTP_ARG_CONSTRUCTOR(CHUNK, struct sctp_chunk *, chunk) diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index 2b76c0f..4868c7f 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -263,13 +263,6 @@ enum { SCTP_MIN_PMTU = 576 }; enum { SCTP_MAX_DUP_TSNS = 16 }; enum { SCTP_MAX_GABS = 16 }; -typedef enum { - SCTP_COUNTER_INIT_ERROR, -} sctp_counter_t; - -/* How many counters does an association need? */ -#define SCTP_NUMBER_COUNTERS 5 - /* Here we define the default timers. */ /* cookie timer def = ? seconds */ diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 960abfa..ef27381 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -223,6 +223,22 @@ DECLARE_SNMP_STAT(struct sctp_mib, sctp_statistics); extern int sctp_debug_flag; #define SCTP_DEBUG_PRINTK(whatever...) \ ((void) (sctp_debug_flag && printk(KERN_DEBUG whatever))) +#define SCTP_DEBUG_PRINTK_IPADDR(lead, trail, leadparm, saddr, otherparms...) 
\ + if (sctp_debug_flag) { \ + if (saddr->sa.sa_family == AF_INET6) { \ + printk(KERN_DEBUG \ + lead "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x" trail, \ + leadparm, \ + NIP6(saddr->v6.sin6_addr), \ + otherparms); \ + } else { \ + printk(KERN_DEBUG \ + lead "%u.%u.%u.%u" trail, \ + leadparm, \ + NIPQUAD(saddr->v4.sin_addr.s_addr), \ + otherparms); \ + } \ + } #define SCTP_ENABLE_DEBUG { sctp_debug_flag = 1; } #define SCTP_DISABLE_DEBUG { sctp_debug_flag = 0; } @@ -236,6 +252,7 @@ extern int sctp_debug_flag; #else /* SCTP_DEBUG */ #define SCTP_DEBUG_PRINTK(whatever...) +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) #define SCTP_ENABLE_DEBUG #define SCTP_DISABLE_DEBUG #define SCTP_ASSERT(expr, str, func) diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index f4fcee1..88d9fe5 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -116,7 +116,8 @@ sctp_state_fn_t sctp_sf_eat_data_fast_4_4; sctp_state_fn_t sctp_sf_eat_sack_6_2; sctp_state_fn_t sctp_sf_tabort_8_4_8; sctp_state_fn_t sctp_sf_operr_notify; -sctp_state_fn_t sctp_sf_t1_timer_expire; +sctp_state_fn_t sctp_sf_t1_init_timer_expire; +sctp_state_fn_t sctp_sf_t1_cookie_timer_expire; sctp_state_fn_t sctp_sf_t2_timer_expire; sctp_state_fn_t sctp_sf_t4_timer_expire; sctp_state_fn_t sctp_sf_t5_timer_expire; @@ -130,7 +131,6 @@ sctp_state_fn_t sctp_sf_do_ecne; sctp_state_fn_t sctp_sf_ootb; sctp_state_fn_t sctp_sf_pdiscard; sctp_state_fn_t sctp_sf_violation; -sctp_state_fn_t sctp_sf_violation_chunklen; sctp_state_fn_t sctp_sf_discard_chunk; sctp_state_fn_t sctp_sf_do_5_2_1_siminit; sctp_state_fn_t sctp_sf_do_5_2_2_dupinit; @@ -258,8 +258,6 @@ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc, void sctp_chunk_assign_tsn(struct sctp_chunk *); void sctp_chunk_assign_ssn(struct sctp_chunk *); -void sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands, __u16 error); - /* Prototypes for statetable processing. */ int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype, diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 6c24d9c..dfad4d3 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -867,10 +867,13 @@ struct sctp_transport { */ unsigned long last_time_ecne_reduced; - /* active : The current active state of this destination, - * : i.e. DOWN, UP, etc. + /* The number of times INIT has been sent on this transport. */ + int init_sent_count; + + /* state : The current state of this destination, + * : i.e. SCTP_ACTIVE, SCTP_INACTIVE, SCTP_UNKOWN. */ - int active; + int state; /* hb_allowed : The current heartbeat state of this destination, * : i.e. ALLOW-HB, NO-HEARTBEAT, etc. @@ -1222,9 +1225,6 @@ struct sctp_endpoint { /* sendbuf acct. policy. */ __u32 sndbuf_policy; - - /* Name for debugging output... */ - char *debug_name; }; /* Recover the outter endpoint structure. */ @@ -1314,11 +1314,23 @@ struct sctp_association { * : association. Normally this information is * : hashed or keyed for quick lookup and access * : of the TCB. + * : The list is also initialized with the list + * : of addresses passed with the sctp_connectx() + * : call. * * It is a list of SCTP_transport's. */ struct list_head transport_addr_list; + /* transport_count + * + * Peer : A count of the number of peer addresses + * Transport : in the Peer Transport Address List. + * Address : + * Count : + */ + __u16 transport_count; + /* port * The transport layer port number. */ @@ -1486,6 +1498,9 @@ struct sctp_association { /* Transport to which SHUTDOWN chunk was last sent. 
*/ struct sctp_transport *shutdown_last_sent_to; + /* Transport to which INIT chunk was last sent. */ + struct sctp_transport *init_last_sent_to; + /* Next TSN : The next TSN number to be assigned to a new * : DATA chunk. This is sent in the INIT or INIT * : ACK chunk to the peer and incremented each @@ -1549,8 +1564,11 @@ struct sctp_association { /* The message size at which SCTP fragmentation will occur. */ __u32 frag_point; - /* Currently only one counter is used to count INIT errors. */ - int counters[SCTP_NUMBER_COUNTERS]; + /* Counter used to count INIT errors. */ + int init_err_counter; + + /* Count the number of INIT cycles (for doubling timeout). */ + int init_cycle; /* Default send parameters. */ __u16 default_stream; @@ -1708,6 +1726,8 @@ void sctp_association_free(struct sctp_association *); void sctp_association_put(struct sctp_association *); void sctp_association_hold(struct sctp_association *); +struct sctp_transport *sctp_assoc_choose_init_transport( + struct sctp_association *); struct sctp_transport *sctp_assoc_choose_shutdown_transport( struct sctp_association *); void sctp_assoc_update_retran_path(struct sctp_association *); @@ -1717,9 +1737,12 @@ int sctp_assoc_lookup_laddr(struct sctp_association *asoc, const union sctp_addr *laddr); struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *, const union sctp_addr *address, - const int gfp); + const int gfp, + const int peer_state); void sctp_assoc_del_peer(struct sctp_association *asoc, const union sctp_addr *addr); +void sctp_assoc_rm_peer(struct sctp_association *asoc, + struct sctp_transport *peer); void sctp_assoc_control_transport(struct sctp_association *, struct sctp_transport *, sctp_transport_cmd_t, sctp_sn_error_t); diff --git a/include/net/sctp/user.h b/include/net/sctp/user.h index 2758e8c..f6328ae 100644 --- a/include/net/sctp/user.h +++ b/include/net/sctp/user.h @@ -111,6 +111,8 @@ enum sctp_optname { #define SCTP_GET_LOCAL_ADDRS_NUM SCTP_GET_LOCAL_ADDRS_NUM SCTP_GET_LOCAL_ADDRS, /* Get all local addresss. */ #define SCTP_GET_LOCAL_ADDRS SCTP_GET_LOCAL_ADDRS + SCTP_SOCKOPT_CONNECTX, /* CONNECTX requests. 
*/ +#define SCTP_SOCKOPT_CONNECTX SCTP_SOCKOPT_CONNECTX }; /* @@ -527,6 +529,7 @@ struct sctp_paddrinfo { enum sctp_spinfo_state { SCTP_INACTIVE, SCTP_ACTIVE, + SCTP_UNKNOWN = 0xffff /* Value used for transport state unknown */ }; /* diff --git a/include/net/snmp.h b/include/net/snmp.h index a15ab25..a36bed8 100644 --- a/include/net/snmp.h +++ b/include/net/snmp.h @@ -128,18 +128,18 @@ struct linux_mib { #define SNMP_STAT_USRPTR(name) (name[1]) #define SNMP_INC_STATS_BH(mib, field) \ - (per_cpu_ptr(mib[0], _smp_processor_id())->mibs[field]++) + (per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field]++) #define SNMP_INC_STATS_OFFSET_BH(mib, field, offset) \ - (per_cpu_ptr(mib[0], _smp_processor_id())->mibs[field + (offset)]++) + (per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field + (offset)]++) #define SNMP_INC_STATS_USER(mib, field) \ - (per_cpu_ptr(mib[1], _smp_processor_id())->mibs[field]++) + (per_cpu_ptr(mib[1], raw_smp_processor_id())->mibs[field]++) #define SNMP_INC_STATS(mib, field) \ - (per_cpu_ptr(mib[!in_softirq()], _smp_processor_id())->mibs[field]++) + (per_cpu_ptr(mib[!in_softirq()], raw_smp_processor_id())->mibs[field]++) #define SNMP_DEC_STATS(mib, field) \ - (per_cpu_ptr(mib[!in_softirq()], _smp_processor_id())->mibs[field]--) + (per_cpu_ptr(mib[!in_softirq()], raw_smp_processor_id())->mibs[field]--) #define SNMP_ADD_STATS_BH(mib, field, addend) \ - (per_cpu_ptr(mib[0], _smp_processor_id())->mibs[field] += addend) + (per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field] += addend) #define SNMP_ADD_STATS_USER(mib, field, addend) \ - (per_cpu_ptr(mib[1], _smp_processor_id())->mibs[field] += addend) + (per_cpu_ptr(mib[1], raw_smp_processor_id())->mibs[field] += addend) #endif diff --git a/include/net/sock.h b/include/net/sock.h index a9ef3a6..e593af5 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -484,6 +484,8 @@ extern void sk_stream_kill_queues(struct sock *sk); extern int sk_wait_data(struct sock *sk, long *timeo); +struct request_sock_ops; + /* Networking protocol blocks we attach to sockets. * socket layer -> transport layer interface * transport -> network interface is defined by struct inet_proto @@ -547,6 +549,8 @@ struct proto { kmem_cache_t *slab; unsigned int obj_size; + struct request_sock_ops *rsk_prot; + struct module *owner; char name[32]; diff --git a/include/net/tcp.h b/include/net/tcp.h index e71f8ba..ec9e20c 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -31,6 +31,7 @@ #include <linux/cache.h> #include <linux/percpu.h> #include <net/checksum.h> +#include <net/request_sock.h> #include <net/sock.h> #include <net/snmp.h> #include <net/ip.h> @@ -504,25 +505,6 @@ static __inline__ int tcp_sk_listen_hashfn(struct sock *sk) #else # define TCP_TW_RECYCLE_TICK (12+2-TCP_TW_RECYCLE_SLOTS_LOG) #endif - -#define BICTCP_BETA_SCALE 1024 /* Scale factor beta calculation - * max_cwnd = snd_cwnd * beta - */ -#define BICTCP_MAX_INCREMENT 32 /* - * Limit on the amount of - * increment allowed during - * binary search. 
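/*
 * User-space sketch of the SCTP_SOCKOPT_CONNECTX option added to
 * sctp/user.h above.  The addresses are packed back to back, as the
 * lksctp sctp_connectx() wrapper does; the packing convention and the
 * use of IPPROTO_SCTP as the option level are assumptions of this
 * sketch rather than something defined in these headers.
 */
static int example_connectx(int fd, struct sockaddr_in *addrs, int count)
{
        /* sockaddr_in entries are already contiguous in the caller's array */
        socklen_t len = count * sizeof(struct sockaddr_in);

        return setsockopt(fd, IPPROTO_SCTP, SCTP_SOCKOPT_CONNECTX,
                          addrs, len);
}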
- */ -#define BICTCP_FUNC_OF_MIN_INCR 11 /* - * log(B/Smin)/log(B/(B-1))+1, - * Smin:min increment - * B:log factor - */ -#define BICTCP_B 4 /* - * In binary search, - * go to point (max+min)/N - */ - /* * TCP option */ @@ -563,7 +545,6 @@ static __inline__ int tcp_sk_listen_hashfn(struct sock *sk) #define TCP_NAGLE_PUSH 4 /* Cork is overriden for already queued data */ /* sysctl variables for tcp */ -extern int sysctl_max_syn_backlog; extern int sysctl_tcp_timestamps; extern int sysctl_tcp_window_scaling; extern int sysctl_tcp_sack; @@ -596,16 +577,7 @@ extern int sysctl_tcp_adv_win_scale; extern int sysctl_tcp_tw_reuse; extern int sysctl_tcp_frto; extern int sysctl_tcp_low_latency; -extern int sysctl_tcp_westwood; -extern int sysctl_tcp_vegas_cong_avoid; -extern int sysctl_tcp_vegas_alpha; -extern int sysctl_tcp_vegas_beta; -extern int sysctl_tcp_vegas_gamma; extern int sysctl_tcp_nometrics_save; -extern int sysctl_tcp_bic; -extern int sysctl_tcp_bic_fast_convergence; -extern int sysctl_tcp_bic_low_window; -extern int sysctl_tcp_bic_beta; extern int sysctl_tcp_moderate_rcvbuf; extern int sysctl_tcp_tso_win_divisor; @@ -613,74 +585,6 @@ extern atomic_t tcp_memory_allocated; extern atomic_t tcp_sockets_allocated; extern int tcp_memory_pressure; -struct open_request; - -struct or_calltable { - int family; - int (*rtx_syn_ack) (struct sock *sk, struct open_request *req, struct dst_entry*); - void (*send_ack) (struct sk_buff *skb, struct open_request *req); - void (*destructor) (struct open_request *req); - void (*send_reset) (struct sk_buff *skb); -}; - -struct tcp_v4_open_req { - __u32 loc_addr; - __u32 rmt_addr; - struct ip_options *opt; -}; - -#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) -struct tcp_v6_open_req { - struct in6_addr loc_addr; - struct in6_addr rmt_addr; - struct sk_buff *pktopts; - int iif; -}; -#endif - -/* this structure is too big */ -struct open_request { - struct open_request *dl_next; /* Must be first member! */ - __u32 rcv_isn; - __u32 snt_isn; - __u16 rmt_port; - __u16 mss; - __u8 retrans; - __u8 __pad; - __u16 snd_wscale : 4, - rcv_wscale : 4, - tstamp_ok : 1, - sack_ok : 1, - wscale_ok : 1, - ecn_ok : 1, - acked : 1; - /* The following two fields can be easily recomputed I think -AK */ - __u32 window_clamp; /* window clamp at creation time */ - __u32 rcv_wnd; /* rcv_wnd offered first time */ - __u32 ts_recent; - unsigned long expires; - struct or_calltable *class; - struct sock *sk; - union { - struct tcp_v4_open_req v4_req; -#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) - struct tcp_v6_open_req v6_req; -#endif - } af; -}; - -/* SLAB cache for open requests. 
*/ -extern kmem_cache_t *tcp_openreq_cachep; - -#define tcp_openreq_alloc() kmem_cache_alloc(tcp_openreq_cachep, SLAB_ATOMIC) -#define tcp_openreq_fastfree(req) kmem_cache_free(tcp_openreq_cachep, req) - -static inline void tcp_openreq_free(struct open_request *req) -{ - req->class->destructor(req); - tcp_openreq_fastfree(req); -} - #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) #define TCP_INET_FAMILY(fam) ((fam) == AF_INET) #else @@ -708,7 +612,7 @@ struct tcp_func { struct sock * (*syn_recv_sock) (struct sock *sk, struct sk_buff *skb, - struct open_request *req, + struct request_sock *req, struct dst_entry *dst); int (*remember_stamp) (struct sock *sk); @@ -852,8 +756,8 @@ extern enum tcp_tw_status tcp_timewait_state_process(struct tcp_tw_bucket *tw, unsigned len); extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb, - struct open_request *req, - struct open_request **prev); + struct request_sock *req, + struct request_sock **prev); extern int tcp_child_process(struct sock *parent, struct sock *child, struct sk_buff *skb); @@ -903,12 +807,12 @@ extern int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb); extern struct sock * tcp_create_openreq_child(struct sock *sk, - struct open_request *req, + struct request_sock *req, struct sk_buff *skb); extern struct sock * tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, - struct open_request *req, + struct request_sock *req, struct dst_entry *dst); extern int tcp_v4_do_rcv(struct sock *sk, @@ -922,7 +826,7 @@ extern int tcp_connect(struct sock *sk); extern struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst, - struct open_request *req); + struct request_sock *req); extern int tcp_disconnect(struct sock *sk, int flags); @@ -1204,6 +1108,82 @@ static inline void tcp_packets_out_dec(struct tcp_sock *tp, tp->packets_out -= tcp_skb_pcount(skb); } +/* Events passed to congestion control interface */ +enum tcp_ca_event { + CA_EVENT_TX_START, /* first transmit when no packets in flight */ + CA_EVENT_CWND_RESTART, /* congestion window restart */ + CA_EVENT_COMPLETE_CWR, /* end of congestion recovery */ + CA_EVENT_FRTO, /* fast recovery timeout */ + CA_EVENT_LOSS, /* loss timeout */ + CA_EVENT_FAST_ACK, /* in sequence ack */ + CA_EVENT_SLOW_ACK, /* other ack */ +}; + +/* + * Interface for adding new TCP congestion control handlers + */ +#define TCP_CA_NAME_MAX 16 +struct tcp_congestion_ops { + struct list_head list; + + /* initialize private data (optional) */ + void (*init)(struct tcp_sock *tp); + /* cleanup private data (optional) */ + void (*release)(struct tcp_sock *tp); + + /* return slow start threshold (required) */ + u32 (*ssthresh)(struct tcp_sock *tp); + /* lower bound for congestion window (optional) */ + u32 (*min_cwnd)(struct tcp_sock *tp); + /* do new cwnd calculation (required) */ + void (*cong_avoid)(struct tcp_sock *tp, u32 ack, + u32 rtt, u32 in_flight, int good_ack); + /* round trip time sample per acked packet (optional) */ + void (*rtt_sample)(struct tcp_sock *tp, u32 usrtt); + /* call before changing ca_state (optional) */ + void (*set_state)(struct tcp_sock *tp, u8 new_state); + /* call when cwnd event occurs (optional) */ + void (*cwnd_event)(struct tcp_sock *tp, enum tcp_ca_event ev); + /* new value of cwnd after loss (optional) */ + u32 (*undo_cwnd)(struct tcp_sock *tp); + /* hook for packet ack accounting (optional) */ + void (*pkts_acked)(struct tcp_sock *tp, u32 num_acked); + /* get info for tcp_diag (optional) */ + void (*get_info)(struct tcp_sock *tp, u32 
ext, struct sk_buff *skb); + + char name[TCP_CA_NAME_MAX]; + struct module *owner; +}; + +extern int tcp_register_congestion_control(struct tcp_congestion_ops *type); +extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type); + +extern void tcp_init_congestion_control(struct tcp_sock *tp); +extern void tcp_cleanup_congestion_control(struct tcp_sock *tp); +extern int tcp_set_default_congestion_control(const char *name); +extern void tcp_get_default_congestion_control(char *name); +extern int tcp_set_congestion_control(struct tcp_sock *tp, const char *name); + +extern struct tcp_congestion_ops tcp_init_congestion_ops; +extern u32 tcp_reno_ssthresh(struct tcp_sock *tp); +extern void tcp_reno_cong_avoid(struct tcp_sock *tp, u32 ack, + u32 rtt, u32 in_flight, int flag); +extern u32 tcp_reno_min_cwnd(struct tcp_sock *tp); +extern struct tcp_congestion_ops tcp_reno; + +static inline void tcp_set_ca_state(struct tcp_sock *tp, u8 ca_state) +{ + if (tp->ca_ops->set_state) + tp->ca_ops->set_state(tp, ca_state); + tp->ca_state = ca_state; +} + +static inline void tcp_ca_event(struct tcp_sock *tp, enum tcp_ca_event event) +{ + if (tp->ca_ops->cwnd_event) + tp->ca_ops->cwnd_event(tp, event); +} + /* This determines how many packets are "in the network" to the best * of our knowledge. In many cases it is conservative, but where * detailed information is available from the receiver (via SACK @@ -1223,91 +1203,6 @@ static __inline__ unsigned int tcp_packets_in_flight(const struct tcp_sock *tp) return (tp->packets_out - tp->left_out + tp->retrans_out); } -/* - * Which congestion algorithim is in use on the connection. - */ -#define tcp_is_vegas(__tp) ((__tp)->adv_cong == TCP_VEGAS) -#define tcp_is_westwood(__tp) ((__tp)->adv_cong == TCP_WESTWOOD) -#define tcp_is_bic(__tp) ((__tp)->adv_cong == TCP_BIC) - -/* Recalculate snd_ssthresh, we want to set it to: - * - * Reno: - * one half the current congestion window, but no - * less than two segments - * - * BIC: - * behave like Reno until low_window is reached, - * then increase congestion window slowly - */ -static inline __u32 tcp_recalc_ssthresh(struct tcp_sock *tp) -{ - if (tcp_is_bic(tp)) { - if (sysctl_tcp_bic_fast_convergence && - tp->snd_cwnd < tp->bictcp.last_max_cwnd) - tp->bictcp.last_max_cwnd = (tp->snd_cwnd * - (BICTCP_BETA_SCALE - + sysctl_tcp_bic_beta)) - / (2 * BICTCP_BETA_SCALE); - else - tp->bictcp.last_max_cwnd = tp->snd_cwnd; - - if (tp->snd_cwnd > sysctl_tcp_bic_low_window) - return max((tp->snd_cwnd * sysctl_tcp_bic_beta) - / BICTCP_BETA_SCALE, 2U); - } - - return max(tp->snd_cwnd >> 1U, 2U); -} - -/* Stop taking Vegas samples for now. */ -#define tcp_vegas_disable(__tp) ((__tp)->vegas.doing_vegas_now = 0) - -static inline void tcp_vegas_enable(struct tcp_sock *tp) -{ - /* There are several situations when we must "re-start" Vegas: - * - * o when a connection is established - * o after an RTO - * o after fast recovery - * o when we send a packet and there is no outstanding - * unacknowledged data (restarting an idle connection) - * - * In these circumstances we cannot do a Vegas calculation at the - * end of the first RTT, because any calculation we do is using - * stale info -- both the saved cwnd and congestion feedback are - * stale. - * - * Instead we must wait until the completion of an RTT during - * which we actually receive ACKs. - */ - - /* Begin taking Vegas samples next time we send something. */ - tp->vegas.doing_vegas_now = 1; - - /* Set the beginning of the next send window. 
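/*
 * Sketch of a congestion control module built on the interface above.
 * Illustrative only: "demo" is a hypothetical name, and the module simply
 * reuses the exported Reno helpers declared above, so it behaves like the
 * default algorithm.  It would live in its own file including <net/tcp.h>
 * and <linux/module.h>.
 */
static struct tcp_congestion_ops demo_cong_ops = {
        .name           = "demo",
        .owner          = THIS_MODULE,
        .ssthresh       = tcp_reno_ssthresh,
        .cong_avoid     = tcp_reno_cong_avoid,
        .min_cwnd       = tcp_reno_min_cwnd,
};

static int __init demo_cong_init(void)
{
        return tcp_register_congestion_control(&demo_cong_ops);
}

static void __exit demo_cong_exit(void)
{
        tcp_unregister_congestion_control(&demo_cong_ops);
}

module_init(demo_cong_init);
module_exit(demo_cong_exit);
MODULE_LICENSE("GPL");

/* Once registered, the algorithm can be selected system-wide via
 * tcp_set_default_congestion_control("demo") or per socket via
 * tcp_set_congestion_control(). */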
*/ - tp->vegas.beg_snd_nxt = tp->snd_nxt; - - tp->vegas.cntRTT = 0; - tp->vegas.minRTT = 0x7fffffff; -} - -/* Should we be taking Vegas samples right now? */ -#define tcp_vegas_enabled(__tp) ((__tp)->vegas.doing_vegas_now) - -extern void tcp_ca_init(struct tcp_sock *tp); - -static inline void tcp_set_ca_state(struct tcp_sock *tp, u8 ca_state) -{ - if (tcp_is_vegas(tp)) { - if (ca_state == TCP_CA_Open) - tcp_vegas_enable(tp); - else - tcp_vegas_disable(tp); - } - tp->ca_state = ca_state; -} - /* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd. * The exception is rate halving phase, when cwnd is decreasing towards * ssthresh. @@ -1356,7 +1251,7 @@ static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp) static inline void __tcp_enter_cwr(struct tcp_sock *tp) { tp->undo_marker = 0; - tp->snd_ssthresh = tcp_recalc_ssthresh(tp); + tp->snd_ssthresh = tp->ca_ops->ssthresh(tp); tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1U); tp->snd_cwnd_cnt = 0; @@ -1750,99 +1645,71 @@ static inline int tcp_full_space(const struct sock *sk) return tcp_win_from_space(sk->sk_rcvbuf); } -static inline void tcp_acceptq_queue(struct sock *sk, struct open_request *req, +static inline void tcp_acceptq_queue(struct sock *sk, struct request_sock *req, struct sock *child) { - struct tcp_sock *tp = tcp_sk(sk); - - req->sk = child; - sk_acceptq_added(sk); - - if (!tp->accept_queue_tail) { - tp->accept_queue = req; - } else { - tp->accept_queue_tail->dl_next = req; - } - tp->accept_queue_tail = req; - req->dl_next = NULL; + reqsk_queue_add(&tcp_sk(sk)->accept_queue, req, sk, child); } -struct tcp_listen_opt -{ - u8 max_qlen_log; /* log_2 of maximal queued SYNs */ - int qlen; - int qlen_young; - int clock_hand; - u32 hash_rnd; - struct open_request *syn_table[TCP_SYNQ_HSIZE]; -}; - static inline void -tcp_synq_removed(struct sock *sk, struct open_request *req) +tcp_synq_removed(struct sock *sk, struct request_sock *req) { - struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt; - - if (--lopt->qlen == 0) + if (reqsk_queue_removed(&tcp_sk(sk)->accept_queue, req) == 0) tcp_delete_keepalive_timer(sk); - if (req->retrans == 0) - lopt->qlen_young--; } static inline void tcp_synq_added(struct sock *sk) { - struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt; - - if (lopt->qlen++ == 0) + if (reqsk_queue_added(&tcp_sk(sk)->accept_queue) == 0) tcp_reset_keepalive_timer(sk, TCP_TIMEOUT_INIT); - lopt->qlen_young++; } static inline int tcp_synq_len(struct sock *sk) { - return tcp_sk(sk)->listen_opt->qlen; + return reqsk_queue_len(&tcp_sk(sk)->accept_queue); } static inline int tcp_synq_young(struct sock *sk) { - return tcp_sk(sk)->listen_opt->qlen_young; + return reqsk_queue_len_young(&tcp_sk(sk)->accept_queue); } static inline int tcp_synq_is_full(struct sock *sk) { - return tcp_synq_len(sk) >> tcp_sk(sk)->listen_opt->max_qlen_log; + return reqsk_queue_is_full(&tcp_sk(sk)->accept_queue); } -static inline void tcp_synq_unlink(struct tcp_sock *tp, struct open_request *req, - struct open_request **prev) +static inline void tcp_synq_unlink(struct tcp_sock *tp, struct request_sock *req, + struct request_sock **prev) { - write_lock(&tp->syn_wait_lock); - *prev = req->dl_next; - write_unlock(&tp->syn_wait_lock); + reqsk_queue_unlink(&tp->accept_queue, req, prev); } -static inline void tcp_synq_drop(struct sock *sk, struct open_request *req, - struct open_request **prev) +static inline void tcp_synq_drop(struct sock *sk, struct request_sock *req, + struct request_sock **prev) { 
tcp_synq_unlink(tcp_sk(sk), req, prev); tcp_synq_removed(sk, req); - tcp_openreq_free(req); + reqsk_free(req); } -static __inline__ void tcp_openreq_init(struct open_request *req, +static __inline__ void tcp_openreq_init(struct request_sock *req, struct tcp_options_received *rx_opt, struct sk_buff *skb) { + struct inet_request_sock *ireq = inet_rsk(req); + req->rcv_wnd = 0; /* So that tcp_send_synack() knows! */ - req->rcv_isn = TCP_SKB_CB(skb)->seq; + tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq; req->mss = rx_opt->mss_clamp; req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0; - req->tstamp_ok = rx_opt->tstamp_ok; - req->sack_ok = rx_opt->sack_ok; - req->snd_wscale = rx_opt->snd_wscale; - req->wscale_ok = rx_opt->wscale_ok; - req->acked = 0; - req->ecn_ok = 0; - req->rmt_port = skb->h.th->source; + ireq->tstamp_ok = rx_opt->tstamp_ok; + ireq->sack_ok = rx_opt->sack_ok; + ireq->snd_wscale = rx_opt->snd_wscale; + ireq->wscale_ok = rx_opt->wscale_ok; + ireq->acked = 0; + ireq->ecn_ok = 0; + ireq->rmt_port = skb->h.th->source; } extern void tcp_enter_memory_pressure(void); @@ -1972,52 +1839,4 @@ struct tcp_iter_state { extern int tcp_proc_register(struct tcp_seq_afinfo *afinfo); extern void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo); -/* TCP Westwood functions and constants */ - -#define TCP_WESTWOOD_INIT_RTT (20*HZ) /* maybe too conservative?! */ -#define TCP_WESTWOOD_RTT_MIN (HZ/20) /* 50ms */ - -static inline void tcp_westwood_update_rtt(struct tcp_sock *tp, __u32 rtt_seq) -{ - if (tcp_is_westwood(tp)) - tp->westwood.rtt = rtt_seq; -} - -static inline __u32 __tcp_westwood_bw_rttmin(const struct tcp_sock *tp) -{ - return max((tp->westwood.bw_est) * (tp->westwood.rtt_min) / - (__u32) (tp->mss_cache_std), - 2U); -} - -static inline __u32 tcp_westwood_bw_rttmin(const struct tcp_sock *tp) -{ - return tcp_is_westwood(tp) ? __tcp_westwood_bw_rttmin(tp) : 0; -} - -static inline int tcp_westwood_ssthresh(struct tcp_sock *tp) -{ - __u32 ssthresh = 0; - - if (tcp_is_westwood(tp)) { - ssthresh = __tcp_westwood_bw_rttmin(tp); - if (ssthresh) - tp->snd_ssthresh = ssthresh; - } - - return (ssthresh != 0); -} - -static inline int tcp_westwood_cwnd(struct tcp_sock *tp) -{ - __u32 cwnd = 0; - - if (tcp_is_westwood(tp)) { - cwnd = __tcp_westwood_bw_rttmin(tp); - if (cwnd) - tp->snd_cwnd = cwnd; - } - - return (cwnd != 0); -} #endif /* _TCP_H */ diff --git a/include/net/tcp_ecn.h b/include/net/tcp_ecn.h index dc14563..64980ee 100644 --- a/include/net/tcp_ecn.h +++ b/include/net/tcp_ecn.h @@ -2,6 +2,7 @@ #define _NET_TCP_ECN_H_ 1 #include <net/inet_ecn.h> +#include <net/request_sock.h> #define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH)) @@ -38,9 +39,9 @@ static inline void TCP_ECN_send_syn(struct sock *sk, struct tcp_sock *tp, } static __inline__ void -TCP_ECN_make_synack(struct open_request *req, struct tcphdr *th) +TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th) { - if (req->ecn_ok) + if (inet_rsk(req)->ecn_ok) th->ece = 1; } @@ -111,16 +112,16 @@ static inline int TCP_ECN_rcv_ecn_echo(struct tcp_sock *tp, struct tcphdr *th) } static inline void TCP_ECN_openreq_child(struct tcp_sock *tp, - struct open_request *req) + struct request_sock *req) { - tp->ecn_flags = req->ecn_ok ? TCP_ECN_OK : 0; + tp->ecn_flags = inet_rsk(req)->ecn_ok ? 
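With the open_request to request_sock conversion above, generic fields (mss, ts_recent, rcv_wnd) stay on struct request_sock while the inet- and TCP-level state moves behind the inet_rsk()/tcp_rsk() accessors, as the rewritten tcp_openreq_init() shows. A small sketch of the resulting layering, assuming the usual <net/tcp.h> include; the helper name is illustrative only.

#include <net/tcp.h>

/* illustrative only: read back a few fields through the new accessors */
static void example_dump_req(struct request_sock *req)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	printk(KERN_DEBUG "req: rmt_port=%u rcv_isn=%u mss=%u sack_ok=%d\n",
	       ntohs(ireq->rmt_port), tcp_rsk(req)->rcv_isn,
	       req->mss, ireq->sack_ok);
}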
TCP_ECN_OK : 0; } static __inline__ void -TCP_ECN_create_request(struct open_request *req, struct tcphdr *th) +TCP_ECN_create_request(struct request_sock *req, struct tcphdr *th) { if (sysctl_tcp_ecn && th->ece && th->cwr) - req->ecn_ok = 1; + inet_rsk(req)->ecn_ok = 1; } #endif diff --git a/include/net/x25.h b/include/net/x25.h index 7a1ba5b..8b39b98 100644 --- a/include/net/x25.h +++ b/include/net/x25.h @@ -79,6 +79,8 @@ enum { #define X25_DEFAULT_PACKET_SIZE X25_PS128 /* Default Packet Size */ #define X25_DEFAULT_THROUGHPUT 0x0A /* Deafult Throughput */ #define X25_DEFAULT_REVERSE 0x00 /* Default Reverse Charging */ +#define X25_DENY_ACCPT_APPRV 0x01 /* Default value */ +#define X25_ALLOW_ACCPT_APPRV 0x00 /* Control enabled */ #define X25_SMODULUS 8 #define X25_EMODULUS 128 @@ -94,7 +96,7 @@ enum { #define X25_FAC_CLASS_C 0x80 #define X25_FAC_CLASS_D 0xC0 -#define X25_FAC_REVERSE 0x01 +#define X25_FAC_REVERSE 0x01 /* also fast select */ #define X25_FAC_THROUGHPUT 0x02 #define X25_FAC_PACKET_SIZE 0x42 #define X25_FAC_WINDOW_SIZE 0x43 @@ -134,8 +136,8 @@ struct x25_sock { struct sock sk; struct x25_address source_addr, dest_addr; struct x25_neigh *neighbour; - unsigned int lci; - unsigned char state, condition, qbitincl, intflag; + unsigned int lci, cudmatchlength; + unsigned char state, condition, qbitincl, intflag, accptapprv; unsigned short vs, vr, va, vl; unsigned long t2, t21, t22, t23; unsigned short fraglen; @@ -242,7 +244,6 @@ extern int x25_validate_nr(struct sock *, unsigned short); extern void x25_write_internal(struct sock *, int); extern int x25_decode(struct sock *, struct sk_buff *, int *, int *, int *, int *, int *); extern void x25_disconnect(struct sock *, int, unsigned char, unsigned char); -extern int x25_check_calluserdata(struct x25_calluserdata *,struct x25_calluserdata *); /* x25_timer.c */ extern void x25_start_heartbeat(struct sock *); diff --git a/include/net/xfrm.h b/include/net/xfrm.h index e142a25..029522a 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -158,6 +158,20 @@ enum { XFRM_STATE_DEAD }; +/* callback structure passed from either netlink or pfkey */ +struct km_event +{ + union { + u32 hard; + u32 proto; + u32 byid; + } data; + + u32 seq; + u32 pid; + u32 event; +}; + struct xfrm_type; struct xfrm_dst; struct xfrm_policy_afinfo { @@ -179,6 +193,8 @@ struct xfrm_policy_afinfo { extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo); extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo); +extern void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c); +extern void km_state_notify(struct xfrm_state *x, struct km_event *c); #define XFRM_ACQ_EXPIRES 30 @@ -188,6 +204,7 @@ struct xfrm_state_afinfo { rwlock_t lock; struct list_head *state_bydst; struct list_head *state_byspi; + int (*init_flags)(struct xfrm_state *x); void (*init_tempsel)(struct xfrm_state *x, struct flowi *fl, struct xfrm_tmpl *tmpl, xfrm_address_t *daddr, xfrm_address_t *saddr); @@ -209,7 +226,7 @@ struct xfrm_type struct module *owner; __u8 proto; - int (*init_state)(struct xfrm_state *x, void *args); + int (*init_state)(struct xfrm_state *x); void (*destructor)(struct xfrm_state *); int (*input)(struct xfrm_state *, struct xfrm_decap_state *, struct sk_buff *skb); int (*post_input)(struct xfrm_state *, struct xfrm_decap_state *, struct sk_buff *skb); @@ -290,11 +307,11 @@ struct xfrm_mgr { struct list_head list; char *id; - int (*notify)(struct xfrm_state *x, int event); + int (*notify)(struct xfrm_state *x, struct 
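struct km_event above gives key managers one notification argument instead of a bare event integer. A hedged sketch of a key manager consuming it: the callback only logs the fields, and the registration comment assumes xfrm_register_km() as declared further down in this header. Names are placeholders.

#include <net/xfrm.h>

static int example_km_notify(struct xfrm_state *x, struct km_event *c)
{
	/* event code, sequence and pid are whatever the core filled in;
	 * c->data.hard distinguishes hard/soft expires when applicable */
	printk(KERN_DEBUG "xfrm state event %u (seq=%u pid=%u hard=%u)\n",
	       c->event, c->seq, c->pid, c->data.hard);
	return 0;
}

static struct xfrm_mgr example_km = {
	.id	= "example",
	.notify	= example_km_notify,
	/* .acquire, .compile_policy, .new_mapping, .notify_policy ... */
};
/* registered from the key manager's init path: xfrm_register_km(&example_km) */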
km_event *c); int (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp, int dir); struct xfrm_policy *(*compile_policy)(u16 family, int opt, u8 *data, int len, int *dir); int (*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport); - int (*notify_policy)(struct xfrm_policy *x, int dir, int event); + int (*notify_policy)(struct xfrm_policy *x, int dir, struct km_event *c); }; extern int xfrm_register_km(struct xfrm_mgr *km); @@ -515,6 +532,8 @@ struct xfrm_dst struct dst_entry *route; u32 route_mtu_cached; u32 child_mtu_cached; + u32 route_cookie; + u32 path_cookie; }; static inline void xfrm_dst_destroy(struct xfrm_dst *xdst) @@ -654,7 +673,7 @@ static inline int xfrm_sk_clone_policy(struct sock *sk) return 0; } -extern void xfrm_policy_delete(struct xfrm_policy *pol, int dir); +extern int xfrm_policy_delete(struct xfrm_policy *pol, int dir); static inline void xfrm_sk_free_policy(struct sock *sk) { @@ -815,12 +834,13 @@ extern int xfrm_state_add(struct xfrm_state *x); extern int xfrm_state_update(struct xfrm_state *x); extern struct xfrm_state *xfrm_state_lookup(xfrm_address_t *daddr, u32 spi, u8 proto, unsigned short family); extern struct xfrm_state *xfrm_find_acq_byseq(u32 seq); -extern void xfrm_state_delete(struct xfrm_state *x); +extern int xfrm_state_delete(struct xfrm_state *x); extern void xfrm_state_flush(u8 proto); extern int xfrm_replay_check(struct xfrm_state *x, u32 seq); extern void xfrm_replay_advance(struct xfrm_state *x, u32 seq); extern int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb); extern int xfrm_state_mtu(struct xfrm_state *x, int mtu); +extern int xfrm_init_state(struct xfrm_state *x); extern int xfrm4_rcv(struct sk_buff *skb); extern int xfrm4_output(struct sk_buff *skb); extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler); diff --git a/include/pcmcia/ss.h b/include/pcmcia/ss.h index 6d3413a..67b867f3 100644 --- a/include/pcmcia/ss.h +++ b/include/pcmcia/ss.h @@ -77,6 +77,11 @@ extern socket_state_t dead_socket; /* Use this just for bridge windows */ #define MAP_IOSPACE 0x20 +/* power hook operations */ +#define HOOK_POWER_PRE 0x01 +#define HOOK_POWER_POST 0x02 + + typedef struct pccard_io_map { u_char map; u_char flags; @@ -222,6 +227,9 @@ struct pcmcia_socket { /* Zoom video behaviour is so chip specific its not worth adding this to _ops */ void (*zoom_video)(struct pcmcia_socket *, int); + + /* so is power hook */ + int (*power_hook)(struct pcmcia_socket *sock, int operation); /* state thread */ struct semaphore skt_sem; /* protects socket h/w state */ diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h index 659ecf48..1fb2337 100644 --- a/include/scsi/scsi.h +++ b/include/scsi/scsi.h @@ -41,6 +41,7 @@ extern const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE]; #define FORMAT_UNIT 0x04 #define READ_BLOCK_LIMITS 0x05 #define REASSIGN_BLOCKS 0x07 +#define INITIALIZE_ELEMENT_STATUS 0x07 #define READ_6 0x08 #define WRITE_6 0x0a #define SEEK_6 0x0b @@ -65,6 +66,7 @@ extern const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE]; #define READ_10 0x28 #define WRITE_10 0x2a #define SEEK_10 0x2b +#define POSITION_TO_ELEMENT 0x2b #define WRITE_VERIFY 0x2e #define VERIFY 0x2f #define SEARCH_HIGH 0x30 @@ -97,6 +99,7 @@ extern const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE]; #define PERSISTENT_RESERVE_OUT 0x5f #define REPORT_LUNS 0xa0 #define MOVE_MEDIUM 0xa5 +#define EXCHANGE_MEDIUM 0xa6 #define READ_12 0xa8 #define WRITE_12 0xaa #define WRITE_VERIFY_12 0xae @@ -210,6 +213,7 @@ static 
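The power_hook callback and the HOOK_POWER_PRE/HOOK_POWER_POST codes above let a socket driver bracket power changes with chip-specific work. A sketch of what such a hook might look like; exactly when the core invokes it is presumed from the PRE/POST naming, and the commented helpers are placeholders.

#include <pcmcia/ss.h>

static int example_power_hook(struct pcmcia_socket *sock, int operation)
{
	switch (operation) {
	case HOOK_POWER_PRE:
		/* e.g. quiesce bridge logic before Vcc/Vpp are changed */
		break;
	case HOOK_POWER_POST:
		/* e.g. restore bridge state once power is stable again */
		break;
	}
	return 0;
}
/* wired up at probe time with:  socket->power_hook = example_power_hook; */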
inline int scsi_status_is_good(int status) #define TYPE_COMM 0x09 /* Communications device */ #define TYPE_ENCLOSURE 0x0d /* Enclosure Services Device */ #define TYPE_RAID 0x0c +#define TYPE_RBC 0x0e #define TYPE_NO_LUN 0x7f /* diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index c018020..63c91dd 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -154,7 +154,9 @@ struct scsi_target { unsigned int id; /* target id ... replace * scsi_device.id eventually */ unsigned long create:1; /* signal that it needs to be added */ - unsigned long starget_data[0]; + void *hostdata; /* available to low-level driver */ + unsigned long starget_data[0]; /* for the transport */ + /* starget_data must be the last element!!!! */ } __attribute__((aligned(sizeof(unsigned long)))); #define to_scsi_target(d) container_of(d, struct scsi_target, dev) diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index 1cee1e1..db9914a 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -10,6 +10,7 @@ struct block_device; struct module; struct scsi_cmnd; struct scsi_device; +struct scsi_target; struct Scsi_Host; struct scsi_host_cmd_pool; struct scsi_transport_template; @@ -228,6 +229,30 @@ struct scsi_host_template { void (* slave_destroy)(struct scsi_device *); /* + * Before the mid layer attempts to scan for a new device attached + * to a target where no target currently exists, it will call this + * entry in your driver. Should your driver need to allocate any + * structs or perform any other init items in order to send commands + * to a currently unused target, then this is where you can perform + * those allocations. + * + * Return values: 0 on success, non-0 on failure + * + * Status: OPTIONAL + */ + int (* target_alloc)(struct scsi_target *); + + /* + * Immediately prior to deallocating the target structure, and + * after all activity to attached scsi devices has ceased, the + * midlayer calls this point so that the driver may deallocate + * and terminate any references to the target. + * + * Status: OPTIONAL + */ + void (* target_destroy)(struct scsi_target *); + + /* * fill in this function to allow the queue depth of this host * to be changeable (on a per device basis). returns either * the current queue depth setting (may be different from what diff --git a/include/scsi/scsi_transport.h b/include/scsi/scsi_transport.h index 2dcee7a..a4f1837 100644 --- a/include/scsi/scsi_transport.h +++ b/include/scsi/scsi_transport.h @@ -21,6 +21,7 @@ #define SCSI_TRANSPORT_H #include <linux/transport_class.h> +#include <scsi/scsi_host.h> struct scsi_transport_template { /* the attribute containers */ @@ -32,8 +33,11 @@ struct scsi_transport_template { * space of this size will be left at the end of the * scsi_* structure */ int device_size; + int device_private_offset; int target_size; + int target_private_offset; int host_size; + /* no private offset for the host; there's an alternative mechanism */ /* * True if the transport wants to use a host-based work-queue @@ -45,4 +49,38 @@ struct scsi_transport_template { dev_to_shost((tc)->dev) +/* Private area maintenance. The driver requested allocations come + * directly after the transport class allocations (if any). The idea + * is that you *must* call these only once. 
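The new target_alloc/target_destroy template entries pair naturally with the hostdata pointer just added to struct scsi_target. A minimal sketch of a low-level driver using them, assuming the usual SCSI and slab includes; the per-target structure and function names are invented for illustration.

#include <linux/slab.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

struct example_target_data {
	int flags;			/* whatever the LLDD needs per target */
};

static int example_target_alloc(struct scsi_target *starget)
{
	struct example_target_data *td = kmalloc(sizeof(*td), GFP_KERNEL);

	if (!td)
		return -ENOMEM;
	memset(td, 0, sizeof(*td));
	starget->hostdata = td;		/* available to the low-level driver */
	return 0;
}

static void example_target_destroy(struct scsi_target *starget)
{
	kfree(starget->hostdata);
	starget->hostdata = NULL;
}

/* in the driver's scsi_host_template:
 *	.target_alloc	= example_target_alloc,
 *	.target_destroy	= example_target_destroy,
 */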
The code assumes that the + * initial values are the ones the transport specific code requires */ +static inline void +scsi_transport_reserve_target(struct scsi_transport_template * t, int space) +{ + BUG_ON(t->target_private_offset != 0); + t->target_private_offset = ALIGN(t->target_size, sizeof(void *)); + t->target_size = t->target_private_offset + space; +} +static inline void +scsi_transport_reserve_device(struct scsi_transport_template * t, int space) +{ + BUG_ON(t->device_private_offset != 0); + t->device_private_offset = ALIGN(t->device_size, sizeof(void *)); + t->device_size = t->device_private_offset + space; +} +static inline void * +scsi_transport_target_data(struct scsi_target *starget) +{ + struct Scsi_Host *shost = dev_to_shost(&starget->dev); + return (u8 *)starget->starget_data + + shost->transportt->target_private_offset; + +} +static inline void * +scsi_transport_device_data(struct scsi_device *sdev) +{ + struct Scsi_Host *shost = sdev->host; + return (u8 *)sdev->sdev_data + + shost->transportt->device_private_offset; +} + #endif /* SCSI_TRANSPORT_H */ diff --git a/include/scsi/sg_request.h b/include/scsi/sg_request.h new file mode 100644 index 0000000..57ff525 --- /dev/null +++ b/include/scsi/sg_request.h @@ -0,0 +1,26 @@ +typedef struct scsi_request Scsi_Request; + +static Scsi_Request *dummy_cmdp; /* only used for sizeof */ + +typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */ + unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */ + unsigned short sglist_len; /* size of malloc'd scatter-gather list ++ */ + unsigned bufflen; /* Size of (aggregate) data buffer */ + unsigned b_malloc_len; /* actual len malloc'ed in buffer */ + void *buffer; /* Data buffer or scatter list (k_use_sg>0) */ + char dio_in_use; /* 0->indirect IO (or mmap), 1->dio */ + unsigned char cmd_opcode; /* first byte of command */ +} Sg_scatter_hold; + +typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */ + Scsi_Request *my_cmdp; /* != 0 when request with lower levels */ + struct sg_request *nextrp; /* NULL -> tail request (slist) */ + struct sg_fd *parentfp; /* NULL -> not in use */ + Sg_scatter_hold data; /* hold buffer, perhaps scatter list */ + sg_io_hdr_t header; /* scsi command+info, see <scsi/sg.h> */ + unsigned char sense_b[sizeof (dummy_cmdp->sr_sense_buffer)]; + char res_used; /* 1 -> using reserve buffer, 0 -> not ... 
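The reserve/data helpers above implement the "driver space after transport-class space" layout described in the comment: reserve once while setting up the template, then index into the per-device (or per-target) area at run time. A brief sketch under those assumptions; the structure and function names are illustrative.

#include <linux/jiffies.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>

struct example_dd {			/* the LLDD's per-device slice */
	unsigned long last_reset;
};

static void example_setup_template(struct scsi_transport_template *t)
{
	/* must be called exactly once, before any device is created */
	scsi_transport_reserve_device(t, sizeof(struct example_dd));
}

static void example_note_reset(struct scsi_device *sdev)
{
	struct example_dd *dd = scsi_transport_device_data(sdev);

	dd->last_reset = jiffies;
}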
*/ + char orphan; /* 1 -> drop on sight, 0 -> normal */ + char sg_io_owned; /* 1 -> packet belongs to SG_IO */ + volatile char done; /* 0->before bh, 1->before read, 2->read */ +} Sg_request; diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h index 2433e27..1309c12 100644 --- a/include/sound/ac97_codec.h +++ b/include/sound/ac97_codec.h @@ -437,6 +437,7 @@ struct snd_ac97_build_ops { void (*suspend) (ac97_t *ac97); void (*resume) (ac97_t *ac97); #endif + void (*update_jacks) (ac97_t *ac97); /* for jack-sharing */ }; struct _snd_ac97_bus_ops { @@ -516,6 +517,9 @@ struct _snd_ac97 { } ad18xx; unsigned int dev_flags; /* device specific */ } spec; + /* jack-sharing info */ + unsigned char indep_surround; + unsigned char channel_mode; }; /* conditions */ @@ -569,8 +573,8 @@ enum { }; struct ac97_quirk { - unsigned short vendor; /* PCI vendor id */ - unsigned short device; /* PCI device id */ + unsigned short subvendor; /* PCI subsystem vendor id */ + unsigned short subdevice; /* PCI sybsystem device id */ unsigned short mask; /* device id bit mask, 0 = accept all */ unsigned int codec_id; /* codec id (if any), 0 = accept all */ const char *name; /* name shown as info */ diff --git a/include/sound/asound.h b/include/sound/asound.h index a4d149f..9974f83 100644 --- a/include/sound/asound.h +++ b/include/sound/asound.h @@ -113,9 +113,10 @@ enum sndrv_hwdep_iface { SNDRV_HWDEP_IFACE_BLUETOOTH, /* Bluetooth audio */ SNDRV_HWDEP_IFACE_USX2Y_PCM, /* Tascam US122, US224 & US428 rawusb pcm */ SNDRV_HWDEP_IFACE_PCXHR, /* Digigram PCXHR */ + SNDRV_HWDEP_IFACE_SB_RC, /* SB Extigy/Audigy2NX remote control */ /* Don't forget to change the following: */ - SNDRV_HWDEP_IFACE_LAST = SNDRV_HWDEP_IFACE_PCXHR + SNDRV_HWDEP_IFACE_LAST = SNDRV_HWDEP_IFACE_SB_RC }; struct sndrv_hwdep_info { @@ -344,7 +345,7 @@ enum sndrv_pcm_hw_param { SNDRV_PCM_HW_PARAM_LAST_INTERVAL = SNDRV_PCM_HW_PARAM_TICK_TIME }; -#define SNDRV_PCM_HW_PARAMS_RUNTIME (1<<0) +#define SNDRV_PCM_HW_PARAMS_NORESAMPLE (1<<0) /* avoid rate resampling */ struct sndrv_interval { unsigned int min, max; @@ -559,7 +560,7 @@ enum { * Timer section - /dev/snd/timer */ -#define SNDRV_TIMER_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 2) +#define SNDRV_TIMER_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 4) enum sndrv_timer_class { SNDRV_TIMER_CLASS_NONE = -1, @@ -672,10 +673,11 @@ enum { SNDRV_TIMER_IOCTL_INFO = _IOR('T', 0x11, struct sndrv_timer_info), SNDRV_TIMER_IOCTL_PARAMS = _IOW('T', 0x12, struct sndrv_timer_params), SNDRV_TIMER_IOCTL_STATUS = _IOR('T', 0x14, struct sndrv_timer_status), - SNDRV_TIMER_IOCTL_START = _IO('T', 0x20), - SNDRV_TIMER_IOCTL_STOP = _IO('T', 0x21), - SNDRV_TIMER_IOCTL_CONTINUE = _IO('T', 0x22), - SNDRV_TIMER_IOCTL_PAUSE = _IO('T', 0x23), + /* The following four ioctls are changed since 1.0.9 due to confliction */ + SNDRV_TIMER_IOCTL_START = _IO('T', 0xa0), + SNDRV_TIMER_IOCTL_STOP = _IO('T', 0xa1), + SNDRV_TIMER_IOCTL_CONTINUE = _IO('T', 0xa2), + SNDRV_TIMER_IOCTL_PAUSE = _IO('T', 0xa3), }; struct sndrv_timer_read { diff --git a/include/sound/control.h b/include/sound/control.h index 7b9444c..ef7903c 100644 --- a/include/sound/control.h +++ b/include/sound/control.h @@ -106,7 +106,7 @@ typedef int (*snd_kctl_ioctl_func_t) (snd_card_t * card, void snd_ctl_notify(snd_card_t * card, unsigned int mask, snd_ctl_elem_id_t * id); snd_kcontrol_t *snd_ctl_new(snd_kcontrol_t * kcontrol, unsigned int access); -snd_kcontrol_t *snd_ctl_new1(snd_kcontrol_new_t * kcontrolnew, void * private_data); +snd_kcontrol_t *snd_ctl_new1(const 
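With the change above, AC'97 quirk tables match on the PCI subsystem IDs rather than the plain vendor/device pair. A sketch of what a driver-side table entry might now look like; the IDs and board name are placeholders, and any remaining fields of the structure are left to the real driver.

static struct ac97_quirk example_ac97_quirks[] = {
	{
		.subvendor = 0x1043,		/* placeholder subsystem vendor id */
		.subdevice = 0x80f3,		/* placeholder subsystem device id */
		.name = "Example board",	/* shown as info */
		/* .mask, .codec_id and the remaining fields as needed */
	},
	{ }	/* terminator */
};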
snd_kcontrol_new_t * kcontrolnew, void * private_data); void snd_ctl_free_one(snd_kcontrol_t * kcontrol); int snd_ctl_add(snd_card_t * card, snd_kcontrol_t * kcontrol); int snd_ctl_remove(snd_card_t * card, snd_kcontrol_t * kcontrol); diff --git a/include/sound/core.h b/include/sound/core.h index 9117c23..f8c4ef0a 100644 --- a/include/sound/core.h +++ b/include/sound/core.h @@ -292,6 +292,7 @@ void *snd_hidden_kcalloc(size_t n, size_t size, int flags); void snd_hidden_kfree(const void *obj); void *snd_hidden_vmalloc(unsigned long size); void snd_hidden_vfree(void *obj); +char *snd_hidden_kstrdup(const char *s, int flags); #define kmalloc(size, flags) snd_hidden_kmalloc(size, flags) #define kcalloc(n, size, flags) snd_hidden_kcalloc(n, size, flags) #define kfree(obj) snd_hidden_kfree(obj) @@ -301,6 +302,7 @@ void snd_hidden_vfree(void *obj); #define vmalloc_nocheck(size) snd_wrapper_vmalloc(size) #define kfree_nocheck(obj) snd_wrapper_kfree(obj) #define vfree_nocheck(obj) snd_wrapper_vfree(obj) +#define kstrdup(s, flags) snd_hidden_kstrdup(s, flags) #else #define snd_memory_init() /*NOP*/ #define snd_memory_done() /*NOP*/ @@ -311,7 +313,6 @@ void snd_hidden_vfree(void *obj); #define kfree_nocheck(obj) kfree(obj) #define vfree_nocheck(obj) vfree(obj) #endif -char *snd_kmalloc_strdup(const char *string, int flags); int copy_to_user_fromio(void __user *dst, const volatile void __iomem *src, size_t count); int copy_from_user_toio(volatile void __iomem *dst, const void __user *src, size_t count); diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h index 43b6786..c50b919 100644 --- a/include/sound/emu10k1.h +++ b/include/sound/emu10k1.h @@ -83,7 +83,8 @@ #define IPR 0x08 /* Global interrupt pending register */ /* Clear pending interrupts by writing a 1 to */ /* the relevant bits and zero to the other bits */ - +#define IPR_P16V 0x80000000 /* Bit set when the CA0151 P16V chip wishes + to interrupt */ #define IPR_GPIOMSG 0x20000000 /* GPIO message interrupt (RE'd, still not sure which INTE bits enable it) */ @@ -746,6 +747,7 @@ /* Assumes sample lock */ /* These three bitfields apply to CDSRCS, GPSRCS, and (except as noted) ZVSRCS. */ +#define SRCS_SPDIFVALID 0x04000000 /* SPDIF stream valid */ #define SRCS_SPDIFLOCKED 0x02000000 /* SPDIF stream locked */ #define SRCS_RATELOCKED 0x01000000 /* Sample rate locked */ #define SRCS_ESTSAMPLERATE 0x0007ffff /* Do not modify this field. */ @@ -803,10 +805,26 @@ #define A_FXWC2 0x75 /* Selects 0x9f-0x80 for FX recording */ #define A_SPDIF_SAMPLERATE 0x76 /* Set the sample rate of SPDIF output */ -#define A_SPDIF_RATE_MASK 0x000000c0 +#define A_SAMPLE_RATE 0x76 /* Various sample rate settings. */ +#define A_SAMPLE_RATE_NOT_USED 0x0ffc111e /* Bits that are not used and cannot be set. */ +#define A_SAMPLE_RATE_UNKNOWN 0xf0030001 /* Bits that can be set, but have unknown use. */ +#define A_SPDIF_RATE_MASK 0x000000e0 /* Any other values for rates, just use 48000 */ #define A_SPDIF_48000 0x00000000 -#define A_SPDIF_44100 0x00000080 +#define A_SPDIF_192000 0x00000020 #define A_SPDIF_96000 0x00000040 +#define A_SPDIF_44100 0x00000080 + +#define A_I2S_CAPTURE_RATE_MASK 0x00000e00 /* This sets the capture PCM rate, but it is */ +#define A_I2S_CAPTURE_48000 0x00000000 /* unclear if this sets the ADC rate as well. 
*/ +#define A_I2S_CAPTURE_192000 0x00000200 +#define A_I2S_CAPTURE_96000 0x00000400 +#define A_I2S_CAPTURE_44100 0x00000800 + +#define A_PCM_RATE_MASK 0x0000e000 /* This sets the playback PCM rate on the P16V */ +#define A_PCM_48000 0x00000000 +#define A_PCM_192000 0x00002000 +#define A_PCM_96000 0x00004000 +#define A_PCM_44100 0x00008000 /* 0x77,0x78,0x79 "something i2s-related" - default to 0x01080000 on my audigy 2 ZS --rlrevell */ /* 0x7a, 0x7b - lookup tables */ @@ -1039,28 +1057,28 @@ typedef struct { u32 vendor; u32 device; u32 subsystem; + unsigned char revision; unsigned char emu10k1_chip; /* Original SB Live. Not SB Live 24bit. */ unsigned char emu10k2_chip; /* Audigy 1 or Audigy 2. */ unsigned char ca0102_chip; /* Audigy 1 or Audigy 2. Not SB Audigy 2 Value. */ unsigned char ca0108_chip; /* Audigy 2 Value */ unsigned char ca0151_chip; /* P16V */ unsigned char spk71; /* Has 7.1 speakers */ + unsigned char sblive51; /* SBLive! 5.1 - extout 0x11 -> center, 0x12 -> lfe */ unsigned char spdif_bug; /* Has Spdif phasing bug */ unsigned char ac97_chip; /* Has an AC97 chip */ unsigned char ecard; /* APS EEPROM */ - char * driver; - char * name; + const char *driver; + const char *name; + const char *id; /* for backward compatibility - can be NULL if not needed */ } emu_chip_details_t; struct _snd_emu10k1 { int irq; unsigned long port; /* I/O port number */ - unsigned int APS: 1, /* APS flag */ - no_ac97: 1, /* no AC'97 */ - tos_link: 1, /* tos link detected */ - rear_ac97: 1, /* rear channels are on AC'97 */ - spk71:1; /* 7.1 configuration (Audigy 2 ZS) */ + unsigned int tos_link: 1, /* tos link detected */ + rear_ac97: 1; /* rear channels are on AC'97 */ const emu_chip_details_t *card_capabilities; /* Contains profile of card capabilities */ unsigned int audigy; /* is Audigy? 
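The new rate fields above share the A_SAMPLE_RATE register, so updating one of them means clearing that field's mask and OR-ing in the wanted setting. Two tiny helpers as a sketch, assuming emu10k1.h is already included; 'old' stands for a value previously read back from the register by whatever accessor the driver uses.

static inline unsigned int example_set_spdif_96k(unsigned int old)
{
	return (old & ~A_SPDIF_RATE_MASK) | A_SPDIF_96000;
}

static inline unsigned int example_set_p16v_pcm_44k1(unsigned int old)
{
	return (old & ~A_PCM_RATE_MASK) | A_PCM_44100;
}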
*/ unsigned int revision; /* chip revision */ @@ -1109,7 +1127,10 @@ struct _snd_emu10k1 { emu10k1_voice_t voices[NUM_G]; emu10k1_voice_t p16v_voices[4]; + emu10k1_voice_t p16v_capture_voice; int p16v_device_offset; + u32 p16v_capture_source; + u32 p16v_capture_channel; emu10k1_pcm_mixer_t pcm_mixer[32]; emu10k1_pcm_mixer_t efx_pcm_mixer[NUM_EFX_PLAYBACK]; snd_kcontrol_t *ctl_send_routing; @@ -1453,7 +1474,6 @@ int snd_emu10k1_fx8010_unregister_irq_handler(emu10k1_t *emu, #endif typedef struct { - unsigned int card; /* card type */ unsigned int internal_tram_size; /* in samples */ unsigned int external_tram_size; /* in samples */ char fxbus_names[16][32]; /* names of FXBUSes */ diff --git a/include/sound/gus.h b/include/sound/gus.h index 8b6287a..b4b461ca 100644 --- a/include/sound/gus.h +++ b/include/sound/gus.h @@ -526,9 +526,6 @@ extern void snd_gf1_adlib_write(snd_gus_card_t * gus, unsigned char reg, unsigne extern void snd_gf1_dram_addr(snd_gus_card_t * gus, unsigned int addr); extern void snd_gf1_poke(snd_gus_card_t * gus, unsigned int addr, unsigned char data); extern unsigned char snd_gf1_peek(snd_gus_card_t * gus, unsigned int addr); -extern void snd_gf1_pokew(snd_gus_card_t * gus, unsigned int addr, unsigned short data); -extern unsigned short snd_gf1_peekw(snd_gus_card_t * gus, unsigned int addr); -extern void snd_gf1_dram_setmem(snd_gus_card_t * gus, unsigned int addr, unsigned short value, unsigned int count); extern void snd_gf1_write_addr(snd_gus_card_t * gus, unsigned char reg, unsigned int addr, short w_16bit); extern unsigned int snd_gf1_read_addr(snd_gus_card_t * gus, unsigned char reg, short w_16bit); extern void snd_gf1_i_ctrl_stop(snd_gus_card_t * gus, unsigned char reg); @@ -544,9 +541,6 @@ extern inline unsigned short snd_gf1_i_read16(snd_gus_card_t * gus, unsigned cha { return snd_gf1_i_look16(gus, reg | 0x80); } -extern void snd_gf1_i_adlib_write(snd_gus_card_t * gus, unsigned char reg, unsigned char data); -extern void snd_gf1_i_write_addr(snd_gus_card_t * gus, unsigned char reg, unsigned int addr, short w_16bit); -extern unsigned int snd_gf1_i_read_addr(snd_gus_card_t * gus, unsigned char reg, short w_16bit); extern void snd_gf1_select_active_voices(snd_gus_card_t * gus); @@ -580,10 +574,6 @@ extern void snd_gf1_lfo_command(snd_gus_card_t * gus, int voice, unsigned char * void snd_gf1_mem_lock(snd_gf1_mem_t * alloc, int xup); int snd_gf1_mem_xfree(snd_gf1_mem_t * alloc, snd_gf1_mem_block_t * block); -snd_gf1_mem_block_t *snd_gf1_mem_look(snd_gf1_mem_t * alloc, - unsigned int address); -snd_gf1_mem_block_t *snd_gf1_mem_share(snd_gf1_mem_t * alloc, - unsigned int *share_id); snd_gf1_mem_block_t *snd_gf1_mem_alloc(snd_gf1_mem_t * alloc, int owner, char *name, int size, int w_16, int align, unsigned int *share_id); @@ -608,23 +598,13 @@ int snd_gf1_dma_transfer_block(snd_gus_card_t * gus, /* gus_volume.c */ unsigned short snd_gf1_lvol_to_gvol_raw(unsigned int vol); -unsigned int snd_gf1_gvol_to_lvol_raw(unsigned short gf1_vol); -unsigned int snd_gf1_calc_ramp_rate(snd_gus_card_t * gus, - unsigned short start, - unsigned short end, - unsigned int us); unsigned short snd_gf1_translate_freq(snd_gus_card_t * gus, unsigned int freq2); -unsigned short snd_gf1_compute_pitchbend(unsigned short pitchbend, unsigned short sens); -unsigned short snd_gf1_compute_freq(unsigned int freq, - unsigned int rate, - unsigned short mix_rate); /* gus_reset.c */ void snd_gf1_set_default_handlers(snd_gus_card_t * gus, unsigned int what); void snd_gf1_smart_stop_voice(snd_gus_card_t * gus, 
unsigned short voice); void snd_gf1_stop_voice(snd_gus_card_t * gus, unsigned short voice); -void snd_gf1_clear_voices(snd_gus_card_t * gus, unsigned short v_min, unsigned short v_max); void snd_gf1_stop_voices(snd_gus_card_t * gus, unsigned short v_min, unsigned short v_max); snd_gus_voice_t *snd_gf1_alloc_voice(snd_gus_card_t * gus, int type, int client, int port); void snd_gf1_free_voice(snd_gus_card_t * gus, snd_gus_voice_t *voice); @@ -641,9 +621,6 @@ int snd_gf1_pcm_new(snd_gus_card_t * gus, int pcm_dev, int control_index, snd_pc #ifdef CONFIG_SND_DEBUG extern void snd_gf1_print_voice_registers(snd_gus_card_t * gus); -extern void snd_gf1_print_global_registers(snd_gus_card_t * gus); -extern void snd_gf1_print_setup_registers(snd_gus_card_t * gus); -extern void snd_gf1_peek_print_block(snd_gus_card_t * gus, unsigned int addr, int count, int w_16bit); #endif /* gus.c */ diff --git a/include/sound/hdspm.h b/include/sound/hdspm.h new file mode 100644 index 0000000..c34427c --- /dev/null +++ b/include/sound/hdspm.h @@ -0,0 +1,131 @@ +#ifndef __SOUND_HDSPM_H /* -*- linux-c -*- */ +#define __SOUND_HDSPM_H +/* + * Copyright (C) 2003 Winfried Ritsch (IEM) + * based on hdsp.h from Thomas Charbonnel (thomas@undata.org) + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +/* Maximum channels is 64 even on 56Mode you have 64playbacks to matrix */ +#define HDSPM_MAX_CHANNELS 64 + +/* -------------------- IOCTL Peak/RMS Meters -------------------- */ + +typedef struct _snd_hdspm_peak_rms hdspm_peak_rms_t; + +/* peam rms level structure like we get from hardware + + maybe in future we can memory map it so I just copy it + to user on ioctl call now an dont change anything + rms are made out of low and high values + where (long) ????_rms = (????_rms_l >> 8) + ((????_rms_h & 0xFFFFFF00)<<24) + (i asume so from the code) +*/ + +struct _snd_hdspm_peak_rms { + + unsigned int level_offset[1024]; + + unsigned int input_peak[64]; + unsigned int playback_peak[64]; + unsigned int output_peak[64]; + unsigned int xxx_peak[64]; /* not used */ + + unsigned int reserved[256]; /* not used */ + + unsigned int input_rms_l[64]; + unsigned int playback_rms_l[64]; + unsigned int output_rms_l[64]; + unsigned int xxx_rms_l[64]; /* not used */ + + unsigned int input_rms_h[64]; + unsigned int playback_rms_h[64]; + unsigned int output_rms_h[64]; + unsigned int xxx_rms_h[64]; /* not used */ +}; + +struct sndrv_hdspm_peak_rms_ioctl { + hdspm_peak_rms_t *peak; +}; + +/* use indirect access due to the limit of ioctl bit size */ +#define SNDRV_HDSPM_IOCTL_GET_PEAK_RMS _IOR('H', 0x40, struct sndrv_hdspm_peak_rms_ioctl) + +/* ------------ CONFIG block IOCTL ---------------------- */ + +typedef struct _snd_hdspm_config_info hdspm_config_info_t; + +struct _snd_hdspm_config_info { + unsigned char pref_sync_ref; + unsigned char wordclock_sync_check; + unsigned char madi_sync_check; + unsigned int system_sample_rate; + unsigned int autosync_sample_rate; + unsigned char system_clock_mode; + unsigned char clock_source; + unsigned char autosync_ref; + unsigned char line_out; + unsigned int passthru; + unsigned int analog_out; +}; + +#define SNDRV_HDSPM_IOCTL_GET_CONFIG_INFO _IOR('H', 0x41, hdspm_config_info_t) + + +/* get Soundcard Version */ + +typedef struct _snd_hdspm_version hdspm_version_t; + +struct _snd_hdspm_version { + unsigned short firmware_rev; +}; + +#define SNDRV_HDSPM_IOCTL_GET_VERSION _IOR('H', 0x43, hdspm_version_t) + + +/* ------------- get Matrix Mixer IOCTL --------------- */ + +/* MADI mixer: 64inputs+64playback in 64outputs = 8192 => *4Byte = 32768 Bytes */ + +/* organisation is 64 channelfader in a continous memory block */ +/* equivalent to hardware definition, maybe for future feature of mmap of them */ +/* each of 64 outputs has 64 infader and 64 outfader: + Ins to Outs mixer[out].in[in], Outstreams to Outs mixer[out].pb[pb] */ + +#define HDSPM_MIXER_CHANNELS HDSPM_MAX_CHANNELS + +typedef struct _snd_hdspm_channelfader snd_hdspm_channelfader_t; + +struct _snd_hdspm_channelfader { + unsigned int in[HDSPM_MIXER_CHANNELS]; + unsigned int pb[HDSPM_MIXER_CHANNELS]; +}; + +typedef struct _snd_hdspm_mixer hdspm_mixer_t; + +struct _snd_hdspm_mixer { + snd_hdspm_channelfader_t ch[HDSPM_MIXER_CHANNELS]; +}; + +struct sndrv_hdspm_mixer_ioctl { + hdspm_mixer_t *mixer; +}; + +/* use indirect access due to the limit of ioctl bit size */ +#define SNDRV_HDSPM_IOCTL_GET_MIXER _IOR('H', 0x44, struct sndrv_hdspm_mixer_ioctl) + +#endif /* __SOUND_HDSPM_H */ diff --git a/include/sound/pcm.h b/include/sound/pcm.h index 53fc04d..d935417 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -848,23 +848,6 @@ int snd_interval_ratnum(snd_interval_t *i, void _snd_pcm_hw_params_any(snd_pcm_hw_params_t *params); void _snd_pcm_hw_param_setempty(snd_pcm_hw_params_t *params, 
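The peak/RMS ioctl above is indirect: the argument only carries a pointer to the (large) level structure. A user-space sketch of calling it; the hwdep device path is an assumption, and error handling is kept minimal.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sound/hdspm.h>

int main(void)
{
	hdspm_peak_rms_t peak;
	struct sndrv_hdspm_peak_rms_ioctl arg = { .peak = &peak };
	int fd = open("/dev/snd/hwC0D0", O_RDONLY);	/* assumed hwdep node */

	if (fd < 0 || ioctl(fd, SNDRV_HDSPM_IOCTL_GET_PEAK_RMS, &arg) < 0) {
		perror("hdspm peak/rms");
		return 1;
	}
	printf("input 0 peak: 0x%08x\n", peak.input_peak[0]);
	close(fd);
	return 0;
}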
snd_pcm_hw_param_t var); -int snd_pcm_hw_param_min(snd_pcm_substream_t *substream, - snd_pcm_hw_params_t *params, - snd_pcm_hw_param_t var, - unsigned int val, int *dir); -int snd_pcm_hw_param_max(snd_pcm_substream_t *substream, - snd_pcm_hw_params_t *params, - snd_pcm_hw_param_t var, - unsigned int val, int *dir); -int snd_pcm_hw_param_setinteger(snd_pcm_substream_t *substream, - snd_pcm_hw_params_t *params, - snd_pcm_hw_param_t var); -int snd_pcm_hw_param_first(snd_pcm_substream_t *substream, - snd_pcm_hw_params_t *params, - snd_pcm_hw_param_t var, int *dir); -int snd_pcm_hw_param_last(snd_pcm_substream_t *substream, - snd_pcm_hw_params_t *params, - snd_pcm_hw_param_t var, int *dir); int snd_pcm_hw_param_near(snd_pcm_substream_t *substream, snd_pcm_hw_params_t *params, snd_pcm_hw_param_t var, @@ -876,7 +859,6 @@ int snd_pcm_hw_param_set(snd_pcm_substream_t *pcm, int snd_pcm_hw_params_choose(snd_pcm_substream_t *substream, snd_pcm_hw_params_t *params); int snd_pcm_hw_refine(snd_pcm_substream_t *substream, snd_pcm_hw_params_t *params); -int snd_pcm_hw_params(snd_pcm_substream_t *substream, snd_pcm_hw_params_t *params); int snd_pcm_hw_constraints_init(snd_pcm_substream_t *substream); int snd_pcm_hw_constraints_complete(snd_pcm_substream_t *substream); @@ -922,8 +904,22 @@ int snd_pcm_format_unsigned(snd_pcm_format_t format); int snd_pcm_format_linear(snd_pcm_format_t format); int snd_pcm_format_little_endian(snd_pcm_format_t format); int snd_pcm_format_big_endian(snd_pcm_format_t format); +/** + * snd_pcm_format_cpu_endian - Check the PCM format is CPU-endian + * @format: the format to check + * + * Returns 1 if the given PCM format is CPU-endian, 0 if + * opposite, or a negative error code if endian not specified. + */ +/* int snd_pcm_format_cpu_endian(snd_pcm_format_t format); */ +#ifdef SNDRV_LITTLE_ENDIAN +#define snd_pcm_format_cpu_endian snd_pcm_format_little_endian +#else +#define snd_pcm_format_cpu_endian snd_pcm_format_big_endian +#endif int snd_pcm_format_width(snd_pcm_format_t format); /* in bits */ int snd_pcm_format_physical_width(snd_pcm_format_t format); /* in bits */ +ssize_t snd_pcm_format_size(snd_pcm_format_t format, size_t samples); const unsigned char *snd_pcm_format_silence_64(snd_pcm_format_t format); int snd_pcm_format_set_silence(snd_pcm_format_t format, void *buf, unsigned int frames); snd_pcm_format_t snd_pcm_build_linear_format(int width, int unsignd, int big_endian); diff --git a/include/sound/seq_midi_event.h b/include/sound/seq_midi_event.h index 4357cac..8857e2b 100644 --- a/include/sound/seq_midi_event.h +++ b/include/sound/seq_midi_event.h @@ -41,9 +41,7 @@ struct snd_midi_event_t { }; int snd_midi_event_new(int bufsize, snd_midi_event_t **rdev); -int snd_midi_event_resize_buffer(snd_midi_event_t *dev, int bufsize); void snd_midi_event_free(snd_midi_event_t *dev); -void snd_midi_event_init(snd_midi_event_t *dev); void snd_midi_event_reset_encode(snd_midi_event_t *dev); void snd_midi_event_reset_decode(snd_midi_event_t *dev); void snd_midi_event_no_status(snd_midi_event_t *dev, int on); diff --git a/include/sound/seq_virmidi.h b/include/sound/seq_virmidi.h index cf4e238..1ad27e8 100644 --- a/include/sound/seq_virmidi.h +++ b/include/sound/seq_virmidi.h @@ -79,6 +79,5 @@ struct _snd_virmidi_dev { #define SNDRV_VIRMIDI_SEQ_DISPATCH 2 int snd_virmidi_new(snd_card_t *card, int device, snd_rawmidi_t **rrmidi); -int snd_virmidi_receive(snd_rawmidi_t *rmidi, snd_seq_event_t *ev); #endif /* __SOUND_SEQ_VIRMIDI */ diff --git a/include/sound/timer.h 
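snd_pcm_format_cpu_endian() and snd_pcm_format_size() above make it easy to validate a format and convert a sample count to bytes. A small driver-side sketch using both, assuming the usual ALSA includes; the helper name is illustrative.

static int example_buffer_bytes(snd_pcm_format_t format, size_t samples)
{
	ssize_t bytes;

	if (snd_pcm_format_cpu_endian(format) != 1)
		return -EINVAL;		/* not a native-endian linear format */

	bytes = snd_pcm_format_size(format, samples);
	if (bytes < 0)
		return -EINVAL;		/* unknown or unsized format */
	return bytes;
}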
b/include/sound/timer.h index 57fde99..1898511 100644 --- a/include/sound/timer.h +++ b/include/sound/timer.h @@ -152,6 +152,4 @@ extern int snd_timer_pause(snd_timer_instance_t * timeri); extern void snd_timer_interrupt(snd_timer_t * timer, unsigned long ticks_left); -extern unsigned int snd_timer_system_resolution(void); - #endif /* __SOUND_TIMER_H */ diff --git a/include/sound/version.h b/include/sound/version.h index 98b4230..46acfa8 100644 --- a/include/sound/version.h +++ b/include/sound/version.h @@ -1,3 +1,3 @@ /* include/version.h. Generated by configure. */ -#define CONFIG_SND_VERSION "1.0.9rc2" -#define CONFIG_SND_DATE " (Thu Mar 24 10:33:39 2005 UTC)" +#define CONFIG_SND_VERSION "1.0.9" +#define CONFIG_SND_DATE " (Sun May 29 07:31:02 2005 UTC)"